repo_name
string
path
string
copies
string
size
string
content
string
license
string
bsmitty83/NeWsEnSe
arch/mips/math-emu/dp_add.c
7838
4674
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## * */ #include "ieee754dp.h" ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y) { COMPXDP; COMPYDP; EXPLODEXDP; EXPLODEYDP; CLEARCX; FLUSHXDP; FLUSHYDP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, 
IEEE754_CLASS_QNAN): return y; case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; /* Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): if (xs == ys) return x; SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): return y; case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return x; /* Zero handling */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): if (xs == ys) return x; else return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): return y; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; /* FALL THROUGH */ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } assert(xm & DP_HIDDEN_BIT); assert(ym & DP_HIDDEN_BIT); /* provide guard,round and stick bit space */ xm <<= 3; ym <<= 3; if (xe > ye) { /* have to shift y fraction right to align */ int s = xe - ye; ym = XDPSRS(ym, s); ye += s; } else if (ye > xe) { /* have to shift x fraction right to align */ int s = ye - xe; xm = XDPSRS(xm, s); xe += s; } assert(xe == ye); assert(xe <= DP_EMAX); if (xs == ys) { /* generate 28 bit result of adding two 27 bit numbers * leaving result in xm,xs,xe */ xm = 
xm + ym; xe = xe; xs = xs; if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ xm = XDPSRS1(xm); xe++; } } else { if (xm >= ym) { xm = xm - ym; xe = xe; xs = xs; } else { xm = ym - xm; xe = xe; xs = ys; } if (xm == 0) return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); /* normalize to rounding precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET2(xs, xe, xm, "add", x, y); }
gpl-2.0
Pafcholini/Nadia-kernel-LL-N910F-EUR-LL-OpenSource
drivers/media/pci/saa7164/saa7164-dvb.c
8094
15853
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "saa7164.h" #include "tda10048.h" #include "tda18271.h" #include "s5h1411.h" #define DRIVER_NAME "saa7164" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* addr is in the card struct, get it from there */ static struct tda10048_config hauppauge_hvr2200_1_config = { .demod_address = 0x10 >> 1, .output_mode = TDA10048_SERIAL_OUTPUT, .fwbulkwritelen = TDA10048_BULKWRITE_200, .inversion = TDA10048_INVERSION_ON, .dtv6_if_freq_khz = TDA10048_IF_3300, .dtv7_if_freq_khz = TDA10048_IF_3500, .dtv8_if_freq_khz = TDA10048_IF_4000, .clk_freq_khz = TDA10048_CLK_16000, }; static struct tda10048_config hauppauge_hvr2200_2_config = { .demod_address = 0x12 >> 1, .output_mode = TDA10048_SERIAL_OUTPUT, .fwbulkwritelen = TDA10048_BULKWRITE_200, .inversion = TDA10048_INVERSION_ON, .dtv6_if_freq_khz = TDA10048_IF_3300, .dtv7_if_freq_khz = TDA10048_IF_3500, .dtv8_if_freq_khz = TDA10048_IF_4000, .clk_freq_khz = TDA10048_CLK_16000, }; static struct tda18271_std_map hauppauge_tda18271_std_map = { .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 3, .if_lvl = 6, .rfagc_top = 0x37 }, .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, .if_lvl = 6, .rfagc_top = 0x37 }, }; static struct tda18271_config 
hauppauge_hvr22x0_tuner_config = { .std_map = &hauppauge_tda18271_std_map, .gate = TDA18271_GATE_ANALOG, .role = TDA18271_MASTER, }; static struct tda18271_config hauppauge_hvr22x0s_tuner_config = { .std_map = &hauppauge_tda18271_std_map, .gate = TDA18271_GATE_ANALOG, .role = TDA18271_SLAVE, .output_opt = TDA18271_OUTPUT_LT_OFF, .rf_cal_on_startup = 1 }; static struct s5h1411_config hauppauge_s5h1411_config = { .output_mode = S5H1411_SERIAL_OUTPUT, .gpio = S5H1411_GPIO_ON, .qam_if = S5H1411_IF_4000, .vsb_if = S5H1411_IF_3250, .inversion = S5H1411_INVERSION_ON, .status_mode = S5H1411_DEMODLOCKING, .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, }; static int saa7164_dvb_stop_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Stopped\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_acquire_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); ret = 0; } return ret; } static int saa7164_dvb_pause_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n", __func__, ret); ret = -EIO; } else { dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); ret = 0; } return ret; } /* Firmware is very windows centric, meaning you have to transition * the part through AVStream / KS Windows stages, 
forwards or backwards. * States are: stopped, acquired (h/w), paused, started. */ static int saa7164_dvb_stop_streaming(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; struct list_head *p, *q; int ret; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); ret = saa7164_dvb_pause_port(port); ret = saa7164_dvb_acquire_port(port); ret = saa7164_dvb_stop_port(port); /* Mark the hardware buffers as free */ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(p, q, &port->dmaqueue.list) { buf = list_entry(p, struct saa7164_buffer, list); buf->flags = SAA7164_BUFFER_FREE; } mutex_unlock(&port->dmaqueue_lock); return ret; } static int saa7164_dvb_start_port(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret = 0, result; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); saa7164_buffer_cfg_port(port); /* Acquire the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() acquire/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else dprintk(DBGLVL_DVB, "%s() Acquired\n", __func__); /* Pause the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() pause/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; goto out; } else 
dprintk(DBGLVL_DVB, "%s() Paused\n", __func__); /* Start the hardware */ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run transition failed, result = 0x%x\n", __func__, result); /* Stop the hardware, regardless */ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP); if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) { printk(KERN_ERR "%s() run/forced stop transition " "failed, res = 0x%x\n", __func__, result); } ret = -EIO; } else dprintk(DBGLVL_DVB, "%s() Running\n", __func__); out: return ret; } static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (!demux->dmx.frontend) return -EINVAL; if (dvb) { mutex_lock(&dvb->lock); if (dvb->feeding++ == 0) { /* Start transport */ ret = saa7164_dvb_start_port(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct saa7164_port *port = (struct saa7164_port *) demux->priv; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; int ret = 0; dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr); if (dvb) { mutex_lock(&dvb->lock); if (--dvb->feeding == 0) { /* Stop transport */ ret = saa7164_dvb_stop_streaming(port); } mutex_unlock(&dvb->lock); dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n", __func__, port->nr, dvb->feeding); } return ret; } static int dvb_register(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; int result, i; dprintk(DBGLVL_DVB, 
"%s(port=%d)\n", __func__, port->nr); if (port->type != SAA7164_MPEG_DVB) BUG(); /* Sanity check that the PCI configuration space is active */ if (port->hwcfg.BARLocation == 0) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d), NO PCI configuration\n", DRIVER_NAME, result); goto fail_adapter; } /* Init and establish defaults */ port->hw_streamingparams.bitspersample = 8; port->hw_streamingparams.samplesperline = 188; port->hw_streamingparams.numberoflines = (SAA7164_TS_NUMBER_OF_LINES * 188) / 188; port->hw_streamingparams.pitch = 188; port->hw_streamingparams.linethreshold = 0; port->hw_streamingparams.pagetablelistvirt = NULL; port->hw_streamingparams.pagetablelistphys = NULL; port->hw_streamingparams.numpagetables = 2 + ((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE); port->hw_streamingparams.numpagetableentries = port->hwcfg.buffercount; /* Allocate the PCI resources */ for (i = 0; i < port->hwcfg.buffercount; i++) { buf = saa7164_buffer_alloc(port, port->hw_streamingparams.numberoflines * port->hw_streamingparams.pitch); if (!buf) { result = -ENOMEM; printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d), unable to allocate buffers\n", DRIVER_NAME, result); goto fail_adapter; } mutex_lock(&port->dmaqueue_lock); list_add_tail(&buf->list, &port->dmaqueue.list); mutex_unlock(&port->dmaqueue_lock); } /* register adapter */ result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE, &dev->pci->dev, adapter_nr); if (result < 0) { printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_adapter; } dvb->adapter.priv = port; /* register frontend */ result = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (result < 0) { printk(KERN_ERR "%s: dvb_register_frontend failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_frontend; } /* register demux stuff */ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; 
dvb->demux.priv = port; dvb->demux.filternum = 256; dvb->demux.feednum = 256; dvb->demux.start_feed = saa7164_dvb_start_feed; dvb->demux.stop_feed = saa7164_dvb_stop_feed; result = dvb_dmx_init(&dvb->demux); if (result < 0) { printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmx; } dvb->dmxdev.filternum = 256; dvb->dmxdev.demux = &dvb->demux.dmx; dvb->dmxdev.capabilities = 0; result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); if (result < 0) { printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmxdev; } dvb->fe_hw.source = DMX_FRONTEND_0; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_hw; } dvb->fe_mem.source = DMX_MEMORY_FE; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_mem; } result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n", DRIVER_NAME, result); goto fail_fe_conn; } /* register network adapter */ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); return 0; fail_fe_conn: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); fail_fe_mem: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); fail_fe_hw: dvb_dmxdev_release(&dvb->dmxdev); fail_dmxdev: dvb_dmx_release(&dvb->demux); fail_dmx: dvb_unregister_frontend(dvb->frontend); fail_frontend: dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); fail_adapter: return result; } int saa7164_dvb_unregister(struct saa7164_port *port) { struct saa7164_dvb *dvb = &port->dvb; struct saa7164_dev *dev = port->dev; struct saa7164_buffer *b; struct list_head *c, *n; dprintk(DBGLVL_DVB, "%s()\n", 
__func__); if (port->type != SAA7164_MPEG_DVB) BUG(); /* Remove any allocated buffers */ mutex_lock(&port->dmaqueue_lock); list_for_each_safe(c, n, &port->dmaqueue.list) { b = list_entry(c, struct saa7164_buffer, list); list_del(c); saa7164_buffer_dealloc(b); } mutex_unlock(&port->dmaqueue_lock); if (dvb->frontend == NULL) return 0; dvb_net_release(&dvb->net); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); dvb_unregister_frontend(dvb->frontend); dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); return 0; } /* All the DVB attach calls go here, this function get's modified * for each new card. */ int saa7164_dvb_register(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_dvb *dvb = &port->dvb; struct saa7164_i2c *i2c_bus = NULL; int ret; dprintk(DBGLVL_DVB, "%s()\n", __func__); /* init frontend */ switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: i2c_bus = &dev->i2c_bus[port->nr + 1]; switch (port->nr) { case 0: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_1_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } break; case 1: port->dvb.frontend = dvb_attach(tda10048_attach, &hauppauge_hvr2200_2_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } break; } break; case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case 
SAA7164_BOARD_HAUPPAUGE_HVR2250_3: i2c_bus = &dev->i2c_bus[port->nr + 1]; port->dvb.frontend = dvb_attach(s5h1411_attach, &hauppauge_s5h1411_config, &i2c_bus->i2c_adap); if (port->dvb.frontend != NULL) { if (port->nr == 0) { /* Master TDA18271 */ /* TODO: addr is in the card struct */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0_tuner_config); } else { /* Slave TDA18271 */ dvb_attach(tda18271_attach, port->dvb.frontend, 0xc0 >> 1, &i2c_bus->i2c_adap, &hauppauge_hvr22x0s_tuner_config); } } break; default: printk(KERN_ERR "%s: The frontend isn't supported\n", dev->name); break; } if (NULL == dvb->frontend) { printk(KERN_ERR "%s() Frontend initialization failed\n", __func__); return -1; } /* register everything */ ret = dvb_register(port); if (ret < 0) { if (dvb->frontend->ops.release) dvb->frontend->ops.release(dvb->frontend); return ret; } return 0; }
gpl-2.0
nyterage/Galaxy_Tab_3_217s
drivers/infiniband/hw/ipath/ipath_qp.c
11678
26688
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ipath_verbs.h" #include "ipath_kernel.h" #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) #define mk_qpn(qpt, map, off) (((map) - (qpt)->map) * BITS_PER_PAGE + \ (off)) #define find_next_offset(map, off) find_next_zero_bit((map)->page, \ BITS_PER_PAGE, off) /* * Convert the AETH credit code into the number of credits. 
*/ static u32 credit_table[31] = { 0, /* 0 */ 1, /* 1 */ 2, /* 2 */ 3, /* 3 */ 4, /* 4 */ 6, /* 5 */ 8, /* 6 */ 12, /* 7 */ 16, /* 8 */ 24, /* 9 */ 32, /* A */ 48, /* B */ 64, /* C */ 96, /* D */ 128, /* E */ 192, /* F */ 256, /* 10 */ 384, /* 11 */ 512, /* 12 */ 768, /* 13 */ 1024, /* 14 */ 1536, /* 15 */ 2048, /* 16 */ 3072, /* 17 */ 4096, /* 18 */ 6144, /* 19 */ 8192, /* 1A */ 12288, /* 1B */ 16384, /* 1C */ 24576, /* 1D */ 32768 /* 1E */ }; static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map) { unsigned long page = get_zeroed_page(GFP_KERNEL); unsigned long flags; /* * Free the page if someone raced with us installing it. */ spin_lock_irqsave(&qpt->lock, flags); if (map->page) free_page(page); else map->page = (void *)page; spin_unlock_irqrestore(&qpt->lock, flags); } static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type) { u32 i, offset, max_scan, qpn; struct qpn_map *map; u32 ret = -1; if (type == IB_QPT_SMI) ret = 0; else if (type == IB_QPT_GSI) ret = 1; if (ret != -1) { map = &qpt->map[0]; if (unlikely(!map->page)) { get_map_page(qpt, map); if (unlikely(!map->page)) { ret = -ENOMEM; goto bail; } } if (!test_and_set_bit(ret, map->page)) atomic_dec(&map->n_free); else ret = -EBUSY; goto bail; } qpn = qpt->last + 1; if (qpn >= QPN_MAX) qpn = 2; offset = qpn & BITS_PER_PAGE_MASK; map = &qpt->map[qpn / BITS_PER_PAGE]; max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { get_map_page(qpt, map); if (unlikely(!map->page)) break; } if (likely(atomic_read(&map->n_free))) { do { if (!test_and_set_bit(offset, map->page)) { atomic_dec(&map->n_free); qpt->last = qpn; ret = qpn; goto bail; } offset = find_next_offset(map, offset); qpn = mk_qpn(qpt, map, offset); /* * This test differs from alloc_pidmap(). * If find_next_offset() does find a zero * bit, we don't need to check for QPN * wrapping around past our starting QPN. * We just need to be sure we don't loop * forever. 
*/ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); } /* * In order to keep the number of pages allocated to a * minimum, we scan the all existing pages before increasing * the size of the bitmap table. */ if (++i > max_scan) { if (qpt->nmaps == QPNMAP_ENTRIES) break; map = &qpt->map[qpt->nmaps++]; offset = 0; } else if (map < &qpt->map[qpt->nmaps]) { ++map; offset = 0; } else { map = &qpt->map[0]; offset = 2; } qpn = mk_qpn(qpt, map, offset); } ret = -ENOMEM; bail: return ret; } static void free_qpn(struct ipath_qp_table *qpt, u32 qpn) { struct qpn_map *map; map = qpt->map + qpn / BITS_PER_PAGE; if (map->page) clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); atomic_inc(&map->n_free); } /** * ipath_alloc_qpn - allocate a QP number * @qpt: the QP table * @qp: the QP * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special) * * Allocate the next available QPN and put the QP into the hash table. * The hash table holds a reference to the QP. */ static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, enum ib_qp_type type) { unsigned long flags; int ret; ret = alloc_qpn(qpt, type); if (ret < 0) goto bail; qp->ibqp.qp_num = ret; /* Add the QP to the hash table. */ spin_lock_irqsave(&qpt->lock, flags); ret %= qpt->max; qp->next = qpt->table[ret]; qpt->table[ret] = qp; atomic_inc(&qp->refcount); spin_unlock_irqrestore(&qpt->lock, flags); ret = 0; bail: return ret; } /** * ipath_free_qp - remove a QP from the QP table * @qpt: the QP table * @qp: the QP to remove * * Remove the QP from the table so it can't be found asynchronously by * the receive interrupt routine. */ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) { struct ipath_qp *q, **qpp; unsigned long flags; spin_lock_irqsave(&qpt->lock, flags); /* Remove QP from the hash table. 
*/ qpp = &qpt->table[qp->ibqp.qp_num % qpt->max]; for (; (q = *qpp) != NULL; qpp = &q->next) { if (q == qp) { *qpp = qp->next; qp->next = NULL; atomic_dec(&qp->refcount); break; } } spin_unlock_irqrestore(&qpt->lock, flags); } /** * ipath_free_all_qps - check for QPs still in use * @qpt: the QP table to empty * * There should not be any QPs still in use. * Free memory for table. */ unsigned ipath_free_all_qps(struct ipath_qp_table *qpt) { unsigned long flags; struct ipath_qp *qp; u32 n, qp_inuse = 0; spin_lock_irqsave(&qpt->lock, flags); for (n = 0; n < qpt->max; n++) { qp = qpt->table[n]; qpt->table[n] = NULL; for (; qp; qp = qp->next) qp_inuse++; } spin_unlock_irqrestore(&qpt->lock, flags); for (n = 0; n < ARRAY_SIZE(qpt->map); n++) if (qpt->map[n].page) free_page((unsigned long) qpt->map[n].page); return qp_inuse; } /** * ipath_lookup_qpn - return the QP with the given QPN * @qpt: the QP table * @qpn: the QP number to look up * * The caller is responsible for decrementing the QP reference count * when done. 
*/ struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn) { unsigned long flags; struct ipath_qp *qp; spin_lock_irqsave(&qpt->lock, flags); for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) { if (qp->ibqp.qp_num == qpn) { atomic_inc(&qp->refcount); break; } } spin_unlock_irqrestore(&qpt->lock, flags); return qp; } /** * ipath_reset_qp - initialize the QP state to the reset state * @qp: the QP to reset * @type: the QP type */ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type) { qp->remote_qpn = 0; qp->qkey = 0; qp->qp_access_flags = 0; atomic_set(&qp->s_dma_busy, 0); qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; qp->s_hdrwords = 0; qp->s_wqe = NULL; qp->s_pkt_delay = 0; qp->s_draining = 0; qp->s_psn = 0; qp->r_psn = 0; qp->r_msn = 0; if (type == IB_QPT_RC) { qp->s_state = IB_OPCODE_RC_SEND_LAST; qp->r_state = IB_OPCODE_RC_SEND_LAST; } else { qp->s_state = IB_OPCODE_UC_SEND_LAST; qp->r_state = IB_OPCODE_UC_SEND_LAST; } qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; qp->r_nak_state = 0; qp->r_aflags = 0; qp->r_flags = 0; qp->s_rnr_timeout = 0; qp->s_head = 0; qp->s_tail = 0; qp->s_cur = 0; qp->s_last = 0; qp->s_ssn = 1; qp->s_lsn = 0; memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); qp->r_head_ack_queue = 0; qp->s_tail_ack_queue = 0; qp->s_num_rd_atomic = 0; if (qp->r_rq.wq) { qp->r_rq.wq->head = 0; qp->r_rq.wq->tail = 0; } } /** * ipath_error_qp - put a QP into the error state * @qp: the QP to put into the error state * @err: the receive completion error to signal if a RWQE is active * * Flushes both send and receive work queues. * Returns true if last WQE event should be generated. * The QP s_lock should be held and interrupts disabled. * If we are already in error state, just return. 
*/ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) { struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ib_wc wc; int ret = 0; if (qp->state == IB_QPS_ERR) goto bail; qp->state = IB_QPS_ERR; spin_lock(&dev->pending_lock); if (!list_empty(&qp->timerwait)) list_del_init(&qp->timerwait); if (!list_empty(&qp->piowait)) list_del_init(&qp->piowait); spin_unlock(&dev->pending_lock); /* Schedule the sending tasklet to drain the send work queue. */ if (qp->s_last != qp->s_head) ipath_schedule_send(qp); memset(&wc, 0, sizeof(wc)); wc.qp = &qp->ibqp; wc.opcode = IB_WC_RECV; if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) { wc.wr_id = qp->r_wr_id; wc.status = err; ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); } wc.status = IB_WC_WR_FLUSH_ERR; if (qp->r_rq.wq) { struct ipath_rwq *wq; u32 head; u32 tail; spin_lock(&qp->r_rq.lock); /* sanity check pointers before trusting them */ wq = qp->r_rq.wq; head = wq->head; if (head >= qp->r_rq.size) head = 0; tail = wq->tail; if (tail >= qp->r_rq.size) tail = 0; while (tail != head) { wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; if (++tail >= qp->r_rq.size) tail = 0; ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); } wq->tail = tail; spin_unlock(&qp->r_rq.lock); } else if (qp->ibqp.event_handler) ret = 1; bail: return ret; } /** * ipath_modify_qp - modify the attributes of a queue pair * @ibqp: the queue pair who's attributes we're modifying * @attr: the new attributes * @attr_mask: the mask of attributes to modify * @udata: user data for ipathverbs.so * * Returns 0 on success, otherwise returns an errno. */ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct ipath_ibdev *dev = to_idev(ibqp->device); struct ipath_qp *qp = to_iqp(ibqp); enum ib_qp_state cur_state, new_state; int lastwqe = 0; int ret; spin_lock_irq(&qp->s_lock); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) goto inval; if (attr_mask & IB_QP_AV) { if (attr->ah_attr.dlid == 0 || attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE) goto inval; if ((attr->ah_attr.ah_flags & IB_AH_GRH) && (attr->ah_attr.grh.sgid_index > 1)) goto inval; } if (attr_mask & IB_QP_PKEY_INDEX) if (attr->pkey_index >= ipath_get_npkeys(dev->dd)) goto inval; if (attr_mask & IB_QP_MIN_RNR_TIMER) if (attr->min_rnr_timer > 31) goto inval; if (attr_mask & IB_QP_PORT) if (attr->port_num == 0 || attr->port_num > ibqp->device->phys_port_cnt) goto inval; /* * don't allow invalid Path MTU values or greater than 2048 * unless we are configured for a 4KB MTU */ if ((attr_mask & IB_QP_PATH_MTU) && (ib_mtu_enum_to_int(attr->path_mtu) == -1 || (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096))) goto inval; if (attr_mask & IB_QP_PATH_MIG_STATE) if (attr->path_mig_state != IB_MIG_MIGRATED && attr->path_mig_state != IB_MIG_REARM) goto inval; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC) goto inval; switch (new_state) { case IB_QPS_RESET: if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; spin_lock(&dev->pending_lock); if (!list_empty(&qp->timerwait)) list_del_init(&qp->timerwait); if (!list_empty(&qp->piowait)) list_del_init(&qp->piowait); spin_unlock(&dev->pending_lock); qp->s_flags &= ~IPATH_S_ANY_WAIT; spin_unlock_irq(&qp->s_lock); /* Stop the sending tasklet */ tasklet_kill(&qp->s_task); wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); spin_lock_irq(&qp->s_lock); } ipath_reset_qp(qp, ibqp->qp_type); break; case IB_QPS_SQD: qp->s_draining = qp->s_last != qp->s_cur; qp->state = new_state; break; case IB_QPS_SQE: if (qp->ibqp.qp_type == IB_QPT_RC) goto inval; qp->state = new_state; break; case IB_QPS_ERR: lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); break; default: qp->state = new_state; break; } if (attr_mask & IB_QP_PKEY_INDEX) qp->s_pkey_index = 
attr->pkey_index; if (attr_mask & IB_QP_DEST_QPN) qp->remote_qpn = attr->dest_qp_num; if (attr_mask & IB_QP_SQ_PSN) { qp->s_psn = qp->s_next_psn = attr->sq_psn; qp->s_last_psn = qp->s_next_psn - 1; } if (attr_mask & IB_QP_RQ_PSN) qp->r_psn = attr->rq_psn; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->qp_access_flags = attr->qp_access_flags; if (attr_mask & IB_QP_AV) { qp->remote_ah_attr = attr->ah_attr; qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate); } if (attr_mask & IB_QP_PATH_MTU) qp->path_mtu = attr->path_mtu; if (attr_mask & IB_QP_RETRY_CNT) qp->s_retry = qp->s_retry_cnt = attr->retry_cnt; if (attr_mask & IB_QP_RNR_RETRY) { qp->s_rnr_retry = attr->rnr_retry; if (qp->s_rnr_retry > 7) qp->s_rnr_retry = 7; qp->s_rnr_retry_cnt = qp->s_rnr_retry; } if (attr_mask & IB_QP_MIN_RNR_TIMER) qp->r_min_rnr_timer = attr->min_rnr_timer; if (attr_mask & IB_QP_TIMEOUT) qp->timeout = attr->timeout; if (attr_mask & IB_QP_QKEY) qp->qkey = attr->qkey; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->r_max_rd_atomic = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) qp->s_max_rd_atomic = attr->max_rd_atomic; spin_unlock_irq(&qp->s_lock); if (lastwqe) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } ret = 0; goto bail; inval: spin_unlock_irq(&qp->s_lock); ret = -EINVAL; bail: return ret; } int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct ipath_qp *qp = to_iqp(ibqp); attr->qp_state = qp->state; attr->cur_qp_state = attr->qp_state; attr->path_mtu = qp->path_mtu; attr->path_mig_state = 0; attr->qkey = qp->qkey; attr->rq_psn = qp->r_psn; attr->sq_psn = qp->s_next_psn; attr->dest_qp_num = qp->remote_qpn; attr->qp_access_flags = qp->qp_access_flags; attr->cap.max_send_wr = qp->s_size - 1; attr->cap.max_recv_wr = qp->ibqp.srq ? 
0 : qp->r_rq.size - 1; attr->cap.max_send_sge = qp->s_max_sge; attr->cap.max_recv_sge = qp->r_rq.max_sge; attr->cap.max_inline_data = 0; attr->ah_attr = qp->remote_ah_attr; memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr)); attr->pkey_index = qp->s_pkey_index; attr->alt_pkey_index = 0; attr->en_sqd_async_notify = 0; attr->sq_draining = qp->s_draining; attr->max_rd_atomic = qp->s_max_rd_atomic; attr->max_dest_rd_atomic = qp->r_max_rd_atomic; attr->min_rnr_timer = qp->r_min_rnr_timer; attr->port_num = 1; attr->timeout = qp->timeout; attr->retry_cnt = qp->s_retry_cnt; attr->rnr_retry = qp->s_rnr_retry_cnt; attr->alt_port_num = 0; attr->alt_timeout = 0; init_attr->event_handler = qp->ibqp.event_handler; init_attr->qp_context = qp->ibqp.qp_context; init_attr->send_cq = qp->ibqp.send_cq; init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->srq = qp->ibqp.srq; init_attr->cap = attr->cap; if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR) init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; else init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->qp_type = qp->ibqp.qp_type; init_attr->port_num = 1; return 0; } /** * ipath_compute_aeth - compute the AETH (syndrome + MSN) * @qp: the queue pair to compute the AETH for * * Returns the AETH. */ __be32 ipath_compute_aeth(struct ipath_qp *qp) { u32 aeth = qp->r_msn & IPATH_MSN_MASK; if (qp->ibqp.srq) { /* * Shared receive queues don't generate credits. * Set the credit field to the invalid value. */ aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT; } else { u32 min, max, x; u32 credits; struct ipath_rwq *wq = qp->r_rq.wq; u32 head; u32 tail; /* sanity check pointers before trusting them */ head = wq->head; if (head >= qp->r_rq.size) head = 0; tail = wq->tail; if (tail >= qp->r_rq.size) tail = 0; /* * Compute the number of credits available (RWQEs). * XXX Not holding the r_rq.lock here so there is a small * chance that the pair of reads are not atomic. 
*/ credits = head - tail; if ((int)credits < 0) credits += qp->r_rq.size; /* * Binary search the credit table to find the code to * use. */ min = 0; max = 31; for (;;) { x = (min + max) / 2; if (credit_table[x] == credits) break; if (credit_table[x] > credits) max = x; else if (min == x) break; else min = x; } aeth |= x << IPATH_AETH_CREDIT_SHIFT; } return cpu_to_be32(aeth); } /** * ipath_create_qp - create a queue pair for a device * @ibpd: the protection domain who's device we create the queue pair for * @init_attr: the attributes of the queue pair * @udata: unused by InfiniPath * * Returns the queue pair on success, otherwise returns an errno. * * Called by the ib_create_qp() core verbs function. */ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct ipath_qp *qp; int err; struct ipath_swqe *swq = NULL; struct ipath_ibdev *dev; size_t sz; size_t sg_list_sz; struct ib_qp *ret; if (init_attr->create_flags) { ret = ERR_PTR(-EINVAL); goto bail; } if (init_attr->cap.max_send_sge > ib_ipath_max_sges || init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) { ret = ERR_PTR(-EINVAL); goto bail; } /* Check receive queue parameters if no SRQ is specified. 
*/ if (!init_attr->srq) { if (init_attr->cap.max_recv_sge > ib_ipath_max_sges || init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) { ret = ERR_PTR(-EINVAL); goto bail; } if (init_attr->cap.max_send_sge + init_attr->cap.max_send_wr + init_attr->cap.max_recv_sge + init_attr->cap.max_recv_wr == 0) { ret = ERR_PTR(-EINVAL); goto bail; } } switch (init_attr->qp_type) { case IB_QPT_UC: case IB_QPT_RC: case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: sz = sizeof(struct ipath_sge) * init_attr->cap.max_send_sge + sizeof(struct ipath_swqe); swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); if (swq == NULL) { ret = ERR_PTR(-ENOMEM); goto bail; } sz = sizeof(*qp); sg_list_sz = 0; if (init_attr->srq) { struct ipath_srq *srq = to_isrq(init_attr->srq); if (srq->rq.max_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * (srq->rq.max_sge - 1); } else if (init_attr->cap.max_recv_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * (init_attr->cap.max_recv_sge - 1); qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); if (!qp) { ret = ERR_PTR(-ENOMEM); goto bail_swq; } if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD || init_attr->qp_type == IB_QPT_SMI || init_attr->qp_type == IB_QPT_GSI)) { qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); if (!qp->r_ud_sg_list) { ret = ERR_PTR(-ENOMEM); goto bail_qp; } } else qp->r_ud_sg_list = NULL; if (init_attr->srq) { sz = 0; qp->r_rq.size = 0; qp->r_rq.max_sge = 0; qp->r_rq.wq = NULL; init_attr->cap.max_recv_wr = 0; init_attr->cap.max_recv_sge = 0; } else { qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qp->r_rq.max_sge = init_attr->cap.max_recv_sge; sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + sizeof(struct ipath_rwqe); qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + qp->r_rq.size * sz); if (!qp->r_rq.wq) { ret = ERR_PTR(-ENOMEM); goto bail_sg_list; } } /* * ib_create_qp() will initialize qp->ibqp * except for qp->ibqp.qp_num. 
*/ spin_lock_init(&qp->s_lock); spin_lock_init(&qp->r_rq.lock); atomic_set(&qp->refcount, 0); init_waitqueue_head(&qp->wait); init_waitqueue_head(&qp->wait_dma); tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp); INIT_LIST_HEAD(&qp->piowait); INIT_LIST_HEAD(&qp->timerwait); qp->state = IB_QPS_RESET; qp->s_wq = swq; qp->s_size = init_attr->cap.max_send_wr + 1; qp->s_max_sge = init_attr->cap.max_send_sge; if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) qp->s_flags = IPATH_S_SIGNAL_REQ_WR; else qp->s_flags = 0; dev = to_idev(ibpd->device); err = ipath_alloc_qpn(&dev->qp_table, qp, init_attr->qp_type); if (err) { ret = ERR_PTR(err); vfree(qp->r_rq.wq); goto bail_sg_list; } qp->ip = NULL; qp->s_tx = NULL; ipath_reset_qp(qp, init_attr->qp_type); break; default: /* Don't support raw QPs */ ret = ERR_PTR(-ENOSYS); goto bail; } init_attr->cap.max_inline_data = 0; /* * Return the address of the RWQ as the offset to mmap. * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { if (!qp->r_rq.wq) { __u64 offset = 0; err = ib_copy_to_udata(udata, &offset, sizeof(offset)); if (err) { ret = ERR_PTR(err); goto bail_ip; } } else { u32 s = sizeof(struct ipath_rwq) + qp->r_rq.size * sz; qp->ip = ipath_create_mmap_info(dev, s, ibpd->uobject->context, qp->r_rq.wq); if (!qp->ip) { ret = ERR_PTR(-ENOMEM); goto bail_ip; } err = ib_copy_to_udata(udata, &(qp->ip->offset), sizeof(qp->ip->offset)); if (err) { ret = ERR_PTR(err); goto bail_ip; } } } spin_lock(&dev->n_qps_lock); if (dev->n_qps_allocated == ib_ipath_max_qps) { spin_unlock(&dev->n_qps_lock); ret = ERR_PTR(-ENOMEM); goto bail_ip; } dev->n_qps_allocated++; spin_unlock(&dev->n_qps_lock); if (qp->ip) { spin_lock_irq(&dev->pending_lock); list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } ret = &qp->ibqp; goto bail; bail_ip: if (qp->ip) kref_put(&qp->ip->ref, ipath_release_mmap_info); else vfree(qp->r_rq.wq); ipath_free_qp(&dev->qp_table, qp); 
free_qpn(&dev->qp_table, qp->ibqp.qp_num); bail_sg_list: kfree(qp->r_ud_sg_list); bail_qp: kfree(qp); bail_swq: vfree(swq); bail: return ret; } /** * ipath_destroy_qp - destroy a queue pair * @ibqp: the queue pair to destroy * * Returns 0 on success. * * Note that this can be called while the QP is actively sending or * receiving! */ int ipath_destroy_qp(struct ib_qp *ibqp) { struct ipath_qp *qp = to_iqp(ibqp); struct ipath_ibdev *dev = to_idev(ibqp->device); /* Make sure HW and driver activity is stopped. */ spin_lock_irq(&qp->s_lock); if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; spin_lock(&dev->pending_lock); if (!list_empty(&qp->timerwait)) list_del_init(&qp->timerwait); if (!list_empty(&qp->piowait)) list_del_init(&qp->piowait); spin_unlock(&dev->pending_lock); qp->s_flags &= ~IPATH_S_ANY_WAIT; spin_unlock_irq(&qp->s_lock); /* Stop the sending tasklet */ tasklet_kill(&qp->s_task); wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); } else spin_unlock_irq(&qp->s_lock); ipath_free_qp(&dev->qp_table, qp); if (qp->s_tx) { atomic_dec(&qp->refcount); if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) kfree(qp->s_tx->txreq.map_addr); spin_lock_irq(&dev->pending_lock); list_add(&qp->s_tx->txreq.list, &dev->txreq_free); spin_unlock_irq(&dev->pending_lock); qp->s_tx = NULL; } wait_event(qp->wait, !atomic_read(&qp->refcount)); /* all user's cleaned up, mark it available */ free_qpn(&dev->qp_table, qp->ibqp.qp_num); spin_lock(&dev->n_qps_lock); dev->n_qps_allocated--; spin_unlock(&dev->n_qps_lock); if (qp->ip) kref_put(&qp->ip->ref, ipath_release_mmap_info); else vfree(qp->r_rq.wq); kfree(qp->r_ud_sg_list); vfree(qp->s_wq); kfree(qp); return 0; } /** * ipath_init_qp_table - initialize the QP table for a device * @idev: the device who's QP table we're initializing * @size: the size of the QP table * * Returns 0 on success, otherwise returns an errno. 
*/ int ipath_init_qp_table(struct ipath_ibdev *idev, int size) { int i; int ret; idev->qp_table.last = 1; /* QPN 0 and 1 are special. */ idev->qp_table.max = size; idev->qp_table.nmaps = 1; idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table), GFP_KERNEL); if (idev->qp_table.table == NULL) { ret = -ENOMEM; goto bail; } for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) { atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE); idev->qp_table.map[i].page = NULL; } ret = 0; bail: return ret; } /** * ipath_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush * @aeth: the Acknowledge Extended Transport Header * * The QP s_lock should be held. */ void ipath_get_credit(struct ipath_qp *qp, u32 aeth) { u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK; /* * If the credit is invalid, we can send * as many packets as we like. Otherwise, we have to * honor the credit field. */ if (credit == IPATH_AETH_CREDIT_INVAL) qp->s_lsn = (u32) -1; else if (qp->s_lsn != (u32) -1) { /* Compute new LSN (i.e., MSN + credit) */ credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK; if (ipath_cmp24(credit, qp->s_lsn) > 0) qp->s_lsn = credit; } /* Restart sending if it was blocked due to lack of credits. */ if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) && qp->s_cur != qp->s_head && (qp->s_lsn == (u32) -1 || ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn, qp->s_lsn + 1) <= 0)) ipath_schedule_send(qp); }
gpl-2.0
DevriesL/HeroQLTE_ImageBreaker
fs/jfs/jfs_debug.c
14238
2823
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_debug.h" #ifdef PROC_FS_JFS /* see jfs_debug.h */ static struct proc_dir_entry *base; #ifdef CONFIG_JFS_DEBUG static int jfs_loglevel_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", jfsloglevel); return 0; } static int jfs_loglevel_proc_open(struct inode *inode, struct file *file) { return single_open(file, jfs_loglevel_proc_show, NULL); } static ssize_t jfs_loglevel_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char c; if (get_user(c, buffer)) return -EFAULT; /* yes, I know this is an ASCIIism. 
--hch */ if (c < '0' || c > '9') return -EINVAL; jfsloglevel = c - '0'; return count; } static const struct file_operations jfs_loglevel_proc_fops = { .owner = THIS_MODULE, .open = jfs_loglevel_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = jfs_loglevel_proc_write, }; #endif static struct { const char *name; const struct file_operations *proc_fops; } Entries[] = { #ifdef CONFIG_JFS_STATISTICS { "lmstats", &jfs_lmstats_proc_fops, }, { "txstats", &jfs_txstats_proc_fops, }, { "xtstat", &jfs_xtstat_proc_fops, }, { "mpstat", &jfs_mpstat_proc_fops, }, #endif #ifdef CONFIG_JFS_DEBUG { "TxAnchor", &jfs_txanchor_proc_fops, }, { "loglevel", &jfs_loglevel_proc_fops } #endif }; #define NPROCENT ARRAY_SIZE(Entries) void jfs_proc_init(void) { int i; if (!(base = proc_mkdir("fs/jfs", NULL))) return; for (i = 0; i < NPROCENT; i++) proc_create(Entries[i].name, 0, base, Entries[i].proc_fops); } void jfs_proc_clean(void) { int i; if (base) { for (i = 0; i < NPROCENT; i++) remove_proc_entry(Entries[i].name, base); remove_proc_entry("fs/jfs", NULL); } } #endif /* PROC_FS_JFS */
gpl-2.0
perillamint/android_kernel_casio_gzone
fs/jfs/jfs_debug.c
14238
2823
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_debug.h" #ifdef PROC_FS_JFS /* see jfs_debug.h */ static struct proc_dir_entry *base; #ifdef CONFIG_JFS_DEBUG static int jfs_loglevel_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", jfsloglevel); return 0; } static int jfs_loglevel_proc_open(struct inode *inode, struct file *file) { return single_open(file, jfs_loglevel_proc_show, NULL); } static ssize_t jfs_loglevel_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char c; if (get_user(c, buffer)) return -EFAULT; /* yes, I know this is an ASCIIism. 
--hch */ if (c < '0' || c > '9') return -EINVAL; jfsloglevel = c - '0'; return count; } static const struct file_operations jfs_loglevel_proc_fops = { .owner = THIS_MODULE, .open = jfs_loglevel_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = jfs_loglevel_proc_write, }; #endif static struct { const char *name; const struct file_operations *proc_fops; } Entries[] = { #ifdef CONFIG_JFS_STATISTICS { "lmstats", &jfs_lmstats_proc_fops, }, { "txstats", &jfs_txstats_proc_fops, }, { "xtstat", &jfs_xtstat_proc_fops, }, { "mpstat", &jfs_mpstat_proc_fops, }, #endif #ifdef CONFIG_JFS_DEBUG { "TxAnchor", &jfs_txanchor_proc_fops, }, { "loglevel", &jfs_loglevel_proc_fops } #endif }; #define NPROCENT ARRAY_SIZE(Entries) void jfs_proc_init(void) { int i; if (!(base = proc_mkdir("fs/jfs", NULL))) return; for (i = 0; i < NPROCENT; i++) proc_create(Entries[i].name, 0, base, Entries[i].proc_fops); } void jfs_proc_clean(void) { int i; if (base) { for (i = 0; i < NPROCENT; i++) remove_proc_entry(Entries[i].name, base); remove_proc_entry("fs/jfs", NULL); } } #endif /* PROC_FS_JFS */
gpl-2.0
NamJa/surface3-kernel
drivers/iio/industrialio-trigger.c
159
14080
/* The industrial I/O core, trigger handling functions * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/idr.h> #include <linux/err.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/trigger.h> #include "iio_core.h" #include "iio_core_trigger.h" #include <linux/iio/trigger_consumer.h> /* RFC - Question of approach * Make the common case (single sensor single trigger) * simple by starting trigger capture from when first sensors * is added. * * Complex simultaneous start requires use of 'hold' functionality * of the trigger. (not implemented) * * Any other suggestions? */ static DEFINE_IDA(iio_trigger_ida); /* Single list of all available triggers */ static LIST_HEAD(iio_trigger_list); static DEFINE_MUTEX(iio_trigger_list_lock); /** * iio_trigger_read_name() - retrieve useful identifying name **/ static ssize_t iio_trigger_read_name(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_trigger *trig = to_iio_trigger(dev); return sprintf(buf, "%s\n", trig->name); } static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL); static struct attribute *iio_trig_dev_attrs[] = { &dev_attr_name.attr, NULL, }; static struct attribute_group iio_trig_attr_group = { .attrs = iio_trig_dev_attrs, }; static const struct attribute_group *iio_trig_attr_groups[] = { &iio_trig_attr_group, NULL }; int iio_trigger_register(struct iio_trigger *trig_info) { int ret; trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL); if (trig_info->id < 0) { ret = trig_info->id; goto error_ret; } /* Set the name used for the sysfs directory etc */ dev_set_name(&trig_info->dev, "trigger%ld", (unsigned long) trig_info->id); ret = 
device_add(&trig_info->dev); if (ret) goto error_unregister_id; /* Add to list of available triggers held by the IIO core */ mutex_lock(&iio_trigger_list_lock); list_add_tail(&trig_info->list, &iio_trigger_list); mutex_unlock(&iio_trigger_list_lock); return 0; error_unregister_id: ida_simple_remove(&iio_trigger_ida, trig_info->id); error_ret: return ret; } EXPORT_SYMBOL(iio_trigger_register); void iio_trigger_unregister(struct iio_trigger *trig_info) { mutex_lock(&iio_trigger_list_lock); list_del(&trig_info->list); mutex_unlock(&iio_trigger_list_lock); ida_simple_remove(&iio_trigger_ida, trig_info->id); /* Possible issue in here */ device_del(&trig_info->dev); } EXPORT_SYMBOL(iio_trigger_unregister); static struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len) { struct iio_trigger *trig = NULL, *iter; mutex_lock(&iio_trigger_list_lock); list_for_each_entry(iter, &iio_trigger_list, list) if (sysfs_streq(iter->name, name)) { trig = iter; break; } mutex_unlock(&iio_trigger_list_lock); return trig; } void iio_trigger_poll(struct iio_trigger *trig, s64 time) { int i; if (!atomic_read(&trig->use_count)) { atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { if (trig->subirqs[i].enabled) generic_handle_irq(trig->subirq_base + i); else iio_trigger_notify_done(trig); } } } EXPORT_SYMBOL(iio_trigger_poll); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private) { iio_trigger_poll(private, iio_get_time_ns()); return IRQ_HANDLED; } EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) { int i; if (!atomic_read(&trig->use_count)) { atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { if (trig->subirqs[i].enabled) handle_nested_irq(trig->subirq_base + i); else iio_trigger_notify_done(trig); } } } EXPORT_SYMBOL(iio_trigger_poll_chained); void 
iio_trigger_notify_done(struct iio_trigger *trig) { if (atomic_dec_and_test(&trig->use_count) && trig->ops && trig->ops->try_reenable) if (trig->ops->try_reenable(trig)) /* Missed an interrupt so launch new poll now */ iio_trigger_poll(trig, 0); } EXPORT_SYMBOL(iio_trigger_notify_done); /* Trigger Consumer related functions */ static int iio_trigger_get_irq(struct iio_trigger *trig) { int ret; mutex_lock(&trig->pool_lock); ret = bitmap_find_free_region(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER, ilog2(1)); mutex_unlock(&trig->pool_lock); if (ret >= 0) ret += trig->subirq_base; return ret; } static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) { mutex_lock(&trig->pool_lock); clear_bit(irq - trig->subirq_base, trig->pool); mutex_unlock(&trig->pool_lock); } /* Complexity in here. With certain triggers (datardy) an acknowledgement * may be needed if the pollfuncs do not include the data read for the * triggering device. * This is not currently handled. Alternative of not enabling trigger unless * the relevant function is in there may be the best option. */ /* Worth protecting against double additions? 
*/ static int iio_trigger_attach_poll_func(struct iio_trigger *trig, struct iio_poll_func *pf) { int ret = 0; bool notinuse = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); /* Prevent the module from being removed whilst attached to a trigger */ __module_get(pf->indio_dev->info->driver_module); pf->irq = iio_trigger_get_irq(trig); ret = request_threaded_irq(pf->irq, pf->h, pf->thread, pf->type, pf->name, pf); if (ret < 0) { module_put(pf->indio_dev->info->driver_module); return ret; } if (trig->ops && trig->ops->set_trigger_state && notinuse) { ret = trig->ops->set_trigger_state(trig, true); if (ret < 0) module_put(pf->indio_dev->info->driver_module); } return ret; } static int iio_trigger_detach_poll_func(struct iio_trigger *trig, struct iio_poll_func *pf) { int ret = 0; bool no_other_users = (bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1); if (trig->ops && trig->ops->set_trigger_state && no_other_users) { ret = trig->ops->set_trigger_state(trig, false); if (ret) goto error_ret; } iio_trigger_put_irq(trig, pf->irq); free_irq(pf->irq, pf); module_put(pf->indio_dev->info->driver_module); error_ret: return ret; } irqreturn_t iio_pollfunc_store_time(int irq, void *p) { struct iio_poll_func *pf = p; pf->timestamp = iio_get_time_ns(); return IRQ_WAKE_THREAD; } EXPORT_SYMBOL(iio_pollfunc_store_time); struct iio_poll_func *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), int type, struct iio_dev *indio_dev, const char *fmt, ...) 
{ va_list vargs; struct iio_poll_func *pf; pf = kmalloc(sizeof *pf, GFP_KERNEL); if (pf == NULL) return NULL; va_start(vargs, fmt); pf->name = kvasprintf(GFP_KERNEL, fmt, vargs); va_end(vargs); if (pf->name == NULL) { kfree(pf); return NULL; } pf->h = h; pf->thread = thread; pf->type = type; pf->indio_dev = indio_dev; return pf; } EXPORT_SYMBOL_GPL(iio_alloc_pollfunc); void iio_dealloc_pollfunc(struct iio_poll_func *pf) { kfree(pf->name); kfree(pf); } EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc); /** * iio_trigger_read_current() - trigger consumer sysfs query current trigger * * For trigger consumers the current_trigger interface allows the trigger * used by the device to be queried. **/ static ssize_t iio_trigger_read_current(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); if (indio_dev->trig) return sprintf(buf, "%s\n", indio_dev->trig->name); return 0; } /** * iio_trigger_write_current() - trigger consumer sysfs set current trigger * * For trigger consumers the current_trigger interface allows the trigger * used for this device to be specified at run time based on the triggers * name. 
**/ static ssize_t iio_trigger_write_current(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_trigger *oldtrig = indio_dev->trig; struct iio_trigger *trig; int ret; mutex_lock(&indio_dev->mlock); if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { mutex_unlock(&indio_dev->mlock); return -EBUSY; } mutex_unlock(&indio_dev->mlock); trig = iio_trigger_find_by_name(buf, len); if (oldtrig == trig) return len; if (trig && indio_dev->info->validate_trigger) { ret = indio_dev->info->validate_trigger(indio_dev, trig); if (ret) return ret; } if (trig && trig->ops && trig->ops->validate_device) { ret = trig->ops->validate_device(trig, indio_dev); if (ret) return ret; } indio_dev->trig = trig; if (oldtrig && indio_dev->trig != oldtrig) iio_trigger_put(oldtrig); if (indio_dev->trig) iio_trigger_get(indio_dev->trig); return len; } static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR, iio_trigger_read_current, iio_trigger_write_current); static struct attribute *iio_trigger_consumer_attrs[] = { &dev_attr_current_trigger.attr, NULL, }; static const struct attribute_group iio_trigger_consumer_attr_group = { .name = "trigger", .attrs = iio_trigger_consumer_attrs, }; static void iio_trig_release(struct device *device) { struct iio_trigger *trig = to_iio_trigger(device); int i; if (trig->subirq_base) { for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { irq_modify_status(trig->subirq_base + i, IRQ_NOAUTOEN, IRQ_NOREQUEST | IRQ_NOPROBE); irq_set_chip(trig->subirq_base + i, NULL); irq_set_handler(trig->subirq_base + i, NULL); } irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER); } kfree(trig->name); kfree(trig); } static struct device_type iio_trig_type = { .release = iio_trig_release, .groups = iio_trig_attr_groups, }; static void iio_trig_subirqmask(struct irq_data *d) { struct irq_chip *chip = irq_data_get_irq_chip(d); struct iio_trigger *trig = container_of(chip, 
struct iio_trigger, subirq_chip); trig->subirqs[d->irq - trig->subirq_base].enabled = false; } static void iio_trig_subirqunmask(struct irq_data *d) { struct irq_chip *chip = irq_data_get_irq_chip(d); struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); trig->subirqs[d->irq - trig->subirq_base].enabled = true; } static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs) { struct iio_trigger *trig; trig = kzalloc(sizeof *trig, GFP_KERNEL); if (trig) { int i; trig->dev.type = &iio_trig_type; trig->dev.bus = &iio_bus_type; device_initialize(&trig->dev); mutex_init(&trig->pool_lock); trig->subirq_base = irq_alloc_descs(-1, 0, CONFIG_IIO_CONSUMERS_PER_TRIGGER, 0); if (trig->subirq_base < 0) { kfree(trig); return NULL; } trig->name = kvasprintf(GFP_KERNEL, fmt, vargs); if (trig->name == NULL) { irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER); kfree(trig); return NULL; } trig->subirq_chip.name = trig->name; trig->subirq_chip.irq_mask = &iio_trig_subirqmask; trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask; for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { irq_set_chip(trig->subirq_base + i, &trig->subirq_chip); irq_set_handler(trig->subirq_base + i, &handle_simple_irq); irq_modify_status(trig->subirq_base + i, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); } get_device(&trig->dev); } return trig; } struct iio_trigger *iio_trigger_alloc(const char *fmt, ...) 
{ struct iio_trigger *trig; va_list vargs; va_start(vargs, fmt); trig = viio_trigger_alloc(fmt, vargs); va_end(vargs); return trig; } EXPORT_SYMBOL(iio_trigger_alloc); void iio_trigger_free(struct iio_trigger *trig) { if (trig) put_device(&trig->dev); } EXPORT_SYMBOL(iio_trigger_free); static void devm_iio_trigger_release(struct device *dev, void *res) { iio_trigger_free(*(struct iio_trigger **)res); } static int devm_iio_trigger_match(struct device *dev, void *res, void *data) { struct iio_trigger **r = res; if (!r || !*r) { WARN_ON(!r || !*r); return 0; } return *r == data; } struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...) { struct iio_trigger **ptr, *trig; va_list vargs; ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; /* use raw alloc_dr for kmalloc caller tracing */ va_start(vargs, fmt); trig = viio_trigger_alloc(fmt, vargs); va_end(vargs); if (trig) { *ptr = trig; devres_add(dev, ptr); } else { devres_free(ptr); } return trig; } EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc); void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig) { int rc; rc = devres_release(dev, devm_iio_trigger_release, devm_iio_trigger_match, iio_trig); WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_iio_trigger_free); void iio_device_register_trigger_consumer(struct iio_dev *indio_dev) { indio_dev->groups[indio_dev->groupcounter++] = &iio_trigger_consumer_attr_group; } void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) { /* Clean up an associated but not attached trigger reference */ if (indio_dev->trig) iio_trigger_put(indio_dev->trig); } int iio_triggered_buffer_postenable(struct iio_dev *indio_dev) { return iio_trigger_attach_poll_func(indio_dev->trig, indio_dev->pollfunc); } EXPORT_SYMBOL(iio_triggered_buffer_postenable); int iio_triggered_buffer_predisable(struct iio_dev *indio_dev) { return iio_trigger_detach_poll_func(indio_dev->trig, indio_dev->pollfunc); } 
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
gpl-2.0
animalcreek/linux
drivers/pinctrl/sh-pfc/pfc-r8a7792.c
159
80070
/*
 * Pin function controller (PFC) tables for the Renesas r8a7792 SoC.
 * Everything below is table data consumed by the shared sh-pfc core
 * ("core.h" / "sh_pfc.h").  Entries are grouped per register — see the
 * inline section markers (GPSR0..GPSR11, IPSR0..IPSR7, MOD_SEL) — and
 * the order of entries within each section is significant, so do not
 * reorder them casually.
 */
/* * r8a7792 processor support - PFC hardware block. * * Copyright (C) 2013-2014 Renesas Electronics Corporation * Copyright (C) 2016 Cogent Embedded, Inc., <source@cogentembedded.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/kernel.h> #include "core.h" #include "sh_pfc.h" #define CPU_ALL_PORT(fn, sfx) \ PORT_GP_29(0, fn, sfx), \ PORT_GP_23(1, fn, sfx), \ PORT_GP_32(2, fn, sfx), \ PORT_GP_28(3, fn, sfx), \ PORT_GP_17(4, fn, sfx), \ PORT_GP_17(5, fn, sfx), \ PORT_GP_17(6, fn, sfx), \ PORT_GP_17(7, fn, sfx), \ PORT_GP_17(8, fn, sfx), \ PORT_GP_17(9, fn, sfx), \ PORT_GP_32(10, fn, sfx), \ PORT_GP_30(11, fn, sfx) enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, GP_ALL(DATA), PINMUX_DATA_END, PINMUX_FUNCTION_BEGIN, GP_ALL(FN), /* GPSR0 */ FN_IP0_0, FN_IP0_1, FN_IP0_2, FN_IP0_3, FN_IP0_4, FN_IP0_5, FN_IP0_6, FN_IP0_7, FN_IP0_8, FN_IP0_9, FN_IP0_10, FN_IP0_11, FN_IP0_12, FN_IP0_13, FN_IP0_14, FN_IP0_15, FN_IP0_16, FN_IP0_17, FN_IP0_18, FN_IP0_19, FN_IP0_20, FN_IP0_21, FN_IP0_22, FN_IP0_23, FN_IP1_0, FN_IP1_1, FN_IP1_2, FN_IP1_3, FN_IP1_4, /* GPSR1 */ FN_IP1_5, FN_IP1_6, FN_IP1_7, FN_IP1_8, FN_IP1_9, FN_IP1_10, FN_IP1_11, FN_IP1_12, FN_IP1_13, FN_IP1_14, FN_IP1_15, FN_IP1_16, FN_DU1_DB2_C0_DATA12, FN_DU1_DB3_C1_DATA13, FN_DU1_DB4_C2_DATA14, FN_DU1_DB5_C3_DATA15, FN_DU1_DB6_C4, FN_DU1_DB7_C5, FN_DU1_EXHSYNC_DU1_HSYNC, FN_DU1_EXVSYNC_DU1_VSYNC, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_DU1_DISP, FN_DU1_CDE, /* GPSR2 */ FN_D0, FN_D1, FN_D2, FN_D3, FN_D4, FN_D5, FN_D6, FN_D7, FN_D8, FN_D9, FN_D10, FN_D11, FN_D12, FN_D13, FN_D14, FN_D15, FN_A0, FN_A1, FN_A2, FN_A3, FN_A4, FN_A5, FN_A6, FN_A7, FN_A8, FN_A9, FN_A10, FN_A11, FN_A12, FN_A13, FN_A14, FN_A15, /* GPSR3 */ FN_A16, FN_A17, FN_A18, FN_A19, FN_IP1_17, FN_IP1_18, FN_CS1_N_A26, FN_EX_CS0_N, FN_EX_CS1_N, FN_EX_CS2_N, FN_EX_CS3_N, FN_EX_CS4_N, FN_EX_CS5_N, FN_BS_N,
/* GPSR3 (cont.): LBSC bus control and IRQ pins, then GPSR4..GPSR11. */
FN_RD_N, FN_RD_WR_N, FN_WE0_N, FN_WE1_N, FN_EX_WAIT0, FN_IRQ0, FN_IRQ1, FN_IRQ2, FN_IRQ3, FN_IP1_19, FN_IP1_20, FN_IP1_21, FN_IP1_22, FN_CS0_N, /* GPSR4 */ FN_VI0_CLK, FN_VI0_CLKENB, FN_VI0_HSYNC_N, FN_VI0_VSYNC_N, FN_VI0_D0_B0_C0, FN_VI0_D1_B1_C1, FN_VI0_D2_B2_C2, FN_VI0_D3_B3_C3, FN_VI0_D4_B4_C4, FN_VI0_D5_B5_C5, FN_VI0_D6_B6_C6, FN_VI0_D7_B7_C7, FN_VI0_D8_G0_Y0, FN_VI0_D9_G1_Y1, FN_VI0_D10_G2_Y2, FN_VI0_D11_G3_Y3, FN_VI0_FIELD, /* GPSR5 */ FN_VI1_CLK, FN_VI1_CLKENB, FN_VI1_HSYNC_N, FN_VI1_VSYNC_N, FN_VI1_D0_B0_C0, FN_VI1_D1_B1_C1, FN_VI1_D2_B2_C2, FN_VI1_D3_B3_C3, FN_VI1_D4_B4_C4, FN_VI1_D5_B5_C5, FN_VI1_D6_B6_C6, FN_VI1_D7_B7_C7, FN_VI1_D8_G0_Y0, FN_VI1_D9_G1_Y1, FN_VI1_D10_G2_Y2, FN_VI1_D11_G3_Y3, FN_VI1_FIELD, /* GPSR6 */ FN_IP2_0, FN_IP2_1, FN_IP2_2, FN_IP2_3, FN_IP2_4, FN_IP2_5, FN_IP2_6, FN_IP2_7, FN_IP2_8, FN_IP2_9, FN_IP2_10, FN_IP2_11, FN_IP2_12, FN_IP2_13, FN_IP2_14, FN_IP2_15, FN_IP2_16, /* GPSR7 */ FN_IP3_0, FN_IP3_1, FN_IP3_2, FN_IP3_3, FN_IP3_4, FN_IP3_5, FN_IP3_6, FN_IP3_7, FN_IP3_8, FN_IP3_9, FN_IP3_10, FN_IP3_11, FN_IP3_12, FN_IP3_13, FN_VI3_D10_Y2, FN_IP3_14, FN_VI3_FIELD, /* GPSR8 */ FN_VI4_CLK, FN_IP4_0, FN_IP4_1, FN_IP4_3_2, FN_IP4_4, FN_IP4_6_5, FN_IP4_8_7, FN_IP4_10_9, FN_IP4_12_11, FN_IP4_14_13, FN_IP4_16_15, FN_IP4_18_17, FN_IP4_20_19, FN_IP4_21, FN_IP4_22, FN_IP4_23, FN_IP4_24, /* GPSR9 */ FN_VI5_CLK, FN_IP5_0, FN_IP5_1, FN_IP5_2, FN_IP5_3, FN_IP5_4, FN_IP5_5, FN_IP5_6, FN_IP5_7, FN_IP5_8, FN_IP5_9, FN_IP5_10, FN_IP5_11, FN_VI5_D9_Y1, FN_VI5_D10_Y2, FN_VI5_D11_Y3, FN_VI5_FIELD, /* GPSR10 */ FN_IP6_0, FN_IP6_1, FN_HRTS0_N, FN_IP6_2, FN_IP6_3, FN_IP6_4, FN_IP6_5, FN_HCTS1_N, FN_IP6_6, FN_IP6_7, FN_SCK0, FN_CTS0_N, FN_RTS0_N, FN_TX0, FN_RX0, FN_SCK1, FN_CTS1_N, FN_RTS1_N, FN_TX1, FN_RX1, FN_IP6_9_8, FN_IP6_11_10, FN_IP6_13_12, FN_IP6_15_14, FN_IP6_16, FN_IP6_18_17, FN_SCIF_CLK, FN_CAN0_TX, FN_CAN0_RX, FN_CAN_CLK, FN_CAN1_TX, FN_CAN1_RX, /* GPSR11 */ FN_IP7_1_0, FN_IP7_3_2, FN_IP7_5_4, FN_IP7_6, FN_IP7_7, FN_SD0_CLK, FN_SD0_CMD,
FN_SD0_DAT0, FN_SD0_DAT1, FN_SD0_DAT2, FN_SD0_DAT3, FN_SD0_CD, FN_SD0_WP, FN_IP7_9_8, FN_IP7_11_10, FN_IP7_13_12, FN_IP7_15_14, FN_IP7_16, FN_IP7_17, FN_IP7_18, FN_IP7_19, FN_IP7_20, FN_ADICLK, FN_ADICS_SAMP, FN_ADIDATA, FN_ADICHS0, FN_ADICHS1, FN_ADICHS2, FN_AVS1, FN_AVS2, /* IPSR0 */ FN_DU0_DR0_DATA0, FN_DU0_DR1_DATA1, FN_DU0_DR2_Y4_DATA2, FN_DU0_DR3_Y5_DATA3, FN_DU0_DR4_Y6_DATA4, FN_DU0_DR5_Y7_DATA5, FN_DU0_DR6_Y8_DATA6, FN_DU0_DR7_Y9_DATA7, FN_DU0_DG0_DATA8, FN_DU0_DG1_DATA9, FN_DU0_DG2_C6_DATA10, FN_DU0_DG3_C7_DATA11, FN_DU0_DG4_Y0_DATA12, FN_DU0_DG5_Y1_DATA13, FN_DU0_DG6_Y2_DATA14, FN_DU0_DG7_Y3_DATA15, FN_DU0_DB0, FN_DU0_DB1, FN_DU0_DB2_C0, FN_DU0_DB3_C1, FN_DU0_DB4_C2, FN_DU0_DB5_C3, FN_DU0_DB6_C4, FN_DU0_DB7_C5, /* IPSR1 */ FN_DU0_EXHSYNC_DU0_HSYNC, FN_DU0_EXVSYNC_DU0_VSYNC, FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_DU0_DISP, FN_DU0_CDE, FN_DU1_DR2_Y4_DATA0, FN_DU1_DR3_Y5_DATA1, FN_DU1_DR4_Y6_DATA2, FN_DU1_DR5_Y7_DATA3, FN_DU1_DR6_DATA4, FN_DU1_DR7_DATA5, FN_DU1_DG2_C6_DATA6, FN_DU1_DG3_C7_DATA7, FN_DU1_DG4_Y0_DATA8, FN_DU1_DG5_Y1_DATA9, FN_DU1_DG6_Y2_DATA10, FN_DU1_DG7_Y3_DATA11, FN_A20, FN_MOSI_IO0, FN_A21, FN_MISO_IO1, FN_A22, FN_IO2, FN_A23, FN_IO3, FN_A24, FN_SPCLK, FN_A25, FN_SSL, /* IPSR2 */ FN_VI2_CLK, FN_AVB_RX_CLK, FN_VI2_CLKENB, FN_AVB_RX_DV, FN_VI2_HSYNC_N, FN_AVB_RXD0, FN_VI2_VSYNC_N, FN_AVB_RXD1, FN_VI2_D0_C0, FN_AVB_RXD2, FN_VI2_D1_C1, FN_AVB_RXD3, FN_VI2_D2_C2, FN_AVB_RXD4, FN_VI2_D3_C3, FN_AVB_RXD5, FN_VI2_D4_C4, FN_AVB_RXD6, FN_VI2_D5_C5, FN_AVB_RXD7, FN_VI2_D6_C6, FN_AVB_RX_ER, FN_VI2_D7_C7, FN_AVB_COL, FN_VI2_D8_Y0, FN_AVB_TXD3, FN_VI2_D9_Y1, FN_AVB_TX_EN, FN_VI2_D10_Y2, FN_AVB_TXD0, FN_VI2_D11_Y3, FN_AVB_TXD1, FN_VI2_FIELD, FN_AVB_TXD2, /* IPSR3 */ FN_VI3_CLK, FN_AVB_TX_CLK, FN_VI3_CLKENB, FN_AVB_TXD4, FN_VI3_HSYNC_N, FN_AVB_TXD5, FN_VI3_VSYNC_N, FN_AVB_TXD6, FN_VI3_D0_C0, FN_AVB_TXD7, FN_VI3_D1_C1, FN_AVB_TX_ER, FN_VI3_D2_C2, FN_AVB_GTX_CLK, FN_VI3_D3_C3, FN_AVB_MDC, FN_VI3_D4_C4, FN_AVB_MDIO, FN_VI3_D5_C5, FN_AVB_LINK, FN_VI3_D6_C6,
FN_AVB_MAGIC, FN_VI3_D7_C7, FN_AVB_PHY_INT, FN_VI3_D8_Y0, FN_AVB_CRS, FN_VI3_D9_Y1, FN_AVB_GTXREFCLK, FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH, /* IPSR4 */ FN_VI4_CLKENB, FN_VI0_D12_G4_Y4, FN_VI4_HSYNC_N, FN_VI0_D13_G5_Y5, FN_VI4_VSYNC_N, FN_VI0_D14_G6_Y6, FN_RDR_CLKOUT, FN_VI4_D0_C0, FN_VI0_D15_G7_Y7, FN_VI4_D1_C1, FN_VI0_D16_R0, FN_VI1_D12_G4_Y4, FN_VI4_D2_C2, FN_VI0_D17_R1, FN_VI1_D13_G5_Y5, FN_VI4_D3_C3, FN_VI0_D18_R2, FN_VI1_D14_G6_Y6, FN_VI4_D4_C4, FN_VI0_D19_R3, FN_VI1_D15_G7_Y7, FN_VI4_D5_C5, FN_VI0_D20_R4, FN_VI2_D12_Y4, FN_VI4_D6_C6, FN_VI0_D21_R5, FN_VI2_D13_Y5, FN_VI4_D7_C7, FN_VI0_D22_R6, FN_VI2_D14_Y6, FN_VI4_D8_Y0, FN_VI0_D23_R7, FN_VI2_D15_Y7, FN_VI4_D9_Y1, FN_VI3_D12_Y4, FN_VI4_D10_Y2, FN_VI3_D13_Y5, FN_VI4_D11_Y3, FN_VI3_D14_Y6, FN_VI4_FIELD, FN_VI3_D15_Y7, /* IPSR5 */ FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B, FN_VI5_HSYNC_N, FN_VI1_D13_G5_Y5_B, FN_VI5_VSYNC_N, FN_VI1_D14_G6_Y6_B, FN_VI5_D0_C0, FN_VI1_D15_G7_Y7_B, FN_VI5_D1_C1, FN_VI1_D16_R0, FN_VI5_D2_C2, FN_VI1_D17_R1, FN_VI5_D3_C3, FN_VI1_D18_R2, FN_VI5_D4_C4, FN_VI1_D19_R3, FN_VI5_D5_C5, FN_VI1_D20_R4, FN_VI5_D6_C6, FN_VI1_D21_R5, FN_VI5_D7_C7, FN_VI1_D22_R6, FN_VI5_D8_Y0, FN_VI1_D23_R7, /* IPSR6 */ FN_MSIOF0_SCK, FN_HSCK0, FN_MSIOF0_SYNC, FN_HCTS0_N, FN_MSIOF0_TXD, FN_HTX0, FN_MSIOF0_RXD, FN_HRX0, FN_MSIOF1_SCK, FN_HSCK1, FN_MSIOF1_SYNC, FN_HRTS1_N, FN_MSIOF1_TXD, FN_HTX1, FN_MSIOF1_RXD, FN_HRX1, FN_DRACK0, FN_SCK2, FN_DACK0, FN_TX2, FN_DREQ0_N, FN_RX2, FN_DACK1, FN_SCK3, FN_TX3, FN_DREQ1_N, FN_RX3, /* IPSR7 */ FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, FN_PWM1, FN_TCLK2, FN_FSO_CFE_1, FN_PWM2, FN_TCLK3, FN_FSO_TOE, FN_PWM3, FN_PWM4, FN_SSI_SCK34, FN_TPU0TO0, FN_SSI_WS34, FN_TPU0TO1, FN_SSI_SDATA3, FN_TPU0TO2, FN_SSI_SCK4, FN_TPU0TO3, FN_SSI_WS4, FN_SSI_SDATA4, FN_AUDIO_CLKOUT, FN_AUDIO_CLKA, FN_AUDIO_CLKB, /* MOD_SEL */ FN_SEL_VI1_0, FN_SEL_VI1_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, DU1_DB2_C0_DATA12_MARK, DU1_DB3_C1_DATA13_MARK, DU1_DB4_C2_DATA14_MARK, DU1_DB5_C3_DATA15_MARK, DU1_DB6_C4_MARK, DU1_DB7_C5_MARK,
/*
 * NOTE(review): in the *_MARK list below, the IPSR2/IPSR3 AVB entries
 * AVB_TX_CLK_MARK / AVB_RXD3_MARK / AVB_TXD3_MARK appear in rotated
 * positions compared with both the FN_* enum above and the
 * PINMUX_IPSR_GPSR() pairings in pinmux_data[] (IP2_5 pairs VI2_D1_C1
 * with AVB_RXD3, IP2_12 pairs VI2_D8_Y0 with AVB_TXD3, IP3_0 pairs
 * VI3_CLK with AVB_TX_CLK).  Mark values are presumably only unique
 * IDs, so the ordering may be harmless — verify against the upstream
 * pfc-r8a7792.c before relying on it.  TODO confirm.
 */
DU1_EXHSYNC_DU1_HSYNC_MARK, DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK, DU1_DISP_MARK, DU1_CDE_MARK, D0_MARK, D1_MARK, D2_MARK, D3_MARK, D4_MARK, D5_MARK, D6_MARK, D7_MARK, D8_MARK, D9_MARK, D10_MARK, D11_MARK, D12_MARK, D13_MARK, D14_MARK, D15_MARK, A0_MARK, A1_MARK, A2_MARK, A3_MARK, A4_MARK, A5_MARK, A6_MARK, A7_MARK, A8_MARK, A9_MARK, A10_MARK, A11_MARK, A12_MARK, A13_MARK, A14_MARK, A15_MARK, A16_MARK, A17_MARK, A18_MARK, A19_MARK, CS1_N_A26_MARK, EX_CS0_N_MARK, EX_CS1_N_MARK, EX_CS2_N_MARK, EX_CS3_N_MARK, EX_CS4_N_MARK, EX_CS5_N_MARK, BS_N_MARK, RD_N_MARK, RD_WR_N_MARK, WE0_N_MARK, WE1_N_MARK, EX_WAIT0_MARK, IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK, CS0_N_MARK, VI0_CLK_MARK, VI0_CLKENB_MARK, VI0_HSYNC_N_MARK, VI0_VSYNC_N_MARK, VI0_D0_B0_C0_MARK, VI0_D1_B1_C1_MARK, VI0_D2_B2_C2_MARK, VI0_D3_B3_C3_MARK, VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK, VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK, VI0_D8_G0_Y0_MARK, VI0_D9_G1_Y1_MARK, VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK, VI0_FIELD_MARK, VI1_CLK_MARK, VI1_CLKENB_MARK, VI1_HSYNC_N_MARK, VI1_VSYNC_N_MARK, VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK, VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK, VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, VI1_D8_G0_Y0_MARK, VI1_D9_G1_Y1_MARK, VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK, VI1_FIELD_MARK, VI3_D10_Y2_MARK, VI3_FIELD_MARK, VI4_CLK_MARK, VI5_CLK_MARK, VI5_D9_Y1_MARK, VI5_D10_Y2_MARK, VI5_D11_Y3_MARK, VI5_FIELD_MARK, HRTS0_N_MARK, HCTS1_N_MARK, SCK0_MARK, CTS0_N_MARK, RTS0_N_MARK, TX0_MARK, RX0_MARK, SCK1_MARK, CTS1_N_MARK, RTS1_N_MARK, TX1_MARK, RX1_MARK, SCIF_CLK_MARK, CAN0_TX_MARK, CAN0_RX_MARK, CAN_CLK_MARK, CAN1_TX_MARK, CAN1_RX_MARK, SD0_CLK_MARK, SD0_CMD_MARK, SD0_DAT0_MARK, SD0_DAT1_MARK, SD0_DAT2_MARK, SD0_DAT3_MARK, SD0_CD_MARK, SD0_WP_MARK, ADICLK_MARK, ADICS_SAMP_MARK, ADIDATA_MARK, ADICHS0_MARK, ADICHS1_MARK, ADICHS2_MARK, AVS1_MARK, AVS2_MARK, /* IPSR0 */ DU0_DR0_DATA0_MARK, DU0_DR1_DATA1_MARK, DU0_DR2_Y4_DATA2_MARK, DU0_DR3_Y5_DATA3_MARK,
DU0_DR4_Y6_DATA4_MARK, DU0_DR5_Y7_DATA5_MARK, DU0_DR6_Y8_DATA6_MARK, DU0_DR7_Y9_DATA7_MARK, DU0_DG0_DATA8_MARK, DU0_DG1_DATA9_MARK, DU0_DG2_C6_DATA10_MARK, DU0_DG3_C7_DATA11_MARK, DU0_DG4_Y0_DATA12_MARK, DU0_DG5_Y1_DATA13_MARK, DU0_DG6_Y2_DATA14_MARK, DU0_DG7_Y3_DATA15_MARK, DU0_DB0_MARK, DU0_DB1_MARK, DU0_DB2_C0_MARK, DU0_DB3_C1_MARK, DU0_DB4_C2_MARK, DU0_DB5_C3_MARK, DU0_DB6_C4_MARK, DU0_DB7_C5_MARK, /* IPSR1 */ DU0_EXHSYNC_DU0_HSYNC_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, DU0_DISP_MARK, DU0_CDE_MARK, DU1_DR2_Y4_DATA0_MARK, DU1_DR3_Y5_DATA1_MARK, DU1_DR4_Y6_DATA2_MARK, DU1_DR5_Y7_DATA3_MARK, DU1_DR6_DATA4_MARK, DU1_DR7_DATA5_MARK, DU1_DG2_C6_DATA6_MARK, DU1_DG3_C7_DATA7_MARK, DU1_DG4_Y0_DATA8_MARK, DU1_DG5_Y1_DATA9_MARK, DU1_DG6_Y2_DATA10_MARK, DU1_DG7_Y3_DATA11_MARK, A20_MARK, MOSI_IO0_MARK, A21_MARK, MISO_IO1_MARK, A22_MARK, IO2_MARK, A23_MARK, IO3_MARK, A24_MARK, SPCLK_MARK, A25_MARK, SSL_MARK, /* IPSR2 */ VI2_CLK_MARK, AVB_RX_CLK_MARK, VI2_CLKENB_MARK, AVB_RX_DV_MARK, VI2_HSYNC_N_MARK, AVB_RXD0_MARK, VI2_VSYNC_N_MARK, AVB_RXD1_MARK, VI2_D0_C0_MARK, AVB_RXD2_MARK, VI2_D1_C1_MARK, AVB_TX_CLK_MARK, VI2_D2_C2_MARK, AVB_RXD4_MARK, VI2_D3_C3_MARK, AVB_RXD5_MARK, VI2_D4_C4_MARK, AVB_RXD6_MARK, VI2_D5_C5_MARK, AVB_RXD7_MARK, VI2_D6_C6_MARK, AVB_RX_ER_MARK, VI2_D7_C7_MARK, AVB_COL_MARK, VI2_D8_Y0_MARK, AVB_RXD3_MARK, VI2_D9_Y1_MARK, AVB_TX_EN_MARK, VI2_D10_Y2_MARK, AVB_TXD0_MARK, VI2_D11_Y3_MARK, AVB_TXD1_MARK, VI2_FIELD_MARK, AVB_TXD2_MARK, /* IPSR3 */ VI3_CLK_MARK, AVB_TXD3_MARK, VI3_CLKENB_MARK, AVB_TXD4_MARK, VI3_HSYNC_N_MARK, AVB_TXD5_MARK, VI3_VSYNC_N_MARK, AVB_TXD6_MARK, VI3_D0_C0_MARK, AVB_TXD7_MARK, VI3_D1_C1_MARK, AVB_TX_ER_MARK, VI3_D2_C2_MARK, AVB_GTX_CLK_MARK, VI3_D3_C3_MARK, AVB_MDC_MARK, VI3_D4_C4_MARK, AVB_MDIO_MARK, VI3_D5_C5_MARK, AVB_LINK_MARK, VI3_D6_C6_MARK, AVB_MAGIC_MARK, VI3_D7_C7_MARK, AVB_PHY_INT_MARK, VI3_D8_Y0_MARK, AVB_CRS_MARK, VI3_D9_Y1_MARK, AVB_GTXREFCLK_MARK, VI3_D11_Y3_MARK, AVB_AVTP_MATCH_MARK, /* IPSR4
*/ VI4_CLKENB_MARK, VI0_D12_G4_Y4_MARK, VI4_HSYNC_N_MARK, VI0_D13_G5_Y5_MARK, VI4_VSYNC_N_MARK, VI0_D14_G6_Y6_MARK, RDR_CLKOUT_MARK, VI4_D0_C0_MARK, VI0_D15_G7_Y7_MARK, VI4_D1_C1_MARK, VI0_D16_R0_MARK, VI1_D12_G4_Y4_MARK, VI4_D2_C2_MARK, VI0_D17_R1_MARK, VI1_D13_G5_Y5_MARK, VI4_D3_C3_MARK, VI0_D18_R2_MARK, VI1_D14_G6_Y6_MARK, VI4_D4_C4_MARK, VI0_D19_R3_MARK, VI1_D15_G7_Y7_MARK, VI4_D5_C5_MARK, VI0_D20_R4_MARK, VI2_D12_Y4_MARK, VI4_D6_C6_MARK, VI0_D21_R5_MARK, VI2_D13_Y5_MARK, VI4_D7_C7_MARK, VI0_D22_R6_MARK, VI2_D14_Y6_MARK, VI4_D8_Y0_MARK, VI0_D23_R7_MARK, VI2_D15_Y7_MARK, VI4_D9_Y1_MARK, VI3_D12_Y4_MARK, VI4_D10_Y2_MARK, VI3_D13_Y5_MARK, VI4_D11_Y3_MARK, VI3_D14_Y6_MARK, VI4_FIELD_MARK, VI3_D15_Y7_MARK, /* IPSR5 */ VI5_CLKENB_MARK, VI1_D12_G4_Y4_B_MARK, VI5_HSYNC_N_MARK, VI1_D13_G5_Y5_B_MARK, VI5_VSYNC_N_MARK, VI1_D14_G6_Y6_B_MARK, VI5_D0_C0_MARK, VI1_D15_G7_Y7_B_MARK, VI5_D1_C1_MARK, VI1_D16_R0_MARK, VI5_D2_C2_MARK, VI1_D17_R1_MARK, VI5_D3_C3_MARK, VI1_D18_R2_MARK, VI5_D4_C4_MARK, VI1_D19_R3_MARK, VI5_D5_C5_MARK, VI1_D20_R4_MARK, VI5_D6_C6_MARK, VI1_D21_R5_MARK, VI5_D7_C7_MARK, VI1_D22_R6_MARK, VI5_D8_Y0_MARK, VI1_D23_R7_MARK, /* IPSR6 */ MSIOF0_SCK_MARK, HSCK0_MARK, MSIOF0_SYNC_MARK, HCTS0_N_MARK, MSIOF0_TXD_MARK, HTX0_MARK, MSIOF0_RXD_MARK, HRX0_MARK, MSIOF1_SCK_MARK, HSCK1_MARK, MSIOF1_SYNC_MARK, HRTS1_N_MARK, MSIOF1_TXD_MARK, HTX1_MARK, MSIOF1_RXD_MARK, HRX1_MARK, DRACK0_MARK, SCK2_MARK, DACK0_MARK, TX2_MARK, DREQ0_N_MARK, RX2_MARK, DACK1_MARK, SCK3_MARK, TX3_MARK, DREQ1_N_MARK, RX3_MARK, /* IPSR7 */ PWM0_MARK, TCLK1_MARK, FSO_CFE_0_MARK, PWM1_MARK, TCLK2_MARK, FSO_CFE_1_MARK, PWM2_MARK, TCLK3_MARK, FSO_TOE_MARK, PWM3_MARK, PWM4_MARK, SSI_SCK34_MARK, TPU0TO0_MARK, SSI_WS34_MARK, TPU0TO1_MARK, SSI_SDATA3_MARK, TPU0TO2_MARK, SSI_SCK4_MARK, TPU0TO3_MARK, SSI_WS4_MARK, SSI_SDATA4_MARK, AUDIO_CLKOUT_MARK, AUDIO_CLKA_MARK, AUDIO_CLKB_MARK, PINMUX_MARK_END, }; static const u16 pinmux_data[] = { PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
/*
 * pinmux_data[] (cont.): PINMUX_SINGLE() entries for pins with one fixed
 * function, then PINMUX_IPSR_GPSR() entries binding each IPSR bitfield
 * (IPn_x) to its selectable functions.  PINMUX_IPSR_MSEL() entries
 * additionally depend on the MOD_SEL VI1 selector (SEL_VI1_0/SEL_VI1_1).
 */
PINMUX_SINGLE(DU1_DB2_C0_DATA12), PINMUX_SINGLE(DU1_DB3_C1_DATA13), PINMUX_SINGLE(DU1_DB4_C2_DATA14), PINMUX_SINGLE(DU1_DB5_C3_DATA15), PINMUX_SINGLE(DU1_DB6_C4), PINMUX_SINGLE(DU1_DB7_C5), PINMUX_SINGLE(DU1_EXHSYNC_DU1_HSYNC), PINMUX_SINGLE(DU1_EXVSYNC_DU1_VSYNC), PINMUX_SINGLE(DU1_EXODDF_DU1_ODDF_DISP_CDE), PINMUX_SINGLE(DU1_DISP), PINMUX_SINGLE(DU1_CDE), PINMUX_SINGLE(D0), PINMUX_SINGLE(D1), PINMUX_SINGLE(D2), PINMUX_SINGLE(D3), PINMUX_SINGLE(D4), PINMUX_SINGLE(D5), PINMUX_SINGLE(D6), PINMUX_SINGLE(D7), PINMUX_SINGLE(D8), PINMUX_SINGLE(D9), PINMUX_SINGLE(D10), PINMUX_SINGLE(D11), PINMUX_SINGLE(D12), PINMUX_SINGLE(D13), PINMUX_SINGLE(D14), PINMUX_SINGLE(D15), PINMUX_SINGLE(A0), PINMUX_SINGLE(A1), PINMUX_SINGLE(A2), PINMUX_SINGLE(A3), PINMUX_SINGLE(A4), PINMUX_SINGLE(A5), PINMUX_SINGLE(A6), PINMUX_SINGLE(A7), PINMUX_SINGLE(A8), PINMUX_SINGLE(A9), PINMUX_SINGLE(A10), PINMUX_SINGLE(A11), PINMUX_SINGLE(A12), PINMUX_SINGLE(A13), PINMUX_SINGLE(A14), PINMUX_SINGLE(A15), PINMUX_SINGLE(A16), PINMUX_SINGLE(A17), PINMUX_SINGLE(A18), PINMUX_SINGLE(A19), PINMUX_SINGLE(CS1_N_A26), PINMUX_SINGLE(EX_CS0_N), PINMUX_SINGLE(EX_CS1_N), PINMUX_SINGLE(EX_CS2_N), PINMUX_SINGLE(EX_CS3_N), PINMUX_SINGLE(EX_CS4_N), PINMUX_SINGLE(EX_CS5_N), PINMUX_SINGLE(BS_N), PINMUX_SINGLE(RD_N), PINMUX_SINGLE(RD_WR_N), PINMUX_SINGLE(WE0_N), PINMUX_SINGLE(WE1_N), PINMUX_SINGLE(EX_WAIT0), PINMUX_SINGLE(IRQ0), PINMUX_SINGLE(IRQ1), PINMUX_SINGLE(IRQ2), PINMUX_SINGLE(IRQ3), PINMUX_SINGLE(CS0_N), PINMUX_SINGLE(VI0_CLK), PINMUX_SINGLE(VI0_CLKENB), PINMUX_SINGLE(VI0_HSYNC_N), PINMUX_SINGLE(VI0_VSYNC_N), PINMUX_SINGLE(VI0_D0_B0_C0), PINMUX_SINGLE(VI0_D1_B1_C1), PINMUX_SINGLE(VI0_D2_B2_C2), PINMUX_SINGLE(VI0_D3_B3_C3), PINMUX_SINGLE(VI0_D4_B4_C4), PINMUX_SINGLE(VI0_D5_B5_C5), PINMUX_SINGLE(VI0_D6_B6_C6), PINMUX_SINGLE(VI0_D7_B7_C7), PINMUX_SINGLE(VI0_D8_G0_Y0), PINMUX_SINGLE(VI0_D9_G1_Y1), PINMUX_SINGLE(VI0_D10_G2_Y2), PINMUX_SINGLE(VI0_D11_G3_Y3), PINMUX_SINGLE(VI0_FIELD), PINMUX_SINGLE(VI1_CLK),
PINMUX_SINGLE(VI1_CLKENB), PINMUX_SINGLE(VI1_HSYNC_N), PINMUX_SINGLE(VI1_VSYNC_N), PINMUX_SINGLE(VI1_D0_B0_C0), PINMUX_SINGLE(VI1_D1_B1_C1), PINMUX_SINGLE(VI1_D2_B2_C2), PINMUX_SINGLE(VI1_D3_B3_C3), PINMUX_SINGLE(VI1_D4_B4_C4), PINMUX_SINGLE(VI1_D5_B5_C5), PINMUX_SINGLE(VI1_D6_B6_C6), PINMUX_SINGLE(VI1_D7_B7_C7), PINMUX_SINGLE(VI1_D8_G0_Y0), PINMUX_SINGLE(VI1_D9_G1_Y1), PINMUX_SINGLE(VI1_D10_G2_Y2), PINMUX_SINGLE(VI1_D11_G3_Y3), PINMUX_SINGLE(VI1_FIELD), PINMUX_SINGLE(VI3_D10_Y2), PINMUX_SINGLE(VI3_FIELD), PINMUX_SINGLE(VI4_CLK), PINMUX_SINGLE(VI5_CLK), PINMUX_SINGLE(VI5_D9_Y1), PINMUX_SINGLE(VI5_D10_Y2), PINMUX_SINGLE(VI5_D11_Y3), PINMUX_SINGLE(VI5_FIELD), PINMUX_SINGLE(HRTS0_N), PINMUX_SINGLE(HCTS1_N), PINMUX_SINGLE(SCK0), PINMUX_SINGLE(CTS0_N), PINMUX_SINGLE(RTS0_N), PINMUX_SINGLE(TX0), PINMUX_SINGLE(RX0), PINMUX_SINGLE(SCK1), PINMUX_SINGLE(CTS1_N), PINMUX_SINGLE(RTS1_N), PINMUX_SINGLE(TX1), PINMUX_SINGLE(RX1), PINMUX_SINGLE(SCIF_CLK), PINMUX_SINGLE(CAN0_TX), PINMUX_SINGLE(CAN0_RX), PINMUX_SINGLE(CAN_CLK), PINMUX_SINGLE(CAN1_TX), PINMUX_SINGLE(CAN1_RX), PINMUX_SINGLE(SD0_CLK), PINMUX_SINGLE(SD0_CMD), PINMUX_SINGLE(SD0_DAT0), PINMUX_SINGLE(SD0_DAT1), PINMUX_SINGLE(SD0_DAT2), PINMUX_SINGLE(SD0_DAT3), PINMUX_SINGLE(SD0_CD), PINMUX_SINGLE(SD0_WP), PINMUX_SINGLE(ADICLK), PINMUX_SINGLE(ADICS_SAMP), PINMUX_SINGLE(ADIDATA), PINMUX_SINGLE(ADICHS0), PINMUX_SINGLE(ADICHS1), PINMUX_SINGLE(ADICHS2), PINMUX_SINGLE(AVS1), PINMUX_SINGLE(AVS2), /* IPSR0 */ PINMUX_IPSR_GPSR(IP0_0, DU0_DR0_DATA0), PINMUX_IPSR_GPSR(IP0_1, DU0_DR1_DATA1), PINMUX_IPSR_GPSR(IP0_2, DU0_DR2_Y4_DATA2), PINMUX_IPSR_GPSR(IP0_3, DU0_DR3_Y5_DATA3), PINMUX_IPSR_GPSR(IP0_4, DU0_DR4_Y6_DATA4), PINMUX_IPSR_GPSR(IP0_5, DU0_DR5_Y7_DATA5), PINMUX_IPSR_GPSR(IP0_6, DU0_DR6_Y8_DATA6), PINMUX_IPSR_GPSR(IP0_7, DU0_DR7_Y9_DATA7), PINMUX_IPSR_GPSR(IP0_8, DU0_DG0_DATA8), PINMUX_IPSR_GPSR(IP0_9, DU0_DG1_DATA9), PINMUX_IPSR_GPSR(IP0_10, DU0_DG2_C6_DATA10), PINMUX_IPSR_GPSR(IP0_11, DU0_DG3_C7_DATA11), PINMUX_IPSR_GPSR(IP0_12,
DU0_DG4_Y0_DATA12), PINMUX_IPSR_GPSR(IP0_13, DU0_DG5_Y1_DATA13), PINMUX_IPSR_GPSR(IP0_14, DU0_DG6_Y2_DATA14), PINMUX_IPSR_GPSR(IP0_15, DU0_DG7_Y3_DATA15), PINMUX_IPSR_GPSR(IP0_16, DU0_DB0), PINMUX_IPSR_GPSR(IP0_17, DU0_DB1), PINMUX_IPSR_GPSR(IP0_18, DU0_DB2_C0), PINMUX_IPSR_GPSR(IP0_19, DU0_DB3_C1), PINMUX_IPSR_GPSR(IP0_20, DU0_DB4_C2), PINMUX_IPSR_GPSR(IP0_21, DU0_DB5_C3), PINMUX_IPSR_GPSR(IP0_22, DU0_DB6_C4), PINMUX_IPSR_GPSR(IP0_23, DU0_DB7_C5), /* IPSR1 */ PINMUX_IPSR_GPSR(IP1_0, DU0_EXHSYNC_DU0_HSYNC), PINMUX_IPSR_GPSR(IP1_1, DU0_EXVSYNC_DU0_VSYNC), PINMUX_IPSR_GPSR(IP1_2, DU0_EXODDF_DU0_ODDF_DISP_CDE), PINMUX_IPSR_GPSR(IP1_3, DU0_DISP), PINMUX_IPSR_GPSR(IP1_4, DU0_CDE), PINMUX_IPSR_GPSR(IP1_5, DU1_DR2_Y4_DATA0), PINMUX_IPSR_GPSR(IP1_6, DU1_DR3_Y5_DATA1), PINMUX_IPSR_GPSR(IP1_7, DU1_DR4_Y6_DATA2), PINMUX_IPSR_GPSR(IP1_8, DU1_DR5_Y7_DATA3), PINMUX_IPSR_GPSR(IP1_9, DU1_DR6_DATA4), PINMUX_IPSR_GPSR(IP1_10, DU1_DR7_DATA5), PINMUX_IPSR_GPSR(IP1_11, DU1_DG2_C6_DATA6), PINMUX_IPSR_GPSR(IP1_12, DU1_DG3_C7_DATA7), PINMUX_IPSR_GPSR(IP1_13, DU1_DG4_Y0_DATA8), PINMUX_IPSR_GPSR(IP1_14, DU1_DG5_Y1_DATA9), PINMUX_IPSR_GPSR(IP1_15, DU1_DG6_Y2_DATA10), PINMUX_IPSR_GPSR(IP1_16, DU1_DG7_Y3_DATA11), PINMUX_IPSR_GPSR(IP1_17, A20), PINMUX_IPSR_GPSR(IP1_17, MOSI_IO0), PINMUX_IPSR_GPSR(IP1_18, A21), PINMUX_IPSR_GPSR(IP1_18, MISO_IO1), PINMUX_IPSR_GPSR(IP1_19, A22), PINMUX_IPSR_GPSR(IP1_19, IO2), PINMUX_IPSR_GPSR(IP1_20, A23), PINMUX_IPSR_GPSR(IP1_20, IO3), PINMUX_IPSR_GPSR(IP1_21, A24), PINMUX_IPSR_GPSR(IP1_21, SPCLK), PINMUX_IPSR_GPSR(IP1_22, A25), PINMUX_IPSR_GPSR(IP1_22, SSL), /* IPSR2 */ PINMUX_IPSR_GPSR(IP2_0, VI2_CLK), PINMUX_IPSR_GPSR(IP2_0, AVB_RX_CLK), PINMUX_IPSR_GPSR(IP2_1, VI2_CLKENB), PINMUX_IPSR_GPSR(IP2_1, AVB_RX_DV), PINMUX_IPSR_GPSR(IP2_2, VI2_HSYNC_N), PINMUX_IPSR_GPSR(IP2_2, AVB_RXD0), PINMUX_IPSR_GPSR(IP2_3, VI2_VSYNC_N), PINMUX_IPSR_GPSR(IP2_3, AVB_RXD1), PINMUX_IPSR_GPSR(IP2_4, VI2_D0_C0), PINMUX_IPSR_GPSR(IP2_4, AVB_RXD2), PINMUX_IPSR_GPSR(IP2_5, VI2_D1_C1),
PINMUX_IPSR_GPSR(IP2_5, AVB_RXD3), PINMUX_IPSR_GPSR(IP2_6, VI2_D2_C2), PINMUX_IPSR_GPSR(IP2_6, AVB_RXD4), PINMUX_IPSR_GPSR(IP2_7, VI2_D3_C3), PINMUX_IPSR_GPSR(IP2_7, AVB_RXD5), PINMUX_IPSR_GPSR(IP2_8, VI2_D4_C4), PINMUX_IPSR_GPSR(IP2_8, AVB_RXD6), PINMUX_IPSR_GPSR(IP2_9, VI2_D5_C5), PINMUX_IPSR_GPSR(IP2_9, AVB_RXD7), PINMUX_IPSR_GPSR(IP2_10, VI2_D6_C6), PINMUX_IPSR_GPSR(IP2_10, AVB_RX_ER), PINMUX_IPSR_GPSR(IP2_11, VI2_D7_C7), PINMUX_IPSR_GPSR(IP2_11, AVB_COL), PINMUX_IPSR_GPSR(IP2_12, VI2_D8_Y0), PINMUX_IPSR_GPSR(IP2_12, AVB_TXD3), PINMUX_IPSR_GPSR(IP2_13, VI2_D9_Y1), PINMUX_IPSR_GPSR(IP2_13, AVB_TX_EN), PINMUX_IPSR_GPSR(IP2_14, VI2_D10_Y2), PINMUX_IPSR_GPSR(IP2_14, AVB_TXD0), PINMUX_IPSR_GPSR(IP2_15, VI2_D11_Y3), PINMUX_IPSR_GPSR(IP2_15, AVB_TXD1), PINMUX_IPSR_GPSR(IP2_16, VI2_FIELD), PINMUX_IPSR_GPSR(IP2_16, AVB_TXD2), /* IPSR3 */ PINMUX_IPSR_GPSR(IP3_0, VI3_CLK), PINMUX_IPSR_GPSR(IP3_0, AVB_TX_CLK), PINMUX_IPSR_GPSR(IP3_1, VI3_CLKENB), PINMUX_IPSR_GPSR(IP3_1, AVB_TXD4), PINMUX_IPSR_GPSR(IP3_2, VI3_HSYNC_N), PINMUX_IPSR_GPSR(IP3_2, AVB_TXD5), PINMUX_IPSR_GPSR(IP3_3, VI3_VSYNC_N), PINMUX_IPSR_GPSR(IP3_3, AVB_TXD6), PINMUX_IPSR_GPSR(IP3_4, VI3_D0_C0), PINMUX_IPSR_GPSR(IP3_4, AVB_TXD7), PINMUX_IPSR_GPSR(IP3_5, VI3_D1_C1), PINMUX_IPSR_GPSR(IP3_5, AVB_TX_ER), PINMUX_IPSR_GPSR(IP3_6, VI3_D2_C2), PINMUX_IPSR_GPSR(IP3_6, AVB_GTX_CLK), PINMUX_IPSR_GPSR(IP3_7, VI3_D3_C3), PINMUX_IPSR_GPSR(IP3_7, AVB_MDC), PINMUX_IPSR_GPSR(IP3_8, VI3_D4_C4), PINMUX_IPSR_GPSR(IP3_8, AVB_MDIO), PINMUX_IPSR_GPSR(IP3_9, VI3_D5_C5), PINMUX_IPSR_GPSR(IP3_9, AVB_LINK), PINMUX_IPSR_GPSR(IP3_10, VI3_D6_C6), PINMUX_IPSR_GPSR(IP3_10, AVB_MAGIC), PINMUX_IPSR_GPSR(IP3_11, VI3_D7_C7), PINMUX_IPSR_GPSR(IP3_11, AVB_PHY_INT), PINMUX_IPSR_GPSR(IP3_12, VI3_D8_Y0), PINMUX_IPSR_GPSR(IP3_12, AVB_CRS), PINMUX_IPSR_GPSR(IP3_13, VI3_D9_Y1), PINMUX_IPSR_GPSR(IP3_13, AVB_GTXREFCLK), PINMUX_IPSR_GPSR(IP3_14, VI3_D11_Y3), PINMUX_IPSR_GPSR(IP3_14, AVB_AVTP_MATCH), /* IPSR4 */ PINMUX_IPSR_GPSR(IP4_0, VI4_CLKENB),
PINMUX_IPSR_GPSR(IP4_0, VI0_D12_G4_Y4), PINMUX_IPSR_GPSR(IP4_1, VI4_HSYNC_N), PINMUX_IPSR_GPSR(IP4_1, VI0_D13_G5_Y5), PINMUX_IPSR_GPSR(IP4_3_2, VI4_VSYNC_N), PINMUX_IPSR_GPSR(IP4_3_2, VI0_D14_G6_Y6), PINMUX_IPSR_GPSR(IP4_4, VI4_D0_C0), PINMUX_IPSR_GPSR(IP4_4, VI0_D15_G7_Y7), PINMUX_IPSR_GPSR(IP4_6_5, VI4_D1_C1), PINMUX_IPSR_GPSR(IP4_6_5, VI0_D16_R0), PINMUX_IPSR_MSEL(IP4_6_5, VI1_D12_G4_Y4, SEL_VI1_0), PINMUX_IPSR_GPSR(IP4_8_7, VI4_D2_C2), PINMUX_IPSR_GPSR(IP4_8_7, VI0_D17_R1), PINMUX_IPSR_MSEL(IP4_8_7, VI1_D13_G5_Y5, SEL_VI1_0), PINMUX_IPSR_GPSR(IP4_10_9, VI4_D3_C3), PINMUX_IPSR_GPSR(IP4_10_9, VI0_D18_R2), PINMUX_IPSR_MSEL(IP4_10_9, VI1_D14_G6_Y6, SEL_VI1_0), PINMUX_IPSR_GPSR(IP4_12_11, VI4_D4_C4), PINMUX_IPSR_GPSR(IP4_12_11, VI0_D19_R3), PINMUX_IPSR_MSEL(IP4_12_11, VI1_D15_G7_Y7, SEL_VI1_0), PINMUX_IPSR_GPSR(IP4_14_13, VI4_D5_C5), PINMUX_IPSR_GPSR(IP4_14_13, VI0_D20_R4), PINMUX_IPSR_GPSR(IP4_14_13, VI2_D12_Y4), PINMUX_IPSR_GPSR(IP4_16_15, VI4_D6_C6), PINMUX_IPSR_GPSR(IP4_16_15, VI0_D21_R5), PINMUX_IPSR_GPSR(IP4_16_15, VI2_D13_Y5), PINMUX_IPSR_GPSR(IP4_18_17, VI4_D7_C7), PINMUX_IPSR_GPSR(IP4_18_17, VI0_D22_R6), PINMUX_IPSR_GPSR(IP4_18_17, VI2_D14_Y6), PINMUX_IPSR_GPSR(IP4_20_19, VI4_D8_Y0), PINMUX_IPSR_GPSR(IP4_20_19, VI0_D23_R7), PINMUX_IPSR_GPSR(IP4_20_19, VI2_D15_Y7), PINMUX_IPSR_GPSR(IP4_21, VI4_D9_Y1), PINMUX_IPSR_GPSR(IP4_21, VI3_D12_Y4), PINMUX_IPSR_GPSR(IP4_22, VI4_D10_Y2), PINMUX_IPSR_GPSR(IP4_22, VI3_D13_Y5), PINMUX_IPSR_GPSR(IP4_23, VI4_D11_Y3), PINMUX_IPSR_GPSR(IP4_23, VI3_D14_Y6), PINMUX_IPSR_GPSR(IP4_24, VI4_FIELD), PINMUX_IPSR_GPSR(IP4_24, VI3_D15_Y7), /* IPSR5 */ PINMUX_IPSR_GPSR(IP5_0, VI5_CLKENB), PINMUX_IPSR_MSEL(IP5_0, VI1_D12_G4_Y4_B, SEL_VI1_1), PINMUX_IPSR_GPSR(IP5_1, VI5_HSYNC_N), PINMUX_IPSR_MSEL(IP5_1, VI1_D13_G5_Y5_B, SEL_VI1_1), PINMUX_IPSR_GPSR(IP5_2, VI5_VSYNC_N), PINMUX_IPSR_MSEL(IP5_2, VI1_D14_G6_Y6_B, SEL_VI1_1), PINMUX_IPSR_GPSR(IP5_3, VI5_D0_C0), PINMUX_IPSR_MSEL(IP5_3, VI1_D15_G7_Y7_B, SEL_VI1_1), PINMUX_IPSR_GPSR(IP5_4,
VI5_D1_C1), PINMUX_IPSR_GPSR(IP5_4, VI1_D16_R0), PINMUX_IPSR_GPSR(IP5_5, VI5_D2_C2), PINMUX_IPSR_GPSR(IP5_5, VI1_D17_R1), PINMUX_IPSR_GPSR(IP5_6, VI5_D3_C3), PINMUX_IPSR_GPSR(IP5_6, VI1_D18_R2), PINMUX_IPSR_GPSR(IP5_7, VI5_D4_C4), PINMUX_IPSR_GPSR(IP5_7, VI1_D19_R3), PINMUX_IPSR_GPSR(IP5_8, VI5_D5_C5), PINMUX_IPSR_GPSR(IP5_8, VI1_D20_R4), PINMUX_IPSR_GPSR(IP5_9, VI5_D6_C6), PINMUX_IPSR_GPSR(IP5_9, VI1_D21_R5), PINMUX_IPSR_GPSR(IP5_10, VI5_D7_C7), PINMUX_IPSR_GPSR(IP5_10, VI1_D22_R6), PINMUX_IPSR_GPSR(IP5_11, VI5_D8_Y0), PINMUX_IPSR_GPSR(IP5_11, VI1_D23_R7), /* IPSR6 */ PINMUX_IPSR_GPSR(IP6_0, MSIOF0_SCK), PINMUX_IPSR_GPSR(IP6_0, HSCK0), PINMUX_IPSR_GPSR(IP6_1, MSIOF0_SYNC), PINMUX_IPSR_GPSR(IP6_1, HCTS0_N), PINMUX_IPSR_GPSR(IP6_2, MSIOF0_TXD), PINMUX_IPSR_GPSR(IP6_2, HTX0), PINMUX_IPSR_GPSR(IP6_3, MSIOF0_RXD), PINMUX_IPSR_GPSR(IP6_3, HRX0), PINMUX_IPSR_GPSR(IP6_4, MSIOF1_SCK), PINMUX_IPSR_GPSR(IP6_4, HSCK1), PINMUX_IPSR_GPSR(IP6_5, MSIOF1_SYNC), PINMUX_IPSR_GPSR(IP6_5, HRTS1_N), PINMUX_IPSR_GPSR(IP6_6, MSIOF1_TXD), PINMUX_IPSR_GPSR(IP6_6, HTX1), PINMUX_IPSR_GPSR(IP6_7, MSIOF1_RXD), PINMUX_IPSR_GPSR(IP6_7, HRX1), PINMUX_IPSR_GPSR(IP6_9_8, DRACK0), PINMUX_IPSR_GPSR(IP6_9_8, SCK2), PINMUX_IPSR_GPSR(IP6_11_10, DACK0), PINMUX_IPSR_GPSR(IP6_11_10, TX2), PINMUX_IPSR_GPSR(IP6_13_12, DREQ0_N), PINMUX_IPSR_GPSR(IP6_13_12, RX2), PINMUX_IPSR_GPSR(IP6_15_14, DACK1), PINMUX_IPSR_GPSR(IP6_15_14, SCK3), PINMUX_IPSR_GPSR(IP6_16, TX3), PINMUX_IPSR_GPSR(IP6_18_17, DREQ1_N), PINMUX_IPSR_GPSR(IP6_18_17, RX3), /* IPSR7 */ PINMUX_IPSR_GPSR(IP7_1_0, PWM0), PINMUX_IPSR_GPSR(IP7_1_0, TCLK1), PINMUX_IPSR_GPSR(IP7_1_0, FSO_CFE_0), PINMUX_IPSR_GPSR(IP7_3_2, PWM1), PINMUX_IPSR_GPSR(IP7_3_2, TCLK2), PINMUX_IPSR_GPSR(IP7_3_2, FSO_CFE_1), PINMUX_IPSR_GPSR(IP7_5_4, PWM2), PINMUX_IPSR_GPSR(IP7_5_4, TCLK3), PINMUX_IPSR_GPSR(IP7_5_4, FSO_TOE), PINMUX_IPSR_GPSR(IP7_6, PWM3), PINMUX_IPSR_GPSR(IP7_7, PWM4), PINMUX_IPSR_GPSR(IP7_9_8, SSI_SCK34), PINMUX_IPSR_GPSR(IP7_9_8, TPU0TO0),
/*
 * End of pinmux_data[] (IPSR7 audio/SSI entries), then pinmux_pins[]
 * (all GPIO pins via PINMUX_GPIO_GP_ALL()), then pin-group tables:
 * each *_pins[] lists RCAR_GP_PIN(bank, bit) entries and the matching
 * *_mux[] lists each pin's *_MARK function in the same order — the two
 * arrays are paired positionally, so keep them in lockstep.
 * Groups here: AVB (Ethernet-AVB), CAN0/CAN1, and the start of DU0.
 */
PINMUX_IPSR_GPSR(IP7_11_10, SSI_WS34), PINMUX_IPSR_GPSR(IP7_11_10, TPU0TO1), PINMUX_IPSR_GPSR(IP7_13_12, SSI_SDATA3), PINMUX_IPSR_GPSR(IP7_13_12, TPU0TO2), PINMUX_IPSR_GPSR(IP7_15_14, SSI_SCK4), PINMUX_IPSR_GPSR(IP7_15_14, TPU0TO3), PINMUX_IPSR_GPSR(IP7_16, SSI_WS4), PINMUX_IPSR_GPSR(IP7_17, SSI_SDATA4), PINMUX_IPSR_GPSR(IP7_18, AUDIO_CLKOUT), PINMUX_IPSR_GPSR(IP7_19, AUDIO_CLKA), PINMUX_IPSR_GPSR(IP7_20, AUDIO_CLKB), }; static const struct sh_pfc_pin pinmux_pins[] = { PINMUX_GPIO_GP_ALL(), }; /* - AVB -------------------------------------------------------------------- */ static const unsigned int avb_link_pins[] = { RCAR_GP_PIN(7, 9), }; static const unsigned int avb_link_mux[] = { AVB_LINK_MARK, }; static const unsigned int avb_magic_pins[] = { RCAR_GP_PIN(7, 10), }; static const unsigned int avb_magic_mux[] = { AVB_MAGIC_MARK, }; static const unsigned int avb_phy_int_pins[] = { RCAR_GP_PIN(7, 11), }; static const unsigned int avb_phy_int_mux[] = { AVB_PHY_INT_MARK, }; static const unsigned int avb_mdio_pins[] = { RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 8), }; static const unsigned int avb_mdio_mux[] = { AVB_MDC_MARK, AVB_MDIO_MARK, }; static const unsigned int avb_mii_pins[] = { RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 12), RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1), RCAR_GP_PIN(7, 12), RCAR_GP_PIN(6, 13), RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 0), RCAR_GP_PIN(6, 11), }; static const unsigned int avb_mii_mux[] = { AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK, AVB_TXD3_MARK, AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK, AVB_RXD3_MARK, AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK, AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK, AVB_COL_MARK, }; static const unsigned int avb_gmii_pins[] = { RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 12), RCAR_GP_PIN(7, 1), RCAR_GP_PIN(7, 2), RCAR_GP_PIN(7, 3),
RCAR_GP_PIN(7, 4), RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1), RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 13), RCAR_GP_PIN(6, 13), RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 0), RCAR_GP_PIN(6, 11), }; static const unsigned int avb_gmii_mux[] = { AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK, AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK, AVB_TXD6_MARK, AVB_TXD7_MARK, AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK, AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK, AVB_RXD6_MARK, AVB_RXD7_MARK, AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK, AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK, AVB_COL_MARK, }; static const unsigned int avb_avtp_match_pins[] = { RCAR_GP_PIN(7, 15), }; static const unsigned int avb_avtp_match_mux[] = { AVB_AVTP_MATCH_MARK, }; /* - CAN -------------------------------------------------------------------- */ static const unsigned int can0_data_pins[] = { /* TX, RX */ RCAR_GP_PIN(10, 27), RCAR_GP_PIN(10, 28), }; static const unsigned int can0_data_mux[] = { CAN0_TX_MARK, CAN0_RX_MARK, }; static const unsigned int can1_data_pins[] = { /* TX, RX */ RCAR_GP_PIN(10, 30), RCAR_GP_PIN(10, 31), }; static const unsigned int can1_data_mux[] = { CAN1_TX_MARK, CAN1_RX_MARK, }; static const unsigned int can_clk_pins[] = { /* CAN_CLK */ RCAR_GP_PIN(10, 29), }; static const unsigned int can_clk_mux[] = { CAN_CLK_MARK, }; /* - DU --------------------------------------------------------------------- */ static const unsigned int du0_rgb666_pins[] = { /* R[7:2], G[7:2], B[7:2] */ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 23), RCAR_GP_PIN(0, 22),
RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 20), RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 18), }; static const unsigned int du0_rgb666_mux[] = { DU0_DR7_Y9_DATA7_MARK, DU0_DR6_Y8_DATA6_MARK, DU0_DR5_Y7_DATA5_MARK, DU0_DR4_Y6_DATA4_MARK, DU0_DR3_Y5_DATA3_MARK, DU0_DR2_Y4_DATA2_MARK, DU0_DG7_Y3_DATA15_MARK, DU0_DG6_Y2_DATA14_MARK, DU0_DG5_Y1_DATA13_MARK, DU0_DG4_Y0_DATA12_MARK, DU0_DG3_C7_DATA11_MARK, DU0_DG2_C6_DATA10_MARK, DU0_DB7_C5_MARK, DU0_DB6_C4_MARK, DU0_DB5_C3_MARK, DU0_DB4_C2_MARK, DU0_DB3_C1_MARK, DU0_DB2_C0_MARK, }; static const unsigned int du0_rgb888_pins[] = { /* R[7:0], G[7:0], B[7:0] */ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 23), RCAR_GP_PIN(0, 22), RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 20), RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 18), RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16), }; static const unsigned int du0_rgb888_mux[] = { DU0_DR7_Y9_DATA7_MARK, DU0_DR6_Y8_DATA6_MARK, DU0_DR5_Y7_DATA5_MARK, DU0_DR4_Y6_DATA4_MARK, DU0_DR3_Y5_DATA3_MARK, DU0_DR2_Y4_DATA2_MARK, DU0_DR1_DATA1_MARK, DU0_DR0_DATA0_MARK, DU0_DG7_Y3_DATA15_MARK, DU0_DG6_Y2_DATA14_MARK, DU0_DG5_Y1_DATA13_MARK, DU0_DG4_Y0_DATA12_MARK, DU0_DG3_C7_DATA11_MARK, DU0_DG2_C6_DATA10_MARK, DU0_DG1_DATA9_MARK, DU0_DG0_DATA8_MARK, DU0_DB7_C5_MARK, DU0_DB6_C4_MARK, DU0_DB5_C3_MARK, DU0_DB4_C2_MARK, DU0_DB3_C1_MARK, DU0_DB2_C0_MARK, DU0_DB1_MARK, DU0_DB0_MARK, }; static const unsigned int du0_sync_pins[] = { /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */ RCAR_GP_PIN(0, 25), RCAR_GP_PIN(0, 24), }; static const unsigned int du0_sync_mux[] = { DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK, }; static const unsigned int du0_oddf_pins[] = { /* EXODDF/ODDF/DISP/CDE */ RCAR_GP_PIN(0, 26), }; static const unsigned int du0_oddf_mux[] = {
/*
 * Pin-group tables (cont.): DU0/DU1 display, INTC IRQ0-3, LBSC chip
 * selects, MSIOF0/1, QSPI, and SCIF0-3 serial groups.  As above, each
 * *_pins[] and its *_mux[] are paired positionally.  The trailing
 * SDHI0 table is continued beyond this chunk.
 */
DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK }; static const unsigned int du0_disp_pins[] = { /* DISP */ RCAR_GP_PIN(0, 27), }; static const unsigned int du0_disp_mux[] = { DU0_DISP_MARK, }; static const unsigned int du0_cde_pins[] = { /* CDE */ RCAR_GP_PIN(0, 28), }; static const unsigned int du0_cde_mux[] = { DU0_CDE_MARK, }; static const unsigned int du1_rgb666_pins[] = { /* R[7:2], G[7:2], B[7:2] */ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 12), }; static const unsigned int du1_rgb666_mux[] = { DU1_DR7_DATA5_MARK, DU1_DR6_DATA4_MARK, DU1_DR5_Y7_DATA3_MARK, DU1_DR4_Y6_DATA2_MARK, DU1_DR3_Y5_DATA1_MARK, DU1_DR2_Y4_DATA0_MARK, DU1_DG7_Y3_DATA11_MARK, DU1_DG6_Y2_DATA10_MARK, DU1_DG5_Y1_DATA9_MARK, DU1_DG4_Y0_DATA8_MARK, DU1_DG3_C7_DATA7_MARK, DU1_DG2_C6_DATA6_MARK, DU1_DB7_C5_MARK, DU1_DB6_C4_MARK, DU1_DB5_C3_DATA15_MARK, DU1_DB4_C2_DATA14_MARK, DU1_DB3_C1_DATA13_MARK, DU1_DB2_C0_DATA12_MARK, }; static const unsigned int du1_sync_pins[] = { /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */ RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18), }; static const unsigned int du1_sync_mux[] = { DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK, }; static const unsigned int du1_oddf_pins[] = { /* EXODDF/ODDF/DISP/CDE */ RCAR_GP_PIN(1, 20), }; static const unsigned int du1_oddf_mux[] = { DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK }; static const unsigned int du1_disp_pins[] = { /* DISP */ RCAR_GP_PIN(1, 21), }; static const unsigned int du1_disp_mux[] = { DU1_DISP_MARK, }; static const unsigned int du1_cde_pins[] = { /* CDE */ RCAR_GP_PIN(1, 22), }; static const unsigned int du1_cde_mux[] = { DU1_CDE_MARK, }; /* - INTC ------------------------------------------------------------------- */ static const unsigned int
intc_irq0_pins[] = { /* IRQ0 */ RCAR_GP_PIN(3, 19), }; static const unsigned int intc_irq0_mux[] = { IRQ0_MARK, }; static const unsigned int intc_irq1_pins[] = { /* IRQ1 */ RCAR_GP_PIN(3, 20), }; static const unsigned int intc_irq1_mux[] = { IRQ1_MARK, }; static const unsigned int intc_irq2_pins[] = { /* IRQ2 */ RCAR_GP_PIN(3, 21), }; static const unsigned int intc_irq2_mux[] = { IRQ2_MARK, }; static const unsigned int intc_irq3_pins[] = { /* IRQ3 */ RCAR_GP_PIN(3, 22), }; static const unsigned int intc_irq3_mux[] = { IRQ3_MARK, }; /* - LBSC ------------------------------------------------------------------- */ static const unsigned int lbsc_cs0_pins[] = { /* CS0# */ RCAR_GP_PIN(3, 27), }; static const unsigned int lbsc_cs0_mux[] = { CS0_N_MARK, }; static const unsigned int lbsc_cs1_pins[] = { /* CS1#_A26 */ RCAR_GP_PIN(3, 6), }; static const unsigned int lbsc_cs1_mux[] = { CS1_N_A26_MARK, }; static const unsigned int lbsc_ex_cs0_pins[] = { /* EX_CS0# */ RCAR_GP_PIN(3, 7), }; static const unsigned int lbsc_ex_cs0_mux[] = { EX_CS0_N_MARK, }; static const unsigned int lbsc_ex_cs1_pins[] = { /* EX_CS1# */ RCAR_GP_PIN(3, 8), }; static const unsigned int lbsc_ex_cs1_mux[] = { EX_CS1_N_MARK, }; static const unsigned int lbsc_ex_cs2_pins[] = { /* EX_CS2# */ RCAR_GP_PIN(3, 9), }; static const unsigned int lbsc_ex_cs2_mux[] = { EX_CS2_N_MARK, }; static const unsigned int lbsc_ex_cs3_pins[] = { /* EX_CS3# */ RCAR_GP_PIN(3, 10), }; static const unsigned int lbsc_ex_cs3_mux[] = { EX_CS3_N_MARK, }; static const unsigned int lbsc_ex_cs4_pins[] = { /* EX_CS4# */ RCAR_GP_PIN(3, 11), }; static const unsigned int lbsc_ex_cs4_mux[] = { EX_CS4_N_MARK, }; static const unsigned int lbsc_ex_cs5_pins[] = { /* EX_CS5# */ RCAR_GP_PIN(3, 12), }; static const unsigned int lbsc_ex_cs5_mux[] = { EX_CS5_N_MARK, }; /* - MSIOF0 ----------------------------------------------------------------- */ static const unsigned int msiof0_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 0), }; static const unsigned
int msiof0_clk_mux[] = { MSIOF0_SCK_MARK, }; static const unsigned int msiof0_sync_pins[] = { /* SYNC */ RCAR_GP_PIN(10, 1), }; static const unsigned int msiof0_sync_mux[] = { MSIOF0_SYNC_MARK, }; static const unsigned int msiof0_rx_pins[] = { /* RXD */ RCAR_GP_PIN(10, 4), }; static const unsigned int msiof0_rx_mux[] = { MSIOF0_RXD_MARK, }; static const unsigned int msiof0_tx_pins[] = { /* TXD */ RCAR_GP_PIN(10, 3), }; static const unsigned int msiof0_tx_mux[] = { MSIOF0_TXD_MARK, }; /* - MSIOF1 ----------------------------------------------------------------- */ static const unsigned int msiof1_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 5), }; static const unsigned int msiof1_clk_mux[] = { MSIOF1_SCK_MARK, }; static const unsigned int msiof1_sync_pins[] = { /* SYNC */ RCAR_GP_PIN(10, 6), }; static const unsigned int msiof1_sync_mux[] = { MSIOF1_SYNC_MARK, }; static const unsigned int msiof1_rx_pins[] = { /* RXD */ RCAR_GP_PIN(10, 9), }; static const unsigned int msiof1_rx_mux[] = { MSIOF1_RXD_MARK, }; static const unsigned int msiof1_tx_pins[] = { /* TXD */ RCAR_GP_PIN(10, 8), }; static const unsigned int msiof1_tx_mux[] = { MSIOF1_TXD_MARK, }; /* - QSPI ------------------------------------------------------------------- */ static const unsigned int qspi_ctrl_pins[] = { /* SPCLK, SSL */ RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26), }; static const unsigned int qspi_ctrl_mux[] = { SPCLK_MARK, SSL_MARK, }; static const unsigned int qspi_data2_pins[] = { /* MOSI_IO0, MISO_IO1 */ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), }; static const unsigned int qspi_data2_mux[] = { MOSI_IO0_MARK, MISO_IO1_MARK, }; static const unsigned int qspi_data4_pins[] = { /* MOSI_IO0, MISO_IO1, IO2, IO3 */ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 24), }; static const unsigned int qspi_data4_mux[] = { MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK, }; /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int
scif0_data_pins[] = { /* RX, TX */ RCAR_GP_PIN(10, 14), RCAR_GP_PIN(10, 13), }; static const unsigned int scif0_data_mux[] = { RX0_MARK, TX0_MARK, }; static const unsigned int scif0_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 10), }; static const unsigned int scif0_clk_mux[] = { SCK0_MARK, }; static const unsigned int scif0_ctrl_pins[] = { /* RTS, CTS */ RCAR_GP_PIN(10, 12), RCAR_GP_PIN(10, 11), }; static const unsigned int scif0_ctrl_mux[] = { RTS0_N_MARK, CTS0_N_MARK, }; /* - SCIF1 ------------------------------------------------------------------ */ static const unsigned int scif1_data_pins[] = { /* RX, TX */ RCAR_GP_PIN(10, 19), RCAR_GP_PIN(10, 18), }; static const unsigned int scif1_data_mux[] = { RX1_MARK, TX1_MARK, }; static const unsigned int scif1_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 15), }; static const unsigned int scif1_clk_mux[] = { SCK1_MARK, }; static const unsigned int scif1_ctrl_pins[] = { /* RTS, CTS */ RCAR_GP_PIN(10, 17), RCAR_GP_PIN(10, 16), }; static const unsigned int scif1_ctrl_mux[] = { RTS1_N_MARK, CTS1_N_MARK, }; /* - SCIF2 ------------------------------------------------------------------ */ static const unsigned int scif2_data_pins[] = { /* RX, TX */ RCAR_GP_PIN(10, 22), RCAR_GP_PIN(10, 21), }; static const unsigned int scif2_data_mux[] = { RX2_MARK, TX2_MARK, }; static const unsigned int scif2_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 20), }; static const unsigned int scif2_clk_mux[] = { SCK2_MARK, }; /* - SCIF3 ------------------------------------------------------------------ */ static const unsigned int scif3_data_pins[] = { /* RX, TX */ RCAR_GP_PIN(10, 25), RCAR_GP_PIN(10, 24), }; static const unsigned int scif3_data_mux[] = { RX3_MARK, TX3_MARK, }; static const unsigned int scif3_clk_pins[] = { /* SCK */ RCAR_GP_PIN(10, 23), }; static const unsigned int scif3_clk_mux[] = { SCK3_MARK, }; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* DAT0 */
/* NOTE(review): the union vin_data tables below use the .data24/.data16/.data12
 * members so one table can serve several bus widths; entry order is B, then G,
 * then R component lines, matching the VIn_Dx_* mark names. */
RCAR_GP_PIN(11, 7), }; static const unsigned int sdhi0_data1_mux[] = { SD0_DAT0_MARK, }; static const unsigned int sdhi0_data4_pins[] = { /* DAT[0-3] */ RCAR_GP_PIN(11, 7), RCAR_GP_PIN(11, 8), RCAR_GP_PIN(11, 9), RCAR_GP_PIN(11, 10), }; static const unsigned int sdhi0_data4_mux[] = { SD0_DAT0_MARK, SD0_DAT1_MARK, SD0_DAT2_MARK, SD0_DAT3_MARK, }; static const unsigned int sdhi0_ctrl_pins[] = { /* CLK, CMD */ RCAR_GP_PIN(11, 5), RCAR_GP_PIN(11, 6), }; static const unsigned int sdhi0_ctrl_mux[] = { SD0_CLK_MARK, SD0_CMD_MARK, }; static const unsigned int sdhi0_cd_pins[] = { /* CD */ RCAR_GP_PIN(11, 11), }; static const unsigned int sdhi0_cd_mux[] = { SD0_CD_MARK, }; static const unsigned int sdhi0_wp_pins[] = { /* WP */ RCAR_GP_PIN(11, 12), }; static const unsigned int sdhi0_wp_mux[] = { SD0_WP_MARK, }; /* - VIN0 ------------------------------------------------------------------- */ static const union vin_data vin0_data_pins = { .data24 = { /* B */ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11), /* G */ RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15), RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 2), RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 4), /* R */ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12), }, }; static const union vin_data vin0_data_mux = { .data24 = { /* B */ VI0_D0_B0_C0_MARK, VI0_D1_B1_C1_MARK, VI0_D2_B2_C2_MARK, VI0_D3_B3_C3_MARK, VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK, VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK, /* G */ VI0_D8_G0_Y0_MARK, VI0_D9_G1_Y1_MARK, VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK, VI0_D12_G4_Y4_MARK, VI0_D13_G5_Y5_MARK, VI0_D14_G6_Y6_MARK, VI0_D15_G7_Y7_MARK, /* R */ VI0_D16_R0_MARK, VI0_D17_R1_MARK, VI0_D18_R2_MARK, VI0_D19_R3_MARK, VI0_D20_R4_MARK, VI0_D21_R5_MARK, VI0_D22_R6_MARK, VI0_D23_R7_MARK, }, }; static const 
unsigned int vin0_data18_pins[] = { /* B */ RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11), /* G */ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15), RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 2), RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 4), /* R */ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12), }; static const unsigned int vin0_data18_mux[] = { /* B */ VI0_D2_B2_C2_MARK, VI0_D3_B3_C3_MARK, VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK, VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK, /* G */ VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK, VI0_D12_G4_Y4_MARK, VI0_D13_G5_Y5_MARK, VI0_D14_G6_Y6_MARK, VI0_D15_G7_Y7_MARK, /* R */ VI0_D18_R2_MARK, VI0_D19_R3_MARK, VI0_D20_R4_MARK, VI0_D21_R5_MARK, VI0_D22_R6_MARK, VI0_D23_R7_MARK, }; static const unsigned int vin0_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3), }; static const unsigned int vin0_sync_mux[] = { VI0_HSYNC_N_MARK, VI0_VSYNC_N_MARK, }; static const unsigned int vin0_field_pins[] = { RCAR_GP_PIN(4, 16), }; static const unsigned int vin0_field_mux[] = { VI0_FIELD_MARK, }; static const unsigned int vin0_clkenb_pins[] = { RCAR_GP_PIN(4, 1), }; static const unsigned int vin0_clkenb_mux[] = { VI0_CLKENB_MARK, }; static const unsigned int vin0_clk_pins[] = { RCAR_GP_PIN(4, 0), }; static const unsigned int vin0_clk_mux[] = { VI0_CLK_MARK, }; /* - VIN1 ------------------------------------------------------------------- */ static const union vin_data vin1_data_pins = { .data24 = { /* B */ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11), /* G */ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15), RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8), /* R */ RCAR_GP_PIN(9, 5), RCAR_GP_PIN(9, 6), RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9), 
RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12), }, }; static const union vin_data vin1_data_mux = { .data24 = { /* B */ VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK, VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK, VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, /* G */ VI1_D8_G0_Y0_MARK, VI1_D9_G1_Y1_MARK, VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK, VI1_D12_G4_Y4_MARK, VI1_D13_G5_Y5_MARK, VI1_D14_G6_Y6_MARK, VI1_D15_G7_Y7_MARK, /* R */ VI1_D16_R0_MARK, VI1_D17_R1_MARK, VI1_D18_R2_MARK, VI1_D19_R3_MARK, VI1_D20_R4_MARK, VI1_D21_R5_MARK, VI1_D22_R6_MARK, VI1_D23_R7_MARK, }, }; static const unsigned int vin1_data18_pins[] = { /* B */ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11), /* G */ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15), RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8), /* R */ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12), }; static const unsigned int vin1_data18_mux[] = { /* B */ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK, VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, /* G */ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK, VI1_D12_G4_Y4_MARK, VI1_D13_G5_Y5_MARK, VI1_D14_G6_Y6_MARK, VI1_D15_G7_Y7_MARK, /* R */ VI1_D18_R2_MARK, VI1_D19_R3_MARK, VI1_D20_R4_MARK, VI1_D21_R5_MARK, VI1_D22_R6_MARK, VI1_D23_R7_MARK, }; static const union vin_data vin1_data_b_pins = { .data24 = { /* B */ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11), /* G */ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15), RCAR_GP_PIN(9, 1), RCAR_GP_PIN(9, 2), RCAR_GP_PIN(9, 3), RCAR_GP_PIN(9, 4), /* R */ RCAR_GP_PIN(9, 5), RCAR_GP_PIN(9, 6), RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12), }, }; static 
const union vin_data vin1_data_b_mux = { .data24 = { /* B */ VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK, VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK, VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, /* G */ VI1_D8_G0_Y0_MARK, VI1_D9_G1_Y1_MARK, VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK, VI1_D12_G4_Y4_B_MARK, VI1_D13_G5_Y5_B_MARK, VI1_D14_G6_Y6_B_MARK, VI1_D15_G7_Y7_B_MARK, /* R */ VI1_D16_R0_MARK, VI1_D17_R1_MARK, VI1_D18_R2_MARK, VI1_D19_R3_MARK, VI1_D20_R4_MARK, VI1_D21_R5_MARK, VI1_D22_R6_MARK, VI1_D23_R7_MARK, }, }; static const unsigned int vin1_data18_b_pins[] = { /* B */ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11), /* G */ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15), RCAR_GP_PIN(9, 1), RCAR_GP_PIN(9, 2), RCAR_GP_PIN(9, 3), RCAR_GP_PIN(9, 4), /* R */ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12), }; static const unsigned int vin1_data18_b_mux[] = { /* B */ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK, VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, /* G */ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK, VI1_D12_G4_Y4_B_MARK, VI1_D13_G5_Y5_B_MARK, VI1_D14_G6_Y6_B_MARK, VI1_D15_G7_Y7_B_MARK, /* R */ VI1_D18_R2_MARK, VI1_D19_R3_MARK, VI1_D20_R4_MARK, VI1_D21_R5_MARK, VI1_D22_R6_MARK, VI1_D23_R7_MARK, }; static const unsigned int vin1_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(5, 2), RCAR_GP_PIN(5, 3), }; static const unsigned int vin1_sync_mux[] = { VI1_HSYNC_N_MARK, VI1_VSYNC_N_MARK, }; static const unsigned int vin1_field_pins[] = { RCAR_GP_PIN(5, 16), }; static const unsigned int vin1_field_mux[] = { VI1_FIELD_MARK, }; static const unsigned int vin1_clkenb_pins[] = { RCAR_GP_PIN(5, 1), }; static const unsigned int vin1_clkenb_mux[] = { VI1_CLKENB_MARK, }; static const unsigned int vin1_clk_pins[] = { RCAR_GP_PIN(5, 0), }; static const unsigned int vin1_clk_mux[] = { VI1_CLK_MARK, }; /* - VIN2 
------------------------------------------------------------------- */ static const union vin_data vin2_data_pins = { .data16 = { RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12), RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12), }, }; static const union vin_data vin2_data_mux = { .data16 = { VI2_D0_C0_MARK, VI2_D1_C1_MARK, VI2_D2_C2_MARK, VI2_D3_C3_MARK, VI2_D4_C4_MARK, VI2_D5_C5_MARK, VI2_D6_C6_MARK, VI2_D7_C7_MARK, VI2_D8_Y0_MARK, VI2_D9_Y1_MARK, VI2_D10_Y2_MARK, VI2_D11_Y3_MARK, VI2_D12_Y4_MARK, VI2_D13_Y5_MARK, VI2_D14_Y6_MARK, VI2_D15_Y7_MARK, }, }; static const unsigned int vin2_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3), }; static const unsigned int vin2_sync_mux[] = { VI2_HSYNC_N_MARK, VI2_VSYNC_N_MARK, }; static const unsigned int vin2_field_pins[] = { RCAR_GP_PIN(6, 16), }; static const unsigned int vin2_field_mux[] = { VI2_FIELD_MARK, }; static const unsigned int vin2_clkenb_pins[] = { RCAR_GP_PIN(6, 1), }; static const unsigned int vin2_clkenb_mux[] = { VI2_CLKENB_MARK, }; static const unsigned int vin2_clk_pins[] = { RCAR_GP_PIN(6, 0), }; static const unsigned int vin2_clk_mux[] = { VI2_CLK_MARK, }; /* - VIN3 ------------------------------------------------------------------- */ static const union vin_data vin3_data_pins = { .data16 = { RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9), RCAR_GP_PIN(7, 10), RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15), RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 14), RCAR_GP_PIN(8, 15), RCAR_GP_PIN(8, 16), }, }; static const union vin_data vin3_data_mux = { .data16 = { VI3_D0_C0_MARK, VI3_D1_C1_MARK, VI3_D2_C2_MARK, VI3_D3_C3_MARK, VI3_D4_C4_MARK, VI3_D5_C5_MARK, 
/* NOTE(review): VIN2/VIN3 are 16-bit-wide tables (.data16) and VIN4/VIN5 are
 * 12-bit (.data12), unlike the 24-bit VIN0/VIN1 tables above. */
VI3_D6_C6_MARK, VI3_D7_C7_MARK, VI3_D8_Y0_MARK, VI3_D9_Y1_MARK, VI3_D10_Y2_MARK, VI3_D11_Y3_MARK, VI3_D12_Y4_MARK, VI3_D13_Y5_MARK, VI3_D14_Y6_MARK, VI3_D15_Y7_MARK, }, }; static const unsigned int vin3_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(7, 2), RCAR_GP_PIN(7, 3), }; static const unsigned int vin3_sync_mux[] = { VI3_HSYNC_N_MARK, VI3_VSYNC_N_MARK, }; static const unsigned int vin3_field_pins[] = { RCAR_GP_PIN(7, 16), }; static const unsigned int vin3_field_mux[] = { VI3_FIELD_MARK, }; static const unsigned int vin3_clkenb_pins[] = { RCAR_GP_PIN(7, 1), }; static const unsigned int vin3_clkenb_mux[] = { VI3_CLKENB_MARK, }; static const unsigned int vin3_clk_pins[] = { RCAR_GP_PIN(7, 0), }; static const unsigned int vin3_clk_mux[] = { VI3_CLK_MARK, }; /* - VIN4 ------------------------------------------------------------------- */ static const union vin_data vin4_data_pins = { .data12 = { RCAR_GP_PIN(8, 4), RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12), RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 14), RCAR_GP_PIN(8, 15), }, }; static const union vin_data vin4_data_mux = { .data12 = { VI4_D0_C0_MARK, VI4_D1_C1_MARK, VI4_D2_C2_MARK, VI4_D3_C3_MARK, VI4_D4_C4_MARK, VI4_D5_C5_MARK, VI4_D6_C6_MARK, VI4_D7_C7_MARK, VI4_D8_Y0_MARK, VI4_D9_Y1_MARK, VI4_D10_Y2_MARK, VI4_D11_Y3_MARK, }, }; static const unsigned int vin4_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(8, 2), RCAR_GP_PIN(8, 3), }; static const unsigned int vin4_sync_mux[] = { VI4_HSYNC_N_MARK, VI4_VSYNC_N_MARK, }; static const unsigned int vin4_field_pins[] = { RCAR_GP_PIN(8, 16), }; static const unsigned int vin4_field_mux[] = { VI4_FIELD_MARK, }; static const unsigned int vin4_clkenb_pins[] = { RCAR_GP_PIN(8, 1), }; static const unsigned int vin4_clkenb_mux[] = { VI4_CLKENB_MARK, }; static const unsigned int vin4_clk_pins[] = { RCAR_GP_PIN(8, 0), }; static const unsigned int vin4_clk_mux[] = { 
VI4_CLK_MARK, }; /* - VIN5 ------------------------------------------------------------------- */ static const union vin_data vin5_data_pins = { .data12 = { RCAR_GP_PIN(9, 4), RCAR_GP_PIN(9, 5), RCAR_GP_PIN(9, 6), RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12), RCAR_GP_PIN(9, 13), RCAR_GP_PIN(9, 14), RCAR_GP_PIN(9, 15), }, }; static const union vin_data vin5_data_mux = { .data12 = { VI5_D0_C0_MARK, VI5_D1_C1_MARK, VI5_D2_C2_MARK, VI5_D3_C3_MARK, VI5_D4_C4_MARK, VI5_D5_C5_MARK, VI5_D6_C6_MARK, VI5_D7_C7_MARK, VI5_D8_Y0_MARK, VI5_D9_Y1_MARK, VI5_D10_Y2_MARK, VI5_D11_Y3_MARK, }, }; static const unsigned int vin5_sync_pins[] = { /* HSYNC#, VSYNC# */ RCAR_GP_PIN(9, 2), RCAR_GP_PIN(9, 3), }; static const unsigned int vin5_sync_mux[] = { VI5_HSYNC_N_MARK, VI5_VSYNC_N_MARK, }; static const unsigned int vin5_field_pins[] = { RCAR_GP_PIN(9, 16), }; static const unsigned int vin5_field_mux[] = { VI5_FIELD_MARK, }; static const unsigned int vin5_clkenb_pins[] = { RCAR_GP_PIN(9, 1), }; static const unsigned int vin5_clkenb_mux[] = { VI5_CLKENB_MARK, }; static const unsigned int vin5_clk_pins[] = { RCAR_GP_PIN(9, 0), }; static const unsigned int vin5_clk_mux[] = { VI5_CLK_MARK, }; static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(avb_link), SH_PFC_PIN_GROUP(avb_magic), SH_PFC_PIN_GROUP(avb_phy_int), SH_PFC_PIN_GROUP(avb_mdio), SH_PFC_PIN_GROUP(avb_mii), SH_PFC_PIN_GROUP(avb_gmii), SH_PFC_PIN_GROUP(avb_avtp_match), SH_PFC_PIN_GROUP(can0_data), SH_PFC_PIN_GROUP(can1_data), SH_PFC_PIN_GROUP(can_clk), SH_PFC_PIN_GROUP(du0_rgb666), SH_PFC_PIN_GROUP(du0_rgb888), SH_PFC_PIN_GROUP(du0_sync), SH_PFC_PIN_GROUP(du0_oddf), SH_PFC_PIN_GROUP(du0_disp), SH_PFC_PIN_GROUP(du0_cde), SH_PFC_PIN_GROUP(du1_rgb666), SH_PFC_PIN_GROUP(du1_sync), SH_PFC_PIN_GROUP(du1_oddf), SH_PFC_PIN_GROUP(du1_disp), SH_PFC_PIN_GROUP(du1_cde), SH_PFC_PIN_GROUP(intc_irq0), SH_PFC_PIN_GROUP(intc_irq1), 
/* NOTE(review): SH_PFC_PIN_GROUP(x) ties the x_pins[]/x_mux[] pair defined
 * above into one selectable group; VIN_DATA_PIN_GROUP(x, n) presumably takes
 * the first n entries of the union vin_data table — confirm against sh_pfc.h. */
SH_PFC_PIN_GROUP(intc_irq2), SH_PFC_PIN_GROUP(intc_irq3), SH_PFC_PIN_GROUP(lbsc_cs0), SH_PFC_PIN_GROUP(lbsc_cs1), SH_PFC_PIN_GROUP(lbsc_ex_cs0), SH_PFC_PIN_GROUP(lbsc_ex_cs1), SH_PFC_PIN_GROUP(lbsc_ex_cs2), SH_PFC_PIN_GROUP(lbsc_ex_cs3), SH_PFC_PIN_GROUP(lbsc_ex_cs4), SH_PFC_PIN_GROUP(lbsc_ex_cs5), SH_PFC_PIN_GROUP(msiof0_clk), SH_PFC_PIN_GROUP(msiof0_sync), SH_PFC_PIN_GROUP(msiof0_rx), SH_PFC_PIN_GROUP(msiof0_tx), SH_PFC_PIN_GROUP(msiof1_clk), SH_PFC_PIN_GROUP(msiof1_sync), SH_PFC_PIN_GROUP(msiof1_rx), SH_PFC_PIN_GROUP(msiof1_tx), SH_PFC_PIN_GROUP(qspi_ctrl), SH_PFC_PIN_GROUP(qspi_data2), SH_PFC_PIN_GROUP(qspi_data4), SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), SH_PFC_PIN_GROUP(scif1_data), SH_PFC_PIN_GROUP(scif1_clk), SH_PFC_PIN_GROUP(scif1_ctrl), SH_PFC_PIN_GROUP(scif2_data), SH_PFC_PIN_GROUP(scif2_clk), SH_PFC_PIN_GROUP(scif3_data), SH_PFC_PIN_GROUP(scif3_clk), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), SH_PFC_PIN_GROUP(sdhi0_cd), SH_PFC_PIN_GROUP(sdhi0_wp), VIN_DATA_PIN_GROUP(vin0_data, 24), VIN_DATA_PIN_GROUP(vin0_data, 20), SH_PFC_PIN_GROUP(vin0_data18), VIN_DATA_PIN_GROUP(vin0_data, 16), VIN_DATA_PIN_GROUP(vin0_data, 12), VIN_DATA_PIN_GROUP(vin0_data, 10), VIN_DATA_PIN_GROUP(vin0_data, 8), SH_PFC_PIN_GROUP(vin0_sync), SH_PFC_PIN_GROUP(vin0_field), SH_PFC_PIN_GROUP(vin0_clkenb), SH_PFC_PIN_GROUP(vin0_clk), VIN_DATA_PIN_GROUP(vin1_data, 24), VIN_DATA_PIN_GROUP(vin1_data, 20), SH_PFC_PIN_GROUP(vin1_data18), VIN_DATA_PIN_GROUP(vin1_data, 16), VIN_DATA_PIN_GROUP(vin1_data, 12), VIN_DATA_PIN_GROUP(vin1_data, 10), VIN_DATA_PIN_GROUP(vin1_data, 8), VIN_DATA_PIN_GROUP(vin1_data_b, 24), VIN_DATA_PIN_GROUP(vin1_data_b, 20), SH_PFC_PIN_GROUP(vin1_data18_b), VIN_DATA_PIN_GROUP(vin1_data_b, 16), SH_PFC_PIN_GROUP(vin1_sync), SH_PFC_PIN_GROUP(vin1_field), SH_PFC_PIN_GROUP(vin1_clkenb), SH_PFC_PIN_GROUP(vin1_clk), VIN_DATA_PIN_GROUP(vin2_data, 16), VIN_DATA_PIN_GROUP(vin2_data, 
12), VIN_DATA_PIN_GROUP(vin2_data, 10), VIN_DATA_PIN_GROUP(vin2_data, 8), SH_PFC_PIN_GROUP(vin2_sync), SH_PFC_PIN_GROUP(vin2_field), SH_PFC_PIN_GROUP(vin2_clkenb), SH_PFC_PIN_GROUP(vin2_clk), VIN_DATA_PIN_GROUP(vin3_data, 16), VIN_DATA_PIN_GROUP(vin3_data, 12), VIN_DATA_PIN_GROUP(vin3_data, 10), VIN_DATA_PIN_GROUP(vin3_data, 8), SH_PFC_PIN_GROUP(vin3_sync), SH_PFC_PIN_GROUP(vin3_field), SH_PFC_PIN_GROUP(vin3_clkenb), SH_PFC_PIN_GROUP(vin3_clk), VIN_DATA_PIN_GROUP(vin4_data, 12), VIN_DATA_PIN_GROUP(vin4_data, 10), VIN_DATA_PIN_GROUP(vin4_data, 8), SH_PFC_PIN_GROUP(vin4_sync), SH_PFC_PIN_GROUP(vin4_field), SH_PFC_PIN_GROUP(vin4_clkenb), SH_PFC_PIN_GROUP(vin4_clk), VIN_DATA_PIN_GROUP(vin5_data, 12), VIN_DATA_PIN_GROUP(vin5_data, 10), VIN_DATA_PIN_GROUP(vin5_data, 8), SH_PFC_PIN_GROUP(vin5_sync), SH_PFC_PIN_GROUP(vin5_field), SH_PFC_PIN_GROUP(vin5_clkenb), SH_PFC_PIN_GROUP(vin5_clk), }; static const char * const avb_groups[] = { "avb_link", "avb_magic", "avb_phy_int", "avb_mdio", "avb_mii", "avb_gmii", "avb_avtp_match", }; static const char * const can0_groups[] = { "can0_data", "can_clk", }; static const char * const can1_groups[] = { "can1_data", "can_clk", }; static const char * const du0_groups[] = { "du0_rgb666", "du0_rgb888", "du0_sync", "du0_oddf", "du0_disp", "du0_cde", }; static const char * const du1_groups[] = { "du1_rgb666", "du1_sync", "du1_oddf", "du1_disp", "du1_cde", }; static const char * const intc_groups[] = { "intc_irq0", "intc_irq1", "intc_irq2", "intc_irq3", }; static const char * const lbsc_groups[] = { "lbsc_cs0", "lbsc_cs1", "lbsc_ex_cs0", "lbsc_ex_cs1", "lbsc_ex_cs2", "lbsc_ex_cs3", "lbsc_ex_cs4", "lbsc_ex_cs5", }; static const char * const msiof0_groups[] = { "msiof0_clk", "msiof0_sync", "msiof0_rx", "msiof0_tx", }; static const char * const msiof1_groups[] = { "msiof1_clk", "msiof1_sync", "msiof1_rx", "msiof1_tx", }; static const char * const qspi_groups[] = { "qspi_ctrl", "qspi_data2", "qspi_data4", }; static const char * const 
scif0_groups[] = { "scif0_data", "scif0_clk", "scif0_ctrl", }; static const char * const scif1_groups[] = { "scif1_data", "scif1_clk", "scif1_ctrl", }; static const char * const scif2_groups[] = { "scif2_data", "scif2_clk", }; static const char * const scif3_groups[] = { "scif3_data", "scif3_clk", }; static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp", }; static const char * const vin0_groups[] = { "vin0_data24", "vin0_data20", "vin0_data18", "vin0_data16", "vin0_data12", "vin0_data10", "vin0_data8", "vin0_sync", "vin0_field", "vin0_clkenb", "vin0_clk", }; static const char * const vin1_groups[] = { "vin1_data24", "vin1_data20", "vin1_data18", "vin1_data16", "vin1_data12", "vin1_data10", "vin1_data8", "vin1_data24_b", "vin1_data20_b", "vin1_data16_b", "vin1_sync", "vin1_field", "vin1_clkenb", "vin1_clk", }; static const char * const vin2_groups[] = { "vin2_data16", "vin2_data12", "vin2_data10", "vin2_data8", "vin2_sync", "vin2_field", "vin2_clkenb", "vin2_clk", }; static const char * const vin3_groups[] = { "vin3_data16", "vin3_data12", "vin3_data10", "vin3_data8", "vin3_sync", "vin3_field", "vin3_clkenb", "vin3_clk", }; static const char * const vin4_groups[] = { "vin4_data12", "vin4_data10", "vin4_data8", "vin4_sync", "vin4_field", "vin4_clkenb", "vin4_clk", }; static const char * const vin5_groups[] = { "vin5_data12", "vin5_data10", "vin5_data8", "vin5_sync", "vin5_field", "vin5_clkenb", "vin5_clk", }; static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(avb), SH_PFC_FUNCTION(can0), SH_PFC_FUNCTION(can1), SH_PFC_FUNCTION(du0), SH_PFC_FUNCTION(du1), SH_PFC_FUNCTION(intc), SH_PFC_FUNCTION(lbsc), SH_PFC_FUNCTION(msiof0), SH_PFC_FUNCTION(msiof1), SH_PFC_FUNCTION(qspi), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif2), SH_PFC_FUNCTION(scif3), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(vin0), SH_PFC_FUNCTION(vin1), SH_PFC_FUNCTION(vin2), SH_PFC_FUNCTION(vin3), 
/* NOTE(review): each GPSRn descriptor below is PINMUX_CFG_REG(name, address,
 * 32, 1): 32 one-bit fields, listed from bit 31 down to bit 0 as
 * (GP_x_y_FN, function) pairs; 0,0 pairs mark reserved/unused bits. The pair
 * order must match the register's bit layout exactly. */
SH_PFC_FUNCTION(vin4), SH_PFC_FUNCTION(vin5), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) { 0, 0, 0, 0, 0, 0, GP_0_28_FN, FN_IP1_4, GP_0_27_FN, FN_IP1_3, GP_0_26_FN, FN_IP1_2, GP_0_25_FN, FN_IP1_1, GP_0_24_FN, FN_IP1_0, GP_0_23_FN, FN_IP0_23, GP_0_22_FN, FN_IP0_22, GP_0_21_FN, FN_IP0_21, GP_0_20_FN, FN_IP0_20, GP_0_19_FN, FN_IP0_19, GP_0_18_FN, FN_IP0_18, GP_0_17_FN, FN_IP0_17, GP_0_16_FN, FN_IP0_16, GP_0_15_FN, FN_IP0_15, GP_0_14_FN, FN_IP0_14, GP_0_13_FN, FN_IP0_13, GP_0_12_FN, FN_IP0_12, GP_0_11_FN, FN_IP0_11, GP_0_10_FN, FN_IP0_10, GP_0_9_FN, FN_IP0_9, GP_0_8_FN, FN_IP0_8, GP_0_7_FN, FN_IP0_7, GP_0_6_FN, FN_IP0_6, GP_0_5_FN, FN_IP0_5, GP_0_4_FN, FN_IP0_4, GP_0_3_FN, FN_IP0_3, GP_0_2_FN, FN_IP0_2, GP_0_1_FN, FN_IP0_1, GP_0_0_FN, FN_IP0_0 } }, { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_1_22_FN, FN_DU1_CDE, GP_1_21_FN, FN_DU1_DISP, GP_1_20_FN, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, GP_1_19_FN, FN_DU1_EXVSYNC_DU1_VSYNC, GP_1_18_FN, FN_DU1_EXHSYNC_DU1_HSYNC, GP_1_17_FN, FN_DU1_DB7_C5, GP_1_16_FN, FN_DU1_DB6_C4, GP_1_15_FN, FN_DU1_DB5_C3_DATA15, GP_1_14_FN, FN_DU1_DB4_C2_DATA14, GP_1_13_FN, FN_DU1_DB3_C1_DATA13, GP_1_12_FN, FN_DU1_DB2_C0_DATA12, GP_1_11_FN, FN_IP1_16, GP_1_10_FN, FN_IP1_15, GP_1_9_FN, FN_IP1_14, GP_1_8_FN, FN_IP1_13, GP_1_7_FN, FN_IP1_12, GP_1_6_FN, FN_IP1_11, GP_1_5_FN, FN_IP1_10, GP_1_4_FN, FN_IP1_9, GP_1_3_FN, FN_IP1_8, GP_1_2_FN, FN_IP1_7, GP_1_1_FN, FN_IP1_6, GP_1_0_FN, FN_IP1_5, } }, { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) { GP_2_31_FN, FN_A15, GP_2_30_FN, FN_A14, GP_2_29_FN, FN_A13, GP_2_28_FN, FN_A12, GP_2_27_FN, FN_A11, GP_2_26_FN, FN_A10, GP_2_25_FN, FN_A9, GP_2_24_FN, FN_A8, GP_2_23_FN, FN_A7, GP_2_22_FN, FN_A6, GP_2_21_FN, FN_A5, GP_2_20_FN, FN_A4, GP_2_19_FN, FN_A3, GP_2_18_FN, FN_A2, GP_2_17_FN, FN_A1, GP_2_16_FN, FN_A0, GP_2_15_FN, FN_D15, GP_2_14_FN, FN_D14, GP_2_13_FN, FN_D13, GP_2_12_FN, FN_D12, GP_2_11_FN, 
FN_D11, GP_2_10_FN, FN_D10, GP_2_9_FN, FN_D9, GP_2_8_FN, FN_D8, GP_2_7_FN, FN_D7, GP_2_6_FN, FN_D6, GP_2_5_FN, FN_D5, GP_2_4_FN, FN_D4, GP_2_3_FN, FN_D3, GP_2_2_FN, FN_D2, GP_2_1_FN, FN_D1, GP_2_0_FN, FN_D0 } }, { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, GP_3_27_FN, FN_CS0_N, GP_3_26_FN, FN_IP1_22, GP_3_25_FN, FN_IP1_21, GP_3_24_FN, FN_IP1_20, GP_3_23_FN, FN_IP1_19, GP_3_22_FN, FN_IRQ3, GP_3_21_FN, FN_IRQ2, GP_3_20_FN, FN_IRQ1, GP_3_19_FN, FN_IRQ0, GP_3_18_FN, FN_EX_WAIT0, GP_3_17_FN, FN_WE1_N, GP_3_16_FN, FN_WE0_N, GP_3_15_FN, FN_RD_WR_N, GP_3_14_FN, FN_RD_N, GP_3_13_FN, FN_BS_N, GP_3_12_FN, FN_EX_CS5_N, GP_3_11_FN, FN_EX_CS4_N, GP_3_10_FN, FN_EX_CS3_N, GP_3_9_FN, FN_EX_CS2_N, GP_3_8_FN, FN_EX_CS1_N, GP_3_7_FN, FN_EX_CS0_N, GP_3_6_FN, FN_CS1_N_A26, GP_3_5_FN, FN_IP1_18, GP_3_4_FN, FN_IP1_17, GP_3_3_FN, FN_A19, GP_3_2_FN, FN_A18, GP_3_1_FN, FN_A17, GP_3_0_FN, FN_A16 } }, { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_4_16_FN, FN_VI0_FIELD, GP_4_15_FN, FN_VI0_D11_G3_Y3, GP_4_14_FN, FN_VI0_D10_G2_Y2, GP_4_13_FN, FN_VI0_D9_G1_Y1, GP_4_12_FN, FN_VI0_D8_G0_Y0, GP_4_11_FN, FN_VI0_D7_B7_C7, GP_4_10_FN, FN_VI0_D6_B6_C6, GP_4_9_FN, FN_VI0_D5_B5_C5, GP_4_8_FN, FN_VI0_D4_B4_C4, GP_4_7_FN, FN_VI0_D3_B3_C3, GP_4_6_FN, FN_VI0_D2_B2_C2, GP_4_5_FN, FN_VI0_D1_B1_C1, GP_4_4_FN, FN_VI0_D0_B0_C0, GP_4_3_FN, FN_VI0_VSYNC_N, GP_4_2_FN, FN_VI0_HSYNC_N, GP_4_1_FN, FN_VI0_CLKENB, GP_4_0_FN, FN_VI0_CLK } }, { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_5_16_FN, FN_VI1_FIELD, GP_5_15_FN, FN_VI1_D11_G3_Y3, GP_5_14_FN, FN_VI1_D10_G2_Y2, GP_5_13_FN, FN_VI1_D9_G1_Y1, GP_5_12_FN, FN_VI1_D8_G0_Y0, GP_5_11_FN, FN_VI1_D7_B7_C7, GP_5_10_FN, FN_VI1_D6_B6_C6, GP_5_9_FN, FN_VI1_D5_B5_C5, GP_5_8_FN, FN_VI1_D4_B4_C4, GP_5_7_FN, FN_VI1_D3_B3_C3, GP_5_6_FN, FN_VI1_D2_B2_C2, GP_5_5_FN, 
FN_VI1_D1_B1_C1, GP_5_4_FN, FN_VI1_D0_B0_C0, GP_5_3_FN, FN_VI1_VSYNC_N, GP_5_2_FN, FN_VI1_HSYNC_N, GP_5_1_FN, FN_VI1_CLKENB, GP_5_0_FN, FN_VI1_CLK } }, { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_6_16_FN, FN_IP2_16, GP_6_15_FN, FN_IP2_15, GP_6_14_FN, FN_IP2_14, GP_6_13_FN, FN_IP2_13, GP_6_12_FN, FN_IP2_12, GP_6_11_FN, FN_IP2_11, GP_6_10_FN, FN_IP2_10, GP_6_9_FN, FN_IP2_9, GP_6_8_FN, FN_IP2_8, GP_6_7_FN, FN_IP2_7, GP_6_6_FN, FN_IP2_6, GP_6_5_FN, FN_IP2_5, GP_6_4_FN, FN_IP2_4, GP_6_3_FN, FN_IP2_3, GP_6_2_FN, FN_IP2_2, GP_6_1_FN, FN_IP2_1, GP_6_0_FN, FN_IP2_0 } }, { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_7_16_FN, FN_VI3_FIELD, GP_7_15_FN, FN_IP3_14, GP_7_14_FN, FN_VI3_D10_Y2, GP_7_13_FN, FN_IP3_13, GP_7_12_FN, FN_IP3_12, GP_7_11_FN, FN_IP3_11, GP_7_10_FN, FN_IP3_10, GP_7_9_FN, FN_IP3_9, GP_7_8_FN, FN_IP3_8, GP_7_7_FN, FN_IP3_7, GP_7_6_FN, FN_IP3_6, GP_7_5_FN, FN_IP3_5, GP_7_4_FN, FN_IP3_4, GP_7_3_FN, FN_IP3_3, GP_7_2_FN, FN_IP3_2, GP_7_1_FN, FN_IP3_1, GP_7_0_FN, FN_IP3_0 } }, { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_8_16_FN, FN_IP4_24, GP_8_15_FN, FN_IP4_23, GP_8_14_FN, FN_IP4_22, GP_8_13_FN, FN_IP4_21, GP_8_12_FN, FN_IP4_20_19, GP_8_11_FN, FN_IP4_18_17, GP_8_10_FN, FN_IP4_16_15, GP_8_9_FN, FN_IP4_14_13, GP_8_8_FN, FN_IP4_12_11, GP_8_7_FN, FN_IP4_10_9, GP_8_6_FN, FN_IP4_8_7, GP_8_5_FN, FN_IP4_6_5, GP_8_4_FN, FN_IP4_4, GP_8_3_FN, FN_IP4_3_2, GP_8_2_FN, FN_IP4_1, GP_8_1_FN, FN_IP4_0, GP_8_0_FN, FN_VI4_CLK } }, { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, GP_9_16_FN, FN_VI5_FIELD, GP_9_15_FN, FN_VI5_D11_Y3, GP_9_14_FN, FN_VI5_D10_Y2, GP_9_13_FN, FN_VI5_D9_Y1, GP_9_12_FN, FN_IP5_11, 
GP_9_11_FN, FN_IP5_10, GP_9_10_FN, FN_IP5_9, GP_9_9_FN, FN_IP5_8, GP_9_8_FN, FN_IP5_7, GP_9_7_FN, FN_IP5_6, GP_9_6_FN, FN_IP5_5, GP_9_5_FN, FN_IP5_4, GP_9_4_FN, FN_IP5_3, GP_9_3_FN, FN_IP5_2, GP_9_2_FN, FN_IP5_1, GP_9_1_FN, FN_IP5_0, GP_9_0_FN, FN_VI5_CLK } }, { PINMUX_CFG_REG("GPSR10", 0xE606002C, 32, 1) { GP_10_31_FN, FN_CAN1_RX, GP_10_30_FN, FN_CAN1_TX, GP_10_29_FN, FN_CAN_CLK, GP_10_28_FN, FN_CAN0_RX, GP_10_27_FN, FN_CAN0_TX, GP_10_26_FN, FN_SCIF_CLK, GP_10_25_FN, FN_IP6_18_17, GP_10_24_FN, FN_IP6_16, GP_10_23_FN, FN_IP6_15_14, GP_10_22_FN, FN_IP6_13_12, GP_10_21_FN, FN_IP6_11_10, GP_10_20_FN, FN_IP6_9_8, GP_10_19_FN, FN_RX1, GP_10_18_FN, FN_TX1, GP_10_17_FN, FN_RTS1_N, GP_10_16_FN, FN_CTS1_N, GP_10_15_FN, FN_SCK1, GP_10_14_FN, FN_RX0, GP_10_13_FN, FN_TX0, GP_10_12_FN, FN_RTS0_N, GP_10_11_FN, FN_CTS0_N, GP_10_10_FN, FN_SCK0, GP_10_9_FN, FN_IP6_7, GP_10_8_FN, FN_IP6_6, GP_10_7_FN, FN_HCTS1_N, GP_10_6_FN, FN_IP6_5, GP_10_5_FN, FN_IP6_4, GP_10_4_FN, FN_IP6_3, GP_10_3_FN, FN_IP6_2, GP_10_2_FN, FN_HRTS0_N, GP_10_1_FN, FN_IP6_1, GP_10_0_FN, FN_IP6_0 } }, { PINMUX_CFG_REG("GPSR11", 0xE6060030, 32, 1) { 0, 0, 0, 0, GP_11_29_FN, FN_AVS2, GP_11_28_FN, FN_AVS1, GP_11_27_FN, FN_ADICHS2, GP_11_26_FN, FN_ADICHS1, GP_11_25_FN, FN_ADICHS0, GP_11_24_FN, FN_ADIDATA, GP_11_23_FN, FN_ADICS_SAMP, GP_11_22_FN, FN_ADICLK, GP_11_21_FN, FN_IP7_20, GP_11_20_FN, FN_IP7_19, GP_11_19_FN, FN_IP7_18, GP_11_18_FN, FN_IP7_17, GP_11_17_FN, FN_IP7_16, GP_11_16_FN, FN_IP7_15_14, GP_11_15_FN, FN_IP7_13_12, GP_11_14_FN, FN_IP7_11_10, GP_11_13_FN, FN_IP7_9_8, GP_11_12_FN, FN_SD0_WP, GP_11_11_FN, FN_SD0_CD, GP_11_10_FN, FN_SD0_DAT3, GP_11_9_FN, FN_SD0_DAT2, GP_11_8_FN, FN_SD0_DAT1, GP_11_7_FN, FN_SD0_DAT0, GP_11_6_FN, FN_SD0_CMD, GP_11_5_FN, FN_SD0_CLK, GP_11_4_FN, FN_IP7_7, GP_11_3_FN, FN_IP7_6, GP_11_2_FN, FN_IP7_5_4, GP_11_1_FN, FN_IP7_3_2, GP_11_0_FN, FN_IP7_1_0 } }, { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) { /* 
IP0_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP0_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP0_23 [1] */ FN_DU0_DB7_C5, 0, /* IP0_22 [1] */ FN_DU0_DB6_C4, 0, /* IP0_21 [1] */ FN_DU0_DB5_C3, 0, /* IP0_20 [1] */ FN_DU0_DB4_C2, 0, /* IP0_19 [1] */ FN_DU0_DB3_C1, 0, /* IP0_18 [1] */ FN_DU0_DB2_C0, 0, /* IP0_17 [1] */ FN_DU0_DB1, 0, /* IP0_16 [1] */ FN_DU0_DB0, 0, /* IP0_15 [1] */ FN_DU0_DG7_Y3_DATA15, 0, /* IP0_14 [1] */ FN_DU0_DG6_Y2_DATA14, 0, /* IP0_13 [1] */ FN_DU0_DG5_Y1_DATA13, 0, /* IP0_12 [1] */ FN_DU0_DG4_Y0_DATA12, 0, /* IP0_11 [1] */ FN_DU0_DG3_C7_DATA11, 0, /* IP0_10 [1] */ FN_DU0_DG2_C6_DATA10, 0, /* IP0_9 [1] */ FN_DU0_DG1_DATA9, 0, /* IP0_8 [1] */ FN_DU0_DG0_DATA8, 0, /* IP0_7 [1] */ FN_DU0_DR7_Y9_DATA7, 0, /* IP0_6 [1] */ FN_DU0_DR6_Y8_DATA6, 0, /* IP0_5 [1] */ FN_DU0_DR5_Y7_DATA5, 0, /* IP0_4 [1] */ FN_DU0_DR4_Y6_DATA4, 0, /* IP0_3 [1] */ FN_DU0_DR3_Y5_DATA3, 0, /* IP0_2 [1] */ FN_DU0_DR2_Y4_DATA2, 0, /* IP0_1 [1] */ FN_DU0_DR1_DATA1, 0, /* IP0_0 [1] */ FN_DU0_DR0_DATA0, 0 } }, 
/* NOTE(review): IPSR descriptors use PINMUX_CFG_REG_VAR with explicit field
 * widths (MSB first); each [n]-bit field is followed by its 2^n candidate
 * functions, 0 meaning "reserved value". Field order must match the width
 * list in the PINMUX_CFG_REG_VAR argument list. */
{ PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) { /* IP1_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP1_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP1_23 [1] */ 0, 0, /* IP1_22 [1] */ FN_A25, FN_SSL, /* IP1_21 [1] */ FN_A24, FN_SPCLK, /* IP1_20 [1] */ FN_A23, FN_IO3, /* IP1_19 [1] */ FN_A22, FN_IO2, /* IP1_18 [1] */ FN_A21, FN_MISO_IO1, /* IP1_17 [1] */ FN_A20, FN_MOSI_IO0, /* IP1_16 [1] */ FN_DU1_DG7_Y3_DATA11, 0, /* IP1_15 [1] */ FN_DU1_DG6_Y2_DATA10, 0, /* IP1_14 [1] */ FN_DU1_DG5_Y1_DATA9, 0, /* IP1_13 [1] */ FN_DU1_DG4_Y0_DATA8, 0, /* IP1_12 [1] */ FN_DU1_DG3_C7_DATA7, 0, /* IP1_11 [1] */ FN_DU1_DG2_C6_DATA6, 0, /* IP1_10 [1] */ FN_DU1_DR7_DATA5, 0, /* IP1_9 [1] */ FN_DU1_DR6_DATA4, 0, /* IP1_8 [1] */ FN_DU1_DR5_Y7_DATA3, 0, /* IP1_7 [1] */ FN_DU1_DR4_Y6_DATA2, 0, /* IP1_6 [1] */ FN_DU1_DR3_Y5_DATA1, 0, /* IP1_5 
[1] */ FN_DU1_DR2_Y4_DATA0, 0, /* IP1_4 [1] */ FN_DU0_CDE, 0, /* IP1_3 [1] */ FN_DU0_DISP, 0, /* IP1_2 [1] */ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, 0, /* IP1_1 [1] */ FN_DU0_EXVSYNC_DU0_VSYNC, 0, /* IP1_0 [1] */ FN_DU0_EXHSYNC_DU0_HSYNC, 0 } }, { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32, 4, 4, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) { /* IP2_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP2_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP2_23_20 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP2_19_17 [3] */ 0, 0, 0, 0, 0, 0, 0, 0, /* IP2_16 [1] */ FN_VI2_FIELD, FN_AVB_TXD2, /* IP2_15 [1] */ FN_VI2_D11_Y3, FN_AVB_TXD1, /* IP2_14 [1] */ FN_VI2_D10_Y2, FN_AVB_TXD0, /* IP2_13 [1] */ FN_VI2_D9_Y1, FN_AVB_TX_EN, /* IP2_12 [1] */ FN_VI2_D8_Y0, FN_AVB_TXD3, /* IP2_11 [1] */ FN_VI2_D7_C7, FN_AVB_COL, /* IP2_10 [1] */ FN_VI2_D6_C6, FN_AVB_RX_ER, /* IP2_9 [1] */ FN_VI2_D5_C5, FN_AVB_RXD7, /* IP2_8 [1] */ FN_VI2_D4_C4, FN_AVB_RXD6, /* IP2_7 [1] */ FN_VI2_D3_C3, FN_AVB_RXD5, /* IP2_6 [1] */ FN_VI2_D2_C2, FN_AVB_RXD4, /* IP2_5 [1] */ FN_VI2_D1_C1, FN_AVB_RXD3, /* IP2_4 [1] */ FN_VI2_D0_C0, FN_AVB_RXD2, /* IP2_3 [1] */ FN_VI2_VSYNC_N, FN_AVB_RXD1, /* IP2_2 [1] */ FN_VI2_HSYNC_N, FN_AVB_RXD0, /* IP2_1 [1] */ FN_VI2_CLKENB, FN_AVB_RX_DV, /* IP2_0 [1] */ FN_VI2_CLK, FN_AVB_RX_CLK } }, { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) { /* IP3_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP3_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP3_23_20 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP3_19_16 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP3_15 [1] */ 0, 0, /* IP3_14 [1] */ FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH, /* IP3_13 [1] */ FN_VI3_D9_Y1, FN_AVB_GTXREFCLK, /* IP3_12 [1] */ FN_VI3_D8_Y0, FN_AVB_CRS, /* IP3_11 [1] */ FN_VI3_D7_C7, FN_AVB_PHY_INT, /* IP3_10 [1] */ FN_VI3_D6_C6, FN_AVB_MAGIC, /* IP3_9 
[1] */ FN_VI3_D5_C5, FN_AVB_LINK, /* IP3_8 [1] */ FN_VI3_D4_C4, FN_AVB_MDIO, /* IP3_7 [1] */ FN_VI3_D3_C3, FN_AVB_MDC, /* IP3_6 [1] */ FN_VI3_D2_C2, FN_AVB_GTX_CLK, /* IP3_5 [1] */ FN_VI3_D1_C1, FN_AVB_TX_ER, /* IP3_4 [1] */ FN_VI3_D0_C0, FN_AVB_TXD7, /* IP3_3 [1] */ FN_VI3_VSYNC_N, FN_AVB_TXD6, /* IP3_2 [1] */ FN_VI3_HSYNC_N, FN_AVB_TXD5, /* IP3_1 [1] */ FN_VI3_CLKENB, FN_AVB_TXD4, /* IP3_0 [1] */ FN_VI3_CLK, FN_AVB_TX_CLK } }, { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32, 4, 3, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1) { /* IP4_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP4_27_25 [3] */ 0, 0, 0, 0, 0, 0, 0, 0, /* IP4_24 [1] */ FN_VI4_FIELD, FN_VI3_D15_Y7, /* IP4_23 [1] */ FN_VI4_D11_Y3, FN_VI3_D14_Y6, /* IP4_22 [1] */ FN_VI4_D10_Y2, FN_VI3_D13_Y5, /* IP4_21 [1] */ FN_VI4_D9_Y1, FN_VI3_D12_Y4, /* IP4_20_19 [2] */ FN_VI4_D8_Y0, FN_VI0_D23_R7, FN_VI2_D15_Y7, 0, /* IP4_18_17 [2] */ FN_VI4_D7_C7, FN_VI0_D22_R6, FN_VI2_D14_Y6, 0, /* IP4_16_15 [2] */ FN_VI4_D6_C6, FN_VI0_D21_R5, FN_VI2_D13_Y5, 0, /* IP4_14_13 [2] */ FN_VI4_D5_C5, FN_VI0_D20_R4, FN_VI2_D12_Y4, 0, /* IP4_12_11 [2] */ FN_VI4_D4_C4, FN_VI0_D19_R3, FN_VI1_D15_G7_Y7, 0, /* IP4_10_9 [2] */ FN_VI4_D3_C3, FN_VI0_D18_R2, FN_VI1_D14_G6_Y6, 0, /* IP4_8_7 [2] */ FN_VI4_D2_C2, 0, FN_VI0_D17_R1, FN_VI1_D13_G5_Y5, /* IP4_6_5 [2] */ FN_VI4_D1_C1, FN_VI0_D16_R0, FN_VI1_D12_G4_Y4, 0, /* IP4_4 [1] */ FN_VI4_D0_C0, FN_VI0_D15_G7_Y7, /* IP4_3_2 [2] */ FN_VI4_VSYNC_N, FN_VI0_D14_G6_Y6, 0, 0, /* IP4_1 [1] */ FN_VI4_HSYNC_N, FN_VI0_D13_G5_Y5, /* IP4_0 [1] */ FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 } }, { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32, 4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) { /* IP5_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP5_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP5_23_20 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP5_19_16 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP5_15_12 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, /* IP5_11 [1] */ FN_VI5_D8_Y0, FN_VI1_D23_R7, /* IP5_10 [1] */ FN_VI5_D7_C7, FN_VI1_D22_R6, /* IP5_9 [1] */ FN_VI5_D6_C6, FN_VI1_D21_R5, /* IP5_8 [1] */ FN_VI5_D5_C5, FN_VI1_D20_R4, /* IP5_7 [1] */ FN_VI5_D4_C4, FN_VI1_D19_R3, /* IP5_6 [1] */ FN_VI5_D3_C3, FN_VI1_D18_R2, /* IP5_5 [1] */ FN_VI5_D2_C2, FN_VI1_D17_R1, /* IP5_4 [1] */ FN_VI5_D1_C1, FN_VI1_D16_R0, /* IP5_3 [1] */ FN_VI5_D0_C0, FN_VI1_D15_G7_Y7_B, /* IP5_2 [1] */ FN_VI5_VSYNC_N, FN_VI1_D14_G6_Y6_B, /* IP5_1 [1] */ FN_VI5_HSYNC_N, FN_VI1_D13_G5_Y5_B, /* IP5_0 [1] */ FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B } }, { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32, 4, 4, 4, 1, 2, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1) { /* IP6_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP6_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP6_23_20 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP6_19 [1] */ 0, 0, /* IP6_18_17 [2] */ FN_DREQ1_N, FN_RX3, 0, 0, /* IP6_16 [1] */ FN_TX3, 0, /* IP6_15_14 [2] */ FN_DACK1, FN_SCK3, 0, 0, /* IP6_13_12 [2] */ FN_DREQ0_N, FN_RX2, 0, 0, /* IP6_11_10 [2] */ FN_DACK0, FN_TX2, 0, 0, /* IP6_9_8 [2] */ FN_DRACK0, FN_SCK2, 0, 0, /* IP6_7 [1] */ FN_MSIOF1_RXD, FN_HRX1, /* IP6_6 [1] */ FN_MSIOF1_TXD, FN_HTX1, /* IP6_5 [1] */ FN_MSIOF1_SYNC, FN_HRTS1_N, /* IP6_4 [1] */ FN_MSIOF1_SCK, FN_HSCK1, /* IP6_3 [1] */ FN_MSIOF0_RXD, FN_HRX0, /* IP6_2 [1] */ FN_MSIOF0_TXD, FN_HTX0, /* IP6_1 [1] */ FN_MSIOF0_SYNC, FN_HCTS0_N, /* IP6_0 [1] */ FN_MSIOF0_SCK, FN_HSCK0 } }, { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32, 4, 4, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 2) { /* IP7_31_28 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP7_27_24 [4] */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* IP7_23_21 [3] */ 0, 0, 0, 0, 0, 0, 0, 0, /* IP7_20 [1] */ FN_AUDIO_CLKB, 0, /* IP7_19 [1] */ FN_AUDIO_CLKA, 0, /* IP7_18 [1] */ FN_AUDIO_CLKOUT, 0, /* IP7_17 [1] */ FN_SSI_SDATA4, 0, /* IP7_16 [1] */ FN_SSI_WS4, 0, /* IP7_15_14 [2] */ FN_SSI_SCK4, 
FN_TPU0TO3, 0, 0, /* IP7_13_12 [2] */ FN_SSI_SDATA3, FN_TPU0TO2, 0, 0, /* IP7_11_10 [2] */ FN_SSI_WS34, FN_TPU0TO1, 0, 0, /* IP7_9_8 [2] */ FN_SSI_SCK34, FN_TPU0TO0, 0, 0, /* IP7_7 [1] */ FN_PWM4, 0, /* IP7_6 [1] */ FN_PWM3, 0, /* IP7_5_4 [2] */ FN_PWM2, FN_TCLK3, FN_FSO_TOE, 0, /* IP7_3_2 [2] */ FN_PWM1, FN_TCLK2, FN_FSO_CFE_1, 0, /* IP7_1_0 [2] */ FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, 0 } }, { }, }; const struct sh_pfc_soc_info r8a7792_pinmux_info = { .name = "r8a77920_pfc", .unlock_reg = 0xe6060000, /* PMMR */ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .groups = pinmux_groups, .nr_groups = ARRAY_SIZE(pinmux_groups), .functions = pinmux_functions, .nr_functions = ARRAY_SIZE(pinmux_functions), .cfg_regs = pinmux_config_regs, .pinmux_data = pinmux_data, .pinmux_data_size = ARRAY_SIZE(pinmux_data), };
gpl-2.0
zachf714/android_kernel_common
drivers/pci/probe.c
159
39744
/* * probe.c - PCI detection and setup code */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/cpumask.h> #include <linux/pci-aspm.h> #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 /* Ugh. Need to stop exporting this to modules. */ LIST_HEAD(pci_root_buses); EXPORT_SYMBOL(pci_root_buses); static int find_anything(struct device *dev, void *data) { return 1; } /* * Some device drivers need know if pci is initiated. * Basically, we think pci is not initiated when there * is no device to be found on the pci_bus_type. */ int no_pci_devices(void) { struct device *dev; int no_devices; dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); no_devices = (dev == NULL); put_device(dev); return no_devices; } EXPORT_SYMBOL(no_pci_devices); /* * PCI Bus Class Devices */ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, int type, struct device_attribute *attr, char *buf) { int ret; const struct cpumask *cpumask; cpumask = cpumask_of_pcibus(to_pci_bus(dev)); ret = type? 
cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) : cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); buf[ret++] = '\n'; buf[ret] = '\0'; return ret; } static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev, struct device_attribute *attr, char *buf) { return pci_bus_show_cpuaffinity(dev, 0, attr, buf); } static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev, struct device_attribute *attr, char *buf) { return pci_bus_show_cpuaffinity(dev, 1, attr, buf); } DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL); DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL); /* * PCI Bus Class */ static void release_pcibus_dev(struct device *dev) { struct pci_bus *pci_bus = to_pci_bus(dev); if (pci_bus->bridge) put_device(pci_bus->bridge); pci_bus_remove_resources(pci_bus); kfree(pci_bus); } static struct class pcibus_class = { .name = "pci_bus", .dev_release = &release_pcibus_dev, }; static int __init pcibus_class_init(void) { return class_register(&pcibus_class); } postcore_initcall(pcibus_class_init); /* * Translate the low bits of the PCI base * to the resource type */ static inline unsigned int pci_calc_resource_flags(unsigned int flags) { if (flags & PCI_BASE_ADDRESS_SPACE_IO) return IORESOURCE_IO; if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) return IORESOURCE_MEM | IORESOURCE_PREFETCH; return IORESOURCE_MEM; } static u64 pci_size(u64 base, u64 maxbase, u64 mask) { u64 size = mask & maxbase; /* Find the significant bits */ if (!size) return 0; /* Get the lowest of them to find the decode size, and from that the extent. */ size = (size & ~(size-1)) - 1; /* base == maxbase can be valid only if the BAR has already been programmed with all 1s. 
*/ if (base == maxbase && ((base | size) & mask) != mask) return 0; return size; } static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) { if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; return pci_bar_io; } res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) return pci_bar_mem64; return pci_bar_mem32; } /** * pci_read_base - read a PCI BAR * @dev: the PCI device * @type: type of the BAR * @res: resource buffer to be filled in * @pos: BAR position in the config space * * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. */ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int pos) { u32 l, sz, mask; u16 orig_cmd; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); pci_write_config_word(dev, PCI_COMMAND, orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)); } res->name = pci_name(dev); pci_read_config_dword(dev, pos, &l); pci_write_config_dword(dev, pos, l | mask); pci_read_config_dword(dev, pos, &sz); pci_write_config_dword(dev, pos, l); if (!dev->mmio_always_on) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); /* * All bits set in sz means the device isn't working properly. * If the BAR isn't implemented, all bits must be 0. If it's a * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit * 1 must be clear. */ if (!sz || sz == 0xffffffff) goto fail; /* * I don't know how l can have all bits set. Copied from old code. * Maybe it fixes a bug on some ancient platform. 
*/ if (l == 0xffffffff) l = 0; if (type == pci_bar_unknown) { type = decode_bar(res, l); res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; if (type == pci_bar_io) { l &= PCI_BASE_ADDRESS_IO_MASK; mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT; } else { l &= PCI_BASE_ADDRESS_MEM_MASK; mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; } } else { res->flags |= (l & IORESOURCE_ROM_ENABLE); l &= PCI_ROM_ADDRESS_MASK; mask = (u32)PCI_ROM_ADDRESS_MASK; } if (type == pci_bar_mem64) { u64 l64 = l; u64 sz64 = sz; u64 mask64 = mask | (u64)~0 << 32; pci_read_config_dword(dev, pos + 4, &l); pci_write_config_dword(dev, pos + 4, ~0); pci_read_config_dword(dev, pos + 4, &sz); pci_write_config_dword(dev, pos + 4, l); l64 |= ((u64)l << 32); sz64 |= ((u64)sz << 32); sz64 = pci_size(l64, sz64, mask64); if (!sz64) goto fail; if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); goto fail; } res->flags |= IORESOURCE_MEM_64; if ((sizeof(resource_size_t) < 8) && l) { /* Address above 32-bit boundary; disable the BAR */ pci_write_config_dword(dev, pos, 0); pci_write_config_dword(dev, pos + 4, 0); res->start = 0; res->end = sz64; } else { res->start = l64; res->end = l64 + sz64; dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } } else { sz = pci_size(l, sz, mask); if (!sz) goto fail; res->start = l; res->end = l + sz; dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } out: return (type == pci_bar_mem64) ? 
1 : 0; fail: res->flags = 0; goto out; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) { unsigned int pos, reg; for (pos = 0; pos < howmany; pos++) { struct resource *res = &dev->resource[pos]; reg = PCI_BASE_ADDRESS_0 + (pos << 2); pos += __pci_read_base(dev, pci_bar_unknown, res, reg); } if (rom) { struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; dev->rom_base_reg = rom; res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_CACHEABLE | IORESOURCE_SIZEALIGN; __pci_read_base(dev, pci_bar_mem32, res, rom); } } static void __devinit pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; u8 io_base_lo, io_limit_lo; unsigned long base, limit; struct resource *res; res = child->resource[0]; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & PCI_IO_RANGE_MASK) << 8; limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8; if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { u16 io_base_hi, io_limit_hi; pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi); pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi); base |= (io_base_hi << 16); limit |= (io_limit_hi << 16); } if (base && base <= limit) { res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; if (!res->start) res->start = base; if (!res->end) res->end = limit + 0xfff; dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); } else { dev_printk(KERN_DEBUG, &dev->dev, " bridge window [io %#06lx-%#06lx] (disabled)\n", base, limit); } } static void __devinit pci_read_bridge_mmio(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; struct resource *res; res = child->resource[1]; pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 
16; limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; if (base && base <= limit) { res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; res->start = base; res->end = limit + 0xfffff; dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); } else { dev_printk(KERN_DEBUG, &dev->dev, " bridge window [mem %#010lx-%#010lx] (disabled)\n", base, limit + 0xfffff); } } static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; struct resource *res; res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi); pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi); /* * Some bridges set the base > limit by default, and some * (broken) BIOSes do not initialize them. If we find * this, just assume they are not being used. 
*/ if (mem_base_hi <= mem_limit_hi) { #if BITS_PER_LONG == 64 base |= ((long) mem_base_hi) << 32; limit |= ((long) mem_limit_hi) << 32; #else if (mem_base_hi || mem_limit_hi) { dev_err(&dev->dev, "can't handle 64-bit " "address space for bridge\n"); return; } #endif } } if (base && base <= limit) { res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; if (res->flags & PCI_PREF_RANGE_TYPE_64) res->flags |= IORESOURCE_MEM_64; res->start = base; res->end = limit + 0xfffff; dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); } else { dev_printk(KERN_DEBUG, &dev->dev, " bridge window [mem %#010lx-%#010lx pref] (disabled)\n", base, limit + 0xfffff); } } void __devinit pci_read_bridge_bases(struct pci_bus *child) { struct pci_dev *dev = child->self; struct resource *res; int i; if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ return; dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", child->secondary, child->subordinate, dev->transparent ? 
" (subtractive decode)" : ""); pci_bus_remove_resources(child); for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; pci_read_bridge_io(child); pci_read_bridge_mmio(child); pci_read_bridge_mmio_pref(child); if (dev->transparent) { pci_bus_for_each_resource(child->parent, res, i) { if (res) { pci_bus_add_resource(child, res, PCI_SUBTRACTIVE_DECODE); dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR (subtractive decode)\n", res); } } } } static struct pci_bus * pci_alloc_bus(void) { struct pci_bus *b; b = kzalloc(sizeof(*b), GFP_KERNEL); if (b) { INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->devices); INIT_LIST_HEAD(&b->slots); INIT_LIST_HEAD(&b->resources); b->max_bus_speed = PCI_SPEED_UNKNOWN; b->cur_bus_speed = PCI_SPEED_UNKNOWN; } return b; } static unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ PCI_SPEED_100MHz_PCIX, /* 2 */ PCI_SPEED_133MHz_PCIX, /* 3 */ PCI_SPEED_UNKNOWN, /* 4 */ PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ PCI_SPEED_66MHz_PCIX_266, /* 9 */ PCI_SPEED_100MHz_PCIX_266, /* A */ PCI_SPEED_133MHz_PCIX_266, /* B */ PCI_SPEED_UNKNOWN, /* C */ PCI_SPEED_66MHz_PCIX_533, /* D */ PCI_SPEED_100MHz_PCIX_533, /* E */ PCI_SPEED_133MHz_PCIX_533 /* F */ }; static unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCI_SPEED_UNKNOWN, /* 4 */ PCI_SPEED_UNKNOWN, /* 5 */ PCI_SPEED_UNKNOWN, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ PCI_SPEED_UNKNOWN, /* 9 */ PCI_SPEED_UNKNOWN, /* A */ PCI_SPEED_UNKNOWN, /* B */ PCI_SPEED_UNKNOWN, /* C */ PCI_SPEED_UNKNOWN, /* D */ PCI_SPEED_UNKNOWN, /* E */ PCI_SPEED_UNKNOWN /* F */ }; void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) { bus->cur_bus_speed = pcie_link_speed[linksta & 0xf]; } 
EXPORT_SYMBOL_GPL(pcie_update_link_speed); static unsigned char agp_speeds[] = { AGP_UNKNOWN, AGP_1X, AGP_2X, AGP_4X, AGP_8X }; static enum pci_bus_speed agp_speed(int agp3, int agpstat) { int index = 0; if (agpstat & 4) index = 3; else if (agpstat & 2) index = 2; else if (agpstat & 1) index = 1; else goto out; if (agp3) { index += 2; if (index == 5) index = 0; } out: return agp_speeds[index]; } static void pci_set_bus_speed(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; int pos; pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); if (!pos) pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); if (pos) { u32 agpstat, agpcmd; pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); } pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); if (pos) { u16 status; enum pci_bus_speed max; pci_read_config_word(bridge, pos + 2, &status); if (status & 0x8000) { max = PCI_SPEED_133MHz_PCIX_533; } else if (status & 0x4000) { max = PCI_SPEED_133MHz_PCIX_266; } else if (status & 0x0002) { if (((status >> 12) & 0x3) == 2) { max = PCI_SPEED_133MHz_PCIX_ECC; } else { max = PCI_SPEED_133MHz_PCIX; } } else { max = PCI_SPEED_66MHz_PCIX; } bus->max_bus_speed = max; bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf]; return; } pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); if (pos) { u32 linkcap; u16 linksta; pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); pcie_update_link_speed(bus, linksta); } } static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr) { struct pci_bus *child; int i; /* * Allocate a new bus, and inherit stuff from the parent.. 
*/ child = pci_alloc_bus(); if (!child) return NULL; child->parent = parent; child->ops = parent->ops; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; /* initialize some portions of the bus device, but don't register it * now as the parent is not properly set up yet. This device will get * registered later in pci_bus_add_devices() */ child->dev.class = &pcibus_class; dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); /* * Set up the primary, secondary and subordinate * bus numbers. */ child->number = child->secondary = busnr; child->primary = parent->secondary; child->subordinate = 0xff; if (!bridge) return child; child->self = bridge; child->bridge = get_device(&bridge->dev); pci_set_bus_speed(child); /* Set up default resource pointers and names.. */ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; child->resource[i]->name = child->name; } bridge->subordinate = child; return child; } struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) { struct pci_bus *child; child = pci_alloc_child_bus(parent, dev, busnr); if (child) { down_write(&pci_bus_sem); list_add_tail(&child->node, &parent->children); up_write(&pci_bus_sem); } return child; } static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) { struct pci_bus *parent = child->parent; /* Attempts to fix that up are really dangerous unless we're going to re-assign all bus numbers. */ if (!pcibios_assign_all_busses()) return; while (parent->parent && parent->subordinate < max) { parent->subordinate = max; pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); parent = parent->parent; } } /* * If it's a bridge, configure it and scan the bus behind it. * For CardBus bridges, we don't scan behind as the devices will * be handled by the bridge driver itself. 
* * We need to process bridges in two passes -- first we scan those * already configured by the BIOS and after we are done with all of * them, we proceed to assigning numbers to the remaining buses in * order to avoid overlaps between old and new bus numbers. */ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) { struct pci_bus *child; int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); u32 buses, i, j = 0; u16 bctl; u8 primary, secondary, subordinate; int broken = 0; pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); primary = buses & 0xFF; secondary = (buses >> 8) & 0xFF; subordinate = (buses >> 16) & 0xFF; dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", secondary, subordinate, pass); /* Check if setup is sensible at all */ if (!pass && (primary != bus->number || secondary <= bus->number)) { dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); broken = 1; } /* Disable MasterAbortMode during probing to avoid reporting of bus errors (in some architectures) */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); if ((secondary || subordinate) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { unsigned int cmax; /* * Bus already configured by firmware, process it in the first * pass and just note the configuration. */ if (pass) goto out; /* * If we already got to this bus through a different bridge, * don't re-add it. This can happen with the i450NX chipset. * * However, we continue to descend down the hierarchy and * scan remaining child buses. 
*/ child = pci_find_bus(pci_domain_nr(bus), secondary); if (!child) { child = pci_add_new_bus(bus, dev, secondary); if (!child) goto out; child->primary = primary; child->subordinate = subordinate; child->bridge_ctl = bctl; } cmax = pci_scan_child_bus(child); if (cmax > max) max = cmax; if (child->subordinate > max) max = child->subordinate; } else { /* * We need to assign a number to this bus which we always * do in the second pass. */ if (!pass) { if (pcibios_assign_all_busses() || broken) /* Temporarily disable forwarding of the configuration cycles on all bridges in this bus segment to avoid possible conflicts in the second pass between two bridges programmed with overlapping bus ranges. */ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses & ~0xffffff); goto out; } /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); /* Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged */ if (pci_find_bus(pci_domain_nr(bus), max+1)) goto out; child = pci_add_new_bus(bus, dev, ++max); buses = (buses & 0xff000000) | ((unsigned int)(child->primary) << 0) | ((unsigned int)(child->secondary) << 8) | ((unsigned int)(child->subordinate) << 16); /* * yenta.c forces a secondary latency timer of 176. * Copy that behaviour here. */ if (is_cardbus) { buses &= ~0xff000000; buses |= CARDBUS_LATENCY_TIMER << 24; } /* * We need to blast all three values with a single write. */ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); if (!is_cardbus) { child->bridge_ctl = bctl; /* * Adjust subordinate busnr in parent buses. * We do this before scanning for children because * some devices may not be detected if the bios * was lazy. */ pci_fixup_parent_subordinate_busnr(child, max); /* Now we can scan all subordinate buses... */ max = pci_scan_child_bus(child); /* * now fix it up again since we have found * the real value of max. 
*/ pci_fixup_parent_subordinate_busnr(child, max); } else { /* * For CardBus bridges, we leave 4 bus numbers * as cards with a PCI-to-PCI bridge can be * inserted later. */ for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) { struct pci_bus *parent = bus; if (pci_find_bus(pci_domain_nr(bus), max+i+1)) break; while (parent->parent) { if ((!pcibios_assign_all_busses()) && (parent->subordinate > max) && (parent->subordinate <= max+i)) { j = 1; } parent = parent->parent; } if (j) { /* * Often, there are two cardbus bridges * -- try to leave one valid bus number * for each one. */ i /= 2; break; } } max += i; pci_fixup_parent_subordinate_busnr(child, max); } /* * Set the subordinate bus number to its real value. */ child->subordinate = max; pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } sprintf(child->name, (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"), pci_domain_nr(bus), child->number); /* Has only triggered on CardBus, fixup is in yenta_socket */ while (bus->parent) { if ((child->subordinate > bus->subordinate) || (child->number > bus->subordinate) || (child->number < bus->number) || (child->subordinate < bus->number)) { dev_info(&child->dev, "[bus %02x-%02x] %s " "hidden behind%s bridge %s [bus %02x-%02x]\n", child->number, child->subordinate, (bus->number > child->subordinate && bus->subordinate < child->number) ? "wholly" : "partially", bus->self->transparent ? " transparent" : "", dev_name(&bus->dev), bus->number, bus->subordinate); } bus = bus->parent; } out: pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); return max; } /* * Read interrupt line and base address registers. * The architecture-dependent code can tweak these, of course. 
*/ static void pci_read_irq(struct pci_dev *dev) { unsigned char irq; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq); dev->pin = irq; if (irq) pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); dev->irq = irq; } void set_pcie_port_type(struct pci_dev *pdev) { int pos; u16 reg16; pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) return; pdev->is_pcie = 1; pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; } void set_pcie_hotplug_bridge(struct pci_dev *pdev) { int pos; u16 reg16; u32 reg32; pos = pci_pcie_cap(pdev); if (!pos) return; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); if (!(reg16 & PCI_EXP_FLAGS_SLOT)) return; pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32); if (reg32 & PCI_EXP_SLTCAP_HPC) pdev->is_hotplug_bridge = 1; } #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) /** * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill * * Initialize the device structure with information about the device's * vendor,class,memory and IO-space addresses,IRQ lines etc. * Called at initialisation of the PCI subsystem and by CardBus services. * Returns 0 on success and negative if unknown type of device (not normal, * bridge or CardBus). */ int pci_setup_device(struct pci_dev *dev) { u32 class; u8 hdr_type; struct pci_slot *slot; int pos = 0; if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) return -EIO; dev->sysdata = dev->bus->sysdata; dev->dev.parent = dev->bus->bridge; dev->dev.bus = &pci_bus_type; dev->hdr_type = hdr_type & 0x7f; dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) dev->slot = slot; /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) set this higher, assuming the system even supports it. 
*/ dev->dma_mask = 0xffffffff; dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); dev->revision = class & 0xff; class >>= 8; /* upper 3 bytes */ dev->class = class; class >>= 8; dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n", dev->vendor, dev->device, dev->hdr_type, class); /* need to have dev->class ready */ dev->cfg_size = pci_cfg_space_size(dev); /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); /* device class may be changed after fixup */ class = dev->class >> 8; switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) goto bad; pci_read_irq(dev); pci_read_bases(dev, 6, PCI_ROM_ADDRESS); pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device); /* * Do the ugly legacy mode stuff here rather than broken chip * quirk code. Legacy mode ATA controllers have fixed * addresses. These are not always echoed in BAR0-3, and * BAR0-3 in a few cases contain junk! 
*/ if (class == PCI_CLASS_STORAGE_IDE) { u8 progif; pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); if ((progif & 1) == 0) { dev->resource[0].start = 0x1F0; dev->resource[0].end = 0x1F7; dev->resource[0].flags = LEGACY_IO_RESOURCE; dev->resource[1].start = 0x3F6; dev->resource[1].end = 0x3F6; dev->resource[1].flags = LEGACY_IO_RESOURCE; } if ((progif & 4) == 0) { dev->resource[2].start = 0x170; dev->resource[2].end = 0x177; dev->resource[2].flags = LEGACY_IO_RESOURCE; dev->resource[3].start = 0x376; dev->resource[3].end = 0x376; dev->resource[3].flags = LEGACY_IO_RESOURCE; } } break; case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ if (class != PCI_CLASS_BRIDGE_PCI) goto bad; /* The PCI-to-PCI bridge spec requires that subtractive decoding (i.e. transparent) bridge must have programming interface code of 0x01. */ pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); } break; case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ if (class != PCI_CLASS_BRIDGE_CARDBUS) goto bad; pci_read_irq(dev); pci_read_bases(dev, 1, 0); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); break; default: /* unknown header */ dev_err(&dev->dev, "unknown header type %02x, " "ignoring device\n", dev->hdr_type); return -EIO; bad: dev_err(&dev->dev, "ignoring class %02x (doesn't match header " "type %02x)\n", class, dev->hdr_type); dev->class = PCI_CLASS_NOT_DEFINED; } /* We found a fine healthy device, go go go... 
*/ return 0; } static void pci_release_capabilities(struct pci_dev *dev) { pci_vpd_release(dev); pci_iov_release(dev); } /** * pci_release_dev - free a pci device structure when all users of it are finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this pci device are * done. */ static void pci_release_dev(struct device *dev) { struct pci_dev *pci_dev; pci_dev = to_pci_dev(dev); pci_release_capabilities(pci_dev); kfree(pci_dev); } /** * pci_cfg_space_size - get the configuration space size of the PCI device. * @dev: PCI device * * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices * have 4096 bytes. Even if the device is capable, that doesn't mean we can * access it. Maybe we don't have a way to generate extended config space * accesses, or the device is behind a reverse Express bridge. So we try * reading the dword at 0x100 which must either be 0 or a valid extended * capability header. */ int pci_cfg_space_size_ext(struct pci_dev *dev) { u32 status; int pos = PCI_CFG_SPACE_SIZE; if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) goto fail; if (status == 0xffffffff) goto fail; return PCI_CFG_SPACE_EXP_SIZE; fail: return PCI_CFG_SPACE_SIZE; } int pci_cfg_space_size(struct pci_dev *dev) { int pos; u32 status; u16 class; class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_HOST) return pci_cfg_space_size_ext(dev); pos = pci_pcie_cap(dev); if (!pos) { pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) goto fail; pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) goto fail; } return pci_cfg_space_size_ext(dev); fail: return PCI_CFG_SPACE_SIZE; } static void pci_release_bus_bridge_dev(struct device *dev) { kfree(dev); } struct pci_dev *alloc_pci_dev(void) { struct pci_dev *dev; dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return NULL; INIT_LIST_HEAD(&dev->bus_list); return dev; } 
EXPORT_SYMBOL(alloc_pci_dev); /* * Read the config data for a PCI device, sanity-check it * and fill in the dev structure... */ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; u32 l; int delay = 1; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) return NULL; /* some broken boards return 0 or ~0 if a slot is empty: */ if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000) return NULL; /* Configuration request Retry Status */ while (l == 0xffff0001) { msleep(delay); delay *= 2; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) return NULL; /* Card hasn't responded in 60 seconds? Must be stuck. */ if (delay > 60 * 1000) { printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " "responding\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); return NULL; } } dev = alloc_pci_dev(); if (!dev) return NULL; dev->bus = bus; dev->devfn = devfn; dev->vendor = l & 0xffff; dev->device = (l >> 16) & 0xffff; if (pci_setup_device(dev)) { kfree(dev); return NULL; } return dev; } static void pci_init_capabilities(struct pci_dev *dev) { /* MSI/MSI-X list */ pci_msi_init_pci_dev(dev); /* Buffers for saving PCIe and PCI-X capabilities */ pci_allocate_cap_save_buffers(dev); /* Power Management */ pci_pm_init(dev); platform_pci_wakeup_init(dev); /* Vital Product Data */ pci_vpd_pci22_init(dev); /* Alternative Routing-ID Forwarding */ pci_enable_ari(dev); /* Single Root I/O Virtualization */ pci_iov_init(dev); /* Enable ACS P2P upstream forwarding */ pci_enable_acs(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { device_initialize(&dev->dev); dev->dev.release = pci_release_dev; pci_dev_get(dev); dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; pci_set_dma_max_seg_size(dev, 65536); pci_set_dma_seg_boundary(dev, 0xffffffff); /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); 
/* Clear the state_saved flag. */ dev->state_saved = false; /* Initialize various capabilities */ pci_init_capabilities(dev); /* * Add the device to our list of discovered devices * and the bus list for fixup functions, etc. */ down_write(&pci_bus_sem); list_add_tail(&dev->bus_list, &bus->devices); up_write(&pci_bus_sem); } struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; dev = pci_get_slot(bus, devfn); if (dev) { pci_dev_put(dev); return dev; } dev = pci_scan_device(bus, devfn); if (!dev) return NULL; pci_device_add(dev, bus); return dev; } EXPORT_SYMBOL(pci_scan_single_device); static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) { u16 cap; unsigned pos, next_fn; if (!dev) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); if (!pos) return 0; pci_read_config_word(dev, pos + 4, &cap); next_fn = cap >> 8; if (next_fn <= fn) return 0; return next_fn; } static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) { return (fn + 1) % 8; } static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) { return 0; } static int only_one_child(struct pci_bus *bus) { struct pci_dev *parent = bus->self; if (!parent || !pci_is_pcie(parent)) return 0; if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) return 1; return 0; } /** * pci_scan_slot - scan a PCI slot on a bus for devices. * @bus: PCI bus to scan * @devfn: slot number to scan (must have zero function.) * * Scan a PCI slot on the specified PCI bus for devices, adding * discovered devices to the @bus->devices list. New devices * will not have is_added set. * * Returns the number of new devices found. 
*/ int pci_scan_slot(struct pci_bus *bus, int devfn) { unsigned fn, nr = 0; struct pci_dev *dev; unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; if (only_one_child(bus) && (devfn > 0)) return 0; /* Already scanned the entire slot */ dev = pci_scan_single_device(bus, devfn); if (!dev) return 0; if (!dev->is_added) nr++; if (pci_ari_enabled(bus)) next_fn = next_ari_fn; else if (dev->multifunction) next_fn = next_trad_fn; for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { dev = pci_scan_single_device(bus, devfn + fn); if (dev) { if (!dev->is_added) nr++; dev->multifunction = 1; } } /* only one slot has pcie device */ if (bus->self && nr) pcie_aspm_init_link_state(bus->self); return nr; } unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) { unsigned int devfn, pass, max = bus->secondary; struct pci_dev *dev; dev_dbg(&bus->dev, "scanning bus\n"); /* Go find them, Rover! */ for (devfn = 0; devfn < 0x100; devfn += 8) pci_scan_slot(bus, devfn); /* Reserve buses for SR-IOV capability. */ max += pci_iov_bus_range(bus); /* * After performing arch-dependent fixup of the bus, look behind * all PCI-to-PCI bridges on this bus. */ if (!bus->is_added) { dev_dbg(&bus->dev, "fixups for bus\n"); pcibios_fixup_bus(bus); if (pci_is_root_bus(bus)) bus->is_added = 1; } for (pass=0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) max = pci_scan_bridge(bus, dev, max, pass); } /* * We've scanned the bus and so we know all about what's on * the other side of any bridges that may be on this bus plus * any devices. * * Return how far we've got finding sub-buses. 
*/ dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); return max; } struct pci_bus * pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata) { int error; struct pci_bus *b, *b2; struct device *dev; b = pci_alloc_bus(); if (!b) return NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev){ kfree(b); return NULL; } b->sysdata = sysdata; b->ops = ops; b2 = pci_find_bus(pci_domain_nr(b), bus); if (b2) { /* If we already got to this bus through a different bridge, ignore it */ dev_dbg(&b2->dev, "bus already known\n"); goto err_out; } down_write(&pci_bus_sem); list_add_tail(&b->node, &pci_root_buses); up_write(&pci_bus_sem); dev->parent = parent; dev->release = pci_release_bus_bridge_dev; dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); error = device_register(dev); if (error) goto dev_reg_err; b->bridge = get_device(dev); device_enable_async_suspend(b->bridge); if (!parent) set_dev_node(b->bridge, pcibus_to_node(b)); b->dev.class = &pcibus_class; b->dev.parent = b->bridge; dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); error = device_register(&b->dev); if (error) goto class_dev_reg_err; error = device_create_file(&b->dev, &dev_attr_cpuaffinity); if (error) goto dev_create_file_err; /* Create legacy_io and legacy_mem files for this bus */ pci_create_legacy_files(b); b->number = b->secondary = bus; b->resource[0] = &ioport_resource; b->resource[1] = &iomem_resource; return b; dev_create_file_err: device_unregister(&b->dev); class_dev_reg_err: device_unregister(dev); dev_reg_err: down_write(&pci_bus_sem); list_del(&b->node); up_write(&pci_bus_sem); err_out: kfree(dev); kfree(b); return NULL; } struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata) { struct pci_bus *b; b = pci_create_bus(parent, bus, ops, sysdata); if (b) b->subordinate = pci_scan_child_bus(b); return b; } EXPORT_SYMBOL(pci_scan_bus_parented); #ifdef CONFIG_HOTPLUG /** * pci_rescan_bus 
- scan a PCI bus for devices. * @bus: PCI bus to scan * * Scan a PCI bus and child buses for new devices, adds them, * and enables them. * * Returns the max number of subordinate bus discovered. */ unsigned int __ref pci_rescan_bus(struct pci_bus *bus) { unsigned int max; struct pci_dev *dev; max = pci_scan_child_bus(bus); down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) if (dev->subordinate) pci_bus_size_bridges(dev->subordinate); up_read(&pci_bus_sem); pci_bus_assign_resources(bus); pci_enable_bridges(bus); pci_bus_add_devices(bus); return max; } EXPORT_SYMBOL_GPL(pci_rescan_bus); EXPORT_SYMBOL(pci_add_new_bus); EXPORT_SYMBOL(pci_scan_slot); EXPORT_SYMBOL(pci_scan_bridge); EXPORT_SYMBOL_GPL(pci_scan_child_bus); #endif static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) { const struct pci_dev *a = to_pci_dev(d_a); const struct pci_dev *b = to_pci_dev(d_b); if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; if (a->bus->number < b->bus->number) return -1; else if (a->bus->number > b->bus->number) return 1; if (a->devfn < b->devfn) return -1; else if (a->devfn > b->devfn) return 1; return 0; } void __init pci_sort_breadthfirst(void) { bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); }
gpl-2.0
huzl008/zeda-android-kernel
arch/tile/mm/homecache.c
159
12406
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * This code maintains the "home" for each page in the system. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/bootmem.h> #include <linux/rmap.h> #include <linux/pagemap.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/pagevec.h> #include <linux/ptrace.h> #include <linux/timex.h> #include <linux/cache.h> #include <linux/smp.h> #include <linux/module.h> #include <asm/page.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/pgalloc.h> #include <asm/homecache.h> #include <arch/sim.h> #include "migrate.h" #if CHIP_HAS_COHERENT_LOCAL_CACHE() /* * The noallocl2 option suppresses all use of the L2 cache to cache * locally from a remote home. There's no point in using it if we * don't have coherent local caching, though. */ static int __write_once noallocl2; static int __init set_noallocl2(char *str) { noallocl2 = 1; return 0; } early_param("noallocl2", set_noallocl2); #else #define noallocl2 0 #endif /* Provide no-op versions of these routines to keep flush_remote() cleaner. */ #define mark_caches_evicted_start() 0 #define mark_caches_evicted_finish(mask, timestamp) do {} while (0) /* * Update the irq_stat for cpus that we are going to interrupt * with TLB or cache flushes. Also handle removing dataplane cpus * from the TLB flush set, and setting dataplane_tlb_state instead. 
*/ static void hv_flush_update(const struct cpumask *cache_cpumask, struct cpumask *tlb_cpumask, unsigned long tlb_va, unsigned long tlb_length, HV_Remote_ASID *asids, int asidcount) { struct cpumask mask; int i, cpu; cpumask_clear(&mask); if (cache_cpumask) cpumask_or(&mask, &mask, cache_cpumask); if (tlb_cpumask && tlb_length) { cpumask_or(&mask, &mask, tlb_cpumask); } for (i = 0; i < asidcount; ++i) cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask); /* * Don't bother to update atomically; losing a count * here is not that critical. */ for_each_cpu(cpu, &mask) ++per_cpu(irq_stat, cpu).irq_hv_flush_count; } /* * This wrapper function around hv_flush_remote() does several things: * * - Provides a return value error-checking panic path, since * there's never any good reason for hv_flush_remote() to fail. * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally * is the type that Linux wants to pass around anyway. * - Centralizes the mark_caches_evicted() handling. * - Canonicalizes that lengths of zero make cpumasks NULL. * - Handles deferring TLB flushes for dataplane tiles. * - Tracks remote interrupts in the per-cpu irq_cpustat_t. * * Note that we have to wait until the cache flush completes before * updating the per-cpu last_cache_flush word, since otherwise another * concurrent flush can race, conclude the flush has already * completed, and start to use the page while it's still dirty * remotely (running concurrently with the actual evict, presumably). 
*/ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, const struct cpumask *cache_cpumask_orig, HV_VirtAddr tlb_va, unsigned long tlb_length, unsigned long tlb_pgsize, const struct cpumask *tlb_cpumask_orig, HV_Remote_ASID *asids, int asidcount) { int rc; int timestamp = 0; /* happy compiler */ struct cpumask cache_cpumask_copy, tlb_cpumask_copy; struct cpumask *cache_cpumask, *tlb_cpumask; HV_PhysAddr cache_pa; char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5]; mb(); /* provided just to simplify "magic hypervisor" mode */ /* * Canonicalize and copy the cpumasks. */ if (cache_cpumask_orig && cache_control) { cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig); cache_cpumask = &cache_cpumask_copy; } else { cpumask_clear(&cache_cpumask_copy); cache_cpumask = NULL; } if (cache_cpumask == NULL) cache_control = 0; if (tlb_cpumask_orig && tlb_length) { cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig); tlb_cpumask = &tlb_cpumask_copy; } else { cpumask_clear(&tlb_cpumask_copy); tlb_cpumask = NULL; } hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, asids, asidcount); cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; if (cache_control & HV_FLUSH_EVICT_L2) timestamp = mark_caches_evicted_start(); rc = hv_flush_remote(cache_pa, cache_control, cpumask_bits(cache_cpumask), tlb_va, tlb_length, tlb_pgsize, cpumask_bits(tlb_cpumask), asids, asidcount); if (cache_control & HV_FLUSH_EVICT_L2) mark_caches_evicted_finish(cache_cpumask, timestamp); if (rc == 0) return; cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); pr_err("hv_flush_remote(%#llx, %#lx, %p [%s]," " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", cache_pa, cache_control, cache_cpumask, cache_buf, (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask, tlb_buf, asids, asidcount, rc); panic("Unsafe to continue."); } void homecache_evict(const struct cpumask *mask) { flush_remote(0, HV_FLUSH_EVICT_L2, mask, 
0, 0, 0, NULL, NULL, 0); } /* Return a mask of the cpus whose caches currently own these pages. */ static void homecache_mask(struct page *page, int pages, struct cpumask *home_mask) { int i; cpumask_clear(home_mask); for (i = 0; i < pages; ++i) { int home = page_home(&page[i]); if (home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT) { cpumask_copy(home_mask, cpu_possible_mask); return; } #if CHIP_HAS_CBOX_HOME_MAP() if (home == PAGE_HOME_HASH) { cpumask_or(home_mask, home_mask, &hash_for_home_map); continue; } #endif if (home == PAGE_HOME_UNCACHED) continue; BUG_ON(home < 0 || home >= NR_CPUS); cpumask_set_cpu(home, home_mask); } } /* * Return the passed length, or zero if it's long enough that we * believe we should evict the whole L2 cache. */ static unsigned long cache_flush_length(unsigned long length) { return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length; } /* Flush a page out of whatever cache(s) it is in. */ void homecache_flush_cache(struct page *page, int order) { int pages = 1 << order; int length = cache_flush_length(pages * PAGE_SIZE); unsigned long pfn = page_to_pfn(page); struct cpumask home_mask; homecache_mask(page, pages, &home_mask); flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0); sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE); } /* Report the home corresponding to a given PTE. */ static int pte_to_home(pte_t pte) { if (hv_pte_get_nc(pte)) return PAGE_HOME_IMMUTABLE; switch (hv_pte_get_mode(pte)) { case HV_PTE_MODE_CACHE_TILE_L3: return get_remote_cache_cpu(pte); case HV_PTE_MODE_CACHE_NO_L3: return PAGE_HOME_INCOHERENT; case HV_PTE_MODE_UNCACHED: return PAGE_HOME_UNCACHED; #if CHIP_HAS_CBOX_HOME_MAP() case HV_PTE_MODE_CACHE_HASH_L3: return PAGE_HOME_HASH; #endif } panic("Bad PTE %#llx\n", pte.val); } /* Update the home of a PTE if necessary (can also be used for a pgprot_t). */ pte_t pte_set_home(pte_t pte, int home) { /* Check for non-linear file mapping "PTEs" and pass them through. 
*/ if (pte_file(pte)) return pte; #if CHIP_HAS_MMIO() /* Check for MMIO mappings and pass them through. */ if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO) return pte; #endif /* * Only immutable pages get NC mappings. If we have a * non-coherent PTE, but the underlying page is not * immutable, it's likely the result of a forced * caching setting running up against ptrace setting * the page to be writable underneath. In this case, * just keep the PTE coherent. */ if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) { pte = hv_pte_clear_nc(pte); pr_err("non-immutable page incoherently referenced: %#llx\n", pte.val); } switch (home) { case PAGE_HOME_UNCACHED: pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED); break; case PAGE_HOME_INCOHERENT: pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); break; case PAGE_HOME_IMMUTABLE: /* * We could home this page anywhere, since it's immutable, * but by default just home it to follow "hash_default". */ BUG_ON(hv_pte_get_writable(pte)); if (pte_get_forcecache(pte)) { /* Upgrade "force any cpu" to "No L3" for immutable. 
*/ if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3 && pte_get_anyhome(pte)) { pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); } } else #if CHIP_HAS_CBOX_HOME_MAP() if (hash_default) pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); else #endif pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); pte = hv_pte_set_nc(pte); break; #if CHIP_HAS_CBOX_HOME_MAP() case PAGE_HOME_HASH: pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); break; #endif default: BUG_ON(home < 0 || home >= NR_CPUS || !cpu_is_valid_lotar(home)); pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); pte = set_remote_cache_cpu(pte, home); break; } #if CHIP_HAS_NC_AND_NOALLOC_BITS() if (noallocl2) pte = hv_pte_set_no_alloc_l2(pte); /* Simplify "no local and no l3" to "uncached" */ if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) && hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) { pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED); } #endif /* Checking this case here gives a better panic than from the hv. */ BUG_ON(hv_pte_get_mode(pte) == 0); return pte; } EXPORT_SYMBOL(pte_set_home); /* * The routines in this section are the "static" versions of the normal * dynamic homecaching routines; they just set the home cache * of a kernel page once, and require a full-chip cache/TLB flush, * so they're not suitable for anything but infrequent use. 
*/ #if CHIP_HAS_CBOX_HOME_MAP() static inline int initial_page_home(void) { return PAGE_HOME_HASH; } #else static inline int initial_page_home(void) { return 0; } #endif int page_home(struct page *page) { if (PageHighMem(page)) { return initial_page_home(); } else { unsigned long kva = (unsigned long)page_address(page); return pte_to_home(*virt_to_pte(NULL, kva)); } } void homecache_change_page_home(struct page *page, int order, int home) { int i, pages = (1 << order); unsigned long kva; BUG_ON(PageHighMem(page)); BUG_ON(page_count(page) > 1); BUG_ON(page_mapcount(page) != 0); kva = (unsigned long) page_address(page); flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map, kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask, NULL, 0); for (i = 0; i < pages; ++i, kva += PAGE_SIZE) { pte_t *ptep = virt_to_pte(NULL, kva); pte_t pteval = *ptep; BUG_ON(!pte_present(pteval) || pte_huge(pteval)); *ptep = pte_set_home(pteval, home); } } struct page *homecache_alloc_pages(gfp_t gfp_mask, unsigned int order, int home) { struct page *page; BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ page = alloc_pages(gfp_mask, order); if (page) homecache_change_page_home(page, order, home); return page; } EXPORT_SYMBOL(homecache_alloc_pages); struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order, int home) { struct page *page; BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ page = alloc_pages_node(nid, gfp_mask, order); if (page) homecache_change_page_home(page, order, home); return page; } void homecache_free_pages(unsigned long addr, unsigned int order) { struct page *page; if (addr == 0) return; VM_BUG_ON(!virt_addr_valid((void *)addr)); page = virt_to_page((void *)addr); if (put_page_testzero(page)) { int pages = (1 << order); homecache_change_page_home(page, order, initial_page_home()); while (pages--) __free_page(page++); } }
gpl-2.0
android-armv7a-belalang-tempur/RAZORFERRARI
fs/ext4/mballoc.c
415
144899
/* * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com * Written by Alex Tomas <alex@clusterfs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Licens * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ /* * mballoc.c contains the multiblocks allocation routines */ #include "ext4_jbd2.h" #include "mballoc.h" #include <linux/log2.h> #include <linux/module.h> #include <linux/slab.h> #include <trace/events/ext4.h> #ifdef CONFIG_EXT4_DEBUG ushort ext4_mballoc_debug __read_mostly; module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644); MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); #endif /* * MUSTDO: * - test ext4_ext_search_left() and ext4_ext_search_right() * - search for metadata in few groups * * TODO v4: * - normalization should take into account whether file is still open * - discard preallocations if no free space left (policy?) * - don't normalize tails * - quota * - reservation for superuser * * TODO v3: * - bitmap read-ahead (proposed by Oleg Drokin aka green) * - track min/max extents in each group for better group selection * - mb_mark_used() may allocate chunk right after splitting buddy * - tree of groups sorted by number of free blocks * - error handling */ /* * The allocation request involve request for multiple number of blocks * near to the goal(block) value specified. 
* * During initialization phase of the allocator we decide to use the * group preallocation or inode preallocation depending on the size of * the file. The size of the file could be the resulting file size we * would have after allocation, or the current file size, which ever * is larger. If the size is less than sbi->s_mb_stream_request we * select to use the group preallocation. The default value of * s_mb_stream_request is 16 blocks. This can also be tuned via * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in * terms of number of blocks. * * The main motivation for having small file use group preallocation is to * ensure that we have small files closer together on the disk. * * First stage the allocator looks at the inode prealloc list, * ext4_inode_info->i_prealloc_list, which contains list of prealloc * spaces for this particular inode. The inode prealloc space is * represented as: * * pa_lstart -> the logical start block for this prealloc space * pa_pstart -> the physical start block for this prealloc space * pa_len -> length for this prealloc space (in clusters) * pa_free -> free space available in this prealloc space (in clusters) * * The inode preallocation space is used looking at the _logical_ start * block. If only the logical file block falls within the range of prealloc * space we will consume the particular prealloc space. This makes sure that * we have contiguous physical blocks representing the file blocks * * The important thing to be noted in case of inode prealloc space is that * we don't modify the values associated to inode prealloc space except * pa_free. * * If we are not able to find blocks in the inode prealloc space and if we * have the group allocation flag set then we look at the locality group * prealloc space. These are per CPU prealloc list represented as * * ext4_sb_info.s_locality_groups[smp_processor_id()] * * The reason for having a per cpu locality group is to reduce the contention * between CPUs. 
It is possible to get scheduled at this point. * * The locality group prealloc space is used looking at whether we have * enough free space (pa_free) within the prealloc space. * * If we can't allocate blocks via inode prealloc or/and locality group * prealloc then we look at the buddy cache. The buddy cache is represented * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets * mapped to the buddy and bitmap information regarding different * groups. The buddy information is attached to buddy cache inode so that * we can access them through the page cache. The information regarding * each group is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are stored in the * inode as: * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. So for each group we * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / * blocksize) blocks. So it can have information regarding groups_per_page * which is blocks_per_page/2 * * The buddy cache inode is not stored on disk. The inode is thrown * away when the filesystem is unmounted. * * We look for count number of blocks in the buddy cache. If we were able * to locate that many free blocks we return with additional information * regarding rest of the contiguous physical block available * * Before allocating blocks via buddy cache we normalize the request * blocks. This ensure we ask for more blocks that we needed. The extra * blocks that we get after allocation is added to the respective prealloc * list. In case of inode preallocation we follow a list of heuristics * based on file size. This can be found in ext4_mb_normalize_request. If * we are doing a group prealloc we try to normalize the request to * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is * dependent on the cluster size; for non-bigalloc file systems, it is * 512 blocks. 
This can be tuned via * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in * terms of number of blocks. If we have mounted the file system with -O * stripe=<value> option the group prealloc request is normalized to the * the smallest multiple of the stripe value (sbi->s_stripe) which is * greater than the default mb_group_prealloc. * * The regular allocator (using the buddy cache) supports a few tunables. * * /sys/fs/ext4/<partition>/mb_min_to_scan * /sys/fs/ext4/<partition>/mb_max_to_scan * /sys/fs/ext4/<partition>/mb_order2_req * * The regular allocator uses buddy scan only if the request len is power of * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The * value of s_mb_order2_reqs can be tuned via * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to * stripe size (sbi->s_stripe), we try to search for contiguous block in * stripe size. This should result in better allocation on RAID setups. If * not, we search in the specific group using bitmap for best extents. The * tunable min_to_scan and max_to_scan control the behaviour here. * min_to_scan indicate how long the mballoc __must__ look for a best * extent and max_to_scan indicates how long the mballoc __can__ look for a * best extent in the found extents. Searching for the blocks starts with * the group specified as the goal value in allocation context via * ac_g_ex. Each group is first checked based on the criteria whether it * can be used for allocation. ext4_mb_good_group explains how the groups are * checked. * * Both the prealloc space are getting populated as above. So for the first * request we will hit the buddy cache which will result in this prealloc * space getting filled. The prealloc space is then later used for the * subsequent request. 
*/ /* * mballoc operates on the following data: * - on-disk bitmap * - in-core buddy (actually includes buddy and bitmap) * - preallocation descriptors (PAs) * * there are two types of preallocations: * - inode * assiged to specific inode and can be used for this inode only. * it describes part of inode's space preallocated to specific * physical blocks. any block from that preallocated can be used * independent. the descriptor just tracks number of blocks left * unused. so, before taking some block from descriptor, one must * make sure corresponded logical block isn't allocated yet. this * also means that freeing any block within descriptor's range * must discard all preallocated blocks. * - locality group * assigned to specific locality group which does not translate to * permanent set of inodes: inode can join and leave group. space * from this type of preallocation can be used for any inode. thus * it's consumed from the beginning to the end. * * relation between them can be expressed as: * in-core buddy = on-disk bitmap + preallocation descriptors * * this mean blocks mballoc considers used are: * - allocated blocks (persistent) * - preallocated blocks (non-persistent) * * consistency in mballoc world means that at any time a block is either * free or used in ALL structures. notice: "any time" should not be read * literally -- time is discrete and delimited by locks. * * to keep it simple, we don't use block numbers, instead we count number of * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA. 
* * all operations can be expressed as: * - init buddy: buddy = on-disk + PAs * - new PA: buddy += N; PA = N * - use inode PA: on-disk += N; PA -= N * - discard inode PA buddy -= on-disk - PA; PA = 0 * - use locality group PA on-disk += N; PA -= N * - discard locality group PA buddy -= PA; PA = 0 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap * is used in real operation because we can't know actual used * bits from PA, only from on-disk bitmap * * if we follow this strict logic, then all operations above should be atomic. * given some of them can block, we'd have to use something like semaphores * killing performance on high-end SMP hardware. let's try to relax it using * the following knowledge: * 1) if buddy is referenced, it's already initialized * 2) while block is used in buddy and the buddy is referenced, * nobody can re-allocate that block * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has * bit set and PA claims same block, it's OK. IOW, one can set bit in * on-disk bitmap if buddy has same bit set or/and PA covers corresponded * block * * so, now we're building a concurrency table: * - init buddy vs. * - new PA * blocks for PA are allocated in the buddy, buddy must be referenced * until PA is linked to allocation group to avoid concurrent buddy init * - use inode PA * we need to make sure that either on-disk bitmap or PA has uptodate data * given (3) we care that PA-=N operation doesn't interfere with init * - discard inode PA * the simplest way would be to have buddy initialized by the discard * - use locality group PA * again PA-=N must be serialized with init * - discard locality group PA * the simplest way would be to have buddy initialized by the discard * - new PA vs. 
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until its block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used PA to emptiness.
no need to modify buddy
 * in this case, but we should care about concurrent init
 */

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */

/* Slab caches shared by all mounted ext4 filesystems. */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.
There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

/*
 * Fold the low (sub-word) bits of @addr into *@bit and round @addr down to
 * an unsigned-long boundary, as required by the ext4_*_bit() primitives on
 * architectures that need aligned bitmap addresses.
 */
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

/* Find the first zero bit in [start, max); returns max if none found. */
static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

/* Find the first set bit in [start, max); returns max if none found. */
static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

/*
 * Return the bitmap holding buddies of the given @order and store the
 * number of valid bits in it in *@max. Order 0 is the block bitmap itself.
 */
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
/* Debug shadow-bitmap check: verify blocks being freed were in use. */
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

/* Debug shadow-bitmap check: verify blocks being allocated were free. */
static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

/* Debug check: compare the shadow bitmap against @bitmap, BUG on mismatch. */
static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

/*
 * Heavyweight consistency check of the buddy structures (run on 1 out of
 * every 100 invocations): each buddy order must agree with the one below
 * it, bb_counters[] must match, and all group PAs must be marked in-core.
 */
static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
*/
static void mb_set_largest_free_order(struct super_block *sb,
					struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */
	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

/*
 * Build the in-core buddy from @bitmap (on-disk bitmap + preallocations)
 * and refresh the group's free/fragment statistics.
 */
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u clusters in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* Reset all buddy bitmaps and counters, then rebuild from the block bitmap. */
static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
		ext4_set_bits(buddy, 0, count);
	}
	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
		sizeof(*e4b->bd_info->bb_counters) *
		(e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
		e4b->bd_bitmap, e4b->bd_group);
}

/* The buddy information is attached the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involve
 * block bitmap and buddy information. The information are
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
			err = -ENOMEM;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
			err = -EIO;
			goto out;
		}
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carry information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

/* Unlock and release the pages pinned by ext4_mb_get_buddy_page_lock(). */
static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which map to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned buddy page to page cache.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks page
	 * what we'd like to avoid in fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize the same. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

/* Drop the page references taken by ext4_mb_load_buddy(). */
static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}

/* Return the order of the smallest free buddy containing @block (0 if none). */
static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = e4b->bd_buddy;
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}

/* Clear @len bits starting at @cur, word-at-a-time where aligned. */
static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

/* clear bits in given range
 * will return first found zero bit if any, -1 otherwise
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

/* Set @len bits starting at @cur, word-at-a-time where aligned. */
void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

/*
 * _________________________________________________________________ */

/*
 * Extend or shrink a freed range's border by one bit in @bitmap,
 * depending on whether the neighbour on the given @side is free;
 * returns the delta (+1/-1) to apply to bb_counters for this order.
 */
static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	}
	else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6]
		 * Right neighbour [7] is busy. It can't be coalesced with
		 * [6], so mark [6] free, increase bb_counters and shrink
		 * range to [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */

		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}

/*
 * Free @count clusters starting at @first in the buddy/bitmap of @e4b,
 * updating free/fragment accounting; caller holds the group lock.
 */
static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int left_is_free = 0;
	int right_is_free = 0;
	int block;
	int last = first + count - 1;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(last >= (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* access memory sequentially: check left neighbour,
	 * clear range and then check right neighbour
	 */
	if (first != 0)
		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

	if (unlikely(block != -1)) {
		ext4_fsblk_t blocknr;

		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
		blocknr += EXT4_C2B(EXT4_SB(sb), block);
		ext4_grp_locked_error(sb, e4b->bd_group,
				      inode ? inode->i_ino : 0,
				      blocknr,
				      "freeing already freed block "
				      "(bit %u)", block);
		mb_regenerate_buddy(e4b);
		goto done;
	}

	/* let's maintain fragments counter */
	if (left_is_free && right_is_free)
		e4b->bd_info->bb_fragments--;
	else if (!left_is_free && !right_is_free)
		e4b->bd_info->bb_fragments++;

	/* buddy[0] == bd_bitmap is a special case, so handle
	 * it right away and let mb_buddy_mark_free stay free of
	 * zero order checks.
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

/*
 * Describe in *@ex the free extent containing @block, extended forward up
 * to @needed clusters; returns the extent length (0 if @block is in use).
 * Caller holds the group lock.
 */
static int mb_find_extent(struct ext4_buddy *e4b, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

/*
 * Mark extent @ex used in the bitmap and buddy of @e4b, splitting larger
 * buddies as needed; caller holds the group lock. The return value encodes
 * the remaining length and order at the first split (for statistics).
 */
static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Must be called under group lock!
 */
/*
 * Commit the best-found extent (ac->ac_b_ex) into the buddy/bitmap and
 * flip the allocation context to AC_STATUS_FOUND.  Caller must hold the
 * group lock for e4b->bd_group (the BUG_ONs below assume a consistent,
 * locked view of the group).
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* never hand back more than the goal length asked for */
	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	/* mb_mark_used() packs two values into one int: low 16 bits are
	 * the tail length, high bits the buddy order used */
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get a ext4_mb_init_cache_call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

/*
 * Decide whether scanning should stop.  Breaks out after too many
 * extents have been examined (s_mb_max_to_scan), or — once enough
 * candidates were seen (finish_group / s_mb_min_to_scan) — re-verifies
 * the current best extent and claims it if it is still free.
 */
static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfy the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

/*
 * Reload the group of the remembered best extent, re-verify that it is
 * still free (someone may have taken it while the group was unlocked),
 * and claim it if so.  Returns 0 or the buddy-load error.
 */
static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * Try to satisfy the request exactly at the goal position
 * (ac->ac_g_ex).  Honoured only with EXT4_MB_HINT_TRY_GOAL; a
 * stripe-sized request is additionally required to start on a stripe
 * boundary, and EXT4_MB_HINT_MERGE accepts even a short extent at the
 * goal.  Returns 0 or the buddy-load error.
 */
static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * IF we have corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicate that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	/* step through the group one stripe at a time, looking for a
	 * fully free stripe-sized run starting on a stripe boundary */
	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}

/* This is now called BEFORE we load the buddy bitmap.
 */
/*
 * Cheap suitability check for a group at the given criteria pass 'cr'
 * (0 = exact buddy-order match ... 3 = take anything).  Called before
 * (and again after) loading/locking the group, so it must stay cheap.
 * Returns 1 if the group is worth scanning, 0 otherwise.
 */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	free = grp->bb_free;
	if (free == 0)
		return 0;
	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
		return 0;

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		int ret = ext4_mb_init_group(ac->ac_sb, group);
		if (ret)
			return 0;
	}

	fragments = grp->bb_fragments;
	if (fragments == 0)
		return 0;

	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
		    (free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;

		if (grp->bb_largest_free_order < ac->ac_2order)
			return 0;

		return 1;
	case 1:
		/* average fragment must cover the request */
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		/* enough free space in total is good enough */
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}

/*
 * Main scan loop of the regular (non-preallocation) allocator: try the
 * goal first, then sweep all groups at progressively looser criteria
 * (cr 0..3) until an extent is claimed into ac->ac_b_ex.
 */
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups, group, i;
	int cr;
	int err = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac2_order is set only if the fe_len is a power of 2
	 * if ac2_order is set we also set criteria to 0 so that we
	 * try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than equal to the sbi_s_mb_order2_reqs
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 */
	if (i >= sbi->s_mb_order2_reqs) {
		/*
		 * This should tell if fe_len is exactly power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = i - 1;
	}

	/* if stream allocation is enabled, use global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be hot point */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more-less suitable blocks */
	cr = ac->ac_2order ? 0 : 1;
	/*
	 * cr == 0 try to get exact allocation,
	 * cr == 3 try to get anything
	 */
repeat:
	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
		ac->ac_criteria = cr;
		/*
		 * searching for the right group start
		 * from the goal value specified
		 */
		group = ac->ac_g_ex.fe_group;

		for (i = 0; i < ngroups; group++, i++) {
			/*
			 * Artificially restricted ngroups for non-extent
			 * files makes group > ngroups possible on first loop.
			 */
			if (group >= ngroups)
				group = 0;

			/* This now checks without needing the buddy page */
			if (!ext4_mb_good_group(ac, group, cr))
				continue;

			err = ext4_mb_load_buddy(sb, group, &e4b);
			if (err)
				goto out;

			ext4_lock_group(sb, group);

			/*
			 * We need to check again after locking the
			 * block group
			 */
			if (!ext4_mb_good_group(ac, group, cr)) {
				ext4_unlock_group(sb, group);
				ext4_mb_unload_buddy(&e4b);
				continue;
			}

			ac->ac_groups_scanned++;
			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
				ext4_mb_simple_scan_group(ac, &e4b);
			else if (cr == 1 && sbi->s_stripe &&
					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
				ext4_mb_scan_aligned(ac, &e4b);
			else
				ext4_mb_complex_scan_group(ac, &e4b);

			ext4_unlock_group(sb, group);
			ext4_mb_unload_buddy(&e4b);

			if (ac->ac_status != AC_STATUS_CONTINUE)
				break;
		}
	}

	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		/*
		 * We've been searching too long. Let's try to allocate
		 * the best chunk we've found so far
		 */

		ext4_mb_try_best_found(ac, &e4b);
		if (ac->ac_status != AC_STATUS_FOUND) {
			/*
			 * Someone more lucky has already allocated it.
			 * The only thing we can do is just take first
			 * found block(s)
			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
			 */
			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			ac->ac_flags |= EXT4_MB_HINT_FIRST;
			cr = 3;
			atomic_inc(&sbi->s_mb_lost_chunks);
			goto repeat;
		}
	}
out:
	return err;
}

/*
 * seq_file iterator for /proc/fs/ext4/<dev>/mb_groups: positions are
 * group numbers, stored shifted by one so that group 0 is not confused
 * with the NULL end-of-sequence marker.
 */
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

/*
 * Print one mb_groups line: free clusters, fragment count, first free
 * cluster, and the per-order buddy counters for one group.  Works on a
 * stack copy of the group info so the group lock is not needed while
 * formatting.
 */
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = seq->private;
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i;
	int err, buddy_loaded = 0;
	struct ext4_buddy e4b;
	struct ext4_group_info *grinfo;
	/* stack buffer big enough for the counters of any blocksize */
	struct sg {
		struct ext4_group_info info;
		ext4_grpblk_t counters[16];
	} sg;

	group--;
	if (group == 0)
		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
			   "group", "free", "frags", "first",
			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");

	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
		sizeof(struct ext4_group_info);
	grinfo = ext4_get_group_info(sb, group);
	/* Load the group info in memory only if not already loaded. */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			seq_printf(seq, "#%-5u: I/O error\n", group);
			return 0;
		}
		buddy_loaded = 1;
	}

	memcpy(&sg, ext4_get_group_info(sb, group), i);

	if (buddy_loaded)
		ext4_mb_unload_buddy(&e4b);

	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
			sg.info.bb_fragments, sg.info.bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
				sg.info.bb_counters[i] : 0);
	seq_printf(seq, " ]\n");

	return 0;
}

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_mb_seq_groups_ops = {
	.start  = ext4_mb_seq_groups_start,
	.next   = ext4_mb_seq_groups_next,
	.stop   = ext4_mb_seq_groups_stop,
	.show   = ext4_mb_seq_groups_show,
};

static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
{
	struct super_block *sb = PDE_DATA(inode);
	int rc;

	rc = seq_open(file, &ext4_mb_seq_groups_ops);
	if (rc == 0) {
		struct seq_file *m = file->private_data;
		m->private = sb;
	}
	return rc;

}

static const struct file_operations ext4_mb_seq_groups_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_mb_seq_groups_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Pick the group-info slab cache matching the filesystem blocksize. */
static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
{
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];

	BUG_ON(!cachep);
	return cachep;
}

/*
 * Allocate the top-level s_group_info array for the specified number
 * of groups
 */
int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned size;
	struct ext4_group_info ***new_groupinfo;

	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
		EXT4_DESC_PER_BLOCK_BITS(sb);
	if (size <= sbi->s_group_info_size)
		return 0;

	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
	if (!new_groupinfo) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
		return -ENOMEM;
	}
	if (sbi->s_group_info) {
		/* grow: copy existing pointers, then free the old array */
		memcpy(new_groupinfo, sbi->s_group_info,
		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
		ext4_kvfree(sbi->s_group_info);
	}
	sbi->s_group_info = new_groupinfo;
	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
		   sbi->s_group_info_size);
	return 0;
}

/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i;
	int metalen = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_KERNEL);
		if (meta_group_info == NULL) {
			ext4_msg(sb, KERN_ERR, "can't allocate mem "
				 "for a buddy group");
			goto exit_meta_group_info;
		}
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
			meta_group_info;
	}

	meta_group_info =
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
	if (meta_group_info[i] == NULL) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
		goto exit_group_info;
	}
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		meta_group_info[i]->bb_free =
			ext4_free_clusters_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_group_clusters(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */

#ifdef DOUBLE_CHECK
	{
		struct buffer_head *bh;
		meta_group_info[i]->bb_bitmap =
			kmalloc(sb->s_blocksize, GFP_KERNEL);
		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
		bh = ext4_read_block_bitmap(sb, group);
		BUG_ON(bh == NULL);
		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
			sb->s_blocksize);
		put_bh(bh);
	}
#endif

	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
	}
exit_meta_group_info:
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */

/*
 * Build the in-memory group-info structures for every group and create
 * the buddy-cache inode that backs their bitmap/buddy pages.
 */
static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;
	struct ext4_group_desc *desc;
	struct kmem_cache *cachep;

	err = ext4_mb_alloc_groupinfo(sb, ngroups);
	if (err)
		return err;

	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		ext4_msg(sb, KERN_ERR, "can't get new inode");
		goto err_freesgi;
	}
	/* To avoid potentially colliding with an valid on-disk inode number,
	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
	 * not in the inode hash, so it should never be found by iget(), but
	 * this will avoid confusion if it ever shows up during debugging. */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	return 0;

err_freebuddy:
	/* unwind: free per-group info allocated so far, then the tables */
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0)
		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
	i = sbi->s_group_info_size;
	while (i-- > 0)
		kfree(sbi->s_group_info[i]);
	iput(sbi->s_buddy_cache);
err_freesgi:
	ext4_kvfree(sbi->s_group_info);
	return -ENOMEM;
}

/* Destroy all per-blocksize group-info slab caches. */
static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		if (ext4_groupinfo_caches[i])
			kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

/*
 * Create (once, under a mutex) the group-info slab cache sized for the
 * given blocksize.  Safe to call concurrently from multiple mounts.
 */
static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	slab_size = offsetof(struct ext4_group_info,
				bb_counters[blocksize_bits + 2]);

	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
					NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
	if (!cachep) {
		printk(KERN_EMERG
		       "EXT4-fs: no memory for groupinfo slab cache\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Per-mount mballoc initialisation: offsets/maxs tables for every buddy
 * order, tunables, per-CPU locality groups, backend group info, and the
 * /proc mb_groups entry.  Cleans up everything on failure.
 */
int ext4_mb_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset;
	unsigned max;
	int ret;

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
	if (ret < 0)
		goto out;

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	i = 1;
	offset = 0;
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += 1 << (sb->s_blocksize_bits - i);
		max = max >> 1;
		i++;
	} while (i <= sb->s_blocksize_bits + 1);

	spin_lock_init(&sbi->s_md_lock);
	spin_lock_init(&sbi->s_bal_lock);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes.  However for bigalloc file
	 * systems, this is probably too big (i.e, if the cluster size
	 * is 1 megabyte, then group preallocation size becomes half a
	 * gigabyte!).  As a default, we will keep a two megabyte
	 * group pralloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters.  This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
	 */
	if (sbi->s_stripe > 1) {
		sbi->s_mb_group_prealloc = roundup(
			sbi->s_mb_group_prealloc, sbi->s_stripe);
	}

	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
	if (sbi->s_locality_groups == NULL) {
		ret = -ENOMEM;
		goto out_free_groupinfo_slab;
	}
	for_each_possible_cpu(i) {
		struct ext4_locality_group *lg;
		lg = per_cpu_ptr(sbi->s_locality_groups, i);
		mutex_init(&lg->lg_mutex);
		for (j = 0; j < PREALLOC_TB_SIZE; j++)
			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
		spin_lock_init(&lg->lg_prealloc_lock);
	}

	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0)
		goto out_free_locality_groups;

	if (sbi->s_proc)
		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
				 &ext4_mb_seq_groups_fops, sb);

	return 0;

out_free_locality_groups:
	free_percpu(sbi->s_locality_groups);
	sbi->s_locality_groups = NULL;
out_free_groupinfo_slab:
	ext4_groupinfo_destroy_slabs();
out:
	kfree(sbi->s_mb_offsets);
	sbi->s_mb_offsets = NULL;
	kfree(sbi->s_mb_maxs);
	sbi->s_mb_maxs = NULL;
	return ret;
}

/* need to called with the ext4 group lock held */
static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
	struct ext4_prealloc_space *pa;
	struct list_head *cur, *tmp;
	int count = 0;

	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		list_del(&pa->pa_group_list);
		count++;
		kmem_cache_free(ext4_pspace_cachep, pa);
	}
	if (count)
		mb_debug(1, "mballoc: %u PAs left\n", count);

}

/*
 * Per-mount mballoc teardown: mirror of ext4_mb_init().  Releases
 * group info, offsets/maxs tables, the buddy-cache inode and locality
 * groups, and optionally prints allocation statistics.
 */
int ext4_mb_release(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	int num_meta_group_infos;
	struct ext4_group_info *grinfo;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	if (sbi->s_proc)
		remove_proc_entry("mb_groups", sbi->s_proc);

	if (sbi->s_group_info) {
		for (i = 0; i < ngroups; i++) {
			grinfo = ext4_get_group_info(sb, i);
#ifdef DOUBLE_CHECK
			kfree(grinfo->bb_bitmap);
#endif
			ext4_lock_group(sb, i);
			ext4_mb_cleanup_pa(grinfo);
			ext4_unlock_group(sb, i);
			kmem_cache_free(cachep, grinfo);
		}
		num_meta_group_infos = (ngroups +
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
			EXT4_DESC_PER_BLOCK_BITS(sb);
		for (i = 0; i < num_meta_group_infos; i++)
			kfree(sbi->s_group_info[i]);
		ext4_kvfree(sbi->s_group_info);
	}
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	if (sbi->s_buddy_cache)
		iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u blocks %u reqs (%u success)",
				atomic_read(&sbi->s_bal_allocated),
				atomic_read(&sbi->s_bal_reqs),
				atomic_read(&sbi->s_bal_success));
		ext4_msg(sb, KERN_INFO,
		      "mballoc: %u extents scanned, %u goal hits, "
				"%u 2^N hits, %u breaks, %u lost",
				atomic_read(&sbi->s_bal_ex_scanned),
				atomic_read(&sbi->s_bal_goals),
				atomic_read(&sbi->s_bal_2orders),
				atomic_read(&sbi->s_bal_breaks),
				atomic_read(&sbi->s_mb_lost_chunks));
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %lu generated and it took %Lu",
				sbi->s_mb_buddies_generated,
				sbi->s_mb_generation_time);
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u preallocated, %u discarded",
				atomic_read(&sbi->s_mb_preallocated),
				atomic_read(&sbi->s_mb_discarded));
	}

	free_percpu(sbi->s_locality_groups);

	return 0;
}

/*
 * Translate a (group, cluster, count) range to filesystem blocks and
 * issue a discard (TRIM) request for it.
 */
static inline int ext4_issue_discard(struct super_block *sb,
		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
{
	ext4_fsblk_t discard_block;

	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
			 ext4_group_first_block_no(sb, block_group));
	count = EXT4_C2B(EXT4_SB(sb), count);
	trace_ext4_discard_blocks(sb,
			(unsigned long long) discard_block, count);
	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}

/*
 * This function is called by the jbd2 layer once the commit has finished,
 * so we know we can free the blocks that were released with that commit.
 */
static void ext4_free_data_callback(struct super_block *sb,
				    struct ext4_journal_cb_entry *jce,
				    int rc)
{
	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0, count2 = 0;

	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
		 entry->efd_count, entry->efd_group, entry);

	if (test_opt(sb, DISCARD)) {
		err = ext4_issue_discard(sb, entry->efd_group,
					 entry->efd_start_cluster,
					 entry->efd_count);
		if (err && err != -EOPNOTSUPP)
			ext4_msg(sb, KERN_WARNING, "discard request in"
				 " group:%d block:%d count:%d failed"
				 " with %d", entry->efd_group,
				 entry->efd_start_cluster,
				 entry->efd_count, err);
	}

	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
	/* we expect to find existing buddy because it's pinned */
	BUG_ON(err != 0);

	db = e4b.bd_info;
	/* there are blocks to put in buddy to make them really free */
	count += entry->efd_count;
	count2++;
	ext4_lock_group(sb, entry->efd_group);
	/* Take it out of per group rb tree */
	rb_erase(&entry->efd_node, &(db->bb_free_root));
	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);

	/*
	 * Clear the trimmed flag for the group so that the next
	 * ext4_trim_fs can trim it.
	 * If the volume is mounted with -o discard, online discard
	 * is supported and the free blocks will be trimmed online.
	 */
	if (!test_opt(sb, DISCARD))
		EXT4_MB_GRP_CLEAR_TRIMMED(db);

	if (!db->bb_free_root.rb_node) {
		/* No more items in the per group rb tree
		 * balance refcounts from ext4_mb_free_metadata()
		 */
		page_cache_release(e4b.bd_buddy_page);
		page_cache_release(e4b.bd_bitmap_page);
	}
	ext4_unlock_group(sb, entry->efd_group);
	kmem_cache_free(ext4_free_data_cachep, entry);
	ext4_mb_unload_buddy(&e4b);

	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}

/*
 * Module init: create the three mballoc slab caches.  Each failure
 * path tears down the caches created before it.
 */
int __init ext4_init_mballoc(void)
{
	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
					SLAB_RECLAIM_ACCOUNT);
	if (ext4_pspace_cachep == NULL)
		return -ENOMEM;

	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
				    SLAB_RECLAIM_ACCOUNT);
	if (ext4_ac_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		return -ENOMEM;
	}

	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
					   SLAB_RECLAIM_ACCOUNT);
	if (ext4_free_data_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		kmem_cache_destroy(ext4_ac_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_data_cachep);
	ext4_groupinfo_destroy_slabs();
}


/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
 * Returns 0 if success or error code
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_clstrs)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	err = -EIO;
	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (!bitmap_bh)
		goto out_err;

	/* journal write access must be taken before modifying buffers */
	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	err = ext4_journal_get_write_access(handle, gdp_bh);
	if (err)
		goto out_err;

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_data_block_valid(sbi, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/* File system mounted not to panic on error
		 * Fix the bitmap and repeat the block allocation
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			      ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			err = -EAGAIN;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		      ac->ac_b_ex.fe_len);
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
					     ext4_free_clusters_after_init(sb,
						ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_group_clusters_set(sb, gdp, len);
	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic64_sub(ac->ac_b_ex.fe_len,
			     &sbi->s_flex_groups[flex_group].free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	brelse(bitmap_bh);
	return err;
}

/*
 * here we normalize request for locality group
 * Group requests are normalized to s_mb_group_prealloc, which goes to
 * s_strip if we set the same via mount option.
* s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	/* goal length is simply the tunable group-prealloc size */
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(1, "#%u: goal %u blocks for locality group\n",
		current->pid, ac->ac_g_ex.fe_len);
}

/*
 * Normalization means making request better in terms of
 * size and alignment
 *
 * Rounds the original request (ac->ac_o_ex) up to a power-of-two-ish
 * chunk predicted from the file size, then trims the resulting range so
 * it neither covers already-allocated neighbours (ar->lleft/lright) nor
 * overlaps any existing inode preallocation.  The result becomes the
 * goal extent ac->ac_g_ex.
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, start_off;
	loff_t orig_size __maybe_unused;
	ext4_lblk_t start;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *pa;

	/* do normalize only data requests, metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometime caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return ;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn actual file size
	 * given current request is allocated */
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		/* larger requests get aligned to 2/4/8 MB boundaries */
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		/* request too big to round: take it as is */
		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
		size = ac->ac_o_ex.fe_len << bsbits;
	}
	/* convert byte quantities back to (logical) block units */
	size = size >> bsbits;
	start = start_off >> bsbits;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	end = start + size;

	/* check we don't cross already preallocated blocks */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		/* unlocked peek first; recheck pa_deleted under the lock */
		if (pa->pa_deleted)
			continue;
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
						  pa->pa_len);

		/* PA must not overlap original request */
		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
			ac->ac_o_ex.fe_logical < pa->pa_lstart));

		/* skip PAs this normalized request doesn't overlap with */
		if (pa->pa_lstart >= end || pa_end <= start) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		BUG_ON(pa->pa_lstart <= start && pa_end >= end);

		/* adjust start or end to be adjacent to this pa */
		if (pa_end <= ac->ac_o_ex.fe_logical) {
			BUG_ON(pa_end < start);
			start = pa_end;
		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
			BUG_ON(pa->pa_lstart > end);
			end = pa->pa_lstart;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();
	size = end - start;

	/* XXX: extra loop to check we really don't overlap preallocations */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0) {
			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
							  pa->pa_len);
			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	/* sanity: the normalized range must still cover fe_logical */
	if (start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
	}
	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical);
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));

	/* now prepare goal request */

	/* XXX: is it better to align blocks WRT to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size))) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
		(unsigned) orig_size, (unsigned) start);
}

/*
 * Update the sysfs-visible allocator statistics (s_bal_*) for a
 * finished allocation, and fire the matching tracepoint.
 */
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}

/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;

	if (pa && pa->pa_type == MB_INODE_PA)
		/* give the blocks taken by ext4_mb_use_inode_pa() back */
		pa->pa_free += ac->ac_b_ex.fe_len;
}

/*
 * use blocks preallocated to inode
 *
 * Maps the tail of the original request onto the inode PA: computes the
 * physical range, fills in ac->ac_b_ex and marks the context FOUND.
 * Caller holds pa->pa_lock (pa_free is modified here).
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	pa->pa_free -= len;

	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
}

/*
 * use blocks preallocated to locality group
 */
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	unsigned int len = ac->ac_o_ex.fe_len;

	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
					&ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* we don't correct pa_pstart or pa_plen here to avoid
	 * possible race when the group is being loaded concurrently
	 * instead we correct pa later, after blocks are marked
	 * in on-disk bitmap -- see ext4_mb_release_context()
	 * Other CPUs are prevented from allocating from this pa by lg_mutex
	 */
	/* NOTE(review): the first debug argument prints pa_lstart-len,
	 * which looks odd for a "start" value -- debug-only, verify */
	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
}

/*
 * Return the prealloc space that have minimal distance
 * from the goal block. @cpa is the prealloc
 * space that is having currently known minimal distance
 * from the goal block.
 *
 * Takes a reference on the returned pa and drops the reference on the
 * displaced candidate, so the caller always holds exactly one ref.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}

/*
 * search goal blocks in preallocated space
 *
 * Returns 1 (and sets ac->ac_b_ex / ac->ac_status) if the request could
 * be satisfied from an existing inode or locality-group preallocation,
 * 0 otherwise.
 */
static noinline_for_stack int
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int order, i;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa, *cpa = NULL;
	ext4_fsblk_t goal_block;

	/* only data can be preallocated */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return 0;

	/* first, try per-file preallocation */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {

		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
			ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
					EXT4_C2B(sbi, pa->pa_len)))
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
		     EXT4_MAX_BLOCK_FILE_PHYS))
			continue;

		/* found preallocated blocks, use them */
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0 && pa->pa_free) {
			atomic_inc(&pa->pa_count);
			ext4_mb_use_inode_pa(ac, pa);
			spin_unlock(&pa->pa_lock);
			ac->ac_criteria = 10;
			rcu_read_unlock();
			return 1;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	/* can we use group allocation? */
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
		return 0;

	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	if (lg == NULL)
		return 0;
	order  = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;

	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
	/*
	 * search for the prealloc space that is having
	 * minimal distance from the goal block.
	 */
	for (i = order; i < PREALLOC_TB_SIZE; i++) {
		rcu_read_lock();
		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
					pa_inode_list) {
			spin_lock(&pa->pa_lock);
			if (pa->pa_deleted == 0 &&
					pa->pa_free >= ac->ac_o_ex.fe_len) {

				cpa = ext4_mb_check_group_pa(goal_block,
								pa, cpa);
			}
			spin_unlock(&pa->pa_lock);
		}
		rcu_read_unlock();
	}
	if (cpa) {
		ext4_mb_use_group_pa(ac, cpa);
		ac->ac_criteria = 20;
		return 1;
	}
	return 0;
}

/*
 * the function goes through all block freed in the group
 * but not yet committed and marks them used in in-core bitmap.
* buddy must be generated from this bitmap
 * Need to be called with the ext4 group lock held
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	n = rb_first(&(grp->bb_free_root));

	/* walk the rbtree of not-yet-committed free extents and mark each
	 * range used so the buddy never hands out uncommitted blocks */
	while (n) {
		entry = rb_entry(n, struct ext4_free_data, efd_node);
		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
		n = rb_next(n);
	}
	return;
}

/*
 * the function goes through all preallocation in this group and marks them
 * used in in-core bitmap. buddy must be generated from this bitmap
 * Need to be called with ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int len;

	/* all form of preallocation discards first load group,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here
	 * notice we do NOT ignore preallocations with pa_deleted
	 * otherwise we could leave used blocks available for
	 * allocation in buddy when concurrent ext4_mb_put_pa()
	 * is dropping preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space,
				pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
					     &groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		ext4_set_bits(bitmap, start, len);
		preallocated += len;
	}
	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
}

/*
 * RCU callback: final free of a preallocation descriptor once all
 * readers are done.  Both refcount and deleted state are asserted.
 */
static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;
	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);

	BUG_ON(atomic_read(&pa->pa_count));
	BUG_ON(pa->pa_deleted == 0);
	kmem_cache_free(ext4_pspace_cachep, pa);
}

/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	pa->pa_deleted = 1;
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	grp = ext4_get_group_number(sb, grp_blk);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)			P2 (regular allocation)
	 *					find block B in PA
	 *  copy on-disk bitmap to buddy
	 *					mark B in on-disk bitmap
	 *					drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available. to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	spin_lock(pa->pa_obj_lock);
	list_del_rcu(&pa->pa_inode_list);
	spin_unlock(pa->pa_obj_lock);

	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}

/*
 * creates new preallocated space for given inode
 *
 * The found extent (ac->ac_b_ex) is larger than the original request;
 * the surplus becomes an inode preallocation linked into the group's
 * bb_prealloc_list and the inode's i_prealloc_list.
 */
static noinline_for_stack int
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;
	struct ext4_inode_info *ei;

	/* preallocate only when found space is larger then requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
	if (pa == NULL)
		return -ENOMEM;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int winl;
		int wins;
		int win;
		int offs;

		/* we can't allocate as much as normalizer wants.
		 * so, found space must get proper lstart
		 * to cover original request
		 */
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

		/* we're limited by original request in that
		 * logical block must be covered any way
		 * winl is window we can move our chunk within */
		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;

		/* also, we should cover whole original request */
		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);

		/* the smallest one defines real window */
		win = min(winl, wins);

		offs = ac->ac_o_ex.fe_logical %
			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (offs && offs < win)
			win = offs;

		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
			EXT4_NUM_B2C(sbi, win);
		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
	}

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_lstart = ac->ac_b_ex.fe_logical;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	atomic_set(&pa->pa_count, 1);
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_INODE_PA;

	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_inode_pa(ac, pa);

	ext4_mb_use_inode_pa(ac, pa);
	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);

	ei = EXT4_I(ac->ac_inode);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);

	pa->pa_obj_lock = &ei->i_prealloc_lock;
	pa->pa_inode = ac->ac_inode;

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

	spin_lock(pa->pa_obj_lock);
	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
	spin_unlock(pa->pa_obj_lock);

	return 0;
}

/*
 * creates new preallocated space for locality group inodes belongs to
 */
static noinline_for_stack int
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;

	/* preallocate only when found space is larger then requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

	BUG_ON(ext4_pspace_cachep == NULL);
	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
	if (pa == NULL)
		return -ENOMEM;

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	/* group PAs have no meaningful logical start; mirror the physical */
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	atomic_set(&pa->pa_count, 1);
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;

	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_group_pa(ac, pa);

	ext4_mb_use_group_pa(ac, pa);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	lg = ac->ac_lg;
	BUG_ON(lg == NULL);

	pa->pa_obj_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

	/*
	 * We will later add the new pa to the right bucket
	 * after updating the pa_free in ext4_mb_release_context
	 */
	return 0;
}

/* dispatch to group or inode preallocation based on the hint flags */
static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
	int err;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		err = ext4_mb_new_group_pa(ac);
	else
		err = ext4_mb_new_inode_pa(ac);
	return err;
}

/*
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
* @pa must be unlinked from inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold group/inode locks.
 * TODO: optimize the case when there are no in-core structures yet
 */
static noinline_for_stack int
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
			struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned int end;
	unsigned int next;
	ext4_group_t group;
	ext4_grpblk_t bit;
	unsigned long long grp_blk_start;
	int err = 0;
	int free = 0;

	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	end = bit + pa->pa_len;

	/* walk the PA's range; each run of zero bits in the on-disk bitmap
	 * is still unused and gets returned to the buddy */
	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
		mb_debug(1, " free preallocated %u/%u in group %u\n",
			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
			 (unsigned) next - bit, (unsigned) group);
		free += next - bit;

		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
						    EXT4_C2B(sbi, bit)),
					       next - bit);
		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
		bit = next + 1;
	}
	if (free != pa->pa_free) {
		ext4_msg(e4b->bd_sb, KERN_CRIT,
			 "pa %p: logic %lu, phys. %lu, len %lu",
			 pa, (unsigned long) pa->pa_lstart,
			 (unsigned long) pa->pa_pstart,
			 (unsigned long) pa->pa_len);
		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
					free, pa->pa_free);
		/*
		 * pa is already deleted so we use the value obtained
		 * from the bitmap and continue.
		 */
	}
	atomic_add(free, &sbi->s_mb_discarded);

	return err;
}

/*
 * Release an (already deleted) locality-group preallocation: the whole
 * unused PA range goes straight back to the buddy.
 */
static noinline_for_stack int
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
				struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	ext4_group_t group;
	ext4_grpblk_t bit;

	trace_ext4_mb_release_group_pa(sb, pa);
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);

	return 0;
}

/*
 * releases all preallocations in given group
 *
 * first, we need to decide discard policy:
 * - when do we discard
 *   1) ENOSPC
 * - how many do we discard
 *   1) how many requested
 *
 * Returns the number of clusters actually freed.
 */
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
					ext4_group_t group, int needed)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;
	int busy = 0;
	int free = 0;

	mb_debug(1, "discard preallocation for group %u\n", group);

	if (list_empty(&grp->bb_prealloc_list))
		return 0;

	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (bitmap_bh == NULL) {
		ext4_error(sb, "Error reading block bitmap for %u", group);
		return 0;
	}

	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		ext4_error(sb, "Error loading buddy information for %u", group);
		put_bh(bitmap_bh);
		return 0;
	}

	if (needed == 0)
		/* caller wants everything: more than a group can hold */
		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;

	INIT_LIST_HEAD(&list);
repeat:
	ext4_lock_group(sb, group);
	list_for_each_entry_safe(pa, tmp,
				&grp->bb_prealloc_list, pa_group_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* in use right now; remember and maybe retry */
			spin_unlock(&pa->pa_lock);
			busy = 1;
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		/* seems this one can be freed ... */
		pa->pa_deleted = 1;

		/* we can trust pa_free ... */
		free += pa->pa_free;

		spin_unlock(&pa->pa_lock);

		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
	}

	/* if we still need more blocks and some PAs were used, try again */
	if (free < needed && busy) {
		busy = 0;
		ext4_unlock_group(sb, group);
		cond_resched();
		goto repeat;
	}

	/* found anything to free? */
	if (list_empty(&list)) {
		BUG_ON(free != 0);
		goto out;
	}

	/* now free all selected PAs */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {

		/* remove from object (inode or locality group) */
		spin_lock(pa->pa_obj_lock);
		list_del_rcu(&pa->pa_inode_list);
		spin_unlock(pa->pa_obj_lock);

		if (pa->pa_type == MB_GROUP_PA)
			ext4_mb_release_group_pa(&e4b, pa);
		else
			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}

out:
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);
	put_bh(bitmap_bh);
	return free;
}

/*
 * releases all non-used preallocated blocks for given inode
 *
 * It's important to discard preallocations under i_data_sem
 * We don't want another block to be served from the prealloc
 * space when we are discarding the inode prealloc space.
 *
 * FIXME!!
Make sure it is valid at all the call sites
 */
void ext4_discard_preallocations(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	ext4_group_t group = 0;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;

	if (!S_ISREG(inode->i_mode)) {
		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
		return;
	}

	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
	trace_ext4_discard_preallocations(inode);

	INIT_LIST_HEAD(&list);

repeat:
	/* first, collect all pa's in the inode */
	spin_lock(&ei->i_prealloc_lock);
	while (!list_empty(&ei->i_prealloc_list)) {
		pa = list_entry(ei->i_prealloc_list.next,
				struct ext4_prealloc_space, pa_inode_list);
		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* this shouldn't happen often - nobody should
			 * use preallocation while we're discarding it */
			spin_unlock(&pa->pa_lock);
			spin_unlock(&ei->i_prealloc_lock);
			ext4_msg(sb, KERN_ERR,
				 "uh-oh! used pa while discarding");
			WARN_ON(1);
			schedule_timeout_uninterruptible(HZ);
			goto repeat;

		}
		if (pa->pa_deleted == 0) {
			pa->pa_deleted = 1;
			spin_unlock(&pa->pa_lock);
			list_del_rcu(&pa->pa_inode_list);
			list_add(&pa->u.pa_tmp_list, &list);
			continue;
		}

		/* someone is deleting pa right now */
		spin_unlock(&pa->pa_lock);
		spin_unlock(&ei->i_prealloc_lock);

		/* we have to wait here because pa_deleted
		 * doesn't mean pa is already unlinked from
		 * the list. as we might be called from
		 * ->clear_inode() the inode will get freed
		 * and concurrent thread which is unlinking
		 * pa from inode's list may access already
		 * freed memory, bad-bad-bad */

		/* XXX: if this happens too often, we can
		 * add a flag to force wait only in case
		 * of ->clear_inode(), but not in case of
		 * regular truncate */
		schedule_timeout_uninterruptible(HZ);
		goto repeat;
	}
	spin_unlock(&ei->i_prealloc_lock);

	/* second pass: release each collected pa into its group's buddy */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
		BUG_ON(pa->pa_type != MB_INODE_PA);
		group = ext4_get_group_number(sb, pa->pa_pstart);

		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			ext4_error(sb, "Error loading buddy information for %u",
					group);
			continue;
		}

		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (bitmap_bh == NULL) {
			ext4_error(sb, "Error reading block bitmap for %u",
					group);
			ext4_mb_unload_buddy(&e4b);
			continue;
		}

		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		put_bh(bitmap_bh);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

#ifdef CONFIG_EXT4_DEBUG
/*
 * Dump the allocation context and every group's preallocations to the
 * kernel log when an allocation cannot be satisfied (debug builds only).
 */
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	ext4_group_t ngroups, i;

	if (!ext4_mballoc_debug ||
	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
		return;

	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
			" Allocation context details:");
	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
			ac->ac_status, ac->ac_flags);
	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
		 	"goal %lu/%lu/%lu@%lu, "
			"best %lu/%lu/%lu@%lu cr %d",
			(unsigned long)ac->ac_o_ex.fe_group,
			(unsigned long)ac->ac_o_ex.fe_start,
			(unsigned long)ac->ac_o_ex.fe_len,
			(unsigned long)ac->ac_o_ex.fe_logical,
			(unsigned long)ac->ac_g_ex.fe_group,
			(unsigned long)ac->ac_g_ex.fe_start,
			(unsigned long)ac->ac_g_ex.fe_len,
			(unsigned long)ac->ac_g_ex.fe_logical,
			(unsigned long)ac->ac_b_ex.fe_group,
			(unsigned long)ac->ac_b_ex.fe_start,
			(unsigned long)ac->ac_b_ex.fe_len,
			(unsigned long)ac->ac_b_ex.fe_logical,
			(int)ac->ac_criteria);
	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
		 ac->ac_ex_scanned, ac->ac_found);
	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
	ngroups = ext4_get_groups_count(sb);
	for (i = 0; i < ngroups; i++) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
		struct ext4_prealloc_space *pa;
		ext4_grpblk_t start;
		struct list_head *cur;
		ext4_lock_group(sb, i);
		list_for_each(cur, &grp->bb_prealloc_list) {
			pa = list_entry(cur, struct ext4_prealloc_space,
					pa_group_list);
			spin_lock(&pa->pa_lock);
			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
						     NULL, &start);
			spin_unlock(&pa->pa_lock);
			printk(KERN_ERR "PA:%u:%d:%u \n", i,
			       start, pa->pa_len);
		}
		ext4_unlock_group(sb, i);

		if (grp->bb_free == 0)
			continue;
		printk(KERN_ERR "%u: %d/%d \n",
		       i, grp->bb_free, grp->bb_fragments);
	}
	printk(KERN_ERR "\n");
}
#else
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	return;
}
#endif

/*
 * We use locality group preallocation for small size file.
The size of the
 * file is determined by the current size or the resulting size after
 * allocation which ever is larger
 *
 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	loff_t size, isize;

	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

	/* request reaches exactly i_size on a quiescent inode: likely a
	 * final tail write, so preallocation would only waste space */
	if ((size == isize) &&
	    !ext4_fs_is_busy(sbi) &&
	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
		return;
	}

	if (sbi->s_mb_group_prealloc <= 0) {
		/* group preallocation disabled via the sysfs tunable */
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	/* don't use group allocation for large files */
	size = max(size, isize);
	if (size > sbi->s_mb_stream_request) {
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	BUG_ON(ac->ac_lg != NULL);
	/*
	 * locality group prealloc space are per cpu. The reason for having
	 * per cpu locality group is to reduce the contention between block
	 * request from multiple CPUs.
	 */
	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);

	/* we're going to use group allocation */
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;

	/* serialize all allocations in the group */
	mutex_lock(&ac->ac_lg->lg_mutex);
}

/*
 * Populate the allocation context from the caller's request: clamp the
 * length to one group, validate the goal block, fill in the original /
 * goal extents and decide (via ext4_mb_group_or_file) between stream
 * and locality-group allocation.  Always returns 0.
 */
static noinline_for_stack int
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t group;
	unsigned int len;
	ext4_fsblk_t goal;
	ext4_grpblk_t block;

	/* we can't allocate > group size */
	len = ar->len;

	/* just a dirty hack to filter too big requests */
	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
		len = EXT4_CLUSTERS_PER_GROUP(sb);

	/* start searching from the goal */
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
			goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group, &block);

	/* set up allocation goals */
	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
	ac->ac_status = AC_STATUS_CONTINUE;
	ac->ac_sb = sb;
	ac->ac_inode = ar->inode;
	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
	ac->ac_o_ex.fe_group = group;
	ac->ac_o_ex.fe_start = block;
	ac->ac_o_ex.fe_len = len;
	ac->ac_g_ex = ac->ac_o_ex;
	ac->ac_flags = ar->flags;

	/* we have to define context: we'll we work with a file or
	 * locality group. this is a policy, actually */
	ext4_mb_group_or_file(ac);

	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
			"left: %u/%u, right %u/%u to %swritable\n",
			(unsigned) ar->len, (unsigned) ar->logical,
			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
			(unsigned) ar->lleft, (unsigned) ar->pleft,
			(unsigned) ar->lright, (unsigned) ar->pright,
			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
	return 0;
}

/*
 * Discard unused locality-group preallocations from one order bucket,
 * keeping roughly 5 entries so the list can grow to 8 before the next
 * trim (see ext4_mb_add_n_trim).
 */
static noinline_for_stack void
ext4_mb_discard_lg_preallocations(struct super_block *sb,
					struct ext4_locality_group *lg,
					int order, int total_entries)
{
	ext4_group_t group = 0;
	struct ext4_buddy e4b;
	struct list_head discard_list;
	struct ext4_prealloc_space *pa, *tmp;

	mb_debug(1, "discard locality group preallocation\n");

	INIT_LIST_HEAD(&discard_list);

	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
						pa_inode_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/*
			 * This is the pa that we just used
			 * for block allocation. So don't
			 * free that
			 */
			spin_unlock(&pa->pa_lock);
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		/* only lg prealloc space */
		BUG_ON(pa->pa_type != MB_GROUP_PA);

		/* seems this one can be freed ... */
		pa->pa_deleted = 1;
		spin_unlock(&pa->pa_lock);

		list_del_rcu(&pa->pa_inode_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);

		total_entries--;
		if (total_entries <= 5) {
			/*
			 * we want to keep only 5 entries
			 * allowing it to grow to 8. This
			 * makes sure we don't call discard
			 * soon for this list.
			 */
			break;
		}
	}
	spin_unlock(&lg->lg_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {

		group = ext4_get_group_number(sb, pa->pa_pstart);
		if (ext4_mb_load_buddy(sb, group, &e4b)) {
			ext4_error(sb, "Error loading buddy information for %u",
					group);
			continue;
		}
		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_group_pa(&e4b, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

/*
 * We have incremented pa_count. So it cannot be freed at this
 * point. Also we hold lg_mutex. So no parallel allocation is
 * possible from this lg. That means pa_free cannot be updated.
 *
 * A parallel ext4_mb_discard_group_preallocations is possible.
 * which can cause the lg_prealloc_list to be updated.
*/ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) { int order, added = 0, lg_prealloc_count = 1; struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg = ac->ac_lg; struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; order = fls(pa->pa_free) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted) { spin_unlock(&tmp_pa->pa_lock); continue; } if (!added && pa->pa_free < tmp_pa->pa_free) { /* Add to the tail of the previous entry */ list_add_tail_rcu(&pa->pa_inode_list, &tmp_pa->pa_inode_list); added = 1; /* * we want to count the total * number of entries in the list */ } spin_unlock(&tmp_pa->pa_lock); lg_prealloc_count++; } if (!added) list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list[order]); spin_unlock(&lg->lg_prealloc_lock); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) { ext4_mb_discard_lg_preallocations(sb, lg, order, lg_prealloc_count); return; } return ; } /* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { if (pa->pa_type == MB_GROUP_PA) { /* see comment in ext4_mb_use_group_pa() */ spin_lock(&pa->pa_lock); pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_free -= ac->ac_b_ex.fe_len; pa->pa_len -= ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); } } if (pa) { /* * We want to add the pa to the right bucket. * Remove it from the list and while adding * make sure the list to which we are adding * doesn't grow big. 
*/ if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { spin_lock(pa->pa_obj_lock); list_del_rcu(&pa->pa_inode_list); spin_unlock(pa->pa_obj_lock); ext4_mb_add_n_trim(ac); } ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) page_cache_release(ac->ac_bitmap_page); if (ac->ac_buddy_page) page_cache_release(ac->ac_buddy_page); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); return 0; } static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; int freed = 0; trace_ext4_mb_discard_preallocations(sb, needed); for (i = 0; i < ngroups && needed > 0; i++) { ret = ext4_mb_discard_group_preallocations(sb, i, needed); freed += ret; needed -= ret; } return freed; } /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back * to usual allocation */ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { int freed; struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; might_sleep(); sb = ar->inode->i_sb; sbi = EXT4_SB(sb); trace_ext4_request_blocks(ar); /* Allow to use superuser reservation for quota file */ if (IS_NOQUOTA(ar->inode)) ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; /* * For delayed allocation, we could skip the ENOSPC and * EDQUOT check, as blocks and quotas have been already * reserved when data being copied into pagecache. */ if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) ar->flags |= EXT4_MB_DELALLOC_RESERVED; else { /* Without delayed allocation we need to verify * there is enough free blocks to do block allocation * and verify allocation doesn't exceed the quota limits. 
*/ while (ar->len && ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { /* let others to free the space */ cond_resched(); ar->len = ar->len >> 1; } if (!ar->len) { *errp = -ENOSPC; return 0; } reserv_clstrs = ar->len; if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { dquot_alloc_block_nofail(ar->inode, EXT4_C2B(sbi, ar->len)); } else { while (ar->len && dquot_alloc_block(ar->inode, EXT4_C2B(sbi, ar->len))) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } } inquota = ar->len; if (ar->len == 0) { *errp = -EDQUOT; goto out; } } ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { ar->len = 0; *errp = -ENOMEM; goto out; } *errp = ext4_mb_initialize_context(ac, ar); if (*errp) { ar->len = 0; goto out; } ac->ac_op = EXT4_MB_HISTORY_PREALLOC; if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } /* as we've just preallocated more space than * user requested orinally, we store allocated * space in a special descriptor */ if (ac->ac_status == AC_STATUS_FOUND && ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) ext4_mb_new_preallocation(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); if (*errp == -EAGAIN) { /* * drop the reference that we took * in ext4_mb_use_best_found */ ext4_mb_release_context(ac); ac->ac_b_ex.fe_group = 0; ac->ac_b_ex.fe_start = 0; ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; goto repeat; } else if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } else { block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); ar->len = ac->ac_b_ex.fe_len; } } else { freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); if (freed) goto repeat; *errp = -ENOSPC; } errout: if (*errp) { ac->ac_b_ex.fe_len = 0; ar->len = 0; ext4_mb_show_ac(ac); } ext4_mb_release_context(ac); out: if (ac) 
kmem_cache_free(ext4_ac_cachep, ac); if (inquota && ar->len < inquota) dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); if (!ar->len) { if (!ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); } trace_ext4_allocate_blocks(ar, (unsigned long long)block); return block; } /* * We can merge two free data extents only if the physical blocks * are contiguous, AND the extents were freed by the same transaction, * AND the blocks are associated with the same group. */ static int can_merge(struct ext4_free_data *entry1, struct ext4_free_data *entry2) { if ((entry1->efd_tid == entry2->efd_tid) && (entry1->efd_group == entry2->efd_group) && ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster)) return 1; return 0; } static noinline_for_stack int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, struct ext4_free_data *new_entry) { ext4_group_t group = e4b->bd_group; ext4_grpblk_t cluster; struct ext4_free_data *entry; struct ext4_group_info *db = e4b->bd_info; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct rb_node **n = &db->bb_free_root.rb_node, *node; struct rb_node *parent = NULL, *new_node; BUG_ON(!ext4_handle_valid(handle)); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); new_node = &new_entry->efd_node; cluster = new_entry->efd_start_cluster; if (!*n) { /* first free block exent. 
We need to protect buddy cache from being freed, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ page_cache_get(e4b->bd_buddy_page); page_cache_get(e4b->bd_bitmap_page); } while (*n) { parent = *n; entry = rb_entry(parent, struct ext4_free_data, efd_node); if (cluster < entry->efd_start_cluster) n = &(*n)->rb_left; else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) n = &(*n)->rb_right; else { ext4_grp_locked_error(sb, group, 0, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, cluster), "Block already on to-be-freed list"); return 0; } } rb_link_node(new_node, parent, n); rb_insert_color(new_node, &db->bb_free_root); /* Now try to see the extent can be merged to left and right */ node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); if (can_merge(entry, new_entry) && ext4_journal_callback_try_del(handle, &entry->efd_jce)) { new_entry->efd_start_cluster = entry->efd_start_cluster; new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); kmem_cache_free(ext4_free_data_cachep, entry); } } node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); if (can_merge(new_entry, entry) && ext4_journal_callback_try_del(handle, &entry->efd_jce)) { new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); kmem_cache_free(ext4_free_data_cachep, entry); } } /* Add the extent to transaction's private list */ ext4_journal_callback_add(handle, ext4_free_data_callback, &new_entry->efd_jce); return 0; } /** * ext4_free_blocks() -- Free given blocks and update quota * @handle: handle for this transaction * @inode: inode * @block: start physical block to free * @count: number of blocks to count * @flags: flags used by ext4_free_blocks */ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags) { struct buffer_head *bitmap_bh = 
NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; ext4_group_t block_group; struct ext4_sb_info *sbi; struct ext4_buddy e4b; unsigned int count_clusters; int err = 0; int ret; might_sleep(); if (bh) { if (block) BUG_ON(block != bh->b_blocknr); else block = bh->b_blocknr; } sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_data_block_valid(sbi, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); goto error_return; } ext4_debug("freeing block %llu\n", block); trace_ext4_free_blocks(inode, block, count, flags); if (flags & EXT4_FREE_BLOCKS_FORGET) { struct buffer_head *tbh = bh; int i; BUG_ON(bh && (count > 1)); for (i = 0; i < count; i++) { if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); if (unlikely(!tbh)) continue; ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); } } /* * We need to make sure we don't reuse the freed block until * after the transaction is committed, which we can do by * treating the block as metadata, below. We make an * exception if the inode is to be written in writeback mode * since writeback mode has weak data consistency guarantees. */ if (!ext4_should_writeback_data(inode)) flags |= EXT4_FREE_BLOCKS_METADATA; /* * If the extent to be freed does not begin on a cluster * boundary, we need to deal with partial clusters at the * beginning and end of the extent. Normally we will free * blocks at the beginning or the end unless we are explicitly * requested to avoid doing so. 
*/ overflow = EXT4_PBLK_COFF(sbi, block); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { overflow = sbi->s_cluster_ratio - overflow; block += overflow; if (count > overflow) count -= overflow; else return; } else { block -= overflow; count += overflow; } } overflow = EXT4_LBLK_COFF(sbi, count); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { if (count > overflow) count -= overflow; else return; } else count += sbi->s_cluster_ratio - overflow; } do_more: overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. */ if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { overflow = EXT4_C2B(sbi, bit) + count - EXT4_BLOCKS_PER_GROUP(sb); count -= overflow; } count_clusters = EXT4_NUM_B2C(sbi, count); bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; goto error_return; } gdp = ext4_get_group_desc(sb, block_group, &gd_bh); if (!gdp) { err = -EIO; goto error_return; } if (in_range(ext4_block_bitmap(sb, gdp), block, count) || in_range(ext4_inode_bitmap(sb, gdp), block, count) || in_range(block, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group) || in_range(block + count - 1, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group)) { ext4_error(sb, "Freeing blocks in system zone - " "Block = %llu, count = %lu", block, count); /* err = 0. ext4_std_error should be a no op */ goto error_return; } BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, bitmap_bh); if (err) goto error_return; /* * We are about to modify some metadata. 
Call the journal APIs * to unshare ->b_data if a currently-committing transaction is * using it */ BUFFER_TRACE(gd_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gd_bh); if (err) goto error_return; #ifdef AGGRESSIVE_CHECK { int i; for (i = 0; i < count_clusters; i++) BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); } #endif trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_return; if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { struct ext4_free_data *new_entry; /* * blocks being freed are metadata. these blocks shouldn't * be used until this transaction is committed */ retry: new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); if (!new_entry) { /* * We use a retry loop because * ext4_free_blocks() is not allowed to fail. */ cond_resched(); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } new_entry->efd_start_cluster = bit; new_entry->efd_group = block_group; new_entry->efd_count = count_clusters; new_entry->efd_tid = handle->h_transaction->t_tid; ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); ext4_mb_free_metadata(handle, &e4b, new_entry); } else { /* need to update group_info->bb_free and bitmap * with group lock held. 
generate_buddy look at * them with group lock_held */ if (test_opt(sb, DISCARD)) { err = ext4_issue_discard(sb, block_group, bit, count); if (err && err != -EOPNOTSUPP) ext4_msg(sb, KERN_WARNING, "discard request in" " group:%d block:%d count:%lu failed" " with %d", block_group, bit, count, err); } else EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); mb_free_blocks(inode, &e4b, bit, count_clusters); } ret = ext4_free_group_clusters(sb, gdp) + count_clusters; ext4_free_group_clusters_set(sb, gdp, ret); ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); ext4_group_desc_csum_set(sb, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(count_clusters, &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); /* And the group descriptor block */ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); if (!err) err = ret; if (overflow && !err) { block += count; count = overflow; put_bh(bitmap_bh); goto do_more; } error_return: brelse(bitmap_bh); ext4_std_error(sb, err); return; } /** * ext4_group_add_blocks() -- Add given blocks to an existing group * @handle: handle to this transaction * @sb: super block * @block: start physical block to add to the block group * @count: number of blocks to free * * This marks the blocks as free in the bitmap and buddy. 
*/ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count) { struct buffer_head *bitmap_bh = NULL; struct buffer_head *gd_bh; ext4_group_t block_group; ext4_grpblk_t bit; unsigned int i; struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_buddy e4b; int err = 0, ret, blk_free_count; ext4_grpblk_t blocks_freed; ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); if (count == 0) return 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. */ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { ext4_warning(sb, "too much blocks added to group %u\n", block_group); err = -EINVAL; goto error_return; } bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; goto error_return; } desc = ext4_get_group_desc(sb, block_group, &gd_bh); if (!desc) { err = -EIO; goto error_return; } if (in_range(ext4_block_bitmap(sb, desc), block, count) || in_range(ext4_inode_bitmap(sb, desc), block, count) || in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || in_range(block + count - 1, ext4_inode_table(sb, desc), sbi->s_itb_per_group)) { ext4_error(sb, "Adding blocks in system zones - " "Block = %llu, count = %lu", block, count); err = -EINVAL; goto error_return; } BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, bitmap_bh); if (err) goto error_return; /* * We are about to modify some metadata. 
Call the journal APIs * to unshare ->b_data if a currently-committing transaction is * using it */ BUFFER_TRACE(gd_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gd_bh); if (err) goto error_return; for (i = 0, blocks_freed = 0; i < count; i++) { BUFFER_TRACE(bitmap_bh, "clear bit"); if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { ext4_error(sb, "bit already cleared for block %llu", (ext4_fsblk_t)(block + i)); BUFFER_TRACE(bitmap_bh, "bit already cleared"); } else { blocks_freed++; } } err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_return; /* * need to update group_info->bb_free and bitmap * with group lock held. generate_buddy look at * them with group lock_held */ ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count); mb_free_blocks(NULL, &e4b, bit, count); blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); ext4_free_group_clusters_set(sb, desc, blk_free_count); ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); ext4_group_desc_csum_set(sb, block_group, desc); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, EXT4_NUM_B2C(sbi, blocks_freed)); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); /* And the group descriptor block */ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); if (!err) err = ret; error_return: brelse(bitmap_bh); ext4_std_error(sb, err); return err; } /** * ext4_trim_extent -- function to TRIM one single free extent in the group * @sb: super block for the file system * @start: starting block of the free extent in the alloc. 
group * @count: number of blocks to TRIM * @group: alloc. group we are working with * @e4b: ext4 buddy for the group * * Trim "count" blocks starting at "start" in the "group". To assure that no * one will allocate those blocks, mark it as used in buddy bitmap. This must * be called with under the group lock. */ static int ext4_trim_extent(struct super_block *sb, int start, int count, ext4_group_t group, struct ext4_buddy *e4b) { struct ext4_free_extent ex; int ret = 0; trace_ext4_trim_extent(sb, group, start, count); assert_spin_locked(ext4_group_lock_ptr(sb, group)); ex.fe_start = start; ex.fe_group = group; ex.fe_len = count; /* * Mark blocks used, so no one can reuse them while * being trimmed. */ mb_mark_used(e4b, &ex); ext4_unlock_group(sb, group); ret = ext4_issue_discard(sb, group, start, count); ext4_lock_group(sb, group); mb_free_blocks(NULL, e4b, start, ex.fe_len); return ret; } /** * ext4_trim_all_free -- function to trim all free space in alloc. group * @sb: super block for file system * @group: group to be trimmed * @start: first group block to examine * @max: last group block to examine * @minblocks: minimum extent block count * * ext4_trim_all_free walks through group's buddy bitmap searching for free * extents. When the free block is found, ext4_trim_extent is called to TRIM * the extent. * * * ext4_trim_all_free walks through group's block bitmap searching for free * extents. When the free extent is found, mark it as used in group buddy * bitmap. Then issue a TRIM command on this extent and free the extent in * the group buddy bitmap. This is done until whole group is scanned. 
*/ static ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) { void *bitmap; ext4_grpblk_t next, count = 0, free_count = 0; struct ext4_buddy e4b; int ret = 0; trace_ext4_trim_all_free(sb, group, start, max); ret = ext4_mb_load_buddy(sb, group, &e4b); if (ret) { ext4_error(sb, "Error in loading buddy " "information for %u", group); return ret; } bitmap = e4b.bd_bitmap; ext4_lock_group(sb, group); if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) && minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) goto out; start = (e4b.bd_info->bb_first_free > start) ? e4b.bd_info->bb_first_free : start; while (start <= max) { start = mb_find_next_zero_bit(bitmap, max + 1, start); if (start > max) break; next = mb_find_next_bit(bitmap, max + 1, start); if ((next - start) >= minblocks) { ret = ext4_trim_extent(sb, start, next - start, group, &e4b); if (ret && ret != -EOPNOTSUPP) break; ret = 0; count += next - start; } free_count += next - start; start = next + 1; if (fatal_signal_pending(current)) { count = -ERESTARTSYS; break; } if (need_resched()) { ext4_unlock_group(sb, group); cond_resched(); ext4_lock_group(sb, group); } if ((e4b.bd_info->bb_free - free_count) < minblocks) break; } if (!ret) { ret = count; EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); } out: ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); ext4_debug("trimmed %d blocks in the group %d\n", count, group); return ret; } /** * ext4_trim_fs() -- trim ioctl handle function * @sb: superblock for filesystem * @range: fstrim_range structure * * start: First Byte to trim * len: number of Bytes to trim from start * minlen: minimum extent length in Bytes * ext4_trim_fs goes through all allocation groups containing Bytes from * start to start+len. For each such a group ext4_trim_all_free function * is invoked to trim all free space. 
*/ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; uint64_t start, end, minlen, trimmed = 0; ext4_fsblk_t first_data_blk = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); int ret = 0; start = range->start >> sb->s_blocksize_bits; end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = EXT4_NUM_B2C(EXT4_SB(sb), range->minlen >> sb->s_blocksize_bits); if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; if (end >= max_blks) end = max_blks - 1; if (end <= first_data_blk) goto out; if (start < first_data_blk) start = first_data_blk; /* Determine first and last group to examine based on start and end */ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, &first_group, &first_cluster); ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, &last_group, &last_cluster); /* end now represents the last cluster to discard in this group */ end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; for (group = first_group; group <= last_group; group++) { grp = ext4_get_group_info(sb, group); /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { ret = ext4_mb_init_group(sb, group); if (ret) break; } /* * For all the groups except the last one, last cluster will * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to * change it for the last group, note that last_cluster is * already computed earlier by ext4_get_group_no_and_offset() */ if (group == last_group) end = last_cluster; if (grp->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, group, first_cluster, end, minlen); if (cnt < 0) { ret = cnt; break; } trimmed += cnt; } /* * For every group except the first one, we are sure * that the first cluster to discard will be cluster #0. 
*/ first_cluster = 0; } if (!ret) atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); out: range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; return ret; }
gpl-2.0
adrienverge/linux
drivers/net/wireless/intersil/orinoco/hw.c
671
34740
/* Encapsulate basic setting changes and retrieval on Hermes hardware * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/if_arp.h> #include <linux/ieee80211.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include "hermes.h" #include "hermes_rid.h" #include "orinoco.h" #include "hw.h" #define SYMBOL_MAX_VER_LEN (14) /* Symbol firmware has a bug allocating buffers larger than this */ #define TX_NICBUF_SIZE_BUG 1585 /********************************************************************/ /* Data tables */ /********************************************************************/ /* This tables gives the actual meanings of the bitrate IDs returned * by the firmware. */ static const struct { int bitrate; /* in 100s of kilobits */ int automatic; u16 agere_txratectrl; u16 intersil_txratectrl; } bitrate_table[] = { {110, 1, 3, 15}, /* Entry 0 is the default */ {10, 0, 1, 1}, {10, 1, 1, 1}, {20, 0, 2, 2}, {20, 1, 6, 3}, {55, 0, 4, 4}, {55, 1, 7, 7}, {110, 0, 5, 8}, }; #define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table) /* Firmware version encoding */ struct comp_id { u16 id, variant, major, minor; } __packed; static inline enum fwtype determine_firmware_type(struct comp_id *nic_id) { if (nic_id->id < 0x8000) return FIRMWARE_TYPE_AGERE; else if (nic_id->id == 0x8000 && nic_id->major == 0) return FIRMWARE_TYPE_SYMBOL; else return FIRMWARE_TYPE_INTERSIL; } /* Set priv->firmware type, determine firmware properties * This function can be called before we have registerred with netdev, * so all errors go out with dev_* rather than printk * * If non-NULL stores a firmware description in fw_name. * If non-NULL stores a HW version in hw_ver * * These are output via generic cfg80211 ethtool support. 
*/ int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, size_t fw_name_len, u32 *hw_ver) { struct device *dev = priv->dev; struct hermes *hw = &priv->hw; int err; struct comp_id nic_id, sta_id; unsigned int firmver; char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2))); /* Get the hardware version */ err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id); if (err) { dev_err(dev, "Cannot read hardware identity: error %d\n", err); return err; } le16_to_cpus(&nic_id.id); le16_to_cpus(&nic_id.variant); le16_to_cpus(&nic_id.major); le16_to_cpus(&nic_id.minor); dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n", nic_id.id, nic_id.variant, nic_id.major, nic_id.minor); if (hw_ver) *hw_ver = (((nic_id.id & 0xff) << 24) | ((nic_id.variant & 0xff) << 16) | ((nic_id.major & 0xff) << 8) | (nic_id.minor & 0xff)); priv->firmware_type = determine_firmware_type(&nic_id); /* Get the firmware version */ err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id); if (err) { dev_err(dev, "Cannot read station identity: error %d\n", err); return err; } le16_to_cpus(&sta_id.id); le16_to_cpus(&sta_id.variant); le16_to_cpus(&sta_id.major); le16_to_cpus(&sta_id.minor); dev_info(dev, "Station identity %04x:%04x:%04x:%04x\n", sta_id.id, sta_id.variant, sta_id.major, sta_id.minor); switch (sta_id.id) { case 0x15: dev_err(dev, "Primary firmware is active\n"); return -ENODEV; case 0x14b: dev_err(dev, "Tertiary firmware is active\n"); return -ENODEV; case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */ case 0x21: /* Symbol Spectrum24 Trilogy */ break; default: dev_notice(dev, "Unknown station ID, please report\n"); break; } /* Default capabilities */ priv->has_sensitivity = 1; priv->has_mwo = 0; priv->has_preamble = 0; priv->has_port3 = 1; priv->has_ibss = 1; priv->has_wep = 0; priv->has_big_wep = 0; priv->has_alt_txcntl = 0; priv->has_ext_scan = 0; priv->has_wpa = 0; priv->do_fw_download = 0; /* Determine capabilities from the firmware version */ 
switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout, ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */ if (fw_name) snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor); firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor; priv->has_ibss = (firmver >= 0x60006); priv->has_wep = (firmver >= 0x40020); priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell Gold cards from the others? */ priv->has_mwo = (firmver >= 0x60000); priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */ priv->ibss_port = 1; priv->has_hostscan = (firmver >= 0x8000a); priv->do_fw_download = 1; priv->broken_monitor = (firmver >= 0x80000); priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */ priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */ priv->has_wpa = (firmver >= 0x9002a); /* Tested with Agere firmware : * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II * Tested CableTron firmware : 4.32 => Anton */ break; case FIRMWARE_TYPE_SYMBOL: /* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */ /* Intel MAC : 00:02:B3:* */ /* 3Com MAC : 00:50:DA:* */ memset(tmp, 0, sizeof(tmp)); /* Get the Symbol firmware version */ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SECONDARYVERSION_SYMBOL, SYMBOL_MAX_VER_LEN, NULL, &tmp); if (err) { dev_warn(dev, "Error %d reading Symbol firmware info. " "Wildly guessing capabilities...\n", err); firmver = 0; tmp[0] = '\0'; } else { /* The firmware revision is a string, the format is * something like : "V2.20-01". * Quick and dirty parsing... 
- Jean II */ firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12) | ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4) | (tmp[7] - '0'); tmp[SYMBOL_MAX_VER_LEN] = '\0'; } if (fw_name) snprintf(fw_name, fw_name_len, "Symbol %s", tmp); priv->has_ibss = (firmver >= 0x20000); priv->has_wep = (firmver >= 0x15012); priv->has_big_wep = (firmver >= 0x20000); priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) || (firmver >= 0x29000 && firmver < 0x30000) || firmver >= 0x31000; priv->has_preamble = (firmver >= 0x20000); priv->ibss_port = 4; /* Symbol firmware is found on various cards, but * there has been no attempt to check firmware * download on non-spectrum_cs based cards. * * Given that the Agere firmware download works * differently, we should avoid doing a firmware * download with the Symbol algorithm on non-spectrum * cards. * * For now we can identify a spectrum_cs based card * because it has a firmware reset function. */ priv->do_fw_download = (priv->stop_fw != NULL); priv->broken_disableport = (firmver == 0x25013) || (firmver >= 0x30000 && firmver <= 0x31000); priv->has_hostscan = (firmver >= 0x31001) || (firmver >= 0x29057 && firmver < 0x30000); /* Tested with Intel firmware : 0x20015 => Jean II */ /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */ break; case FIRMWARE_TYPE_INTERSIL: /* D-Link, Linksys, Adtron, ZoomAir, and many others... 
* Samsung, Compaq 100/200 and Proxim are slightly * different and less well tested */ /* D-Link MAC : 00:40:05:* */ /* Addtron MAC : 00:90:D1:* */ if (fw_name) snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d", sta_id.major, sta_id.minor, sta_id.variant); firmver = ((unsigned long)sta_id.major << 16) | ((unsigned long)sta_id.minor << 8) | sta_id.variant; priv->has_ibss = (firmver >= 0x000700); /* FIXME */ priv->has_big_wep = priv->has_wep = (firmver >= 0x000800); priv->has_pm = (firmver >= 0x000700); priv->has_hostscan = (firmver >= 0x010301); if (firmver >= 0x000800) priv->ibss_port = 0; else { dev_notice(dev, "Intersil firmware earlier than v0.8.x" " - several features not supported\n"); priv->ibss_port = 1; } break; } if (fw_name) dev_info(dev, "Firmware determined as %s\n", fw_name); #ifndef CONFIG_HERMES_PRISM if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) { dev_err(dev, "Support for Prism chipset is not enabled\n"); return -ENODEV; } #endif return 0; } /* Read settings from EEPROM into our private structure. * MAC address gets dropped into callers buffer * Can be called before netdev registration. 
*/ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) { struct device *dev = priv->dev; struct hermes_idstring nickbuf; struct hermes *hw = &priv->hw; int len; int err; u16 reclen; /* Get the MAC address */ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, ETH_ALEN, NULL, dev_addr); if (err) { dev_warn(dev, "Failed to read MAC address!\n"); goto out; } dev_dbg(dev, "MAC address %pM\n", dev_addr); /* Get the station name */ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, sizeof(nickbuf), &reclen, &nickbuf); if (err) { dev_err(dev, "failed to read station name\n"); goto out; } if (nickbuf.len) len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len)); else len = min(IW_ESSID_MAX_SIZE, 2 * reclen); memcpy(priv->nick, &nickbuf.val, len); priv->nick[len] = '\0'; dev_dbg(dev, "Station name \"%s\"\n", priv->nick); /* Get allowed channels */ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST, &priv->channel_mask); if (err) { dev_err(dev, "Failed to read channel list!\n"); goto out; } /* Get initial AP density */ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &priv->ap_density); if (err || priv->ap_density < 1 || priv->ap_density > 3) priv->has_sensitivity = 0; /* Get initial RTS threshold */ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, &priv->rts_thresh); if (err) { dev_err(dev, "Failed to read RTS threshold!\n"); goto out; } /* Get initial fragmentation settings */ if (priv->has_mwo) err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMWOROBUST_AGERE, &priv->mwo_robust); else err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD, &priv->frag_thresh); if (err) { dev_err(dev, "Failed to read fragmentation settings!\n"); goto out; } /* Power management setup */ if (priv->has_pm) { priv->pm_on = 0; priv->pm_mcast = 1; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, &priv->pm_period); if (err) { dev_err(dev, "Failed to 
read power management " "period!\n"); goto out; } err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &priv->pm_timeout); if (err) { dev_err(dev, "Failed to read power management " "timeout!\n"); goto out; } } /* Preamble setup */ if (priv->has_preamble) { err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL, &priv->preamble); if (err) { dev_err(dev, "Failed to read preamble setup\n"); goto out; } } /* Retry settings */ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT, &priv->short_retry_limit); if (err) { dev_err(dev, "Failed to read short retry limit\n"); goto out; } err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT, &priv->long_retry_limit); if (err) { dev_err(dev, "Failed to read long retry limit\n"); goto out; } err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME, &priv->retry_lifetime); if (err) { dev_err(dev, "Failed to read max retry lifetime\n"); goto out; } out: return err; } /* Can be called before netdev registration */ int orinoco_hw_allocate_fid(struct orinoco_private *priv) { struct device *dev = priv->dev; struct hermes *hw = &priv->hw; int err; err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid); if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { /* Try workaround for old Symbol firmware bug */ priv->nicbuf_size = TX_NICBUF_SIZE_BUG; err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid); dev_warn(dev, "Firmware ALLOC bug detected " "(old Symbol firmware?). Work around %s\n", err ? "failed!" 
: "ok."); } return err; } int orinoco_get_bitratemode(int bitrate, int automatic) { int ratemode = -1; int i; if ((bitrate != 10) && (bitrate != 20) && (bitrate != 55) && (bitrate != 110)) return ratemode; for (i = 0; i < BITRATE_TABLE_SIZE; i++) { if ((bitrate_table[i].bitrate == bitrate) && (bitrate_table[i].automatic == automatic)) { ratemode = i; break; } } return ratemode; } void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic) { BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE)); *bitrate = bitrate_table[ratemode].bitrate * 100000; *automatic = bitrate_table[ratemode].automatic; } int orinoco_hw_program_rids(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct wireless_dev *wdev = netdev_priv(dev); struct hermes *hw = &priv->hw; int err; struct hermes_idstring idbuf; /* Set the MAC address */ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr); if (err) { printk(KERN_ERR "%s: Error %d setting MAC address\n", dev->name, err); return err; } /* Set up the link mode */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE, priv->port_type); if (err) { printk(KERN_ERR "%s: Error %d setting port type\n", dev->name, err); return err; } /* Set the channel/frequency */ if (priv->channel != 0 && priv->iw_mode != NL80211_IFTYPE_STATION) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, priv->channel); if (err) { printk(KERN_ERR "%s: Error %d setting channel %d\n", dev->name, err, priv->channel); return err; } } if (priv->has_ibss) { u16 createibss; if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) { printk(KERN_WARNING "%s: This firmware requires an " "ESSID in IBSS-Ad-Hoc mode.\n", dev->name); /* With wvlan_cs, in this case, we would crash. * hopefully, this driver will behave better... 
* Jean II */ createibss = 0; } else { createibss = priv->createibss; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFCREATEIBSS, createibss); if (err) { printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n", dev->name, err); return err; } } /* Set the desired BSSID */ err = __orinoco_hw_set_wap(priv); if (err) { printk(KERN_ERR "%s: Error %d setting AP address\n", dev->name, err); return err; } /* Set the desired ESSID */ idbuf.len = cpu_to_le16(strlen(priv->desired_essid)); memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val)); /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID, HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting OWNSSID\n", dev->name, err); return err; } err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID, HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n", dev->name, err); return err; } /* Set the station name */ idbuf.len = cpu_to_le16(strlen(priv->nick)); memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val)); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, HERMES_BYTES_TO_RECLEN(strlen(priv->nick) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting nickname\n", dev->name, err); return err; } /* Set AP density */ if (priv->has_sensitivity) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, priv->ap_density); if (err) { printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. 
" "Disabling sensitivity control\n", dev->name, err); priv->has_sensitivity = 0; } } /* Set RTS threshold */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, priv->rts_thresh); if (err) { printk(KERN_ERR "%s: Error %d setting RTS threshold\n", dev->name, err); return err; } /* Set fragmentation threshold or MWO robustness */ if (priv->has_mwo) err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMWOROBUST_AGERE, priv->mwo_robust); else err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD, priv->frag_thresh); if (err) { printk(KERN_ERR "%s: Error %d setting fragmentation\n", dev->name, err); return err; } /* Set bitrate */ err = __orinoco_hw_set_bitrate(priv); if (err) { printk(KERN_ERR "%s: Error %d setting bitrate\n", dev->name, err); return err; } /* Set power management */ if (priv->has_pm) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, priv->pm_on); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, priv->pm_mcast); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, priv->pm_period); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, priv->pm_timeout); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } } /* Set preamble - only for Symbol so far... 
*/ if (priv->has_preamble) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL, priv->preamble); if (err) { printk(KERN_ERR "%s: Error %d setting preamble\n", dev->name, err); return err; } } /* Set up encryption */ if (priv->has_wep || priv->has_wpa) { err = __orinoco_hw_setup_enc(priv); if (err) { printk(KERN_ERR "%s: Error %d activating encryption\n", dev->name, err); return err; } } if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { /* Enable monitor mode */ dev->type = ARPHRD_IEEE80211; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_MONITOR, 0, NULL); } else { /* Disable monitor mode */ dev->type = ARPHRD_ETHER; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_STOP, 0, NULL); } if (err) return err; /* Reset promiscuity / multicast*/ priv->promiscuous = 0; priv->mc_count = 0; /* Record mode change */ wdev->iftype = priv->iw_mode; return 0; } /* Get tsc from the firmware */ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc) { struct hermes *hw = &priv->hw; int err = 0; u8 tsc_arr[4][ORINOCO_SEQ_LEN]; if ((key < 0) || (key >= 4)) return -EINVAL; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, sizeof(tsc_arr), NULL, &tsc_arr); if (!err) memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0])); return err; } int __orinoco_hw_set_bitrate(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int ratemode = priv->bitratemode; int err = 0; if (ratemode >= BITRATE_TABLE_SIZE) { printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n", priv->ndev->name, ratemode); return -EINVAL; } switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXRATECONTROL, bitrate_table[ratemode].agere_txratectrl); break; case FIRMWARE_TYPE_INTERSIL: case FIRMWARE_TYPE_SYMBOL: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXRATECONTROL, bitrate_table[ratemode].intersil_txratectrl); break; default: BUG(); } return err; } int 
orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate) { struct hermes *hw = &priv->hw; int i; int err = 0; u16 val; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTTXRATE, &val); if (err) return err; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Lucent style rate */ /* Note : in Lucent firmware, the return value of * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s, * and therefore is totally different from the * encoding of HERMES_RID_CNFTXRATECONTROL. * Don't forget that 6Mb/s is really 5.5Mb/s */ if (val == 6) *bitrate = 5500000; else *bitrate = val * 1000000; break; case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */ case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */ for (i = 0; i < BITRATE_TABLE_SIZE; i++) if (bitrate_table[i].intersil_txratectrl == val) { *bitrate = bitrate_table[i].bitrate * 100000; break; } if (i >= BITRATE_TABLE_SIZE) { printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n", priv->ndev->name, val); err = -EIO; } break; default: BUG(); } return err; } /* Set fixed AP address */ int __orinoco_hw_set_wap(struct orinoco_private *priv) { int roaming_flag; int err = 0; struct hermes *hw = &priv->hw; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* not supported */ break; case FIRMWARE_TYPE_INTERSIL: if (priv->bssid_fixed) roaming_flag = 2; else roaming_flag = 1; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFROAMINGMODE, roaming_flag); break; case FIRMWARE_TYPE_SYMBOL: err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFMANDATORYBSSID_SYMBOL, &priv->desired_bssid); break; } return err; } /* Change the WEP keys and/or the current keys. Can be called * either from __orinoco_hw_setup_enc() or directly from * orinoco_ioctl_setiwencode(). In the later case the association * with the AP is not broken (if the firmware can handle it), * which is needed for 802.1x implementations. 
*/ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; int i; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: { struct orinoco_key keys[ORINOCO_MAX_KEYS]; memset(&keys, 0, sizeof(keys)); for (i = 0; i < ORINOCO_MAX_KEYS; i++) { int len = min(priv->keys[i].key_len, ORINOCO_MAX_KEY_SIZE); memcpy(&keys[i].data, priv->keys[i].key, len); if (len > SMALL_KEY_SIZE) keys[i].len = cpu_to_le16(LARGE_KEY_SIZE); else if (len > 0) keys[i].len = cpu_to_le16(SMALL_KEY_SIZE); else keys[i].len = cpu_to_le16(0); } err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFWEPKEYS_AGERE, &keys); if (err) return err; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXKEY_AGERE, priv->tx_key); if (err) return err; break; } case FIRMWARE_TYPE_INTERSIL: case FIRMWARE_TYPE_SYMBOL: { int keylen; /* Force uniform key length to work around * firmware bugs */ keylen = priv->keys[priv->tx_key].key_len; if (keylen > LARGE_KEY_SIZE) { printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n", priv->ndev->name, priv->tx_key, keylen); return -E2BIG; } else if (keylen > SMALL_KEY_SIZE) keylen = LARGE_KEY_SIZE; else if (keylen > 0) keylen = SMALL_KEY_SIZE; else keylen = 0; /* Write all 4 keys */ for (i = 0; i < ORINOCO_MAX_KEYS; i++) { u8 key[LARGE_KEY_SIZE] = { 0 }; memcpy(key, priv->keys[i].key, priv->keys[i].key_len); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDEFAULTKEY0 + i, HERMES_BYTES_TO_RECLEN(keylen), key); if (err) return err; } /* Write the index of the key used in transmission */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPDEFAULTKEYID, priv->tx_key); if (err) return err; } break; } return 0; } int __orinoco_hw_setup_enc(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; int master_wep_flag; int auth_flag; int enc_flag; /* Setup WEP keys */ if (priv->encode_alg == ORINOCO_ALG_WEP) __orinoco_hw_setup_wepkeys(priv); if (priv->wep_restrict) auth_flag = HERMES_AUTH_SHARED_KEY; 
else auth_flag = HERMES_AUTH_OPEN; if (priv->wpa_enabled) enc_flag = 2; else if (priv->encode_alg == ORINOCO_ALG_WEP) enc_flag = 1; else enc_flag = 0; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Agere style WEP */ if (priv->encode_alg == ORINOCO_ALG_WEP) { /* Enable the shared-key authentication. */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION_AGERE, auth_flag); } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPENABLED_AGERE, enc_flag); if (err) return err; if (priv->has_wpa) { /* Set WPA key management */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE, priv->key_mgmt); if (err) return err; } break; case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */ case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */ if (priv->encode_alg == ORINOCO_ALG_WEP) { if (priv->wep_restrict || (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)) master_wep_flag = HERMES_WEP_PRIVACY_INVOKED | HERMES_WEP_EXCL_UNENCRYPTED; else master_wep_flag = HERMES_WEP_PRIVACY_INVOKED; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION, auth_flag); if (err) return err; } else master_wep_flag = 0; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) master_wep_flag |= HERMES_WEP_HOST_DECRYPT; /* Master WEP setting : on/off */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPFLAGS_INTERSIL, master_wep_flag); if (err) return err; break; } return 0; } /* key must be 32 bytes, including the tx and rx MIC keys. 
* rsc must be NULL or up to 8 bytes * tsc must be NULL or up to 8 bytes */ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, int set_tx, const u8 *key, const u8 *rsc, size_t rsc_len, const u8 *tsc, size_t tsc_len) { struct { __le16 idx; u8 rsc[ORINOCO_SEQ_LEN]; u8 key[TKIP_KEYLEN]; u8 tx_mic[MIC_KEYLEN]; u8 rx_mic[MIC_KEYLEN]; u8 tsc[ORINOCO_SEQ_LEN]; } __packed buf; struct hermes *hw = &priv->hw; int ret; int err; int k; u16 xmitting; key_idx &= 0x3; if (set_tx) key_idx |= 0x8000; buf.idx = cpu_to_le16(key_idx); memcpy(buf.key, key, sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic)); if (rsc_len > sizeof(buf.rsc)) rsc_len = sizeof(buf.rsc); if (tsc_len > sizeof(buf.tsc)) tsc_len = sizeof(buf.tsc); memset(buf.rsc, 0, sizeof(buf.rsc)); memset(buf.tsc, 0, sizeof(buf.tsc)); if (rsc != NULL) memcpy(buf.rsc, rsc, rsc_len); if (tsc != NULL) memcpy(buf.tsc, tsc, tsc_len); else buf.tsc[4] = 0x10; /* Wait up to 100ms for tx queue to empty */ for (k = 100; k > 0; k--) { udelay(1000); ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY, &xmitting); if (ret || !xmitting) break; } if (k == 0) ret = -ETIMEDOUT; err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE, &buf); return ret ? 
ret : err; } int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx) { struct hermes *hw = &priv->hw; int err; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE, key_idx); if (err) printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n", priv->ndev->name, err, key_idx); return err; } int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, struct net_device *dev, int mc_count, int promisc) { struct hermes *hw = &priv->hw; int err = 0; if (promisc != priv->promiscuous) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPROMISCUOUSMODE, promisc); if (err) { printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n", priv->ndev->name, err); } else priv->promiscuous = promisc; } /* If we're not in promiscuous mode, then we need to set the * group address if either we want to multicast, or if we were * multicasting and want to stop */ if (!promisc && (mc_count || priv->mc_count)) { struct netdev_hw_addr *ha; struct hermes_multicast mclist; int i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == mc_count) break; memcpy(mclist.addr[i++], ha->addr, ETH_ALEN); } err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES, HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN), &mclist); if (err) printk(KERN_ERR "%s: Error %d setting multicast list.\n", priv->ndev->name, err); else priv->mc_count = mc_count; } return err; } /* Return : < 0 -> error code ; >= 0 -> length */ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active, char buf[IW_ESSID_MAX_SIZE + 1]) { struct hermes *hw = &priv->hw; int err = 0; struct hermes_idstring essidbuf; char *p = (char *)(&essidbuf.val); int len; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (strlen(priv->desired_essid) > 0) { /* We read the desired SSID from the hardware rather than from priv->desired_essid, just in case the firmware is allowed to change it on us. 
I'm not sure about this */ /* My guess is that the OWNSSID should always be whatever * we set to the card, whereas CURRENT_SSID is the one that * may change... - Jean II */ u16 rid; *active = 1; rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID : HERMES_RID_CNFDESIREDSSID; err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf), NULL, &essidbuf); if (err) goto fail_unlock; } else { *active = 0; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID, sizeof(essidbuf), NULL, &essidbuf); if (err) goto fail_unlock; } len = le16_to_cpu(essidbuf.len); BUG_ON(len > IW_ESSID_MAX_SIZE); memset(buf, 0, IW_ESSID_MAX_SIZE); memcpy(buf, p, len); err = len; fail_unlock: orinoco_unlock(priv, &flags); return err; } int orinoco_hw_get_freq(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; u16 channel; int freq = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel); if (err) goto out; /* Intersil firmware 1.3.5 returns 0 when the interface is down */ if (channel == 0) { err = -EBUSY; goto out; } if ((channel < 1) || (channel > NUM_CHANNELS)) { printk(KERN_WARNING "%s: Channel out of range (%d)!\n", priv->ndev->name, channel); err = -EBUSY; goto out; } freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); out: orinoco_unlock(priv, &flags); if (err > 0) err = -EBUSY; return err ? 
err : freq; } int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates, s32 *rates, int max) { struct hermes *hw = &priv->hw; struct hermes_idstring list; unsigned char *p = (unsigned char *)&list.val; int err = 0; int num; int i; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES, sizeof(list), NULL, &list); orinoco_unlock(priv, &flags); if (err) return err; num = le16_to_cpu(list.len); *numrates = num; num = min(num, max); for (i = 0; i < num; i++) rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */ return 0; } int orinoco_hw_trigger_scan(struct orinoco_private *priv, const struct cfg80211_ssid *ssid) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; unsigned long flags; int err = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Scanning with port 0 disabled would fail */ if (!netif_running(dev)) { err = -ENETDOWN; goto out; } /* In monitor mode, the scan results are always empty. * Probe responses are passed to the driver as received * frames and could be processed in software. 
*/ if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { err = -EOPNOTSUPP; goto out; } if (priv->has_hostscan) { switch (priv->firmware_type) { case FIRMWARE_TYPE_SYMBOL: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFHOSTSCAN_SYMBOL, HERMES_HOSTSCAN_SYMBOL_ONCE | HERMES_HOSTSCAN_SYMBOL_BCAST); break; case FIRMWARE_TYPE_INTERSIL: { __le16 req[3]; req[0] = cpu_to_le16(0x3fff); /* All channels */ req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */ req[2] = 0; /* Any ESSID */ err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFHOSTSCAN, &req); break; } case FIRMWARE_TYPE_AGERE: if (ssid->ssid_len > 0) { struct hermes_idstring idbuf; size_t len = ssid->ssid_len; idbuf.len = cpu_to_le16(len); memcpy(idbuf.val, ssid->ssid, len); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFSCANSSID_AGERE, HERMES_BYTES_TO_RECLEN(len + 2), &idbuf); } else err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSCANSSID_AGERE, 0); /* Any ESSID */ if (err) break; if (priv->has_ext_scan) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSCANCHANNELS2GHZ, 0x7FFF); if (err) goto out; err = hermes_inquire(hw, HERMES_INQ_CHANNELINFO); } else err = hermes_inquire(hw, HERMES_INQ_SCAN); break; } } else err = hermes_inquire(hw, HERMES_INQ_SCAN); out: orinoco_unlock(priv, &flags); return err; } /* Disassociate from node with BSSID addr */ int orinoco_hw_disassociate(struct orinoco_private *priv, u8 *addr, u16 reason_code) { struct hermes *hw = &priv->hw; int err; struct { u8 addr[ETH_ALEN]; __le16 reason_code; } __packed buf; /* Currently only supported by WPA enabled Agere fw */ if (!priv->has_wpa) return -EOPNOTSUPP; memcpy(buf.addr, addr, ETH_ALEN); buf.reason_code = cpu_to_le16(reason_code); err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFDISASSOCIATE, &buf); return err; } int orinoco_hw_get_current_bssid(struct orinoco_private *priv, u8 *addr) { struct hermes *hw = &priv->hw; int err; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, ETH_ALEN, NULL, addr); return 
err; }
gpl-2.0
GusBricker/surfacepro2-kernel
drivers/media/pci/cx23885/cx23885-ioctl.c
671
2955
/* * Driver for the Conexant CX23885/7/8 PCIe bridge * * Various common ioctl() support functions * * Copyright (c) 2009 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx23885.h" #include "cx23885-ioctl.h" #ifdef CONFIG_VIDEO_ADV_DEBUG int cx23885_g_chip_info(struct file *file, void *fh, struct v4l2_dbg_chip_info *chip) { struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev; if (chip->match.addr > 1) return -EINVAL; if (chip->match.addr == 1) { if (dev->v4l_device == NULL) return -EINVAL; strlcpy(chip->name, "cx23417", sizeof(chip->name)); } else { strlcpy(chip->name, dev->v4l2_dev.name, sizeof(chip->name)); } return 0; } static int cx23417_g_register(struct cx23885_dev *dev, struct v4l2_dbg_register *reg) { u32 value; if (dev->v4l_device == NULL) return -EINVAL; if ((reg->reg & 0x3) != 0 || reg->reg >= 0x10000) return -EINVAL; if (mc417_register_read(dev, (u16) reg->reg, &value)) return -EINVAL; /* V4L2 spec, but -EREMOTEIO really */ reg->size = 4; reg->val = value; return 0; } int cx23885_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev; if (reg->match.addr > 1) return -EINVAL; if (reg->match.addr) return cx23417_g_register(dev, reg); if ((reg->reg & 0x3) != 0 || reg->reg >= pci_resource_len(dev->pci, 
0)) return -EINVAL; reg->size = 4; reg->val = cx_read(reg->reg); return 0; } static int cx23417_s_register(struct cx23885_dev *dev, const struct v4l2_dbg_register *reg) { if (dev->v4l_device == NULL) return -EINVAL; if ((reg->reg & 0x3) != 0 || reg->reg >= 0x10000) return -EINVAL; if (mc417_register_write(dev, (u16) reg->reg, (u32) reg->val)) return -EINVAL; /* V4L2 spec, but -EREMOTEIO really */ return 0; } int cx23885_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg) { struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev; if (reg->match.addr > 1) return -EINVAL; if (reg->match.addr) return cx23417_s_register(dev, reg); if ((reg->reg & 0x3) != 0 || reg->reg >= pci_resource_len(dev->pci, 0)) return -EINVAL; cx_write(reg->reg, reg->val); return 0; } #endif
gpl-2.0
Dm47021/AlienKernel4Jellybean
net/wireless/sysfs.c
927
2957
/* * This file provides /sys/class/ieee80211/<wiphy name>/ * and some default attributes. * * Copyright 2005-2006 Jiri Benc <jbenc@suse.cz> * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * This file is GPLv2 as found in COPYING. */ #include <linux/device.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <net/cfg80211.h> #include "sysfs.h" #include "core.h" static inline struct cfg80211_registered_device *dev_to_rdev( struct device *dev) { return container_of(dev, struct cfg80211_registered_device, wiphy.dev); } #define SHOW_FMT(name, fmt, member) \ static ssize_t name ## _show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ } SHOW_FMT(index, "%d", wiphy_idx); SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); SHOW_FMT(address_mask, "%pM", wiphy.addr_mask); static ssize_t addresses_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; char *start = buf; int i; if (!wiphy->addresses) return sprintf(buf, "%pM\n", wiphy->perm_addr); for (i = 0; i < wiphy->n_addresses; i++) buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr); return buf - start; } static struct device_attribute ieee80211_dev_attrs[] = { __ATTR_RO(index), __ATTR_RO(macaddress), __ATTR_RO(address_mask), __ATTR_RO(addresses), {} }; static void wiphy_dev_release(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); cfg80211_dev_free(rdev); } #ifdef CONFIG_HOTPLUG static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) { /* TODO, we probably need stuff here */ return 0; } #endif static int wiphy_suspend(struct device *dev, pm_message_t state) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; rdev->suspend_at = get_seconds(); if (rdev->ops->suspend) { rtnl_lock(); ret = rdev->ops->suspend(&rdev->wiphy); 
rtnl_unlock(); } return ret; } static int wiphy_resume(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; /* Age scan results with time spent in suspend */ spin_lock_bh(&rdev->bss_lock); cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); spin_unlock_bh(&rdev->bss_lock); if (rdev->ops->resume) { rtnl_lock(); ret = rdev->ops->resume(&rdev->wiphy); rtnl_unlock(); } return ret; } struct class ieee80211_class = { .name = "ieee80211", .owner = THIS_MODULE, .dev_release = wiphy_dev_release, .dev_attrs = ieee80211_dev_attrs, #ifdef CONFIG_HOTPLUG .dev_uevent = wiphy_uevent, #endif .suspend = wiphy_suspend, .resume = wiphy_resume, }; int wiphy_sysfs_init(void) { return class_register(&ieee80211_class); } void wiphy_sysfs_exit(void) { class_unregister(&ieee80211_class); }
gpl-2.0
embeddedarm/linux-2.6.35-ts4800
arch/m68knommu/platform/68360/config.c
1695
4731
/*
 * linux/arch/m68knommu/platform/68360/config.c
 *
 * Board-support package for MC68360 (QUICC) based boards: programs the
 * CPM general-purpose timer 1 as the 100Hz system tick, derives the CPU
 * clock from the SIM PLL configuration, and installs the machine-vector
 * hooks (gettod / reset).
 *
 * Copyright (c) 2000 Michael Leslie <mleslie@lineo.com>
 * Copyright (C) 1993 Hamish Macdonald
 * Copyright (C) 1999 D. Jeff Dionne <jeff@uclinux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <stdarg.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/setup.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/m68360.h>

#ifdef CONFIG_UCQUICC
#include <asm/bootstd.h>
#endif

extern void m360_cpm_reset(void);

// Mask to select if the PLL prescaler is enabled.
#define MCU_PREEN ((unsigned short)(0x0001 << 13))

/* Board crystal frequency; only defined for the uCquicc board, so
 * config_BSP() below compiles only when CONFIG_UCQUICC is set. */
#if defined(CONFIG_UCQUICC)
#define OSCILLATOR (unsigned long int)33000000
#endif

/* CPU core clock in Hz, computed from the PLL in config_BSP(). */
unsigned long int system_clock;

/* Dual-ported RAM / register map of the QUICC, set up elsewhere. */
extern QUICC *pquicc;

/* TODO DON"T Hard Code this */
/* calculate properly using the right PLL and prescaller */
// unsigned int system_clock = 33000000l;
extern unsigned long int system_clock; //In kernel setup.c

/*
 * Timer 1 interrupt handler: acknowledge the CPM timer event by
 * writing the REF event bit, then run the generic kernel tick.
 */
static irqreturn_t hw_tick(int irq, void *dummy)
{
	/* Reset Timer1 */
	/* TSTAT &= 0; */
	pquicc->timer_ter1 = 0x0002; /* clear timer event */

	return arch_timer_interrupt(irq, dummy);
}

static struct irqaction m68360_timer_irq = {
	.name	 = "timer",
	.flags	 = IRQF_DISABLED | IRQF_TIMER,
	.handler = hw_tick,
};

/*
 * Program QUICC general-purpose timer 1 for a HZ (100Hz) periodic
 * interrupt, derived from system_clock, and hook it to CPMVEC_TIMER1.
 */
void hw_timer_init(void)
{
	unsigned char prescaler;
	unsigned short tgcr_save;

#if 0
	/* Restart mode, Enable int, 32KHz, Enable timer */
	TCTL = TCTL_OM | TCTL_IRQEN | TCTL_CLKSOURCE_32KHZ | TCTL_TEN;
	/* Set prescaler (Divide 32KHz by 32)*/
	TPRER = 31;
	/* Set compare register  32Khz / 32 / 10 = 100 */
	TCMP = 10;

	request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
#endif

	/* General purpose quicc timers: MC68360UM p7-20 */

	/* Set up timer 1 (in [1..4]) to do 100Hz */
	tgcr_save = pquicc->timer_tgcr & 0xfff0;
	pquicc->timer_tgcr = tgcr_save; /* stop and reset timer 1 */
	/* pquicc->timer_tgcr |= 0x4444; */ /* halt timers when FREEZE (ie bdm freeze) */

	prescaler = 8;
	pquicc->timer_tmr1 = 0x001a | /* or=1, frr=1, iclk=01b */
		(unsigned short)((prescaler - 1) << 8);

	pquicc->timer_tcn1 = 0x0000; /* initial count */
	/* calculate interval for 100Hz based on the _system_clock: */
	pquicc->timer_trr1 = (system_clock/ prescaler) / HZ; /* reference count */

	pquicc->timer_ter1 = 0x0003; /* clear timer events */

	/* enable timer 1 interrupt in CIMR */
	setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);

	/* Start timer 1: */
	tgcr_save = (pquicc->timer_tgcr & 0xfff0) | 0x0001;
	pquicc->timer_tgcr = tgcr_save;
}

/*
 * No-op: this BSP provides no battery-backed time-of-day clock, so the
 * output parameters are deliberately left untouched (body is empty).
 */
void BSP_gettod (int *yearp, int *monp, int *dayp,
		 int *hourp, int *minp, int *secp)
{
}

/*
 * Stub: would write minutes/seconds back to an RTC, but the RTC code
 * is compiled out (#if 0).  Always reports success.
 */
int BSP_set_clock_mmss(unsigned long nowtime)
{
#if 0
	short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;

	tod->second1 = real_seconds / 10;
	tod->second2 = real_seconds % 10;
	tod->minute1 = real_minutes / 10;
	tod->minute2 = real_minutes % 10;
#endif
	return 0;
}

/*
 * Soft reset: with interrupts off, reload the initial SP and PC from
 * the vector table at _start and jump there, restarting the kernel.
 * NOTE(review): the byte write to 0xFFFFF300 looks like a module/SIM
 * control register poke -- confirm against the board's memory map.
 */
void BSP_reset (void)
{
	local_irq_disable();
	asm volatile (
		"moveal #_start, %a0;\n"
		"moveb #0, 0xFFFFF300;\n"
		"moveal 0(%a0), %sp;\n"
		"moveal 4(%a0), %a0;\n"
		"jmp (%a0);\n"
		);
}

/* MAC address for SCC1 Ethernet; filled in from the bootloader on
 * uCquicc, otherwise pointed at a placeholder address below. */
unsigned char *scc1_hwaddr;
static int errno;

/* Bootloader "bsc" syscall stubs: serial number, MAC address and
 * boot-environment lookup provided by the uCquicc boot monitor. */
#if defined (CONFIG_UCQUICC)
_bsc0(char *, getserialnum)
_bsc1(unsigned char *, gethwaddr, int, a)
_bsc1(char *, getbenv, char *, a)
#endif

/*
 * Board-level init: reset the CPM, compute system_clock from the SIM
 * PLL multiplication factor, fetch boot-monitor data (uCquicc only)
 * and install the machine-vector callbacks.
 */
void config_BSP(char *command, int len)
{
	unsigned char *p;

	m360_cpm_reset();

	/* Calculate the real system clock value. */
	{
		unsigned int local_pllcr = (unsigned int)(pquicc->sim_pllcr);
		if( local_pllcr & MCU_PREEN ) // If the prescaler is dividing by 128
		{
			int mf = (int)(pquicc->sim_pllcr & 0x0fff);
			system_clock = (OSCILLATOR / 128) * (mf + 1);
		}
		else
		{
			int mf = (int)(pquicc->sim_pllcr & 0x0fff);
			system_clock = (OSCILLATOR) * (mf + 1);
		}
	}

	printk(KERN_INFO "\n68360 QUICC support (C) 2000 Lineo Inc.\n");

	/* The bootloader queries are disabled (&& 0); the placeholder
	 * MAC below is used instead. */
#if defined(CONFIG_UCQUICC) && 0
	printk(KERN_INFO "uCquicc serial string [%s]\n",getserialnum());
	p = scc1_hwaddr = gethwaddr(0);
	printk(KERN_INFO "uCquicc hwaddr %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	       p[0], p[1], p[2], p[3], p[4], p[5]);

	p = getbenv("APPEND");
	if (p)
		strcpy(p,command);
	else
		command[0] = 0;
#else
	scc1_hwaddr = "\00\01\02\03\04\05";
#endif

	mach_gettod = BSP_gettod;
	mach_reset = BSP_reset;
}
gpl-2.0
ivanmeler/android_kernel_samsung_smdk4412
drivers/tty/serial/jsm/jsm_neo.c
2207
37237
/************************************************************************ * Copyright 2003 Digi International (www.digi.com) * * Copyright (C) 2004 IBM Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 * Temple Place - Suite 330, Boston, * MA 02111-1307, USA. * * Contact Information: * Scott H Kilau <Scott_Kilau@digi.com> * Wendy Xiong <wendyx@us.ibm.com> * ***********************************************************************/ #include <linux/delay.h> /* For udelay */ #include <linux/serial_reg.h> /* For the various UART offsets */ #include <linux/tty.h> #include <linux/pci.h> #include <asm/io.h> #include "jsm.h" /* Driver main header file */ static u32 jsm_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; /* * This function allows calls to ensure that all outstanding * PCI writes have been completed, by doing a PCI read against * a non-destructive, read-only location on the Neo card. * * In this case, we are reading the DVID (Read-only Device Identification) * value of the Neo card. 
*/ static inline void neo_pci_posting_flush(struct jsm_board *bd) { readb(bd->re_map_membase + 0x8D); } static void neo_set_cts_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n"); /* Turn on auto CTS flow control */ ier |= (UART_17158_IER_CTSDSR); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR); /* Turn off auto Xon flow control */ efr &= ~(UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); /* Feed the UART our trigger levels */ writeb(8, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 8; writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_rts_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n"); /* Turn on auto RTS flow control */ ier |= (UART_17158_IER_RTSDTR); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR); /* Turn off auto Xoff flow control */ ier &= ~(UART_17158_IER_XOFF); efr &= ~(UART_17158_EFR_IXOFF); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 4; writeb(56, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 56; writeb(ier, &ch->ch_neo_uart->ier); /* * From the Neo UART spec sheet: * The auto RTS/DTR function must be started by asserting * RTS/DTR# output pin (MCR bit-0 or 1 to logic 1 after * it is enabled. 
*/ ch->ch_mostat |= (UART_MCR_RTS); } static void neo_set_ixon_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n"); /* Turn off auto CTS flow control */ ier &= ~(UART_17158_IER_CTSDSR); efr &= ~(UART_17158_EFR_CTSDSR); /* Turn on auto Xon flow control */ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 4; writeb(32, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 32; /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_ixoff_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n"); /* Turn off auto RTS flow control */ ier &= ~(UART_17158_IER_RTSDTR); efr &= ~(UART_17158_EFR_RTSDTR); /* Turn on auto Xoff flow control */ ier |= (UART_17158_IER_XOFF); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); writeb(8, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 8; /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_no_input_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n"); /* Turn off auto RTS flow control */ ier &= ~(UART_17158_IER_RTSDTR); efr &= ~(UART_17158_EFR_RTSDTR); /* Turn off auto Xoff flow control */ ier &= ~(UART_17158_IER_XOFF); if (ch->ch_c_iflag & IXON) efr &= ~(UART_17158_EFR_IXOFF); else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 0; writeb(16, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 16; writeb(16, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 16; writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_no_output_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n"); /* Turn off auto CTS flow control */ ier &= ~(UART_17158_IER_CTSDSR); efr &= ~(UART_17158_EFR_CTSDSR); /* Turn off auto Xon flow control */ if (ch->ch_c_iflag & IXOFF) efr &= ~(UART_17158_EFR_IXON); else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 0; writeb(16, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 16; writeb(16, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 16; writeb(ier, &ch->ch_neo_uart->ier); } static inline void neo_set_new_start_stop_chars(struct jsm_channel *ch) { /* if hardware flow control is set, then skip this whole thing */ if (ch->ch_c_cflag & CRTSCTS) return; jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "start\n"); /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); } static void neo_copy_data_from_uart_to_queue(struct 
jsm_channel *ch) { int qleft = 0; u8 linestatus = 0; u8 error_mask = 0; int n = 0; int total = 0; u16 head; u16 tail; if (!ch) return; /* cache head and tail of queue */ head = ch->ch_r_head & RQUEUEMASK; tail = ch->ch_r_tail & RQUEUEMASK; /* Get our cached LSR */ linestatus = ch->ch_cached_lsr; ch->ch_cached_lsr = 0; /* Store how much space we have left in the queue */ if ((qleft = tail - head - 1) < 0) qleft += RQUEUEMASK + 1; /* * If the UART is not in FIFO mode, force the FIFO copy to * NOT be run, by setting total to 0. * * On the other hand, if the UART IS in FIFO mode, then ask * the UART to give us an approximation of data it has RX'ed. */ if (!(ch->ch_flags & CH_FIFO_ENABLED)) total = 0; else { total = readb(&ch->ch_neo_uart->rfifo); /* * EXAR chip bug - RX FIFO COUNT - Fudge factor. * * This resolves a problem/bug with the Exar chip that sometimes * returns a bogus value in the rfifo register. * The count can be any where from 0-3 bytes "off". * Bizarre, but true. */ total -= 3; } /* * Finally, bound the copy to make sure we don't overflow * our own queue... * The byte by byte copy loop below this loop this will * deal with the queue overflow possibility. */ total = min(total, qleft); while (total > 0) { /* * Grab the linestatus register, we need to check * to see if there are any errors in the FIFO. */ linestatus = readb(&ch->ch_neo_uart->lsr); /* * Break out if there is a FIFO error somewhere. * This will allow us to go byte by byte down below, * finding the exact location of the error. */ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) break; /* Make sure we don't go over the end of our queue */ n = min(((u32) total), (RQUEUESIZE - (u32) head)); /* * Cut down n even further if needed, this is to fix * a problem with memcpy_fromio() with the Neo on the * IBM pSeries platform. * 15 bytes max appears to be the magic number. 
*/ n = min((u32) n, (u32) 12); /* * Since we are grabbing the linestatus register, which * will reset some bits after our read, we need to ensure * we don't miss our TX FIFO emptys. */ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); linestatus = 0; /* Copy data from uart to the queue */ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); /* * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed * that all the data currently in the FIFO is free of * breaks and parity/frame/orun errors. */ memset(ch->ch_equeue + head, 0, n); /* Add to and flip head if needed */ head = (head + n) & RQUEUEMASK; total -= n; qleft -= n; ch->ch_rxcount += n; } /* * Create a mask to determine whether we should * insert the character (if any) into our queue. */ if (ch->ch_c_iflag & IGNBRK) error_mask |= UART_LSR_BI; /* * Now cleanup any leftover bytes still in the UART. * Also deal with any possible queue overflow here as well. */ while (1) { /* * Its possible we have a linestatus from the loop above * this, so we "OR" on any extra bits. */ linestatus |= readb(&ch->ch_neo_uart->lsr); /* * If the chip tells us there is no more data pending to * be read, we can then leave. * But before we do, cache the linestatus, just in case. */ if (!(linestatus & UART_LSR_DR)) { ch->ch_cached_lsr = linestatus; break; } /* No need to store this bit */ linestatus &= ~UART_LSR_DR; /* * Since we are grabbing the linestatus register, which * will reset some bits after our read, we need to ensure * we don't miss our TX FIFO emptys. */ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) { linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } /* * Discard character if we are ignoring the error mask. 
*/ if (linestatus & error_mask) { u8 discard; linestatus = 0; memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1); continue; } /* * If our queue is full, we have no choice but to drop some data. * The assumption is that HWFLOW or SWFLOW should have stopped * things way way before we got to this point. * * I decided that I wanted to ditch the oldest data first, * I hope thats okay with everyone? Yes? Good. */ while (qleft < 1) { jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Queue full, dropping DATA:%x LSR:%x\n", ch->ch_rqueue[tail], ch->ch_equeue[tail]); ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK; ch->ch_err_overrun++; qleft++; } memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); ch->ch_equeue[head] = (u8) linestatus; jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]); /* Ditch any remaining linestatus value. */ linestatus = 0; /* Add to and flip head if needed */ head = (head + 1) & RQUEUEMASK; qleft--; ch->ch_rxcount++; } /* * Write new final heads to channel structure. */ ch->ch_r_head = head & RQUEUEMASK; ch->ch_e_head = head & EQUEUEMASK; jsm_input(ch); } static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) { u16 head; u16 tail; int n; int s; int qlen; u32 len_written = 0; struct circ_buf *circ; if (!ch) return; circ = &ch->uart_port.state->xmit; /* No data to write to the UART */ if (uart_circ_empty(circ)) return; /* If port is "stopped", don't send any data to the UART */ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) return; /* * If FIFOs are disabled. 
Send data directly to txrx register */ if (!(ch->ch_flags & CH_FIFO_ENABLED)) { u8 lsrbits = readb(&ch->ch_neo_uart->lsr); ch->ch_cached_lsr |= lsrbits; if (ch->ch_cached_lsr & UART_LSR_THRE) { ch->ch_cached_lsr &= ~(UART_LSR_THRE); writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx); jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev, "Tx data: %x\n", circ->buf[circ->head]); circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1); ch->ch_txcount++; } return; } /* * We have to do it this way, because of the EXAR TXFIFO count bug. */ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) return; n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; /* cache head and tail of queue */ head = circ->head & (UART_XMIT_SIZE - 1); tail = circ->tail & (UART_XMIT_SIZE - 1); qlen = uart_circ_chars_pending(circ); /* Find minimum of the FIFO space, versus queue length */ n = min(n, qlen); while (n > 0) { s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail; s = min(s, n); if (s <= 0) break; memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s); /* Add and flip queue if needed */ tail = (tail + s) & (UART_XMIT_SIZE - 1); n -= s; ch->ch_txcount += s; len_written += s; } /* Update the final tail */ circ->tail = tail & (UART_XMIT_SIZE - 1); if (len_written >= ch->ch_t_tlevel) ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); if (uart_circ_empty(circ)) uart_write_wakeup(&ch->uart_port); } static void neo_parse_modem(struct jsm_channel *ch, u8 signals) { u8 msignals = signals; jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev, "neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals); /* Scrub off lower bits. 
They signify delta's, which I don't care about */ /* Keep DDCD and DDSR though */ msignals &= 0xf8; if (msignals & UART_MSR_DDCD) uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); if (msignals & UART_MSR_DDSR) uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS); if (msignals & UART_MSR_DCD) ch->ch_mistat |= UART_MSR_DCD; else ch->ch_mistat &= ~UART_MSR_DCD; if (msignals & UART_MSR_DSR) ch->ch_mistat |= UART_MSR_DSR; else ch->ch_mistat &= ~UART_MSR_DSR; if (msignals & UART_MSR_RI) ch->ch_mistat |= UART_MSR_RI; else ch->ch_mistat &= ~UART_MSR_RI; if (msignals & UART_MSR_CTS) ch->ch_mistat |= UART_MSR_CTS; else ch->ch_mistat &= ~UART_MSR_CTS; jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev, "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", ch->ch_portnum, !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); } /* Make the UART raise any of the output signals we want up */ static void neo_assert_modem_signals(struct jsm_channel *ch) { if (!ch) return; writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } /* * Flush the WRITE FIFO on the Neo. * * NOTE: Channel lock MUST be held before calling this function! */ static void neo_flush_uart_write(struct jsm_channel *ch) { u8 tmp = 0; int i = 0; if (!ch) return; writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); for (i = 0; i < 10; i++) { /* Check to see if the UART feels it completely flushed the FIFO. */ tmp = readb(&ch->ch_neo_uart->isr_fcr); if (tmp & 4) { jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "Still flushing TX UART... 
i: %d\n", i); udelay(10); } else break; } ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } /* * Flush the READ FIFO on the Neo. * * NOTE: Channel lock MUST be held before calling this function! */ static void neo_flush_uart_read(struct jsm_channel *ch) { u8 tmp = 0; int i = 0; if (!ch) return; writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); for (i = 0; i < 10; i++) { /* Check to see if the UART feels it completely flushed the FIFO. */ tmp = readb(&ch->ch_neo_uart->isr_fcr); if (tmp & 2) { jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "Still flushing RX UART... i: %d\n", i); udelay(10); } else break; } } /* * No locks are assumed to be held when calling this function. */ static void neo_clear_break(struct jsm_channel *ch, int force) { unsigned long lock_flags; spin_lock_irqsave(&ch->ch_lock, lock_flags); /* Turn break off, and unset some variables */ if (ch->ch_flags & CH_BREAK_SENDING) { u8 temp = readb(&ch->ch_neo_uart->lcr); writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); ch->ch_flags &= ~(CH_BREAK_SENDING); jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "clear break Finishing UART_LCR_SBC! finished: %lx\n", jiffies); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* * Parse the ISR register. */ static inline void neo_parse_isr(struct jsm_board *brd, u32 port) { struct jsm_channel *ch; u8 isr; u8 cause; unsigned long lock_flags; if (!brd) return; if (port > brd->maxports) return; ch = brd->channels[port]; if (!ch) return; /* Here we try to figure out what caused the interrupt to happen */ while (1) { isr = readb(&ch->ch_neo_uart->isr_fcr); /* Bail if no pending interrupt */ if (isr & UART_IIR_NO_INT) break; /* * Yank off the upper 2 bits, which just show that the FIFO's are enabled. 
*/ isr &= ~(UART_17158_IIR_FIFO_ENABLED); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "%s:%d isr: %x\n", __FILE__, __LINE__, isr); if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) { /* Read data from uart -> queue */ neo_copy_data_from_uart_to_queue(ch); /* Call our tty layer to enforce queue flow control if needed. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } if (isr & UART_IIR_THRI) { /* Transfer data (if any) from Write Queue -> UART. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_copy_data_from_queue_to_uart(ch); } if (isr & UART_17158_IIR_XONXOFF) { cause = readb(&ch->ch_neo_uart->xoffchar1); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause); /* * Since the UART detected either an XON or * XOFF match, we need to figure out which * one it was, so we can suspend or resume data flow. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); if (cause == UART_17158_XON_DETECT) { /* Is output stopped right now, if so, resume it */ if (brd->channels[port]->ch_flags & CH_STOP) { ch->ch_flags &= ~(CH_STOP); } jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port %d. XON detected in incoming data\n", port); } else if (cause == UART_17158_XOFF_DETECT) { if (!(brd->channels[port]->ch_flags & CH_STOP)) { ch->ch_flags |= CH_STOP; jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Setting CH_STOP\n"); } jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port: %d. XOFF detected in incoming data\n", port); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) { /* * If we get here, this means the hardware is doing auto flow control. * Check to see whether RTS/DTR or CTS/DSR caused this interrupt. */ cause = readb(&ch->ch_neo_uart->mcr); /* Which pin is doing auto flow? RTS or DTR? 
*/ spin_lock_irqsave(&ch->ch_lock, lock_flags); if ((cause & 0x4) == 0) { if (cause & UART_MCR_RTS) ch->ch_mostat |= UART_MCR_RTS; else ch->ch_mostat &= ~(UART_MCR_RTS); } else { if (cause & UART_MCR_DTR) ch->ch_mostat |= UART_MCR_DTR; else ch->ch_mostat &= ~(UART_MCR_DTR); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* Parse any modem signal changes */ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "MOD_STAT: sending to parse_modem_sigs\n"); neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); } } static inline void neo_parse_lsr(struct jsm_board *brd, u32 port) { struct jsm_channel *ch; int linestatus; unsigned long lock_flags; if (!brd) return; if (port > brd->maxports) return; ch = brd->channels[port]; if (!ch) return; linestatus = readb(&ch->ch_neo_uart->lsr); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus); ch->ch_cached_lsr |= linestatus; if (ch->ch_cached_lsr & UART_LSR_DR) { /* Read data from uart -> queue */ neo_copy_data_from_uart_to_queue(ch); spin_lock_irqsave(&ch->ch_lock, lock_flags); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* * This is a special flag. It indicates that at least 1 * RX error (parity, framing, or break) has happened. * Mark this in our struct, which will tell me that I have *to do the special RX+LSR read for this FIFO load. */ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d Got an RX error, need to parse LSR\n", __FILE__, __LINE__, port); /* * The next 3 tests should *NOT* happen, as the above test * should encapsulate all 3... At least, thats what Exar says. */ if (linestatus & UART_LSR_PE) { ch->ch_err_parity++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_FE) { ch->ch_err_frame++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. 
FRM ERR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_BI) { ch->ch_err_break++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_OE) { /* * Rx Oruns. Exar says that an orun will NOT corrupt * the FIFO. It will just replace the holding register * with this new data byte. So basically just ignore this. * Probably we should eventually have an orun stat in our driver... */ ch->ch_err_overrun++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_THRE) { spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); /* Transfer data (if any) from Write Queue -> UART. */ neo_copy_data_from_queue_to_uart(ch); } else if (linestatus & UART_17158_TX_AND_FIFO_CLR) { spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); /* Transfer data (if any) from Write Queue -> UART. */ neo_copy_data_from_queue_to_uart(ch); } } /* * neo_param() * Send any/all changes to the line to the UART. */ static void neo_param(struct jsm_channel *ch) { u8 lcr = 0; u8 uart_lcr, ier; u32 baud; int quot; struct jsm_board *bd; bd = ch->ch_bd; if (!bd) return; /* * If baud rate is zero, flush queues, and set mval to drop DTR. 
*/ if ((ch->ch_c_cflag & (CBAUD)) == 0) { ch->ch_r_head = ch->ch_r_tail = 0; ch->ch_e_head = ch->ch_e_tail = 0; neo_flush_uart_write(ch); neo_flush_uart_read(ch); ch->ch_flags |= (CH_BAUD0); ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); neo_assert_modem_signals(ch); return; } else { int i; unsigned int cflag; static struct { unsigned int rate; unsigned int cflag; } baud_rates[] = { { 921600, B921600 }, { 460800, B460800 }, { 230400, B230400 }, { 115200, B115200 }, { 57600, B57600 }, { 38400, B38400 }, { 19200, B19200 }, { 9600, B9600 }, { 4800, B4800 }, { 2400, B2400 }, { 1200, B1200 }, { 600, B600 }, { 300, B300 }, { 200, B200 }, { 150, B150 }, { 134, B134 }, { 110, B110 }, { 75, B75 }, { 50, B50 }, }; cflag = C_BAUD(ch->uart_port.state->port.tty); baud = 9600; for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { if (baud_rates[i].cflag == cflag) { baud = baud_rates[i].rate; break; } } if (ch->ch_flags & CH_BAUD0) ch->ch_flags &= ~(CH_BAUD0); } if (ch->ch_c_cflag & PARENB) lcr |= UART_LCR_PARITY; if (!(ch->ch_c_cflag & PARODD)) lcr |= UART_LCR_EPAR; /* * Not all platforms support mark/space parity, * so this will hide behind an ifdef. 
*/ #ifdef CMSPAR if (ch->ch_c_cflag & CMSPAR) lcr |= UART_LCR_SPAR; #endif if (ch->ch_c_cflag & CSTOPB) lcr |= UART_LCR_STOP; switch (ch->ch_c_cflag & CSIZE) { case CS5: lcr |= UART_LCR_WLEN5; break; case CS6: lcr |= UART_LCR_WLEN6; break; case CS7: lcr |= UART_LCR_WLEN7; break; case CS8: default: lcr |= UART_LCR_WLEN8; break; } ier = readb(&ch->ch_neo_uart->ier); uart_lcr = readb(&ch->ch_neo_uart->lcr); if (baud == 0) baud = 9600; quot = ch->ch_bd->bd_dividend / baud; if (quot != 0) { writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr); writeb((quot & 0xff), &ch->ch_neo_uart->txrx); writeb((quot >> 8), &ch->ch_neo_uart->ier); writeb(lcr, &ch->ch_neo_uart->lcr); } if (uart_lcr != lcr) writeb(lcr, &ch->ch_neo_uart->lcr); if (ch->ch_c_cflag & CREAD) ier |= (UART_IER_RDI | UART_IER_RLSI); ier |= (UART_IER_THRI | UART_IER_MSI); writeb(ier, &ch->ch_neo_uart->ier); /* Set new start/stop chars */ neo_set_new_start_stop_chars(ch); if (ch->ch_c_cflag & CRTSCTS) neo_set_cts_flow_control(ch); else if (ch->ch_c_iflag & IXON) { /* If start/stop is set to disable, then we should disable flow control */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_set_no_output_flow_control(ch); else neo_set_ixon_flow_control(ch); } else neo_set_no_output_flow_control(ch); if (ch->ch_c_cflag & CRTSCTS) neo_set_rts_flow_control(ch); else if (ch->ch_c_iflag & IXOFF) { /* If start/stop is set to disable, then we should disable flow control */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_set_no_input_flow_control(ch); else neo_set_ixoff_flow_control(ch); } else neo_set_no_input_flow_control(ch); /* * Adjust the RX FIFO Trigger level if baud is less than 9600. * Not exactly elegant, but this is needed because of the Exar chip's * delay on firing off the RX FIFO interrupt on slower baud rates. 
*/ if (baud < 9600) { writeb(1, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 1; } neo_assert_modem_signals(ch); /* Get current status of the modem signals now */ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); return; } /* * jsm_neo_intr() * * Neo specific interrupt handler. */ static irqreturn_t neo_intr(int irq, void *voidbrd) { struct jsm_board *brd = voidbrd; struct jsm_channel *ch; int port = 0; int type = 0; int current_port; u32 tmp; u32 uart_poll; unsigned long lock_flags; unsigned long lock_flags2; int outofloop_count = 0; /* Lock out the slow poller from running on this board. */ spin_lock_irqsave(&brd->bd_intr_lock, lock_flags); /* * Read in "extended" IRQ information from the 32bit Neo register. * Bits 0-7: What port triggered the interrupt. * Bits 8-31: Each 3bits indicate what type of interrupt occurred. */ uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET); jsm_printk(INTR, INFO, &brd->pci_dev, "%s:%d uart_poll: %x\n", __FILE__, __LINE__, uart_poll); if (!uart_poll) { jsm_printk(INTR, INFO, &brd->pci_dev, "Kernel interrupted to me, but no pending interrupts...\n"); spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); return IRQ_NONE; } /* At this point, we have at least SOMETHING to service, dig further... */ current_port = 0; /* Loop on each port */ while (((uart_poll & 0xff) != 0) && (outofloop_count < 0xff)){ tmp = uart_poll; outofloop_count++; /* Check current port to see if it has interrupt pending */ if ((tmp & jsm_offset_table[current_port]) != 0) { port = current_port; type = tmp >> (8 + (port * 3)); type &= 0x7; } else { current_port++; continue; } jsm_printk(INTR, INFO, &brd->pci_dev, "%s:%d port: %x type: %x\n", __FILE__, __LINE__, port, type); /* Remove this port + type from uart_poll */ uart_poll &= ~(jsm_offset_table[port]); if (!type) { /* If no type, just ignore it, and move onto next port */ jsm_printk(INTR, ERR, &brd->pci_dev, "Interrupt with no type! 
port: %d\n", port); continue; } /* Switch on type of interrupt we have */ switch (type) { case UART_17158_RXRDY_TIMEOUT: /* * RXRDY Time-out is cleared by reading data in the * RX FIFO until it falls below the trigger level. */ /* Verify the port is in range. */ if (port > brd->nasync) continue; ch = brd->channels[port]; neo_copy_data_from_uart_to_queue(ch); /* Call our tty layer to enforce queue flow control if needed. */ spin_lock_irqsave(&ch->ch_lock, lock_flags2); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); continue; case UART_17158_RX_LINE_STATUS: /* * RXRDY and RX LINE Status (logic OR of LSR[4:1]) */ neo_parse_lsr(brd, port); continue; case UART_17158_TXRDY: /* * TXRDY interrupt clears after reading ISR register for the UART channel. */ /* * Yes, this is odd... * Why would I check EVERY possibility of type of * interrupt, when we know its TXRDY??? * Becuz for some reason, even tho we got triggered for TXRDY, * it seems to be occasionally wrong. Instead of TX, which * it should be, I was getting things like RXDY too. Weird. */ neo_parse_isr(brd, port); continue; case UART_17158_MSR: /* * MSR or flow control was seen. */ neo_parse_isr(brd, port); continue; default: /* * The UART triggered us with a bogus interrupt type. * It appears the Exar chip, when REALLY bogged down, will throw * these once and awhile. * Its harmless, just ignore it and move on. */ jsm_printk(INTR, ERR, &brd->pci_dev, "%s:%d Unknown Interrupt type: %x\n", __FILE__, __LINE__, type); continue; } } spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); jsm_printk(INTR, INFO, &brd->pci_dev, "finish.\n"); return IRQ_HANDLED; } /* * Neo specific way of turning off the receiver. * Used as a way to enforce queue flow control when in * hardware flow control mode. 
 */
static void neo_disable_receiver(struct jsm_channel *ch)
{
	u8 tmp = readb(&ch->ch_neo_uart->ier);

	tmp &= ~(UART_IER_RDI);
	writeb(tmp, &ch->ch_neo_uart->ier);

	/* flush write operation */
	neo_pci_posting_flush(ch->ch_bd);
}

/*
 * Neo specific way of turning on the receiver.
 *
 * Used as a way to un-enforce queue flow control when in
 * hardware flow control mode.
 */
static void neo_enable_receiver(struct jsm_channel *ch)
{
	u8 tmp = readb(&ch->ch_neo_uart->ier);

	tmp |= (UART_IER_RDI);
	writeb(tmp, &ch->ch_neo_uart->ier);

	/* flush write operation */
	neo_pci_posting_flush(ch->ch_bd);
}

/*
 * Send the configured XON character, unless start-character handling
 * is disabled for this channel (__DISABLED_CHAR).
 */
static void neo_send_start_character(struct jsm_channel *ch)
{
	if (!ch)
		return;

	if (ch->ch_startc != __DISABLED_CHAR) {
		ch->ch_xon_sends++;
		writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);

		/* flush write operation */
		neo_pci_posting_flush(ch->ch_bd);
	}
}

/*
 * Send the configured XOFF character, unless stop-character handling
 * is disabled for this channel (__DISABLED_CHAR).
 */
static void neo_send_stop_character(struct jsm_channel *ch)
{
	if (!ch)
		return;

	if (ch->ch_stopc != __DISABLED_CHAR) {
		ch->ch_xoff_sends++;
		writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);

		/* flush write operation */
		neo_pci_posting_flush(ch->ch_bd);
	}
}

/*
 * neo_uart_init
 *
 * Bring the UART to a known state: interrupts masked, enhanced mode
 * enabled, FIFOs cleared, and the cached modem-control signals
 * re-asserted.
 */
static void neo_uart_init(struct jsm_channel *ch)
{
	writeb(0, &ch->ch_neo_uart->ier);
	writeb(0, &ch->ch_neo_uart->efr);
	writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);

	/* Clear out UART and FIFO */
	readb(&ch->ch_neo_uart->txrx);
	writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
	       &ch->ch_neo_uart->isr_fcr);
	readb(&ch->ch_neo_uart->lsr);
	readb(&ch->ch_neo_uart->msr);

	ch->ch_flags |= CH_FIFO_ENABLED;

	/* Assert any signals we want up */
	writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
}

/*
 * Make the UART completely turn off.
 */
static void neo_uart_off(struct jsm_channel *ch)
{
	/* Turn off UART enhanced bits */
	writeb(0, &ch->ch_neo_uart->efr);

	/* Stop all interrupts from occurring. */
	writeb(0, &ch->ch_neo_uart->ier);
}

/*
 * Report whether the transmitter still holds data: returns 1 while the
 * TX shift register/FIFO is non-empty, 0 once fully drained (in which
 * case the empty/low-water flags are also set on the channel).
 */
static u32 neo_get_uart_bytes_left(struct jsm_channel *ch)
{
	u8 left = 0;
	u8 lsr = readb(&ch->ch_neo_uart->lsr);

	/* We must cache the LSR as some of the bits get reset once read... */
	ch->ch_cached_lsr |= lsr;

	/* Determine whether the Transmitter is empty or not */
	if (!(lsr & UART_LSR_TEMT))
		left = 1;
	else {
		ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
		left = 0;
	}

	return left;
}

/* Channel lock MUST be held by the calling function! */
static void neo_send_break(struct jsm_channel *ch)
{
	/*
	 * NOTE(review): the original comment here talked about setting
	 * "the time we should stop sending the break", but no timer is
	 * set in this function — the break stays asserted until
	 * clear_break runs.  Presumably a leftover from an older
	 * implementation; confirm against neo_clear_break.
	 */

	/* Tell the UART to start sending the break */
	if (!(ch->ch_flags & CH_BREAK_SENDING)) {
		u8 temp = readb(&ch->ch_neo_uart->lcr);

		writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
		ch->ch_flags |= (CH_BREAK_SENDING);

		/* flush write operation */
		neo_pci_posting_flush(ch->ch_bd);
	}
}

/*
 * neo_send_immediate_char.
 *
 * Sends a specific character as soon as possible to the UART,
 * jumping over any bytes that might be in the write queue.
 *
 * The channel lock MUST be held by the calling function.
 */
static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c)
{
	if (!ch)
		return;

	writeb(c, &ch->ch_neo_uart->txrx);

	/* flush write operation */
	neo_pci_posting_flush(ch->ch_bd);
}

/* Board operations vector for Neo-based (Exar) adapters. */
struct board_ops jsm_neo_ops = {
	.intr				= neo_intr,
	.uart_init			= neo_uart_init,
	.uart_off			= neo_uart_off,
	.param				= neo_param,
	.assert_modem_signals		= neo_assert_modem_signals,
	.flush_uart_write		= neo_flush_uart_write,
	.flush_uart_read		= neo_flush_uart_read,
	.disable_receiver		= neo_disable_receiver,
	.enable_receiver		= neo_enable_receiver,
	.send_break			= neo_send_break,
	.clear_break			= neo_clear_break,
	.send_start_character		= neo_send_start_character,
	.send_stop_character		= neo_send_stop_character,
	.copy_data_from_queue_to_uart	= neo_copy_data_from_queue_to_uart,
	.get_uart_bytes_left		= neo_get_uart_bytes_left,
	.send_immediate_char		= neo_send_immediate_char
};
gpl-2.0
Schischu/android_kernel_samsung_lt03lte
tools/usb/ffs-test.c
2719
12215
/* * ffs-test.c.c -- user mode filesystem api for usb composite function * * Copyright (C) 2010 Samsung Electronics * Author: Michal Nazarewicz <mina86@mina86.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* $(CROSS_COMPILE)cc -Wall -Wextra -g -o ffs-test ffs-test.c -lpthread */ #define _BSD_SOURCE /* for endian.h */ #include <endian.h> #include <errno.h> #include <fcntl.h> #include <pthread.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <tools/le_byteshift.h> #include "../../include/linux/usb/functionfs.h" /******************** Little Endian Handling ********************************/ #define cpu_to_le16(x) htole16(x) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define le16_to_cpu(x) le16toh(x) /******************** Messages and Errors ***********************************/ static const char argv0[] = "ffs-test"; static unsigned verbosity = 7; static void _msg(unsigned level, const char *fmt, ...) 
{ if (level < 2) level = 2; else if (level > 7) level = 7; if (level <= verbosity) { static const char levels[8][6] = { [2] = "crit:", [3] = "err: ", [4] = "warn:", [5] = "note:", [6] = "info:", [7] = "dbg: " }; int _errno = errno; va_list ap; fprintf(stderr, "%s: %s ", argv0, levels[level]); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); if (fmt[strlen(fmt) - 1] != '\n') { char buffer[128]; strerror_r(_errno, buffer, sizeof buffer); fprintf(stderr, ": (-%d) %s\n", _errno, buffer); } fflush(stderr); } } #define die(...) (_msg(2, __VA_ARGS__), exit(1)) #define err(...) _msg(3, __VA_ARGS__) #define warn(...) _msg(4, __VA_ARGS__) #define note(...) _msg(5, __VA_ARGS__) #define info(...) _msg(6, __VA_ARGS__) #define debug(...) _msg(7, __VA_ARGS__) #define die_on(cond, ...) do { \ if (cond) \ die(__VA_ARGS__); \ } while (0) /******************** Descriptors and Strings *******************************/ static const struct { struct usb_functionfs_descs_head header; struct { struct usb_interface_descriptor intf; struct usb_endpoint_descriptor_no_audio sink; struct usb_endpoint_descriptor_no_audio source; } __attribute__((packed)) fs_descs, hs_descs; } __attribute__((packed)) descriptors = { .header = { .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC), .length = cpu_to_le32(sizeof descriptors), .fs_count = 3, .hs_count = 3, }, .fs_descs = { .intf = { .bLength = sizeof descriptors.fs_descs.intf, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .iInterface = 1, }, .sink = { .bLength = sizeof descriptors.fs_descs.sink, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 1 | USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* .wMaxPacketSize = autoconfiguration (kernel) */ }, .source = { .bLength = sizeof descriptors.fs_descs.source, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 2 | USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* .wMaxPacketSize = autoconfiguration (kernel) */ }, }, .hs_descs 
= { .intf = { .bLength = sizeof descriptors.fs_descs.intf, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .iInterface = 1, }, .sink = { .bLength = sizeof descriptors.hs_descs.sink, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 1 | USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }, .source = { .bLength = sizeof descriptors.hs_descs.source, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 2 | USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), .bInterval = 1, /* NAK every 1 uframe */ }, }, }; #define STR_INTERFACE_ "Source/Sink" static const struct { struct usb_functionfs_strings_head header; struct { __le16 code; const char str1[sizeof STR_INTERFACE_]; } __attribute__((packed)) lang0; } __attribute__((packed)) strings = { .header = { .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC), .length = cpu_to_le32(sizeof strings), .str_count = cpu_to_le32(1), .lang_count = cpu_to_le32(1), }, .lang0 = { cpu_to_le16(0x0409), /* en-us */ STR_INTERFACE_, }, }; #define STR_INTERFACE strings.lang0.str1 /******************** Files and Threads Handling ****************************/ struct thread; static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes); static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes); static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes); static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes); static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes); static struct thread { const char *const filename; size_t buf_size; ssize_t (*in)(struct thread *, void *, size_t); const char *const in_name; ssize_t (*out)(struct thread *, const void *, size_t); const char *const out_name; int fd; pthread_t id; void *buf; ssize_t status; } threads[] = { { "ep0", 4 * sizeof(struct usb_functionfs_event), read_wrap, NULL, ep0_consume, "<consume>", 
0, 0, NULL, 0 }, { "ep1", 8 * 1024, fill_in_buf, "<in>", write_wrap, NULL, 0, 0, NULL, 0 }, { "ep2", 8 * 1024, read_wrap, NULL, empty_out_buf, "<out>", 0, 0, NULL, 0 }, }; static void init_thread(struct thread *t) { t->buf = malloc(t->buf_size); die_on(!t->buf, "malloc"); t->fd = open(t->filename, O_RDWR); die_on(t->fd < 0, "%s", t->filename); } static void cleanup_thread(void *arg) { struct thread *t = arg; int ret, fd; fd = t->fd; if (t->fd < 0) return; t->fd = -1; /* test the FIFO ioctls (non-ep0 code paths) */ if (t != threads) { ret = ioctl(fd, FUNCTIONFS_FIFO_STATUS); if (ret < 0) { /* ENODEV reported after disconnect */ if (errno != ENODEV) err("%s: get fifo status", t->filename); } else if (ret) { warn("%s: unclaimed = %d\n", t->filename, ret); if (ioctl(fd, FUNCTIONFS_FIFO_FLUSH) < 0) err("%s: fifo flush", t->filename); } } if (close(fd) < 0) err("%s: close", t->filename); free(t->buf); t->buf = NULL; } static void *start_thread_helper(void *arg) { const char *name, *op, *in_name, *out_name; struct thread *t = arg; ssize_t ret; info("%s: starts\n", t->filename); in_name = t->in_name ? t->in_name : t->filename; out_name = t->out_name ? 
t->out_name : t->filename; pthread_cleanup_push(cleanup_thread, arg); for (;;) { pthread_testcancel(); ret = t->in(t, t->buf, t->buf_size); if (ret > 0) { ret = t->out(t, t->buf, t->buf_size); name = out_name; op = "write"; } else { name = in_name; op = "read"; } if (ret > 0) { /* nop */ } else if (!ret) { debug("%s: %s: EOF", name, op); break; } else if (errno == EINTR || errno == EAGAIN) { debug("%s: %s", name, op); } else { warn("%s: %s", name, op); break; } } pthread_cleanup_pop(1); t->status = ret; info("%s: ends\n", t->filename); return NULL; } static void start_thread(struct thread *t) { debug("%s: starting\n", t->filename); die_on(pthread_create(&t->id, NULL, start_thread_helper, t) < 0, "pthread_create(%s)", t->filename); } static void join_thread(struct thread *t) { int ret = pthread_join(t->id, NULL); if (ret < 0) err("%s: joining thread", t->filename); else debug("%s: joined\n", t->filename); } static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes) { return read(t->fd, buf, nbytes); } static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes) { return write(t->fd, buf, nbytes); } /******************** Empty/Fill buffer routines ****************************/ /* 0 -- stream of zeros, 1 -- i % 63, 2 -- pipe */ enum pattern { PAT_ZERO, PAT_SEQ, PAT_PIPE }; static enum pattern pattern; static ssize_t fill_in_buf(struct thread *ignore, void *buf, size_t nbytes) { size_t i; __u8 *p; (void)ignore; switch (pattern) { case PAT_ZERO: memset(buf, 0, nbytes); break; case PAT_SEQ: for (p = buf, i = 0; i < nbytes; ++i, ++p) *p = i % 63; break; case PAT_PIPE: return fread(buf, 1, nbytes, stdin); } return nbytes; } static ssize_t empty_out_buf(struct thread *ignore, const void *buf, size_t nbytes) { const __u8 *p; __u8 expected; ssize_t ret; size_t len; (void)ignore; switch (pattern) { case PAT_ZERO: expected = 0; for (p = buf, len = 0; len < nbytes; ++p, ++len) if (*p) goto invalid; break; case PAT_SEQ: for (p = buf, len = 0; len < 
nbytes; ++p, ++len) if (*p != len % 63) { expected = len % 63; goto invalid; } break; case PAT_PIPE: ret = fwrite(buf, nbytes, 1, stdout); if (ret > 0) fflush(stdout); break; invalid: err("bad OUT byte %zd, expected %02x got %02x\n", len, expected, *p); for (p = buf, len = 0; len < nbytes; ++p, ++len) { if (0 == (len % 32)) fprintf(stderr, "%4zd:", len); fprintf(stderr, " %02x", *p); if (31 == (len % 32)) fprintf(stderr, "\n"); } fflush(stderr); errno = EILSEQ; return -1; } return len; } /******************** Endpoints routines ************************************/ static void handle_setup(const struct usb_ctrlrequest *setup) { printf("bRequestType = %d\n", setup->bRequestType); printf("bRequest = %d\n", setup->bRequest); printf("wValue = %d\n", le16_to_cpu(setup->wValue)); printf("wIndex = %d\n", le16_to_cpu(setup->wIndex)); printf("wLength = %d\n", le16_to_cpu(setup->wLength)); } static ssize_t ep0_consume(struct thread *ignore, const void *buf, size_t nbytes) { static const char *const names[] = { [FUNCTIONFS_BIND] = "BIND", [FUNCTIONFS_UNBIND] = "UNBIND", [FUNCTIONFS_ENABLE] = "ENABLE", [FUNCTIONFS_DISABLE] = "DISABLE", [FUNCTIONFS_SETUP] = "SETUP", [FUNCTIONFS_SUSPEND] = "SUSPEND", [FUNCTIONFS_RESUME] = "RESUME", }; const struct usb_functionfs_event *event = buf; size_t n; (void)ignore; for (n = nbytes / sizeof *event; n; --n, ++event) switch (event->type) { case FUNCTIONFS_BIND: case FUNCTIONFS_UNBIND: case FUNCTIONFS_ENABLE: case FUNCTIONFS_DISABLE: case FUNCTIONFS_SETUP: case FUNCTIONFS_SUSPEND: case FUNCTIONFS_RESUME: printf("Event %s\n", names[event->type]); if (event->type == FUNCTIONFS_SETUP) handle_setup(&event->u.setup); break; default: printf("Event %03u (unknown)\n", event->type); } return nbytes; } static void ep0_init(struct thread *t) { ssize_t ret; info("%s: writing descriptors\n", t->filename); ret = write(t->fd, &descriptors, sizeof descriptors); die_on(ret < 0, "%s: write: descriptors", t->filename); info("%s: writing strings\n", 
t->filename); ret = write(t->fd, &strings, sizeof strings); die_on(ret < 0, "%s: write: strings", t->filename); } /******************** Main **************************************************/ int main(void) { unsigned i; /* XXX TODO: Argument parsing missing */ init_thread(threads); ep0_init(threads); for (i = 1; i < sizeof threads / sizeof *threads; ++i) init_thread(threads + i); for (i = 1; i < sizeof threads / sizeof *threads; ++i) start_thread(threads + i); start_thread_helper(threads); for (i = 1; i < sizeof threads / sizeof *threads; ++i) join_thread(threads + i); return 0; }
gpl-2.0
arasilinux/arasievm-kernel
arch/x86/kernel/tce_64.c
5023
4149
/*
 * This file manages the translation entries for the IBM Calgary IOMMU.
 *
 * Derived from arch/powerpc/platforms/pseries/iommu.c
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Jon Mason <jdmason@us.ibm.com>
 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>

/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();	/* no clflush: flush the whole cache */
}

/*
 * Populate 'npages' consecutive TCEs starting at 'index' so they map
 * the kernel-virtual range beginning at 'uaddr'.  Each entry is
 * written big-endian and flushed so the IOMMU sees it.
 */
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	/* read permission always; write unless device-bound transfer */
	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;

		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}

/* Clear 'npages' TCEs starting at 'index', flushing each to memory. */
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp  = ((u64*)tbl->it_base) + index;

	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}

static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	/*
	 * size is the order of the table, 0-7
	 * smallest table is 8K entries, so shift result by 13 to
	 * multiply by 8K
	 */
	return (1 << size) << 13;
}

/*
 * Fill in the bus number, size, allocation bitmap and lock of 'tbl'.
 * Returns 0 on success or -ENOMEM if the bitmap cannot be allocated.
 */
static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* set the tce table size - measured in entries */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/*
	 * number of bytes needed for the bitmap size in number of
	 * entries; we need one bit per entry
	 */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}

/*
 * Allocate and initialize an iommu_table for 'dev' and attach it to
 * the bus sysdata.  BUGs if the bus already has a table.
 */
int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}

/*
 * Boot-time allocation of the TCE table itself; the table is aligned
 * to its own size (required by the hardware, presumably — see the
 * matching alignment argument below).
 */
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}

/* Release a table obtained from alloc_tce_table(); NULL is a no-op. */
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	free_bootmem(__pa(tbl), size);
}
gpl-2.0
CyanogenMod/android_kernel_htc_msm8974
drivers/net/ethernet/cisco/enic/vnic_wq.c
7327
4826
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/slab.h> #include "vnic_dev.h" #include "vnic_wq.h" static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { struct vnic_wq_buf *buf; struct vnic_dev *vdev; unsigned int i, j, count = wq->ring.desc_count; unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); vdev = wq->vdev; for (i = 0; i < blks; i++) { wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!wq->bufs[i]) return -ENOMEM; } for (i = 0; i < blks; i++) { buf = wq->bufs[i]; for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; buf->desc = (u8 *)wq->ring.descs + wq->ring.desc_size * buf->index; if (buf->index + 1 == count) { buf->next = wq->bufs[0]; break; } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { buf->next = wq->bufs[i + 1]; } else { buf->next = buf + 1; buf++; } } } wq->to_use = wq->to_clean = wq->bufs[0]; return 0; } void vnic_wq_free(struct vnic_wq *wq) { struct vnic_dev *vdev; unsigned int i; vdev = wq->vdev; vnic_dev_free_desc_ring(vdev, &wq->ring); for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { if 
(wq->bufs[i]) { kfree(wq->bufs[i]); wq->bufs[i] = NULL; } } wq->ctrl = NULL; } int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { int err; wq->index = index; wq->vdev = vdev; wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { pr_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } vnic_wq_disable(wq); err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); if (err) return err; err = vnic_wq_alloc_bufs(wq); if (err) { vnic_wq_free(wq); return err; } return 0; } static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &wq->ctrl->ring_base); iowrite32(count, &wq->ctrl->ring_size); iowrite32(fetch_index, &wq->ctrl->fetch_index); iowrite32(posted_index, &wq->ctrl->posted_index); iowrite32(cq_index, &wq->ctrl->cq_index); iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); iowrite32(0, &wq->ctrl->error_status); wq->to_use = wq->to_clean = &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); } unsigned int vnic_wq_error_status(struct vnic_wq *wq) { return ioread32(&wq->ctrl->error_status); } void vnic_wq_enable(struct vnic_wq *wq) { iowrite32(1, &wq->ctrl->enable); } int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; iowrite32(0, &wq->ctrl->enable); /* Wait for HW to ACK disable request */ for (wait = 0; wait < 
1000; wait++) { if (!(ioread32(&wq->ctrl->running))) return 0; udelay(10); } pr_err("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } void vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) { struct vnic_wq_buf *buf; buf = wq->to_clean; while (vnic_wq_desc_used(wq) > 0) { (*buf_clean)(wq, buf); buf = wq->to_clean = buf->next; wq->ring.desc_avail++; } wq->to_use = wq->to_clean = wq->bufs[0]; iowrite32(0, &wq->ctrl->fetch_index); iowrite32(0, &wq->ctrl->posted_index); iowrite32(0, &wq->ctrl->error_status); vnic_dev_clear_desc_ring(&wq->ring); }
gpl-2.0
brymaster5000/Lunar_Max
arch/unicore32/kernel/setup.c
7327
7677
/*
 * linux/arch/unicore32/kernel/setup.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>

#include "setup.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

/* Tiny per-mode exception stacks (3 words each). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
static void __init setup_processor(void)
{
	printk(KERN_DEFAULT "CPU: UniCore-II [%08x] revision %d, cr=%08lx\n",
	       uc32_cpuid, (int)(uc32_cpuid >> 16) & 15, cr_alignment);

	sprintf(init_utsname()->machine, "puv3");
	sprintf(elf_platform, "ucv2");
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	/*
	 * setup stacks for re-entrant exception handlers
	 * (switch into each exception mode in turn, point its sp just
	 * past the matching 3-word stack, then return to PRIV mode)
	 */
	__asm__ (
	"mov.a asr, %1\n\t"
	"add sp, %0, %2\n\t"
	"mov.a asr, %3\n\t"
	"add sp, %0, %4\n\t"
	"mov.a asr, %5\n\t"
	"add sp, %0, %6\n\t"
	"mov.a asr, %7"
	    :
	    : "r" (stk),
	      "r" (PSR_R_BIT | PSR_I_BIT | INTR_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "r" (PSR_R_BIT | PSR_I_BIT | ABRT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "r" (PSR_R_BIT | PSR_I_BIT | EXTN_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "r" (PSR_R_BIT | PSR_I_BIT | PRIV_MODE)
	    : "r30", "cc");
}

/*
 * Register a RAM bank with the meminfo table, page-aligning the range.
 * Returns 0 on success, -EINVAL when the table is full or the rounded
 * region is empty.
 */
static int __init uc32_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank->start = PAGE_ALIGN(start);
	bank->size = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 1;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem) {
		usermem = 0;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	uc32_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

/*
 * Register "System RAM" resources for every memory bank, and nest the
 * kernel code/data resources inside the bank that contains them.
 */
static void __init
request_standard_resources(struct meminfo *mi)
{
	struct resource *res;
	int i;

	kernel_code.start = virt_to_phys(_stext);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = mi->bank[i].start;
		res->end = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

/*
 * Main architecture setup: runs once from start_kernel().  The call
 * order below (parse_early_param before memblock/paging init, cpu_init
 * after paging) is deliberate — do not reorder.
 */
void __init setup_arch(char **cmdline_p)
{
	char *from = default_command_line;

	setup_processor();

	init_mm.start_code = (unsigned long) _stext;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	uc32_memblock_init(&meminfo);

	paging_init();
	request_standard_resources(&meminfo);

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_machine = puv3_core_init;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}

static struct cpu cpuinfo_unicore;

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i)
		register_cpu(&cpuinfo_unicore, i);

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
/* Create the (empty) /proc/cpu directory. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

/* /proc/cpuinfo: single entry describing the (sole) UniCore-II CPU. */
static int c_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Processor\t: UniCore-II rev %d (%s)\n",
		   (int)(uc32_cpuid >> 16) & 15, elf_platform);

	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);

	/* dump out the processor features */
	seq_puts(m, "Features\t: CMOV UC-F64");

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", uc32_cpuid >> 24);
	seq_printf(m, "CPU architecture: 2\n");
	seq_printf(m, "CPU revision\t: %d\n", (uc32_cpuid >> 16) & 15);

	seq_printf(m, "Cache type\t: write-back\n"
			"Cache clean\t: cp0 c5 ops\n"
			"Cache lockdown\t: not support\n"
			"Cache format\t: Harvard\n");

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: PKUnity v3\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
gpl-2.0
ashwinr64/android_kernel_motorola_msm8974
drivers/staging/speakup/speakup_apollo.c
7583
6834
/*
 * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
 * this version considerably modified by David Borowski, david575@rogers.com
 *
 * Copyright (C) 1998-99  Kirk Reiser.
 * Copyright (C) 2003 David Borowski.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * this code is specificly written as a driver for the speakup screenreview
 * package and is not a general device driver.
 */
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>

#include "spk_priv.h"
#include "serialio.h"
#include "speakup.h"

#define DRV_VERSION "2.21"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r'

static void do_catch_up(struct spk_synth *synth);

/* Synthesizer variables: the format strings are the Apollo's command
 * codes, followed by default/min/max values. */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"cap, " } },
	{ CAPS_STOP, .u.s = {"" } },
	{ RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } },
	{ PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } },
	{ VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } },
	{ VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } },
	{ LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};

/*
 * These attributes will appear in /sys/accessibility/speakup/apollo.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute lang_attribute =
	__ATTR(lang, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
	__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, USER_RW, spk_var_show, spk_var_store);

static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&lang_attribute.attr,
	&pitch_attribute.attr,
	&rate_attribute.attr,
	&voice_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

/* Driver description handed to the speakup core via synth_add(). */
static struct spk_synth synth_apollo = {
	.name = "apollo",
	.version = DRV_VERSION,
	.long_name = "Apollo",
	.init = "@R3@D0@K1\r",
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 40000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = serial_synth_probe,
	.release = spk_serial_release,
	.synth_immediate = spk_synth_immediate,
	.catch_up = do_catch_up,
	.flush = spk_synth_flush,
	.is_alive = spk_synth_is_alive_restart,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = NULL,
	.indexing = {
		.command = NULL,
		.lowindex = 0,
		.highindex = 0,
		.currindex = 0,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "apollo",
	},
};

/*
 * Kthread worker: drain the speakup output buffer to the serial port,
 * one character at a time.  The speakup lock protects the buffer and
 * the tunable values, and must never be held across a sleep — note the
 * strict lock/peek/unlock/write ordering below.
 */
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	struct var_t *full_time;
	int full_time_val = 0;
	int delay_time_val = 0;
	int jiffy_delta_val = 0;

	jiffy_delta = get_var(JIFFY);
	delay_time = get_var(DELAY);
	full_time = get_var(FULL);
	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);
	jiff_max = jiffies + jiffy_delta_val;

	while (!kthread_should_stop()) {
		spk_lock(flags);
		jiffy_delta_val = jiffy_delta->u.n.value;
		full_time_val = full_time->u.n.value;
		delay_time_val = delay_time->u.n.value;
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spk_unlock(flags);
		if (!spk_serial_out(ch)) {
			/* synth not accepting data: toggle DTR/RTS to wake
			 * it, then back off for full_time ms and retry */
			outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
			outb(UART_MCR_DTR | UART_MCR_RTS,
					speakup_info.port_tts + UART_MCR);
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		if ((jiffies >= jiff_max) && (ch == SPACE)) {
			/* time slice used up: flush at a word boundary and
			 * give the synth delay_time (or full_time) to speak */
			spk_lock(flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			full_time_val = full_time->u.n.value;
			delay_time_val = delay_time->u.n.value;
			spk_unlock(flags);
			if (spk_serial_out(synth->procspeech))
				schedule_timeout(msecs_to_jiffies
						 (delay_time_val));
			else
				schedule_timeout(msecs_to_jiffies
						 (full_time_val));
			jiff_max = jiffies + jiffy_delta_val;
		}
		set_current_state(TASK_RUNNING);
		spk_lock(flags);
		synth_buffer_getc();	/* consume the char we just sent */
		spk_unlock(flags);
	}
	spk_serial_out(PROCSPEECH);
}

module_param_named(ser, synth_apollo.ser, int, S_IRUGO);
module_param_named(start, synth_apollo.startup, short, S_IRUGO);

MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

static int __init apollo_init(void)
{
	return synth_add(&synth_apollo);
}

static void __exit apollo_exit(void)
{
	synth_remove(&synth_apollo);
}

module_init(apollo_init);
module_exit(apollo_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Apollo II synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
xiaogaogao/linuxFromDigilent
drivers/staging/speakup/speakup_dectlk.c
7583
8873
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. 
*/ #include <linux/unistd.h> #include <linux/proc_fs.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "speakup.h" #include "spk_priv.h" #include "serialio.h" #define DRV_VERSION "2.20" #define SYNTH_CLEAR 0x03 #define PROCSPEECH 0x0b static int xoff; static inline int synth_full(void) { return xoff; } static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static void read_buff_add(u_char c); static unsigned char get_index(void); static int in_escape; static int is_flushing; static spinlock_t flush_lock; static DECLARE_WAIT_QUEUE_HEAD(flush); static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/dectlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306}; static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73}; static struct spk_synth synth_dectlk = { .name = "dectlk", .version = DRV_VERSION, .long_name = "Dectalk Express", .init = "[:error sp :name paul :rate 180 :tsr off] ", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .default_pitch = ap_defaults, .default_vol = g5_defaults, .probe = serial_synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = read_buff_add, .get_index = get_index, .indexing = { .command = "[:in re %d ] ", .lowindex = 1, .highindex = 8, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "dectlk", }, }; static int is_indnum(u_char *ch) { if ((*ch >= '0') && (*ch <= '9')) { *ch = *ch - '0'; return 1; } return 0; } static u_char lastind; static unsigned char get_index(void) { u_char rv; rv = lastind; lastind = 0; return rv; } static void read_buff_add(u_char c) { static int ind = -1; if (c == 0x01) { unsigned long flags; spin_lock_irqsave(&flush_lock, flags); is_flushing = 0; wake_up_interruptible(&flush); spin_unlock_irqrestore(&flush_lock, flags); } else if (c == 0x13) { xoff = 1; } else if (c == 0x11) { xoff = 0; } else if (is_indnum(&c)) { if (ind == -1) ind = c; else ind = ind * 10 + c; } else if ((c > 31) && (c < 127)) { 
if (ind != -1) lastind = (u_char)ind; ind = -1; } } static void do_catch_up(struct spk_synth *synth) { int synth_full_val = 0; static u_char ch; static u_char last = '\0'; unsigned long flags; unsigned long jiff_max; unsigned long timeout = msecs_to_jiffies(4000); DEFINE_WAIT(wait); struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val; int delay_time_val; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { /* if no ctl-a in 4, send data anyway */ spin_lock_irqsave(&flush_lock, flags); while (is_flushing && timeout) { prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&flush_lock, flags); timeout = schedule_timeout(timeout); spin_lock_irqsave(&flush_lock, flags); } finish_wait(&flush, &wait); is_flushing = 0; spin_unlock_irqrestore(&flush_lock, flags); spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; synth_full_val = synth_full(); spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (synth_full_val || !spk_serial_out(ch)) { schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) spk_serial_out(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) spk_serial_out(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; } if (!in_escape) 
spk_serial_out(PROCSPEECH); } static void synth_flush(struct spk_synth *synth) { if (in_escape) { /* if in command output ']' so we don't get an error */ spk_serial_out(']'); } in_escape = 0; is_flushing = 1; spk_serial_out(SYNTH_CLEAR); } module_param_named(ser, synth_dectlk.ser, int, S_IRUGO); module_param_named(start, synth_dectlk.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init dectlk_init(void) { return synth_add(&synth_dectlk); } static void __exit dectlk_exit(void) { synth_remove(&synth_dectlk); } module_init(dectlk_init); module_exit(dectlk_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
Plain-Devices/android_kernel_lge_msm8974
arch/arm/plat-mxc/devices/platform-mxc_rnga.c
8095
1333
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> struct imx_mxc_rnga_data { resource_size_t iobase; }; #define imx_mxc_rnga_data_entry_single(soc) \ { \ .iobase = soc ## _RNGA_BASE_ADDR, \ } #ifdef CONFIG_SOC_IMX31 static const struct imx_mxc_rnga_data imx31_mxc_rnga_data __initconst = imx_mxc_rnga_data_entry_single(MX31); #endif /* ifdef CONFIG_SOC_IMX31 */ static struct platform_device *__init imx_add_mxc_rnga( const struct imx_mxc_rnga_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_16K - 1, .flags = IORESOURCE_MEM, }, }; return imx_add_platform_device("mxc_rnga", -1, res, ARRAY_SIZE(res), NULL, 0); } static int __init imxXX_add_mxc_rnga(void) { struct platform_device *ret; #if defined(CONFIG_SOC_IMX31) if (cpu_is_mx31()) ret = imx_add_mxc_rnga(&imx31_mxc_rnga_data); else #endif /* if defined(CONFIG_SOC_IMX31) */ ret = ERR_PTR(-ENODEV); if (IS_ERR(ret)) return PTR_ERR(ret); return 0; } arch_initcall(imxXX_add_mxc_rnga);
gpl-2.0
tsfs/Vybrid-Linux
sound/usb/caiaq/control.c
8351
19099
/* * Copyright (c) 2007 Daniel Mack * friendly supported by NI. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/usb.h> #include <sound/control.h> #include <sound/core.h> #include <sound/pcm.h> #include "device.h" #include "control.h" #define CNT_INTVAL 0x10000 static int control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_usb_audio *chip = snd_kcontrol_chip(kcontrol); struct snd_usb_caiaqdev *dev = caiaqdev(chip->card); int pos = kcontrol->private_value; int is_intval = pos & CNT_INTVAL; int maxval = 63; uinfo->count = 1; pos &= ~CNT_INTVAL; switch (dev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): if (pos == 0) { /* current input mode of A8DJ and A4DJ */ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->value.integer.min = 0; uinfo->value.integer.max = 2; return 0; } break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1): maxval = 127; break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4): maxval = 31; break; } if (is_intval) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->value.integer.min = 0; uinfo->value.integer.max = maxval; } else { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->value.integer.min = 0; 
uinfo->value.integer.max = 1; } return 0; } static int control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_usb_audio *chip = snd_kcontrol_chip(kcontrol); struct snd_usb_caiaqdev *dev = caiaqdev(chip->card); int pos = kcontrol->private_value; if (pos & CNT_INTVAL) ucontrol->value.integer.value[0] = dev->control_state[pos & ~CNT_INTVAL]; else ucontrol->value.integer.value[0] = !!(dev->control_state[pos / 8] & (1 << pos % 8)); return 0; } static int control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_usb_audio *chip = snd_kcontrol_chip(kcontrol); struct snd_usb_caiaqdev *dev = caiaqdev(chip->card); int pos = kcontrol->private_value; int v = ucontrol->value.integer.value[0]; unsigned char cmd = EP1_CMD_WRITE_IO; if (dev->chip.usb_id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1)) cmd = EP1_CMD_DIMM_LEDS; if (pos & CNT_INTVAL) { int i = pos & ~CNT_INTVAL; dev->control_state[i] = v; if (dev->chip.usb_id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4)) { int actual_len; dev->ep8_out_buf[0] = i; dev->ep8_out_buf[1] = v; usb_bulk_msg(dev->chip.dev, usb_sndbulkpipe(dev->chip.dev, 8), dev->ep8_out_buf, sizeof(dev->ep8_out_buf), &actual_len, 200); } else { snd_usb_caiaq_send_command(dev, cmd, dev->control_state, sizeof(dev->control_state)); } } else { if (v) dev->control_state[pos / 8] |= 1 << (pos % 8); else dev->control_state[pos / 8] &= ~(1 << (pos % 8)); snd_usb_caiaq_send_command(dev, cmd, dev->control_state, sizeof(dev->control_state)); } return 1; } static struct snd_kcontrol_new kcontrol_template __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .index = 0, .info = control_info, .get = control_get, .put = control_put, /* name and private_value filled later */ }; struct caiaq_controller { char *name; int index; }; static struct caiaq_controller ak1_controller[] = { { "LED left", 2 }, { "LED middle", 1 }, { "LED 
right", 0 }, { "LED ring", 3 } }; static struct caiaq_controller rk2_controller[] = { { "LED 1", 5 }, { "LED 2", 4 }, { "LED 3", 3 }, { "LED 4", 2 }, { "LED 5", 1 }, { "LED 6", 0 }, { "LED pedal", 6 }, { "LED 7seg_1b", 8 }, { "LED 7seg_1c", 9 }, { "LED 7seg_2a", 10 }, { "LED 7seg_2b", 11 }, { "LED 7seg_2c", 12 }, { "LED 7seg_2d", 13 }, { "LED 7seg_2e", 14 }, { "LED 7seg_2f", 15 }, { "LED 7seg_2g", 16 }, { "LED 7seg_3a", 17 }, { "LED 7seg_3b", 18 }, { "LED 7seg_3c", 19 }, { "LED 7seg_3d", 20 }, { "LED 7seg_3e", 21 }, { "LED 7seg_3f", 22 }, { "LED 7seg_3g", 23 } }; static struct caiaq_controller rk3_controller[] = { { "LED 7seg_1a", 0 + 0 }, { "LED 7seg_1b", 0 + 1 }, { "LED 7seg_1c", 0 + 2 }, { "LED 7seg_1d", 0 + 3 }, { "LED 7seg_1e", 0 + 4 }, { "LED 7seg_1f", 0 + 5 }, { "LED 7seg_1g", 0 + 6 }, { "LED 7seg_1p", 0 + 7 }, { "LED 7seg_2a", 8 + 0 }, { "LED 7seg_2b", 8 + 1 }, { "LED 7seg_2c", 8 + 2 }, { "LED 7seg_2d", 8 + 3 }, { "LED 7seg_2e", 8 + 4 }, { "LED 7seg_2f", 8 + 5 }, { "LED 7seg_2g", 8 + 6 }, { "LED 7seg_2p", 8 + 7 }, { "LED 7seg_3a", 16 + 0 }, { "LED 7seg_3b", 16 + 1 }, { "LED 7seg_3c", 16 + 2 }, { "LED 7seg_3d", 16 + 3 }, { "LED 7seg_3e", 16 + 4 }, { "LED 7seg_3f", 16 + 5 }, { "LED 7seg_3g", 16 + 6 }, { "LED 7seg_3p", 16 + 7 }, { "LED 7seg_4a", 24 + 0 }, { "LED 7seg_4b", 24 + 1 }, { "LED 7seg_4c", 24 + 2 }, { "LED 7seg_4d", 24 + 3 }, { "LED 7seg_4e", 24 + 4 }, { "LED 7seg_4f", 24 + 5 }, { "LED 7seg_4g", 24 + 6 }, { "LED 7seg_4p", 24 + 7 }, { "LED 1", 32 + 0 }, { "LED 2", 32 + 1 }, { "LED 3", 32 + 2 }, { "LED 4", 32 + 3 }, { "LED 5", 32 + 4 }, { "LED 6", 32 + 5 }, { "LED 7", 32 + 6 }, { "LED 8", 32 + 7 }, { "LED pedal", 32 + 8 } }; static struct caiaq_controller kore_controller[] = { { "LED F1", 8 | CNT_INTVAL }, { "LED F2", 12 | CNT_INTVAL }, { "LED F3", 0 | CNT_INTVAL }, { "LED F4", 4 | CNT_INTVAL }, { "LED F5", 11 | CNT_INTVAL }, { "LED F6", 15 | CNT_INTVAL }, { "LED F7", 3 | CNT_INTVAL }, { "LED F8", 7 | CNT_INTVAL }, { "LED touch1", 10 | CNT_INTVAL }, { 
"LED touch2", 14 | CNT_INTVAL }, { "LED touch3", 2 | CNT_INTVAL }, { "LED touch4", 6 | CNT_INTVAL }, { "LED touch5", 9 | CNT_INTVAL }, { "LED touch6", 13 | CNT_INTVAL }, { "LED touch7", 1 | CNT_INTVAL }, { "LED touch8", 5 | CNT_INTVAL }, { "LED left", 18 | CNT_INTVAL }, { "LED right", 22 | CNT_INTVAL }, { "LED up", 16 | CNT_INTVAL }, { "LED down", 20 | CNT_INTVAL }, { "LED stop", 23 | CNT_INTVAL }, { "LED play", 21 | CNT_INTVAL }, { "LED record", 19 | CNT_INTVAL }, { "LED listen", 17 | CNT_INTVAL }, { "LED lcd", 30 | CNT_INTVAL }, { "LED menu", 28 | CNT_INTVAL }, { "LED sound", 31 | CNT_INTVAL }, { "LED esc", 29 | CNT_INTVAL }, { "LED view", 27 | CNT_INTVAL }, { "LED enter", 24 | CNT_INTVAL }, { "LED control", 26 | CNT_INTVAL } }; static struct caiaq_controller a8dj_controller[] = { { "Current input mode", 0 | CNT_INTVAL }, { "GND lift for TC Vinyl mode", 24 + 0 }, { "GND lift for TC CD/Line mode", 24 + 1 }, { "GND lift for phono mode", 24 + 2 }, { "Software lock", 40 } }; static struct caiaq_controller a4dj_controller[] = { { "Current input mode", 0 | CNT_INTVAL } }; static struct caiaq_controller kontrolx1_controller[] = { { "LED FX A: ON", 7 | CNT_INTVAL }, { "LED FX A: 1", 6 | CNT_INTVAL }, { "LED FX A: 2", 5 | CNT_INTVAL }, { "LED FX A: 3", 4 | CNT_INTVAL }, { "LED FX B: ON", 3 | CNT_INTVAL }, { "LED FX B: 1", 2 | CNT_INTVAL }, { "LED FX B: 2", 1 | CNT_INTVAL }, { "LED FX B: 3", 0 | CNT_INTVAL }, { "LED Hotcue", 28 | CNT_INTVAL }, { "LED Shift (white)", 29 | CNT_INTVAL }, { "LED Shift (green)", 30 | CNT_INTVAL }, { "LED Deck A: FX1", 24 | CNT_INTVAL }, { "LED Deck A: FX2", 25 | CNT_INTVAL }, { "LED Deck A: IN", 17 | CNT_INTVAL }, { "LED Deck A: OUT", 16 | CNT_INTVAL }, { "LED Deck A: < BEAT", 19 | CNT_INTVAL }, { "LED Deck A: BEAT >", 18 | CNT_INTVAL }, { "LED Deck A: CUE/ABS", 21 | CNT_INTVAL }, { "LED Deck A: CUP/REL", 20 | CNT_INTVAL }, { "LED Deck A: PLAY", 23 | CNT_INTVAL }, { "LED Deck A: SYNC", 22 | CNT_INTVAL }, { "LED Deck B: FX1", 26 | CNT_INTVAL }, 
{ "LED Deck B: FX2", 27 | CNT_INTVAL }, { "LED Deck B: IN", 15 | CNT_INTVAL }, { "LED Deck B: OUT", 14 | CNT_INTVAL }, { "LED Deck B: < BEAT", 13 | CNT_INTVAL }, { "LED Deck B: BEAT >", 12 | CNT_INTVAL }, { "LED Deck B: CUE/ABS", 11 | CNT_INTVAL }, { "LED Deck B: CUP/REL", 10 | CNT_INTVAL }, { "LED Deck B: PLAY", 9 | CNT_INTVAL }, { "LED Deck B: SYNC", 8 | CNT_INTVAL }, }; static struct caiaq_controller kontrols4_controller[] = { { "LED: Master: Quant", 10 | CNT_INTVAL }, { "LED: Master: Headphone", 11 | CNT_INTVAL }, { "LED: Master: Master", 12 | CNT_INTVAL }, { "LED: Master: Snap", 14 | CNT_INTVAL }, { "LED: Master: Warning", 15 | CNT_INTVAL }, { "LED: Master: Master button", 112 | CNT_INTVAL }, { "LED: Master: Snap button", 113 | CNT_INTVAL }, { "LED: Master: Rec", 118 | CNT_INTVAL }, { "LED: Master: Size", 119 | CNT_INTVAL }, { "LED: Master: Quant button", 120 | CNT_INTVAL }, { "LED: Master: Browser button", 121 | CNT_INTVAL }, { "LED: Master: Play button", 126 | CNT_INTVAL }, { "LED: Master: Undo button", 127 | CNT_INTVAL }, { "LED: Channel A: >", 4 | CNT_INTVAL }, { "LED: Channel A: <", 5 | CNT_INTVAL }, { "LED: Channel A: Meter 1", 97 | CNT_INTVAL }, { "LED: Channel A: Meter 2", 98 | CNT_INTVAL }, { "LED: Channel A: Meter 3", 99 | CNT_INTVAL }, { "LED: Channel A: Meter 4", 100 | CNT_INTVAL }, { "LED: Channel A: Meter 5", 101 | CNT_INTVAL }, { "LED: Channel A: Meter 6", 102 | CNT_INTVAL }, { "LED: Channel A: Meter clip", 103 | CNT_INTVAL }, { "LED: Channel A: Active", 114 | CNT_INTVAL }, { "LED: Channel A: Cue", 116 | CNT_INTVAL }, { "LED: Channel A: FX1", 149 | CNT_INTVAL }, { "LED: Channel A: FX2", 148 | CNT_INTVAL }, { "LED: Channel B: >", 2 | CNT_INTVAL }, { "LED: Channel B: <", 3 | CNT_INTVAL }, { "LED: Channel B: Meter 1", 89 | CNT_INTVAL }, { "LED: Channel B: Meter 2", 90 | CNT_INTVAL }, { "LED: Channel B: Meter 3", 91 | CNT_INTVAL }, { "LED: Channel B: Meter 4", 92 | CNT_INTVAL }, { "LED: Channel B: Meter 5", 93 | CNT_INTVAL }, { "LED: Channel B: 
Meter 6", 94 | CNT_INTVAL }, { "LED: Channel B: Meter clip", 95 | CNT_INTVAL }, { "LED: Channel B: Active", 122 | CNT_INTVAL }, { "LED: Channel B: Cue", 125 | CNT_INTVAL }, { "LED: Channel B: FX1", 147 | CNT_INTVAL }, { "LED: Channel B: FX2", 146 | CNT_INTVAL }, { "LED: Channel C: >", 6 | CNT_INTVAL }, { "LED: Channel C: <", 7 | CNT_INTVAL }, { "LED: Channel C: Meter 1", 105 | CNT_INTVAL }, { "LED: Channel C: Meter 2", 106 | CNT_INTVAL }, { "LED: Channel C: Meter 3", 107 | CNT_INTVAL }, { "LED: Channel C: Meter 4", 108 | CNT_INTVAL }, { "LED: Channel C: Meter 5", 109 | CNT_INTVAL }, { "LED: Channel C: Meter 6", 110 | CNT_INTVAL }, { "LED: Channel C: Meter clip", 111 | CNT_INTVAL }, { "LED: Channel C: Active", 115 | CNT_INTVAL }, { "LED: Channel C: Cue", 117 | CNT_INTVAL }, { "LED: Channel C: FX1", 151 | CNT_INTVAL }, { "LED: Channel C: FX2", 150 | CNT_INTVAL }, { "LED: Channel D: >", 0 | CNT_INTVAL }, { "LED: Channel D: <", 1 | CNT_INTVAL }, { "LED: Channel D: Meter 1", 81 | CNT_INTVAL }, { "LED: Channel D: Meter 2", 82 | CNT_INTVAL }, { "LED: Channel D: Meter 3", 83 | CNT_INTVAL }, { "LED: Channel D: Meter 4", 84 | CNT_INTVAL }, { "LED: Channel D: Meter 5", 85 | CNT_INTVAL }, { "LED: Channel D: Meter 6", 86 | CNT_INTVAL }, { "LED: Channel D: Meter clip", 87 | CNT_INTVAL }, { "LED: Channel D: Active", 123 | CNT_INTVAL }, { "LED: Channel D: Cue", 124 | CNT_INTVAL }, { "LED: Channel D: FX1", 145 | CNT_INTVAL }, { "LED: Channel D: FX2", 144 | CNT_INTVAL }, { "LED: Deck A: 1 (blue)", 22 | CNT_INTVAL }, { "LED: Deck A: 1 (green)", 23 | CNT_INTVAL }, { "LED: Deck A: 2 (blue)", 20 | CNT_INTVAL }, { "LED: Deck A: 2 (green)", 21 | CNT_INTVAL }, { "LED: Deck A: 3 (blue)", 18 | CNT_INTVAL }, { "LED: Deck A: 3 (green)", 19 | CNT_INTVAL }, { "LED: Deck A: 4 (blue)", 16 | CNT_INTVAL }, { "LED: Deck A: 4 (green)", 17 | CNT_INTVAL }, { "LED: Deck A: Load", 44 | CNT_INTVAL }, { "LED: Deck A: Deck C button", 45 | CNT_INTVAL }, { "LED: Deck A: In", 47 | CNT_INTVAL }, { "LED: Deck A: 
Out", 46 | CNT_INTVAL }, { "LED: Deck A: Shift", 24 | CNT_INTVAL }, { "LED: Deck A: Sync", 27 | CNT_INTVAL }, { "LED: Deck A: Cue", 26 | CNT_INTVAL }, { "LED: Deck A: Play", 25 | CNT_INTVAL }, { "LED: Deck A: Tempo up", 33 | CNT_INTVAL }, { "LED: Deck A: Tempo down", 32 | CNT_INTVAL }, { "LED: Deck A: Master", 34 | CNT_INTVAL }, { "LED: Deck A: Keylock", 35 | CNT_INTVAL }, { "LED: Deck A: Deck A", 37 | CNT_INTVAL }, { "LED: Deck A: Deck C", 36 | CNT_INTVAL }, { "LED: Deck A: Samples", 38 | CNT_INTVAL }, { "LED: Deck A: On Air", 39 | CNT_INTVAL }, { "LED: Deck A: Sample 1", 31 | CNT_INTVAL }, { "LED: Deck A: Sample 2", 30 | CNT_INTVAL }, { "LED: Deck A: Sample 3", 29 | CNT_INTVAL }, { "LED: Deck A: Sample 4", 28 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - A", 55 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - B", 54 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - C", 53 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - D", 52 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - E", 51 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - F", 50 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - G", 49 | CNT_INTVAL }, { "LED: Deck A: Digit 1 - dot", 48 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - A", 63 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - B", 62 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - C", 61 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - D", 60 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - E", 59 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - F", 58 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - G", 57 | CNT_INTVAL }, { "LED: Deck A: Digit 2 - dot", 56 | CNT_INTVAL }, { "LED: Deck B: 1 (blue)", 78 | CNT_INTVAL }, { "LED: Deck B: 1 (green)", 79 | CNT_INTVAL }, { "LED: Deck B: 2 (blue)", 76 | CNT_INTVAL }, { "LED: Deck B: 2 (green)", 77 | CNT_INTVAL }, { "LED: Deck B: 3 (blue)", 74 | CNT_INTVAL }, { "LED: Deck B: 3 (green)", 75 | CNT_INTVAL }, { "LED: Deck B: 4 (blue)", 72 | CNT_INTVAL }, { "LED: Deck B: 4 (green)", 73 | CNT_INTVAL }, { "LED: Deck B: Load", 180 | CNT_INTVAL }, { "LED: Deck B: Deck D button", 181 | CNT_INTVAL }, { "LED: 
Deck B: In", 183 | CNT_INTVAL }, { "LED: Deck B: Out", 182 | CNT_INTVAL }, { "LED: Deck B: Shift", 64 | CNT_INTVAL }, { "LED: Deck B: Sync", 67 | CNT_INTVAL }, { "LED: Deck B: Cue", 66 | CNT_INTVAL }, { "LED: Deck B: Play", 65 | CNT_INTVAL }, { "LED: Deck B: Tempo up", 185 | CNT_INTVAL }, { "LED: Deck B: Tempo down", 184 | CNT_INTVAL }, { "LED: Deck B: Master", 186 | CNT_INTVAL }, { "LED: Deck B: Keylock", 187 | CNT_INTVAL }, { "LED: Deck B: Deck B", 189 | CNT_INTVAL }, { "LED: Deck B: Deck D", 188 | CNT_INTVAL }, { "LED: Deck B: Samples", 190 | CNT_INTVAL }, { "LED: Deck B: On Air", 191 | CNT_INTVAL }, { "LED: Deck B: Sample 1", 71 | CNT_INTVAL }, { "LED: Deck B: Sample 2", 70 | CNT_INTVAL }, { "LED: Deck B: Sample 3", 69 | CNT_INTVAL }, { "LED: Deck B: Sample 4", 68 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - A", 175 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - B", 174 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - C", 173 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - D", 172 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - E", 171 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - F", 170 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - G", 169 | CNT_INTVAL }, { "LED: Deck B: Digit 1 - dot", 168 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - A", 167 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - B", 166 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - C", 165 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - D", 164 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - E", 163 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - F", 162 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - G", 161 | CNT_INTVAL }, { "LED: Deck B: Digit 2 - dot", 160 | CNT_INTVAL }, { "LED: FX1: dry/wet", 153 | CNT_INTVAL }, { "LED: FX1: 1", 154 | CNT_INTVAL }, { "LED: FX1: 2", 155 | CNT_INTVAL }, { "LED: FX1: 3", 156 | CNT_INTVAL }, { "LED: FX1: Mode", 157 | CNT_INTVAL }, { "LED: FX2: dry/wet", 129 | CNT_INTVAL }, { "LED: FX2: 1", 130 | CNT_INTVAL }, { "LED: FX2: 2", 131 | CNT_INTVAL }, { "LED: FX2: 3", 132 | CNT_INTVAL }, { "LED: FX2: Mode", 133 | CNT_INTVAL }, }; static 
int __devinit add_controls(struct caiaq_controller *c, int num, struct snd_usb_caiaqdev *dev) { int i, ret; struct snd_kcontrol *kc; for (i = 0; i < num; i++, c++) { kcontrol_template.name = c->name; kcontrol_template.private_value = c->index; kc = snd_ctl_new1(&kcontrol_template, dev); ret = snd_ctl_add(dev->chip.card, kc); if (ret < 0) return ret; } return 0; } int __devinit snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *dev) { int ret = 0; switch (dev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1): ret = add_controls(ak1_controller, ARRAY_SIZE(ak1_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2): ret = add_controls(rk2_controller, ARRAY_SIZE(rk2_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3): ret = add_controls(rk3_controller, ARRAY_SIZE(rk3_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2): ret = add_controls(kore_controller, ARRAY_SIZE(kore_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): ret = add_controls(a8dj_controller, ARRAY_SIZE(a8dj_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): ret = add_controls(a4dj_controller, ARRAY_SIZE(a4dj_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1): ret = add_controls(kontrolx1_controller, ARRAY_SIZE(kontrolx1_controller), dev); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4): ret = add_controls(kontrols4_controller, ARRAY_SIZE(kontrols4_controller), dev); break; } return ret; }
gpl-2.0
cellphone/lge_p880_kernel_3.0
net/phonet/pep-gprs.c
9119
6982
/*
 * File: pep-gprs.c
 *
 * GPRS over Phonet pipe end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <net/sock.h>

#include <linux/if_phonet.h>
#include <net/tcp_states.h>
#include <net/phonet/gprs.h>

#define GPRS_DEFAULT_MTU 1400

/*
 * Per-interface state: the Phonet pipe socket carrying the GPRS data,
 * the socket callbacks saved at attach time (restored on detach), and
 * the backing network device.
 */
struct gprs_dev {
	struct sock *sk;
	void (*old_state_change)(struct sock *);
	void (*old_data_ready)(struct sock *, int);
	void (*old_write_space)(struct sock *);

	struct net_device *dev;
};

/*
 * Classify an incoming packet as IPv4 or IPv6 by peeking at the IP
 * version nibble of its first byte.  Returns the Ethernet protocol id
 * in network byte order, or htons(0) if the byte cannot be read or the
 * version is neither 4 nor 6.
 */
static __be16 gprs_type_trans(struct sk_buff *skb)
{
	const u8 *pvfc;
	u8 buf;

	pvfc = skb_header_pointer(skb, 0, 1, &buf);
	if (!pvfc)
		return htons(0);
	/* Look at IP version field */
	switch (*pvfc >> 4) {
	case 4:
		return htons(ETH_P_IP);
	case 6:
		return htons(ETH_P_IPV6);
	}
	return htons(0);
}

/* Wake the device TX queue whenever the underlying pipe can take data. */
static void gprs_writeable(struct gprs_dev *gp)
{
	struct net_device *dev = gp->dev;

	if (pep_writeable(gp->sk))
		netif_wake_queue(dev);
}

/*
 * Socket callbacks
 */

/*
 * State-change hook installed on the pipe socket: when the peer closes
 * (TCP_CLOSE_WAIT), stop transmitting and drop the carrier.
 */
static void gprs_state_change(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;

	if (sk->sk_state == TCP_CLOSE_WAIT) {
		struct net_device *dev = gp->dev;

		netif_stop_queue(dev);
		netif_carrier_off(dev);
	}
}

/*
 * Hand one received skb up to the network stack.  Consumes the skb in
 * all cases (passed to netif_rx() or freed).  Returns 0 on success, a
 * negative errno (-EINVAL unknown protocol, -ENOBUFS allocation
 * failure, -ENODEV interface down) on drop.
 */
static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
{
	struct net_device *dev = gp->dev;
	int err = 0;
	__be16 protocol = gprs_type_trans(skb);

	if (!protocol) {
		err = -EINVAL;
		goto drop;
	}

	if (skb_headroom(skb) & 3) {
		struct sk_buff *rskb, *fs;
		int flen = 0;

		/* Phonet Pipe data header may be misaligned (3 bytes),
		 * so wrap the IP packet as a single fragment of an head-less
		 * socket buffer. The network stack will pull what it needs,
		 * but at least, the whole IP payload is not memcpy'd. */
		rskb = netdev_alloc_skb(dev, 0);
		if (!rskb) {
			err = -ENOBUFS;
			goto drop;
		}
		skb_shinfo(rskb)->frag_list = skb;
		rskb->len += skb->len;
		rskb->data_len += rskb->len;
		rskb->truesize += rskb->len;

		/* Avoid nested fragments: splice the original skb's frag
		 * list onto ->next and subtract its length from the skb,
		 * since the wrapper now accounts for the whole payload. */
		skb_walk_frags(skb, fs)
			flen += fs->len;
		skb->next = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->len -= flen;
		skb->data_len -= flen;
		skb->truesize -= flen;

		skb = rskb;
	}

	skb->protocol = protocol;
	skb_reset_mac_header(skb);
	skb->dev = dev;

	if (likely(dev->flags & IFF_UP)) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
		skb = NULL;	/* ownership passed to the stack */
	} else
		err = -ENODEV;

drop:
	if (skb) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
	}
	return err;
}

/*
 * Data-ready hook: drain every pending skb from the pipe and feed it
 * to gprs_recv().  skb_orphan() detaches the skb from the socket's
 * receive accounting before it is re-owned by the netdev path.
 */
static void gprs_data_ready(struct sock *sk, int len)
{
	struct gprs_dev *gp = sk->sk_user_data;
	struct sk_buff *skb;

	while ((skb = pep_read(sk)) != NULL) {
		skb_orphan(skb);
		gprs_recv(gp, skb);
	}
}

/* Write-space hook: re-enable TX when the pipe becomes writeable. */
static void gprs_write_space(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;

	if (netif_running(gp->dev))
		gprs_writeable(gp);
}

/*
 * Network device callbacks
 */

/* ndo_open: start TX if the pipe is already writeable. */
static int gprs_open(struct net_device *dev)
{
	struct gprs_dev *gp = netdev_priv(dev);

	gprs_writeable(gp);
	return 0;
}

/* ndo_stop: just quiesce the TX queue. */
static int gprs_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/*
 * ndo_start_xmit: push one IP packet into the Phonet pipe.
 * Non-IP packets are silently dropped.  Always returns NETDEV_TX_OK;
 * pep_write() consumes the skb either way, so TX errors are only
 * reflected in the device statistics.  The queue is stopped after each
 * packet and immediately re-woken if the pipe still has room,
 * effectively serializing with the pipe's flow control.
 */
static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gprs_dev *gp = netdev_priv(dev);
	struct sock *sk = gp->sk;
	int len, err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb_orphan(skb);
	skb_set_owner_w(skb, sk);
	len = skb->len;	/* save before pep_write() consumes the skb */
	err = pep_write(sk, skb);
	if (err) {
		LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n", dev->name, err);
		dev->stats.tx_aborted_errors++;
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	}

	netif_stop_queue(dev);
	if (pep_writeable(sk))
		netif_wake_queue(dev);
	return NETDEV_TX_OK;
}

/*
 * ndo_change_mtu: bound the MTU between the IPv4 minimum-reassembly
 * size (576) and the Phonet maximum minus the pipe overhead
 * (PHONET_MAX_MTU - 11).
 */
static int gprs_set_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 576) || (new_mtu > (PHONET_MAX_MTU - 11)))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops gprs_netdev_ops = {
	.ndo_open	= gprs_open,
	.ndo_stop	= gprs_close,
	.ndo_start_xmit	= gprs_xmit,
	.ndo_change_mtu	= gprs_set_mtu,
};

/*
 * alloc_netdev() setup callback: a point-to-point, header-less,
 * ARP-less device carrying raw IP over the Phonet pipe.
 */
static void gprs_setup(struct net_device *dev)
{
	dev->features		= NETIF_F_FRAGLIST;
	dev->type		= ARPHRD_PHONET_PIPE;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= GPRS_DEFAULT_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;

	dev->netdev_ops		= &gprs_netdev_ops;
	dev->destructor		= free_netdev;
}

/*
 * External interface
 */

/*
 * Attach a GPRS interface to a datagram socket.
 * Returns the interface index on success, negative error code on error.
 *
 * Under lock_sock() the socket is validated (not already attached, not
 * closed/listening/dead), its callbacks are saved into the gprs_dev and
 * replaced with this driver's hooks, and a reference is taken with
 * sock_hold() (released by gprs_detach()).
 */
int gprs_attach(struct sock *sk)
{
	static const char ifname[] = "gprs%d";
	struct gprs_dev *gp;
	struct net_device *dev;
	int err;

	if (unlikely(sk->sk_type == SOCK_STREAM))
		return -EINVAL; /* need packet boundaries */

	/* Create net device */
	dev = alloc_netdev(sizeof(*gp), ifname, gprs_setup);
	if (!dev)
		return -ENOMEM;
	gp = netdev_priv(dev);
	gp->sk = sk;
	gp->dev = dev;
	netif_stop_queue(dev);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	lock_sock(sk);
	if (unlikely(sk->sk_user_data)) {
		err = -EBUSY;	/* socket already attached elsewhere */
		goto out_rel;
	}
	if (unlikely((1 << sk->sk_state & (TCPF_CLOSE|TCPF_LISTEN)) ||
			sock_flag(sk, SOCK_DEAD))) {
		err = -EINVAL;
		goto out_rel;
	}
	sk->sk_user_data	= gp;
	gp->old_state_change	= sk->sk_state_change;
	gp->old_data_ready	= sk->sk_data_ready;
	gp->old_write_space	= sk->sk_write_space;
	sk->sk_state_change	= gprs_state_change;
	sk->sk_data_ready	= gprs_data_ready;
	sk->sk_write_space	= gprs_write_space;
	release_sock(sk);

	sock_hold(sk);

	printk(KERN_DEBUG"%s: attached\n", dev->name);
	return dev->ifindex;

out_rel:
	release_sock(sk);
	unregister_netdev(dev);
	return err;
}

/*
 * Detach the GPRS interface from its socket: restore the original
 * socket callbacks under lock_sock(), tear the net device down, and
 * drop the reference taken by gprs_attach().
 */
void gprs_detach(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;
	struct net_device *dev = gp->dev;

	lock_sock(sk);
	sk->sk_user_data = NULL;
	sk->sk_state_change = gp->old_state_change;
	sk->sk_data_ready = gp->old_data_ready;
	sk->sk_write_space = gp->old_write_space;
	release_sock(sk);

	printk(KERN_DEBUG"%s: detached\n", dev->name);
	unregister_netdev(dev);
	sock_put(sk);
}
gpl-2.0
harryxue1999/linux
arch/tile/kernel/tile-desc_32.c
9375
113746
/* TILEPro opcode information. * * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * * * * */ /* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */ #define BFD_RELOC(x) -1 /* Special registers. */ #define TREG_LR 55 #define TREG_SN 56 #define TREG_ZERO 63 #include <linux/stddef.h> #include <asm/tile-desc.h> const struct tilepro_opcode tilepro_opcodes[395] = { { "bpt", TILEPRO_OPC_BPT, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "info", TILEPRO_OPC_INFO, 0xf, 1, TREG_ZERO, 1, { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } }, }, { "infol", TILEPRO_OPC_INFOL, 0x3, 1, TREG_ZERO, 1, { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } }, }, { "j", TILEPRO_OPC_J, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, }, { "jal", TILEPRO_OPC_JAL, 0x2, 1, TREG_LR, 1, { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, }, { "move", TILEPRO_OPC_MOVE, 0xf, 2, TREG_ZERO, 1, { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } }, }, { "move.sn", TILEPRO_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1, { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "movei", TILEPRO_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1, { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } }, }, { "movei.sn", TILEPRO_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1, { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "moveli", TILEPRO_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "moveli.sn", TILEPRO_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "movelis", TILEPRO_OPC_MOVELIS, 
0x3, 2, TREG_SN, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch", TILEPRO_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } }, }, { "raise", TILEPRO_OPC_RAISE, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "add", TILEPRO_OPC_ADD, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "add.sn", TILEPRO_OPC_ADD_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addb", TILEPRO_OPC_ADDB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addb.sn", TILEPRO_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addbs_u", TILEPRO_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addbs_u.sn", TILEPRO_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addh", TILEPRO_OPC_ADDH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addh.sn", TILEPRO_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addhs", TILEPRO_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addhs.sn", TILEPRO_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addi", TILEPRO_OPC_ADDI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "addi.sn", TILEPRO_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addib", TILEPRO_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addib.sn", TILEPRO_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addih", TILEPRO_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { 
"addih.sn", TILEPRO_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addli", TILEPRO_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addli.sn", TILEPRO_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addlis", TILEPRO_OPC_ADDLIS, 0x3, 3, TREG_SN, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "adds", TILEPRO_OPC_ADDS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "adds.sn", TILEPRO_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "adiffb_u", TILEPRO_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffb_u.sn", TILEPRO_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffh", TILEPRO_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffh.sn", TILEPRO_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "and", TILEPRO_OPC_AND, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "and.sn", TILEPRO_OPC_AND_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "andi", TILEPRO_OPC_ANDI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "andi.sn", TILEPRO_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "auli", TILEPRO_OPC_AULI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "avgb_u", TILEPRO_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "avgb_u.sn", TILEPRO_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "avgh", TILEPRO_OPC_AVGH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, 
{ 0, } }, }, { "avgh.sn", TILEPRO_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bbns", TILEPRO_OPC_BBNS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbns.sn", TILEPRO_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbnst", TILEPRO_OPC_BBNST, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbnst.sn", TILEPRO_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbs", TILEPRO_OPC_BBS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbs.sn", TILEPRO_OPC_BBS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbst", TILEPRO_OPC_BBST, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbst.sn", TILEPRO_OPC_BBST_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgez", TILEPRO_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgez.sn", TILEPRO_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgezt", TILEPRO_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgezt.sn", TILEPRO_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgz", TILEPRO_OPC_BGZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgz.sn", TILEPRO_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgzt", TILEPRO_OPC_BGZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgzt.sn", TILEPRO_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bitx", TILEPRO_OPC_BITX, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "bitx.sn", TILEPRO_OPC_BITX_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "blez", 
TILEPRO_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blez.sn", TILEPRO_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blezt", TILEPRO_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blezt.sn", TILEPRO_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blz", TILEPRO_OPC_BLZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blz.sn", TILEPRO_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blzt", TILEPRO_OPC_BLZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blzt.sn", TILEPRO_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnz", TILEPRO_OPC_BNZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnz.sn", TILEPRO_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnzt", TILEPRO_OPC_BNZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnzt.sn", TILEPRO_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bytex", TILEPRO_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "bytex.sn", TILEPRO_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bz", TILEPRO_OPC_BZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bz.sn", TILEPRO_OPC_BZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bzt", TILEPRO_OPC_BZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bzt.sn", TILEPRO_OPC_BZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "clz", TILEPRO_OPC_CLZ, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "clz.sn", TILEPRO_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 
0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_32", TILEPRO_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_32.sn", TILEPRO_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_8", TILEPRO_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_8.sn", TILEPRO_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "ctz", TILEPRO_OPC_CTZ, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "ctz.sn", TILEPRO_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "drain", TILEPRO_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "dtlbpr", TILEPRO_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "dword_align", TILEPRO_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "dword_align.sn", TILEPRO_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "finv", TILEPRO_OPC_FINV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "flush", TILEPRO_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "fnop", TILEPRO_OPC_FNOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "icoh", TILEPRO_OPC_ICOH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "ill", TILEPRO_OPC_ILL, 0xa, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { }, { 0, } }, }, { "inthb", TILEPRO_OPC_INTHB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthb.sn", TILEPRO_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthh", TILEPRO_OPC_INTHH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthh.sn", TILEPRO_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 
}, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlb", TILEPRO_OPC_INTLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlb.sn", TILEPRO_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlh", TILEPRO_OPC_INTLH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlh.sn", TILEPRO_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inv", TILEPRO_OPC_INV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "iret", TILEPRO_OPC_IRET, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "jalb", TILEPRO_OPC_JALB, 0x2, 1, TREG_LR, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jalf", TILEPRO_OPC_JALF, 0x2, 1, TREG_LR, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jalr", TILEPRO_OPC_JALR, 0x2, 1, TREG_LR, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jalrp", TILEPRO_OPC_JALRP, 0x2, 1, TREG_LR, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jb", TILEPRO_OPC_JB, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jf", TILEPRO_OPC_JF, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jr", TILEPRO_OPC_JR, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jrp", TILEPRO_OPC_JRP, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "lb", TILEPRO_OPC_LB, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lb.sn", TILEPRO_OPC_LB_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lb_u", TILEPRO_OPC_LB_U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lb_u.sn", TILEPRO_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd", TILEPRO_OPC_LBADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd.sn", TILEPRO_OPC_LBADD_SN, 
0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd_u", TILEPRO_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd_u.sn", TILEPRO_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lh", TILEPRO_OPC_LH, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lh.sn", TILEPRO_OPC_LH_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lh_u", TILEPRO_OPC_LH_U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lh_u.sn", TILEPRO_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd", TILEPRO_OPC_LHADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd.sn", TILEPRO_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd_u", TILEPRO_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd_u.sn", TILEPRO_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lnk", TILEPRO_OPC_LNK, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "lnk.sn", TILEPRO_OPC_LNK_SN, 0x2, 1, TREG_SN, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "lw", TILEPRO_OPC_LW, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lw.sn", TILEPRO_OPC_LW_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lw_na", TILEPRO_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lw_na.sn", TILEPRO_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd", TILEPRO_OPC_LWADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd.sn", TILEPRO_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd_na", TILEPRO_OPC_LWADD_NA, 0x2, 3, 
TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd_na.sn", TILEPRO_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxb_u", TILEPRO_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxb_u.sn", TILEPRO_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxh", TILEPRO_OPC_MAXH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxh.sn", TILEPRO_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxib_u", TILEPRO_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxib_u.sn", TILEPRO_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxih", TILEPRO_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxih.sn", TILEPRO_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "mf", TILEPRO_OPC_MF, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "mfspr", TILEPRO_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } }, }, { "minb_u", TILEPRO_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minb_u.sn", TILEPRO_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minh", TILEPRO_OPC_MINH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minh.sn", TILEPRO_OPC_MINH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minib_u", TILEPRO_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minib_u.sn", TILEPRO_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minih", 
TILEPRO_OPC_MINIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minih.sn", TILEPRO_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "mm", TILEPRO_OPC_MM, 0x3, 5, TREG_ZERO, 1, { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } }, }, { "mnz", TILEPRO_OPC_MNZ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "mnz.sn", TILEPRO_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzb", TILEPRO_OPC_MNZB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzb.sn", TILEPRO_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzh", TILEPRO_OPC_MNZH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzh.sn", TILEPRO_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mtspr", TILEPRO_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_ss", TILEPRO_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulhh_ss.sn", TILEPRO_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_su", TILEPRO_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_su.sn", TILEPRO_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_uu", TILEPRO_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulhh_uu.sn", TILEPRO_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_ss", TILEPRO_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhha_ss.sn", TILEPRO_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 
1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_su", TILEPRO_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_su.sn", TILEPRO_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_uu", TILEPRO_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhha_uu.sn", TILEPRO_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhhsa_uu", TILEPRO_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhhsa_uu.sn", TILEPRO_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_ss", TILEPRO_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_ss.sn", TILEPRO_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_su", TILEPRO_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_su.sn", TILEPRO_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_us", TILEPRO_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_us.sn", TILEPRO_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_uu", TILEPRO_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_uu.sn", TILEPRO_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_ss", TILEPRO_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_ss.sn", TILEPRO_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_su", TILEPRO_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, 
}, { 0, } }, }, { "mulhla_su.sn", TILEPRO_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_us", TILEPRO_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_us.sn", TILEPRO_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_uu", TILEPRO_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_uu.sn", TILEPRO_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhlsa_uu", TILEPRO_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhlsa_uu.sn", TILEPRO_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_ss", TILEPRO_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulll_ss.sn", TILEPRO_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_su", TILEPRO_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_su.sn", TILEPRO_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_uu", TILEPRO_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulll_uu.sn", TILEPRO_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_ss", TILEPRO_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mullla_ss.sn", TILEPRO_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_su", TILEPRO_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_su.sn", TILEPRO_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, 
} }, }, { "mullla_uu", TILEPRO_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mullla_uu.sn", TILEPRO_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulllsa_uu", TILEPRO_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulllsa_uu.sn", TILEPRO_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mvnz", TILEPRO_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mvnz.sn", TILEPRO_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mvz", TILEPRO_OPC_MVZ, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mvz.sn", TILEPRO_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mz", TILEPRO_OPC_MZ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "mz.sn", TILEPRO_OPC_MZ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzb", TILEPRO_OPC_MZB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzb.sn", TILEPRO_OPC_MZB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzh", TILEPRO_OPC_MZH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzh.sn", TILEPRO_OPC_MZH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "nap", TILEPRO_OPC_NAP, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "nop", TILEPRO_OPC_NOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "nor", TILEPRO_OPC_NOR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "nor.sn", TILEPRO_OPC_NOR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, 
}, { "or", TILEPRO_OPC_OR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "or.sn", TILEPRO_OPC_OR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "ori", TILEPRO_OPC_ORI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "ori.sn", TILEPRO_OPC_ORI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "packbs_u", TILEPRO_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packbs_u.sn", TILEPRO_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhb", TILEPRO_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhb.sn", TILEPRO_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhs", TILEPRO_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhs.sn", TILEPRO_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packlb", TILEPRO_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packlb.sn", TILEPRO_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "pcnt", TILEPRO_OPC_PCNT, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "pcnt.sn", TILEPRO_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "rl", TILEPRO_OPC_RL, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "rl.sn", TILEPRO_OPC_RL_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "rli", TILEPRO_OPC_RLI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "rli.sn", TILEPRO_OPC_RLI_SN, 0x3, 
3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "s1a", TILEPRO_OPC_S1A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s1a.sn", TILEPRO_OPC_S1A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "s2a", TILEPRO_OPC_S2A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s2a.sn", TILEPRO_OPC_S2A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "s3a", TILEPRO_OPC_S3A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s3a.sn", TILEPRO_OPC_S3A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sadab_u", TILEPRO_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadab_u.sn", TILEPRO_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah", TILEPRO_OPC_SADAH, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah.sn", TILEPRO_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah_u", TILEPRO_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah_u.sn", TILEPRO_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadb_u", TILEPRO_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadb_u.sn", TILEPRO_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh", TILEPRO_OPC_SADH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh.sn", TILEPRO_OPC_SADH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh_u", TILEPRO_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh_u.sn", 
TILEPRO_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sb", TILEPRO_OPC_SB, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "sbadd", TILEPRO_OPC_SBADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "seq", TILEPRO_OPC_SEQ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "seq.sn", TILEPRO_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqb", TILEPRO_OPC_SEQB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqb.sn", TILEPRO_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqh", TILEPRO_OPC_SEQH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqh.sn", TILEPRO_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqi", TILEPRO_OPC_SEQI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "seqi.sn", TILEPRO_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqib", TILEPRO_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqib.sn", TILEPRO_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqih", TILEPRO_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqih.sn", TILEPRO_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sh", TILEPRO_OPC_SH, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "shadd", TILEPRO_OPC_SHADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "shl", TILEPRO_OPC_SHL, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } 
}, }, { "shl.sn", TILEPRO_OPC_SHL_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlb", TILEPRO_OPC_SHLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlb.sn", TILEPRO_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlh", TILEPRO_OPC_SHLH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlh.sn", TILEPRO_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shli", TILEPRO_OPC_SHLI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "shli.sn", TILEPRO_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlib", TILEPRO_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlib.sn", TILEPRO_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlih", TILEPRO_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlih.sn", TILEPRO_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shr", TILEPRO_OPC_SHR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "shr.sn", TILEPRO_OPC_SHR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrb", TILEPRO_OPC_SHRB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrb.sn", TILEPRO_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrh", TILEPRO_OPC_SHRH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrh.sn", TILEPRO_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shri", TILEPRO_OPC_SHRI, 0xf, 3, TREG_ZERO, 1, { { 
7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "shri.sn", TILEPRO_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrib", TILEPRO_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrib.sn", TILEPRO_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrih", TILEPRO_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrih.sn", TILEPRO_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "slt", TILEPRO_OPC_SLT, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slt.sn", TILEPRO_OPC_SLT_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slt_u", TILEPRO_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slt_u.sn", TILEPRO_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb", TILEPRO_OPC_SLTB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb.sn", TILEPRO_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb_u", TILEPRO_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb_u.sn", TILEPRO_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slte", TILEPRO_OPC_SLTE, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slte.sn", TILEPRO_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slte_u", TILEPRO_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slte_u.sn", TILEPRO_OPC_SLTE_U_SN, 0x3, 3, 
TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb", TILEPRO_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb.sn", TILEPRO_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb_u", TILEPRO_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb_u.sn", TILEPRO_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh", TILEPRO_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh.sn", TILEPRO_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh_u", TILEPRO_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh_u.sn", TILEPRO_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth", TILEPRO_OPC_SLTH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth.sn", TILEPRO_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth_u", TILEPRO_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth_u.sn", TILEPRO_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slti", TILEPRO_OPC_SLTI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "slti.sn", TILEPRO_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "slti_u", TILEPRO_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "slti_u.sn", TILEPRO_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib", TILEPRO_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, 
{ 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib.sn", TILEPRO_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib_u", TILEPRO_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib_u.sn", TILEPRO_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih", TILEPRO_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih.sn", TILEPRO_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih_u", TILEPRO_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih_u.sn", TILEPRO_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sne", TILEPRO_OPC_SNE, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sne.sn", TILEPRO_OPC_SNE_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneb", TILEPRO_OPC_SNEB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneb.sn", TILEPRO_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneh", TILEPRO_OPC_SNEH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneh.sn", TILEPRO_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sra", TILEPRO_OPC_SRA, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sra.sn", TILEPRO_OPC_SRA_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srab", TILEPRO_OPC_SRAB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srab.sn", TILEPRO_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srah", 
TILEPRO_OPC_SRAH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srah.sn", TILEPRO_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srai", TILEPRO_OPC_SRAI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "srai.sn", TILEPRO_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraib", TILEPRO_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraib.sn", TILEPRO_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraih", TILEPRO_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraih.sn", TILEPRO_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sub", TILEPRO_OPC_SUB, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sub.sn", TILEPRO_OPC_SUB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subb", TILEPRO_OPC_SUBB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subb.sn", TILEPRO_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subbs_u", TILEPRO_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subbs_u.sn", TILEPRO_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subh", TILEPRO_OPC_SUBH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subh.sn", TILEPRO_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subhs", TILEPRO_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subhs.sn", TILEPRO_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1, { 
{ 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subs", TILEPRO_OPC_SUBS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subs.sn", TILEPRO_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sw", TILEPRO_OPC_SW, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "swadd", TILEPRO_OPC_SWADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "swint0", TILEPRO_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint1", TILEPRO_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint2", TILEPRO_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint3", TILEPRO_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb0", TILEPRO_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb0.sn", TILEPRO_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb1", TILEPRO_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb1.sn", TILEPRO_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb2", TILEPRO_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb2.sn", TILEPRO_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb3", TILEPRO_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb3.sn", TILEPRO_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tns", TILEPRO_OPC_TNS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "tns.sn", TILEPRO_OPC_TNS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "wh64", 
TILEPRO_OPC_WH64, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "xor", TILEPRO_OPC_XOR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "xor.sn", TILEPRO_OPC_XOR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "xori", TILEPRO_OPC_XORI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "xori.sn", TILEPRO_OPC_XORI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { NULL, TILEPRO_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } }, } };
/*
 * Encode a bitfield-extraction node for the decode finite-state machine
 * tables below: the low 6 bits hold the starting bit position within the
 * instruction word, and bits 6 and up hold a width mask of (2^size - 1).
 * The decoder extracts `size` bits at `start` and uses the value to index
 * the entries that immediately follow the node.
 */
#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
/*
 * Encode a redirect to another index within the same FSM array.  Child
 * indices are biased by TILEPRO_OPC_NONE so they cannot collide with the
 * plain opcode values (all < TILEPRO_OPC_NONE-biased range) also stored
 * as table entries.  NOTE(review): both macros only make sense paired
 * with the generated decode_*_fsm tables; do not reuse elsewhere.
 */
#define CHILD(array_index) (TILEPRO_OPC_NONE + (array_index))
static const unsigned short decode_X0_fsm[1153] = { BITFIELD(22, 9) /* index 0 */, CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613), CHILD(630), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(663), CHILD(680), CHILD(697), CHILD(714), CHILD(746), CHILD(763), CHILD(780), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), 
CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(873), CHILD(878), CHILD(883), CHILD(903), CHILD(908), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(913), CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(953), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(988), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(993), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1076), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(18, 4) /* index 513 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD, TILEPRO_OPC_ADIFFB_U, TILEPRO_OPC_ADIFFH, TILEPRO_OPC_AND, TILEPRO_OPC_AVGB_U, TILEPRO_OPC_AVGH, TILEPRO_OPC_CRC32_32, TILEPRO_OPC_CRC32_8, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_MAXB_U, BITFIELD(18, 4) /* index 530 */, TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_SU, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULHHSA_UU, TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_SU, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULHLA_SS, TILEPRO_OPC_MULHLA_SU, TILEPRO_OPC_MULHLA_US, BITFIELD(18, 4) /* index 547 */, TILEPRO_OPC_MULHLA_UU, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_MULHL_SS, TILEPRO_OPC_MULHL_SU, TILEPRO_OPC_MULHL_US, TILEPRO_OPC_MULHL_UU, TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_SU, TILEPRO_OPC_MULLLA_UU, TILEPRO_OPC_MULLLSA_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_SU, TILEPRO_OPC_MULLL_UU, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZB, BITFIELD(18, 4) /* index 564 */, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(581), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, TILEPRO_OPC_SADAB_U, TILEPRO_OPC_SADAH, TILEPRO_OPC_SADAH_U, TILEPRO_OPC_SADB_U, 
TILEPRO_OPC_SADH, TILEPRO_OPC_SADH_U, BITFIELD(12, 2) /* index 581 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(586), BITFIELD(14, 2) /* index 586 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(591), BITFIELD(16, 2) /* index 591 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(18, 4) /* index 596 */, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE, BITFIELD(18, 4) /* index 613 */, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE, TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB, TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, TILEPRO_OPC_XOR, TILEPRO_OPC_DWORD_ALIGN, BITFIELD(18, 3) /* index 630 */, CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654), CHILD(657), CHILD(660), BITFIELD(21, 1) /* index 639 */, TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 642 */, TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 645 */, TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 648 */, TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 651 */, TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 654 */, TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 657 */, TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 660 */, TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE, BITFIELD(18, 4) /* index 663 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN, TILEPRO_OPC_ADD_SN, TILEPRO_OPC_ADIFFB_U_SN, TILEPRO_OPC_ADIFFH_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_AVGB_U_SN, TILEPRO_OPC_AVGH_SN, TILEPRO_OPC_CRC32_32_SN, TILEPRO_OPC_CRC32_8_SN, TILEPRO_OPC_INTHB_SN, TILEPRO_OPC_INTHH_SN, 
TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN, TILEPRO_OPC_MAXB_U_SN, BITFIELD(18, 4) /* index 680 */, TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MULHHA_SS_SN, TILEPRO_OPC_MULHHA_SU_SN, TILEPRO_OPC_MULHHA_UU_SN, TILEPRO_OPC_MULHHSA_UU_SN, TILEPRO_OPC_MULHH_SS_SN, TILEPRO_OPC_MULHH_SU_SN, TILEPRO_OPC_MULHH_UU_SN, TILEPRO_OPC_MULHLA_SS_SN, TILEPRO_OPC_MULHLA_SU_SN, TILEPRO_OPC_MULHLA_US_SN, BITFIELD(18, 4) /* index 697 */, TILEPRO_OPC_MULHLA_UU_SN, TILEPRO_OPC_MULHLSA_UU_SN, TILEPRO_OPC_MULHL_SS_SN, TILEPRO_OPC_MULHL_SU_SN, TILEPRO_OPC_MULHL_US_SN, TILEPRO_OPC_MULHL_UU_SN, TILEPRO_OPC_MULLLA_SS_SN, TILEPRO_OPC_MULLLA_SU_SN, TILEPRO_OPC_MULLLA_UU_SN, TILEPRO_OPC_MULLLSA_UU_SN, TILEPRO_OPC_MULLL_SS_SN, TILEPRO_OPC_MULLL_SU_SN, TILEPRO_OPC_MULLL_UU_SN, TILEPRO_OPC_MVNZ_SN, TILEPRO_OPC_MVZ_SN, TILEPRO_OPC_MZB_SN, BITFIELD(18, 4) /* index 714 */, TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(731), TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN, TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN, TILEPRO_OPC_SADAB_U_SN, TILEPRO_OPC_SADAH_SN, TILEPRO_OPC_SADAH_U_SN, TILEPRO_OPC_SADB_U_SN, TILEPRO_OPC_SADH_SN, TILEPRO_OPC_SADH_U_SN, BITFIELD(12, 2) /* index 731 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(736), BITFIELD(14, 2) /* index 736 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(741), BITFIELD(16, 2) /* index 741 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_MOVE_SN, BITFIELD(18, 4) /* index 746 */, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN, BITFIELD(18, 4) /* index 763 
*/, TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN, TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN, TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN, TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN, TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, TILEPRO_OPC_XOR_SN, TILEPRO_OPC_DWORD_ALIGN_SN, BITFIELD(18, 3) /* index 780 */, CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804), CHILD(807), CHILD(810), BITFIELD(21, 1) /* index 789 */, TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 792 */, TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 795 */, TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 798 */, TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 801 */, TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 804 */, TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 807 */, TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 810 */, TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(6, 2) /* index 813 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(818), BITFIELD(8, 2) /* index 818 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(823), BITFIELD(10, 2) /* index 823 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_MOVELI_SN, BITFIELD(6, 2) /* index 828 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(833), BITFIELD(8, 2) /* index 833 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(838), BITFIELD(10, 2) /* index 838 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI, BITFIELD(0, 2) /* index 843 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(848), BITFIELD(2, 2) /* index 848 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(853), BITFIELD(4, 2) /* index 853 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, 
TILEPRO_OPC_AULI, CHILD(858), BITFIELD(6, 2) /* index 858 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(863), BITFIELD(8, 2) /* index 863 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(868), BITFIELD(10, 2) /* index 868 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL, BITFIELD(20, 2) /* index 873 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI, BITFIELD(20, 2) /* index 878 */, TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, BITFIELD(20, 2) /* index 883 */, CHILD(888), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, BITFIELD(6, 2) /* index 888 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(893), BITFIELD(8, 2) /* index 893 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(898), BITFIELD(10, 2) /* index 898 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(20, 2) /* index 903 */, TILEPRO_OPC_SLTIB, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U, BITFIELD(20, 2) /* index 908 */, TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(20, 2) /* index 913 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN, TILEPRO_OPC_ADDI_SN, BITFIELD(20, 2) /* index 918 */, TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, BITFIELD(20, 2) /* index 923 */, CHILD(928), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN, BITFIELD(6, 2) /* index 928 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(933), BITFIELD(8, 2) /* index 933 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(938), BITFIELD(10, 2) /* index 938 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_MOVEI_SN, BITFIELD(20, 2) /* index 943 */, TILEPRO_OPC_SLTIB_SN, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN, BITFIELD(20, 2) /* index 948 */, 
TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(20, 2) /* index 953 */, TILEPRO_OPC_NONE, CHILD(958), TILEPRO_OPC_XORI, TILEPRO_OPC_NONE, BITFIELD(0, 2) /* index 958 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(963), BITFIELD(2, 2) /* index 963 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(968), BITFIELD(4, 2) /* index 968 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(973), BITFIELD(6, 2) /* index 973 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(978), BITFIELD(8, 2) /* index 978 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(983), BITFIELD(10, 2) /* index 983 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(20, 2) /* index 988 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_XORI_SN, TILEPRO_OPC_NONE, BITFIELD(17, 5) /* index 993 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLIB, TILEPRO_OPC_SHLIH, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRIB, TILEPRO_OPC_SHRIH, TILEPRO_OPC_SHRI, TILEPRO_OPC_SRAIB, TILEPRO_OPC_SRAIH, TILEPRO_OPC_SRAI, CHILD(1026), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 4) /* index 1026 */, TILEPRO_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052), CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067), CHILD(1070), CHILD(1073), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1043 */, TILEPRO_OPC_BITX, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1046 */, TILEPRO_OPC_BYTEX, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1049 */, TILEPRO_OPC_CLZ, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 
1052 */, TILEPRO_OPC_CTZ, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1055 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1058 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1061 */, TILEPRO_OPC_PCNT, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1064 */, TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1067 */, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1070 */, TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1073 */, TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, BITFIELD(17, 5) /* index 1076 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_SRAI_SN, CHILD(1109), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 4) /* index 1109 */, TILEPRO_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135), CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144), CHILD(1147), CHILD(1150), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1126 */, TILEPRO_OPC_BITX_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1129 */, TILEPRO_OPC_BYTEX_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1132 */, TILEPRO_OPC_CLZ_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1135 */, TILEPRO_OPC_CTZ_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1138 */, TILEPRO_OPC_PCNT_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1141 */, TILEPRO_OPC_TBLIDXB0_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1144 */, TILEPRO_OPC_TBLIDXB1_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 
1147 */, TILEPRO_OPC_TBLIDXB2_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1150 */, TILEPRO_OPC_TBLIDXB3_SN, TILEPRO_OPC_NONE, };
/*
 * Decode FSM for the X1 pipe of a TILEPro instruction bundle.
 *
 * Table format (shared by every decode_*_fsm table in this file; the
 * tables appear to be machine-generated -- do not hand-edit the data):
 * the array is a flattened decision tree walked by the decoder.  Each
 * node begins with a BITFIELD(start, size) entry naming which bits of
 * the 64-bit bundle to extract next, followed by 2^size entries
 * indexed by that field's value.  Each entry is either a terminal
 * TILEPRO_OPC_* opcode, or CHILD(n), a link to the node that begins at
 * array index n (the "index n" comments embedded in the data mark
 * those node offsets).  TILEPRO_OPC_NONE marks an undecodable value.
 */
static const unsigned short decode_X1_fsm[1540] = { BITFIELD(54, 9) /* index 0 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(513), CHILD(561), CHILD(594), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(641), CHILD(689), CHILD(722), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), 
CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(941), CHILD(950), CHILD(974), CHILD(983), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(992), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1334), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, 
TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(49, 5) /* index 513 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD, TILEPRO_OPC_AND, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR, TILEPRO_OPC_LNK, TILEPRO_OPC_MAXB_U, TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZB, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(546), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, BITFIELD(43, 2) /* index 546 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(551), BITFIELD(45, 2) /* index 551 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(556), BITFIELD(47, 2) /* index 556 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(49, 5) /* index 561 */, TILEPRO_OPC_SB, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE, TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB, TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, BITFIELD(49, 4) /* index 594 */, CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626), CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 611 */, TILEPRO_OPC_SW, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 614 */, TILEPRO_OPC_XOR, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 617 */, TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 620 */, TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 623 */, TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 626 */, TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 629 */, TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 632 */, TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 635 */, TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 638 */, TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE, BITFIELD(49, 5) /* index 641 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN, TILEPRO_OPC_ADD_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_INTHB_SN, TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR, TILEPRO_OPC_LNK_SN, TILEPRO_OPC_MAXB_U_SN, TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MZB_SN, TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(674), TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN, TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN, BITFIELD(43, 2) /* index 674 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(679), BITFIELD(45, 2) /* index 679 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(684), BITFIELD(47, 2) /* index 684 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_MOVE_SN, BITFIELD(49, 5) /* index 689 */, TILEPRO_OPC_SB, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SH, 
TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN, TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN, TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN, TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN, TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN, TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, BITFIELD(49, 4) /* index 722 */, CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751), CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 739 */, TILEPRO_OPC_XOR_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 742 */, TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 745 */, TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 748 */, TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 751 */, TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 754 */, TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 757 */, TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 760 */, TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 763 */, TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(37, 2) /* index 766 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(771), BITFIELD(39, 2) /* index 771 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(776), BITFIELD(41, 2) /* index 776 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_MOVELI_SN, BITFIELD(37, 2) /* index 781 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(786), BITFIELD(39, 2) /* index 786 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(791), BITFIELD(41, 2) /* index 791 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, 
TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI, BITFIELD(31, 2) /* index 796 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(801), BITFIELD(33, 2) /* index 801 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(806), BITFIELD(35, 2) /* index 806 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(811), BITFIELD(37, 2) /* index 811 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(816), BITFIELD(39, 2) /* index 816 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(821), BITFIELD(41, 2) /* index 821 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL, BITFIELD(31, 4) /* index 826 */, TILEPRO_OPC_BZ, TILEPRO_OPC_BZT, TILEPRO_OPC_BNZ, TILEPRO_OPC_BNZT, TILEPRO_OPC_BGZ, TILEPRO_OPC_BGZT, TILEPRO_OPC_BGEZ, TILEPRO_OPC_BGEZT, TILEPRO_OPC_BLZ, TILEPRO_OPC_BLZT, TILEPRO_OPC_BLEZ, TILEPRO_OPC_BLEZT, TILEPRO_OPC_BBS, TILEPRO_OPC_BBST, TILEPRO_OPC_BBNS, TILEPRO_OPC_BBNST, BITFIELD(31, 4) /* index 843 */, TILEPRO_OPC_BZ_SN, TILEPRO_OPC_BZT_SN, TILEPRO_OPC_BNZ_SN, TILEPRO_OPC_BNZT_SN, TILEPRO_OPC_BGZ_SN, TILEPRO_OPC_BGZT_SN, TILEPRO_OPC_BGEZ_SN, TILEPRO_OPC_BGEZT_SN, TILEPRO_OPC_BLZ_SN, TILEPRO_OPC_BLZT_SN, TILEPRO_OPC_BLEZ_SN, TILEPRO_OPC_BLEZT_SN, TILEPRO_OPC_BBS_SN, TILEPRO_OPC_BBST_SN, TILEPRO_OPC_BBNS_SN, TILEPRO_OPC_BBNST_SN, BITFIELD(51, 3) /* index 860 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI, CHILD(869), TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MFSPR, BITFIELD(31, 2) /* index 869 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(874), BITFIELD(33, 2) /* index 874 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(879), BITFIELD(35, 2) /* index 879 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(884), BITFIELD(37, 2) /* index 884 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(889), BITFIELD(39, 2) /* index 889 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(894), 
BITFIELD(41, 2) /* index 894 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(51, 3) /* index 899 */, TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, TILEPRO_OPC_MTSPR, CHILD(908), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, TILEPRO_OPC_SLTIB, BITFIELD(37, 2) /* index 908 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(913), BITFIELD(39, 2) /* index 913 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(918), BITFIELD(41, 2) /* index 918 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(51, 3) /* index 923 */, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U, TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_XORI, TILEPRO_OPC_LBADD, TILEPRO_OPC_LBADD_U, BITFIELD(51, 3) /* index 932 */, TILEPRO_OPC_LHADD, TILEPRO_OPC_LHADD_U, TILEPRO_OPC_LWADD, TILEPRO_OPC_LWADD_NA, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD, TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE, BITFIELD(51, 3) /* index 941 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN, TILEPRO_OPC_ADDI_SN, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MFSPR, BITFIELD(51, 3) /* index 950 */, TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, TILEPRO_OPC_MTSPR, CHILD(959), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN, TILEPRO_OPC_SLTIB_SN, BITFIELD(37, 2) /* index 959 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(964), BITFIELD(39, 2) /* index 964 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(969), BITFIELD(41, 2) /* index 969 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_MOVEI_SN, BITFIELD(51, 3) /* index 974 */, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN, TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_XORI_SN, TILEPRO_OPC_LBADD_SN, TILEPRO_OPC_LBADD_U_SN, BITFIELD(51, 3) /* index 983 */, TILEPRO_OPC_LHADD_SN, TILEPRO_OPC_LHADD_U_SN, TILEPRO_OPC_LWADD_SN, 
TILEPRO_OPC_LWADD_NA_SN, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD, TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE, BITFIELD(46, 7) /* index 992 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127), CHILD(1127), CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142), CHILD(1142), CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1151), CHILD(1242), CHILD(1290), CHILD(1323), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1121 */, TILEPRO_OPC_RLI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1124 */, TILEPRO_OPC_SHLIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1127 */, TILEPRO_OPC_SHLIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1130 */, TILEPRO_OPC_SHLI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1133 */, TILEPRO_OPC_SHRIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1136 */, TILEPRO_OPC_SHRIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1139 */, TILEPRO_OPC_SHRI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1142 */, TILEPRO_OPC_SRAIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1145 */, TILEPRO_OPC_SRAIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1148 */, TILEPRO_OPC_SRAI, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1151 */, TILEPRO_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169), CHILD(1172), CHILD(1175), CHILD(1178), BITFIELD(53, 1) /* index 1160 */, TILEPRO_OPC_DRAIN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1163 */, TILEPRO_OPC_DTLBPR, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1166 */, TILEPRO_OPC_FINV, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1169 */, TILEPRO_OPC_FLUSH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1172 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1175 */, TILEPRO_OPC_ICOH, TILEPRO_OPC_NONE, BITFIELD(31, 2) /* index 1178 */, CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239), BITFIELD(53, 1) /* index 1183 */, CHILD(1186), TILEPRO_OPC_NONE, BITFIELD(33, 2) /* index 1186 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1191), BITFIELD(35, 2) /* index 1191 */, TILEPRO_OPC_ILL, CHILD(1196), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(37, 2) /* index 1196 */, TILEPRO_OPC_ILL, CHILD(1201), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(39, 2) /* index 1201 */, 
TILEPRO_OPC_ILL, CHILD(1206), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(41, 2) /* index 1206 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_BPT, TILEPRO_OPC_ILL, BITFIELD(53, 1) /* index 1211 */, CHILD(1214), TILEPRO_OPC_NONE, BITFIELD(33, 2) /* index 1214 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1219), BITFIELD(35, 2) /* index 1219 */, TILEPRO_OPC_ILL, CHILD(1224), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(37, 2) /* index 1224 */, TILEPRO_OPC_ILL, CHILD(1229), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(39, 2) /* index 1229 */, TILEPRO_OPC_ILL, CHILD(1234), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(41, 2) /* index 1234 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_RAISE, TILEPRO_OPC_ILL, BITFIELD(53, 1) /* index 1239 */, TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1242 */, CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278), CHILD(1281), CHILD(1284), CHILD(1287), BITFIELD(53, 1) /* index 1251 */, TILEPRO_OPC_INV, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1254 */, TILEPRO_OPC_IRET, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1257 */, CHILD(1260), TILEPRO_OPC_NONE, BITFIELD(31, 2) /* index 1260 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1265), BITFIELD(33, 2) /* index 1265 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1270), BITFIELD(35, 2) /* index 1270 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH, BITFIELD(53, 1) /* index 1275 */, TILEPRO_OPC_LB_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1278 */, TILEPRO_OPC_LH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1281 */, TILEPRO_OPC_LH_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1284 */, TILEPRO_OPC_LW, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1287 */, TILEPRO_OPC_MF, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1290 */, CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), CHILD(1314), CHILD(1317), CHILD(1320), BITFIELD(53, 1) /* index 1299 */, TILEPRO_OPC_NAP, TILEPRO_OPC_NONE, BITFIELD(53, 1) 
/* index 1302 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1305 */, TILEPRO_OPC_SWINT0, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1308 */, TILEPRO_OPC_SWINT1, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1311 */, TILEPRO_OPC_SWINT2, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1314 */, TILEPRO_OPC_SWINT3, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1317 */, TILEPRO_OPC_TNS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1320 */, TILEPRO_OPC_WH64, TILEPRO_OPC_NONE, BITFIELD(43, 2) /* index 1323 */, CHILD(1328), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(45, 1) /* index 1328 */, CHILD(1331), TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1331 */, TILEPRO_OPC_LW_NA, TILEPRO_OPC_NONE, BITFIELD(46, 7) /* index 1334 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469), CHILD(1469), CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484), CHILD(1484), CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1151), CHILD(1493), CHILD(1517), CHILD(1529), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1463 */, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1466 */, TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1469 */, TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1472 */, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1475 */, TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1478 */, TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1481 */, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1484 */, TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1487 */, TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1490 */, TILEPRO_OPC_SRAI_SN, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1493 */, CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508), CHILD(1511), CHILD(1514), CHILD(1287), BITFIELD(53, 1) /* index 1502 */, TILEPRO_OPC_LB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1505 */, TILEPRO_OPC_LB_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1508 */, TILEPRO_OPC_LH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1511 */, TILEPRO_OPC_LH_U_SN, 
TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1514 */, TILEPRO_OPC_LW_SN, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1517 */, CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), CHILD(1314), CHILD(1526), CHILD(1320), BITFIELD(53, 1) /* index 1526 */, TILEPRO_OPC_TNS_SN, TILEPRO_OPC_NONE, BITFIELD(43, 2) /* index 1529 */, CHILD(1534), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(45, 1) /* index 1534 */, CHILD(1537), TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1537 */, TILEPRO_OPC_LW_NA_SN, TILEPRO_OPC_NONE, };
/*
 * Decode FSM for the Y0 pipe; same node format as decode_X1_fsm above.
 */
static const unsigned short decode_Y0_fsm[168] = { BITFIELD(27, 4) /* index 0 */, TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), CHILD(57), CHILD(62), CHILD(67), TILEPRO_OPC_ADDI, CHILD(72), CHILD(102), TILEPRO_OPC_SEQI, CHILD(117), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, BITFIELD(18, 2) /* index 17 */, TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB, BITFIELD(18, 2) /* index 22 */, TILEPRO_OPC_MNZ, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZ, BITFIELD(18, 2) /* index 27 */, TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR, BITFIELD(12, 2) /* index 32 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37), BITFIELD(14, 2) /* index 37 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42), BITFIELD(16, 2) /* index 42 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(18, 2) /* index 47 */, TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA, BITFIELD(18, 2) /* index 52 */, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, BITFIELD(18, 2) /* index 57 */, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE, BITFIELD(18, 2) /* index 62 */, TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_UU, BITFIELD(18, 2) /* index 67 */, TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_UU, BITFIELD(0, 2) /* index 72 */, 
TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77), BITFIELD(2, 2) /* index 77 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82), BITFIELD(4, 2) /* index 82 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87), BITFIELD(6, 2) /* index 87 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(92), BITFIELD(8, 2) /* index 92 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(97), BITFIELD(10, 2) /* index 97 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(6, 2) /* index 102 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(107), BITFIELD(8, 2) /* index 107 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(112), BITFIELD(10, 2) /* index 112 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(15, 5) /* index 117 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, CHILD(150), CHILD(159), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 3) /* index 150 */, TILEPRO_OPC_NONE, TILEPRO_OPC_BITX, TILEPRO_OPC_BYTEX, TILEPRO_OPC_CLZ, TILEPRO_OPC_CTZ, TILEPRO_OPC_FNOP, TILEPRO_OPC_NOP, TILEPRO_OPC_PCNT, BITFIELD(12, 3) /* index 159 */, TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, };
/*
 * Decode FSM for the Y1 pipe; same node format as decode_X1_fsm above.
 */
static const unsigned short decode_Y1_fsm[140] = { BITFIELD(59, 4) /* index 0 */, TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), CHILD(57), TILEPRO_OPC_ADDI, 
CHILD(62), CHILD(92), TILEPRO_OPC_SEQI, CHILD(107), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(49, 2) /* index 17 */, TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB, BITFIELD(49, 2) /* index 22 */, TILEPRO_OPC_NONE, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZ, TILEPRO_OPC_NONE, BITFIELD(49, 2) /* index 27 */, TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR, BITFIELD(43, 2) /* index 32 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37), BITFIELD(45, 2) /* index 37 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42), BITFIELD(47, 2) /* index 42 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(49, 2) /* index 47 */, TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA, BITFIELD(49, 2) /* index 52 */, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, BITFIELD(49, 2) /* index 57 */, TILEPRO_OPC_NONE, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE, BITFIELD(31, 2) /* index 62 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(67), BITFIELD(33, 2) /* index 67 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(72), BITFIELD(35, 2) /* index 72 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77), BITFIELD(37, 2) /* index 77 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82), BITFIELD(39, 2) /* index 82 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87), BITFIELD(41, 2) /* index 87 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(37, 2) /* index 92 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(97), BITFIELD(39, 2) /* index 97 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(102), BITFIELD(41, 2) /* index 102 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(48, 3) /* index 107 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI, 
TILEPRO_OPC_SRAI, CHILD(116), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 116 */, TILEPRO_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 125 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 130 */, TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 135 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, }; static const unsigned short decode_Y2_fsm[24] = { BITFIELD(56, 3) /* index 0 */, CHILD(9), TILEPRO_OPC_LB_U, TILEPRO_OPC_LH, TILEPRO_OPC_LH_U, TILEPRO_OPC_LW, TILEPRO_OPC_SB, TILEPRO_OPC_SH, TILEPRO_OPC_SW, BITFIELD(20, 2) /* index 9 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(14), BITFIELD(22, 2) /* index 14 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(19), BITFIELD(24, 2) /* index 19 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH, }; #undef BITFIELD #undef CHILD const unsigned short * const tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS] = { decode_X0_fsm, decode_X1_fsm, decode_Y0_fsm, decode_Y1_fsm, decode_Y2_fsm }; const struct tilepro_operand tilepro_operands[43] = { { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X0), 8, 1, 0, 0, 0, 0, create_Imm8_X0, get_Imm8_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Imm8_X1, get_Imm8_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y0), 8, 1, 0, 0, 0, 0, create_Imm8_Y0, get_Imm8_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y1), 8, 1, 0, 0, 0, 0, create_Imm8_Y1, get_Imm8_Y1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X0), 16, 1, 0, 0, 0, 0, create_Imm16_X0, get_Imm16_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X1), 16, 1, 0, 0, 0, 0, create_Imm16_X1, get_Imm16_X1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_JOFFLONG_X1), 29, 1, 0, 
0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_JOffLong_X1, get_JOffLong_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X0, get_SrcA_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X1, get_Dest_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y0, get_SrcA_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y1, get_Dest_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y1, get_SrcA_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y2, get_SrcA_Y2 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X0, get_SrcB_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X1, get_SrcB_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y0, get_SrcB_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y1, get_SrcB_Y1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_BROFF_X1), 17, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_BrOff_X1, get_BrOff_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE), 28, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_JOff_X1, get_JOff_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MF_IMM15_X1), 15, 0, 0, 0, 0, 0, create_MF_Imm15_X1, get_MF_Imm15_X1 }, { 
TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X0), 5, 0, 0, 0, 0, 0, create_MMStart_X0, get_MMStart_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X0), 5, 0, 0, 0, 0, 0, create_MMEnd_X0, get_MMEnd_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X1), 5, 0, 0, 0, 0, 0, create_MMStart_X1, get_MMStart_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X1), 5, 0, 0, 0, 0, 0, create_MMEnd_X1, get_MMEnd_X1 }, { TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MT_IMM15_X1), 15, 0, 0, 0, 0, 0, create_MT_Imm15_X1, get_MT_Imm15_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X0), 5, 0, 0, 0, 0, 0, create_ShAmt_X0, get_ShAmt_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X1), 5, 0, 0, 0, 0, 0, create_ShAmt_X1, get_ShAmt_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y0), 5, 0, 0, 0, 0, 0, create_ShAmt_Y0, get_ShAmt_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y1), 5, 0, 0, 0, 0, 0, create_ShAmt_Y1, get_ShAmt_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_DEST_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Dest_Imm8_X1, get_Dest_Imm8_X1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE), 10, 1, 0, 0, 1, TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES, create_BrOff_SN, get_BrOff_SN }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 8, 0, 0, 0, 0, 0, create_Imm8_SN, get_Imm8_SN }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 8, 1, 0, 0, 0, 0, create_Imm8_SN, get_Imm8_SN }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 2, 0, 0, 1, 0, 0, create_Dest_SN, get_Dest_SN }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 2, 0, 1, 0, 0, 0, create_Src_SN, get_Src_SN } }; /* Given a set of bundle bits and a specific pipe, returns which * instruction the bundle contains in that pipe. 
*/
const struct tilepro_opcode *
find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe)
{
	const unsigned short *table = tilepro_bundle_decoder_fsms[pipe];
	int index = 0;

	while (1) {
		/*
		 * Each FSM node is one "bitspec" word followed by its fanout
		 * entries: low 6 bits of bitspec select the starting bit of
		 * the bundle field, the upper bits are the extracted-field
		 * mask (so the fanout width is mask + 1).
		 */
		unsigned short bitspec = table[index];
		unsigned int bitfield =
			((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);

		unsigned short next = table[index + 1 + bitfield];
		if (next <= TILEPRO_OPC_NONE)
			return &tilepro_opcodes[next];

		/*
		 * Interior node: entries greater than TILEPRO_OPC_NONE encode
		 * the next table index, biased by TILEPRO_OPC_NONE.
		 */
		index = next - TILEPRO_OPC_NONE;
	}
}

/*
 * Decode every instruction in @bits (an X bundle holds two instructions,
 * a Y bundle three) into @decoded, resolving each operand to its final
 * value (sign-extended, and PC-relative branch offsets scaled/rebased
 * against @pc).  Returns the number of instructions decoded.
 */
int
parse_insn_tilepro(tilepro_bundle_bits bits,
		   unsigned int pc,
		   struct tilepro_decoded_instruction
		   decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE])
{
	int num_instructions = 0;
	int pipe;
	int min_pipe, max_pipe;

	/* The Y-encoding bit selects which set of pipelines is in use. */
	if ((bits & TILEPRO_BUNDLE_Y_ENCODING_MASK) == 0) {
		min_pipe = TILEPRO_PIPELINE_X0;
		max_pipe = TILEPRO_PIPELINE_X1;
	} else {
		min_pipe = TILEPRO_PIPELINE_Y0;
		max_pipe = TILEPRO_PIPELINE_Y2;
	}

	/* For each pipe, find an instruction that fits. */
	for (pipe = min_pipe; pipe <= max_pipe; pipe++) {
		const struct tilepro_opcode *opc;
		struct tilepro_decoded_instruction *d;
		int i;

		d = &decoded[num_instructions++];
		opc = find_opcode(bits, (tilepro_pipeline)pipe);
		d->opcode = opc;

		/* Decode each operand, sign extending, etc. as appropriate. */
		for (i = 0; i < opc->num_operands; i++) {
			const struct tilepro_operand *op =
				&tilepro_operands[opc->operands[pipe][i]];
			int opval = op->extract(bits);

			if (op->is_signed) {
				/*
				 * Sign-extend the operand.  Do the left
				 * shift in unsigned arithmetic: shifting a
				 * set bit into or past the sign bit of a
				 * signed int is undefined behavior.
				 */
				int shift =
					(int)((sizeof(int) * 8) - op->num_bits);
				opval = (int)((unsigned int)opval << shift)
					>> shift;
			}

			/* Adjust PC-relative scaled branch offsets. */
			if (op->type == TILEPRO_OP_TYPE_ADDRESS) {
				opval *= TILEPRO_BUNDLE_SIZE_IN_BYTES;
				opval += (int)pc;
			}

			/* Record the final value. */
			d->operands[i] = op;
			d->operand_values[i] = opval;
		}
	}

	return num_instructions;
}
gpl-2.0
LiquidSmooth-Devices/Deathly_Kernel_D2
arch/powerpc/sysdev/grackle.c
9887
2083
/* * Functions for setting up and using a MPC106 northbridge * Extracted from arch/powerpc/platforms/powermac/pci.c. * * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org) * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/grackle.h> #define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \ | (((o) & ~3) << 24)) #define GRACKLE_PICR1_STG 0x00000040 #define GRACKLE_PICR1_LOOPSNOOP 0x00000010 /* N.B. this is called before bridges is initialized, so we can't use grackle_pcibios_{read,write}_config_dword. */ static inline void grackle_set_stg(struct pci_controller* bp, int enable) { unsigned int val; out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); val = in_le32(bp->cfg_data); val = enable? (val | GRACKLE_PICR1_STG) : (val & ~GRACKLE_PICR1_STG); out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); out_le32(bp->cfg_data, val); (void)in_le32(bp->cfg_data); } static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) { unsigned int val; out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); val = in_le32(bp->cfg_data); val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) : (val & ~GRACKLE_PICR1_LOOPSNOOP); out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); out_le32(bp->cfg_data, val); (void)in_le32(bp->cfg_data); } void __init setup_grackle(struct pci_controller *hose) { setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0); if (of_machine_is_compatible("PowerMac1,1")) pci_add_flags(PCI_REASSIGN_ALL_BUS); if (of_machine_is_compatible("AAPL,PowerBook1998")) grackle_set_loop_snoop(hose, 1); #if 0 /* Disabled for now, HW problems ??? 
*/ grackle_set_stg(hose, 1); #endif }
gpl-2.0
MoKee/android_kernel_lge_omap4-common
net/bridge/br_stp_bpdu.c
1952
5285
/*
 *	Spanning tree protocol; BPDU handling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/netfilter_bridge.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
#include <asm/unaligned.h>

#include "br_private.h"
#include "br_private_stp.h"

/* On-wire STP timer values are in units of 1/256 second. */
#define STP_HZ		256

/* Headroom reserved in outgoing skbs for the LLC PDU header. */
#define LLC_RESERVE	sizeof(struct llc_pdu_un)

/*
 * Wrap @length bytes at @data in an LLC UI frame addressed to the
 * bridge group address and transmit it on @p's device via the
 * NF_BR_LOCAL_OUT netfilter hook.  Drops silently if the skb
 * allocation fails.
 */
static void br_send_bpdu(struct net_bridge_port *p,
			 const unsigned char *data, int length)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(length+LLC_RESERVE);
	if (!skb)
		return;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_802_2);

	skb_reserve(skb, LLC_RESERVE);
	memcpy(__skb_put(skb, length), data, length);

	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
			    LLC_SAP_BSPAN, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(skb);

	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);

	skb_reset_mac_header(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		dev_queue_xmit);
}

/* Convert @j jiffies to on-wire STP units and store big-endian at @dest. */
static inline void br_set_ticks(unsigned char *dest, int j)
{
	unsigned long ticks = (STP_HZ * j)/ HZ;

	put_unaligned_be16(ticks, dest);
}

/* Convert a big-endian on-wire STP value at @src to jiffies, rounding up. */
static inline int br_get_ticks(const unsigned char *src)
{
	unsigned long ticks = get_unaligned_be16(src);

	return DIV_ROUND_UP(ticks * HZ, STP_HZ);
}

/* called under bridge lock */
/*
 * Serialize @bpdu into the 35-byte Configuration BPDU wire layout and
 * send it out port @p.  Does nothing unless kernel STP is enabled on
 * the bridge.
 */
void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
{
	unsigned char buf[35];

	if (p->br->stp_enabled != BR_KERNEL_STP)
		return;

	buf[0] = 0;		/* protocol identifier (two bytes of 0) */
	buf[1] = 0;
	buf[2] = 0;		/* protocol version */
	buf[3] = BPDU_TYPE_CONFIG;
	/* flags: bit 0 = topology change, bit 7 = TC acknowledgment */
	buf[4] = (bpdu->topology_change ? 0x01 : 0) |
		(bpdu->topology_change_ack ? 0x80 : 0);
	/* root bridge identifier: 2-byte priority + 6-byte address */
	buf[5] = bpdu->root.prio[0];
	buf[6] = bpdu->root.prio[1];
	buf[7] = bpdu->root.addr[0];
	buf[8] = bpdu->root.addr[1];
	buf[9] = bpdu->root.addr[2];
	buf[10] = bpdu->root.addr[3];
	buf[11] = bpdu->root.addr[4];
	buf[12] = bpdu->root.addr[5];
	/* root path cost, big-endian 32-bit */
	buf[13] = (bpdu->root_path_cost >> 24) & 0xFF;
	buf[14] = (bpdu->root_path_cost >> 16) & 0xFF;
	buf[15] = (bpdu->root_path_cost >> 8) & 0xFF;
	buf[16] = bpdu->root_path_cost & 0xFF;
	/* sending bridge identifier */
	buf[17] = bpdu->bridge_id.prio[0];
	buf[18] = bpdu->bridge_id.prio[1];
	buf[19] = bpdu->bridge_id.addr[0];
	buf[20] = bpdu->bridge_id.addr[1];
	buf[21] = bpdu->bridge_id.addr[2];
	buf[22] = bpdu->bridge_id.addr[3];
	buf[23] = bpdu->bridge_id.addr[4];
	buf[24] = bpdu->bridge_id.addr[5];
	/* port identifier, big-endian 16-bit */
	buf[25] = (bpdu->port_id >> 8) & 0xFF;
	buf[26] = bpdu->port_id & 0xFF;
	/* four 16-bit timers, converted from jiffies to STP units */
	br_set_ticks(buf+27, bpdu->message_age);
	br_set_ticks(buf+29, bpdu->max_age);
	br_set_ticks(buf+31, bpdu->hello_time);
	br_set_ticks(buf+33, bpdu->forward_delay);
	br_send_bpdu(p, buf, 35);
}

/* called under bridge lock */
/*
 * Send a 4-byte Topology Change Notification BPDU out port @p.  Does
 * nothing unless kernel STP is enabled on the bridge.
 */
void br_send_tcn_bpdu(struct net_bridge_port *p)
{
	unsigned char buf[4];

	if (p->br->stp_enabled != BR_KERNEL_STP)
		return;

	buf[0] = 0;
	buf[1] = 0;
	buf[2] = 0;
	buf[3] = BPDU_TYPE_TCN;
	br_send_bpdu(p, buf, 4);
}

/*
 * Called from llc.
*
* NO locks, but rcu_read_lock
*/
/*
 * Receive handler for STP BPDUs.  Validates the frame (protocol id,
 * version, destination group address, bridge/port state), then parses
 * Configuration and TCN BPDUs and feeds them to the STP state machine
 * under the bridge lock.  Consumes @skb on every path.
 */
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
		struct net_device *dev)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p;
	struct net_bridge *br;
	const unsigned char *buf;

	/* Need at least protocol id (2), version (1) and type (1). */
	if (!pskb_may_pull(skb, 4))
		goto err;

	/* compare of protocol id and version */
	buf = skb->data;
	if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
		goto err;

	p = br_port_get_rcu(dev);
	if (!p)
		goto err;

	br = p->br;
	spin_lock(&br->lock);

	/* Ignore BPDUs unless kernel STP is running this bridge. */
	if (br->stp_enabled != BR_KERNEL_STP)
		goto out;

	if (!(br->dev->flags & IFF_UP))
		goto out;

	if (p->state == BR_STATE_DISABLED)
		goto out;

	/* Only frames sent to the configured bridge group address count. */
	if (compare_ether_addr(dest, br->group_addr) != 0)
		goto out;

	/* Skip protocol id + version; buf[0] is now the BPDU type byte. */
	buf = skb_pull(skb, 3);

	if (buf[0] == BPDU_TYPE_CONFIG) {
		struct br_config_bpdu bpdu;

		/* Type byte + 31 bytes of Config BPDU body. */
		if (!pskb_may_pull(skb, 32))
			goto out;

		/* Offsets below are relative to the type byte at buf[0]. */
		buf = skb->data;
		bpdu.topology_change = (buf[1] & 0x01) ? 1 : 0;
		bpdu.topology_change_ack = (buf[1] & 0x80) ? 1 : 0;

		bpdu.root.prio[0] = buf[2];
		bpdu.root.prio[1] = buf[3];
		bpdu.root.addr[0] = buf[4];
		bpdu.root.addr[1] = buf[5];
		bpdu.root.addr[2] = buf[6];
		bpdu.root.addr[3] = buf[7];
		bpdu.root.addr[4] = buf[8];
		bpdu.root.addr[5] = buf[9];
		bpdu.root_path_cost =
			(buf[10] << 24) |
			(buf[11] << 16) |
			(buf[12] << 8) |
			buf[13];
		bpdu.bridge_id.prio[0] = buf[14];
		bpdu.bridge_id.prio[1] = buf[15];
		bpdu.bridge_id.addr[0] = buf[16];
		bpdu.bridge_id.addr[1] = buf[17];
		bpdu.bridge_id.addr[2] = buf[18];
		bpdu.bridge_id.addr[3] = buf[19];
		bpdu.bridge_id.addr[4] = buf[20];
		bpdu.bridge_id.addr[5] = buf[21];
		bpdu.port_id = (buf[22] << 8) | buf[23];

		/* Timers arrive in STP units; convert to jiffies. */
		bpdu.message_age = br_get_ticks(buf+24);
		bpdu.max_age = br_get_ticks(buf+26);
		bpdu.hello_time = br_get_ticks(buf+28);
		bpdu.forward_delay = br_get_ticks(buf+30);

		br_received_config_bpdu(p, &bpdu);
	}

	else if (buf[0] == BPDU_TYPE_TCN) {
		br_received_tcn_bpdu(p);
	}
 out:
	spin_unlock(&br->lock);
 err:
	kfree_skb(skb);
}
gpl-2.0
mozilla-b2g/codeaurora_kernel_msm
arch/arm/mach-msm/board-8064-camera.c
1952
19010
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/i2c.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/camera.h> #include <mach/msm_bus_board.h> #include <mach/gpiomux.h> #include "devices.h" #include "board-8064.h" #ifdef CONFIG_MSM_CAMERA static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_GPIO, /*suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*active 1*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*active 2*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_2, /*active 3*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_5, /*active 4*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_6, /*active 5*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_2, /*active 6*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_3, /*active 7*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, { .func = GPIOMUX_FUNC_9, /*active 9*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_A, /*active 10*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_6, /*active 11*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_4, /*active 12*/ .drv = GPIOMUX_DRV_2MA, .pull = 
GPIOMUX_PULL_NONE, }, }; static struct msm_gpiomux_config apq8064_cam_common_configs[] = { { .gpio = 1, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 2, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[12], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 3, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 4, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 5, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[1], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 34, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 107, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 10, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[9], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 11, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[10], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 12, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 13, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, }; #define VFE_CAMIF_TIMER1_GPIO 3 #define VFE_CAMIF_TIMER2_GPIO 1 static struct gpio flash_init_gpio[] = { {VFE_CAMIF_TIMER1_GPIO, GPIOF_OUT_INIT_LOW, "CAMIF_TIMER1"}, {VFE_CAMIF_TIMER2_GPIO, GPIOF_OUT_INIT_LOW, "CAMIF_TIMER2"}, }; static struct msm_gpio_set_tbl flash_set_gpio[] = { {VFE_CAMIF_TIMER1_GPIO, GPIOF_OUT_INIT_HIGH, 2000}, {VFE_CAMIF_TIMER2_GPIO, GPIOF_OUT_INIT_HIGH, 2000}, }; static struct msm_camera_sensor_flash_src msm_flash_src = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT, .init_gpio_tbl = flash_init_gpio, .init_gpio_tbl_size = ARRAY_SIZE(flash_init_gpio), .set_gpio_tbl = flash_set_gpio, .set_gpio_tbl_size = ARRAY_SIZE(flash_set_gpio), 
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO, ._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO, ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A, }; static struct msm_gpiomux_config apq8064_cam_2d_configs[] = { }; static struct msm_bus_vectors cam_init_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_preview_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 27648000, .ib = 110592000, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_video_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 274406400, .ib = 561807360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_snapshot_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 274423680, .ib = 1097694720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_zsl_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 302071680, .ib = 1208286720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_video_ls_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 
348192000, .ib = 617103360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_dual_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 348192000, .ib = 1208286720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, }; static struct msm_bus_paths cam_bus_client_config[] = { { ARRAY_SIZE(cam_init_vectors), cam_init_vectors, }, { ARRAY_SIZE(cam_preview_vectors), cam_preview_vectors, }, { ARRAY_SIZE(cam_video_vectors), cam_video_vectors, }, { ARRAY_SIZE(cam_snapshot_vectors), cam_snapshot_vectors, }, { ARRAY_SIZE(cam_zsl_vectors), cam_zsl_vectors, }, { ARRAY_SIZE(cam_video_ls_vectors), cam_video_ls_vectors, }, { ARRAY_SIZE(cam_dual_vectors), cam_dual_vectors, }, }; static struct msm_bus_scale_pdata cam_bus_client_pdata = { cam_bus_client_config, ARRAY_SIZE(cam_bus_client_config), .name = "msm_camera", }; static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = { { .csiphy_core = 0, .csid_core = 0, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, { .csiphy_core = 1, .csid_core = 1, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, }; static struct camera_vreg_t apq_8064_cam_vreg[] = { {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; #define CAML_RSTN PM8921_GPIO_PM_TO_SYS(28) #define CAMR_RSTN 34 static struct gpio apq8064_common_cam_gpio[] = { }; 
static struct gpio apq8064_back_cam_gpio[] = { {5, GPIOF_DIR_IN, "CAMIF_MCLK"}, {CAML_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl apq8064_back_cam_gpio_set_tbl[] = { {CAML_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAML_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, }; static struct msm_camera_gpio_conf apq8064_back_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_back_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio), .cam_gpio_set_tbl = apq8064_back_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio_set_tbl), }; static struct gpio apq8064_front_cam_gpio[] = { {4, GPIOF_DIR_IN, "CAMIF_MCLK"}, {12, GPIOF_DIR_IN, "CAMIF_I2C_DATA"}, {13, GPIOF_DIR_IN, "CAMIF_I2C_CLK"}, {CAMR_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl apq8064_front_cam_gpio_set_tbl[] = { {CAMR_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAMR_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, }; static struct msm_camera_gpio_conf apq8064_front_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_front_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio), .cam_gpio_set_tbl = apq8064_front_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio_set_tbl), }; static struct msm_camera_i2c_conf apq8064_back_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct i2c_board_info msm_act_main_cam_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x11), }; static struct msm_actuator_info msm_act_main_cam_0_info = { .board_info = 
&msm_act_main_cam_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_0, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct i2c_board_info msm_act_main_cam1_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x18), }; static struct msm_actuator_info msm_act_main_cam_1_info = { .board_info = &msm_act_main_cam1_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_1, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct msm_camera_i2c_conf apq8064_front_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct msm_camera_sensor_flash_data flash_imx135 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params imx135_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx135 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx135_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_imx135_data = { .sensor_name = "imx135", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx135, .sensor_platform_info = &sensor_board_info_imx135, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_1_info, }; static struct i2c_board_info sc628a_flash_i2c_info = { I2C_BOARD_INFO("sc628a", 0x6E), }; static struct msm_camera_sensor_flash_data flash_imx074 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src, .board_info = &sc628a_flash_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_csi_lane_params imx074_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, 
.num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx074_csi_lane_params, }; static struct i2c_board_info imx074_eeprom_i2c_info = { I2C_BOARD_INFO("imx074_eeprom", 0x34 << 1), }; static struct msm_eeprom_info imx074_eeprom_info = { .board_info = &imx074_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = { .sensor_name = "imx074", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx074, .sensor_platform_info = &sensor_board_info_imx074, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_0_info, .eeprom_info = &imx074_eeprom_info, }; static struct msm_camera_csi_lane_params imx091_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_flash_data flash_imx091 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx091 = { .mount_angle = 0, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx091_csi_lane_params, }; static struct i2c_board_info imx091_eeprom_i2c_info = { I2C_BOARD_INFO("imx091_eeprom", 0x21), }; static struct msm_eeprom_info imx091_eeprom_info = { .board_info = &imx091_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx091_data = { .sensor_name = "imx091", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx091, .sensor_platform_info = &sensor_board_info_imx091, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_1_info, .eeprom_info = &imx091_eeprom_info, }; static struct msm_camera_sensor_flash_data flash_s5k3l1yx = { .flash_type = 
MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &s5k3l1yx_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = { .sensor_name = "s5k3l1yx", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_s5k3l1yx, .sensor_platform_info = &sensor_board_info_s5k3l1yx, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct msm_camera_sensor_flash_data flash_mt9m114 = { .flash_type = MSM_CAMERA_FLASH_NONE }; static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x1, }; static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = { .mount_angle = 90, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &mt9m114_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = { .sensor_name = "mt9m114", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_mt9m114, .sensor_platform_info = &sensor_board_info_mt9m114, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = YUV_SENSOR, }; static struct msm_camera_sensor_flash_data flash_ov2720 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params ov2720_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = { .mount_angle = 0, .cam_vreg = apq_8064_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, 
.i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &ov2720_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = { .sensor_name = "ov2720", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_ov2720, .sensor_platform_info = &sensor_board_info_ov2720, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct platform_device msm_camera_server = { .name = "msm_cam_server", .id = 0, }; void __init apq8064_init_cam(void) { msm_gpiomux_install(apq8064_cam_common_configs, ARRAY_SIZE(apq8064_cam_common_configs)); if (machine_is_apq8064_cdp()) { sensor_board_info_imx074.mount_angle = 0; sensor_board_info_mt9m114.mount_angle = 0; } else if (machine_is_apq8064_liquid()) sensor_board_info_imx074.mount_angle = 180; platform_device_register(&msm_camera_server); platform_device_register(&msm8960_device_i2c_mux_gsbi4); platform_device_register(&msm8960_device_csiphy0); platform_device_register(&msm8960_device_csiphy1); platform_device_register(&msm8960_device_csid0); platform_device_register(&msm8960_device_csid1); platform_device_register(&msm8960_device_ispif); platform_device_register(&msm8960_device_vfe); platform_device_register(&msm8960_device_vpe); } #ifdef CONFIG_I2C static struct i2c_board_info apq8064_camera_i2c_boardinfo[] = { { I2C_BOARD_INFO("imx074", 0x1A), .platform_data = &msm_camera_sensor_imx074_data, }, { I2C_BOARD_INFO("imx135", 0x10), .platform_data = &msm_camera_sensor_imx135_data, }, { I2C_BOARD_INFO("mt9m114", 0x48), .platform_data = &msm_camera_sensor_mt9m114_data, }, { I2C_BOARD_INFO("ov2720", 0x6C), .platform_data = &msm_camera_sensor_ov2720_data, }, { I2C_BOARD_INFO("imx091", 0x34), .platform_data = &msm_camera_sensor_imx091_data, }, { I2C_BOARD_INFO("s5k3l1yx", 0x20), .platform_data = &msm_camera_sensor_s5k3l1yx_data, }, }; struct msm_camera_board_info apq8064_camera_board_info = { .board_info = apq8064_camera_i2c_boardinfo, .num_i2c_board_info = 
ARRAY_SIZE(apq8064_camera_i2c_boardinfo), }; #endif #endif
gpl-2.0
limbo127/KVMGT-kernel
sound/oss/pas2_card.c
2720
9701
/* * sound/oss/pas2_card.c * * Detection routine for the Pro Audio Spectrum cards. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/spinlock.h> #include "sound_config.h" #include "pas2.h" #include "sb.h" static unsigned char dma_bits[] = { 4, 1, 2, 3, 0, 5, 6, 7 }; static unsigned char irq_bits[] = { 0, 0, 1, 2, 3, 4, 5, 6, 0, 1, 7, 8, 9, 0, 10, 11 }; static unsigned char sb_irq_bits[] = { 0x00, 0x00, 0x08, 0x10, 0x00, 0x18, 0x00, 0x20, 0x00, 0x08, 0x28, 0x30, 0x38, 0, 0 }; static unsigned char sb_dma_bits[] = { 0x00, 0x40, 0x80, 0xC0, 0, 0, 0, 0 }; /* * The Address Translation code is used to convert I/O register addresses to * be relative to the given base -register */ int pas_translate_code = 0; static int pas_intr_mask; static int pas_irq; static int pas_sb_base; DEFINE_SPINLOCK(pas_lock); #ifndef CONFIG_PAS_JOYSTICK static bool joystick; #else static bool joystick = 1; #endif #ifdef SYMPHONY_PAS static bool symphony = 1; #else static bool symphony; #endif #ifdef BROKEN_BUS_CLOCK static bool broken_bus_clock = 1; #else static bool broken_bus_clock; #endif static struct address_info cfg; static struct address_info cfg2; char pas_model = 0; static char *pas_model_names[] = { "", "Pro AudioSpectrum+", "CDPC", "Pro AudioSpectrum 16", "Pro AudioSpectrum 16D" }; /* * pas_read() and pas_write() are equivalents of inb and outb * These routines perform the I/O address translation required * to support other than the default base address */ extern void mix_write(unsigned char data, int ioaddr); unsigned char pas_read(int ioaddr) { return inb(ioaddr + pas_translate_code); } void pas_write(unsigned char data, int ioaddr) { outb((data), ioaddr + pas_translate_code); } /******************* Begin of the Interrupt Handler ********************/ static irqreturn_t pasintr(int irq, void *dev_id) { int status; status = pas_read(0x0B89); pas_write(status, 0x0B89); /* Clear interrupt */ if (status & 0x08) { pas_pcm_interrupt(status, 
1); status &= ~0x08; } if (status & 0x10) { pas_midi_interrupt(); status &= ~0x10; } return IRQ_HANDLED; } int pas_set_intr(int mask) { if (!mask) return 0; pas_intr_mask |= mask; pas_write(pas_intr_mask, 0x0B8B); return 0; } int pas_remove_intr(int mask) { if (!mask) return 0; pas_intr_mask &= ~mask; pas_write(pas_intr_mask, 0x0B8B); return 0; } /******************* End of the Interrupt handler **********************/ /******************* Begin of the Initialization Code ******************/ static int __init config_pas_hw(struct address_info *hw_config) { char ok = 1; unsigned int_ptrs; /* scsi/sound interrupt pointers */ pas_irq = hw_config->irq; pas_write(0x00, 0x0B8B); pas_write(0x36, 0x138B); pas_write(0x36, 0x1388); pas_write(0, 0x1388); pas_write(0x74, 0x138B); pas_write(0x74, 0x1389); pas_write(0, 0x1389); pas_write(0x80 | 0x40 | 0x20 | 1, 0x0B8A); pas_write(0x80 | 0x20 | 0x10 | 0x08 | 0x01, 0xF8A); pas_write(0x01 | 0x02 | 0x04 | 0x10 /* * | * 0x80 */ , 0xB88); pas_write(0x80 | (joystick ? 
0x40 : 0), 0xF388); if (pas_irq < 0 || pas_irq > 15) { printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq); hw_config->irq=-1; ok = 0; } else { int_ptrs = pas_read(0xF38A); int_ptrs = (int_ptrs & 0xf0) | irq_bits[pas_irq]; pas_write(int_ptrs, 0xF38A); if (!irq_bits[pas_irq]) { printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq); hw_config->irq=-1; ok = 0; } else { if (request_irq(pas_irq, pasintr, 0, "PAS16",hw_config) < 0) { printk(KERN_ERR "PAS16: Cannot allocate IRQ %d\n",pas_irq); hw_config->irq=-1; ok = 0; } } } if (hw_config->dma < 0 || hw_config->dma > 7) { printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma); hw_config->dma=-1; ok = 0; } else { pas_write(dma_bits[hw_config->dma], 0xF389); if (!dma_bits[hw_config->dma]) { printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma); hw_config->dma=-1; ok = 0; } else { if (sound_alloc_dma(hw_config->dma, "PAS16")) { printk(KERN_ERR "pas2_card.c: Can't allocate DMA channel\n"); hw_config->dma=-1; ok = 0; } } } /* * This fixes the timing problems of the PAS due to the Symphony chipset * as per Media Vision. Only define this if your PAS doesn't work correctly. */ if(symphony) { outb((0x05), 0xa8); outb((0x60), 0xa9); } if(broken_bus_clock) pas_write(0x01 | 0x10 | 0x20 | 0x04, 0x8388); else /* * pas_write(0x01, 0x8388); */ pas_write(0x01 | 0x10 | 0x20, 0x8388); pas_write(0x18, 0x838A); /* ??? 
*/ pas_write(0x20 | 0x01, 0x0B8A); /* Mute off, filter = 17.897 kHz */ pas_write(8, 0xBF8A); mix_write(0x80 | 5, 0x078B); mix_write(5, 0x078B); { struct address_info *sb_config; sb_config = &cfg2; if (sb_config->io_base) { unsigned char irq_dma; /* * Turn on Sound Blaster compatibility * bit 1 = SB emulation * bit 0 = MPU401 emulation (CDPC only :-( ) */ pas_write(0x02, 0xF788); /* * "Emulation address" */ pas_write((sb_config->io_base >> 4) & 0x0f, 0xF789); pas_sb_base = sb_config->io_base; if (!sb_dma_bits[sb_config->dma]) printk(KERN_ERR "PAS16 Warning: Invalid SB DMA %d\n\n", sb_config->dma); if (!sb_irq_bits[sb_config->irq]) printk(KERN_ERR "PAS16 Warning: Invalid SB IRQ %d\n\n", sb_config->irq); irq_dma = sb_dma_bits[sb_config->dma] | sb_irq_bits[sb_config->irq]; pas_write(irq_dma, 0xFB8A); } else pas_write(0x00, 0xF788); } if (!ok) printk(KERN_WARNING "PAS16: Driver not enabled\n"); return ok; } static int __init detect_pas_hw(struct address_info *hw_config) { unsigned char board_id, foo; /* * WARNING: Setting an option like W:1 or so that disables warm boot reset * of the card will screw up this detect code something fierce. Adding code * to handle this means possibly interfering with other cards on the bus if * you have something on base port 0x388. SO be forewarned. */ outb((0xBC), 0x9A01); /* Activate first board */ outb((hw_config->io_base >> 2), 0x9A01); /* Set base address */ pas_translate_code = hw_config->io_base - 0x388; pas_write(1, 0xBF88); /* Select one wait states */ board_id = pas_read(0x0B8B); if (board_id == 0xff) return 0; /* * We probably have a PAS-series board, now check for a PAS16-series board * by trying to change the board revision bits. PAS16-series hardware won't * let you do this - the bits are read-only. 
*/ foo = board_id ^ 0xe0; pas_write(foo, 0x0B8B); foo = pas_read(0x0B8B); pas_write(board_id, 0x0B8B); if (board_id != foo) return 0; pas_model = pas_read(0xFF88); return pas_model; } static void __init attach_pas_card(struct address_info *hw_config) { pas_irq = hw_config->irq; if (detect_pas_hw(hw_config)) { if ((pas_model = pas_read(0xFF88))) { char temp[100]; if (pas_model < 0 || pas_model >= ARRAY_SIZE(pas_model_names)) { printk(KERN_ERR "pas2 unrecognized model.\n"); return; } sprintf(temp, "%s rev %d", pas_model_names[(int) pas_model], pas_read(0x2789)); conf_printf(temp, hw_config); } if (config_pas_hw(hw_config)) { pas_pcm_init(hw_config); pas_midi_init(); pas_init_mixer(); } } } static inline int __init probe_pas(struct address_info *hw_config) { return detect_pas_hw(hw_config); } static void __exit unload_pas(struct address_info *hw_config) { extern int pas_audiodev; extern int pas2_mididev; if (hw_config->dma>0) sound_free_dma(hw_config->dma); if (hw_config->irq>0) free_irq(hw_config->irq, hw_config); if(pas_audiodev!=-1) sound_unload_mixerdev(audio_devs[pas_audiodev]->mixer_dev); if(pas2_mididev!=-1) sound_unload_mididev(pas2_mididev); if(pas_audiodev!=-1) sound_unload_audiodev(pas_audiodev); } static int __initdata io = -1; static int __initdata irq = -1; static int __initdata dma = -1; static int __initdata dma16 = -1; /* Set this for modules that need it */ static int __initdata sb_io = 0; static int __initdata sb_irq = -1; static int __initdata sb_dma = -1; static int __initdata sb_dma16 = -1; module_param(io, int, 0); module_param(irq, int, 0); module_param(dma, int, 0); module_param(dma16, int, 0); module_param(sb_io, int, 0); module_param(sb_irq, int, 0); module_param(sb_dma, int, 0); module_param(sb_dma16, int, 0); module_param(joystick, bool, 0); module_param(symphony, bool, 0); module_param(broken_bus_clock, bool, 0); MODULE_LICENSE("GPL"); static int __init init_pas2(void) { printk(KERN_INFO "Pro Audio Spectrum driver Copyright (C) by Hannu 
Savolainen 1993-1996\n"); cfg.io_base = io; cfg.irq = irq; cfg.dma = dma; cfg.dma2 = dma16; cfg2.io_base = sb_io; cfg2.irq = sb_irq; cfg2.dma = sb_dma; cfg2.dma2 = sb_dma16; if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) { printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n"); return -EINVAL; } if (!probe_pas(&cfg)) return -ENODEV; attach_pas_card(&cfg); return 0; } static void __exit cleanup_pas2(void) { unload_pas(&cfg); } module_init(init_pas2); module_exit(cleanup_pas2); #ifndef MODULE static int __init setup_pas2(char *str) { /* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, sb_dma2 */ int ints[9]; str = get_options(str, ARRAY_SIZE(ints), ints); io = ints[1]; irq = ints[2]; dma = ints[3]; dma16 = ints[4]; sb_io = ints[5]; sb_irq = ints[6]; sb_dma = ints[7]; sb_dma16 = ints[8]; return 1; } __setup("pas2=", setup_pas2); #endif
gpl-2.0
perkarom/Shark
fs/jfs/jfs_imap.c
2720
86272
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * jfs_imap.c: inode allocation map manager * * Serialization: * Each AG has a simple lock which is used to control the serialization of * the AG level lists. This lock should be taken first whenever an AG * level list will be modified or accessed. * * Each IAG is locked by obtaining the buffer for the IAG page. * * There is also a inode lock for the inode map inode. A read lock needs to * be taken whenever an IAG is read from the map or the global level * information is read. A write lock needs to be taken whenever the global * level information is modified or an atomic operation needs to be used. * * If more than one IAG is read at one time, the read lock may not * be given up until all of the IAG's are read. Otherwise, a deadlock * may occur when trying to obtain the read lock while another thread * holding the read lock is waiting on the IAG already being held. * * The control page of the inode map is read into memory by diMount(). * Thereafter it should only be modified in memory and then it will be * written out when the filesystem is unmounted by diUnmount(). 
*/ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/slab.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" #include "jfs_dinode.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_debug.h" /* * imap locks */ /* iag free list lock */ #define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock) #define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock) #define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock) /* per ag iag list locks */ #define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index])) #define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno]) #define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno]) /* * forward references */ static int diAllocAG(struct inomap *, int, bool, struct inode *); static int diAllocAny(struct inomap *, int, bool, struct inode *); static int diAllocBit(struct inomap *, struct iag *, int); static int diAllocExt(struct inomap *, int, struct inode *); static int diAllocIno(struct inomap *, int, struct inode *); static int diFindFree(u32, int); static int diNewExt(struct inomap *, struct iag *, int); static int diNewIAG(struct inomap *, int *, int, struct metapage **); static void duplicateIXtree(struct super_block *, s64, int, s64 *); static int diIAGRead(struct inomap * imap, int, struct metapage **); static int copy_from_dinode(struct dinode *, struct inode *); static void copy_to_dinode(struct dinode *, struct inode *); /* * NAME: diMount() * * FUNCTION: initialize the incore inode map control structures for * a fileset or aggregate init time. * * the inode map's control structure (dinomap) is * brought in from disk and placed in virtual memory. * * PARAMETERS: * ipimap - pointer to inode map inode for the aggregate or fileset. * * RETURN VALUES: * 0 - success * -ENOMEM - insufficient free virtual memory. * -EIO - i/o error. 
*/ int diMount(struct inode *ipimap) { struct inomap *imap; struct metapage *mp; int index; struct dinomap_disk *dinom_le; /* * allocate/initialize the in-memory inode map control structure */ /* allocate the in-memory inode map control structure. */ imap = kmalloc(sizeof(struct inomap), GFP_KERNEL); if (imap == NULL) { jfs_err("diMount: kmalloc returned NULL!"); return -ENOMEM; } /* read the on-disk inode map control structure. */ mp = read_metapage(ipimap, IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage, PSIZE, 0); if (mp == NULL) { kfree(imap); return -EIO; } /* copy the on-disk version to the in-memory version. */ dinom_le = (struct dinomap_disk *) mp->data; imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag); imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag); atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos)); atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree)); imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext); imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext); for (index = 0; index < MAXAG; index++) { imap->im_agctl[index].inofree = le32_to_cpu(dinom_le->in_agctl[index].inofree); imap->im_agctl[index].extfree = le32_to_cpu(dinom_le->in_agctl[index].extfree); imap->im_agctl[index].numinos = le32_to_cpu(dinom_le->in_agctl[index].numinos); imap->im_agctl[index].numfree = le32_to_cpu(dinom_le->in_agctl[index].numfree); } /* release the buffer. */ release_metapage(mp); /* * allocate/initialize inode allocation map locks */ /* allocate and init iag free list lock */ IAGFREE_LOCK_INIT(imap); /* allocate and init ag list locks */ for (index = 0; index < MAXAG; index++) { AG_LOCK_INIT(imap, index); } /* bind the inode map inode and inode map control structure * to each other. */ imap->im_ipimap = ipimap; JFS_IP(ipimap)->i_imap = imap; return (0); } /* * NAME: diUnmount() * * FUNCTION: write to disk the incore inode map control structures for * a fileset or aggregate at unmount time. 
* * PARAMETERS: * ipimap - pointer to inode map inode for the aggregate or fileset. * * RETURN VALUES: * 0 - success * -ENOMEM - insufficient free virtual memory. * -EIO - i/o error. */ int diUnmount(struct inode *ipimap, int mounterror) { struct inomap *imap = JFS_IP(ipimap)->i_imap; /* * update the on-disk inode map control structure */ if (!(mounterror || isReadOnly(ipimap))) diSync(ipimap); /* * Invalidate the page cache buffers */ truncate_inode_pages(ipimap->i_mapping, 0); /* * free in-memory control structure */ kfree(imap); return (0); } /* * diSync() */ int diSync(struct inode *ipimap) { struct dinomap_disk *dinom_le; struct inomap *imp = JFS_IP(ipimap)->i_imap; struct metapage *mp; int index; /* * write imap global conrol page */ /* read the on-disk inode map control structure */ mp = get_metapage(ipimap, IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage, PSIZE, 0); if (mp == NULL) { jfs_err("diSync: get_metapage failed!"); return -EIO; } /* copy the in-memory version to the on-disk version */ dinom_le = (struct dinomap_disk *) mp->data; dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag); dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag); dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos)); dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree)); dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext); dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext); for (index = 0; index < MAXAG; index++) { dinom_le->in_agctl[index].inofree = cpu_to_le32(imp->im_agctl[index].inofree); dinom_le->in_agctl[index].extfree = cpu_to_le32(imp->im_agctl[index].extfree); dinom_le->in_agctl[index].numinos = cpu_to_le32(imp->im_agctl[index].numinos); dinom_le->in_agctl[index].numfree = cpu_to_le32(imp->im_agctl[index].numfree); } /* write out the control structure */ write_metapage(mp); /* * write out dirty pages of imap */ filemap_write_and_wait(ipimap->i_mapping); diWriteSpecial(ipimap, 0); return (0); } /* * NAME: diRead() * * FUNCTION: 
initialize an incore inode from disk. * * on entry, the specifed incore inode should itself * specify the disk inode number corresponding to the * incore inode (i.e. i_number should be initialized). * * this routine handles incore inode initialization for * both "special" and "regular" inodes. special inodes * are those required early in the mount process and * require special handling since much of the file system * is not yet initialized. these "special" inodes are * identified by a NULL inode map inode pointer and are * actually initialized by a call to diReadSpecial(). * * for regular inodes, the iag describing the disk inode * is read from disk to determine the inode extent address * for the disk inode. with the inode extent address in * hand, the page of the extent that contains the disk * inode is read and the disk inode is copied to the * incore inode. * * PARAMETERS: * ip - pointer to incore inode to be initialized from disk. * * RETURN VALUES: * 0 - success * -EIO - i/o error. 
* -ENOMEM - insufficient memory * */ int diRead(struct inode *ip) { struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); int iagno, ino, extno, rc; struct inode *ipimap; struct dinode *dp; struct iag *iagp; struct metapage *mp; s64 blkno, agstart; struct inomap *imap; int block_offset; int inodes_left; unsigned long pageno; int rel_inode; jfs_info("diRead: ino = %ld", ip->i_ino); ipimap = sbi->ipimap; JFS_IP(ip)->ipimap = ipimap; /* determine the iag number for this inode (number) */ iagno = INOTOIAG(ip->i_ino); /* read the iag */ imap = JFS_IP(ipimap)->i_imap; IREAD_LOCK(ipimap, RDWRLOCK_IMAP); rc = diIAGRead(imap, iagno, &mp); IREAD_UNLOCK(ipimap); if (rc) { jfs_err("diRead: diIAGRead returned %d", rc); return (rc); } iagp = (struct iag *) mp->data; /* determine inode extent that holds the disk inode */ ino = ip->i_ino & (INOSPERIAG - 1); extno = ino >> L2INOSPEREXT; if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) || (addressPXD(&iagp->inoext[extno]) == 0)) { release_metapage(mp); return -ESTALE; } /* get disk block number of the page within the inode extent * that holds the disk inode. 
*/ blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage); /* get the ag for the iag */ agstart = le64_to_cpu(iagp->agstart); release_metapage(mp); rel_inode = (ino & (INOSPERPAGE - 1)); pageno = blkno >> sbi->l2nbperpage; if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) { /* * OS/2 didn't always align inode extents on page boundaries */ inodes_left = (sbi->nbperpage - block_offset) << sbi->l2niperblk; if (rel_inode < inodes_left) rel_inode += block_offset << sbi->l2niperblk; else { pageno += 1; rel_inode -= inodes_left; } } /* read the page of disk inode */ mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1); if (!mp) { jfs_err("diRead: read_metapage failed"); return -EIO; } /* locate the disk inode requested */ dp = (struct dinode *) mp->data; dp += rel_inode; if (ip->i_ino != le32_to_cpu(dp->di_number)) { jfs_error(ip->i_sb, "diRead: i_ino != di_number"); rc = -EIO; } else if (le32_to_cpu(dp->di_nlink) == 0) rc = -ESTALE; else /* copy the disk inode to the in-memory inode */ rc = copy_from_dinode(dp, ip); release_metapage(mp); /* set the ag for the inode */ JFS_IP(ip)->agstart = agstart; JFS_IP(ip)->active_ag = -1; return (rc); } /* * NAME: diReadSpecial() * * FUNCTION: initialize a 'special' inode from disk. * * this routines handles aggregate level inodes. The * inode cache cannot differentiate between the * aggregate inodes and the filesystem inodes, so we * handle these here. We don't actually use the aggregate * inode map, since these inodes are at a fixed location * and in some cases the aggregate inode map isn't initialized * yet. * * PARAMETERS: * sb - filesystem superblock * inum - aggregate inode number * secondary - 1 if secondary aggregate inode table * * RETURN VALUES: * new inode - success * NULL - i/o error. 
 */
struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	uint address;
	struct dinode *dp;
	struct inode *ip;
	struct metapage *mp;

	/* allocate a fresh VFS inode; it is NOT looked up in the inode
	 * cache, since aggregate inodes must stay out of the fileset
	 * inode space (see the hlist_add_fake() note at the bottom).
	 */
	ip = new_inode(sb);
	if (ip == NULL) {
		jfs_err("diReadSpecial: new_inode returned NULL!");
		return ip;
	}

	if (secondary) {
		/* secondary aggregate inode table: located via the ait2 pxd */
		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
		JFS_IP(ip)->ipimap = sbi->ipaimap2;
	} else {
		/* primary aggregate inode table at its fixed offset */
		address = AITBL_OFF >> L2PSIZE;
		JFS_IP(ip)->ipimap = sbi->ipaimap;
	}

	/* aggregate inodes all live within the first inode extent */
	ASSERT(inum < INOSPEREXT);

	ip->i_ino = inum;

	address += inum >> 3;	/* 8 inodes per 4K page */

	/* read the page of fixed disk inode (AIT) in raw mode */
	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
	if (mp == NULL) {
		ip->i_nlink = 1;	/* Don't want iput() deleting it */
		iput(ip);
		return (NULL);
	}

	/* get the pointer to the disk inode of interest */
	dp = (struct dinode *) (mp->data);
	dp += inum % 8;		/* 8 inodes per 4K page */

	/* copy on-disk inode to in-memory inode */
	if ((copy_from_dinode(dp, ip)) != 0) {
		/* handle bad return by returning NULL for ip */
		ip->i_nlink = 1;	/* Don't want iput() deleting it */
		iput(ip);
		/* release the page */
		release_metapage(mp);
		return (NULL);
	}

	ip->i_mapping->a_ops = &jfs_metapage_aops;
	mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);

	/* Allocations to metadata inodes should not affect quotas */
	ip->i_flags |= S_NOQUOTA;

	/* the fileset inode of the primary AIT carries the aggregate's
	 * generation generator and inode stamp; cache them in the sb info.
	 */
	if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
		sbi->gengen = le32_to_cpu(dp->di_gengen);
		sbi->inostamp = le32_to_cpu(dp->di_inostamp);
	}

	/* release the page */
	release_metapage(mp);

	/*
	 * __mark_inode_dirty expects inodes to be hashed.  Since we don't
	 * want special inodes in the fileset inode space, we make them
	 * appear hashed, but do not put on any lists.  hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&ip->i_hash);

	return (ip);
}

/*
 * NAME:	diWriteSpecial()
 *
 * FUNCTION:	Write the special inode to disk
 *
 * PARAMETERS:
 *	ip	- special inode
 *	secondary - 1 if secondary aggregate inode table
 *
 * RETURN VALUES: none
 */
void diWriteSpecial(struct inode *ip, int secondary)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	uint address;
	struct dinode *dp;
	ino_t inum = ip->i_ino;
	struct metapage *mp;

	/* locate the aggregate inode table, mirroring diReadSpecial() */
	if (secondary)
		address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
	else
		address = AITBL_OFF >> L2PSIZE;

	ASSERT(inum < INOSPEREXT);

	address += inum >> 3;	/* 8 inodes per 4K page */

	/* read the page of fixed disk inode (AIT) in raw mode */
	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
	if (mp == NULL) {
		jfs_err("diWriteSpecial: failed to read aggregate inode "
			"extent!");
		return;
	}

	/* get the pointer to the disk inode of interest */
	dp = (struct dinode *) (mp->data);
	dp += inum % 8;		/* 8 inodes per 4K page */

	/* copy on-disk inode to in-memory inode */
	copy_to_dinode(dp, ip);
	/* 288 appears to be the size of the inline xtree root area of
	 * struct dinode — NOTE(review): confirm against the dinode layout.
	 */
	memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);

	/* keep the on-disk generation generator in sync for the
	 * fileset inode (the value diReadSpecial() cached at mount).
	 */
	if (inum == FILESYSTEM_I)
		dp->di_gengen = cpu_to_le32(sbi->gengen);

	/* write the page */
	write_metapage(mp);
}

/*
 * NAME:	diFreeSpecial()
 *
 * FUNCTION:	Free allocated space for special inode
 */
void diFreeSpecial(struct inode *ip)
{
	if (ip == NULL) {
		jfs_err("diFreeSpecial called with NULL ip!");
		return;
	}
	/* flush and drop any cached metapages before releasing the inode */
	filemap_write_and_wait(ip->i_mapping);
	truncate_inode_pages(ip->i_mapping, 0);
	iput(ip);
}

/*
 * NAME:	diWrite()
 *
 * FUNCTION:	write the on-disk inode portion of the in-memory inode
 *	to its corresponding on-disk inode.
 *
 *	on entry, the specifed incore inode should itself
 *	specify the disk inode number corresponding to the
 *	incore inode (i.e. i_number should be initialized).
 *
 *	the inode contains the inode extent address for the disk
 *	inode.
 *	with the inode extent address in hand, the
 *	page of the extent that contains the disk inode is
 *	read and the disk inode portion of the incore inode
 *	is copied to the disk inode.
 *
 * PARAMETERS:
 *	tid	- transaction id
 *	ip	- pointer to incore inode to be written to the inode extent.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 */
int diWrite(tid_t tid, struct inode *ip)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int rc = 0;
	s32 ino;
	struct dinode *dp;
	s64 blkno;
	int block_offset;
	int inodes_left;
	struct metapage *mp;
	unsigned long pageno;
	int rel_inode;
	int dioffset;
	struct inode *ipimap;
	uint type;
	lid_t lid;
	struct tlock *ditlck, *tlck;
	struct linelock *dilinelock, *ilinelock;
	struct lv *lv;
	int n;

	ipimap = jfs_ip->ipimap;

	ino = ip->i_ino & (INOSPERIAG - 1);

	/* sanity check the cached inode extent descriptor before trusting it */
	if (!addressPXD(&(jfs_ip->ixpxd)) ||
	    (lengthPXD(&(jfs_ip->ixpxd)) !=
	     JFS_IP(ipimap)->i_imap->im_nbperiext)) {
		jfs_error(ip->i_sb, "diWrite: ixpxd invalid");
		return -EIO;
	}

	/*
	 * read the page of disk inode containing the specified inode:
	 */
	/* compute the block address of the page */
	blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);

	rel_inode = (ino & (INOSPERPAGE - 1));
	pageno = blkno >> sbi->l2nbperpage;

	if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
		/*
		 * OS/2 didn't always align inode extents on page boundaries
		 */
		inodes_left =
		    (sbi->nbperpage - block_offset) << sbi->l2niperblk;
		if (rel_inode < inodes_left)
			rel_inode += block_offset << sbi->l2niperblk;
		else {
			pageno += 1;
			rel_inode -= inodes_left;
		}
	}
	/* read the page of disk inode */
      retry:
	mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
	if (!mp)
		return -EIO;

	/* get the pointer to the disk inode */
	dp = (struct dinode *) mp->data;
	dp += rel_inode;

	dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;

	/*
	 * acquire transaction lock on the on-disk inode;
	 * N.B. tlock is acquired on ipimap not ip;
	 *
	 * a NULL return means the tlock could not be granted right now;
	 * release the page and re-read it, then try again.
	 */
	if ((ditlck =
	     txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
		goto retry;
	dilinelock = (struct linelock *) & ditlck->lock;

	/*
	 *	copy btree root from in-memory inode to on-disk inode
	 *
	 * (tlock is taken from inline B+-tree root in in-memory
	 * inode when the B+-tree root is updated, which is pointed
	 * by jfs_ip->blid as well as being on tx tlock list)
	 *
	 * further processing of btree root is based on the copy
	 * in in-memory inode, where txLog() will log from, and,
	 * for xtree root, txUpdateMap() will update map and reset
	 * XAD_NEW bit;
	 */

	if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
		/*
		 * This is the special xtree inside the directory for storing
		 * the directory table
		 */
		xtpage_t *p, *xp;
		xad_t *xad;

		jfs_ip->xtlid = 0;

		/* redirect the tlock to this metapage as a btree root lock */
		tlck = lid_to_tlock(lid);
		assert(tlck->type & tlckXTREE);
		tlck->type |= tlckBTROOT;
		tlck->mp = mp;
		ilinelock = (struct linelock *) & tlck->lock;

		/*
		 * copy xtree root from inode to dinode:
		 * only the slot ranges recorded in the linelock are copied.
		 */
		p = &jfs_ip->i_xtroot;
		xp = (xtpage_t *) &dp->di_dirtable;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
			       lv->length << L2XTSLOTSIZE);
		}

		/* reset on-disk (metadata page) xtree XAD_NEW bit */
		xad = &xp->xad[XTENTRYSTART];
		for (n = XTENTRYSTART;
		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
	}

	/* no pending btree-root update for this inode? skip to inline data */
	if ((lid = jfs_ip->blid) == 0)
		goto inlineData;
	jfs_ip->blid = 0;

	tlck = lid_to_tlock(lid);
	type = tlck->type;
	tlck->type |= tlckBTROOT;
	tlck->mp = mp;
	ilinelock = (struct linelock *) & tlck->lock;

	/*
	 *	regular file: 16 byte (XAD slot) granularity
	 */
	if (type & tlckXTREE) {
		xtpage_t *p, *xp;
		xad_t *xad;

		/*
		 * copy xtree root from inode to dinode:
		 */
		p = &jfs_ip->i_xtroot;
		xp = &dp->di_xtroot;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
			       lv->length << L2XTSLOTSIZE);
		}

		/* reset on-disk (metadata page) xtree XAD_NEW bit */
		xad = &xp->xad[XTENTRYSTART];
		for (n = XTENTRYSTART;
		     n < le16_to_cpu(xp->header.nextindex); n++, xad++)
			if (xad->flag & (XAD_NEW | XAD_EXTENDED))
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
	}
	/*
	 *	directory: 32 byte (directory entry slot) granularity
	 */
	else if (type & tlckDTREE) {
		dtpage_t *p, *xp;

		/*
		 * copy dtree root from inode to dinode:
		 */
		p = (dtpage_t *) &jfs_ip->i_dtroot;
		xp = (dtpage_t *) & dp->di_dtroot;
		lv = ilinelock->lv;
		for (n = 0; n < ilinelock->index; n++, lv++) {
			memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
			       lv->length << L2DTSLOTSIZE);
		}
	} else {
		/* unexpected tlock type: log and continue (best effort) */
		jfs_err("diWrite: UFO tlock");
	}

      inlineData:
	/*
	 * copy inline symlink from in-memory inode to on-disk inode
	 */
	if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
		lv = & dilinelock->lv[dilinelock->index];
		/* symlink data occupies slots starting 2*128 bytes in */
		lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
		lv->length = 2;
		memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
		dilinelock->index++;
	}
	/*
	 * copy inline data from in-memory inode to on-disk inode:
	 * 128 byte slot granularity
	 */
	if (test_cflag(COMMIT_Inlineea, ip)) {
		lv = & dilinelock->lv[dilinelock->index];
		lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
		lv->length = 1;
		memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
		dilinelock->index++;

		clear_cflag(COMMIT_Inlineea, ip);
	}

	/*
	 *	lock/copy inode base: 128 byte slot granularity
	 */
	lv = & dilinelock->lv[dilinelock->index];
	lv->offset = dioffset >> L2INODESLOTSIZE;
	copy_to_dinode(dp, ip);
	if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
		/* 96 appears to be the size of the on-disk dirtable area —
		 * NOTE(review): confirm against struct dinode.
		 */
		lv->length = 2;
		memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
	} else
		lv->length = 1;
	dilinelock->index++;

	/* release the buffer holding the updated on-disk inode.
	 * the buffer will be later written by commit processing.
	 */
	write_metapage(mp);

	return (rc);
}

/*
 * NAME:	diFree(ip)
 *
 * FUNCTION:	free a specified inode from the inode working map
 *	for a fileset or aggregate.
 *
 *	if the inode to be freed represents the first (only)
 *	free inode within the iag, the iag will be placed on
 *	the ag free inode list.
 *
 *	freeing the inode will cause the inode extent to be
 *	freed if the inode is the only allocated inode within
 *	the extent.  in this case all the disk resource backing
 *	up the inode extent will be freed.  in addition, the iag
 *	will be placed on the ag extent free list if the extent
 *	is the first free extent in the iag.  if freeing the
 *	extent also means that no free inodes will exist for
 *	the iag, the iag will also be removed from the ag free
 *	inode list.
 *
 *	the iag describing the inode will be freed if the extent
 *	is to be freed and it is the only backed extent within
 *	the iag.  in this case, the iag will be removed from the
 *	ag free extent list and ag free inode list and placed on
 *	the inode map's free iag list.
 *
 *	a careful update approach is used to provide consistency
 *	in the face of updates to multiple buffers.  under this
 *	approach, all required buffers are obtained before making
 *	any updates and are held until all updates are complete.
 *
 * PARAMETERS:
 *	ip	- inode to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 */
int diFree(struct inode *ip)
{
	int rc;
	ino_t inum = ip->i_ino;
	struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
	struct metapage *mp, *amp, *bmp, *cmp, *dmp;
	int iagno, ino, extno, bitno, sword, agno;
	int back, fwd;
	u32 bitmap, mask;
	struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
	struct inomap *imap = JFS_IP(ipimap)->i_imap;
	pxd_t freepxd;
	tid_t tid;
	struct inode *iplist[3];
	struct tlock *tlck;
	struct pxd_lock *pxdlock;

	/*
	 * This is just to suppress compiler warnings.  The same logic that
	 * references these variables is used to initialize them.
	 */
	aiagp = biagp = ciagp = diagp = NULL;

	/* get the iag number containing the inode.
	 */
	iagno = INOTOIAG(inum);

	/* make sure that the iag is contained within
	 * the map.
	 */
	if (iagno >= imap->im_nextiag) {
		print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
			       imap, 32, 0);
		jfs_error(ip->i_sb,
			  "diFree: inum = %d, iagno = %d, nextiag = %d",
			  (uint) inum, iagno, imap->im_nextiag);
		return -EIO;
	}

	/* get the allocation group for this ino.
	 */
	agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));

	/* Lock the AG specific inode map information.
	 * lock ordering here is AG_LOCK first, then the imap read lock.
	 */
	AG_LOCK(imap, agno);

	/* Obtain read lock in imap inode.  Don't release it until we have
	 * read all of the IAG's that we are going to.
	 */
	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);

	/* read the iag.
	 */
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* get the inode number and extent number of the inode within
	 * the iag and the inode number within the extent.
	 */
	ino = inum & (INOSPERIAG - 1);
	extno = ino >> L2INOSPEREXT;
	bitno = ino & (INOSPEREXT - 1);
	mask = HIGHORDER >> bitno;

	/* a clear wmap bit means the inode is already free: report but
	 * continue, so the maps are still brought to a consistent state.
	 */
	if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
		jfs_error(ip->i_sb, "diFree: wmap shows inode already free");
	}

	if (!addressPXD(&iagp->inoext[extno])) {
		release_metapage(mp);
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		jfs_error(ip->i_sb, "diFree: invalid inoext");
		return -EIO;
	}

	/* compute the bitmap for the extent reflecting the freed inode.
	 */
	bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;

	if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
		release_metapage(mp);
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		jfs_error(ip->i_sb, "diFree: numfree > numinos");
		return -EIO;
	}

	/*
	 *	inode extent still has some inodes or below low water mark:
	 *	keep the inode extent;
	 *
	 * thresholds (96 free, or under 288 free with <= 25% free) keep a
	 * reserve of backed inodes in the ag before extents are released.
	 */
	if (bitmap ||
	    imap->im_agctl[agno].numfree < 96 ||
	    (imap->im_agctl[agno].numfree < 288 &&
	     (((imap->im_agctl[agno].numfree * 100) /
	       imap->im_agctl[agno].numinos) <= 25))) {
		/* if the iag currently has no free inodes (i.e.,
		 * the inode being freed is the first free inode of iag),
		 * insert the iag at head of the inode free list for the ag.
		 */
		if (iagp->nfreeinos == 0) {
			/* check if there are any iags on the ag inode
			 * free list.  if so, read the first one so that
			 * we can link the current iag onto the list at
			 * the head.
			 */
			if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
				/* read the iag that currently is the head
				 * of the list.
				 */
				if ((rc = diIAGRead(imap, fwd, &amp))) {
					IREAD_UNLOCK(ipimap);
					AG_UNLOCK(imap, agno);
					release_metapage(mp);
					return (rc);
				}
				aiagp = (struct iag *) amp->data;

				/* make current head point back to the iag.
				 */
				aiagp->inofreeback = cpu_to_le32(iagno);

				write_metapage(amp);
			}

			/* iag points forward to current head and iag
			 * becomes the new head of the list.
			 */
			iagp->inofreefwd =
			    cpu_to_le32(imap->im_agctl[agno].inofree);
			iagp->inofreeback = cpu_to_le32(-1);
			imap->im_agctl[agno].inofree = iagno;
		}
		IREAD_UNLOCK(ipimap);

		/* update the free inode summary map for the extent if
		 * freeing the inode means the extent will now have free
		 * inodes (i.e., the inode being freed is the first free
		 * inode of extent),
		 */
		if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
			sword = extno >> L2EXTSPERSUM;
			bitno = extno & (EXTSPERSUM - 1);
			iagp->inosmap[sword] &=
			    cpu_to_le32(~(HIGHORDER >> bitno));
		}

		/* update the bitmap.
		 */
		iagp->wmap[extno] = cpu_to_le32(bitmap);

		/* update the free inode counts at the iag, ag and
		 * map level.
		 */
		le32_add_cpu(&iagp->nfreeinos, 1);
		imap->im_agctl[agno].numfree += 1;
		atomic_inc(&imap->im_numfree);

		/* release the AG inode map lock
		 */
		AG_UNLOCK(imap, agno);

		/* write the iag */
		write_metapage(mp);

		return (0);
	}

	/*
	 *	inode extent has become free and above low water mark:
	 *	free the inode extent;
	 */

	/*
	 *	prepare to update iag list(s) (careful update step 1):
	 *	acquire every buffer that will be modified BEFORE any
	 *	update is made, so a mid-flight failure leaves the lists
	 *	untouched (see error_out).
	 */
	amp = bmp = cmp = dmp = NULL;
	fwd = back = -1;

	/* check if the iag currently has no free extents.  if so,
	 * it will be placed on the head of the ag extent free list.
	 */
	if (iagp->nfreeexts == 0) {
		/* check if the ag extent free list has any iags.
		 * if so, read the iag at the head of the list now.
		 * this (head) iag will be updated later to reflect
		 * the addition of the current iag at the head of
		 * the list.
		 */
		if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
			if ((rc = diIAGRead(imap, fwd, &amp)))
				goto error_out;
			aiagp = (struct iag *) amp->data;
		}
	} else {
		/* iag has free extents.  check if the addition of a free
		 * extent will cause all extents to be free within this
		 * iag.  if so, the iag will be removed from the ag extent
		 * free list and placed on the inode map's free iag list.
		 */
		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
			/* in preparation for removing the iag from the
			 * ag extent free list, read the iags preceding
			 * and following the iag on the ag extent free
			 * list.
			 */
			if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
				if ((rc = diIAGRead(imap, fwd, &amp)))
					goto error_out;
				aiagp = (struct iag *) amp->data;
			}

			if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
				if ((rc = diIAGRead(imap, back, &bmp)))
					goto error_out;
				biagp = (struct iag *) bmp->data;
			}
		}
	}

	/* remove the iag from the ag inode free list if freeing
	 * this extent cause the iag to have no free inodes.
	 */
	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
		int inofreeback = le32_to_cpu(iagp->inofreeback);
		int inofreefwd = le32_to_cpu(iagp->inofreefwd);

		/* in preparation for removing the iag from the
		 * ag inode free list, read the iags preceding
		 * and following the iag on the ag inode free
		 * list.  before reading these iags, we must make
		 * sure that we already don't have them in hand
		 * from up above, since re-reading an iag (buffer)
		 * we are currently holding would cause a deadlock.
		 */
		if (inofreefwd >= 0) {

			if (inofreefwd == fwd)
				ciagp = (struct iag *) amp->data;
			else if (inofreefwd == back)
				ciagp = (struct iag *) bmp->data;
			else {
				if ((rc =
				     diIAGRead(imap, inofreefwd, &cmp)))
					goto error_out;
				ciagp = (struct iag *) cmp->data;
			}
			assert(ciagp != NULL);
		}

		if (inofreeback >= 0) {
			if (inofreeback == fwd)
				diagp = (struct iag *) amp->data;
			else if (inofreeback == back)
				diagp = (struct iag *) bmp->data;
			else {
				if ((rc =
				     diIAGRead(imap, inofreeback, &dmp)))
					goto error_out;
				diagp = (struct iag *) dmp->data;
			}
			assert(diagp != NULL);
		}
	}

	IREAD_UNLOCK(ipimap);

	/*
	 * invalidate any page of the inode extent freed from buffer cache;
	 */
	freepxd = iagp->inoext[extno];
	invalidate_pxd_metapages(ip, freepxd);

	/*
	 *	update iag list(s) (careful update step 2):
	 *	all buffers are in hand, so the updates below cannot fail.
	 */
	/* add the iag to the ag extent free list if this is the
	 * first free extent for the iag.
	 */
	if (iagp->nfreeexts == 0) {
		if (fwd >= 0)
			aiagp->extfreeback = cpu_to_le32(iagno);

		iagp->extfreefwd =
		    cpu_to_le32(imap->im_agctl[agno].extfree);
		iagp->extfreeback = cpu_to_le32(-1);
		imap->im_agctl[agno].extfree = iagno;
	} else {
		/* remove the iag from the ag extent list if all extents
		 * are now free and place it on the inode map iag free list.
		 */
		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
			if (fwd >= 0)
				aiagp->extfreeback = iagp->extfreeback;

			if (back >= 0)
				biagp->extfreefwd = iagp->extfreefwd;
			else
				imap->im_agctl[agno].extfree =
				    le32_to_cpu(iagp->extfreefwd);

			iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);

			IAGFREE_LOCK(imap);
			iagp->iagfree = cpu_to_le32(imap->im_freeiag);
			imap->im_freeiag = iagno;
			IAGFREE_UNLOCK(imap);
		}
	}

	/* remove the iag from the ag inode free list if freeing
	 * this extent causes the iag to have no free inodes.
	 */
	if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
		if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
			ciagp->inofreeback = iagp->inofreeback;

		if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
			diagp->inofreefwd = iagp->inofreefwd;
		else
			imap->im_agctl[agno].inofree =
			    le32_to_cpu(iagp->inofreefwd);

		iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
	}

	/* update the inode extent address and working map
	 * to reflect the free extent.
	 * the permanent map should have been updated already
	 * for the inode being freed.
	 */
	if (iagp->pmap[extno] != 0) {
		jfs_error(ip->i_sb, "diFree: the pmap does not show inode free");
	}
	iagp->wmap[extno] = 0;
	PXDlength(&iagp->inoext[extno], 0);
	PXDaddress(&iagp->inoext[extno], 0);

	/* update the free extent and free inode summary maps
	 * to reflect the freed extent.
	 * the inode summary map is marked to indicate no inodes
	 * available for the freed extent.
	 */
	sword = extno >> L2EXTSPERSUM;
	bitno = extno & (EXTSPERSUM - 1);
	mask = HIGHORDER >> bitno;
	iagp->inosmap[sword] |= cpu_to_le32(mask);
	iagp->extsmap[sword] &= cpu_to_le32(~mask);

	/* update the number of free inodes and number of free extents
	 * for the iag.
	 */
	le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
	le32_add_cpu(&iagp->nfreeexts, 1);

	/* update the number of free inodes and backed inodes
	 * at the ag and inode map level.
	 */
	imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
	imap->im_agctl[agno].numinos -= INOSPEREXT;
	atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
	atomic_sub(INOSPEREXT, &imap->im_numinos);

	if (amp)
		write_metapage(amp);
	if (bmp)
		write_metapage(bmp);
	if (cmp)
		write_metapage(cmp);
	if (dmp)
		write_metapage(dmp);

	/*
	 * start transaction to update block allocation map
	 * for the inode extent freed;
	 *
	 * N.B. AG_LOCK is released and iag will be released below, and
	 * other thread may allocate inode from/reusing the ixad freed
	 * BUT with new/different backing inode extent from the extent
	 * to be freed by the transaction;
	 */
	tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
	mutex_lock(&JFS_IP(ipimap)->commit_mutex);

	/* acquire tlock of the iag page of the freed ixad
	 * to force the page NOHOMEOK (even though no data is
	 * logged from the iag page) until NOREDOPAGE|FREEXTENT log
	 * for the free of the extent is committed;
	 * write FREEXTENT|NOREDOPAGE log record
	 * N.B. linelock is overlaid as freed extent descriptor;
	 */
	tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
	pxdlock = (struct pxd_lock *) & tlck->lock;
	pxdlock->flag = mlckFREEPXD;
	pxdlock->pxd = freepxd;
	pxdlock->index = 1;

	write_metapage(mp);

	iplist[0] = ipimap;

	/*
	 * logredo needs the IAG number and IAG extent index in order
	 * to ensure that the IMap is consistent.  The least disruptive
	 * way to pass these values through to the transaction manager
	 * is in the iplist array.
	 *
	 * It's not pretty, but it works.
	 */
	iplist[1] = (struct inode *) (size_t)iagno;
	iplist[2] = (struct inode *) (size_t)extno;

	rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);

	txEnd(tid);
	mutex_unlock(&JFS_IP(ipimap)->commit_mutex);

	/* unlock the AG inode map information */
	AG_UNLOCK(imap, agno);

	return (0);

      error_out:
	/* careful-update failure path: nothing has been modified yet,
	 * so simply release every buffer and lock acquired so far.
	 */
	IREAD_UNLOCK(ipimap);

	if (amp)
		release_metapage(amp);
	if (bmp)
		release_metapage(bmp);
	if (cmp)
		release_metapage(cmp);
	if (dmp)
		release_metapage(dmp);

	AG_UNLOCK(imap, agno);

	release_metapage(mp);

	return (rc);
}

/*
 * There are several places in the diAlloc* routines where we initialize
 * the inode.
 */
/* Record the results of a successful disk-inode allocation in the
 * in-memory inode: inode number, backing extent descriptor, and the
 * ag start cached from the iag.
 */
static inline void diInitInode(struct inode *ip, int iagno, int ino,
			       int extno, struct iag * iagp)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);

	ip->i_ino = (iagno << L2INOSPERIAG) + ino;
	jfs_ip->ixpxd = iagp->inoext[extno];
	jfs_ip->agstart = le64_to_cpu(iagp->agstart);
	jfs_ip->active_ag = -1;
}

/*
 * NAME:	diAlloc(pip,dir,ip)
 *
 * FUNCTION:	allocate a disk inode from the inode working map
 *	for a fileset or aggregate.
 *
 * PARAMETERS:
 *	pip	- pointer to incore inode for the parent inode.
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to a new inode
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
	int rc, ino, iagno, addext, extno, bitno, sword;
	int nwords, rem, i, agno;
	u32 mask, inosmap, extsmap;
	struct inode *ipimap;
	struct metapage *mp;
	ino_t inum;
	struct iag *iagp;
	struct inomap *imap;

	/* get the pointers to the inode map inode and the
	 * corresponding imap control structure.
	 */
	ipimap = JFS_SBI(pip->i_sb)->ipimap;
	imap = JFS_IP(ipimap)->i_imap;
	JFS_IP(ip)->ipimap = ipimap;
	JFS_IP(ip)->fileset = FILESYSTEM_I;

	/* for a directory, the allocation policy is to start
	 * at the ag level using the preferred ag.
	 */
	if (dir) {
		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
		AG_LOCK(imap, agno);
		goto tryag;
	}

	/* for files, the policy starts off by trying to allocate from
	 * the same iag containing the parent disk inode:
	 * try to allocate the new disk inode close to the parent disk
	 * inode, using parent disk inode number + 1 as the allocation
	 * hint.  (we use a left-to-right policy to attempt to avoid
	 * moving backward on the disk.)  compute the hint within the
	 * file system and the iag.
	 */

	/* get the ag number of this iag */
	agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));

	if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
		/*
		 * There is an open file actively growing.  We want to
		 * allocate new inodes from a different ag to avoid
		 * fragmentation problems.
		 */
		agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
		AG_LOCK(imap, agno);
		goto tryag;
	}

	inum = pip->i_ino + 1;
	ino = inum & (INOSPERIAG - 1);

	/* back off the hint if it is outside of the iag */
	if (ino == 0)
		inum = pip->i_ino;

	/* lock the AG inode map information */
	AG_LOCK(imap, agno);

	/* Get read lock on imap inode */
	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);

	/* get the iag number and read the iag */
	iagno = INOTOIAG(inum);
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(ipimap);
		AG_UNLOCK(imap, agno);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* determine if new inode extent is allowed to be added to the iag.
	 * new inode extent can be added to the iag if the ag
	 * has less than 32 free disk inodes and the iag has free extents.
	 */
	addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);

	/*
	 *	try to allocate from the IAG
	 */
	/* check if the inode may be allocated from the iag
	 * (i.e. the inode has free inodes or new extent can be added).
	 */
	if (iagp->nfreeinos || addext) {
		/* determine the extent number of the hint.
		 */
		extno = ino >> L2INOSPEREXT;

		/* check if the extent containing the hint has backed
		 * inodes.  if so, try to allocate within this extent.
		 */
		if (addressPXD(&iagp->inoext[extno])) {
			bitno = ino & (INOSPEREXT - 1);
			if ((bitno =
			     diFindFree(le32_to_cpu(iagp->wmap[extno]),
					bitno))
			    < INOSPEREXT) {
				ino = (extno << L2INOSPEREXT) + bitno;

				/* a free inode (bit) was found within this
				 * extent, so allocate it.
				 */
				rc = diAllocBit(imap, iagp, ino);
				IREAD_UNLOCK(ipimap);
				if (rc) {
					assert(rc == -EIO);
				} else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno, ino, extno,
						    iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);

				/* free the AG lock and return.
				 */
				AG_UNLOCK(imap, agno);
				return (rc);
			}

			if (!addext)
				extno =
				    (extno ==
				     EXTSPERIAG - 1) ? 0 : extno + 1;
		}

		/*
		 * no free inodes within the extent containing the hint.
		 *
		 * try to allocate from the backed extents following
		 * hint or, if appropriate (i.e. addext is true), allocate
		 * an extent of free inodes at or following the extent
		 * containing the hint.
		 *
		 * the free inode and free extent summary maps are used
		 * here, so determine the starting summary map position
		 * and the number of words we'll have to examine.  again,
		 * the approach is to allocate following the hint, so we
		 * might have to initially ignore prior bits of the summary
		 * map that represent extents prior to the extent containing
		 * the hint and later revisit these bits.
		 */
		bitno = extno & (EXTSPERSUM - 1);
		nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
		sword = extno >> L2EXTSPERSUM;

		/* mask any prior bits for the starting words of the
		 * summary map.
		 */
		mask = ONES << (EXTSPERSUM - bitno);
		inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
		extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;

		/* scan the free inode and free extent summary maps for
		 * free resources.
		 */
		for (i = 0; i < nwords; i++) {
			/* check if this word of the free inode summary
			 * map describes an extent with free inodes.
			 */
			if (~inosmap) {
				/* an extent with free inodes has been
				 * found.  determine the extent number
				 * and the inode number within the extent.
				 */
				rem = diFindFree(inosmap, 0);
				extno = (sword << L2EXTSPERSUM) + rem;
				rem = diFindFree(le32_to_cpu(iagp->wmap[extno]),
						 0);
				if (rem >= INOSPEREXT) {
					IREAD_UNLOCK(ipimap);
					release_metapage(mp);
					AG_UNLOCK(imap, agno);
					jfs_error(ip->i_sb,
						  "diAlloc: can't find free bit "
						  "in wmap");
					return -EIO;
				}

				/* determine the inode number within the
				 * iag and allocate the inode from the
				 * map.
				 */
				ino = (extno << L2INOSPEREXT) + rem;
				rc = diAllocBit(imap, iagp, ino);
				IREAD_UNLOCK(ipimap);
				if (rc)
					assert(rc == -EIO);
				else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno, ino, extno,
						    iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);

				/* free the AG lock and return.
				 */
				AG_UNLOCK(imap, agno);
				return (rc);
			}

			/* check if we may allocate an extent of free
			 * inodes and whether this word of the free
			 * extents summary map describes a free extent.
			 */
			if (addext && ~extsmap) {
				/* a free extent has been found.  determine
				 * the extent number.
				 */
				rem = diFindFree(extsmap, 0);
				extno = (sword << L2EXTSPERSUM) + rem;

				/* allocate an extent of free inodes.
				 */
				if ((rc = diNewExt(imap, iagp, extno))) {
					/* if there is no disk space for a
					 * new extent, try to allocate the
					 * disk inode from somewhere else.
					 */
					if (rc == -ENOSPC)
						break;

					assert(rc == -EIO);
				} else {
					/* set the results of the allocation
					 * and write the iag.
					 */
					diInitInode(ip, iagno,
						    extno << L2INOSPEREXT,
						    extno, iagp);
					mark_metapage_dirty(mp);
				}
				release_metapage(mp);

				/* free the imap inode & the AG lock & return.
				 */
				IREAD_UNLOCK(ipimap);
				AG_UNLOCK(imap, agno);
				return (rc);
			}

			/* move on to the next set of summary map words.
			 */
			sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
			inosmap = le32_to_cpu(iagp->inosmap[sword]);
			extsmap = le32_to_cpu(iagp->extsmap[sword]);
		}
	}
	/* unlock imap inode */
	IREAD_UNLOCK(ipimap);

	/* nothing doing in this iag, so release it.
	 */
	release_metapage(mp);

      tryag:
	/*
	 * try to allocate anywhere within the same AG as the parent inode.
	 * N.B. the AG_LOCK taken on one of the paths above is still held
	 * here and is the lock diAllocAG's precondition requires.
	 */
	rc = diAllocAG(imap, agno, dir, ip);

	AG_UNLOCK(imap, agno);

	if (rc != -ENOSPC)
		return (rc);

	/*
	 * try to allocate in any AG.
	 */
	return (diAllocAny(imap, agno, dir, ip));
}

/*
 * NAME:	diAllocAG(imap,agno,dir,ip)
 *
 * FUNCTION:	allocate a disk inode from the allocation group.
 *
 *	this routine first determines if a new extent of free
 *	inodes should be added for the allocation group, with
 *	the current request satisfied from this extent.  if this
 *	is the case, an attempt will be made to do just that.  if
 *	this attempt fails or it has been determined that a new
 *	extent should not be added, an attempt is made to satisfy
 *	the request by allocating an existing (backed) free inode
 *	from the allocation group.
 *
 * PRE CONDITION: Already have the AG lock for this AG.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- allocation group to allocate from.
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to the new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int
diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
	int rc, addext, numfree, numinos;

	/* get the number of free and the number of backed disk
	 * inodes currently within the ag.
	 */
	numfree = imap->im_agctl[agno].numfree;
	numinos = imap->im_agctl[agno].numinos;

	if (numfree > numinos) {
		jfs_error(ip->i_sb, "diAllocAG: numfree > numinos");
		return -EIO;
	}

	/* determine if we should allocate a new extent of free inodes
	 * within the ag: for directory inodes, add a new extent
	 * if there are a small number of free inodes or number of free
	 * inodes is a small percentage of the number of backed inodes.
	 */
	if (dir)
		addext = (numfree < 64 ||
			  (numfree < 256
			   && ((numfree * 100) / numinos) <= 20));
	else
		addext = (numfree == 0);

	/*
	 * try to allocate a new extent of free inodes.
	 */
	if (addext) {
		/* if free space is not available for this new extent, try
		 * below to allocate a free and existing (already backed)
		 * inode from the ag.
		 */
		if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
			return (rc);
	}

	/*
	 * try to allocate an existing free inode from the ag.
	 */
	return (diAllocIno(imap, agno, ip));
}

/*
 * NAME:	diAllocAny(imap,agno,dir,iap)
 *
 * FUNCTION:	allocate a disk inode from any other allocation group.
 *
 *	this routine is called when an allocation attempt within
 *	the primary allocation group has failed.  it attempts to
 *	allocate an inode from any allocation group other than the
 *	specified primary group.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- primary allocation group (to avoid).
 *	dir	- 'true' if the new disk inode is for a directory.
 *	ip	- pointer to a new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int
diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
	int ag, rc;
	int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;

	/* try to allocate from the ags following agno up to
	 * the maximum ag number.  each attempt takes and drops
	 * that ag's lock, matching diAllocAG's precondition.
	 */
	for (ag = agno + 1; ag <= maxag; ag++) {
		AG_LOCK(imap, ag);

		rc = diAllocAG(imap, ag, dir, ip);

		AG_UNLOCK(imap, ag);

		if (rc != -ENOSPC)
			return (rc);
	}

	/* try to allocate from the ags in front of agno.
	 */
	for (ag = 0; ag < agno; ag++) {
		AG_LOCK(imap, ag);

		rc = diAllocAG(imap, ag, dir, ip);

		AG_UNLOCK(imap, ag);

		if (rc != -ENOSPC)
			return (rc);
	}

	/* no free disk inodes.
	 */
	return -ENOSPC;
}

/*
 * NAME:	diAllocIno(imap,agno,ip)
 *
 * FUNCTION:	allocate a disk inode from the allocation group's free
 *	inode list, returning an error if this free list is
 *	empty (i.e. no iags on the list).
 *
 *	allocation occurs from the first iag on the list using
 *	the iag's free inode summary map to find the leftmost
 *	free inode in the iag.
 *
 * PRE CONDITION: Already have AG lock for this AG.
 *
 * PARAMETERS:
 *	imap	- pointer to inode map control structure.
 *	agno	- allocation group.
 *	ip	- pointer to new inode to be filled in on successful return
 *		  with the disk inode number allocated, its extent address
 *		  and the start of the ag.
 *
 * RETURN VALUES:
 *	0	- success.
 *	-ENOSPC	- insufficient disk resources.
 *	-EIO	- i/o error.
 */
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
	int iagno, ino, rc, rem, extno, sword;
	struct metapage *mp;
	struct iag *iagp;

	/* check if there are iags on the ag's free inode list.
	 */
	if ((iagno = imap->im_agctl[agno].inofree) < 0)
		return -ENOSPC;

	/* obtain read lock on imap inode */
	IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);

	/* read the iag at the head of the list.
	 */
	if ((rc = diIAGRead(imap, iagno, &mp))) {
		IREAD_UNLOCK(imap->im_ipimap);
		return (rc);
	}
	iagp = (struct iag *) mp->data;

	/* better be free inodes in this iag if it is on the
	 * list.
	 */
	if (!iagp->nfreeinos) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb,
			  "diAllocIno: nfreeinos = 0, but iag on freelist");
		return -EIO;
	}

	/* scan the free inode summary map to find an extent
	 * with free inodes.  a zero bit in the summary map means
	 * the corresponding extent has at least one free inode.
	 */
	for (sword = 0;; sword++) {
		if (sword >= SMAPSZ) {
			IREAD_UNLOCK(imap->im_ipimap);
			release_metapage(mp);
			jfs_error(ip->i_sb,
				  "diAllocIno: free inode not found in summary map");
			return -EIO;
		}

		if (~iagp->inosmap[sword])
			break;
	}

	/* found a extent with free inodes.  determine
	 * the extent number.
	 */
	rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
	if (rem >= EXTSPERSUM) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb, "diAllocIno: no free extent found");
		return -EIO;
	}
	extno = (sword << L2EXTSPERSUM) + rem;

	/* find the first free inode in the extent.
	 */
	rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
	if (rem >= INOSPEREXT) {
		IREAD_UNLOCK(imap->im_ipimap);
		release_metapage(mp);
		jfs_error(ip->i_sb, "diAllocIno: free inode not found");
		return -EIO;
	}

	/* compute the inode number within the iag.
	 */
	ino = (extno << L2INOSPEREXT) + rem;

	/* allocate the inode.
	 */
	rc = diAllocBit(imap, iagp, ino);
	IREAD_UNLOCK(imap->im_ipimap);
	if (rc) {
		release_metapage(mp);
		return (rc);
	}

	/* set the results of the allocation and write the iag.
	 */
	diInitInode(ip, iagno, ino, extno, iagp);
	write_metapage(mp);

	return (0);
}

/*
 * NAME:	diAllocExt(imap,agno,ip)
 *
 * FUNCTION:	add a new extent of free inodes to an iag, allocating
 *	an inode from this extent to satisfy the current allocation
 *	request.
* * this routine first tries to find an existing iag with free * extents through the ag free extent list. if list is not * empty, the head of the list will be selected as the home * of the new extent of free inodes. otherwise (the list is * empty), a new iag will be allocated for the ag to contain * the extent. * * once an iag has been selected, the free extent summary map * is used to locate a free extent within the iag and diNewExt() * is called to initialize the extent, with initialization * including the allocation of the first inode of the extent * for the purpose of satisfying this request. * * PARAMETERS: * imap - pointer to inode map control structure. * agno - allocation group number. * ip - pointer to new inode to be filled in on successful return * with the disk inode number allocated, its extent address * and the start of the ag. * * RETURN VALUES: * 0 - success. * -ENOSPC - insufficient disk resources. * -EIO - i/o error. */ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip) { int rem, iagno, sword, extno, rc; struct metapage *mp; struct iag *iagp; /* check if the ag has any iags with free extents. if not, * allocate a new iag for the ag. */ if ((iagno = imap->im_agctl[agno].extfree) < 0) { /* If successful, diNewIAG will obtain the read lock on the * imap inode. */ if ((rc = diNewIAG(imap, &iagno, agno, &mp))) { return (rc); } iagp = (struct iag *) mp->data; /* set the ag number if this a brand new iag */ iagp->agstart = cpu_to_le64(AGTOBLK(agno, imap->im_ipimap)); } else { /* read the iag. */ IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP); if ((rc = diIAGRead(imap, iagno, &mp))) { IREAD_UNLOCK(imap->im_ipimap); jfs_error(ip->i_sb, "diAllocExt: error reading iag"); return rc; } iagp = (struct iag *) mp->data; } /* using the free extent summary map, find a free extent. 
*/ for (sword = 0;; sword++) { if (sword >= SMAPSZ) { release_metapage(mp); IREAD_UNLOCK(imap->im_ipimap); jfs_error(ip->i_sb, "diAllocExt: free ext summary map not found"); return -EIO; } if (~iagp->extsmap[sword]) break; } /* determine the extent number of the free extent. */ rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0); if (rem >= EXTSPERSUM) { release_metapage(mp); IREAD_UNLOCK(imap->im_ipimap); jfs_error(ip->i_sb, "diAllocExt: free extent not found"); return -EIO; } extno = (sword << L2EXTSPERSUM) + rem; /* initialize the new extent. */ rc = diNewExt(imap, iagp, extno); IREAD_UNLOCK(imap->im_ipimap); if (rc) { /* something bad happened. if a new iag was allocated, * place it back on the inode map's iag free list, and * clear the ag number information. */ if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { IAGFREE_LOCK(imap); iagp->iagfree = cpu_to_le32(imap->im_freeiag); imap->im_freeiag = iagno; IAGFREE_UNLOCK(imap); } write_metapage(mp); return (rc); } /* set the results of the allocation and write the iag. */ diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp); write_metapage(mp); return (0); } /* * NAME: diAllocBit(imap,iagp,ino) * * FUNCTION: allocate a backed inode from an iag. * * this routine performs the mechanics of allocating a * specified inode from a backed extent. * * if the inode to be allocated represents the last free * inode within the iag, the iag will be removed from the * ag free inode list. * * a careful update approach is used to provide consistency * in the face of updates to multiple buffers. under this * approach, all required buffers are obtained before making * any updates and are held all are updates are complete. * * PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on * this AG. Must have read lock on imap inode. * * PARAMETERS: * imap - pointer to inode map control structure. * iagp - pointer to iag. * ino - inode number to be allocated within the iag. * * RETURN VALUES: * 0 - success. 
* -ENOSPC - insufficient disk resources. * -EIO - i/o error. */ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino) { int extno, bitno, agno, sword, rc; struct metapage *amp = NULL, *bmp = NULL; struct iag *aiagp = NULL, *biagp = NULL; u32 mask; /* check if this is the last free inode within the iag. * if so, it will have to be removed from the ag free * inode list, so get the iags preceding and following * it on the list. */ if (iagp->nfreeinos == cpu_to_le32(1)) { if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) { if ((rc = diIAGRead(imap, le32_to_cpu(iagp->inofreefwd), &amp))) return (rc); aiagp = (struct iag *) amp->data; } if ((int) le32_to_cpu(iagp->inofreeback) >= 0) { if ((rc = diIAGRead(imap, le32_to_cpu(iagp->inofreeback), &bmp))) { if (amp) release_metapage(amp); return (rc); } biagp = (struct iag *) bmp->data; } } /* get the ag number, extent number, inode number within * the extent. */ agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb)); extno = ino >> L2INOSPEREXT; bitno = ino & (INOSPEREXT - 1); /* compute the mask for setting the map. */ mask = HIGHORDER >> bitno; /* the inode should be free and backed. */ if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) || ((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) || (addressPXD(&iagp->inoext[extno]) == 0)) { if (amp) release_metapage(amp); if (bmp) release_metapage(bmp); jfs_error(imap->im_ipimap->i_sb, "diAllocBit: iag inconsistent"); return -EIO; } /* mark the inode as allocated in the working map. */ iagp->wmap[extno] |= cpu_to_le32(mask); /* check if all inodes within the extent are now * allocated. if so, update the free inode summary * map to reflect this. */ if (iagp->wmap[extno] == cpu_to_le32(ONES)) { sword = extno >> L2EXTSPERSUM; bitno = extno & (EXTSPERSUM - 1); iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno); } /* if this was the last free inode in the iag, remove the * iag from the ag free inode list. 
*/ if (iagp->nfreeinos == cpu_to_le32(1)) { if (amp) { aiagp->inofreeback = iagp->inofreeback; write_metapage(amp); } if (bmp) { biagp->inofreefwd = iagp->inofreefwd; write_metapage(bmp); } else { imap->im_agctl[agno].inofree = le32_to_cpu(iagp->inofreefwd); } iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1); } /* update the free inode count at the iag, ag, inode * map levels. */ le32_add_cpu(&iagp->nfreeinos, -1); imap->im_agctl[agno].numfree -= 1; atomic_dec(&imap->im_numfree); return (0); } /* * NAME: diNewExt(imap,iagp,extno) * * FUNCTION: initialize a new extent of inodes for an iag, allocating * the first inode of the extent for use for the current * allocation request. * * disk resources are allocated for the new extent of inodes * and the inodes themselves are initialized to reflect their * existence within the extent (i.e. their inode numbers and * inode extent addresses are set) and their initial state * (mode and link count are set to zero). * * if the iag is new, it is not yet on an ag extent free list * but will now be placed on this list. * * if the allocation of the new extent causes the iag to * have no free extent, the iag will be removed from the * ag extent free list. * * if the iag has no free backed inodes, it will be placed * on the ag free inode list, since the addition of the new * extent will now cause it to have free inodes. * * a careful update approach is used to provide consistency * (i.e. list consistency) in the face of updates to multiple * buffers. under this approach, all required buffers are * obtained before making any updates and are held until all * updates are complete. * * PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on * this AG. Must have read lock on imap inode. * * PARAMETERS: * imap - pointer to inode map control structure. * iagp - pointer to iag. * extno - extent number. * * RETURN VALUES: * 0 - success. * -ENOSPC - insufficient disk resources. * -EIO - i/o error. 
*/ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno) { int agno, iagno, fwd, back, freei = 0, sword, rc; struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL; struct metapage *amp, *bmp, *cmp, *dmp; struct inode *ipimap; s64 blkno, hint; int i, j; u32 mask; ino_t ino; struct dinode *dp; struct jfs_sb_info *sbi; /* better have free extents. */ if (!iagp->nfreeexts) { jfs_error(imap->im_ipimap->i_sb, "diNewExt: no free extents"); return -EIO; } /* get the inode map inode. */ ipimap = imap->im_ipimap; sbi = JFS_SBI(ipimap->i_sb); amp = bmp = cmp = NULL; /* get the ag and iag numbers for this iag. */ agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); iagno = le32_to_cpu(iagp->iagnum); /* check if this is the last free extent within the * iag. if so, the iag must be removed from the ag * free extent list, so get the iags preceding and * following the iag on this list. */ if (iagp->nfreeexts == cpu_to_le32(1)) { if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) { if ((rc = diIAGRead(imap, fwd, &amp))) return (rc); aiagp = (struct iag *) amp->data; } if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) { if ((rc = diIAGRead(imap, back, &bmp))) goto error_out; biagp = (struct iag *) bmp->data; } } else { /* the iag has free extents. if all extents are free * (as is the case for a newly allocated iag), the iag * must be added to the ag free extent list, so get * the iag at the head of the list in preparation for * adding this iag to this list. */ fwd = back = -1; if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { if ((fwd = imap->im_agctl[agno].extfree) >= 0) { if ((rc = diIAGRead(imap, fwd, &amp))) goto error_out; aiagp = (struct iag *) amp->data; } } } /* check if the iag has no free inodes. if so, the iag * will have to be added to the ag free inode list, so get * the iag at the head of the list in preparation for * adding this iag to this list. in doing this, we must * check if we already have the iag at the head of * the list in hand. 
*/ if (iagp->nfreeinos == 0) { freei = imap->im_agctl[agno].inofree; if (freei >= 0) { if (freei == fwd) { ciagp = aiagp; } else if (freei == back) { ciagp = biagp; } else { if ((rc = diIAGRead(imap, freei, &cmp))) goto error_out; ciagp = (struct iag *) cmp->data; } if (ciagp == NULL) { jfs_error(imap->im_ipimap->i_sb, "diNewExt: ciagp == NULL"); rc = -EIO; goto error_out; } } } /* allocate disk space for the inode extent. */ if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0)) hint = ((s64) agno << sbi->bmap->db_agl2size) - 1; else hint = addressPXD(&iagp->inoext[extno - 1]) + lengthPXD(&iagp->inoext[extno - 1]) - 1; if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno))) goto error_out; /* compute the inode number of the first inode within the * extent. */ ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT); /* initialize the inodes within the newly allocated extent a * page at a time. */ for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) { /* get a buffer for this page of disk inodes. */ dmp = get_metapage(ipimap, blkno + i, PSIZE, 1); if (dmp == NULL) { rc = -EIO; goto error_out; } dp = (struct dinode *) dmp->data; /* initialize the inode number, mode, link count and * inode extent address. */ for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) { dp->di_inostamp = cpu_to_le32(sbi->inostamp); dp->di_number = cpu_to_le32(ino); dp->di_fileset = cpu_to_le32(FILESYSTEM_I); dp->di_mode = 0; dp->di_nlink = 0; PXDaddress(&(dp->di_ixpxd), blkno); PXDlength(&(dp->di_ixpxd), imap->im_nbperiext); } write_metapage(dmp); } /* if this is the last free extent within the iag, remove the * iag from the ag free extent list. 
*/ if (iagp->nfreeexts == cpu_to_le32(1)) { if (fwd >= 0) aiagp->extfreeback = iagp->extfreeback; if (back >= 0) biagp->extfreefwd = iagp->extfreefwd; else imap->im_agctl[agno].extfree = le32_to_cpu(iagp->extfreefwd); iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1); } else { /* if the iag has all free extents (newly allocated iag), * add the iag to the ag free extent list. */ if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { if (fwd >= 0) aiagp->extfreeback = cpu_to_le32(iagno); iagp->extfreefwd = cpu_to_le32(fwd); iagp->extfreeback = cpu_to_le32(-1); imap->im_agctl[agno].extfree = iagno; } } /* if the iag has no free inodes, add the iag to the * ag free inode list. */ if (iagp->nfreeinos == 0) { if (freei >= 0) ciagp->inofreeback = cpu_to_le32(iagno); iagp->inofreefwd = cpu_to_le32(imap->im_agctl[agno].inofree); iagp->inofreeback = cpu_to_le32(-1); imap->im_agctl[agno].inofree = iagno; } /* initialize the extent descriptor of the extent. */ PXDlength(&iagp->inoext[extno], imap->im_nbperiext); PXDaddress(&iagp->inoext[extno], blkno); /* initialize the working and persistent map of the extent. * the working map will be initialized such that * it indicates the first inode of the extent is allocated. */ iagp->wmap[extno] = cpu_to_le32(HIGHORDER); iagp->pmap[extno] = 0; /* update the free inode and free extent summary maps * for the extent to indicate the extent has free inodes * and no longer represents a free extent. */ sword = extno >> L2EXTSPERSUM; mask = HIGHORDER >> (extno & (EXTSPERSUM - 1)); iagp->extsmap[sword] |= cpu_to_le32(mask); iagp->inosmap[sword] &= cpu_to_le32(~mask); /* update the free inode and free extent counts for the * iag. */ le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1)); le32_add_cpu(&iagp->nfreeexts, -1); /* update the free and backed inode counts for the ag. */ imap->im_agctl[agno].numfree += (INOSPEREXT - 1); imap->im_agctl[agno].numinos += INOSPEREXT; /* update the free and backed inode counts for the inode map. 
*/ atomic_add(INOSPEREXT - 1, &imap->im_numfree); atomic_add(INOSPEREXT, &imap->im_numinos); /* write the iags. */ if (amp) write_metapage(amp); if (bmp) write_metapage(bmp); if (cmp) write_metapage(cmp); return (0); error_out: /* release the iags. */ if (amp) release_metapage(amp); if (bmp) release_metapage(bmp); if (cmp) release_metapage(cmp); return (rc); } /* * NAME: diNewIAG(imap,iagnop,agno) * * FUNCTION: allocate a new iag for an allocation group. * * first tries to allocate the iag from the inode map * iagfree list: * if the list has free iags, the head of the list is removed * and returned to satisfy the request. * if the inode map's iag free list is empty, the inode map * is extended to hold a new iag. this new iag is initialized * and returned to satisfy the request. * * PARAMETERS: * imap - pointer to inode map control structure. * iagnop - pointer to an iag number set with the number of the * newly allocated iag upon successful return. * agno - allocation group number. * bpp - Buffer pointer to be filled in with new IAG's buffer * * RETURN VALUES: * 0 - success. * -ENOSPC - insufficient disk resources. * -EIO - i/o error. * * serialization: * AG lock held on entry/exit; * write lock on the map is held inside; * read lock on the map is held on successful completion; * * note: new iag transaction: * . synchronously write iag; * . write log of xtree and inode of imap; * . commit; * . synchronous write of xtree (right to left, bottom to top); * . at start of logredo(): init in-memory imap with one additional iag page; * . 
at end of logredo(): re-read imap inode to determine * new imap size; */ static int diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp) { int rc; int iagno, i, xlen; struct inode *ipimap; struct super_block *sb; struct jfs_sb_info *sbi; struct metapage *mp; struct iag *iagp; s64 xaddr = 0; s64 blkno; tid_t tid; struct inode *iplist[1]; /* pick up pointers to the inode map and mount inodes */ ipimap = imap->im_ipimap; sb = ipimap->i_sb; sbi = JFS_SBI(sb); /* acquire the free iag lock */ IAGFREE_LOCK(imap); /* if there are any iags on the inode map free iag list, * allocate the iag from the head of the list. */ if (imap->im_freeiag >= 0) { /* pick up the iag number at the head of the list */ iagno = imap->im_freeiag; /* determine the logical block number of the iag */ blkno = IAGTOLBLK(iagno, sbi->l2nbperpage); } else { /* no free iags. the inode map will have to be extented * to include a new iag. */ /* acquire inode map lock */ IWRITE_LOCK(ipimap, RDWRLOCK_IMAP); if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) { IWRITE_UNLOCK(ipimap); IAGFREE_UNLOCK(imap); jfs_error(imap->im_ipimap->i_sb, "diNewIAG: ipimap->i_size is wrong"); return -EIO; } /* get the next available iag number */ iagno = imap->im_nextiag; /* make sure that we have not exceeded the maximum inode * number limit. */ if (iagno > (MAXIAGS - 1)) { /* release the inode map lock */ IWRITE_UNLOCK(ipimap); rc = -ENOSPC; goto out; } /* * synchronously append new iag page. 
*/ /* determine the logical address of iag page to append */ blkno = IAGTOLBLK(iagno, sbi->l2nbperpage); /* Allocate extent for new iag page */ xlen = sbi->nbperpage; if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) { /* release the inode map lock */ IWRITE_UNLOCK(ipimap); goto out; } /* * start transaction of update of the inode map * addressing structure pointing to the new iag page; */ tid = txBegin(sb, COMMIT_FORCE); mutex_lock(&JFS_IP(ipimap)->commit_mutex); /* update the inode map addressing structure to point to it */ if ((rc = xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); /* Free the blocks allocated for the iag since it was * not successfully added to the inode map */ dbFree(ipimap, xaddr, (s64) xlen); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); goto out; } /* update the inode map's inode to reflect the extension */ ipimap->i_size += PSIZE; inode_add_bytes(ipimap, PSIZE); /* assign a buffer for the page */ mp = get_metapage(ipimap, blkno, PSIZE, 0); if (!mp) { /* * This is very unlikely since we just created the * extent, but let's try to handle it correctly */ xtTruncate(tid, ipimap, ipimap->i_size - PSIZE, COMMIT_PWMAP); txAbort(tid, 0); txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); rc = -EIO; goto out; } iagp = (struct iag *) mp->data; /* init the iag */ memset(iagp, 0, sizeof(struct iag)); iagp->iagnum = cpu_to_le32(iagno); iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1); iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1); iagp->iagfree = cpu_to_le32(-1); iagp->nfreeinos = 0; iagp->nfreeexts = cpu_to_le32(EXTSPERIAG); /* initialize the free inode summary map (free extent * summary map initialization handled by bzero). 
*/ for (i = 0; i < SMAPSZ; i++) iagp->inosmap[i] = cpu_to_le32(ONES); /* * Write and sync the metapage */ flush_metapage(mp); /* * txCommit(COMMIT_FORCE) will synchronously write address * index pages and inode after commit in careful update order * of address index pages (right to left, bottom up); */ iplist[0] = ipimap; rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); txEnd(tid); mutex_unlock(&JFS_IP(ipimap)->commit_mutex); duplicateIXtree(sb, blkno, xlen, &xaddr); /* update the next available iag number */ imap->im_nextiag += 1; /* Add the iag to the iag free list so we don't lose the iag * if a failure happens now. */ imap->im_freeiag = iagno; /* Until we have logredo working, we want the imap inode & * control page to be up to date. */ diSync(ipimap); /* release the inode map lock */ IWRITE_UNLOCK(ipimap); } /* obtain read lock on map */ IREAD_LOCK(ipimap, RDWRLOCK_IMAP); /* read the iag */ if ((rc = diIAGRead(imap, iagno, &mp))) { IREAD_UNLOCK(ipimap); rc = -EIO; goto out; } iagp = (struct iag *) mp->data; /* remove the iag from the iag free list */ imap->im_freeiag = le32_to_cpu(iagp->iagfree); iagp->iagfree = cpu_to_le32(-1); /* set the return iag number and buffer pointer */ *iagnop = iagno; *mpp = mp; out: /* release the iag free lock */ IAGFREE_UNLOCK(imap); return (rc); } /* * NAME: diIAGRead() * * FUNCTION: get the buffer for the specified iag within a fileset * or aggregate inode map. * * PARAMETERS: * imap - pointer to inode map control structure. * iagno - iag number. * bpp - point to buffer pointer to be filled in on successful * exit. * * SERIALIZATION: * must have read lock on imap inode * (When called by diExtendFS, the filesystem is quiesced, therefore * the read lock is unnecessary.) * * RETURN VALUES: * 0 - success. * -EIO - i/o error. */ static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp) { struct inode *ipimap = imap->im_ipimap; s64 blkno; /* compute the logical block number of the iag. 
	blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage);

	/* read the iag.
	 */
	*mpp = read_metapage(ipimap, blkno, PSIZE, 0);
	if (*mpp == NULL) {
		return -EIO;
	}

	return (0);
}

/*
 * NAME:	diFindFree()
 *
 * FUNCTION:	find the first free bit in a word starting at
 *		the specified bit position.
 *
 *		bits are numbered from the most-significant bit down
 *		(bit 0 is the MSB, selected by the HIGHORDER mask);
 *		a clear (0) bit marks a free slot.
 *
 * PARAMETERS:
 *	word	- word to be examined.
 *	start	- starting bit position.
 *
 * RETURN VALUES:
 *	bit position of first free bit in the word or 32 if
 *	no free bits were found.
 */
static int diFindFree(u32 word, int start)
{
	int bitno;
	assert(start < 32);
	/* scan the word for the first free bit.  shifting the word
	 * left one position per iteration keeps the bit under test
	 * at the high-order position.
	 */
	for (word <<= start, bitno = start; bitno < 32;
	     bitno++, word <<= 1) {
		if ((word & HIGHORDER) == 0)
			break;
	}
	return (bitno);
}

/*
 * NAME:	diUpdatePMap()
 *
 * FUNCTION: Update the persistent map in an IAG for the allocation or
 *	freeing of the specified inode.
 *
 * PRE CONDITIONS: Working map has already been updated for allocate.
 *
 * PARAMETERS:
 *	ipimap	- Incore inode map inode
 *	inum	- Number of inode to mark in permanent map
 *	is_free	- If 'true' indicates inode should be marked freed, otherwise
 *		  indicates inode should be marked allocated.
* * RETURN VALUES: * 0 for success */ int diUpdatePMap(struct inode *ipimap, unsigned long inum, bool is_free, struct tblock * tblk) { int rc; struct iag *iagp; struct metapage *mp; int iagno, ino, extno, bitno; struct inomap *imap; u32 mask; struct jfs_log *log; int lsn, difft, diffp; unsigned long flags; imap = JFS_IP(ipimap)->i_imap; /* get the iag number containing the inode */ iagno = INOTOIAG(inum); /* make sure that the iag is contained within the map */ if (iagno >= imap->im_nextiag) { jfs_error(ipimap->i_sb, "diUpdatePMap: the iag is outside the map"); return -EIO; } /* read the iag */ IREAD_LOCK(ipimap, RDWRLOCK_IMAP); rc = diIAGRead(imap, iagno, &mp); IREAD_UNLOCK(ipimap); if (rc) return (rc); metapage_wait_for_io(mp); iagp = (struct iag *) mp->data; /* get the inode number and extent number of the inode within * the iag and the inode number within the extent. */ ino = inum & (INOSPERIAG - 1); extno = ino >> L2INOSPEREXT; bitno = ino & (INOSPEREXT - 1); mask = HIGHORDER >> bitno; /* * mark the inode free in persistent map: */ if (is_free) { /* The inode should have been allocated both in working * map and in persistent map; * the inode will be freed from working map at the release * of last reference release; */ if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) { jfs_error(ipimap->i_sb, "diUpdatePMap: inode %ld not marked as " "allocated in wmap!", inum); } if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) { jfs_error(ipimap->i_sb, "diUpdatePMap: inode %ld not marked as " "allocated in pmap!", inum); } /* update the bitmap for the extent of the freed inode */ iagp->pmap[extno] &= cpu_to_le32(~mask); } /* * mark the inode allocated in persistent map: */ else { /* The inode should be already allocated in the working map * and should be free in persistent map; */ if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) { release_metapage(mp); jfs_error(ipimap->i_sb, "diUpdatePMap: the inode is not allocated in " "the working map"); return -EIO; } if 
((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) { release_metapage(mp); jfs_error(ipimap->i_sb, "diUpdatePMap: the inode is not free in the " "persistent map"); return -EIO; } /* update the bitmap for the extent of the allocated inode */ iagp->pmap[extno] |= cpu_to_le32(mask); } /* * update iag lsn */ lsn = tblk->lsn; log = JFS_SBI(tblk->sb)->log; LOGSYNC_LOCK(log, flags); if (mp->lsn != 0) { /* inherit older/smaller lsn */ logdiff(difft, lsn, log); logdiff(diffp, mp->lsn, log); if (difft < diffp) { mp->lsn = lsn; /* move mp after tblock in logsync list */ list_move(&mp->synclist, &tblk->synclist); } /* inherit younger/larger clsn */ assert(mp->clsn); logdiff(difft, tblk->clsn, log); logdiff(diffp, mp->clsn, log); if (difft > diffp) mp->clsn = tblk->clsn; } else { mp->log = log; mp->lsn = lsn; /* insert mp after tblock in logsync list */ log->count++; list_add(&mp->synclist, &tblk->synclist); mp->clsn = tblk->clsn; } LOGSYNC_UNLOCK(log, flags); write_metapage(mp); return (0); } /* * diExtendFS() * * function: update imap for extendfs(); * * note: AG size has been increased s.t. each k old contiguous AGs are * coalesced into a new AG; */ int diExtendFS(struct inode *ipimap, struct inode *ipbmap) { int rc, rcx = 0; struct inomap *imap = JFS_IP(ipimap)->i_imap; struct iag *iagp = NULL, *hiagp = NULL; struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap; struct metapage *bp, *hbp; int i, n, head; int numinos, xnuminos = 0, xnumfree = 0; s64 agstart; jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d", imap->im_nextiag, atomic_read(&imap->im_numinos), atomic_read(&imap->im_numfree)); /* * reconstruct imap * * coalesce contiguous k (newAGSize/oldAGSize) AGs; * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn; * note: new AG size = old AG size * (2**x). 
*/ /* init per AG control information im_agctl[] */ for (i = 0; i < MAXAG; i++) { imap->im_agctl[i].inofree = -1; imap->im_agctl[i].extfree = -1; imap->im_agctl[i].numinos = 0; /* number of backed inodes */ imap->im_agctl[i].numfree = 0; /* number of free backed inodes */ } /* * process each iag page of the map. * * rebuild AG Free Inode List, AG Free Inode Extent List; */ for (i = 0; i < imap->im_nextiag; i++) { if ((rc = diIAGRead(imap, i, &bp))) { rcx = rc; continue; } iagp = (struct iag *) bp->data; if (le32_to_cpu(iagp->iagnum) != i) { release_metapage(bp); jfs_error(ipimap->i_sb, "diExtendFs: unexpected value of iagnum"); return -EIO; } /* leave free iag in the free iag list */ if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) { release_metapage(bp); continue; } agstart = le64_to_cpu(iagp->agstart); n = agstart >> mp->db_agl2size; iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size); /* compute backed inodes */ numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) << L2INOSPEREXT; if (numinos > 0) { /* merge AG backed inodes */ imap->im_agctl[n].numinos += numinos; xnuminos += numinos; } /* if any backed free inodes, insert at AG free inode list */ if ((int) le32_to_cpu(iagp->nfreeinos) > 0) { if ((head = imap->im_agctl[n].inofree) == -1) { iagp->inofreefwd = cpu_to_le32(-1); iagp->inofreeback = cpu_to_le32(-1); } else { if ((rc = diIAGRead(imap, head, &hbp))) { rcx = rc; goto nextiag; } hiagp = (struct iag *) hbp->data; hiagp->inofreeback = iagp->iagnum; iagp->inofreefwd = cpu_to_le32(head); iagp->inofreeback = cpu_to_le32(-1); write_metapage(hbp); } imap->im_agctl[n].inofree = le32_to_cpu(iagp->iagnum); /* merge AG backed free inodes */ imap->im_agctl[n].numfree += le32_to_cpu(iagp->nfreeinos); xnumfree += le32_to_cpu(iagp->nfreeinos); } /* if any free extents, insert at AG free extent list */ if (le32_to_cpu(iagp->nfreeexts) > 0) { if ((head = imap->im_agctl[n].extfree) == -1) { iagp->extfreefwd = cpu_to_le32(-1); iagp->extfreeback = cpu_to_le32(-1); } 
			else {
				if ((rc = diIAGRead(imap, head, &hbp))) {
					rcx = rc;
					goto nextiag;
				}
				hiagp = (struct iag *) hbp->data;
				hiagp->extfreeback = iagp->iagnum;

				iagp->extfreefwd = cpu_to_le32(head);
				iagp->extfreeback = cpu_to_le32(-1);
				write_metapage(hbp);
			}

			imap->im_agctl[n].extfree =
			    le32_to_cpu(iagp->iagnum);
		}
	      nextiag:
		write_metapage(bp);
	}

	/* cross-check the per-iag totals accumulated above against
	 * the global in-core counters.
	 */
	if (xnuminos != atomic_read(&imap->im_numinos) ||
	    xnumfree != atomic_read(&imap->im_numfree)) {
		jfs_error(ipimap->i_sb,
			  "diExtendFs: numinos or numfree incorrect");
		return -EIO;
	}

	return rcx;
}


/*
 *	duplicateIXtree()
 *
 * FUNCTION: mirror the extension of the primary inode-map xtree into
 *	the secondary aggregate inode table (AIT2).  on any failure the
 *	secondary table is marked bad (JFS_BAD_SAIT) -- in the in-core
 *	mount flags and, when possible, in the on-disk superblock --
 *	rather than propagating an error to the caller.
 *
 * serialization: IWRITE_LOCK held on entry/exit
 *
 * note: shadow page with regular inode (rel.2);
 */
static void duplicateIXtree(struct super_block *sb, s64 blkno,
			    int xlen, s64 *xaddr)
{
	struct jfs_superblock *j_sb;
	struct buffer_head *bh;
	struct inode *ip;
	tid_t tid;

	/* if AIT2 ipmap2 is bad, do not try to update it */
	if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT)	/* s_flag */
		return;
	ip = diReadSpecial(sb, FILESYSTEM_I, 1);
	if (ip == NULL) {
		/* secondary table unreadable: flag it bad in core and
		 * persist the flag in the superblock (best effort).
		 */
		JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
		if (readSuper(sb, &bh))
			return;
		j_sb = (struct jfs_superblock *)bh->b_data;
		j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);

		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
		return;
	}

	/* start transaction */
	tid = txBegin(sb, COMMIT_FORCE);
	/* update the inode map addressing structure to point to it */
	if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) {
		JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
		txAbort(tid, 1);
		goto cleanup;

	}
	/* update the inode map's inode to reflect the extension */
	ip->i_size += PSIZE;
	inode_add_bytes(ip, PSIZE);
	txCommit(tid, 1, &ip, COMMIT_FORCE);
      cleanup:
	txEnd(tid);
	diFreeSpecial(ip);
}

/*
 * NAME: copy_from_dinode()
 *
 * FUNCTION: Copies inode info from disk inode to in-memory inode
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOMEM	- insufficient memory
 */
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);

	jfs_ip->fileset =
	    le32_to_cpu(dip->di_fileset);
	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
	jfs_set_inode_flags(ip);

	/* low 16 bits of on-disk di_mode carry the POSIX mode;
	 * the high bits (kept in mode2 above) hold JFS-specific flags.
	 */
	ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
	if (sbi->umask != -1) {
		/* mount-time umask override replaces the stored permissions */
		ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask);
		/* For directories, add x permission if r is allowed by umask */
		if (S_ISDIR(ip->i_mode)) {
			if (ip->i_mode & 0400)
				ip->i_mode |= 0100;
			if (ip->i_mode & 0040)
				ip->i_mode |= 0010;
			if (ip->i_mode & 0004)
				ip->i_mode |= 0001;
		}
	}
	ip->i_nlink = le32_to_cpu(dip->di_nlink);

	/* the on-disk uid/gid is always preserved in saved_uid/saved_gid;
	 * a mount-time uid/gid option overrides only the in-core value.
	 */
	jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
	if (sbi->uid == -1)
		ip->i_uid = jfs_ip->saved_uid;
	else {
		ip->i_uid = sbi->uid;
	}

	jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
	if (sbi->gid == -1)
		ip->i_gid = jfs_ip->saved_gid;
	else {
		ip->i_gid = sbi->gid;
	}

	ip->i_size = le64_to_cpu(dip->di_size);
	ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
	ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
	ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
	ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
	ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
	ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
	ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
	ip->i_generation = le32_to_cpu(dip->di_gen);

	jfs_ip->ixpxd = dip->di_ixpxd;	/* in-memory pxd's are little-endian */
	jfs_ip->acl = dip->di_acl;	/* as are dxd's */
	jfs_ip->ea = dip->di_ea;
	jfs_ip->next_index = le32_to_cpu(dip->di_next_index);
	jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec);
	jfs_ip->acltype = le32_to_cpu(dip->di_acltype);

	if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) {
		jfs_ip->dev = le32_to_cpu(dip->di_rdev);
		ip->i_rdev = new_decode_dev(jfs_ip->dev);
	}

	/* the tail of the disk inode is a union; which member is live
	 * depends on the file type (sizes are fixed by the disk format).
	 */
	if (S_ISDIR(ip->i_mode)) {
		memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
	} else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
		memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
	} else
		memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128);

	/* Zero the in-memory-only stuff */
	jfs_ip->cflag = 0;
	jfs_ip->btindex = 0;
	jfs_ip->btorder = 0;
	jfs_ip->bxflag = 0;
	jfs_ip->blid = 0;
	jfs_ip->atlhead = 0;
	jfs_ip->atltail = 0;
	jfs_ip->xtlid = 0;
	return (0);
}

/*
 * NAME: copy_to_dinode()
 *
 * FUNCTION: Copies inode info from in-memory inode to disk inode
 *
 *	all multi-byte fields are converted to the on-disk
 *	little-endian layout via cpu_to_le32/cpu_to_le64; pxd's and
 *	dxd's are already kept little-endian in core and are copied
 *	verbatim.
 */
static void copy_to_dinode(struct dinode * dip, struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);

	dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
	dip->di_inostamp = cpu_to_le32(sbi->inostamp);
	dip->di_number = cpu_to_le32(ip->i_ino);
	dip->di_gen = cpu_to_le32(ip->i_generation);
	dip->di_size = cpu_to_le64(ip->i_size);
	dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
	dip->di_nlink = cpu_to_le32(ip->i_nlink);
	/* when a mount-time uid/gid override is active, the original
	 * on-disk owner (saved_uid/saved_gid) is written back so the
	 * override never becomes permanent.
	 */
	if (sbi->uid == -1)
		dip->di_uid = cpu_to_le32(ip->i_uid);
	else
		dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
	if (sbi->gid == -1)
		dip->di_gid = cpu_to_le32(ip->i_gid);
	else
		dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
	jfs_get_inode_flags(jfs_ip);
	/*
	 * mode2 is only needed for storing the higher order bits.
	 * Trust i_mode for the lower order ones
	 */
	if (sbi->umask == -1)
		dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) |
					   ip->i_mode);
	else /* Leave the original permissions alone */
		dip->di_mode = cpu_to_le32(jfs_ip->mode2);

	dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
	dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
	dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
	dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
	dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
	dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
	dip->di_ixpxd = jfs_ip->ixpxd;	/* in-memory pxd's are little-endian */
	dip->di_acl = jfs_ip->acl;	/* as are dxd's */
	dip->di_ea = jfs_ip->ea;
	dip->di_next_index = cpu_to_le32(jfs_ip->next_index);
	dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime);
	dip->di_otime.tv_nsec = 0;
	dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
	if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
		dip->di_rdev = cpu_to_le32(jfs_ip->dev);
}
gpl-2.0
linux4kix/linux-linaro-stable-mx6
lib/lz4/lz4hc_compress.c
4000
12549
/* * LZ4 HC - High Compression Mode of LZ4 * Copyright (C) 2011-2012, Yann Collet. * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * You can contact the author at : * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html * - LZ4 source repository : http://code.google.com/p/lz4/ * * Changed for kernel use by: * Chanho Min <chanho.min@lge.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/lz4.h> #include <asm/unaligned.h> #include "lz4defs.h" struct lz4hc_data { const u8 *base; HTYPE hashtable[HASHTABLESIZE]; u16 chaintable[MAXD]; const u8 *nexttoupdate; } __attribute__((__packed__)); static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base) { memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable)); memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable)); #if LZ4_ARCH64 hc4->nexttoupdate = base + 1; #else hc4->nexttoupdate = base; #endif hc4->base = base; return 1; } /* Update chains up to ip (excluded) */ static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) { u16 *chaintable = hc4->chaintable; HTYPE *hashtable = hc4->hashtable; #if LZ4_ARCH64 const BYTE * const base = hc4->base; #else const int base = 0; #endif while (hc4->nexttoupdate < ip) { const u8 *p = hc4->nexttoupdate; size_t delta = p - (hashtable[HASH_VALUE(p)] + base); if (delta > MAX_DISTANCE) delta = MAX_DISTANCE; chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta; hashtable[HASH_VALUE(p)] = (p) - base; hc4->nexttoupdate++; } } static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2, const u8 *const matchlimit) { const u8 *p1t = p1; while (p1t < matchlimit - (STEPSIZE - 1)) { #if LZ4_ARCH64 u64 diff = A64(p2) ^ A64(p1t); #else u32 diff = A32(p2) ^ A32(p1t); #endif if (!diff) { p1t += STEPSIZE; p2 += STEPSIZE; continue; } p1t += LZ4_NBCOMMONBYTES(diff); return p1t - p1; } #if LZ4_ARCH64 if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) { p1t += 4; p2 += 4; } #endif if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) { p1t += 2; p2 += 2; } if ((p1t < matchlimit) && (*p2 == *p1t)) p1t++; return p1t - p1; } static inline int 
lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) { u16 *const chaintable = hc4->chaintable; HTYPE *const hashtable = hc4->hashtable; const u8 *ref; #if LZ4_ARCH64 const BYTE * const base = hc4->base; #else const int base = 0; #endif int nbattempts = MAX_NB_ATTEMPTS; size_t repl = 0, ml = 0; u16 delta; /* HC4 match finder */ lz4hc_insert(hc4, ip); ref = hashtable[HASH_VALUE(ip)] + base; /* potential repetition */ if (ref >= ip-4) { /* confirmed */ if (A32(ref) == A32(ip)) { delta = (u16)(ip-ref); repl = ml = lz4hc_commonlength(ip + MINMATCH, ref + MINMATCH, matchlimit) + MINMATCH; *matchpos = ref; } ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; } while ((ref >= ip - MAX_DISTANCE) && nbattempts) { nbattempts--; if (*(ref + ml) == *(ip + ml)) { if (A32(ref) == A32(ip)) { size_t mlt = lz4hc_commonlength(ip + MINMATCH, ref + MINMATCH, matchlimit) + MINMATCH; if (mlt > ml) { ml = mlt; *matchpos = ref; } } } ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; } /* Complete table */ if (repl) { const BYTE *ptr = ip; const BYTE *end; end = ip + repl - (MINMATCH-1); /* Pre-Load */ while (ptr < end - delta) { chaintable[(size_t)(ptr) & MAXD_MASK] = delta; ptr++; } do { chaintable[(size_t)(ptr) & MAXD_MASK] = delta; /* Head of chain */ hashtable[HASH_VALUE(ptr)] = (ptr) - base; ptr++; } while (ptr < end); hc4->nexttoupdate = end; } return (int)ml; } static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, const u8 **matchpos, const u8 **startpos) { u16 *const chaintable = hc4->chaintable; HTYPE *const hashtable = hc4->hashtable; #if LZ4_ARCH64 const BYTE * const base = hc4->base; #else const int base = 0; #endif const u8 *ref; int nbattempts = MAX_NB_ATTEMPTS; int delta = (int)(ip - startlimit); /* First Match */ lz4hc_insert(hc4, ip); ref = hashtable[HASH_VALUE(ip)] + base; while ((ref >= ip - MAX_DISTANCE) && (ref 
>= hc4->base) && (nbattempts)) { nbattempts--; if (*(startlimit + longest) == *(ref - delta + longest)) { if (A32(ref) == A32(ip)) { const u8 *reft = ref + MINMATCH; const u8 *ipt = ip + MINMATCH; const u8 *startt = ip; while (ipt < matchlimit-(STEPSIZE - 1)) { #if LZ4_ARCH64 u64 diff = A64(reft) ^ A64(ipt); #else u32 diff = A32(reft) ^ A32(ipt); #endif if (!diff) { ipt += STEPSIZE; reft += STEPSIZE; continue; } ipt += LZ4_NBCOMMONBYTES(diff); goto _endcount; } #if LZ4_ARCH64 if ((ipt < (matchlimit - 3)) && (A32(reft) == A32(ipt))) { ipt += 4; reft += 4; } ipt += 2; #endif if ((ipt < (matchlimit - 1)) && (A16(reft) == A16(ipt))) { reft += 2; } if ((ipt < matchlimit) && (*reft == *ipt)) ipt++; _endcount: reft = ref; while ((startt > startlimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) { startt--; reft--; } if ((ipt - startt) > longest) { longest = (int)(ipt - startt); *matchpos = reft; *startpos = startt; } } } ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; } return longest; } static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, int ml, const u8 *ref) { int length, len; u8 *token; /* Encode Literal length */ length = (int)(*ip - *anchor); token = (*op)++; if (length >= (int)RUN_MASK) { *token = (RUN_MASK << ML_BITS); len = length - RUN_MASK; for (; len > 254 ; len -= 255) *(*op)++ = 255; *(*op)++ = (u8)len; } else *token = (length << ML_BITS); /* Copy Literals */ LZ4_BLINDCOPY(*anchor, *op, length); /* Encode Offset */ LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); /* Encode MatchLength */ len = (int)(ml - MINMATCH); if (len >= (int)ML_MASK) { *token += ML_MASK; len -= ML_MASK; for (; len > 509 ; len -= 510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len -= 255; *(*op)++ = 255; } *(*op)++ = (u8)len; } else *token += len; /* Prepare next loop */ *ip += ml; *anchor = *ip; return 0; } static int lz4_compresshcctx(struct lz4hc_data *ctx, const char *source, char *dest, int isize) { const u8 *ip = (const u8 
*)source; const u8 *anchor = ip; const u8 *const iend = ip + isize; const u8 *const mflimit = iend - MFLIMIT; const u8 *const matchlimit = (iend - LASTLITERALS); u8 *op = (u8 *)dest; int ml, ml2, ml3, ml0; const u8 *ref = NULL; const u8 *start2 = NULL; const u8 *ref2 = NULL; const u8 *start3 = NULL; const u8 *ref3 = NULL; const u8 *start0; const u8 *ref0; int lastrun; ip++; /* Main Loop */ while (ip < mflimit) { ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); if (!ml) { ip++; continue; } /* saved, in case we would skip too much */ start0 = ip; ref0 = ref; ml0 = ml; _search2: if (ip+ml < mflimit) ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2); else ml2 = ml; /* No better match */ if (ml2 == ml) { lz4_encodesequence(&ip, &op, &anchor, ml, ref); continue; } if (start0 < ip) { /* empirical */ if (start2 < ip + ml0) { ip = start0; ref = ref0; ml = ml0; } } /* * Here, start0==ip * First Match too small : removed */ if ((start2 - ip) < 3) { ml = ml2; ip = start2; ref = ref2; goto _search2; } _search3: /* * Currently we have : * ml2 > ml1, and * ip1+3 <= ip2 (usually < ip1+ml1) */ if ((start2 - ip) < OPTIMAL_ML) { int correction; int new_ml = ml; if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; if (ip + new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; correction = new_ml - (int)(start2 - ip); if (correction > 0) { start2 += correction; ref2 += correction; ml2 -= correction; } } /* * Now, we have start2 = ip+new_ml, * with new_ml=min(ml, OPTIMAL_ML=18) */ if (start2 + ml2 < mflimit) ml3 = lz4hc_insertandgetwidermatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3); else ml3 = ml2; /* No better match : 2 sequences to encode */ if (ml3 == ml2) { /* ip & ref are known; Now for ml */ if (start2 < ip+ml) ml = (int)(start2 - ip); /* Now, encode 2 sequences */ lz4_encodesequence(&ip, &op, &anchor, ml, ref); ip = start2; lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); 
continue; } /* Not enough space for match 2 : remove it */ if (start3 < ip + ml + 3) { /* * can write Seq1 immediately ==> Seq2 is removed, * so Seq3 becomes Seq1 */ if (start3 >= (ip + ml)) { if (start2 < ip + ml) { int correction = (int)(ip + ml - start2); start2 += correction; ref2 += correction; ml2 -= correction; if (ml2 < MINMATCH) { start2 = start3; ref2 = ref3; ml2 = ml3; } } lz4_encodesequence(&ip, &op, &anchor, ml, ref); ip = start3; ref = ref3; ml = ml3; start0 = start2; ref0 = ref2; ml0 = ml2; goto _search2; } start2 = start3; ref2 = ref3; ml2 = ml3; goto _search3; } /* * OK, now we have 3 ascending matches; let's write at least * the first one ip & ref are known; Now for ml */ if (start2 < ip + ml) { if ((start2 - ip) < (int)ML_MASK) { int correction; if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; correction = ml - (int)(start2 - ip); if (correction > 0) { start2 += correction; ref2 += correction; ml2 -= correction; } } else ml = (int)(start2 - ip); } lz4_encodesequence(&ip, &op, &anchor, ml, ref); ip = start2; ref = ref2; ml = ml2; start2 = start3; ref2 = ref3; ml2 = ml3; goto _search3; } /* Encode Last Literals */ lastrun = (int)(iend - anchor); if (lastrun >= (int)RUN_MASK) { *op++ = (RUN_MASK << ML_BITS); lastrun -= RUN_MASK; for (; lastrun > 254 ; lastrun -= 255) *op++ = 255; *op++ = (u8) lastrun; } else *op++ = (lastrun << ML_BITS); memcpy(op, anchor, iend - anchor); op += iend - anchor; /* End */ return (int) (((char *)op) - dest); } int lz4hc_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem) { int ret = -1; int out_len = 0; struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem; lz4hc_init(hc4, (const u8 *)src); out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src, (char *)dst, (int)src_len); if (out_len < 0) goto exit; *dst_len = out_len; return 0; exit: return ret; } EXPORT_SYMBOL(lz4hc_compress); 
MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("LZ4HC compressor");
gpl-2.0
sayeed99/kernel-FlareM
arch/um/kernel/initrd.c
4256
1727
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <asm/types.h>
#include <init.h>
#include <os.h>

/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;
static int load_initrd(char *filename, void *buf, int size);

/*
 * Load the initrd named by the "initrd=" command line option into a
 * bootmem buffer and publish it through initrd_start/initrd_end.
 * Always returns 0: failures are reported via printk and simply leave
 * the initrd unset, since a __uml_postsetup hook cannot abort the boot.
 */
static int __init read_initrd(void)
{
	void *area;
	long long size;
	int err;

	if (initrd == NULL)
		return 0;

	err = os_file_size(initrd, &size);
	if (err)
		return 0;

	/*
	 * This is necessary because alloc_bootmem craps out if you
	 * ask for no memory.
	 */
	if (size == 0) {
		printk(KERN_ERR "\"%s\" is a zero-size initrd\n", initrd);
		return 0;
	}

	area = alloc_bootmem(size);
	if (area == NULL)
		return 0;

	if (load_initrd(initrd, area, size) == -1)
		return 0;

	initrd_start = (unsigned long) area;
	initrd_end = initrd_start + size;
	return 0;
}

__uml_postsetup(read_initrd);

/* "initrd=" setup handler; just records the file name for read_initrd(). */
static int __init uml_initrd_setup(char *line, int *add)
{
	initrd = line;
	return 0;
}

__uml_setup("initrd=", uml_initrd_setup,
"initrd=<initrd image>\n"
"    This is used to boot UML from an initrd image. The argument is the\n"
"    name of the file containing the image.\n\n"
);

/*
 * Read exactly 'size' bytes of 'filename' into 'buf'.
 * Returns 0 on success, -1 on failure.
 *
 * FIX: the descriptor is now closed on every path; the original code
 * leaked it when os_read_file() returned short on the error path.
 */
static int load_initrd(char *filename, void *buf, int size)
{
	int fd, n;

	fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
	if (fd < 0) {
		printk(KERN_ERR "Opening '%s' failed - err = %d\n", filename,
		       -fd);
		return -1;
	}
	n = os_read_file(fd, buf, size);
	os_close_file(fd);
	if (n != size) {
		printk(KERN_ERR "Read of %d bytes from '%s' failed, "
		       "err = %d\n", size,
		       filename, -n);
		return -1;
	}
	return 0;
}
gpl-2.0
EPDCenterSpain/kernel-mk908
drivers/media/dvb/dvb-usb/cinergyT2-fe.c
4768
8474
/* * TerraTec Cinergy T2/qanu USB2 DVB-T adapter. * * Copyright (C) 2007 Tomi Orava (tomimo@ncircle.nullnet.fi) * * Based on the dvb-usb-framework code and the * original Terratec Cinergy T2 driver by: * * Copyright (C) 2004 Daniel Mack <daniel@qanu.de> and * Holger Waechtler <holger@qanu.de> * * Protocol Spec published on http://qanu.de/specs/terratec_cinergyT2.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "cinergyT2.h" /** * convert linux-dvb frontend parameter set into TPS. * See ETSI ETS-300744, section 4.6.2, table 9 for details. * * This function is probably reusable and may better get placed in a support * library. * * We replace errornous fields by default TPS fields (the ones with value 0). 
*/ static uint16_t compute_tps(struct dvb_frontend_parameters *p) { struct dvb_ofdm_parameters *op = &p->u.ofdm; uint16_t tps = 0; switch (op->code_rate_HP) { case FEC_2_3: tps |= (1 << 7); break; case FEC_3_4: tps |= (2 << 7); break; case FEC_5_6: tps |= (3 << 7); break; case FEC_7_8: tps |= (4 << 7); break; case FEC_1_2: case FEC_AUTO: default: /* tps |= (0 << 7) */; } switch (op->code_rate_LP) { case FEC_2_3: tps |= (1 << 4); break; case FEC_3_4: tps |= (2 << 4); break; case FEC_5_6: tps |= (3 << 4); break; case FEC_7_8: tps |= (4 << 4); break; case FEC_1_2: case FEC_AUTO: default: /* tps |= (0 << 4) */; } switch (op->constellation) { case QAM_16: tps |= (1 << 13); break; case QAM_64: tps |= (2 << 13); break; case QPSK: default: /* tps |= (0 << 13) */; } switch (op->transmission_mode) { case TRANSMISSION_MODE_8K: tps |= (1 << 0); break; case TRANSMISSION_MODE_2K: default: /* tps |= (0 << 0) */; } switch (op->guard_interval) { case GUARD_INTERVAL_1_16: tps |= (1 << 2); break; case GUARD_INTERVAL_1_8: tps |= (2 << 2); break; case GUARD_INTERVAL_1_4: tps |= (3 << 2); break; case GUARD_INTERVAL_1_32: default: /* tps |= (0 << 2) */; } switch (op->hierarchy_information) { case HIERARCHY_1: tps |= (1 << 10); break; case HIERARCHY_2: tps |= (2 << 10); break; case HIERARCHY_4: tps |= (3 << 10); break; case HIERARCHY_NONE: default: /* tps |= (0 << 10) */; } return tps; } struct cinergyt2_fe_state { struct dvb_frontend fe; struct dvb_usb_device *d; }; static int cinergyt2_fe_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_get_status_msg result; u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; int ret; ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result, sizeof(result), 0); if (ret < 0) return ret; *status = 0; if (0xffff - le16_to_cpu(result.gain) > 30) *status |= FE_HAS_SIGNAL; if (result.lock_bits & (1 << 6)) *status |= FE_HAS_LOCK; if (result.lock_bits & (1 << 5)) *status |= 
FE_HAS_SYNC; if (result.lock_bits & (1 << 4)) *status |= FE_HAS_CARRIER; if (result.lock_bits & (1 << 1)) *status |= FE_HAS_VITERBI; if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) *status &= ~FE_HAS_LOCK; return 0; } static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_get_status_msg status; char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; int ret; ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, sizeof(status), 0); if (ret < 0) return ret; *ber = le32_to_cpu(status.viterbi_error_rate); return 0; } static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_get_status_msg status; u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; int ret; ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status, sizeof(status), 0); if (ret < 0) { err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n", ret); return ret; } *unc = le32_to_cpu(status.uncorrected_block_count); return 0; } static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_get_status_msg status; char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; int ret; ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, sizeof(status), 0); if (ret < 0) { err("cinergyt2_fe_read_signal_strength() Failed!" 
" (Error=%d)\n", ret); return ret; } *strength = (0xffff - le16_to_cpu(status.gain)); return 0; } static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_get_status_msg status; char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; int ret; ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, sizeof(status), 0); if (ret < 0) { err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret); return ret; } *snr = (status.snr << 8) | status.snr; return 0; } static int cinergyt2_fe_init(struct dvb_frontend *fe) { return 0; } static int cinergyt2_fe_sleep(struct dvb_frontend *fe) { deb_info("cinergyt2_fe_sleep() Called\n"); return 0; } static int cinergyt2_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 800; return 0; } static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_set_parameters_msg param; char result[2]; int err; param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; param.tps = cpu_to_le16(compute_tps(fep)); param.freq = cpu_to_le32(fep->frequency / 1000); param.bandwidth = 8 - fep->u.ofdm.bandwidth - BANDWIDTH_8_MHZ; param.flags = 0; err = dvb_usb_generic_rw(state->d, (char *)&param, sizeof(param), result, sizeof(result), 0); if (err < 0) err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); return (err < 0) ? 
err : 0; } static int cinergyt2_fe_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { return 0; } static void cinergyt2_fe_release(struct dvb_frontend *fe) { struct cinergyt2_fe_state *state = fe->demodulator_priv; if (state != NULL) kfree(state); } static struct dvb_frontend_ops cinergyt2_fe_ops; struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d) { struct cinergyt2_fe_state *s = kzalloc(sizeof( struct cinergyt2_fe_state), GFP_KERNEL); if (s == NULL) return NULL; s->d = d; memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); s->fe.demodulator_priv = s; return &s->fe; } static struct dvb_frontend_ops cinergyt2_fe_ops = { .info = { .name = DRIVER_NAME, .type = FE_OFDM, .frequency_min = 174000000, .frequency_max = 862000000, .frequency_stepsize = 166667, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .release = cinergyt2_fe_release, .init = cinergyt2_fe_init, .sleep = cinergyt2_fe_sleep, .set_frontend = cinergyt2_fe_set_frontend, .get_frontend = cinergyt2_fe_get_frontend, .get_tune_settings = cinergyt2_fe_get_tune_settings, .read_status = cinergyt2_fe_read_status, .read_ber = cinergyt2_fe_read_ber, .read_signal_strength = cinergyt2_fe_read_signal_strength, .read_snr = cinergyt2_fe_read_snr, .read_ucblocks = cinergyt2_fe_read_unc_blocks, };
gpl-2.0
awifi-dev/android_kernel_lge_palman
drivers/scsi/sun3_NCR5380.c
5280
93465
/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by Sam Creasey. */ /* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * DISTRIBUTION RELEASE 6. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." 
* constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ #if (NDEBUG & NDEBUG_LISTS) #define LIST(x,y) \ { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ if ((x)==(y)) udelay(5); } #define REMOVE(w,x,y,z) \ { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ if ((x)==(y)) udelay(5); } #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * Issues : * * The other Linux SCSI drivers were written when Linux was Intel PC-only, * and specifically for each board rather than each chip. This makes their * adaptation to platforms like the Mac (Some of which use NCR5380's) * more difficult than it has to be. * * Also, many of the SCSI drivers were written before the command queuing * routines were implemented, meaning their implementations of queued * commands were hacked on rather than designed in from the start. * * When I designed the Linux SCSI drivers I figured that * while having two different SCSI boards in a system might be useful * for debugging things, two of the same type wouldn't be used. * Well, I was wrong and a number of users have mailed me about running * multiple high-performance SCSI boards in a server. * * Finally, when I get questions from users, I have no idea what * revision of my driver they are running. * * This driver attempts to address these problems : * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. 
* * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * To solve the multiple-boards-in-the-same-system problem, * there is a separate instance structure for each instance * of a 5380 in the system. So, multiple NCR5380 drivers will * be able to coexist with appropriate changes to the high level * SCSI code. * * A NCR5380_PUBLIC_REVISION macro is provided, with the release * number (updated for each public release) printed by the * NCR5380_print_options command, which should be called from the * wrapper detect function, so that I know what release of the driver * users are using. * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. 
* * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started when not running by the interrupt handler, * timer, and queue command function. It attempts to establish * I_T_L or I_T_L_Q nexuses by removing the commands from the * issue queue and calling NCR5380_select() if a nexus * is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If USLEEP * was defined, and the target is idle for too long, the system * will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * * On command termination, the done function will be called as * appropriate. * * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 
* * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register * * NCR5380_write(register, value) - write to the specific register * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. * Note that the DMA setup functions should return the number of bytes * that they were able to program the controller for. * * Also note that generic i386/PC versions of these macros are * available as NCR5380_i386_dma_write_setup, * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize * NCR5380_dma_residual(instance); - residual count * * PSEUDO functions : * NCR5380_pwrite(instance, src, count) * NCR5380_pread(instance, dst, count); * * If nothing specific to this implementation needs doing (ie, with external * hardware), you must also define * * NCR5380_queue_command * NCR5380_reset * NCR5380_abort * NCR5380_proc_info * * to be the global entry points into the specific driver, ie * #define NCR5380_queue_command t128_queue_command. * * If this is not done, the routines will be defined as static functions * with the NCR5380* names and the user must provide a globally * accessible wrapper function. * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. Before the specific driver initialization * code finishes, NCR5380_print_options should be called. */ static struct Scsi_Host *first_instance = NULL; static struct scsi_host_template *the_template = NULL; /* Macros ease life... 
:-) */

/* Fetch the driver-private data hanging off a Scsi_Host into a local
 * 'hostdata' variable (SETUP_HOSTDATA declares it; HOSTDATA just casts). */
#define SETUP_HOSTDATA(in)				\
    struct NCR5380_hostdata *hostdata =			\
	(struct NCR5380_hostdata *)(in)->hostdata
#define	HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)

/* Commands are chained into driver queues through the otherwise unused
 * host_scribble field of struct scsi_cmnd. */
#define	NEXT(cmd)		((struct scsi_cmnd *)(cmd)->host_scribble)
#define	SET_NEXT(cmd, next)	((cmd)->host_scribble = (void *)(next))
#define	NEXTADDR(cmd)		((struct scsi_cmnd **)&((cmd)->host_scribble))

/* Shorthands for the host number used in log messages. */
#define	HOSTNO		instance->host_no
#define	H_NO(cmd)	(cmd)->device->host->host_no

/* Kernel virtual address of a scatterlist entry's data. */
#define SGADDR(buffer)	(void *)(((unsigned long)sg_virt(((buffer)))))

#ifdef SUPPORT_TAGS

/*
 * Functions for handling tagged queuing
 * =====================================
 *
 * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
 *
 * Using consecutive numbers for the tags is no good idea in my eyes. There
 * could be wrong re-usings if the counter (8 bit!) wraps and some early
 * command has been preempted for a long time. My solution: a bitfield for
 * remembering used tags.
 *
 * There's also the problem that each target has a certain queue size, but we
 * cannot know it in advance :-( We just see a QUEUE_FULL status being
 * returned. So, in this case, the driver internal queue size assumption is
 * reduced to the number of active tags if QUEUE_FULL is returned by the
 * target. The command is returned to the mid-level, but with status changed
 * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
 * correctly.
 *
 * We're also not allowed running tagged commands as long as an untagged
 * command is active. And REQUEST SENSE commands after a contingent allegiance
 * condition _must_ be untagged. To keep track whether an untagged command has
 * been issued, the host->busy array is still employed, as it is without
 * support for tagged queuing.
 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip
 * between that pair, there could only happen a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
 */

/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */
#undef TAG_NONE
#define TAG_NONE 0xff

/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */
#if (MAX_TAGS % 32) != 0
#error "MAX_TAGS must be a multiple of 32!"
#endif

/* Per target+LUN bookkeeping of which SCSI-2 tag numbers are in use. */
typedef struct {
    char	allocated[MAX_TAGS/8];	/* bitmap of tags currently in use */
    int		nr_allocated;		/* number of bits set in 'allocated' */
    int		queue_size;		/* current queue-depth assumption */
} TAG_ALLOC;

static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */


/*
 * Reset all tag bookkeeping to "nothing allocated".  No-op unless tagged
 * queuing was enabled at setup time.
 */
static void __init init_tags( void )
{
    int target, lun;
    TAG_ALLOC *ta;

    if (!setup_use_tagged_queuing)
	return;

    for( target = 0; target < 8; ++target ) {
	for( lun = 0; lun < 8; ++lun ) {
	    ta = &TagAlloc[target][lun];
	    memset( &ta->allocated, 0, MAX_TAGS/8 );
	    ta->nr_allocated = 0;
	    /* At the beginning, assume the maximum queue size we could
	     * support (MAX_TAGS). This value will be decreased if the target
	     * returns QUEUE_FULL status.
	     */
	    ta->queue_size = MAX_TAGS;
	}
    }
}


/* Check if we can issue a command to this LUN: First see if the LUN is marked
 * busy by an untagged command. If the command should use tagged queuing, also
 * check that there is a free tag and the target's queue won't overflow. This
 * function should be called with interrupts disabled to avoid race
 * conditions.
*/ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) { SETUP_HOSTDATA(cmd->device->host); if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)) return( 1 ); if (!should_be_tagged || !setup_use_tagged_queuing || !cmd->device->tagged_supported) return( 0 ); if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); return( 1 ); } return( 0 ); } /* Allocate a tag for a command (there are no checks anymore, check_lun_busy() * must be called before!), or reserve the LUN in 'busy' if the command is * untagged. */ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) { SETUP_HOSTDATA(cmd->device->host); /* If we or the target don't support tagged queuing, allocate the LUN for * an untagged command. */ if (!should_be_tagged || !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else { TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); set_bit( cmd->tag, &ta->allocated ); ta->nr_allocated++; TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated ); } } /* Mark the tag of command 'cmd' as free, or in case of an untagged command, * unlock the LUN. 
 */
static void cmd_free_tag(struct scsi_cmnd *cmd)
{
    SETUP_HOSTDATA(cmd->device->host);

    if (cmd->tag == TAG_NONE) {
	/* Untagged command: just release the LUN reservation. */
	hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
	TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
		    H_NO(cmd), cmd->device->id, cmd->device->lun );
    }
    else if (cmd->tag >= MAX_TAGS) {
	printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
	       H_NO(cmd), cmd->tag );
    }
    else {
	TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
	/* Ordering matters (see comment above cmd_get_tag): clear the tag
	 * bit before decrementing nr_allocated. */
	clear_bit( cmd->tag, &ta->allocated );
	ta->nr_allocated--;
	TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
		    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
    }
}


/* Forget all outstanding tag allocations for every target/LUN, e.g. after
 * a bus reset.  No-op unless tagged queuing is enabled. */
static void free_all_tags( void )
{
    int target, lun;
    TAG_ALLOC *ta;

    if (!setup_use_tagged_queuing)
	return;

    for( target = 0; target < 8; ++target ) {
	for( lun = 0; lun < 8; ++lun ) {
	    ta = &TagAlloc[target][lun];
	    memset( &ta->allocated, 0, MAX_TAGS/8 );
	    ta->nr_allocated = 0;
	}
    }
}

#endif /* SUPPORT_TAGS */


/*
 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
 *
 * Purpose : initialize the saved data pointers for cmd to point to the
 *	start of the buffer.
 *
 * Inputs : cmd - struct scsi_cmnd structure to have pointers reset.
 */

static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
{
    /*
     * Initialize the Scsi Pointer field so that all of the commands in the
     * various queues are valid.
     */

    if (scsi_bufflen(cmd)) {
	cmd->SCp.buffer = scsi_sglist(cmd);
	/* buffers_residual counts the scatterlist entries NOT yet started */
	cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
	cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
	cmd->SCp.this_residual = cmd->SCp.buffer->length;
    } else {
	cmd->SCp.buffer = NULL;
	cmd->SCp.buffers_residual = 0;
	cmd->SCp.ptr = NULL;
	cmd->SCp.this_residual = 0;
    }
}

#include <linux/delay.h>

#if 1
/* Human-readable names for the NCR5380 register bits; used only by the
 * debug dump helpers below. */
static struct {
    unsigned char mask;
    const char * name;}
signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
    { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
    { SR_SEL, "SEL" }, {0, NULL}},
basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
    {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
    {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
    {0, NULL}},
mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
    {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
    "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
    {MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
    {MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}};

/*
 * Function : void NCR5380_print(struct Scsi_Host *instance)
 *
 * Purpose : print the SCSI bus signals for debugging purposes
 *
 * Input : instance - which NCR5380
 */

static void NCR5380_print(struct Scsi_Host *instance)
{
    unsigned char status, data, basr, mr, icr, i;
    unsigned long flags;

    /* Snapshot all chip registers with interrupts off so the dump is
     * internally consistent. */
    local_irq_save(flags);
    data = NCR5380_read(CURRENT_SCSI_DATA_REG);
    status = NCR5380_read(STATUS_REG);
    mr = NCR5380_read(MODE_REG);
    icr = NCR5380_read(INITIATOR_COMMAND_REG);
    basr = NCR5380_read(BUS_AND_STATUS_REG);
    local_irq_restore(flags);
    printk("STATUS_REG: %02x ", status);
    for (i = 0; signals[i].mask ; ++i)
	if (status & signals[i].mask)
	    printk(",%s", signals[i].name);
    printk("\nBASR: %02x ", basr);
    for (i = 0; basrs[i].mask ; ++i)
	if (basr & basrs[i].mask)
	    printk(",%s", basrs[i].name);
    printk("\nICR: %02x ", icr);
    for (i = 0; icrs[i].mask; ++i)
	if (icr & icrs[i].mask)
	    printk(",%s", icrs[i].name);
    printk("\nMODE: %02x ", mr);
    for (i = 0; mrs[i].mask; ++i)
	if (mr & mrs[i].mask)
	    printk(",%s", mrs[i].name);
    printk("\n");
}

/* SCSI bus phase codes and their names; PHASE_UNKNOWN terminates lookups. */
static struct {
    unsigned char value;
    const char *name;
} phases[] = {
    {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
    {PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
    {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
    {PHASE_UNKNOWN, "UNKNOWN"}};

/*
 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
 *
 * Purpose : print the current SCSI phase for debugging purposes
 *
 * Input : instance - which NCR5380
 */

static void NCR5380_print_phase(struct Scsi_Host *instance)
{
    unsigned char status;
    int i;

    status = NCR5380_read(STATUS_REG);
    if (!(status & SR_REQ))
	printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
    else {
	/* The phase bits are only valid while the target asserts REQ;
	 * look them up in the table above. */
	for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
	     (phases[i].value != (status & PHASE_MASK)); ++i);
	printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
    }
}

#else /* !NDEBUG */

/* dummies... */
__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };

#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from an
 * interrupt or bottom half.
 */
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

/* Nonzero while NCR5380_main() is executing; see the scheme comment above. */
static volatile int main_running = 0;
static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);

/* Schedule NCR5380_main() (from irq/bh context) unless it is already
 * running, in which case the running instance picks up new work itself. */
static __inline__ void queue_main(void)
{
    if (!main_running) {
	/* If in interrupt and NCR5380_main() not already running,
	   queue it on the 'immediate' task queue, to be processed
	   immediately after the current interrupt processing has
	   finished. */
	schedule_work(&NCR5380_tqueue);
    }
    /* else: nothing to do: the running NCR5380_main() will pick up
       any newly queued command. */
}


/* One-time global initialization hook; currently only logs the first call. */
static inline void NCR5380_all_init (void)
{
    static int done = 0;
    if (!done) {
	INI_PRINTK("scsi : NCR5380_all_init()\n");
	done = 1;
    }
}


/*
 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
 *
 * Purpose : called by probe code indicating the NCR5380 driver
 * options that were selected.
 *
 * Inputs : instance, pointer to this instance. Unused.
 */

static void __init NCR5380_print_options (struct Scsi_Host *instance)
{
    /* The option list is assembled at compile time from the configuration
     * macros, hence the #ifdefs inside the printk argument. */
    printk(" generic options"
#ifdef AUTOSENSE
    " AUTOSENSE"
#endif
#ifdef REAL_DMA
    " REAL DMA"
#endif
#ifdef PARITY
    " PARITY"
#endif
#ifdef SUPPORT_TAGS
    " SCSI-2 TAGGED QUEUING"
#endif
	   );
    printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
}

/*
 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
 *
 * Purpose : print commands in the various queues, called from
 *	NCR5380_abort and NCR5380_debug to aid debugging.
 *
 * Inputs : instance, pointer to this instance.
 */
static void NCR5380_print_status (struct Scsi_Host *instance)
{
    char *pr_bfr;
    char *start;
    int len;

    NCR_PRINT(NDEBUG_ANY);
    NCR_PRINT_PHASE(NDEBUG_ANY);

    /* Reuse the /proc formatter into a temporary page, then dump it. */
    pr_bfr = (char *) __get_free_page(GFP_ATOMIC);
    if (!pr_bfr) {
	printk("NCR5380_print_status: no memory for print buffer\n");
	return;
    }
    len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
    pr_bfr[len] = 0;
    printk("\n%s\n", pr_bfr);
    free_page((unsigned long) pr_bfr);
}


/******************************************/
/*
 * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
 *
 * *buffer: I/O buffer
 * **start: if inout == FALSE pointer into buffer where user read should start
 * offset: current offset
 * length: length of buffer
 * hostno: Scsi_Host host_no
 * inout: TRUE - user is writing; FALSE - user is reading
 *
 * Return the number of bytes read from or written
 */

#undef SPRINTF
/* Append formatted text at 'pos' only if it (plus slop) still fits in the
 * caller's buffer; silently drops output otherwise. */
#define SPRINTF(fmt,args...) \
  do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
	 pos += sprintf(pos, fmt , ## args); } while(0)

static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
			      int length);

static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
			     char **start, off_t offset, int length, int inout)
{
    char *pos = buffer;
    struct NCR5380_hostdata *hostdata;
    struct scsi_cmnd *ptr;
    unsigned long flags;
    off_t begin = 0;
/* Classic /proc pagination: rewind 'pos' while the requested 'offset' has
 * not been reached yet, accounting for what was skipped in 'begin'. */
#define check_offset()				\
    do {					\
	if (pos - buffer < offset - begin) {	\
	    begin += pos - buffer;		\
	    pos = buffer;			\
	}					\
    } while (0)

    hostdata = (struct NCR5380_hostdata *)instance->hostdata;

    if (inout) { /* Has data been written to the file ? */
	return(-ENOSYS); /* Currently this is a no-op */
    }
    SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
    check_offset();
    /* Walk the driver queues with interrupts off so they cannot change
     * underneath us while formatting. */
    local_irq_save(flags);
    SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't");
    check_offset();
    if (!hostdata->connected)
	SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
    else
	pos = lprint_Scsi_Cmnd ((struct scsi_cmnd *) hostdata->connected,
				pos, buffer, length);
    SPRINTF("scsi%d: issue_queue\n", HOSTNO);
    check_offset();
    for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr)) {
	pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
	check_offset();
    }

    SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
    check_offset();
    for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	 ptr = NEXT(ptr)) {
	pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
	check_offset();
    }

    local_irq_restore(flags);
    *start = buffer + (offset - begin);
    if (pos - buffer < offset - begin)
	return 0;
    else if (pos - buffer - (offset - begin) < length)
	return pos - buffer - (offset - begin);
    return length;
}

/* Format one queued command (destination target/LUN and CDB bytes) into the
 * /proc buffer; returns the updated write position. */
static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
			      int length)
{
    int i, s;
    unsigned char *command;
    SPRINTF("scsi%d: destination target %d, lun %d\n",
	    H_NO(cmd), cmd->device->id, cmd->device->lun);
    SPRINTF(" command = ");
    command = cmd->cmnd;
    SPRINTF("%2d (0x%02x)", command[0], command[0]);
    for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
	SPRINTF(" %02x", command[i]);
    SPRINTF("\n");
    return pos;
}


/*
 * Function : void NCR5380_init (struct Scsi_Host *instance)
 *
 * Purpose : initializes *instance and corresponding 5380 chip.
 *
 * Inputs : instance - instantiation of the 5380 driver.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 *	set correctly. I don't care about the irq and other fields.
 *
 */

static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
    int i;
    SETUP_HOSTDATA(instance);

    NCR5380_all_init();

    hostdata->aborted = 0;
    hostdata->id_mask = 1 << instance->this_id;
    /* id_higher_mask collects the ID bits of all devices with higher
     * arbitration priority than our own ID. */
    hostdata->id_higher_mask = 0;
    for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
	if (i > hostdata->id_mask)
	    hostdata->id_higher_mask |= i;
    for (i = 0; i < 8; ++i)
	hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
    init_tags();
#endif
#if defined (REAL_DMA)
    hostdata->dma_len = 0;
#endif
    hostdata->targets_present = 0;
    hostdata->connected = NULL;
    hostdata->issue_queue = NULL;
    hostdata->disconnected_queue = NULL;
    hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;

    /* Remember the first initialized instance; NCR5380_main() and the
     * interrupt handler operate on it. */
    if (!the_template) {
	the_template = instance->hostt;
	first_instance = instance;
    }

#ifndef AUTOSENSE
    if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
	printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
	       " without AUTOSENSE option, contingent allegiance conditions may\n"
	       " be incorrectly cleared.\n", HOSTNO);
#endif /* def AUTOSENSE */

    /* Put the 5380 chip into a known quiescent state. */
    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
    NCR5380_write(MODE_REG, MR_BASE);
    NCR5380_write(TARGET_COMMAND_REG, 0);
    NCR5380_write(SELECT_ENABLE_REG, 0);

    return 0;
}

static void NCR5380_exit(struct Scsi_Host *instance)
{
	/* Empty, as we didn't schedule any delayed work */
}

/*
 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
 *	void (*done)(struct scsi_cmnd *))
 *
 * Purpose : enqueues a SCSI command
 *
 * Inputs : cmd - SCSI command, done - function called on completion, with
 *	a pointer to the command descriptor.
 *
 * Returns : 0
 *
 * Side effects :
 *	cmd is added to the per instance issue_queue, with minor
 *	twiddling done to the host specific fields of cmd. If the
 *	main coroutine is not running, it is restarted.
 *
 */

/* Only make static if a wrapper function is used */
static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *))
{
    SETUP_HOSTDATA(cmd->device->host);
    struct scsi_cmnd *tmp;
    unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
    /* Debug aid: fail all WRITEs immediately when NO_WRITE is set. */
    switch (cmd->cmnd[0]) {
    case WRITE_6:
    case WRITE_10:
	printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
	       H_NO(cmd));
	cmd->result = (DID_ERROR << 16);
	done(cmd);
	return 0;
    }
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

#ifdef NCR5380_STATS
# if 0
    if (!hostdata->connected && !hostdata->issue_queue &&
	!hostdata->disconnected_queue) {
	hostdata->timebase = jiffies;
    }
# endif
# ifdef NCR5380_STAT_LIMIT
    if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
	/* Start-of-command accounting; collect_stats() adds the matching
	 * end-of-command values when the command finishes. */
	switch (cmd->cmnd[0])
	{
	    case WRITE:
	    case WRITE_6:
	    case WRITE_10:
		hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
		hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
		hostdata->pendingw++;
		break;
	    case READ:
	    case READ_6:
	    case READ_10:
		hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
		hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
		hostdata->pendingr++;
		break;
	}
#endif

    /*
     * We use the host_scribble field as a pointer to the next command
     * in a queue
     */

    SET_NEXT(cmd, NULL);
    cmd->scsi_done = done;

    cmd->result = 0;

    /*
     * Insert the cmd into the issue queue. Note that REQUEST SENSE
     * commands are added to the head of the queue since any command will
     * clear the contingent allegiance condition that exists and the
     * sense data is only guaranteed to be valid while the condition exists.
     */

    local_irq_save(flags);
    /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
     * Otherwise a running NCR5380_main may steal the lock.
     * Lock before actually inserting due to fairness reasons explained in
     * atari_scsi.c. If we insert first, then it's impossible for this driver
     * to release the lock.
     * Stop timer for this command while waiting for the lock, or timeouts
     * may happen (and they really do), and it's no good if the command doesn't
     * appear in any of the queues.
     * ++roman: Just disabling the NCR interrupt isn't sufficient here,
     * because also a timer int can trigger an abort or reset, which would
     * alter queues and touch the lock.
     */
    if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
	LIST(cmd, hostdata->issue_queue);
	SET_NEXT(cmd, hostdata->issue_queue);
	hostdata->issue_queue = cmd;
    } else {
	/* Walk to the tail and append. */
	for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
	     NEXT(tmp); tmp = NEXT(tmp))
	    ;
	LIST(cmd, tmp);
	SET_NEXT(tmp, cmd);
    }

    local_irq_restore(flags);

    QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
	      (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

    /* If queue_command() is called from an interrupt (real one or bottom
     * half), we let queue_main() do the job of taking care about main. If it
     * is already running, this is a no-op, else main will be queued.
     *
     * If we're not in an interrupt, we can call NCR5380_main()
     * unconditionally, because it cannot be already running.
     */
    /* NOTE(review): the '((flags >> 8) & 7) >= 6' term reads the m68k IPL
     * out of the saved SR — presumably "interrupts were masked"; confirm
     * against the architecture this driver is built for. */
    if (in_interrupt() || ((flags >> 8) & 7) >= 6)
	queue_main();
    else
	NCR5380_main(NULL);
    return 0;
}

static DEF_SCSI_QCMD(NCR5380_queue_command)

/*
 * Function : NCR5380_main (void)
 *
 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
 *	be done on the NCR5380 host adapters in a system. Both
 *	NCR5380_queue_command() and NCR5380_intr() will try to start it
 *	in case it is not running.
 *
 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
 *	reenable them. This prevents reentrancy and kernel stack overflow.
 */
static void NCR5380_main (struct work_struct *bl)
{
    struct scsi_cmnd *tmp, *prev;
    struct Scsi_Host *instance = first_instance;
    struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
    int done;
    unsigned long flags;

    /*
     * We run (with interrupts disabled) until we're sure that none of
     * the host adapters have anything that can be done, at which point
     * we set main_running to 0 and exit.
     *
     * Interrupts are enabled before doing various other internal
     * instructions, after we've decided that we need to run through
     * the loop again.
     *
     * this should prevent any race conditions.
     *
     * ++roman: Just disabling the NCR interrupt isn't sufficient here,
     * because also a timer int can trigger an abort or reset, which can
     * alter queues and touch the Falcon lock.
     */

    /* Tell int handlers main() is now already executing. Note that no races
       are possible here. If an int comes in before 'main_running' is set
       here, and queues/executes main via the task queue, it doesn't do any
       harm, just this instance of main won't find any work left to do. */
    if (main_running)
	return;
    main_running = 1;

    local_save_flags(flags);
    do {
	local_irq_disable(); /* Freeze request queues */
	done = 1;

	if (!hostdata->connected) {
	    MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
	    /*
	     * Search through the issue_queue for a command destined
	     * for a target that's not busy.
	     */
#if (NDEBUG & NDEBUG_LISTS)
	    /* Sanity check: detect a self-referencing (looped) queue. */
	    for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
		 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
		;
	    if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
#endif
	    for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
		 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {

#if (NDEBUG & NDEBUG_LISTS)
		if (prev != tmp)
		    printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
			   tmp, tmp->target, hostdata->busy[tmp->target],
			   tmp->lun);
#endif
		/* When we find one, remove it from the issue queue. */
		/* ++guenther: possible race with Falcon locking */
		if (
#ifdef SUPPORT_TAGS
		    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
		    !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
#endif
		    ) {
		    /* ++guenther: just to be sure, this must be atomic */
		    local_irq_disable();
		    /* Unlink 'tmp' from the singly-linked issue queue. */
		    if (prev) {
			REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
			SET_NEXT(prev, NEXT(tmp));
		    } else {
			REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
			hostdata->issue_queue = NEXT(tmp);
		    }
		    SET_NEXT(tmp, NULL);

		    /* reenable interrupts after finding one */
		    local_irq_restore(flags);

		    /*
		     * Attempt to establish an I_T_L nexus here.
		     * On success, instance->hostdata->connected is set.
		     * On failure, we must add the command back to the
		     * issue queue so we can keep trying.
		     */
		    MAIN_PRINTK("scsi%d: main(): command for target %d "
				"lun %d removed from issue_queue\n",
				HOSTNO, tmp->target, tmp->lun);
		    /*
		     * REQUEST SENSE commands are issued without tagged
		     * queueing, even on SCSI-II devices because the
		     * contingent allegiance condition exists for the
		     * entire unit.
		     */
		    /* ++roman: ...and the standard also requires that
		     * REQUEST SENSE command are untagged.
		     */

#ifdef SUPPORT_TAGS
		    cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE );
#endif
		    if (!NCR5380_select(instance, tmp,
			    (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
			    TAG_NEXT)) {
			break;
		    } else {
			/* Selection failed: put the command (and its tag)
			 * back and retry on a later pass. */
			local_irq_disable();
			LIST(tmp, hostdata->issue_queue);
			SET_NEXT(tmp, hostdata->issue_queue);
			hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
			cmd_free_tag( tmp );
#endif
			local_irq_restore(flags);
			MAIN_PRINTK("scsi%d: main(): select() failed, "
				    "returned to issue_queue\n", HOSTNO);
			if (hostdata->connected)
			    break;
		    }
		} /* if target/lun/target queue is not busy */
	    } /* for issue_queue */
	} /* if (!hostdata->connected) */
	if (hostdata->connected
#ifdef REAL_DMA
	    && !hostdata->dma_len
#endif
	    ) {
	    local_irq_restore(flags);
	    MAIN_PRINTK("scsi%d: main: performing information transfer\n",
			HOSTNO);
	    NCR5380_information_transfer(instance);
	    MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
	    done = 0;
	}
    } while (!done);

    /* Better allow ints _after_ 'main_running' has been cleared, else an
       interrupt could believe we'll pick up the work it left for us, but
       we won't see it anymore here... */
    main_running = 0;
    local_irq_restore(flags);
}

#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */

static void NCR5380_dma_complete( struct Scsi_Host *instance )
{
    SETUP_HOSTDATA(instance);
    int transfered;
    unsigned char **data;
    volatile int *count;

    if (!hostdata->connected) {
	printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
	       "no connected cmd\n", HOSTNO);
	return;
    }

    DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
	       HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
	       NCR5380_read(STATUS_REG));

    /* Let the sun3 DMA layer finish; a nonzero return means the UDC byte
     * counter overran, which we cannot recover from. */
    if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
	printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
	printk("please e-mail sammy@sammy.net with a description of how this\n");
	printk("error was produced.\n");
	BUG();
    }

    /* make sure we're not stuck in a data phase */
    if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
       (BASR_PHASE_MATCH | BASR_ACK)) {
	printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG));
	printk("scsi%d: bus stuck in data phase -- probably a single byte "
	       "overrun!\n", HOSTNO);
	printk("not prepared for this error!\n");
	printk("please e-mail sammy@sammy.net with a description of how this\n");
	printk("error was produced.\n");
	BUG();
    }

    /* Clear any pending parity interrupt and take the chip out of DMA mode. */
    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
    NCR5380_write(MODE_REG, MR_BASE);
    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

    /* Advance the connected command's saved data pointer by the number of
     * bytes actually moved (programmed length minus chip residual). */
    transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
    hostdata->dma_len = 0;

    data = (unsigned char **) &(hostdata->connected->SCp.ptr);
    count = &(hostdata->connected->SCp.this_residual);

    *data += transfered;
    *count -= transfered;
}
#endif /* REAL_DMA */


/*
 * Function : void NCR5380_intr (int irq)
 *
 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 *	from the disconnected queue, and restarting NCR5380_main()
 *	as required.
 *
 * Inputs : int irq, irq that caused this interrupt.
 *
 */

static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
    struct Scsi_Host *instance = first_instance;
    int done = 1, handled = 0;
    unsigned char basr;

    INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);

    /* Look for pending interrupts */
    basr = NCR5380_read(BUS_AND_STATUS_REG);
    INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
    /* dispatch to appropriate routine if found and done=0 */
    if (basr & BASR_IRQ) {
	NCR_PRINT(NDEBUG_INTR);
	/* SEL+IO asserted together: a target is reselecting us. */
	if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
	    done = 0;
//	    ENABLE_IRQ();
	    INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
	    NCR5380_reselect(instance);
	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	}
	else if (basr & BASR_PARITY_ERROR) {
	    INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	}
	else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
	    INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
	    (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	}
	else {
	    /*
	     * The rest of the interrupt conditions can occur only during a
	     * DMA transfer
	     */

#if defined(REAL_DMA)
	    /*
	     * We should only get PHASE MISMATCH and EOP interrupts if we have
	     * DMA enabled, so do a sanity check based on the current setting
	     * of the MODE register.
	     */

	    if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
		((basr & BASR_END_DMA_TRANSFER) ||
		 !(basr & BASR_PHASE_MATCH))) {

		INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
		NCR5380_dma_complete( instance );
		done = 0;
//		ENABLE_IRQ();
	    } else
#endif /* REAL_DMA */
	    {
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
		if (basr & BASR_PHASE_MATCH)
		    INT_PRINTK("scsi%d: unknown interrupt, "
			       "BASR 0x%x, MR 0x%x, SR 0x%x\n",
			       HOSTNO, basr, NCR5380_read(MODE_REG),
			       NCR5380_read(STATUS_REG));
		(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	    }
	} /* if !(SELECTION || PARITY) */
	handled = 1;
    } /* BASR & IRQ */
    else {
	printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
	       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
	       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
	(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif
    }

    if (!done) {
	INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
	/* Put a call to NCR5380_main() on the queue... */
	queue_main();
    }
    return IRQ_RETVAL(handled);
}

#ifdef NCR5380_STATS
/* Close out the per-target timing/byte statistics opened by
 * NCR5380_queue_command_lck() when the command was queued. */
static void collect_stats(struct NCR5380_hostdata *hostdata,
			  struct scsi_cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
    if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
	switch (cmd->cmnd[0])
	{
	    case WRITE:
	    case WRITE_6:
	    case WRITE_10:
		hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
		/*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
		hostdata->pendingw--;
		break;
	    case READ:
	    case READ_6:
	    case READ_10:
		hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
		/*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
		hostdata->pendingr--;
		break;
	}
}
#endif

/*
 * Function : int NCR5380_select(struct Scsi_Host *instance,
 *	struct scsi_cmnd *cmd, int tag);
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
 *
 * Inputs : instance - instantiation of the 5380 driver on which this
 *	target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
 *	new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
 *	the command that is presently connected.
 *
 * Returns : -1 if selection could not execute for some reason,
 *	0 if selection succeeded or failed because the target
 *	did not respond.
 *
 * Side effects :
 *	If bus busy, arbitration failed, etc, NCR5380_select() will exit
 *		with registers as they should have been on entry - ie
 *		SELECT_ENABLE will be set appropriately, the NCR5380
 *		will cease to drive any SCSI bus signals.
 *
 *	If successful : I_T_L or I_T_L_Q nexus will be established,
 *		instance->connected will be set to cmd.
 *		SELECT interrupt will be disabled.
 *
 *	If failed (no target) : cmd->scsi_done() will be called, and the
 *		cmd->result host byte set to DID_BAD_TARGET.
 */
static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, int tag)
{
	SETUP_HOSTDATA(instance);
	unsigned char tmp[3], phase;
	unsigned char *data;
	int len;
	unsigned long timeout;
	unsigned long flags;

	hostdata->restart_select = 0;
	NCR_PRINT(NDEBUG_ARBITRATION);
	ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
		   instance->this_id);

	/*
	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
	 * data bus during SELECTION.
	 */

	local_irq_save(flags);
	if (hostdata->connected) {
		/* a reselection established a nexus while we were getting here */
		local_irq_restore(flags);
		return -1;
	}
	NCR5380_write(TARGET_COMMAND_REG, 0);

	/*
	 * Start arbitration.
	 */

	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
	NCR5380_write(MODE_REG, MR_ARBITRATE);

	local_irq_restore(flags);

	/* Wait for arbitration logic to complete */
#ifdef NCR_TIMEOUT
	{
		unsigned long timeout = jiffies + 2*NCR_TIMEOUT;

		while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
		       && time_before(jiffies, timeout) && !hostdata->connected)
			;
		if (time_after_eq(jiffies, timeout)) {
			printk("scsi : arbitration timeout at %d\n", __LINE__);
			NCR5380_write(MODE_REG, MR_BASE);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
	}
#else /* NCR_TIMEOUT */
	/*
	 * NOTE(review): unbounded busy-wait when NCR_TIMEOUT is not
	 * defined; a wedged chip would hang here.  Also aborted by a
	 * concurrent reselection setting hostdata->connected.
	 */
	while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
	       && !hostdata->connected);
#endif

	ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);

	if (hostdata->connected) {
		/* a reselection won the bus; back out of arbitration */
		NCR5380_write(MODE_REG, MR_BASE);
		return -1;
	}
	/*
	 * The arbitration delay is 2.2us, but this is a minimum and there is
	 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
	 * the integral nature of udelay().
	 *
	 */

	udelay(3);

	/* Check for lost arbitration (ICR_ARBITRATION_LOST is deliberately
	 * sampled twice, around the higher-priority-ID check) */
	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
			   HOSTNO);
		return -1;
	}

	/* after/during arbitration, BSY should be asserted.
	   IBM DPES-31080 Version S31Q works now */
	/* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL |
		      ICR_ASSERT_BSY ) ;

	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
			   HOSTNO);
		return -1;
	}

	/*
	 * Again, bus clear + bus settle time is 1.2us, however, this is
	 * a minimum so we'll udelay ceil(1.2)
	 */

#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
	/* ++roman: But some targets (see above :-) seem to need a bit more... */
	udelay(15);
#else
	udelay(2);
#endif

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);

	/*
	 * Now that we have won arbitration, start Selection process, asserting
	 * the host and target ID's on the SCSI bus.
	 */

	NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));

	/*
	 * Raise ATN while SEL is true before BSY goes false from arbitration,
	 * since this is the only way to guarantee that we'll get a MESSAGE OUT
	 * phase immediately after selection.
	 */

	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
		      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
	NCR5380_write(MODE_REG, MR_BASE);

	/*
	 * Reselect interrupts must be turned off prior to the dropping of BSY,
	 * otherwise we will trigger an interrupt.
	 */

	if (hostdata->connected) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	NCR5380_write(SELECT_ENABLE_REG, 0);

	/*
	 * The initiator shall then wait at least two deskew delays and release
	 * the BSY signal.
	 */
	udelay(1);	/* wingel -- wait two bus deskew delay >2*45ns */

	/* Reset BSY */
	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
		      ICR_ASSERT_ATN | ICR_ASSERT_SEL));

	/*
	 * Something weird happens when we cease to drive BSY - looks
	 * like the board/chip is letting us do another read before the
	 * appropriate propagation delay has expired, and we're confusing
	 * a BSY signal from ourselves as the target's response to SELECTION.
	 *
	 * A small delay (the 'C++' frontend breaks the pipeline with an
	 * unnecessary jump, making it work on my 386-33/Trantor T128, the
	 * tighter 'C' code breaks and requires this) solves the problem -
	 * the 1 us delay is arbitrary, and only used because this delay will
	 * be the same on other platforms and since it works here, it should
	 * work there.
	 *
	 * wingel suggests that this could be due to failing to wait
	 * one deskew delay.
	 */

	udelay(1);

	SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);

	/*
	 * The SCSI specification calls for a 250 ms timeout for the actual
	 * selection.
	 */

	timeout = jiffies + 25;

	/*
	 * XXX very interesting - we're seeing a bounce where the BSY we
	 * asserted is being reflected / still asserted (propagation delay?)
	 * and it's detecting as true.  Sigh.
	 */

#if 0
	/* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
	 * IO while SEL is true. But again, there are some disks out the in the
	 * world that do that nevertheless. (Somebody claimed that this announces
	 * reselection capability of the target.) So we better skip that test and
	 * only wait for BSY...
	 * (Famous german words: Der Klügere gibt nach :-)
	 */

	while (time_before(jiffies, timeout) &&
	       !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)));

	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		NCR5380_reselect(instance);
		printk (KERN_ERR "scsi%d: reselection after won arbitration?\n",
			HOSTNO);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return -1;
	}
#else
	while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY));
#endif

	/*
	 * No less than two deskew delays after the initiator detects the
	 * BSY signal is true, it shall release the SEL signal and may
	 * change the DATA BUS.					-wingel
	 */

	udelay(1);

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
		/* Selection timed out: no target answered within 250ms. */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		if (hostdata->targets_present & (1 << cmd->device->id)) {
			/* the target answered before - something is wrong */
			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
			if (hostdata->restart_select)
				printk(KERN_NOTICE "\trestart select\n");
			NCR_PRINT(NDEBUG_ANY);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
		cmd->result = DID_BAD_TARGET << 16;
#ifdef NCR5380_STATS
		collect_stats(hostdata, cmd);
#endif
#ifdef SUPPORT_TAGS
		cmd_free_tag( cmd );
#endif
		cmd->scsi_done(cmd);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
		/* NOTE(review): duplicated SELECT_ENABLE_REG write below is
		 * redundant but harmless (same value rewritten). */
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return 0;
	}

	hostdata->targets_present |= (1 << cmd->device->id);

	/*
	 * Since we followed the SCSI spec, and raised ATN while SEL
	 * was true but before BSY was false during selection, the information
	 * transfer phase should be a MESSAGE OUT phase so that we can send the
	 * IDENTIFY message.
	 *
	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
	 * message (2 bytes) with a tag ID that we increment with every command
	 * until it wraps back to 0.
	 *
	 * XXX - it turns out that there are some broken SCSI-II devices,
	 * which claim to support tagged queuing but fail when more than
	 * some number of commands are issued at once.
	 */

	/* Wait for start of REQ/ACK handshake */
	while (!(NCR5380_read(STATUS_REG) & SR_REQ));

	SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
		   HOSTNO, cmd->device->id);
	tmp[0] = IDENTIFY(1, cmd->device->lun);

#ifdef SUPPORT_TAGS
	if (cmd->tag != TAG_NONE) {
		tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
		tmp[2] = cmd->tag;
		len = 3;
	} else
		len = 1;
#else
	len = 1;
	cmd->tag=0;
#endif /* SUPPORT_TAGS */

	/* Send message(s) */
	data = tmp;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &data);
	SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
	/* XXX need to handle errors here */
	hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
#endif
#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	initialize_SCp(cmd);

	return 0;
}

/*
 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
 * unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using polled I/O
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 * what phase is expected, *count - pointer to number of
 * bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 * maximum number of bytes, 0 if all bytes are transferred or exit
 * is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 * XXX Note : handling for bus free may be useful.
 */

/*
 * Note : this code is not as quick as it could be, however it
 * IS 100% reliable, and for the actual data transfer where speed
 * counts, we will always do a pseudo DMA or DMA transfer.
 */
static int NCR5380_transfer_pio( struct Scsi_Host *instance,
				 unsigned char *phase, int *count,
				 unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid.  NOTE(review): unbounded busy-wait; relies on the
		 * target making progress.
		 */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

		HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
			NCR_PRINT_PHASE(NDEBUG_PIO);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */

		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				/* last (or non-message) byte: ACK without ATN */
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				/* more message bytes follow: keep ATN up */
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA |
					      ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			NCR_PRINT(NDEBUG_PIO);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* wait for the target to drop REQ, completing the handshake */
		while (NCR5380_read(STATUS_REG) & SR_REQ);

		HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1.  We were in MSGOUT phase, and we are on the last byte of the
		 * message.  ATN must be dropped as ACK is dropped.
		 *
		 * 2.  We are in a MSGIN phase, and we are on the last byte of the
		 * message.  We must exit with ACK asserted, so that the calling
		 * code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3.  ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received.
	 */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}

/*
 * Function : do_abort (Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus.  Should only be
 * called from a routine which can drop into a
 *
 * Returns : 0 on success, -1 on failure.
 */

static int do_abort (struct Scsi_Host *host)
{
	unsigned char tmp, *msgptr, phase;
	int len;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ.  Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */

	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
		/* sink one byte of whatever phase we are in, keeping ATN up */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
			      ICR_ASSERT_ACK);
		while (NCR5380_read(STATUS_REG) & SR_REQ);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio (host, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */

	return len ? -1 : 0;
}

#if defined(REAL_DMA)
/*
 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
 * unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using either real
 * or pseudo DMA.
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 * what phase is expected, *count - pointer to number of
 * bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 * maximum number of bytes, 0 if all bytes or transferred or exit
 * is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 */

static int NCR5380_transfer_dma( struct Scsi_Host *instance,
				 unsigned char *phase, int *count,
				 unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	unsigned long flags;

	/* sanity check */
	if(!sun3_dma_setup_done) {
		printk("scsi%d: transfer_dma without setup!\n", HOSTNO);
		BUG();
	}
	hostdata->dma_len = c;

	DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		   HOSTNO, (p & SR_IO) ? "reading" : "writing",
		   c, (p & SR_IO) ? "to" : "from", *data);

	/* netbsd turns off ints here, why not be safe and do it too */
	local_irq_save(flags);

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	/* program the chip for DMA in the direction given by the phase;
	 * the EOP interrupt signals DMA completion */
	if (p & SR_IO) {
		NCR5380_write(TARGET_COMMAND_REG, 1);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE |
					 MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		NCR5380_write(TARGET_COMMAND_REG, 0);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE |
					 MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	local_irq_restore(flags);

	sun3_dma_active = 1;
	return 0;
}
#endif /* defined(REAL_DMA) */

/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 * directs us to.  Operates on the currently connected command,
 * instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 * modified if a command disconnects, *instance->connected will
 * change.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 * to recover from an unexpected bus free condition.
 */
static void NCR5380_information_transfer (struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned long flags;
	unsigned char msgout = NOP;
	int sink = 0;
	int len;
#if defined(REAL_DMA)
	int transfersize;
#endif
	unsigned char *data;
	unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
	struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	while (1) {
		tmp = NCR5380_read(STATUS_REG);
		/* We only have a valid SCSI phase when REQ is asserted */
		if (tmp & SR_REQ) {
			phase = (tmp & PHASE_MASK);
			if (phase != old_phase) {
				old_phase = phase;
				NCR_PRINT_PHASE(NDEBUG_INFORMATION);
			}

			if(phase == PHASE_CMDOUT) {
				/* pre-arm DMA for the upcoming data phase */
				void *d;
				unsigned long count;

				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					count = cmd->SCp.buffer->length;
					d = SGADDR(cmd->SCp.buffer);
				} else {
					count = cmd->SCp.this_residual;
					d = cmd->SCp.ptr;
				}
#ifdef REAL_DMA
				/* this command setup for dma yet? */
				if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
								  != cmd))
				{
					if (cmd->request->cmd_type == REQ_TYPE_FS) {
						sun3scsi_dma_setup(d, count,
								   rq_data_dir(cmd->request));
						sun3_dma_setup_done = cmd;
					}
				}
#endif
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_INTR;
#endif
			}

			if (sink && (phase != PHASE_MSGOUT)) {
				/* discard one byte in any non-MSGOUT phase until
				 * the target reaches MSGOUT (ATN is held up) */
				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
					      ICR_ASSERT_ACK);
				while (NCR5380_read(STATUS_REG) & SR_REQ);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_ATN);
				sink = 0;
				continue;
			}

			switch (phase) {
			case PHASE_DATAOUT:
#if (NDEBUG & NDEBUG_NO_DATAOUT)
				printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
				       "aborted\n", HOSTNO);
				sink = 1;
				do_abort(instance);
				cmd->result = DID_ERROR << 16;
				cmd->scsi_done(cmd);
				return;
#endif
				/* fall through - DATAOUT and DATAIN share the code below */
			case PHASE_DATAIN:
				/*
				 * If there is no room left in the current buffer in the
				 * scatter-gather list, move onto the next one.
				 */
				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					++cmd->SCp.buffer;
					--cmd->SCp.buffers_residual;
					cmd->SCp.this_residual = cmd->SCp.buffer->length;
					cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
					INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
						   HOSTNO, cmd->SCp.this_residual,
						   cmd->SCp.buffers_residual);
				}

				/*
				 * The preferred transfer method is going to be
				 * PSEUDO-DMA for systems that are strictly PIO,
				 * since we can let the hardware do the handshaking.
				 *
				 * For this to work, we need to know the transfersize
				 * ahead of time, since the pseudo-DMA code will sit
				 * in an unconditional loop.
				 */

				/* ++roman: I suggest, this should be
				 *   #if def(REAL_DMA)
				 * instead of leaving REAL_DMA out.
				 */

#if defined(REAL_DMA)
//				if (!cmd->device->borken &&
				if((transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) {
					len = transfersize;
					cmd->SCp.phase = phase;

					if (NCR5380_transfer_dma(instance, &phase,
					    &len, (unsigned char **) &cmd->SCp.ptr)) {
						/*
						 * If the watchdog timer fires, all future
						 * accesses to this device will use the
						 * polled-IO.
						 */
						printk(KERN_NOTICE "scsi%d: switching target %d "
						       "lun %d to slow handshake\n", HOSTNO,
						       cmd->device->id, cmd->device->lun);
						cmd->device->borken = 1;
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
							      ICR_ASSERT_ATN);
						sink = 1;
						do_abort(instance);
						cmd->result = DID_ERROR << 16;
						cmd->scsi_done(cmd);
						/* XXX - need to source or sink data here, as appropriate */
					} else {
#ifdef REAL_DMA
						/* ++roman: When using real DMA,
						 * information_transfer() should return after
						 * starting DMA since it has nothing more to
						 * do.
						 */
						return;
#else
						cmd->SCp.this_residual -= transfersize - len;
#endif
					}
				} else
#endif /* defined(REAL_DMA) */
					NCR5380_transfer_pio(instance, &phase,
							     (int *) &cmd->SCp.this_residual,
							     (unsigned char **) &cmd->SCp.ptr);
#ifdef REAL_DMA
				/* if we had intended to dma that command clear it */
				if(sun3_dma_setup_done == cmd)
					sun3_dma_setup_done = NULL;
#endif

				break;
			case PHASE_MSGIN:
				len = 1;
				data = &tmp;
				NCR5380_write(SELECT_ENABLE_REG, 0);	/* disable reselects */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Message = tmp;

				switch (tmp) {
				/*
				 * Linking lets us reduce the time required to get the
				 * next command out to the device, hopefully this will
				 * mean we don't waste another revolution due to the delays
				 * required by ARBITRATION and another SELECTION.
				 *
				 * In the current implementation proposal, low level drivers
				 * merely have to start the next command, pointed to by
				 * next_link, done() is called as with unlinked commands.
				 */
#ifdef LINKED
				case LINKED_CMD_COMPLETE:
				case LINKED_FLG_CMD_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					LNK_PRINTK("scsi%d: target %d lun %d linked command "
						   "complete.\n", HOSTNO, cmd->device->id,
						   cmd->device->lun);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Sanity check : A linked command should only terminate
					 * with one of these messages if there are more linked
					 * commands available.
					 */
					if (!cmd->next_link) {
						printk(KERN_NOTICE "scsi%d: target %d lun %d "
						       "linked command complete, no next_link\n",
						       HOSTNO, cmd->device->id, cmd->device->lun);
						sink = 1;
						do_abort (instance);
						return;
					}

					initialize_SCp(cmd->next_link);
					/* The next command is still part of this process; copy it
					 * and don't free it!
					 */
					cmd->next_link->tag = cmd->tag;
					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					LNK_PRINTK("scsi%d: target %d lun %d linked request "
						   "done, calling scsi_done().\n",
						   HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					cmd = hostdata->connected;
					break;
#endif /* def LINKED */
				case ABORT:
				case COMMAND_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					hostdata->connected = NULL;
					QU_PRINTK("scsi%d: command for target %d, lun %d "
						  "completed\n", HOSTNO, cmd->device->id,
						  cmd->device->lun);
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
					if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
						/* Turn a QUEUE FULL status into BUSY, I think the
						 * mid level cannot handle QUEUE FULL :-( (The
						 * command is retried after BUSY). Also update our
						 * queue size to the number of currently issued
						 * commands now.
						 */
						/* ++Andreas: the mid level code knows about
						   QUEUE_FULL now. */
						TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
						TAG_PRINTK("scsi%d: target %d lun %d returned "
							   "QUEUE_FULL after %d commands\n",
							   HOSTNO, cmd->device->id, cmd->device->lun,
							   ta->nr_allocated);
						if (ta->queue_size > ta->nr_allocated)
							ta->nr_allocated = ta->queue_size;
					}
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

					/*
					 * I'm not sure what the correct thing to do here is :
					 *
					 * If the command that just executed is NOT a request
					 * sense, the obvious thing to do is to set the result
					 * code to the values of the stored parameters.
					 *
					 * If it was a REQUEST SENSE command, we need some way to
					 * differentiate between the failure code of the original
					 * and the failure code of the REQUEST sense - the obvious
					 * case is success, where we fall through and leave the
					 * result code unchanged.
					 *
					 * The non-obvious place is where the REQUEST SENSE failed
					 */

					if (cmd->cmnd[0] != REQUEST_SENSE)
						cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					else if (status_byte(cmd->SCp.Status) != GOOD)
						cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);

#ifdef AUTOSENSE
					if ((cmd->cmnd[0] == REQUEST_SENSE) &&
					    hostdata->ses.cmd_len) {
						scsi_eh_restore_cmnd(cmd, &hostdata->ses);
						hostdata->ses.cmd_len = 0 ;
					}

					if ((cmd->cmnd[0] != REQUEST_SENSE) &&
					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
						ASEN_PRINTK("scsi%d: performing request sense\n",
							    HOSTNO);
						/* this is initialized from initialize_SCp
						cmd->SCp.buffer = NULL;
						cmd->SCp.buffers_residual = 0;
						*/

						local_irq_save(flags);
						LIST(cmd,hostdata->issue_queue);
						SET_NEXT(cmd, hostdata->issue_queue);
						hostdata->issue_queue = (struct scsi_cmnd *) cmd;
						local_irq_restore(flags);
						QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
							  "issue queue\n", H_NO(cmd));
					} else
#endif /* def AUTOSENSE */
					{
#ifdef NCR5380_STATS
						collect_stats(hostdata, cmd);
#endif
						cmd->scsi_done(cmd);
					}

					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
					return;
				case MESSAGE_REJECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					switch (hostdata->last_message) {
					case HEAD_OF_QUEUE_TAG:
					case ORDERED_QUEUE_TAG:
					case SIMPLE_QUEUE_TAG:
						/* The target obviously doesn't support tagged
						 * queuing, even though it announced this ability in
						 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
						 * clear 'tagged_supported' and lock the LUN, since
						 * the command is treated as untagged further on.
						 */
						cmd->device->tagged_supported = 0;
						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
						cmd->tag = TAG_NONE;
						TAG_PRINTK("scsi%d: target %d lun %d rejected "
							   "QUEUE_TAG message; tagged queuing "
							   "disabled\n",
							   HOSTNO, cmd->device->id, cmd->device->lun);
						break;
					}
					break;
				case DISCONNECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					local_irq_save(flags);
					cmd->device->disconnect = 1;
					LIST(cmd,hostdata->disconnected_queue);
					SET_NEXT(cmd, hostdata->disconnected_queue);
					hostdata->connected = NULL;
					hostdata->disconnected_queue = cmd;
					local_irq_restore(flags);
					QU_PRINTK("scsi%d: command for target %d lun %d was "
						  "moved from connected to the "
						  "disconnected_queue\n", HOSTNO,
						  cmd->device->id, cmd->device->lun);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/* Wait for bus free to avoid nasty timeouts */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
#ifdef SUN3_SCSI_VME
					dregs->csr |= CSR_DMA_ENABLE;
#endif
					return;
				/*
				 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
				 * operation, in violation of the SCSI spec so we can safely
				 * ignore SAVE/RESTORE pointers calls.
				 *
				 * Unfortunately, some disks violate the SCSI spec and
				 * don't issue the required SAVE_POINTERS message before
				 * disconnecting, and we have to break spec to remain
				 * compatible.
				 */
				case SAVE_POINTERS:
				case RESTORE_POINTERS:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					break;
				case EXTENDED_MESSAGE:
					/*
					 * Extended messages are sent in the following format :
					 * Byte
					 * 0		EXTENDED_MESSAGE == 1
					 * 1		length (includes one byte for code, doesn't
					 *		include first two bytes)
					 * 2		code
					 * 3..length+1	arguments
					 *
					 * Start the extended message buffer with the EXTENDED_MESSAGE
					 * byte, since spi_print_msg() wants the whole thing.
					 */
					extended_msg[0] = EXTENDED_MESSAGE;
					/* Accept first byte by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);

					len = 2;
					data = extended_msg + 1;
					phase = PHASE_MSGIN;
					NCR5380_transfer_pio(instance, &phase, &len, &data);
					EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
						   (int)extended_msg[1], (int)extended_msg[2]);

					if (!len && extended_msg[1] <=
					    (sizeof (extended_msg) - 1)) {
						/* Accept third byte by clearing ACK */
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
						len = extended_msg[1] - 1;
						data = extended_msg + 3;
						phase = PHASE_MSGIN;

						NCR5380_transfer_pio(instance, &phase, &len, &data);
						EXT_PRINTK("scsi%d: message received, residual %d\n",
							   HOSTNO, len);

						switch (extended_msg[2]) {
						case EXTENDED_SDTR:
						case EXTENDED_WDTR:
						case EXTENDED_MODIFY_DATA_POINTER:
						case EXTENDED_EXTENDED_IDENTIFY:
							/* none of these are supported: tmp = 0
							 * forces the reject path below */
							tmp = 0;
						}
					} else if (len) {
						printk(KERN_NOTICE "scsi%d: error receiving "
						       "extended message\n", HOSTNO);
						tmp = 0;
					} else {
						printk(KERN_NOTICE "scsi%d: extended message "
						       "code %02x length %d is too long\n",
						       HOSTNO, extended_msg[2], extended_msg[1]);
						tmp = 0;
					}
					/* Fall through to reject message */

				/*
				 * If we get something weird that we aren't expecting,
				 * reject it.
				 */
				default:
					if (!tmp) {
						printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
						spi_print_msg(extended_msg);
						printk("\n");
					} else if (tmp != EXTENDED_MESSAGE)
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "message %02x from target %d, lun %d\n",
						       HOSTNO, tmp, cmd->device->id, cmd->device->lun);
					else
						/* NOTE(review): the arguments below look swapped -
						 * the code byte is extended_msg[2] and the length is
						 * extended_msg[1]; as written this prints the length
						 * as "code" and EXTENDED_MESSAGE (1) as "length".
						 * Left unchanged here; verify against mainline. */
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "extended message "
						       "code %02x, length %d from target %d, lun %d\n",
						       HOSTNO, extended_msg[1], extended_msg[0],
						       cmd->device->id, cmd->device->lun);

					msgout = MESSAGE_REJECT;
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
						      ICR_ASSERT_ATN);
					break;
				} /* switch (tmp) */
				break;
			case PHASE_MSGOUT:
				len = 1;
				data = &msgout;
				hostdata->last_message = msgout;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				if (msgout == ABORT) {
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					hostdata->connected = NULL;
					cmd->result = DID_ERROR << 16;
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					return;
				}
				msgout = NOP;
				break;
			case PHASE_CMDOUT:
				len = cmd->cmd_len;
				data = cmd->cmnd;
				/*
				 * XXX for performance reasons, on machines with a
				 * PSEUDO-DMA architecture we should probably
				 * use the dma transfer function.
				 */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				break;
			case PHASE_STATIN:
				len = 1;
				data = &tmp;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Status = tmp;
				break;
			default:
				printk("scsi%d: unknown phase\n", HOSTNO);
				NCR_PRINT(NDEBUG_ANY);
			} /* switch(phase) */
		} /* if (tmp & SR_REQ) */
	} /* while (1) */
}

/*
 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
 *
 * Purpose : does reselection, initializing the instance->connected
 * field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q
 * nexus has been reestablished,
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */

/* it might eventually prove necessary to do a dma setup on
   reselection, but it doesn't seem to be needed now -- sam */

static void NCR5380_reselect (struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned char target_mask;
	unsigned char lun;
#ifdef SUPPORT_TAGS
	unsigned char tag;
#endif
	unsigned char msg[3];
	struct scsi_cmnd *tmp = NULL, *prev;
/*	unsigned long flags; */

	/*
	 * Disable arbitration, etc. since the host adapter obviously
	 * lost, and tell an interrupted NCR5380_select() to restart.
	 */

	NCR5380_write(MODE_REG, MR_BASE);
	hostdata->restart_select = 1;

	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);

	RSL_PRINTK("scsi%d: reselect\n", HOSTNO);

	/*
	 * At this point, we have detected that our SCSI ID is on the bus,
	 * SEL is true and BSY was false for at least one bus settle delay
	 * (400 ns).
	 *
	 * We must assert BSY ourselves, until the target drops the SEL
	 * signal.
	 */

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);

	while (NCR5380_read(STATUS_REG) & SR_SEL);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/*
	 * Wait for target to go into MSGIN.
	 */

	while (!(NCR5380_read(STATUS_REG) & SR_REQ));

#if 1
	// acknowledge toggle to MSGIN
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));

	// peek at the byte without really hitting the bus
	msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
#endif

	if (!(msg[0] & 0x80)) {
		/* first reselection message must be IDENTIFY (bit 7 set) */
		printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
		spi_print_msg(msg);
		do_abort(instance);
		return;
	}
	lun = (msg[0] & 0x07);

	/*
	 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
	 * just reestablished, and remove it from the disconnected queue.
	 *
	 * NOTE(review): under SUPPORT_TAGS, 'tag' is compared here before it
	 * is assigned (it is only set further below), and the tag-read block
	 * below references 'phase', 'len' and 'data' which are not declared
	 * in this function.  The SUPPORT_TAGS path looks broken/never built
	 * for this configuration - confirm before enabling it.
	 */

	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
	     tmp; prev = tmp, tmp = NEXT(tmp) ) {
		if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
#ifdef SUPPORT_TAGS
		    && (tag == tmp->tag)
#endif
		    ) {
			if (prev) {
				REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
				SET_NEXT(prev, NEXT(tmp));
			} else {
				REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
				hostdata->disconnected_queue = NEXT(tmp);
			}
			SET_NEXT(tmp, NULL);
			break;
		}
	}

	if (!tmp) {
		printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
#ifdef SUPPORT_TAGS
		       "tag %d "
#endif
		       "not in disconnected_queue.\n", HOSTNO, target_mask, lun
#ifdef SUPPORT_TAGS
		       , tag
#endif
		       );
		/*
		 * Since we have an established nexus that we can't do anything
		 * with, we must abort it.
		 */
		do_abort(instance);
		return;
	}

#if 1
	/* engage dma setup for the command we just saw */
	{
		void *d;
		unsigned long count;

		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
			count = tmp->SCp.buffer->length;
			d = SGADDR(tmp->SCp.buffer);
		} else {
			count = tmp->SCp.this_residual;
			d = tmp->SCp.ptr;
		}
#ifdef REAL_DMA
		/* setup this command for dma if not already */
		if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp)) {
			sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
			sun3_dma_setup_done = tmp;
		}
#endif
	}
#endif

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
	/* Accept message by clearing ACK */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

#ifdef SUPPORT_TAGS
	/* If the phase is still MSGIN, the target wants to send some more
	 * messages. In case it supports tagged queuing, this is probably a
	 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
	 */
	tag = TAG_NONE;
	if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
		/* Accept previous IDENTIFY message by clearing ACK */
		NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
		len = 2;
		data = msg+1;
		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
		    msg[1] == SIMPLE_QUEUE_TAG)
			tag = msg[2];
		TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
			   "reselection\n", HOSTNO, target_mask, lun, tag);
	}
#endif

	hostdata->connected = tmp;
	/* NOTE(review): tmp->target / tmp->lun are not struct scsi_cmnd
	 * members in this kernel era (should be tmp->device->id /
	 * tmp->device->lun); only compiles when RSL_PRINTK expands to
	 * nothing - confirm against the debug configuration. */
	RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
		   HOSTNO, tmp->target, tmp->lun, tmp->tag);
}

/*
 * Function : int NCR5380_abort(struct scsi_cmnd *cmd)
 *
 * Purpose : abort a command
 *
 * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
 * host byte of the result field to, if zero DID_ABORTED is
 * used.
 *
 * Returns : 0 - success, -1 on failure.
 *
 * XXX - there is no way to abort the command that is currently
 * connected, you have to wait for it to complete.  If this is
 * a problem, we could implement longjmp() / setjmp(), setjmp()
 * called where the loop started in NCR5380_main().
 */

static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	SETUP_HOSTDATA(instance);
	struct scsi_cmnd *tmp, **prev;
	unsigned long flags;

	printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO);
	scsi_print_command(cmd);

	NCR5380_print_status (instance);

	local_irq_save(flags);

	ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
		    NCR5380_read(BUS_AND_STATUS_REG),
		    NCR5380_read(STATUS_REG));

#if 1
	/*
	 * Case 1 : If the command is the currently executing command,
	 * we'll set the aborted flag and return control so that
	 * information transfer routine can exit cleanly.
	 */

	if (hostdata->connected == cmd) {

		ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
		/*
		 * We should perform BSY checking, and make sure we haven't slipped
		 * into BUS FREE.
*/ /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ /* * Since we can't change phases until we've completed the current * handshake, we have to source or sink a byte of data if the current * phase is not MSGOUT. */ /* * Return control to the executing NCR drive so we can clear the * aborted flag and get back into our main loop. */ if (do_abort(instance) == 0) { hostdata->aborted = 1; hostdata->connected = NULL; cmd->result = DID_ABORT << 16; #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); cmd->scsi_done(cmd); return SCSI_ABORT_SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); return SCSI_ABORT_ERROR; } } #endif /* * Case 2 : If the command hasn't been issued yet, we simply remove it * from the issue queue. */ for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); (*prev) = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); return SCSI_ABORT_SUCCESS; } /* * Case 3 : If any commands are connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. 
*/ if (hostdata->connected) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); return SCSI_ABORT_SNOOZE; } /* * Case 4: If the command is currently disconnected from the bus, and * there are no connected commands, we reconnect the I_T_L or * I_T_L_Q nexus associated with it, go into message out, and send * an abort message. * * This case is especially ugly. In order to reestablish the nexus, we * need to call NCR5380_select(). The easiest way to implement this * function was to abort if the bus was busy, and let the interrupt * handler triggered on the SEL for reselect take care of lost arbitrations * where necessary, meaning interrupts need to be enabled. * * When interrupts are enabled, the queues may change - so we * can't remove it from the disconnected queue before selecting it * because that could cause a failure in hashing the nexus if that * device reselected. * * Since the queues may change, we can't use the pointers from when we * first locate it. * * So, we must first locate the command, and if NCR5380_select() * succeeds, then issue the abort, relocate the command and remove * it from the disconnected queue. 
*/ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = NEXT(tmp)) if (cmd == tmp) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select (instance, cmd, (int) cmd->tag)) return SCSI_ABORT_BUSY; ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); do_abort (instance); local_irq_save(flags); for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); *prev = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; /* We must unlock the tag/LUN immediately here, since the * target goes to BUS FREE and doesn't send us another * message (COMMAND_COMPLETE or the like) */ #ifdef SUPPORT_TAGS cmd_free_tag( tmp ); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); tmp->scsi_done(tmp); return SCSI_ABORT_SUCCESS; } } /* * Case 5 : If we reached this point, the command was not found in any of * the queues. * * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); return SCSI_ABORT_NOT_RUNNING; } /* * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. 
* * Returns : SCSI_RESET_WAKEUP * */ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; #if 1 struct scsi_cmnd *connected, *disconnected_queue; #endif NCR5380_print_status (cmd->device->host); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); udelay (40); /* reset NCR registers */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_write( MODE_REG, MR_BASE ); NCR5380_write( TARGET_COMMAND_REG, 0 ); NCR5380_write( SELECT_ENABLE_REG, 0 ); /* ++roman: reset interrupt condition! otherwise no interrupts don't get * through anymore ... */ (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ /* After the reset, there are no more connected or disconnected commands * and no busy units; to avoid problems with re-inserting the commands * into the issue_queue (via scsi_done()), the aborted commands are * remembered in local variables first. */ local_irq_save(flags); connected = (struct scsi_cmnd *)hostdata->connected; hostdata->connected = NULL; disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for( i = 0; i < 8; ++i ) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* In order to tell the mid-level code which commands were aborted, * set the command status to DID_RESET and call scsi_done() !!! * This ultimately aborts processing of these commands in the mid-level. 
*/ if ((cmd = connected)) { ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } for (i = 0; (cmd = disconnected_queue); ++i) { disconnected_queue = NEXT(cmd); SET_NEXT(cmd, NULL); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } if (i > 0) ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); /* since all commands have been explicitly terminated, we need to tell * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ /* ++guenther: MID-LEVEL IS STILL BROKEN. * Mid-level is supposed to requeue all commands that were active on the * various low-level queues. In fact it does this, but that's not enough * because all these commands are subject to timeout. And if a timeout * happens for any removed command, *_abort() is called but all queues * are now empty. Abort then gives up the falcon lock, which is fatal, * since the mid-level will queue more commands and must have the lock * (it's all happening inside timer interrupt handler!!). * Even worse, abort will return NOT_RUNNING for all those commands not * on any queue, so they won't be retried ... * * Conclusion: either scsi.c disables timeout for all resetted commands * immediately, or we lose! As of linux-2.0.20 it doesn't. */ /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected * commands! 
*/ if (hostdata->issue_queue) ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; hostdata->connected = NULL; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for( i = 0; i < 8; ++i ) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; #endif /* 1 */ } /* Local Variables: */ /* tab-width: 8 */ /* End: */
gpl-2.0
GuneetAtwal/kernel_h1s
drivers/gpio/gpio-ucb1400.c
5280
2654
/* * Philips UCB1400 GPIO driver * * Author: Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/ucb1400.h> struct ucb1400_gpio_data *ucbdata; static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off) { struct ucb1400_gpio *gpio; gpio = container_of(gc, struct ucb1400_gpio, gc); ucb1400_gpio_set_direction(gpio->ac97, off, 0); return 0; } static int ucb1400_gpio_dir_out(struct gpio_chip *gc, unsigned off, int val) { struct ucb1400_gpio *gpio; gpio = container_of(gc, struct ucb1400_gpio, gc); ucb1400_gpio_set_direction(gpio->ac97, off, 1); ucb1400_gpio_set_value(gpio->ac97, off, val); return 0; } static int ucb1400_gpio_get(struct gpio_chip *gc, unsigned off) { struct ucb1400_gpio *gpio; gpio = container_of(gc, struct ucb1400_gpio, gc); return ucb1400_gpio_get_value(gpio->ac97, off); } static void ucb1400_gpio_set(struct gpio_chip *gc, unsigned off, int val) { struct ucb1400_gpio *gpio; gpio = container_of(gc, struct ucb1400_gpio, gc); ucb1400_gpio_set_value(gpio->ac97, off, val); } static int ucb1400_gpio_probe(struct platform_device *dev) { struct ucb1400_gpio *ucb = dev->dev.platform_data; int err = 0; if (!(ucbdata && ucbdata->gpio_offset)) { err = -EINVAL; goto err; } platform_set_drvdata(dev, ucb); ucb->gc.label = "ucb1400_gpio"; ucb->gc.base = ucbdata->gpio_offset; ucb->gc.ngpio = 10; ucb->gc.owner = THIS_MODULE; ucb->gc.direction_input = ucb1400_gpio_dir_in; ucb->gc.direction_output = ucb1400_gpio_dir_out; ucb->gc.get = ucb1400_gpio_get; ucb->gc.set = ucb1400_gpio_set; ucb->gc.can_sleep = 1; err = gpiochip_add(&ucb->gc); if (err) goto err; if (ucbdata && ucbdata->gpio_setup) err = ucbdata->gpio_setup(&dev->dev, ucb->gc.ngpio); err: return err; } static int ucb1400_gpio_remove(struct platform_device *dev) { int err = 0; struct 
ucb1400_gpio *ucb = platform_get_drvdata(dev); if (ucbdata && ucbdata->gpio_teardown) { err = ucbdata->gpio_teardown(&dev->dev, ucb->gc.ngpio); if (err) return err; } err = gpiochip_remove(&ucb->gc); return err; } static struct platform_driver ucb1400_gpio_driver = { .probe = ucb1400_gpio_probe, .remove = ucb1400_gpio_remove, .driver = { .name = "ucb1400_gpio" }, }; void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) { ucbdata = data; } module_platform_driver(ucb1400_gpio_driver); MODULE_DESCRIPTION("Philips UCB1400 GPIO driver"); MODULE_LICENSE("GPL");
gpl-2.0
meyskld/hammerhead_mr1
drivers/net/wireless/b43/phy_lp.c
5280
99600
/* Broadcom B43 wireless driver IEEE 802.11a/g LP-PHY driver Copyright (c) 2008-2009 Michael Buesch <m@bues.ch> Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/slab.h> #include "b43.h" #include "main.h" #include "phy_lp.h" #include "phy_common.h" #include "tables_lpphy.h" static inline u16 channel2freq_lp(u8 channel) { if (channel < 14) return (2407 + 5 * channel); else if (channel == 14) return 2484; else if (channel < 184) return (5000 + 5 * channel); else return (4000 + 5 * channel); } static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) return 1; return 36; } static int b43_lpphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_lp *lpphy; lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL); if (!lpphy) return -ENOMEM; dev->phy.lp = lpphy; return 0; } static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_lp *lpphy = phy->lp; memset(lpphy, 0, sizeof(*lpphy)); lpphy->antenna = B43_ANTENNA_DEFAULT; //TODO } static void b43_lpphy_op_free(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; kfree(lpphy); dev->phy.lp = NULL; } /* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ static void 
lpphy_read_band_sprom(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy_lp *lpphy = dev->phy.lp; u16 cckpo, maxpwr; u32 ofdmpo; int i; if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { lpphy->tx_isolation_med_band = sprom->tri2g; lpphy->bx_arch = sprom->bxa2g; lpphy->rx_pwr_offset = sprom->rxpo2g; lpphy->rssi_vf = sprom->rssismf2g; lpphy->rssi_vc = sprom->rssismc2g; lpphy->rssi_gs = sprom->rssisav2g; lpphy->txpa[0] = sprom->pa0b0; lpphy->txpa[1] = sprom->pa0b1; lpphy->txpa[2] = sprom->pa0b2; maxpwr = sprom->maxpwr_bg; lpphy->max_tx_pwr_med_band = maxpwr; cckpo = sprom->cck2gpo; /* * We don't read SPROM's opo as specs say. On rev8 SPROMs * opo == ofdm2gpo and we don't know any SSB with LP-PHY * and SPROM rev below 8. */ B43_WARN_ON(sprom->revision < 8); ofdmpo = sprom->ofdm2gpo; if (cckpo) { for (i = 0; i < 4; i++) { lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2; ofdmpo >>= 4; } ofdmpo = sprom->ofdm2gpo; for (i = 4; i < 15; i++) { lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2; ofdmpo >>= 4; } } else { ofdmpo &= 0xFF; for (i = 0; i < 4; i++) lpphy->tx_max_rate[i] = maxpwr; for (i = 4; i < 15; i++) lpphy->tx_max_rate[i] = maxpwr - ofdmpo; } } else { /* 5GHz */ lpphy->tx_isolation_low_band = sprom->tri5gl; lpphy->tx_isolation_med_band = sprom->tri5g; lpphy->tx_isolation_hi_band = sprom->tri5gh; lpphy->bx_arch = sprom->bxa5g; lpphy->rx_pwr_offset = sprom->rxpo5g; lpphy->rssi_vf = sprom->rssismf5g; lpphy->rssi_vc = sprom->rssismc5g; lpphy->rssi_gs = sprom->rssisav5g; lpphy->txpa[0] = sprom->pa1b0; lpphy->txpa[1] = sprom->pa1b1; lpphy->txpa[2] = sprom->pa1b2; lpphy->txpal[0] = sprom->pa1lob0; lpphy->txpal[1] = sprom->pa1lob1; lpphy->txpal[2] = sprom->pa1lob2; lpphy->txpah[0] = sprom->pa1hib0; lpphy->txpah[1] = sprom->pa1hib1; lpphy->txpah[2] = sprom->pa1hib2; maxpwr = sprom->maxpwr_al; ofdmpo = sprom->ofdm5glpo; lpphy->max_tx_pwr_low_band = maxpwr; for (i = 4; i < 12; i++) { lpphy->tx_max_ratel[i] = maxpwr - (ofdmpo & 
0xF) * 2; ofdmpo >>= 4; } maxpwr = sprom->maxpwr_a; ofdmpo = sprom->ofdm5gpo; lpphy->max_tx_pwr_med_band = maxpwr; for (i = 4; i < 12; i++) { lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2; ofdmpo >>= 4; } maxpwr = sprom->maxpwr_ah; ofdmpo = sprom->ofdm5ghpo; lpphy->max_tx_pwr_hi_band = maxpwr; for (i = 4; i < 12; i++) { lpphy->tx_max_rateh[i] = maxpwr - (ofdmpo & 0xF) * 2; ofdmpo >>= 4; } } } static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq) { struct b43_phy_lp *lpphy = dev->phy.lp; u16 temp[3]; u16 isolation; B43_WARN_ON(dev->phy.rev >= 2); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) isolation = lpphy->tx_isolation_med_band; else if (freq <= 5320) isolation = lpphy->tx_isolation_low_band; else if (freq <= 5700) isolation = lpphy->tx_isolation_med_band; else isolation = lpphy->tx_isolation_hi_band; temp[0] = ((isolation - 26) / 12) << 12; temp[1] = temp[0] + 0x1000; temp[2] = temp[0] + 0x2000; b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0), 3, temp); b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0), 3, temp); } static void lpphy_table_init(struct b43_wldev *dev) { u32 freq = channel2freq_lp(b43_lpphy_op_get_default_chan(dev)); if (dev->phy.rev < 2) lpphy_rev0_1_table_init(dev); else lpphy_rev2plus_table_init(dev); lpphy_init_tx_gain_table(dev); if (dev->phy.rev < 2) lpphy_adjust_gain_table(dev, freq); } static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->sdev->bus; struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy_lp *lpphy = dev->phy.lp; u16 tmp, tmp2; b43_phy_mask(dev, B43_LPPHY_AFE_DAC_CTL, 0xF7FF); b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0); b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0); b43_phy_set(dev, B43_LPPHY_AFE_DAC_CTL, 0x0004); b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0x0078); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800); b43_phy_write(dev, 
B43_LPPHY_ADC_COMPENSATION_CTL, 0x0016); b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_0, 0xFFF8, 0x0004); b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5400); b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2400); b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100); b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0x0006); b43_phy_mask(dev, B43_LPPHY_RX_RADIO_CTL, 0xFFFE); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x0005); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0x0180); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x3C00); b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFFF0, 0x0005); b43_phy_maskset(dev, B43_LPPHY_GAIN_MISMATCH_LIMIT, 0xFFC0, 0x001A); b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0x00B3); b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00); b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset); if ((sprom->boardflags_lo & B43_BFL_FEM) && ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || (sprom->boardflags_hi & B43_BFH_PAREF))) { ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28); ssb_pmu_set_ldo_paref(&bus->chipco, true); if (dev->phy.rev == 0) { b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT, 0xFFCF, 0x0010); } b43_lptab_write(dev, B43_LPTAB16(11, 7), 60); } else { ssb_pmu_set_ldo_paref(&bus->chipco, false); b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT, 0xFFCF, 0x0020); b43_lptab_write(dev, B43_LPTAB16(11, 7), 100); } tmp = lpphy->rssi_vf | lpphy->rssi_vc << 4 | 0xA000; b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, tmp); if (sprom->boardflags_hi & B43_BFH_RSSIINV) b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x0AAA); else b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x02AA); b43_lptab_write(dev, B43_LPTAB16(11, 1), 24); b43_phy_maskset(dev, B43_LPPHY_RX_RADIO_CTL, 0xFFF9, (lpphy->bx_arch << 1)); if (dev->phy.rev == 1 && (sprom->boardflags_hi & B43_BFH_FEM_BT)) { b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 
0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0x3F00, 0x0900); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0400); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0B00); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xC0FF, 0x0900); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xC0FF, 0x0B00); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00); } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ || (dev->dev->board_type == 0x048A) || ((dev->phy.rev == 0) && (sprom->boardflags_lo & B43_BFL_FEM))) { b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0500); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0800); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0A00); } else if (dev->phy.rev == 1 || (sprom->boardflags_lo & B43_BFL_FEM)) { b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0004); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0800); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0004); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0C00); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 
0xC0FF, 0x0100); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0300); } else { b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0900); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0006); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0500); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0006); b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0700); } if (dev->phy.rev == 1 && (sprom->boardflags_hi & B43_BFH_PAREF)) { b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_5, B43_LPPHY_TR_LOOKUP_1); b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_6, B43_LPPHY_TR_LOOKUP_2); b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_7, B43_LPPHY_TR_LOOKUP_3); b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_8, B43_LPPHY_TR_LOOKUP_4); } if ((sprom->boardflags_hi & B43_BFH_FEM_BT) && (dev->dev->chip_id == 0x5354) && (dev->dev->chip_pkg == SSB_CHIPPACK_BCM4712S)) { b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0006); b43_phy_write(dev, B43_LPPHY_GPIO_SELECT, 0x0005); b43_phy_write(dev, B43_LPPHY_GPIO_OUTEN, 0xFFFF); //FIXME the Broadcom driver caches & delays this HF write! 
b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W); } if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000); b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040); b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400); b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0x0B00); b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x0007); b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFF8, 0x0003); b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFC7, 0x0020); b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF); } else { /* 5GHz */ b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0x7FFF); b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFBF); } if (dev->phy.rev == 1) { tmp = b43_phy_read(dev, B43_LPPHY_CLIPCTRTHRESH); tmp2 = (tmp & 0x03E0) >> 5; tmp2 |= tmp2 << 5; b43_phy_write(dev, B43_LPPHY_4C3, tmp2); tmp = b43_phy_read(dev, B43_LPPHY_GAINDIRECTMISMATCH); tmp2 = (tmp & 0x1F00) >> 8; tmp2 |= tmp2 << 5; b43_phy_write(dev, B43_LPPHY_4C4, tmp2); tmp = b43_phy_read(dev, B43_LPPHY_VERYLOWGAINDB); tmp2 = tmp & 0x00FF; tmp2 |= tmp << 8; b43_phy_write(dev, B43_LPPHY_4C5, tmp2); } } static void lpphy_save_dig_flt_state(struct b43_wldev *dev) { static const u16 addr[] = { B43_PHY_OFDM(0xC1), B43_PHY_OFDM(0xC2), B43_PHY_OFDM(0xC3), B43_PHY_OFDM(0xC4), B43_PHY_OFDM(0xC5), B43_PHY_OFDM(0xC6), B43_PHY_OFDM(0xC7), B43_PHY_OFDM(0xC8), B43_PHY_OFDM(0xCF), }; static const u16 coefs[] = { 0xDE5E, 0xE832, 0xE331, 0x4D26, 0x0026, 0x1420, 0x0020, 0xFE08, 0x0008, }; struct b43_phy_lp *lpphy = dev->phy.lp; int i; for (i = 0; i < ARRAY_SIZE(addr); i++) { lpphy->dig_flt_state[i] = b43_phy_read(dev, addr[i]); b43_phy_write(dev, addr[i], coefs[i]); } } static void lpphy_restore_dig_flt_state(struct b43_wldev *dev) { static const u16 addr[] = { B43_PHY_OFDM(0xC1), B43_PHY_OFDM(0xC2), B43_PHY_OFDM(0xC3), B43_PHY_OFDM(0xC4), B43_PHY_OFDM(0xC5), B43_PHY_OFDM(0xC6), B43_PHY_OFDM(0xC7), B43_PHY_OFDM(0xC8), B43_PHY_OFDM(0xCF), }; struct b43_phy_lp 
*lpphy = dev->phy.lp; int i; for (i = 0; i < ARRAY_SIZE(addr); i++) b43_phy_write(dev, addr[i], lpphy->dig_flt_state[i]); } static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50); b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0x8800); b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0); b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0); b43_phy_write(dev, B43_PHY_OFDM(0xF9), 0); b43_phy_write(dev, B43_LPPHY_TR_LOOKUP_1, 0); b43_phy_set(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x10); b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0xB4); b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xF8FF, 0x200); b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xFF00, 0x7F); b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFF0F, 0x40); b43_phy_maskset(dev, B43_LPPHY_PREAMBLECONFIRMTO, 0xFF00, 0x2); b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000); b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000); b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1); if (dev->dev->board_rev >= 0x18) { b43_lptab_write(dev, B43_LPTAB32(17, 65), 0xEC); b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x14); } else { b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x10); } b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0xFF00, 0xF4); b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0x00FF, 0xF100); b43_phy_write(dev, B43_LPPHY_CLIPTHRESH, 0x48); b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0xFF00, 0x46); b43_phy_maskset(dev, B43_PHY_OFDM(0xE4), 0xFF00, 0x10); b43_phy_maskset(dev, B43_LPPHY_PWR_THRESH1, 0xFFF0, 0x9); b43_phy_mask(dev, B43_LPPHY_GAINDIRECTMISMATCH, ~0xF); b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5500); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0xA0); b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300); b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00); if ((dev->dev->chip_id 
== 0x4325) && (dev->dev->chip_rev == 0)) { b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100); b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA); } else { b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x1E00); b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xD); } b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFFE0, 0x1F); b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC); b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0xFF00, 0x19); b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0x03FF, 0x3C00); b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFC1F, 0x3E0); b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC); b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0x00FF, 0x1900); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800); b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12); b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000); if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) { b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0); b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40); } if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40); b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00); b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6); b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0x9D00); b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0xFF00, 0xA1); b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF); } else /* 5GHz */ b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x40); b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0xB3); b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00); b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset); b43_phy_set(dev, B43_LPPHY_RESET_CTL, 0x44); b43_phy_write(dev, B43_LPPHY_RESET_CTL, 0x80); b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, 0xA954); b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0x2000 | ((u16)lpphy->rssi_gs << 10) | ((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf); if 
((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) { b43_phy_set(dev, B43_LPPHY_AFE_ADC_CTL_0, 0x1C); b43_phy_maskset(dev, B43_LPPHY_AFE_CTL, 0x00FF, 0x8800); b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_1, 0xFC3C, 0x0400); } lpphy_save_dig_flt_state(dev); } static void lpphy_baseband_init(struct b43_wldev *dev) { lpphy_table_init(dev); if (dev->phy.rev >= 2) lpphy_baseband_rev2plus_init(dev); else lpphy_baseband_rev0_1_init(dev); } struct b2062_freqdata { u16 freq; u8 data[6]; }; /* Initialize the 2062 radio. */ static void lpphy_2062_init(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; struct ssb_bus *bus = dev->dev->sdev->bus; u32 crystalfreq, tmp, ref; unsigned int i; const struct b2062_freqdata *fd = NULL; static const struct b2062_freqdata freqdata_tab[] = { { .freq = 12000, .data[0] = 6, .data[1] = 6, .data[2] = 6, .data[3] = 6, .data[4] = 10, .data[5] = 6, }, { .freq = 13000, .data[0] = 4, .data[1] = 4, .data[2] = 4, .data[3] = 4, .data[4] = 11, .data[5] = 7, }, { .freq = 14400, .data[0] = 3, .data[1] = 3, .data[2] = 3, .data[3] = 3, .data[4] = 12, .data[5] = 7, }, { .freq = 16200, .data[0] = 3, .data[1] = 3, .data[2] = 3, .data[3] = 3, .data[4] = 13, .data[5] = 8, }, { .freq = 18000, .data[0] = 2, .data[1] = 2, .data[2] = 2, .data[3] = 2, .data[4] = 14, .data[5] = 8, }, { .freq = 19200, .data[0] = 1, .data[1] = 1, .data[2] = 1, .data[3] = 1, .data[4] = 14, .data[5] = 9, }, }; b2062_upload_init_table(dev); b43_radio_write(dev, B2062_N_TX_CTL3, 0); b43_radio_write(dev, B2062_N_TX_CTL4, 0); b43_radio_write(dev, B2062_N_TX_CTL5, 0); b43_radio_write(dev, B2062_N_TX_CTL6, 0); b43_radio_write(dev, B2062_N_PDN_CTL0, 0x40); b43_radio_write(dev, B2062_N_PDN_CTL0, 0); b43_radio_write(dev, B2062_N_CALIB_TS, 0x10); b43_radio_write(dev, B2062_N_CALIB_TS, 0); if (dev->phy.rev > 0) { b43_radio_write(dev, B2062_S_BG_CTL1, (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80); } if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) b43_radio_set(dev, 
B2062_N_TSSI_CTL0, 0x1); else b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1); /* Get the crystal freq, in Hz. */ crystalfreq = bus->chipco.pmu.crystalfreq * 1000; B43_WARN_ON(!(bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)); B43_WARN_ON(crystalfreq == 0); if (crystalfreq <= 30000000) { lpphy->pdiv = 1; b43_radio_mask(dev, B2062_S_RFPLL_CTL1, 0xFFFB); } else { lpphy->pdiv = 2; b43_radio_set(dev, B2062_S_RFPLL_CTL1, 0x4); } tmp = (((800000000 * lpphy->pdiv + crystalfreq) / (2 * crystalfreq)) - 8) & 0xFF; b43_radio_write(dev, B2062_S_RFPLL_CTL7, tmp); tmp = (((100 * crystalfreq + 16000000 * lpphy->pdiv) / (32000000 * lpphy->pdiv)) - 1) & 0xFF; b43_radio_write(dev, B2062_S_RFPLL_CTL18, tmp); tmp = (((2 * crystalfreq + 1000000 * lpphy->pdiv) / (2000000 * lpphy->pdiv)) - 1) & 0xFF; b43_radio_write(dev, B2062_S_RFPLL_CTL19, tmp); ref = (1000 * lpphy->pdiv + 2 * crystalfreq) / (2000 * lpphy->pdiv); ref &= 0xFFFF; for (i = 0; i < ARRAY_SIZE(freqdata_tab); i++) { if (ref < freqdata_tab[i].freq) { fd = &freqdata_tab[i]; break; } } if (!fd) fd = &freqdata_tab[ARRAY_SIZE(freqdata_tab) - 1]; b43dbg(dev->wl, "b2062: Using crystal tab entry %u kHz.\n", fd->freq); /* FIXME: Keep this printk until the code is fully debugged. */ b43_radio_write(dev, B2062_S_RFPLL_CTL8, ((u16)(fd->data[1]) << 4) | fd->data[0]); b43_radio_write(dev, B2062_S_RFPLL_CTL9, ((u16)(fd->data[3]) << 4) | fd->data[2]); b43_radio_write(dev, B2062_S_RFPLL_CTL10, fd->data[4]); b43_radio_write(dev, B2062_S_RFPLL_CTL11, fd->data[5]); } /* Initialize the 2063 radio. 
*/ static void lpphy_2063_init(struct b43_wldev *dev) { b2063_upload_init_table(dev); b43_radio_write(dev, B2063_LOGEN_SP5, 0); b43_radio_set(dev, B2063_COMM8, 0x38); b43_radio_write(dev, B2063_REG_SP1, 0x56); b43_radio_mask(dev, B2063_RX_BB_CTL2, ~0x2); b43_radio_write(dev, B2063_PA_SP7, 0); b43_radio_write(dev, B2063_TX_RF_SP6, 0x20); b43_radio_write(dev, B2063_TX_RF_SP9, 0x40); if (dev->phy.rev == 2) { b43_radio_write(dev, B2063_PA_SP3, 0xa0); b43_radio_write(dev, B2063_PA_SP4, 0xa0); b43_radio_write(dev, B2063_PA_SP2, 0x18); } else { b43_radio_write(dev, B2063_PA_SP3, 0x20); b43_radio_write(dev, B2063_PA_SP2, 0x20); } } struct lpphy_stx_table_entry { u16 phy_offset; u16 phy_shift; u16 rf_addr; u16 rf_shift; u16 mask; }; static const struct lpphy_stx_table_entry lpphy_stx_table[] = { { .phy_offset = 2, .phy_shift = 6, .rf_addr = 0x3d, .rf_shift = 3, .mask = 0x01, }, { .phy_offset = 1, .phy_shift = 12, .rf_addr = 0x4c, .rf_shift = 1, .mask = 0x01, }, { .phy_offset = 1, .phy_shift = 8, .rf_addr = 0x50, .rf_shift = 0, .mask = 0x7f, }, { .phy_offset = 0, .phy_shift = 8, .rf_addr = 0x44, .rf_shift = 0, .mask = 0xff, }, { .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4a, .rf_shift = 0, .mask = 0xff, }, { .phy_offset = 0, .phy_shift = 4, .rf_addr = 0x4d, .rf_shift = 0, .mask = 0xff, }, { .phy_offset = 1, .phy_shift = 4, .rf_addr = 0x4e, .rf_shift = 0, .mask = 0xff, }, { .phy_offset = 0, .phy_shift = 12, .rf_addr = 0x4f, .rf_shift = 0, .mask = 0x0f, }, { .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4f, .rf_shift = 4, .mask = 0x0f, }, { .phy_offset = 3, .phy_shift = 0, .rf_addr = 0x49, .rf_shift = 0, .mask = 0x0f, }, { .phy_offset = 4, .phy_shift = 3, .rf_addr = 0x46, .rf_shift = 4, .mask = 0x07, }, { .phy_offset = 3, .phy_shift = 15, .rf_addr = 0x46, .rf_shift = 0, .mask = 0x01, }, { .phy_offset = 4, .phy_shift = 0, .rf_addr = 0x46, .rf_shift = 1, .mask = 0x07, }, { .phy_offset = 3, .phy_shift = 8, .rf_addr = 0x48, .rf_shift = 4, .mask = 0x07, }, { .phy_offset = 3, 
.phy_shift = 11, .rf_addr = 0x48, .rf_shift = 0, .mask = 0x0f, }, { .phy_offset = 3, .phy_shift = 4, .rf_addr = 0x49, .rf_shift = 4, .mask = 0x0f, }, { .phy_offset = 2, .phy_shift = 15, .rf_addr = 0x45, .rf_shift = 0, .mask = 0x01, }, { .phy_offset = 5, .phy_shift = 13, .rf_addr = 0x52, .rf_shift = 4, .mask = 0x07, }, { .phy_offset = 6, .phy_shift = 0, .rf_addr = 0x52, .rf_shift = 7, .mask = 0x01, }, { .phy_offset = 5, .phy_shift = 3, .rf_addr = 0x41, .rf_shift = 5, .mask = 0x07, }, { .phy_offset = 5, .phy_shift = 6, .rf_addr = 0x41, .rf_shift = 0, .mask = 0x0f, }, { .phy_offset = 5, .phy_shift = 10, .rf_addr = 0x42, .rf_shift = 5, .mask = 0x07, }, { .phy_offset = 4, .phy_shift = 15, .rf_addr = 0x42, .rf_shift = 0, .mask = 0x01, }, { .phy_offset = 5, .phy_shift = 0, .rf_addr = 0x42, .rf_shift = 1, .mask = 0x07, }, { .phy_offset = 4, .phy_shift = 11, .rf_addr = 0x43, .rf_shift = 4, .mask = 0x0f, }, { .phy_offset = 4, .phy_shift = 7, .rf_addr = 0x43, .rf_shift = 0, .mask = 0x0f, }, { .phy_offset = 4, .phy_shift = 6, .rf_addr = 0x45, .rf_shift = 1, .mask = 0x01, }, { .phy_offset = 2, .phy_shift = 7, .rf_addr = 0x40, .rf_shift = 4, .mask = 0x0f, }, { .phy_offset = 2, .phy_shift = 11, .rf_addr = 0x40, .rf_shift = 0, .mask = 0x0f, }, }; static void lpphy_sync_stx(struct b43_wldev *dev) { const struct lpphy_stx_table_entry *e; unsigned int i; u16 tmp; for (i = 0; i < ARRAY_SIZE(lpphy_stx_table); i++) { e = &lpphy_stx_table[i]; tmp = b43_radio_read(dev, e->rf_addr); tmp >>= e->rf_shift; tmp <<= e->phy_shift; b43_phy_maskset(dev, B43_PHY_OFDM(0xF2 + e->phy_offset), ~(e->mask << e->phy_shift), tmp); } } static void lpphy_radio_init(struct b43_wldev *dev) { /* The radio is attached through the 4wire bus. 
 */
	/* Pulse bit 0x2 of the 4wire control register to attach the radio. */
	b43_phy_set(dev, B43_LPPHY_FOURWIRE_CTL, 0x2);
	udelay(1);
	b43_phy_mask(dev, B43_LPPHY_FOURWIRE_CTL, 0xFFFD);
	udelay(1);

	if (dev->phy.radio_ver == 0x2062) {
		lpphy_2062_init(dev);
	} else {
		lpphy_2063_init(dev);
		lpphy_sync_stx(dev);
		b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
		b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
		if (dev->dev->chip_id == 0x4325) {
			// TODO SSB PMU recalibration
		}
	}
}

/* Accumulators read back from the PHY by lpphy_rx_iq_est():
 * the I*Q product sum and the per-rail (I, Q) power sums. */
struct lpphy_iq_est {
	u32 iq_prod, i_pwr, q_pwr;
};

/* Program the stored RC capacitor calibration value (lpphy->rc_cap)
 * into three B2062 radio registers.  Each write sets bit 0x80 on top
 * of the derived field value. */
static void lpphy_set_rc_cap(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	u8 rc_cap = (lpphy->rc_cap & 0x1F) >> 1;

	if (dev->phy.rev == 1) //FIXME check channel 14!
		rc_cap = min_t(u8, rc_cap + 5, 15);

	b43_radio_write(dev, B2062_N_RXBB_CALIB2,
			max_t(u8, lpphy->rc_cap - 4, 0x80));
	b43_radio_write(dev, B2062_N_TX_CTL_A, rc_cap | 0x80);
	b43_radio_write(dev, B2062_S_RXG_CNT16,
			((lpphy->rc_cap & 0x1F) >> 2) | 0x80);
}

/* The baseband multiplier lives in the high byte of LP table 0, offset 87. */
static u8 lpphy_get_bb_mult(struct b43_wldev *dev)
{
	return (b43_lptab_read(dev, B43_LPTAB16(0, 87)) & 0xFF00) >> 8;
}

static void lpphy_set_bb_mult(struct b43_wldev *dev, u8 bb_mult)
{
	b43_lptab_write(dev, B43_LPTAB16(0, 87), (u16)bb_mult << 8);
}

/* Mute carrier sense.  Two independent disable reasons (user vs.
 * system) are tracked so lpphy_clear_deaf() only restores CRS once
 * both have been cleared. */
static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	if (user)
		lpphy->crs_usr_disable = true;
	else
		lpphy->crs_sys_disable = true;
	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}

/* Drop one deafness reason; re-enable CRS (band-specific value) only
 * when neither the user nor the system reason remains set. */
static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	if (user)
		lpphy->crs_usr_disable = false;
	else
		lpphy->crs_sys_disable = false;

	if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
					0xFF1F, 0x60);
		else
			b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
					0xFF1F, 0x20);
	}
}

/* Force the TX/RX switch via RF override 0: bit 1 = TX, bit 0 = RX. */
static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx)
{
	u16 trsw = (tx << 1) | rx;

	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, trsw);
	b43_phy_set(dev,
B43_LPPHY_RF_OVERRIDE_0, 0x3); } static void lpphy_disable_crs(struct b43_wldev *dev, bool user) { lpphy_set_deaf(dev, user); lpphy_set_trsw_over(dev, false, true); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x10); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFDF); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFBF); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x7); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x38); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x100); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFDFF); b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL0, 0); b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL1, 1); b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL2, 0x20); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFBFF); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xF7FF); b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45AF); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0x3FF); } static void lpphy_restore_crs(struct b43_wldev *dev, bool user) { lpphy_clear_deaf(dev, user); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFF80); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFC00); } struct lpphy_tx_gains { u16 gm, pga, pad, dac; }; static void lpphy_disable_rx_gain_override(struct b43_wldev *dev) { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF); if (dev->phy.rev >= 2) { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF); if (b43_current_band(dev->wl) == 
		    IEEE80211_BAND_2GHZ) {
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
			b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
		}
	} else {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
	}
}

/* Counterpart to lpphy_disable_rx_gain_override(): sets the same
 * override-enable bits in RF override 0/2 (plus OFDM 0xE5 on
 * 2.4 GHz for rev >= 2). */
static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
{
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
	if (dev->phy.rev >= 2) {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
			b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
		}
	} else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
	}
}

/* Clear the TX gain override-enable bits; the bit layout differs
 * between rev < 2 and rev >= 2. */
static void lpphy_disable_tx_gain_override(struct b43_wldev *dev)
{
	if (dev->phy.rev < 2)
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
	else {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F);
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF);
	}
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF);
}

static void lpphy_enable_tx_gain_override(struct b43_wldev *dev)
{
	if (dev->phy.rev < 2)
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
	else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80);
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000);
	}
	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40);
}

/* Read back the currently programmed TX gain override values.
 * Packing differs per revision: rev < 2 packs gm/pga/pad into one
 * 11-bit register field; rev >= 2 keeps pad in OFDM register 0xFB. */
static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev)
{
	struct lpphy_tx_gains gains;
	u16 tmp;

	gains.dac = (b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0x380) >> 7;
	if (dev->phy.rev < 2) {
		tmp = b43_phy_read(dev,
				   B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7FF;
		gains.gm = tmp & 0x0007;
		gains.pga = (tmp & 0x0078) >> 3;
		gains.pad = (tmp & 0x780) >> 7;
	} else {
		tmp = b43_phy_read(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL);
		gains.pad = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0xFF;
		gains.gm = tmp & 0xFF;
		gains.pga = (tmp >> 8) & 0xFF;
	}
	return gains;
}

/* Program the DAC gain into bits 7-9 (0x380) of the AFE DAC control
 * register, preserving the rest of the low 12 bits. */
static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac)
{
	u16 ctl = b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0xC7F;

	ctl |= dac << 7;
	b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl);
}

/* The PA gain lives in the low 7 bits of OFDM register 0xFB. */
static u16 lpphy_get_pa_gain(struct b43_wldev *dev)
{
	return b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x7F;
}

static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain)
{
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, gain << 6);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, gain << 8);
}

/* Apply a full set of TX gain overrides (gm/pga/pad/dac), then turn
 * the TX gain override on. */
static void lpphy_set_tx_gains(struct b43_wldev *dev,
			       struct lpphy_tx_gains gains)
{
	u16 rf_gain, pa_gain;

	if (dev->phy.rev < 2) {
		rf_gain = (gains.pad << 7) | (gains.pga << 3) | gains.gm;
		b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
				0xF800, rf_gain);
	} else {
		pa_gain = lpphy_get_pa_gain(dev);
		b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
			      (gains.pga << 8) | gains.gm);
		/*
		 * SPEC FIXME The spec calls for (pa_gain << 8) here, but that
		 * conflicts with the spec for set_pa_gain! Vendor driver bug?
		 */
		b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0x8000,
				gains.pad | (pa_gain << 6));
		b43_phy_write(dev, B43_PHY_OFDM(0xFC),
			      (gains.pga << 8) | gains.gm);
		b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x8000,
				gains.pad | (pa_gain << 8));
	}
	lpphy_set_dac_gain(dev, gains.dac);
	lpphy_enable_tx_gain_override(dev);
}

/* RX gain override for PHY rev 0/1.  The gain word packs the TRSW
 * setting in bit 0 and the external-LNA flag in bit 1; the remaining
 * bits form the LNA gain value written to the override register. */
static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain)
{
	u16 trsw = gain & 0x1;
	u16 lna = (gain & 0xFFFC) | ((gain & 0xC) >> 2);
	u16 ext_lna = (gain & 2) >> 1;

	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xFBFF, ext_lna << 10);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xF7FF, ext_lna << 11);
	b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
}

/* RX gain override for PHY rev >= 2: bits 0-15 are the main gain word,
 * bits 16-19 the high gain, bit 20 (inverted) the TRSW setting, and
 * bit 21 the external-LNA flag. */
static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
{
	u16 low_gain = gain & 0xFFFF;
	u16 high_gain = (gain >> 16) & 0xF;
	u16 ext_lna = (gain >> 21) & 0x1;
	u16 trsw = ~(gain >> 20) & 0x1;
	u16 tmp;

	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xFDFF, ext_lna << 9);
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFBFF, ext_lna << 10); b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { tmp = (gain >> 2) & 0x3; b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xE7FF, tmp<<11); b43_phy_maskset(dev, B43_PHY_OFDM(0xE6), 0xFFE7, tmp << 3); } } static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain) { if (dev->phy.rev < 2) lpphy_rev0_1_set_rx_gain(dev, gain); else lpphy_rev2plus_set_rx_gain(dev, gain); lpphy_enable_rx_gain_override(dev); } static void lpphy_set_rx_gain_by_index(struct b43_wldev *dev, u16 idx) { u32 gain = b43_lptab_read(dev, B43_LPTAB16(12, idx)); lpphy_set_rx_gain(dev, gain); } static void lpphy_stop_ddfs(struct b43_wldev *dev) { b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFD); b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xFFDF); } static void lpphy_run_ddfs(struct b43_wldev *dev, int i_on, int q_on, int incr1, int incr2, int scale_idx) { lpphy_stop_ddfs(dev); b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0xFF80); b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0x80FF); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0xFF80, incr1); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0x80FF, incr2 << 8); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF7, i_on << 3); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFEF, q_on << 4); b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFF9F, scale_idx << 5); b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFB); b43_phy_set(dev, B43_LPPHY_AFE_DDFS, 0x2); b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x20); } static bool lpphy_rx_iq_est(struct b43_wldev *dev, u16 samples, u8 time, struct lpphy_iq_est *iq_est) { int i; b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFF7); b43_phy_write(dev, B43_LPPHY_IQ_NUM_SMPLS_ADDR, samples); b43_phy_maskset(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFF00, time); b43_phy_mask(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFEFF); 
	b43_phy_set(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200);
	/* Poll for completion: bit 0x200 self-clears.  Up to ~500 ms. */
	for (i = 0; i < 500; i++) {
		if (!(b43_phy_read(dev,
				B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200))
			break;
		msleep(1);
	}
	if ((b43_phy_read(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) {
		/* Timed out: restore CRS gain bit and report failure. */
		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8);
		return false;
	}

	/* Assemble the 32-bit accumulators from hi/lo register pairs. */
	iq_est->iq_prod = b43_phy_read(dev, B43_LPPHY_IQ_ACC_HI_ADDR);
	iq_est->iq_prod <<= 16;
	iq_est->iq_prod |= b43_phy_read(dev, B43_LPPHY_IQ_ACC_LO_ADDR);

	iq_est->i_pwr = b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_HI_ADDR);
	iq_est->i_pwr <<= 16;
	iq_est->i_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_LO_ADDR);

	iq_est->q_pwr = b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_HI_ADDR);
	iq_est->q_pwr <<= 16;
	iq_est->q_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_LO_ADDR);

	b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8);
	return true;
}

/* Search the first 32 RX gain table indices for one whose looped-back
 * DDFS tone yields a combined (I + Q) power inside the (4000, 10000)
 * window after /1000 scaling.  Returns that index, or -1 if none fits. */
static int lpphy_loopback(struct b43_wldev *dev)
{
	struct lpphy_iq_est iq_est;
	int i, index = -1;
	u32 tmp;

	memset(&iq_est, 0, sizeof(iq_est));

	/* Route TX into RX and force the relevant RF overrides on. */
	lpphy_set_trsw_over(dev, true, true);
	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x8);
	b43_radio_write(dev, B2062_N_TX_CTL_A, 0x80);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x80);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x80);
	for (i = 0; i < 32; i++) {
		lpphy_set_rx_gain_by_index(dev, i);
		lpphy_run_ddfs(dev, 1, 1, 5, 5, 0);
		if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est)))
			continue;
		tmp = (iq_est.i_pwr + iq_est.q_pwr) / 1000;
		if ((tmp > 4000) && (tmp < 10000)) {
			index = i;
			break;
		}
	}
	lpphy_stop_ddfs(dev);
	return index;
}

/* Fixed-point division algorithm using only integer math.
*/ static u32 lpphy_qdiv_roundup(u32 dividend, u32 divisor, u8 precision) { u32 quotient, remainder; if (divisor == 0) return 0; quotient = dividend / divisor; remainder = dividend % divisor; while (precision > 0) { quotient <<= 1; if (remainder << 1 >= divisor) { quotient++; remainder = (remainder << 1) - divisor; } precision--; } if (remainder << 1 >= divisor) quotient++; return quotient; } /* Read the TX power control mode from hardware. */ static void lpphy_read_tx_pctl_mode_from_hardware(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; u16 ctl; ctl = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_CMD); switch (ctl & B43_LPPHY_TX_PWR_CTL_CMD_MODE) { case B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF: lpphy->txpctl_mode = B43_LPPHY_TXPCTL_OFF; break; case B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW: lpphy->txpctl_mode = B43_LPPHY_TXPCTL_SW; break; case B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW: lpphy->txpctl_mode = B43_LPPHY_TXPCTL_HW; break; default: lpphy->txpctl_mode = B43_LPPHY_TXPCTL_UNKNOWN; B43_WARN_ON(1); break; } } /* Set the TX power control mode in hardware. 
 */
/* Push lpphy->txpctl_mode into the TX power control command register. */
static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 ctl;

	switch (lpphy->txpctl_mode) {
	case B43_LPPHY_TXPCTL_OFF:
		ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF;
		break;
	case B43_LPPHY_TXPCTL_HW:
		ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW;
		break;
	case B43_LPPHY_TXPCTL_SW:
		ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW;
		break;
	default:
		ctl = 0;
		B43_WARN_ON(1);
	}
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
			~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, ctl);
}

/* Switch the TX power control mode, updating both the cached state and
 * the hardware.  A no-op when the requested mode is already active. */
static void lpphy_set_tx_power_control(struct b43_wldev *dev,
				       enum b43_lpphy_txpctl_mode mode)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	enum b43_lpphy_txpctl_mode oldmode;

	lpphy_read_tx_pctl_mode_from_hardware(dev);
	oldmode = lpphy->txpctl_mode;
	if (oldmode == mode)
		return;
	lpphy->txpctl_mode = mode;

	if (oldmode == B43_LPPHY_TXPCTL_HW) {
		//TODO Update TX Power NPT
		//TODO Clear all TX Power offsets
	} else {
		if (mode == B43_LPPHY_TXPCTL_HW) {
			//TODO Recalculate target TX power
			b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
					0xFF80, lpphy->tssi_idx);
			/* NOTE(review): mask 0x8FFF clears bits 12-14, but a
			 * u16 shifted left by 16 truncates back to zero when
			 * passed as the u16 set-argument; a shift of 12 looks
			 * intended.  Confirm against the specs before
			 * changing. */
			b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
					0x8FFF, ((u16)lpphy->tssi_npt << 16));
			//TODO Set "TSSI Transmit Count" variable to total transmitted frame count
			lpphy_disable_tx_gain_override(dev);
			lpphy->tx_pwr_idx_over = -1;
		}
	}
	if (dev->phy.rev >= 2) {
		if (mode == B43_LPPHY_TXPCTL_HW)
			b43_phy_set(dev, B43_PHY_OFDM(0xD0), 0x2);
		else
			b43_phy_mask(dev, B43_PHY_OFDM(0xD0), 0xFFFD);
	}
	lpphy_write_tx_pctl_mode_to_hardware(dev);
}

static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
				       unsigned int new_channel);

/* RC calibration for PHY rev 0/1 (2062 radio): sweep calibration codes
 * through B2062_N_RXBB_CALIB2 and keep the one whose measured DDFS power
 * response best matches ideal_pwr_table (minimum squared error). */
static void lpphy_rev0_1_rc_calib(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct lpphy_iq_est iq_est;
	struct lpphy_tx_gains tx_gains;
	/* Expected normalized power response, indexed by DDFS step j-5. */
	static const u32 ideal_pwr_table[21] = {
		0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64,
		0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35,
		0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088,
		0x0004c, 0x0002c, 0x0001a,
	};
	bool old_txg_ovr;
	u8
old_bbmult; u16 old_rf_ovr, old_rf_ovrval, old_afe_ovr, old_afe_ovrval, old_rf2_ovr, old_rf2_ovrval, old_phy_ctl; enum b43_lpphy_txpctl_mode old_txpctl; u32 normal_pwr, ideal_pwr, mean_sq_pwr, tmp = 0, mean_sq_pwr_min = 0; int loopback, i, j, inner_sum, err; memset(&iq_est, 0, sizeof(iq_est)); err = b43_lpphy_op_switch_channel(dev, 7); if (err) { b43dbg(dev->wl, "RC calib: Failed to switch to channel 7, error = %d\n", err); } old_txg_ovr = !!(b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40); old_bbmult = lpphy_get_bb_mult(dev); if (old_txg_ovr) tx_gains = lpphy_get_tx_gains(dev); old_rf_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_0); old_rf_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_VAL_0); old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR); old_afe_ovrval = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVRVAL); old_rf2_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2); old_rf2_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2_VAL); old_phy_ctl = b43_phy_read(dev, B43_LPPHY_LP_PHY_CTL); lpphy_read_tx_pctl_mode_from_hardware(dev); old_txpctl = lpphy->txpctl_mode; lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); lpphy_disable_crs(dev, true); loopback = lpphy_loopback(dev); if (loopback == -1) goto finish; lpphy_set_rx_gain_by_index(dev, loopback); b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFFBF, 0x40); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFF8, 0x1); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFC7, 0x8); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F, 0xC0); for (i = 128; i <= 159; i++) { b43_radio_write(dev, B2062_N_RXBB_CALIB2, i); inner_sum = 0; for (j = 5; j <= 25; j++) { lpphy_run_ddfs(dev, 1, 1, j, j, 0); if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est))) goto finish; mean_sq_pwr = iq_est.i_pwr + iq_est.q_pwr; if (j == 5) tmp = mean_sq_pwr; ideal_pwr = ((ideal_pwr_table[j-5] >> 3) + 1) >> 1; normal_pwr = lpphy_qdiv_roundup(mean_sq_pwr, tmp, 12); mean_sq_pwr = ideal_pwr - normal_pwr; mean_sq_pwr *= mean_sq_pwr; inner_sum 
+= mean_sq_pwr; if ((i == 128) || (inner_sum < mean_sq_pwr_min)) { lpphy->rc_cap = i; mean_sq_pwr_min = inner_sum; } } } lpphy_stop_ddfs(dev); finish: lpphy_restore_crs(dev, true); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, old_rf_ovrval); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, old_rf_ovr); b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, old_afe_ovrval); b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, old_afe_ovr); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, old_rf2_ovrval); b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, old_rf2_ovr); b43_phy_write(dev, B43_LPPHY_LP_PHY_CTL, old_phy_ctl); lpphy_set_bb_mult(dev, old_bbmult); if (old_txg_ovr) { /* * SPEC FIXME: The specs say "get_tx_gains" here, which is * illogical. According to lwfinger, vendor driver v4.150.10.5 * has a Set here, while v4.174.64.19 has a Get - regression in * the vendor driver? This should be tested this once the code * is testable. */ lpphy_set_tx_gains(dev, tx_gains); } lpphy_set_tx_power_control(dev, old_txpctl); if (lpphy->rc_cap) lpphy_set_rc_cap(dev); } static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->sdev->bus; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF; int i; b43_radio_write(dev, B2063_RX_BB_SP8, 0x0); b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E); b43_radio_mask(dev, B2063_PLL_SP1, 0xF7); b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C); b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x15); b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x70); b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x52); b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1); b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7D); for (i = 0; i < 10000; i++) { if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2) break; msleep(1); } if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)) b43_radio_write(dev, B2063_RX_BB_SP8, tmp); tmp = b43_radio_read(dev, B2063_TX_BB_SP3) & 0xFF; b43_radio_write(dev, B2063_TX_BB_SP3, 0x0); 
b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E); b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C); b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x55); b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x76); if (crystal_freq == 24000000) { b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0xFC); b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x0); } else { b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x13); b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1); } b43_radio_write(dev, B2063_PA_SP7, 0x7D); for (i = 0; i < 10000; i++) { if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2) break; msleep(1); } if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)) b43_radio_write(dev, B2063_TX_BB_SP3, tmp); b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E); } static void lpphy_calibrate_rc(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; if (dev->phy.rev >= 2) { lpphy_rev2plus_rc_calib(dev); } else if (!lpphy->rc_cap) { if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) lpphy_rev0_1_rc_calib(dev); } else { lpphy_set_rc_cap(dev); } } static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) { if (dev->phy.rev >= 2) return; // rev2+ doesn't support antenna diversity if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1)) return; b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2); b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1); b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); dev->phy.lp->antenna = antenna; } static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b) { u16 tmp[2]; tmp[0] = a; tmp[1] = b; b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80), 2, tmp); } static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index) { struct b43_phy_lp *lpphy = dev->phy.lp; struct lpphy_tx_gains gains; u32 iq_comp, tx_gain, coeff, rf_power; lpphy->tx_pwr_idx_over = index; lpphy_read_tx_pctl_mode_from_hardware(dev); if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF) 
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW); if (dev->phy.rev >= 2) { iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320)); tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192)); gains.pad = (tx_gain >> 16) & 0xFF; gains.gm = tx_gain & 0xFF; gains.pga = (tx_gain >> 8) & 0xFF; gains.dac = (iq_comp >> 28) & 0xFF; lpphy_set_tx_gains(dev, gains); } else { iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320)); tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192)); b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xF800, (tx_gain >> 4) & 0x7FFF); lpphy_set_dac_gain(dev, tx_gain & 0x7); lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F); } lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF); lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF); if (dev->phy.rev >= 2) { coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448)); } else { coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448)); } b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF); if (dev->phy.rev >= 2) { rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576)); b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, rf_power & 0xFFFF);//SPEC FIXME mask & set != 0 } lpphy_enable_tx_gain_override(dev); } static void lpphy_btcoex_override(struct b43_wldev *dev) { b43_write16(dev, B43_MMIO_BTCOEX_CTL, 0x3); b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF); } static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev, bool blocked) { //TODO check MAC control register if (blocked) { if (dev->phy.rev >= 2) { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00); b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808); } else { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF); b43_phy_set(dev, 
B43_LPPHY_RF_OVERRIDE_2, 0x0018); } } else { b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xE0FF); if (dev->phy.rev >= 2) b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7); else b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7); } } /* This was previously called lpphy_japan_filter */ static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel) { struct b43_phy_lp *lpphy = dev->phy.lp; u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter! if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific? b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9); if ((dev->phy.rev == 1) && (lpphy->rc_cap)) lpphy_set_rc_cap(dev); } else { b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F); } } static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode) { if (mode != TSSI_MUX_EXT) { b43_radio_set(dev, B2063_PA_SP1, 0x2); b43_phy_set(dev, B43_PHY_OFDM(0xF3), 0x1000); b43_radio_write(dev, B2063_PA_CTL10, 0x51); if (mode == TSSI_MUX_POSTPA) { b43_radio_mask(dev, B2063_PA_SP1, 0xFFFE); b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFC7); } else { b43_radio_maskset(dev, B2063_PA_SP1, 0xFFFE, 0x1); b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFC7, 0x20); } } else { B43_WARN_ON(1); } } static void lpphy_tx_pctl_init_hw(struct b43_wldev *dev) { u16 tmp; int i; //SPEC TODO Call LP PHY Clear TX Power offsets for (i = 0; i < 64; i++) { if (dev->phy.rev >= 2) b43_lptab_write(dev, B43_LPTAB32(7, i + 1), i); else b43_lptab_write(dev, B43_LPTAB32(10, i + 1), i); } b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xFF00, 0xFF); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0x8FFF, 0x5000); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0xFFC0, 0x1F); if (dev->phy.rev < 2) { b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xEFFF); b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xDFFF, 0x2000); } else { b43_phy_mask(dev, B43_PHY_OFDM(0x103), 0xFFFE); b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFFB, 0x4); b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFEF, 
0x10); b43_radio_maskset(dev, B2063_IQ_CALIB_CTL2, 0xF3, 0x1); lpphy_set_tssi_mux(dev, TSSI_MUX_POSTPA); } b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0x7FFF, 0x8000); b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF); b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF); b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW); if (dev->phy.rev < 2) { b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF, 0x1000); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xEFFF); } else { lpphy_set_tx_power_by_index(dev, 0x7F); } b43_dummy_transmission(dev, true, true); tmp = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_STAT); if (tmp & 0x8000) { b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0xFFC0, (tmp & 0xFF) - 32); } b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF); // (SPEC?) TODO Set "Target TX frequency" variable to 0 // SPEC FIXME "Set BB Multiplier to 0xE000" impossible - bb_mult is u8! } static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev) { struct lpphy_tx_gains gains; if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { gains.gm = 4; gains.pad = 12; gains.pga = 12; gains.dac = 0; } else { gains.gm = 7; gains.pad = 14; gains.pga = 15; gains.dac = 0; } lpphy_set_tx_gains(dev, gains); lpphy_set_bb_mult(dev, 150); } /* Initialize TX power control */ static void lpphy_tx_pctl_init(struct b43_wldev *dev) { if (0/*FIXME HWPCTL capable */) { lpphy_tx_pctl_init_hw(dev); } else { /* This device is only software TX power control capable. 
 */
		lpphy_tx_pctl_init_sw(dev);
	}
}

/*
 * PR 41573 workaround: save the 256-entry TX power table, re-run the
 * table/baseband/TX-power-control init sequence, then restore the table
 * and the previously active TSSI / power-index / antenna state.
 */
static void lpphy_pr41573_workaround(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u32 *saved_tab;
	const unsigned int saved_tab_size = 256;
	enum b43_lpphy_txpctl_mode txpctl_mode;
	s8 tx_pwr_idx_over;
	u16 tssi_npt, tssi_idx;

	saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
	if (!saved_tab) {
		b43err(dev->wl, "PR41573 failed. Out of memory!\n");
		return;
	}

	/* Snapshot the software TX power control state before re-init. */
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	txpctl_mode = lpphy->txpctl_mode;
	tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
	tssi_npt = lpphy->tssi_npt;
	tssi_idx = lpphy->tssi_idx;

	/* The power table lives in a different LP-table on rev < 2. */
	if (dev->phy.rev < 2) {
		b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
				    saved_tab_size, saved_tab);
	} else {
		b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
				    saved_tab_size, saved_tab);
	}

	//FIXME PHY reset
	lpphy_table_init(dev); //FIXME is table init needed?
	lpphy_baseband_init(dev);
	lpphy_tx_pctl_init(dev);
	b43_lpphy_op_software_rfkill(dev, false);
	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);

	/* Restore the saved power table into the same rev-dependent slot. */
	if (dev->phy.rev < 2) {
		b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140),
				     saved_tab_size, saved_tab);
	} else {
		b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140),
				     saved_tab_size, saved_tab);
	}

	b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel);
	lpphy->tssi_npt = tssi_npt;
	lpphy->tssi_idx = tssi_idx;
	lpphy_set_analog_filter(dev, lpphy->channel);
	if (tx_pwr_idx_over != -1)
		lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over);
	if (lpphy->rc_cap)
		lpphy_set_rc_cap(dev);
	b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna);
	lpphy_set_tx_power_control(dev, txpctl_mode);
	kfree(saved_tab);
}

/* Per-channel RX I/Q compensation coefficient pair. */
struct lpphy_rx_iq_comp {
	u8 chan;
	s8 c1, c0;
};

/* RX I/Q compensation table for the BCM5354 (2.4 GHz channels only). */
static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = {
	{ .chan = 1, .c1 = -66, .c0 = 15, },
	{ .chan = 2, .c1 = -66, .c0 = 15, },
	{ .chan = 3, .c1 = -66, .c0 = 15, },
	{ .chan = 4, .c1 = -66, .c0 = 15, },
	{ .chan = 5, .c1 = -66, .c0 = 15, },
	{ .chan = 6, .c1 = -66, .c0 = 15, },
	{ .chan = 7, .c1 = -66, .c0 = 14, },
	{ .chan = 8, .c1 = -66, .c0 = 14, },
	{ .chan = 9, .c1 = -66, .c0 = 14, },
	{ .chan = 10, .c1 = -66, .c0 = 14, },
	{ .chan = 11, .c1 = -66, .c0 = 14, },
	{ .chan = 12, .c1 = -66, .c0 = 13, },
	{ .chan = 13, .c1 = -66, .c0 = 13, },
	{ .chan = 14, .c1 = -66, .c0 = 13, },
};

/* RX I/Q compensation table for PHY revisions 0 and 1 (2.4 + 5 GHz). */
static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = {
	{ .chan = 1, .c1 = -64, .c0 = 13, },
	{ .chan = 2, .c1 = -64, .c0 = 13, },
	{ .chan = 3, .c1 = -64, .c0 = 13, },
	{ .chan = 4, .c1 = -64, .c0 = 13, },
	{ .chan = 5, .c1 = -64, .c0 = 12, },
	{ .chan = 6, .c1 = -64, .c0 = 12, },
	{ .chan = 7, .c1 = -64, .c0 = 12, },
	{ .chan = 8, .c1 = -64, .c0 = 12, },
	{ .chan = 9, .c1 = -64, .c0 = 12, },
	{ .chan = 10, .c1 = -64, .c0 = 11, },
	{ .chan = 11, .c1 = -64, .c0 = 11, },
	{ .chan = 12, .c1 = -64, .c0 = 11, },
	{ .chan = 13, .c1 = -64, .c0 = 11, },
	{ .chan = 14, .c1 = -64, .c0 = 10, },
	{ .chan = 34, .c1 = -62, .c0 = 24, },
	{ .chan = 38, .c1 = -62, .c0 = 24, },
	{ .chan = 42, .c1 = -62, .c0 = 24, },
	{ .chan = 46, .c1 = -62, .c0 = 23, },
	{ .chan = 36, .c1 = -62, .c0 = 24, },
	{ .chan = 40, .c1 = -62, .c0 = 24, },
	{ .chan = 44, .c1 = -62, .c0 = 23, },
	{ .chan = 48, .c1 = -62, .c0 = 23, },
	{ .chan = 52, .c1 = -62, .c0 = 23, },
	{ .chan = 56, .c1 = -62, .c0 = 22, },
	{ .chan = 60, .c1 = -62, .c0 = 22, },
	{ .chan = 64, .c1 = -62, .c0 = 22, },
	{ .chan = 100, .c1 = -62, .c0 = 16, },
	{ .chan = 104, .c1 = -62, .c0 = 16, },
	{ .chan = 108, .c1 = -62, .c0 = 15, },
	{ .chan = 112, .c1 = -62, .c0 = 14, },
	{ .chan = 116, .c1 = -62, .c0 = 14, },
	{ .chan = 120, .c1 = -62, .c0 = 13, },
	{ .chan = 124, .c1 = -62, .c0 = 12, },
	{ .chan = 128, .c1 = -62, .c0 = 12, },
	{ .chan = 132, .c1 = -62, .c0 = 12, },
	{ .chan = 136, .c1 = -62, .c0 = 11, },
	{ .chan = 140, .c1 = -62, .c0 = 10, },
	{ .chan = 149, .c1 = -61, .c0 = 9, },
	{ .chan = 153, .c1 = -61, .c0 = 9, },
	{ .chan = 157, .c1 = -61, .c0 = 9, },
	{ .chan = 161, .c1 = -61, .c0 = 8, },
	{ .chan = 165, .c1 = -61, .c0 = 8, },
	{ .chan = 184, .c1 = -62, .c0 = 25, },
	{ .chan = 188, .c1 = -62, .c0 = 25, },
	{ .chan = 192, .c1 = -62, .c0 = 25, },
	{ .chan = 196, .c1 = -62, .c0 = 25, },
	{ .chan = 200, .c1 = -62, .c0 = 25, },
	{ .chan = 204, .c1 = -62, .c0 = 25, },
	{ .chan = 208, .c1 = -62, .c0 = 25, },
	{ .chan = 212, .c1 = -62, .c0 = 25, },
	{ .chan = 216, .c1 = -62, .c0 = 26, },
};

/* Single channel-independent compensation entry used for PHY rev >= 2. */
static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
	.chan = 0,
	.c1 = -64,
	.c0 = 0,
};

/*
 * Measure I/Q power over "samples" samples and derive new RX compensation
 * coefficients from the I/Q cross-product.  Returns the (nonzero) result
 * of lpphy_rx_iq_est() on success, 0 on failure; on failure the original
 * coefficients (captured at entry) are written back.
 */
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
	struct lpphy_iq_est iq_est;
	u16 c0, c1;
	int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret;

	/* Capture the current coefficient pair (c0 in high byte). */
	c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S);
	c0 = c1 >> 8;
	c1 |= 0xFF;

	/* Load the measurement coefficients before estimating. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0);
	b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF);

	ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est);
	if (!ret)
		goto out;

	prod = iq_est.iq_prod;
	ipwr = iq_est.i_pwr;
	qpwr = iq_est.q_pwr;

	/* Too little signal power to compute anything meaningful. */
	if (ipwr + qpwr < 2) {
		ret = 0;
		goto out;
	}

	/* Fixed-point normalization by the operands' MSB positions. */
	prod_msb = fls(abs(prod));
	q_msb = fls(abs(qpwr));
	tmp1 = prod_msb - 20;

	if (tmp1 >= 0) {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) /
			(ipwr >> tmp1);
	} else {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) /
			(ipwr << -tmp1);
	}

	tmp2 = q_msb - 11;

	if (tmp2 >= 0)
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2);
	else
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2);

	tmp4 -= tmp3 * tmp3;
	tmp4 = -int_sqrt(tmp4);

	c0 = tmp3 >> 3;
	c1 = tmp4 >> 4;
out:
	/* Commit either the newly computed or the saved coefficients. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1);
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8);
	return ret;
}

/*
 * Start playback of "samples" entries from the sample-play buffer.
 * loops == 0xFFFF means endless playback (register takes loops - 1
 * otherwise); "wait" is the inter-sample wait programmed into bits 6+.
 */
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
			      u16 wait)
{
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL,
			0xFFC0, samples - 1);
	if (loops != 0xFFFF)
		loops--;
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops);
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6);
	b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1);
}

//SPEC FIXME what does a negative freq mean?
static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max) { struct b43_phy_lp *lpphy = dev->phy.lp; u16 buf[64]; int i, samples = 0, angle = 0; int rotation = (((36 * freq) / 20) << 16) / 100; struct b43_c32 sample; lpphy->tx_tone_freq = freq; if (freq) { /* Find i for which abs(freq) integrally divides 20000 * i */ for (i = 1; samples * abs(freq) != 20000 * i; i++) { samples = (20000 * i) / abs(freq); if(B43_WARN_ON(samples > 63)) return; } } else { samples = 2; } for (i = 0; i < samples; i++) { sample = b43_cordic(angle); angle += rotation; buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8; buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF); } b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf); lpphy_run_samples(dev, samples, 0xFFFF, 0); } static void lpphy_stop_tx_tone(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; int i; lpphy->tx_tone_freq = 0; b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000); for (i = 0; i < 31; i++) { if (!(b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1)) break; udelay(100); } } static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, int mode, bool useindex, u8 index) { //TODO } static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; struct lpphy_tx_gains gains, oldgains; int old_txpctl, old_afe_ovr, old_rf, old_bbmult; lpphy_read_tx_pctl_mode_from_hardware(dev); old_txpctl = lpphy->txpctl_mode; old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40; if (old_afe_ovr) oldgains = lpphy_get_tx_gains(dev); old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF; old_bbmult = lpphy_get_bb_mult(dev); lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0) lpphy_papd_cal(dev, gains, 0, 1, 30); else lpphy_papd_cal(dev, gains, 0, 1, 65); if (old_afe_ovr) lpphy_set_tx_gains(dev, oldgains); lpphy_set_bb_mult(dev, old_bbmult); lpphy_set_tx_power_control(dev, 
old_txpctl); b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf); } static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx, bool rx, bool pa, struct lpphy_tx_gains *gains) { struct b43_phy_lp *lpphy = dev->phy.lp; const struct lpphy_rx_iq_comp *iqcomp = NULL; struct lpphy_tx_gains nogains, oldgains; u16 tmp; int i, ret; memset(&nogains, 0, sizeof(nogains)); memset(&oldgains, 0, sizeof(oldgains)); if (dev->dev->chip_id == 0x5354) { for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) { if (lpphy_5354_iq_table[i].chan == lpphy->channel) { iqcomp = &lpphy_5354_iq_table[i]; } } } else if (dev->phy.rev >= 2) { iqcomp = &lpphy_rev2plus_iq_comp; } else { for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) { if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) { iqcomp = &lpphy_rev0_1_iq_table[i]; } } } if (B43_WARN_ON(!iqcomp)) return 0; b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1); b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, iqcomp->c0 << 8); if (noise) { tx = true; rx = false; pa = false; } lpphy_set_trsw_over(dev, tx, rx); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7, pa << 3); } else { b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20); b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFDF, pa << 5); } tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40; if (noise) lpphy_set_rx_gain(dev, 0x2D5D); else { if (tmp) oldgains = lpphy_get_tx_gains(dev); if (!gains) gains = &nogains; lpphy_set_tx_gains(dev, *gains); } b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE); b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800); b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800); lpphy_set_deaf(dev, false); if (noise) ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0); else { lpphy_start_tx_tone(dev, 4000, 100); ret = lpphy_calc_rx_iq_comp(dev, 0x4000); 
lpphy_stop_tx_tone(dev); } lpphy_clear_deaf(dev, false); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7); b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF); if (!noise) { if (tmp) lpphy_set_tx_gains(dev, oldgains); else lpphy_disable_tx_gain_override(dev); } lpphy_disable_rx_gain_override(dev); b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE); b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF); return ret; } static void lpphy_calibration(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; enum b43_lpphy_txpctl_mode saved_pctl_mode; bool full_cal = false; if (lpphy->full_calib_chan != lpphy->channel) { full_cal = true; lpphy->full_calib_chan = lpphy->channel; } b43_mac_suspend(dev); lpphy_btcoex_override(dev); if (dev->phy.rev >= 2) lpphy_save_dig_flt_state(dev); lpphy_read_tx_pctl_mode_from_hardware(dev); saved_pctl_mode = lpphy->txpctl_mode; lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); //TODO Perform transmit power table I/Q LO calibration if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF)) lpphy_pr41573_workaround(dev); if ((dev->phy.rev >= 2) && full_cal) { lpphy_papd_cal_txpwr(dev); } lpphy_set_tx_power_control(dev, saved_pctl_mode); if (dev->phy.rev >= 2) lpphy_restore_dig_flt_state(dev); lpphy_rx_iq_cal(dev, true, true, false, false, NULL); b43_mac_enable(dev); } static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) { b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } static void b43_lpphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg) { 
/* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); /* LP-PHY needs a special bit set for read access */ if (dev->phy.rev < 2) { if (reg != 0x4001) reg |= 0x100; } else reg |= 0x200; b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } struct b206x_channel { u8 channel; u16 freq; u8 data[12]; }; static const struct b206x_channel b2062_chantbl[] = { { .channel = 1, .freq = 2412, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 2, .freq = 2417, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 3, .freq = 2422, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 4, .freq = 2427, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 5, .freq = 2432, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 6, .freq = 2437, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 7, .freq = 2442, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 8, .freq = 2447, .data[0] = 0xFF, 
.data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 9, .freq = 2452, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 10, .freq = 2457, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 11, .freq = 2462, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 12, .freq = 2467, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 13, .freq = 2472, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 14, .freq = 2484, .data[0] = 0xFF, .data[1] = 0xFF, .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, { .channel = 34, .freq = 5170, .data[0] = 0x00, .data[1] = 0x22, .data[2] = 0x20, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 38, .freq = 5190, .data[0] = 0x00, .data[1] = 0x11, .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 42, .freq = 5210, .data[0] = 0x00, .data[1] = 0x11, .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 46, .freq = 5230, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 
0x88, }, { .channel = 36, .freq = 5180, .data[0] = 0x00, .data[1] = 0x11, .data[2] = 0x20, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 40, .freq = 5200, .data[0] = 0x00, .data[1] = 0x11, .data[2] = 0x10, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 44, .freq = 5220, .data[0] = 0x00, .data[1] = 0x11, .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 48, .freq = 5240, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 52, .freq = 5260, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 56, .freq = 5280, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 60, .freq = 5300, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x63, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 64, .freq = 5320, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x62, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 100, .freq = 5500, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x30, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 104, .freq = 5520, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 108, .freq = 5540, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, 
.data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 112, .freq = 5560, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 116, .freq = 5580, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x10, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 120, .freq = 5600, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 124, .freq = 5620, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 128, .freq = 5640, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 132, .freq = 5660, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 136, .freq = 5680, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 140, .freq = 5700, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 149, .freq = 5745, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 153, .freq = 5765, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 157, .freq = 5785, .data[0] = 0x00, 
.data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 161, .freq = 5805, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 165, .freq = 5825, .data[0] = 0x00, .data[1] = 0x00, .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, { .channel = 184, .freq = 4920, .data[0] = 0x55, .data[1] = 0x77, .data[2] = 0x90, .data[3] = 0xF7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 188, .freq = 4940, .data[0] = 0x44, .data[1] = 0x77, .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 192, .freq = 4960, .data[0] = 0x44, .data[1] = 0x66, .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 196, .freq = 4980, .data[0] = 0x33, .data[1] = 0x66, .data[2] = 0x70, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 200, .freq = 5000, .data[0] = 0x22, .data[1] = 0x55, .data[2] = 0x60, .data[3] = 0xD7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 204, .freq = 5020, .data[0] = 0x22, .data[1] = 0x55, .data[2] = 0x60, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 208, .freq = 5040, .data[0] = 0x22, .data[1] = 0x44, .data[2] = 0x50, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, { .channel = 212, .freq = 5060, .data[0] = 0x11, .data[1] = 0x44, .data[2] = 0x50, .data[3] = 0xA5, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, 
.data[8] = 0x88, }, { .channel = 216, .freq = 5080, .data[0] = 0x00, .data[1] = 0x44, .data[2] = 0x40, .data[3] = 0xB6, .data[4] = 0x3C, .data[5] = 0x77, .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, }; static const struct b206x_channel b2063_chantbl[] = { { .channel = 1, .freq = 2412, .data[0] = 0x6F, .data[1] = 0x3C, .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 2, .freq = 2417, .data[0] = 0x6F, .data[1] = 0x3C, .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 3, .freq = 2422, .data[0] = 0x6F, .data[1] = 0x3C, .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 4, .freq = 2427, .data[0] = 0x6F, .data[1] = 0x2C, .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 5, .freq = 2432, .data[0] = 0x6F, .data[1] = 0x2C, .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 6, .freq = 2437, .data[0] = 0x6F, .data[1] = 0x2C, .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 7, .freq = 2442, .data[0] = 0x6F, .data[1] = 0x2C, .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 8, .freq = 2447, .data[0] = 0x6F, .data[1] = 
0x2C, .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 9, .freq = 2452, .data[0] = 0x6F, .data[1] = 0x1C, .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 10, .freq = 2457, .data[0] = 0x6F, .data[1] = 0x1C, .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 11, .freq = 2462, .data[0] = 0x6E, .data[1] = 0x1C, .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 12, .freq = 2467, .data[0] = 0x6E, .data[1] = 0x1C, .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 13, .freq = 2472, .data[0] = 0x6E, .data[1] = 0x1C, .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 14, .freq = 2484, .data[0] = 0x6E, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x80, .data[11] = 0x70, }, { .channel = 34, .freq = 5170, .data[0] = 0x6A, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x02, .data[5] = 0x05, .data[6] = 0x0D, .data[7] = 0x0D, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 36, .freq = 5180, .data[0] = 0x6A, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x05, 
.data[6] = 0x0D, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 38, .freq = 5190, .data[0] = 0x6A, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 40, .freq = 5200, .data[0] = 0x69, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 42, .freq = 5210, .data[0] = 0x69, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, .data[6] = 0x0B, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 44, .freq = 5220, .data[0] = 0x69, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x04, .data[6] = 0x0B, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 46, .freq = 5230, .data[0] = 0x69, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03, .data[6] = 0x0A, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 48, .freq = 5240, .data[0] = 0x69, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03, .data[6] = 0x0A, .data[7] = 0x0A, .data[8] = 0x77, .data[9] = 0x60, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 52, .freq = 5260, .data[0] = 0x68, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x02, .data[6] = 0x09, .data[7] = 0x09, .data[8] = 0x77, .data[9] = 0x60, .data[10] = 0x20, .data[11] = 0x00, }, { .channel = 56, .freq = 5280, .data[0] = 0x68, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01, .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, .data[10] = 
0x10, .data[11] = 0x00, }, { .channel = 60, .freq = 5300, .data[0] = 0x68, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01, .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, .data[10] = 0x10, .data[11] = 0x00, }, { .channel = 64, .freq = 5320, .data[0] = 0x67, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, .data[10] = 0x10, .data[11] = 0x00, }, { .channel = 100, .freq = 5500, .data[0] = 0x64, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x02, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 104, .freq = 5520, .data[0] = 0x64, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x01, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 108, .freq = 5540, .data[0] = 0x63, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x01, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 112, .freq = 5560, .data[0] = 0x63, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 116, .freq = 5580, .data[0] = 0x62, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 120, .freq = 5600, .data[0] = 0x62, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 124, .freq = 5620, .data[0] = 
0x62, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 128, .freq = 5640, .data[0] = 0x61, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 132, .freq = 5660, .data[0] = 0x61, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 136, .freq = 5680, .data[0] = 0x61, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 140, .freq = 5700, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 149, .freq = 5745, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 153, .freq = 5765, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 157, .freq = 5785, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 161, .freq = 5805, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 
0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 165, .freq = 5825, .data[0] = 0x60, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, .data[10] = 0x00, .data[11] = 0x00, }, { .channel = 184, .freq = 4920, .data[0] = 0x6E, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0E, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xC0, .data[10] = 0x50, .data[11] = 0x00, }, { .channel = 188, .freq = 4940, .data[0] = 0x6E, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0D, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0, .data[10] = 0x50, .data[11] = 0x00, }, { .channel = 192, .freq = 4960, .data[0] = 0x6E, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0, .data[10] = 0x50, .data[11] = 0x00, }, { .channel = 196, .freq = 4980, .data[0] = 0x6D, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, .data[10] = 0x40, .data[11] = 0x00, }, { .channel = 200, .freq = 5000, .data[0] = 0x6D, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0B, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, .data[10] = 0x40, .data[11] = 0x00, }, { .channel = 204, .freq = 5020, .data[0] = 0x6D, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0A, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, .data[10] = 0x40, .data[11] = 0x00, }, { .channel = 208, .freq = 5040, .data[0] = 0x6C, .data[1] = 0x0C, .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x07, .data[5] = 0x09, .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 
0x77, .data[9] = 0x90, .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 212, .freq = 5060, .data[0] = 0x6C, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x06, .data[5] = 0x08,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 216, .freq = 5080, .data[0] = 0x6C, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x05, .data[5] = 0x08,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90,
	  .data[10] = 0x40, .data[11] = 0x00, },
};

/*
 * Pulse the B2062 RFPLL bias through a reset cycle.  The BCM5354 needs
 * an extra write and a different final value.
 */
static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev)
{
	b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF);
	udelay(20);
	if (dev->dev->chip_id == 0x5354) {
		b43_radio_write(dev, B2062_N_COMM1, 4);
		b43_radio_write(dev, B2062_S_RFPLL_CTL2, 4);
	} else {
		b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0);
	}
	udelay(5);
}

/* Kick off a B2062 VCO calibration and wait for it to run. */
static void lpphy_b2062_vco_calib(struct b43_wldev *dev)
{
	b43_radio_write(dev, B2062_S_RFPLL_CTL21, 0x42);
	b43_radio_write(dev, B2062_S_RFPLL_CTL21, 0x62);
	udelay(200);
}

/*
 * Tune the B2062 radio to "channel": program the per-channel register
 * values from b2062_chantbl, then compute and program the fractional
 * RFPLL divider from the crystal frequency, retrying the VCO
 * calibration once with alternate CTL33/CTL34 values if it fails.
 * Returns 0 on success, -EINVAL for an unknown channel, -EIO if the
 * VCO calibration fails twice.
 */
static int lpphy_b2062_tune(struct b43_wldev *dev, unsigned int channel)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct ssb_bus *bus = dev->dev->sdev->bus;
	const struct b206x_channel *chandata = NULL;
	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
	u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
	int i, err = 0;

	for (i = 0; i < ARRAY_SIZE(b2062_chantbl); i++) {
		if (b2062_chantbl[i].channel == channel) {
			chandata = &b2062_chantbl[i];
			break;
		}
	}

	if (B43_WARN_ON(!chandata))
		return -EINVAL;

	/* Per-channel tuning values from the table. */
	b43_radio_set(dev, B2062_S_RFPLL_CTL14, 0x04);
	b43_radio_write(dev, B2062_N_LGENA_TUNE0, chandata->data[0]);
	b43_radio_write(dev, B2062_N_LGENA_TUNE2, chandata->data[1]);
	b43_radio_write(dev, B2062_N_LGENA_TUNE3, chandata->data[2]);
	b43_radio_write(dev, B2062_N_TX_TUNE, chandata->data[3]);
	b43_radio_write(dev, B2062_S_LGENG_CTL1, chandata->data[4]);
	b43_radio_write(dev, B2062_N_LGENA_CTL5, chandata->data[5]);
	b43_radio_write(dev, B2062_N_LGENA_CTL6, chandata->data[6]);
	b43_radio_write(dev, B2062_N_TX_PGA, chandata->data[7]);
	b43_radio_write(dev, B2062_N_TX_PAD, chandata->data[8]);

	tmp1 = crystal_freq / 1000;
	tmp2 = lpphy->pdiv * 1000;
	b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xCC);
	b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0x07);
	lpphy_b2062_reset_pll_bias(dev);

	/* Fractional divider: spread the remainder over CTL26..CTL29
	 * one byte at a time (long division in base 256). */
	tmp3 = tmp2 * channel2freq_lp(channel);
	if (channel2freq_lp(channel) < 4000)
		tmp3 *= 2;
	tmp4 = 48 * tmp1;
	tmp6 = tmp3 / tmp4;
	tmp7 = tmp3 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL26, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL27, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL28, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL29, tmp6 + ((2 * tmp7) / tmp4));
	tmp8 = b43_radio_read(dev, B2062_S_RFPLL_CTL19);
	tmp9 = ((2 * tmp3 * (tmp8 + 1)) + (3 * tmp1)) / (6 * tmp1);
	b43_radio_write(dev, B2062_S_RFPLL_CTL23, (tmp9 >> 8) + 16);
	b43_radio_write(dev, B2062_S_RFPLL_CTL24, tmp9 & 0xFF);

	lpphy_b2062_vco_calib(dev);
	if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10) {
		/* First calibration failed — retry once with the
		 * alternate CTL33/CTL34 values. */
		b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xFC);
		b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0);
		lpphy_b2062_reset_pll_bias(dev);
		lpphy_b2062_vco_calib(dev);
		if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10)
			err = -EIO;
	}

	b43_radio_mask(dev, B2062_S_RFPLL_CTL14, ~0x04);
	return err;
}

/*
 * Run a B2063 VCO calibration: step the CALNRST control bits up with
 * 1 us pauses, wait 300 us for the calibration, then re-enable SP1.
 */
static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
{
	u16 tmp;

	b43_radio_mask(dev, B2063_PLL_SP1, ~0x40);
	tmp = b43_radio_read(dev, B2063_PLL_JTAG_CALNRST) & 0xF8;
	b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp);
	udelay(1);
	b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x4);
	udelay(1);
	b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x6);
	udelay(1);
	b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x7);
	udelay(300);
	b43_radio_set(dev, B2063_PLL_SP1, 0x40);
}

static int lpphy_b2063_tune(struct b43_wldev *dev, unsigned
int channel) { struct ssb_bus *bus = dev->dev->sdev->bus; static const struct b206x_channel *chandata = NULL; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count; u16 old_comm15, scale; u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; int i, div = (crystal_freq <= 26000000 ? 1 : 2); for (i = 0; i < ARRAY_SIZE(b2063_chantbl); i++) { if (b2063_chantbl[i].channel == channel) { chandata = &b2063_chantbl[i]; break; } } if (B43_WARN_ON(!chandata)) return -EINVAL; b43_radio_write(dev, B2063_LOGEN_VCOBUF1, chandata->data[0]); b43_radio_write(dev, B2063_LOGEN_MIXER2, chandata->data[1]); b43_radio_write(dev, B2063_LOGEN_BUF2, chandata->data[2]); b43_radio_write(dev, B2063_LOGEN_RCCR1, chandata->data[3]); b43_radio_write(dev, B2063_A_RX_1ST3, chandata->data[4]); b43_radio_write(dev, B2063_A_RX_2ND1, chandata->data[5]); b43_radio_write(dev, B2063_A_RX_2ND4, chandata->data[6]); b43_radio_write(dev, B2063_A_RX_2ND7, chandata->data[7]); b43_radio_write(dev, B2063_A_RX_PS6, chandata->data[8]); b43_radio_write(dev, B2063_TX_RF_CTL2, chandata->data[9]); b43_radio_write(dev, B2063_TX_RF_CTL5, chandata->data[10]); b43_radio_write(dev, B2063_PA_CTL11, chandata->data[11]); old_comm15 = b43_radio_read(dev, B2063_COMM15); b43_radio_set(dev, B2063_COMM15, 0x1E); if (chandata->freq > 4000) /* spec says 2484, but 4000 is safer */ vco_freq = chandata->freq << 1; else vco_freq = chandata->freq << 2; freqref = crystal_freq * 3; val1 = lpphy_qdiv_roundup(crystal_freq, 1000000, 16); val2 = lpphy_qdiv_roundup(crystal_freq, 1000000 * div, 16); val3 = lpphy_qdiv_roundup(vco_freq, 3, 16); timeout = ((((8 * crystal_freq) / (div * 5000000)) + 1) >> 1) - 1; b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB3, 0x2); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB6, 0xFFF8, timeout >> 2); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7, 0xFF9F,timeout << 5); timeoutref = ((((8 * crystal_freq) / (div * (timeout + 1))) + 999999) / 
1000000) + 1; b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB5, timeoutref); count = lpphy_qdiv_roundup(val3, val2 + 16, 16); count *= (timeout + 1) * (timeoutref + 1); count--; b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7, 0xF0, count >> 8); b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB8, count & 0xFF); tmp1 = ((val3 * 62500) / freqref) << 4; tmp2 = ((val3 * 62500) % freqref) << 4; while (tmp2 >= freqref) { tmp1++; tmp2 -= freqref; } b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG1, 0xFFE0, tmp1 >> 4); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFE0F, tmp1 << 4); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFFF0, tmp1 >> 16); b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG3, (tmp2 >> 8) & 0xFF); b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG4, tmp2 & 0xFF); b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF1, 0xB9); b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF2, 0x88); b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF3, 0x28); b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF4, 0x63); tmp3 = ((41 * (val3 - 3000)) /1200) + 27; tmp4 = lpphy_qdiv_roundup(132000 * tmp1, 8451, 16); if ((tmp4 + tmp3 - 1) / tmp3 > 60) { scale = 1; tmp5 = ((tmp4 + tmp3) / (tmp3 << 1)) - 8; } else { scale = 0; tmp5 = ((tmp4 + (tmp3 >> 1)) / tmp3) - 8; } b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFC0, tmp5); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFBF, scale << 6); tmp6 = lpphy_qdiv_roundup(100 * val1, val3, 16); tmp6 *= (tmp5 * 8) * (scale + 1); if (tmp6 > 150) tmp6 = 0; b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFE0, tmp6); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFDF, scale << 5); b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFFFB, 0x4); if (crystal_freq > 26000000) b43_radio_set(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0x2); else b43_radio_mask(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFD); if (val1 == 45) b43_radio_set(dev, B2063_PLL_JTAG_PLL_VCO1, 0x2); else b43_radio_mask(dev, B2063_PLL_JTAG_PLL_VCO1, 0xFD); b43_radio_set(dev, B2063_PLL_SP2, 0x3); udelay(1); 
b43_radio_mask(dev, B2063_PLL_SP2, 0xFFFC); lpphy_b2063_vco_calib(dev); b43_radio_write(dev, B2063_COMM15, old_comm15); return 0; } static int b43_lpphy_op_switch_channel(struct b43_wldev *dev, unsigned int new_channel) { struct b43_phy_lp *lpphy = dev->phy.lp; int err; if (dev->phy.radio_ver == 0x2063) { err = lpphy_b2063_tune(dev, new_channel); if (err) return err; } else { err = lpphy_b2062_tune(dev, new_channel); if (err) return err; lpphy_set_analog_filter(dev, new_channel); lpphy_adjust_gain_table(dev, channel2freq_lp(new_channel)); } lpphy->channel = new_channel; b43_write16(dev, B43_MMIO_CHANNEL, new_channel); return 0; } static int b43_lpphy_op_init(struct b43_wldev *dev) { int err; if (dev->dev->bus_type != B43_BUS_SSB) { b43err(dev->wl, "LP-PHY is supported only on SSB!\n"); return -EOPNOTSUPP; } lpphy_read_band_sprom(dev); //FIXME should this be in prepare_structs? lpphy_baseband_init(dev); lpphy_radio_init(dev); lpphy_calibrate_rc(dev); err = b43_lpphy_op_switch_channel(dev, 7); if (err) { b43dbg(dev->wl, "Switch to channel 7 failed, error = %d.\n", err); } lpphy_tx_pctl_init(dev); lpphy_calibration(dev); //TODO ACI init return 0; } static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev) { //TODO } static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi) { //TODO return B43_TXPWR_RES_DONE; } static void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on) { if (on) { b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8); } else { b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007); b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007); } } static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev) { //TODO } const struct b43_phy_operations b43_phyops_lp = { .allocate = b43_lpphy_op_allocate, .free = b43_lpphy_op_free, .prepare_structs = b43_lpphy_op_prepare_structs, .init = b43_lpphy_op_init, .phy_read = b43_lpphy_op_read, .phy_write = b43_lpphy_op_write, .phy_maskset = b43_lpphy_op_maskset, .radio_read = 
b43_lpphy_op_radio_read, .radio_write = b43_lpphy_op_radio_write, .software_rfkill = b43_lpphy_op_software_rfkill, .switch_analog = b43_lpphy_op_switch_analog, .switch_channel = b43_lpphy_op_switch_channel, .get_default_chan = b43_lpphy_op_get_default_chan, .set_rx_antenna = b43_lpphy_op_set_rx_antenna, .recalc_txpower = b43_lpphy_op_recalc_txpower, .adjust_txpower = b43_lpphy_op_adjust_txpower, .pwork_15sec = b43_lpphy_op_pwork_15sec, .pwork_60sec = lpphy_calibration, };
gpl-2.0
estiko/kernel_smartfren_d5c
arch/arm/mach-mxs/devices/platform-mxs-i2c.c
7840
1313
/* * Copyright (C) 2011 Pengutronix * Wolfram Sang <w.sang@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <asm/sizes.h> #include <mach/mx28.h> #include <mach/devices-common.h> #define mxs_i2c_data_entry_single(soc, _id) \ { \ .id = _id, \ .iobase = soc ## _I2C ## _id ## _BASE_ADDR, \ .errirq = soc ## _INT_I2C ## _id ## _ERROR, \ .dmairq = soc ## _INT_I2C ## _id ## _DMA, \ } #define mxs_i2c_data_entry(soc, _id) \ [_id] = mxs_i2c_data_entry_single(soc, _id) #ifdef CONFIG_SOC_IMX28 const struct mxs_mxs_i2c_data mx28_mxs_i2c_data[] __initconst = { mxs_i2c_data_entry(MX28, 0), mxs_i2c_data_entry(MX28, 1), }; #endif struct platform_device *__init mxs_add_mxs_i2c( const struct mxs_mxs_i2c_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_8K - 1, .flags = IORESOURCE_MEM, }, { .start = data->errirq, .end = data->errirq, .flags = IORESOURCE_IRQ, }, { .start = data->dmairq, .end = data->dmairq, .flags = IORESOURCE_IRQ, }, }; return mxs_add_platform_device("mxs-i2c", data->id, res, ARRAY_SIZE(res), NULL, 0); }
gpl-2.0
FennyFatal/i747_kernel_ics
arch/ia64/kvm/process.c
10656
25112
/* * process.c: handle interruption inject for guests. * Copyright (c) 2005, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Shaofan Li (Susue Li) <susie.li@intel.com> * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) * Xiantao Zhang (xiantao.zhang@intel.com) */ #include "vcpu.h" #include <asm/pal.h> #include <asm/sal.h> #include <asm/fpswa.h> #include <asm/kregs.h> #include <asm/tlb.h> fpswa_interface_t *vmm_fpswa_interface; #define IA64_VHPT_TRANS_VECTOR 0x0000 #define IA64_INST_TLB_VECTOR 0x0400 #define IA64_DATA_TLB_VECTOR 0x0800 #define IA64_ALT_INST_TLB_VECTOR 0x0c00 #define IA64_ALT_DATA_TLB_VECTOR 0x1000 #define IA64_DATA_NESTED_TLB_VECTOR 0x1400 #define IA64_INST_KEY_MISS_VECTOR 0x1800 #define IA64_DATA_KEY_MISS_VECTOR 0x1c00 #define IA64_DIRTY_BIT_VECTOR 0x2000 #define IA64_INST_ACCESS_BIT_VECTOR 0x2400 #define IA64_DATA_ACCESS_BIT_VECTOR 0x2800 #define IA64_BREAK_VECTOR 0x2c00 #define IA64_EXTINT_VECTOR 0x3000 #define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000 #define IA64_KEY_PERMISSION_VECTOR 0x5100 #define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200 #define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300 #define IA64_GENEX_VECTOR 0x5400 #define IA64_DISABLED_FPREG_VECTOR 0x5500 #define IA64_NAT_CONSUMPTION_VECTOR 0x5600 #define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */ #define IA64_DEBUG_VECTOR 0x5900 
#define IA64_UNALIGNED_REF_VECTOR 0x5a00 #define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00 #define IA64_FP_FAULT_VECTOR 0x5c00 #define IA64_FP_TRAP_VECTOR 0x5d00 #define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00 #define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00 #define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000 /* SDM vol2 5.5 - IVA based interruption handling */ #define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\ IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \ IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT) #define DOMN_PAL_REQUEST 0x110000 #define DOMN_SAL_REQUEST 0x110001 static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600, 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00, 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800, 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00 }; static void collect_interruption(struct kvm_vcpu *vcpu) { u64 ipsr; u64 vdcr; u64 vifs; unsigned long vpsr; struct kvm_pt_regs *regs = vcpu_regs(vcpu); vpsr = vcpu_get_psr(vcpu); vcpu_bsw0(vcpu); if (vpsr & IA64_PSR_IC) { /* Sync mpsr id/da/dd/ss/ed bits to vipsr * since after guest do rfi, we still want these bits on in * mpsr */ ipsr = regs->cr_ipsr; vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_ED)); vcpu_set_ipsr(vcpu, vpsr); /* Currently, for trap, we do not advance IIP to next * instruction. 
That's because we assume caller already * set up IIP correctly */ vcpu_set_iip(vcpu , regs->cr_iip); /* set vifs.v to zero */ vifs = VCPU(vcpu, ifs); vifs &= ~IA64_IFS_V; vcpu_set_ifs(vcpu, vifs); vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa)); } vdcr = VCPU(vcpu, dcr); /* Set guest psr * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged * be: set to the value of dcr.be * pp: set to the value of dcr.pp */ vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION; vpsr |= (vdcr & IA64_DCR_BE); /* VDCR pp bit position is different from VPSR pp bit */ if (vdcr & IA64_DCR_PP) { vpsr |= IA64_PSR_PP; } else { vpsr &= ~IA64_PSR_PP; } vcpu_set_psr(vcpu, vpsr); } void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec) { u64 viva; struct kvm_pt_regs *regs; union ia64_isr pt_isr; regs = vcpu_regs(vcpu); /* clear cr.isr.ir (incomplete register frame)*/ pt_isr.val = VMX(vcpu, cr_isr); pt_isr.ir = 0; VMX(vcpu, cr_isr) = pt_isr.val; collect_interruption(vcpu); viva = vcpu_get_iva(vcpu); regs->cr_iip = viva + vec; } static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) { union ia64_rr rr, rr1; rr.val = vcpu_get_rr(vcpu, ifa); rr1.val = 0; rr1.ps = rr.ps; rr1.rid = rr.rid; return (rr1.val); } /* * Set vIFA & vITIR & vIHA, when vPSR.ic =1 * Parameter: * set_ifa: if true, set vIFA * set_itir: if true, set vITIR * set_iha: if true, set vIHA */ void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr, int set_ifa, int set_itir, int set_iha) { long vpsr; u64 value; vpsr = VCPU(vcpu, vpsr); /* Vol2, Table 8-1 */ if (vpsr & IA64_PSR_IC) { if (set_ifa) vcpu_set_ifa(vcpu, vadr); if (set_itir) { value = vcpu_get_itir_on_fault(vcpu, vadr); vcpu_set_itir(vcpu, value); } if (set_iha) { value = vcpu_thash(vcpu, vadr); vcpu_set_iha(vcpu, value); } } } /* * Data TLB Fault * @ Data TLB vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR, IHA */ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR); } /* * 
Instruction TLB Fault * @ Instruction TLB vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR, IHA */ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); } /* * Data Nested TLB Fault * @ Data Nested TLB Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void nested_dtlb(struct kvm_vcpu *vcpu) { inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR); } /* * Alternate Data TLB Fault * @ Alternate Data TLB vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) { set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); } /* * Data TLB Fault * @ Data TLB vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr) { set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR); } /* Deal with: * VHPT Translation Vector */ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR, IHA*/ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); } /* * VHPT Instruction Fault * @ VHPT Translation vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) { _vhpt_fault(vcpu, vadr); } /* * VHPT Data Fault * @ VHPT Translation vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) { _vhpt_fault(vcpu, vadr); } /* * Deal with: * General Exception vector */ void _general_exception(struct kvm_vcpu *vcpu) { inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); } /* * Illegal Operation Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void illegal_op(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } /* * Illegal Dependency Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void illegal_dep(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } 
/* * Reserved Register/Field Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void rsv_reg_field(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } /* * Privileged Operation Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void privilege_op(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } /* * Unimplement Data Address Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void unimpl_daddr(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } /* * Privileged Register Fault * @ General Exception Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void privilege_reg(struct kvm_vcpu *vcpu) { _general_exception(vcpu); } /* Deal with * Nat consumption vector * Parameter: * vaddr: Optional, if t == REGISTER */ static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr, enum tlb_miss_type t) { /* If vPSR.ic && t == DATA/INST, IFA */ if (t == DATA || t == INSTRUCTION) { /* IFA */ set_ifa_itir_iha(vcpu, vadr, 1, 0, 0); } inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR); } /* * Instruction Nat Page Consumption Fault * @ Nat Consumption Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) { _nat_consumption_fault(vcpu, vadr, INSTRUCTION); } /* * Register Nat Consumption Fault * @ Nat Consumption Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void rnat_consumption(struct kvm_vcpu *vcpu) { _nat_consumption_fault(vcpu, 0, REGISTER); } /* * Data Nat Page Consumption Fault * @ Nat Consumption Vector * Refer to SDM Vol2 Table 5-6 & 8-1 */ void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) { _nat_consumption_fault(vcpu, vadr, DATA); } /* Deal with * Page not present vector */ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR */ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); } void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) { 
__page_not_present(vcpu, vadr); } void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) { __page_not_present(vcpu, vadr); } /* Deal with * Data access rights vector */ void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr) { /* If vPSR.ic, IFA, ITIR */ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR); } fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr, unsigned long *fpsr, unsigned long *isr, unsigned long *pr, unsigned long *ifs, struct kvm_pt_regs *regs) { fp_state_t fp_state; fpswa_ret_t ret; struct kvm_vcpu *vcpu = current_vcpu; uint64_t old_rr7 = ia64_get_rr(7UL<<61); if (!vmm_fpswa_interface) return (fpswa_ret_t) {-1, 0, 0, 0}; memset(&fp_state, 0, sizeof(fp_state_t)); /* * compute fp_state. only FP registers f6 - f11 are used by the * vmm, so set those bits in the mask and set the low volatile * pointer to point to these registers. */ fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6; /* * unsigned long (*EFI_FPSWA) ( * unsigned long trap_type, * void *Bundle, * unsigned long *pipsr, * unsigned long *pfsr, * unsigned long *pisr, * unsigned long *ppreds, * unsigned long *pifs, * void *fp_state); */ /*Call host fpswa interface directly to virtualize *guest fpswa request! */ ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]); ia64_srlz_d(); ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle, ipsr, fpsr, isr, pr, ifs, &fp_state); ia64_set_rr(7UL << 61, old_rr7); ia64_srlz_d(); return ret; } /* * Handle floating-point assist faults and traps for domain. */ unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs, unsigned long isr) { struct kvm_vcpu *v = current_vcpu; IA64_BUNDLE bundle; unsigned long fault_ip; fpswa_ret_t ret; fault_ip = regs->cr_iip; /* * When the FP trap occurs, the trapping instruction is completed. * If ipsr.ri == 0, there is the trapping instruction in previous * bundle. 
*/ if (!fp_fault && (ia64_psr(regs)->ri == 0)) fault_ip -= 16; if (fetch_code(v, fault_ip, &bundle)) return -EAGAIN; if (!bundle.i64[0] && !bundle.i64[1]) return -EACCES; ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, &regs->cr_ifs, regs); return ret.status; } void reflect_interruption(u64 ifa, u64 isr, u64 iim, u64 vec, struct kvm_pt_regs *regs) { u64 vector; int status ; struct kvm_vcpu *vcpu = current_vcpu; u64 vpsr = VCPU(vcpu, vpsr); vector = vec2off[vec]; if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { panic_vm(vcpu, "Interruption with vector :0x%lx occurs " "with psr.ic = 0\n", vector); return; } switch (vec) { case 32: /*IA64_FP_FAULT_VECTOR*/ status = vmm_handle_fpu_swa(1, regs, isr); if (!status) { vcpu_increment_iip(vcpu); return; } else if (-EAGAIN == status) return; break; case 33: /*IA64_FP_TRAP_VECTOR*/ status = vmm_handle_fpu_swa(0, regs, isr); if (!status) return ; break; } VCPU(vcpu, isr) = isr; VCPU(vcpu, iipa) = regs->cr_iip; if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) VCPU(vcpu, iim) = iim; else set_ifa_itir_iha(vcpu, ifa, 1, 1, 1); inject_guest_interruption(vcpu, vector); } static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, unsigned long arg) { struct thash_data *data; unsigned long gpa, poff; if (!is_physical_mode(vcpu)) { /* Depends on caller to provide the DTR or DTC mapping.*/ data = vtlb_lookup(vcpu, arg, D_TLB); if (data) gpa = data->page_flags & _PAGE_PPN_MASK; else { data = vhpt_lookup(arg); if (!data) return 0; gpa = data->gpaddr & _PAGE_PPN_MASK; } poff = arg & (PSIZE(data->ps) - 1); arg = PAGEALIGN(gpa, data->ps) | poff; } arg = kvm_gpa_to_mpa(arg << 1 >> 1); return (unsigned long)__va(arg); } static void set_pal_call_data(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; unsigned long gr28 = vcpu_get_gr(vcpu, 28); unsigned long gr29 = vcpu_get_gr(vcpu, 29); unsigned long gr30 = vcpu_get_gr(vcpu, 30); 
/*FIXME:For static and stacked convention, firmware * has put the parameters in gr28-gr31 before * break to vmm !!*/ switch (gr28) { case PAL_PERF_MON_INFO: case PAL_HALT_INFO: p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); break; case PAL_BRAND_INFO: p->u.pal_data.gr29 = gr29; p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); break; default: p->u.pal_data.gr29 = gr29; p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); } p->u.pal_data.gr28 = gr28; p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); p->exit_reason = EXIT_REASON_PAL_CALL; } static void get_pal_call_result(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; if (p->exit_reason == EXIT_REASON_PAL_CALL) { vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0); vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0); vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); } else panic_vm(vcpu, "Mis-set for exit reason!\n"); } static void set_sal_call_data(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32); p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33); p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34); p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35); p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36); p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37); p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38); p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39); p->exit_reason = EXIT_REASON_SAL_CALL; } static void get_sal_call_result(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; if (p->exit_reason == EXIT_REASON_SAL_CALL) { vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0); vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0); vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); } else panic_vm(vcpu, "Mis-set for exit reason!\n"); } void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, unsigned long isr, unsigned long iim) { 
struct kvm_vcpu *v = current_vcpu; long psr; if (ia64_psr(regs)->cpl == 0) { /* Allow hypercalls only when cpl = 0. */ if (iim == DOMN_PAL_REQUEST) { local_irq_save(psr); set_pal_call_data(v); vmm_transition(v); get_pal_call_result(v); vcpu_increment_iip(v); local_irq_restore(psr); return; } else if (iim == DOMN_SAL_REQUEST) { local_irq_save(psr); set_sal_call_data(v); vmm_transition(v); get_sal_call_result(v); vcpu_increment_iip(v); local_irq_restore(psr); return; } } reflect_interruption(ifa, isr, iim, 11, regs); } void check_pending_irq(struct kvm_vcpu *vcpu) { int mask, h_pending, h_inservice; u64 isr; unsigned long vpsr; struct kvm_pt_regs *regs = vcpu_regs(vcpu); h_pending = highest_pending_irq(vcpu); if (h_pending == NULL_VECTOR) { update_vhpi(vcpu, NULL_VECTOR); return; } h_inservice = highest_inservice_irq(vcpu); vpsr = VCPU(vcpu, vpsr); mask = irq_masked(vcpu, h_pending, h_inservice); if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) { isr = vpsr & IA64_PSR_RI; update_vhpi(vcpu, h_pending); reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ } else if (mask == IRQ_MASKED_BY_INSVC) { if (VCPU(vcpu, vhpi)) update_vhpi(vcpu, NULL_VECTOR); } else { /* masked by vpsr.i or vtpr.*/ update_vhpi(vcpu, h_pending); } } static void generate_exirq(struct kvm_vcpu *vcpu) { unsigned vpsr; uint64_t isr; struct kvm_pt_regs *regs = vcpu_regs(vcpu); vpsr = VCPU(vcpu, vpsr); isr = vpsr & IA64_PSR_RI; if (!(vpsr & IA64_PSR_IC)) panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ } void vhpi_detection(struct kvm_vcpu *vcpu) { uint64_t threshold, vhpi; union ia64_tpr vtpr; struct ia64_psr vpsr; vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); vtpr.val = VCPU(vcpu, tpr); threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; vhpi = VCPU(vcpu, vhpi); if (vhpi > threshold) { /* interrupt actived*/ generate_exirq(vcpu); } } void leave_hypervisor_tail(void) { struct kvm_vcpu *v = current_vcpu; if (VMX(v, 
timer_check)) { VMX(v, timer_check) = 0; if (VMX(v, itc_check)) { if (vcpu_get_itc(v) > VCPU(v, itm)) { if (!(VCPU(v, itv) & (1 << 16))) { vcpu_pend_interrupt(v, VCPU(v, itv) & 0xff); VMX(v, itc_check) = 0; } else { v->arch.timer_pending = 1; } VMX(v, last_itc) = VCPU(v, itm) + 1; } } } rmb(); if (v->arch.irq_new_pending) { v->arch.irq_new_pending = 0; VMX(v, irq_check) = 0; check_pending_irq(v); return; } if (VMX(v, irq_check)) { VMX(v, irq_check) = 0; vhpi_detection(v); } } static inline void handle_lds(struct kvm_pt_regs *regs) { regs->cr_ipsr |= IA64_PSR_ED; } void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type) { unsigned long pte; union ia64_rr rr; rr.val = ia64_get_rr(vadr); pte = vadr & _PAGE_PPN_MASK; pte = pte | PHY_PAGE_WB; thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type); return; } void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs) { unsigned long vpsr; int type; u64 vhpt_adr, gppa, pteval, rr, itir; union ia64_isr misr; union ia64_pta vpta; struct thash_data *data; struct kvm_vcpu *v = current_vcpu; vpsr = VCPU(v, vpsr); misr.val = VMX(v, cr_isr); type = vec; if (is_physical_mode(v) && (!(vadr << 1 >> 62))) { if (vec == 2) { if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) { emulate_io_inst(v, ((vadr << 1) >> 1), 4); return; } } physical_tlb_miss(v, vadr, type); return; } data = vtlb_lookup(v, vadr, type); if (data != 0) { if (type == D_TLB) { gppa = (vadr & ((1UL << data->ps) - 1)) + (data->ppn >> (data->ps - 12) << data->ps); if (__gpfn_is_io(gppa >> PAGE_SHIFT)) { if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3)) emulate_io_inst(v, gppa, data->ma); else { vcpu_set_isr(v, misr.val); data_access_rights(v, vadr); } return ; } } thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type); } else if (type == D_TLB) { if (misr.sp) { handle_lds(regs); return; } rr = vcpu_get_rr(v, vadr); itir = rr & (RR_RID_MASK | RR_PS_MASK); if (!vhpt_enabled(v, vadr, misr.rs ? 
RSE_REF : DATA_REF)) { if (vpsr & IA64_PSR_IC) { vcpu_set_isr(v, misr.val); alt_dtlb(v, vadr); } else { nested_dtlb(v); } return ; } vpta.val = vcpu_get_pta(v); /* avoid recursively walking (short format) VHPT */ vhpt_adr = vcpu_thash(v, vadr); if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { /* VHPT successfully read. */ if (!(pteval & _PAGE_P)) { if (vpsr & IA64_PSR_IC) { vcpu_set_isr(v, misr.val); dtlb_fault(v, vadr); } else { nested_dtlb(v); } } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { thash_purge_and_insert(v, pteval, itir, vadr, D_TLB); } else if (vpsr & IA64_PSR_IC) { vcpu_set_isr(v, misr.val); dtlb_fault(v, vadr); } else { nested_dtlb(v); } } else { /* Can't read VHPT. */ if (vpsr & IA64_PSR_IC) { vcpu_set_isr(v, misr.val); dvhpt_fault(v, vadr); } else { nested_dtlb(v); } } } else if (type == I_TLB) { if (!(vpsr & IA64_PSR_IC)) misr.ni = 1; if (!vhpt_enabled(v, vadr, INST_REF)) { vcpu_set_isr(v, misr.val); alt_itlb(v, vadr); return; } vpta.val = vcpu_get_pta(v); vhpt_adr = vcpu_thash(v, vadr); if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { /* VHPT successfully read. 
*/ if (pteval & _PAGE_P) { if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) { vcpu_set_isr(v, misr.val); itlb_fault(v, vadr); return ; } rr = vcpu_get_rr(v, vadr); itir = rr & (RR_RID_MASK | RR_PS_MASK); thash_purge_and_insert(v, pteval, itir, vadr, I_TLB); } else { vcpu_set_isr(v, misr.val); inst_page_not_present(v, vadr); } } else { vcpu_set_isr(v, misr.val); ivhpt_fault(v, vadr); } } } void kvm_vexirq(struct kvm_vcpu *vcpu) { u64 vpsr, isr; struct kvm_pt_regs *regs; regs = vcpu_regs(vcpu); vpsr = VCPU(vcpu, vpsr); isr = vpsr & IA64_PSR_RI; reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/ } void kvm_ia64_handle_irq(struct kvm_vcpu *v) { struct exit_ctl_data *p = &v->arch.exit_data; long psr; local_irq_save(psr); p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; vmm_transition(v); local_irq_restore(psr); VMX(v, timer_check) = 1; } static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos) { u64 oldrid, moldrid, oldpsbits, vaddr; struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos]; vaddr = p->vaddr; oldrid = VMX(v, vrr[0]); VMX(v, vrr[0]) = p->rr; oldpsbits = VMX(v, psbits[0]); VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]); moldrid = ia64_get_rr(0x0); ia64_set_rr(0x0, vrrtomrr(p->rr)); ia64_srlz_d(); vaddr = PAGEALIGN(vaddr, p->ps); thash_purge_entries_remote(v, vaddr, p->ps); VMX(v, vrr[0]) = oldrid; VMX(v, psbits[0]) = oldpsbits; ia64_set_rr(0x0, moldrid); ia64_dv_serialize_data(); } static void vcpu_do_resume(struct kvm_vcpu *vcpu) { /*Re-init VHPT and VTLB once from resume*/ vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES; thash_init(&vcpu->arch.vhpt, VHPT_SHIFT); vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES; thash_init(&vcpu->arch.vtlb, VTLB_SHIFT); ia64_set_pta(vcpu->arch.vhpt.pta.val); } static void vmm_sanity_check(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p = &vcpu->arch.exit_data; if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { panic_vm(vcpu, "Failed to do vmm sanity check," "it maybe caused by crashed vmm!!\n\n"); } } static void 
kvm_do_resume_op(struct kvm_vcpu *vcpu) { vmm_sanity_check(vcpu); /*Guarantee vcpu running on healthy vmm!*/ if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { vcpu_do_resume(vcpu); return; } if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) { thash_purge_all(vcpu); return; } if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) { while (vcpu->arch.ptc_g_count > 0) ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count); } } void vmm_transition(struct kvm_vcpu *vcpu) { ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd, 1, 0, 0, 0, 0, 0); vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host); ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd, 1, 0, 0, 0, 0, 0); kvm_do_resume_op(vcpu); } void vmm_panic_handler(u64 vec) { struct kvm_vcpu *vcpu = current_vcpu; vmm_sanity = 0; panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", vec2off[vec]); }
gpl-2.0
munjeni/kernel_htc_golfu
arch/sparc/kernel/starfire.c
10912
2682
/* * starfire.c: Starfire/E10000 support. * * Copyright (C) 1998 David S. Miller (davem@redhat.com) * Copyright (C) 2000 Anton Blanchard (anton@samba.org) */ #include <linux/kernel.h> #include <linux/slab.h> #include <asm/page.h> #include <asm/oplib.h> #include <asm/smp.h> #include <asm/upa.h> #include <asm/starfire.h> /* * A few places around the kernel check this to see if * they need to call us to do things in a Starfire specific * way. */ int this_is_starfire = 0; void check_if_starfire(void) { phandle ssnode = prom_finddevice("/ssp-serial"); if (ssnode != 0 && (s32)ssnode != -1) this_is_starfire = 1; } int starfire_hard_smp_processor_id(void) { return upa_readl(0x1fff40000d0UL); } /* * Each Starfire board has 32 registers which perform translation * and delivery of traditional interrupt packets into the extended * Starfire hardware format. Essentially UPAID's now have 2 more * bits than in all previous Sun5 systems. */ struct starfire_irqinfo { unsigned long imap_slots[32]; unsigned long tregs[32]; struct starfire_irqinfo *next; int upaid, hwmid; }; static struct starfire_irqinfo *sflist = NULL; /* Beam me up Scott(McNeil)y... 
*/ void starfire_hookup(int upaid) { struct starfire_irqinfo *p; unsigned long treg_base, hwmid, i; p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) { prom_printf("starfire_hookup: No memory, this is insane.\n"); prom_halt(); } treg_base = 0x100fc000000UL; hwmid = ((upaid & 0x3c) << 1) | ((upaid & 0x40) >> 4) | (upaid & 0x3); p->hwmid = hwmid; treg_base += (hwmid << 33UL); treg_base += 0x200UL; for (i = 0; i < 32; i++) { p->imap_slots[i] = 0UL; p->tregs[i] = treg_base + (i * 0x10UL); /* Lets play it safe and not overwrite existing mappings */ if (upa_readl(p->tregs[i]) != 0) p->imap_slots[i] = 0xdeadbeaf; } p->upaid = upaid; p->next = sflist; sflist = p; } unsigned int starfire_translate(unsigned long imap, unsigned int upaid) { struct starfire_irqinfo *p; unsigned int bus_hwmid; unsigned int i; bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f; for (p = sflist; p != NULL; p = p->next) if (p->hwmid == bus_hwmid) break; if (p == NULL) { prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n", ((unsigned long)imap)); prom_halt(); } for (i = 0; i < 32; i++) { if (p->imap_slots[i] == imap || p->imap_slots[i] == 0UL) break; } if (i == 32) { printk("starfire_translate: Are you kidding me?\n"); panic("Lucy in the sky...."); } p->imap_slots[i] = imap; /* map to real upaid */ upaid = (((upaid & 0x3c) << 1) | ((upaid & 0x40) >> 4) | (upaid & 0x3)); upa_writel(upaid, p->tregs[i]); return i; }
gpl-2.0
Fusion-Devices/android_kernel_samsung_jf
fs/fscache/netfs.c
11680
2660
/* FS-Cache netfs (client) registration * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL COOKIE #include <linux/module.h> #include <linux/slab.h> #include "internal.h" static LIST_HEAD(fscache_netfs_list); /* * register a network filesystem for caching */ int __fscache_register_netfs(struct fscache_netfs *netfs) { struct fscache_netfs *ptr; int ret; _enter("{%s}", netfs->name); INIT_LIST_HEAD(&netfs->link); /* allocate a cookie for the primary index */ netfs->primary_index = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); if (!netfs->primary_index) { _leave(" = -ENOMEM"); return -ENOMEM; } /* initialise the primary index cookie */ atomic_set(&netfs->primary_index->usage, 1); atomic_set(&netfs->primary_index->n_children, 0); netfs->primary_index->def = &fscache_fsdef_netfs_def; netfs->primary_index->parent = &fscache_fsdef_index; netfs->primary_index->netfs_data = netfs; atomic_inc(&netfs->primary_index->parent->usage); atomic_inc(&netfs->primary_index->parent->n_children); spin_lock_init(&netfs->primary_index->lock); INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); /* check the netfs type is not already present */ down_write(&fscache_addremove_sem); ret = -EEXIST; list_for_each_entry(ptr, &fscache_netfs_list, link) { if (strcmp(ptr->name, netfs->name) == 0) goto already_registered; } list_add(&netfs->link, &fscache_netfs_list); ret = 0; printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n", netfs->name); already_registered: up_write(&fscache_addremove_sem); if (ret < 0) { netfs->primary_index->parent = NULL; __fscache_cookie_put(netfs->primary_index); netfs->primary_index = NULL; } _leave(" = %d", ret); 
return ret; } EXPORT_SYMBOL(__fscache_register_netfs); /* * unregister a network filesystem from the cache * - all cookies must have been released first */ void __fscache_unregister_netfs(struct fscache_netfs *netfs) { _enter("{%s.%u}", netfs->name, netfs->version); down_write(&fscache_addremove_sem); list_del(&netfs->link); fscache_relinquish_cookie(netfs->primary_index, 0); up_write(&fscache_addremove_sem); printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n", netfs->name); _leave(""); } EXPORT_SYMBOL(__fscache_unregister_netfs);
gpl-2.0
xapp-le/kernel
sound/isa/gus/gus_io.c
14752
17906
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * I/O routines for GF1/InterWave synthesizer chips * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/time.h> #include <sound/core.h> #include <sound/gus.h> void snd_gf1_delay(struct snd_gus_card * gus) { int i; for (i = 0; i < 6; i++) { mb(); inb(GUSP(gus, DRAM)); } } /* * ======================================================================= */ /* * ok.. stop of control registers (wave & ramp) need some special things.. * big UltraClick (tm) elimination... 
*/ static inline void __snd_gf1_ctrl_stop(struct snd_gus_card * gus, unsigned char reg) { unsigned char value; outb(reg | 0x80, gus->gf1.reg_regsel); mb(); value = inb(gus->gf1.reg_data8); mb(); outb(reg, gus->gf1.reg_regsel); mb(); outb((value | 0x03) & ~(0x80 | 0x20), gus->gf1.reg_data8); mb(); } static inline void __snd_gf1_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { outb(reg, gus->gf1.reg_regsel); mb(); outb(data, gus->gf1.reg_data8); mb(); } static inline unsigned char __snd_gf1_look8(struct snd_gus_card * gus, unsigned char reg) { outb(reg, gus->gf1.reg_regsel); mb(); return inb(gus->gf1.reg_data8); } static inline void __snd_gf1_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data) { outb(reg, gus->gf1.reg_regsel); mb(); outw((unsigned short) data, gus->gf1.reg_data16); mb(); } static inline unsigned short __snd_gf1_look16(struct snd_gus_card * gus, unsigned char reg) { outb(reg, gus->gf1.reg_regsel); mb(); return inw(gus->gf1.reg_data16); } static inline void __snd_gf1_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { outb(reg, gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); outb(data, gus->gf1.reg_timerdata); inb(gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); } static inline void __snd_gf1_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, int w_16bit) { if (gus->gf1.enh_mode) { if (w_16bit) addr = ((addr >> 1) & ~0x0000000f) | (addr & 0x0000000f); __snd_gf1_write8(gus, SNDRV_GF1_VB_UPPER_ADDRESS, (unsigned char) ((addr >> 26) & 0x03)); } else if (w_16bit) addr = (addr & 0x00c0000f) | ((addr & 0x003ffff0) >> 1); __snd_gf1_write16(gus, reg, (unsigned short) (addr >> 11)); __snd_gf1_write16(gus, reg + 1, (unsigned short) (addr << 5)); } static inline unsigned int __snd_gf1_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit) { unsigned int res; res = ((unsigned int) __snd_gf1_look16(gus, reg | 
0x80) << 11) & 0xfff800; res |= ((unsigned int) __snd_gf1_look16(gus, (reg + 1) | 0x80) >> 5) & 0x0007ff; if (gus->gf1.enh_mode) { res |= (unsigned int) __snd_gf1_look8(gus, SNDRV_GF1_VB_UPPER_ADDRESS | 0x80) << 26; if (w_16bit) res = ((res << 1) & 0xffffffe0) | (res & 0x0000000f); } else if (w_16bit) res = ((res & 0x001ffff0) << 1) | (res & 0x00c0000f); return res; } /* * ======================================================================= */ void snd_gf1_ctrl_stop(struct snd_gus_card * gus, unsigned char reg) { __snd_gf1_ctrl_stop(gus, reg); } void snd_gf1_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { __snd_gf1_write8(gus, reg, data); } unsigned char snd_gf1_look8(struct snd_gus_card * gus, unsigned char reg) { return __snd_gf1_look8(gus, reg); } void snd_gf1_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data) { __snd_gf1_write16(gus, reg, data); } unsigned short snd_gf1_look16(struct snd_gus_card * gus, unsigned char reg) { return __snd_gf1_look16(gus, reg); } void snd_gf1_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { __snd_gf1_adlib_write(gus, reg, data); } void snd_gf1_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, short w_16bit) { __snd_gf1_write_addr(gus, reg, addr, w_16bit); } unsigned int snd_gf1_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit) { return __snd_gf1_read_addr(gus, reg, w_16bit); } /* */ void snd_gf1_i_ctrl_stop(struct snd_gus_card * gus, unsigned char reg) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); __snd_gf1_ctrl_stop(gus, reg); spin_unlock_irqrestore(&gus->reg_lock, flags); } void snd_gf1_i_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); __snd_gf1_write8(gus, reg, data); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned char snd_gf1_i_look8(struct snd_gus_card * gus, unsigned char reg) { 
unsigned long flags; unsigned char res; spin_lock_irqsave(&gus->reg_lock, flags); res = __snd_gf1_look8(gus, reg); spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } void snd_gf1_i_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); __snd_gf1_write16(gus, reg, data); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned short snd_gf1_i_look16(struct snd_gus_card * gus, unsigned char reg) { unsigned long flags; unsigned short res; spin_lock_irqsave(&gus->reg_lock, flags); res = __snd_gf1_look16(gus, reg); spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } #if 0 void snd_gf1_i_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); __snd_gf1_adlib_write(gus, reg, data); spin_unlock_irqrestore(&gus->reg_lock, flags); } void snd_gf1_i_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, short w_16bit) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); __snd_gf1_write_addr(gus, reg, addr, w_16bit); spin_unlock_irqrestore(&gus->reg_lock, flags); } #endif /* 0 */ #ifdef CONFIG_SND_DEBUG static unsigned int snd_gf1_i_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit) { unsigned int res; unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); res = __snd_gf1_read_addr(gus, reg, w_16bit); spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } #endif /* */ void snd_gf1_dram_addr(struct snd_gus_card * gus, unsigned int addr) { outb(0x43, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(0x44, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); } void snd_gf1_poke(struct snd_gus_card * gus, unsigned int addr, unsigned char data) { unsigned long flags; spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, 
gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(data, gus->gf1.reg_dram); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned char snd_gf1_peek(struct snd_gus_card * gus, unsigned int addr) { unsigned long flags; unsigned char res; spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); res = inb(gus->gf1.reg_dram); spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } #if 0 void snd_gf1_pokew(struct snd_gus_card * gus, unsigned int addr, unsigned short data) { unsigned long flags; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_pokew - GF1!!!\n"); #endif spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); mb(); outw(data, gus->gf1.reg_data16); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned short snd_gf1_peekw(struct snd_gus_card * gus, unsigned int addr) { unsigned long flags; unsigned short res; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_peekw - GF1!!!\n"); #endif spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); mb(); res = inw(gus->gf1.reg_data16); 
spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } void snd_gf1_dram_setmem(struct snd_gus_card * gus, unsigned int addr, unsigned short value, unsigned int count) { unsigned long port; unsigned long flags; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_dram_setmem - GF1!!!\n"); #endif addr &= ~1; count >>= 1; port = GUSP(gus, GF1DATALOW); spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); while (count--) outw(value, port); spin_unlock_irqrestore(&gus->reg_lock, flags); } #endif /* 0 */ void snd_gf1_select_active_voices(struct snd_gus_card * gus) { unsigned short voices; static unsigned short voices_tbl[32 - 14 + 1] = { 44100, 41160, 38587, 36317, 34300, 32494, 30870, 29400, 28063, 26843, 25725, 24696, 23746, 22866, 22050, 21289, 20580, 19916, 19293 }; voices = gus->gf1.active_voices; if (voices > 32) voices = 32; if (voices < 14) voices = 14; if (gus->gf1.enh_mode) voices = 32; gus->gf1.active_voices = voices; gus->gf1.playback_freq = gus->gf1.enh_mode ? 
44100 : voices_tbl[voices - 14]; if (!gus->gf1.enh_mode) { snd_gf1_i_write8(gus, SNDRV_GF1_GB_ACTIVE_VOICES, 0xc0 | (voices - 1)); udelay(100); } } #ifdef CONFIG_SND_DEBUG void snd_gf1_print_voice_registers(struct snd_gus_card * gus) { unsigned char mode; int voice, ctrl; voice = gus->gf1.active_voice; printk(KERN_INFO " -%i- GF1 voice ctrl, ramp ctrl = 0x%x, 0x%x\n", voice, ctrl = snd_gf1_i_read8(gus, 0), snd_gf1_i_read8(gus, 0x0d)); printk(KERN_INFO " -%i- GF1 frequency = 0x%x\n", voice, snd_gf1_i_read16(gus, 1)); printk(KERN_INFO " -%i- GF1 loop start, end = 0x%x (0x%x), 0x%x (0x%x)\n", voice, snd_gf1_i_read_addr(gus, 2, ctrl & 4), snd_gf1_i_read_addr(gus, 2, (ctrl & 4) ^ 4), snd_gf1_i_read_addr(gus, 4, ctrl & 4), snd_gf1_i_read_addr(gus, 4, (ctrl & 4) ^ 4)); printk(KERN_INFO " -%i- GF1 ramp start, end, rate = 0x%x, 0x%x, 0x%x\n", voice, snd_gf1_i_read8(gus, 7), snd_gf1_i_read8(gus, 8), snd_gf1_i_read8(gus, 6)); printk(KERN_INFO" -%i- GF1 volume = 0x%x\n", voice, snd_gf1_i_read16(gus, 9)); printk(KERN_INFO " -%i- GF1 position = 0x%x (0x%x)\n", voice, snd_gf1_i_read_addr(gus, 0x0a, ctrl & 4), snd_gf1_i_read_addr(gus, 0x0a, (ctrl & 4) ^ 4)); if (gus->interwave && snd_gf1_i_read8(gus, 0x19) & 0x01) { /* enhanced mode */ mode = snd_gf1_i_read8(gus, 0x15); printk(KERN_INFO " -%i- GFA1 mode = 0x%x\n", voice, mode); if (mode & 0x01) { /* Effect processor */ printk(KERN_INFO " -%i- GFA1 effect address = 0x%x\n", voice, snd_gf1_i_read_addr(gus, 0x11, ctrl & 4)); printk(KERN_INFO " -%i- GFA1 effect volume = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x16)); printk(KERN_INFO " -%i- GFA1 effect volume final = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x1d)); printk(KERN_INFO " -%i- GFA1 effect acumulator = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x14)); } if (mode & 0x20) { printk(KERN_INFO " -%i- GFA1 left offset = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x13), snd_gf1_i_read16(gus, 0x13) >> 4); printk(KERN_INFO " -%i- GFA1 left offset final = 0x%x (%i)\n", voice, 
snd_gf1_i_read16(gus, 0x1c), snd_gf1_i_read16(gus, 0x1c) >> 4); printk(KERN_INFO " -%i- GFA1 right offset = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x0c), snd_gf1_i_read16(gus, 0x0c) >> 4); printk(KERN_INFO " -%i- GFA1 right offset final = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x1b), snd_gf1_i_read16(gus, 0x1b) >> 4); } else printk(KERN_INFO " -%i- GF1 pan = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x0c)); } else printk(KERN_INFO " -%i- GF1 pan = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x0c)); } #if 0 void snd_gf1_print_global_registers(struct snd_gus_card * gus) { unsigned char global_mode = 0x00; printk(KERN_INFO " -G- GF1 active voices = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_ACTIVE_VOICES)); if (gus->interwave) { global_mode = snd_gf1_i_read8(gus, SNDRV_GF1_GB_GLOBAL_MODE); printk(KERN_INFO " -G- GF1 global mode = 0x%x\n", global_mode); } if (global_mode & 0x02) /* LFO enabled? */ printk(KERN_INFO " -G- GF1 LFO base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_LFO_BASE)); printk(KERN_INFO " -G- GF1 voices IRQ read = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_VOICES_IRQ_READ)); printk(KERN_INFO " -G- GF1 DRAM DMA control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL)); printk(KERN_INFO " -G- GF1 DRAM DMA high/low = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_DMA_HIGH), snd_gf1_i_read16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW)); printk(KERN_INFO " -G- GF1 DRAM IO high/low = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_IO_HIGH), snd_gf1_i_read16(gus, SNDRV_GF1_GW_DRAM_IO_LOW)); if (!gus->interwave) printk(KERN_INFO " -G- GF1 record DMA control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_REC_DMA_CONTROL)); printk(KERN_INFO " -G- GF1 DRAM IO 16 = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_DRAM_IO16)); if (gus->gf1.enh_mode) { printk(KERN_INFO " -G- GFA1 memory config = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG)); printk(KERN_INFO " -G- GFA1 memory control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_MEMORY_CONTROL)); 
printk(KERN_INFO " -G- GFA1 FIFO record base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_FIFO_RECORD_BASE_ADDR)); printk(KERN_INFO " -G- GFA1 FIFO playback base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_FIFO_PLAY_BASE_ADDR)); printk(KERN_INFO " -G- GFA1 interleave control = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_INTERLEAVE)); } } void snd_gf1_print_setup_registers(struct snd_gus_card * gus) { printk(KERN_INFO " -S- mix control = 0x%x\n", inb(GUSP(gus, MIXCNTRLREG))); printk(KERN_INFO " -S- IRQ status = 0x%x\n", inb(GUSP(gus, IRQSTAT))); printk(KERN_INFO " -S- timer control = 0x%x\n", inb(GUSP(gus, TIMERCNTRL))); printk(KERN_INFO " -S- timer data = 0x%x\n", inb(GUSP(gus, TIMERDATA))); printk(KERN_INFO " -S- status read = 0x%x\n", inb(GUSP(gus, REGCNTRLS))); printk(KERN_INFO " -S- Sound Blaster control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL)); printk(KERN_INFO " -S- AdLib timer 1/2 = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_ADLIB_TIMER_1), snd_gf1_i_look8(gus, SNDRV_GF1_GB_ADLIB_TIMER_2)); printk(KERN_INFO " -S- reset = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)); if (gus->interwave) { printk(KERN_INFO " -S- compatibility = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_COMPATIBILITY)); printk(KERN_INFO " -S- decode control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DECODE_CONTROL)); printk(KERN_INFO " -S- version number = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER)); printk(KERN_INFO " -S- MPU-401 emul. control A/B = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_MPU401_CONTROL_A), snd_gf1_i_look8(gus, SNDRV_GF1_GB_MPU401_CONTROL_B)); printk(KERN_INFO " -S- emulation IRQ = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_EMULATION_IRQ)); } } void snd_gf1_peek_print_block(struct snd_gus_card * gus, unsigned int addr, int count, int w_16bit) { if (!w_16bit) { while (count-- > 0) printk(count > 0 ? "%02x:" : "%02x", snd_gf1_peek(gus, addr++)); } else { while (count-- > 0) { printk(count > 0 ? 
"%04x:" : "%04x", snd_gf1_peek(gus, addr) | (snd_gf1_peek(gus, addr + 1) << 8)); addr += 2; } } } #endif /* 0 */ #endif
gpl-2.0
julian-klode/linux
drivers/base/firmware_class.c
161
40811
/* * firmware_class.c - Multi purpose firmware loading support * * Copyright (c) 2003 Manuel Estrada Sainz * * Please see Documentation/firmware_class/ for more information. * */ #include <linux/capability.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/highmem.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/list.h> #include <linux/async.h> #include <linux/pm.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> #include <generated/utsrelease.h> #include "base.h" MODULE_AUTHOR("Manuel Estrada Sainz"); MODULE_DESCRIPTION("Multi purpose firmware loading support"); MODULE_LICENSE("GPL"); /* Builtin firmware support */ #ifdef CONFIG_FW_LOADER extern struct builtin_fw __start_builtin_fw[]; extern struct builtin_fw __end_builtin_fw[]; static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { if (strcmp(name, b_fw->name) == 0) { fw->size = b_fw->size; fw->data = b_fw->data; return true; } } return false; } static bool fw_is_builtin_firmware(const struct firmware *fw) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) if (fw->data == b_fw->data) return true; return false; } #else /* Module case - no builtin firmware support */ static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { return false; } static inline bool fw_is_builtin_firmware(const struct firmware *fw) { return false; } #endif enum { FW_STATUS_LOADING, FW_STATUS_DONE, FW_STATUS_ABORT, }; static int loading_timeout = 60; /* In seconds */ static inline long firmware_loading_timeout(void) { return 
loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET; } /* firmware behavior options */ #define FW_OPT_UEVENT (1U << 0) #define FW_OPT_NOWAIT (1U << 1) #ifdef CONFIG_FW_LOADER_USER_HELPER #define FW_OPT_USERHELPER (1U << 2) #else #define FW_OPT_USERHELPER 0 #endif #ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK #define FW_OPT_FALLBACK FW_OPT_USERHELPER #else #define FW_OPT_FALLBACK 0 #endif #define FW_OPT_NO_WARN (1U << 3) struct firmware_cache { /* firmware_buf instance will be added into the below list */ spinlock_t lock; struct list_head head; int state; #ifdef CONFIG_PM_SLEEP /* * Names of firmware images which have been cached successfully * will be added into the below list so that device uncache * helper can trace which firmware images have been cached * before. */ spinlock_t name_lock; struct list_head fw_names; struct delayed_work work; struct notifier_block pm_notify; #endif }; struct firmware_buf { struct kref ref; struct list_head list; struct completion completion; struct firmware_cache *fwc; unsigned long status; void *data; size_t size; #ifdef CONFIG_FW_LOADER_USER_HELPER bool is_paged_buf; bool need_uevent; struct page **pages; int nr_pages; int page_array_size; struct list_head pending_list; #endif const char *fw_id; }; struct fw_cache_entry { struct list_head list; const char *name; }; struct fw_name_devm { unsigned long magic; const char *name; }; #define to_fwbuf(d) container_of(d, struct firmware_buf, ref) #define FW_LOADER_NO_CACHE 0 #define FW_LOADER_START_CACHE 1 static int fw_cache_piggyback_on_request(const char *name); /* fw_lock could be moved to 'struct firmware_priv' but since it is just * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); static struct firmware_cache fw_cache; static struct firmware_buf *__allocate_fw_buf(const char *fw_name, struct firmware_cache *fwc) { struct firmware_buf *buf; buf = kzalloc(sizeof(*buf), GFP_ATOMIC); if (!buf) return NULL; buf->fw_id = 
kstrdup_const(fw_name, GFP_ATOMIC); if (!buf->fw_id) { kfree(buf); return NULL; } kref_init(&buf->ref); buf->fwc = fwc; init_completion(&buf->completion); #ifdef CONFIG_FW_LOADER_USER_HELPER INIT_LIST_HEAD(&buf->pending_list); #endif pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf); return buf; } static struct firmware_buf *__fw_lookup_buf(const char *fw_name) { struct firmware_buf *tmp; struct firmware_cache *fwc = &fw_cache; list_for_each_entry(tmp, &fwc->head, list) if (!strcmp(tmp->fw_id, fw_name)) return tmp; return NULL; } static int fw_lookup_and_allocate_buf(const char *fw_name, struct firmware_cache *fwc, struct firmware_buf **buf) { struct firmware_buf *tmp; spin_lock(&fwc->lock); tmp = __fw_lookup_buf(fw_name); if (tmp) { kref_get(&tmp->ref); spin_unlock(&fwc->lock); *buf = tmp; return 1; } tmp = __allocate_fw_buf(fw_name, fwc); if (tmp) list_add(&tmp->list, &fwc->head); spin_unlock(&fwc->lock); *buf = tmp; return tmp ? 0 : -ENOMEM; } static void __fw_free_buf(struct kref *ref) __releases(&fwc->lock) { struct firmware_buf *buf = to_fwbuf(ref); struct firmware_cache *fwc = buf->fwc; pr_debug("%s: fw-%s buf=%p data=%p size=%u\n", __func__, buf->fw_id, buf, buf->data, (unsigned int)buf->size); list_del(&buf->list); spin_unlock(&fwc->lock); #ifdef CONFIG_FW_LOADER_USER_HELPER if (buf->is_paged_buf) { int i; vunmap(buf->data); for (i = 0; i < buf->nr_pages; i++) __free_page(buf->pages[i]); kfree(buf->pages); } else #endif vfree(buf->data); kfree_const(buf->fw_id); kfree(buf); } static void fw_free_buf(struct firmware_buf *buf) { struct firmware_cache *fwc = buf->fwc; spin_lock(&fwc->lock); if (!kref_put(&buf->ref, __fw_free_buf)) spin_unlock(&fwc->lock); } /* direct firmware loading support */ static char fw_path_para[256]; static const char * const fw_path[] = { fw_path_para, "/lib/firmware/updates/" UTS_RELEASE, "/lib/firmware/updates", "/lib/firmware/" UTS_RELEASE, "/lib/firmware" }; /* * Typical usage is that passing 
'firmware_class.path=$CUSTOMIZED_PATH' * from kernel command line because firmware_class is generally built in * kernel instead of module. */ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) { int size; char *buf; int rc; if (!S_ISREG(file_inode(file)->i_mode)) return -EINVAL; size = i_size_read(file_inode(file)); if (size <= 0) return -EINVAL; buf = vmalloc(size); if (!buf) return -ENOMEM; rc = kernel_read(file, 0, buf, size); if (rc != size) { if (rc > 0) rc = -EIO; goto fail; } rc = security_kernel_fw_from_file(file, buf, size); if (rc) goto fail; fw_buf->data = buf; fw_buf->size = size; return 0; fail: vfree(buf); return rc; } static int fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf) { int i, len; int rc = -ENOENT; char *path; path = __getname(); if (!path) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(fw_path); i++) { struct file *file; /* skip the unset customized path */ if (!fw_path[i][0]) continue; len = snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id); if (len >= PATH_MAX) { rc = -ENAMETOOLONG; break; } file = filp_open(path, O_RDONLY, 0); if (IS_ERR(file)) continue; rc = fw_read_file_contents(file, buf); fput(file); if (rc) dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n", path, rc); else break; } __putname(path); if (!rc) { dev_dbg(device, "firmware: direct-loading firmware %s\n", buf->fw_id); mutex_lock(&fw_lock); set_bit(FW_STATUS_DONE, &buf->status); complete_all(&buf->completion); mutex_unlock(&fw_lock); } return rc; } /* firmware holds the ownership of pages */ static void firmware_free_data(const struct firmware *fw) { /* Loaded directly? 
*/ if (!fw->priv) { vfree(fw->data); return; } fw_free_buf(fw->priv); } /* store the pages buffer info firmware from buf */ static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw) { fw->priv = buf; #ifdef CONFIG_FW_LOADER_USER_HELPER fw->pages = buf->pages; #endif fw->size = buf->size; fw->data = buf->data; pr_debug("%s: fw-%s buf=%p data=%p size=%u\n", __func__, buf->fw_id, buf, buf->data, (unsigned int)buf->size); } #ifdef CONFIG_PM_SLEEP static void fw_name_devm_release(struct device *dev, void *res) { struct fw_name_devm *fwn = res; if (fwn->magic == (unsigned long)&fw_cache) pr_debug("%s: fw_name-%s devm-%p released\n", __func__, fwn->name, res); kfree_const(fwn->name); } static int fw_devm_match(struct device *dev, void *res, void *match_data) { struct fw_name_devm *fwn = res; return (fwn->magic == (unsigned long)&fw_cache) && !strcmp(fwn->name, match_data); } static struct fw_name_devm *fw_find_devm_name(struct device *dev, const char *name) { struct fw_name_devm *fwn; fwn = devres_find(dev, fw_name_devm_release, fw_devm_match, (void *)name); return fwn; } /* add firmware name into devres list */ static int fw_add_devm_name(struct device *dev, const char *name) { struct fw_name_devm *fwn; fwn = fw_find_devm_name(dev, name); if (fwn) return 1; fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm), GFP_KERNEL); if (!fwn) return -ENOMEM; fwn->name = kstrdup_const(name, GFP_KERNEL); if (!fwn->name) { kfree(fwn); return -ENOMEM; } fwn->magic = (unsigned long)&fw_cache; devres_add(dev, fwn); return 0; } #else static int fw_add_devm_name(struct device *dev, const char *name) { return 0; } #endif /* * user-mode helper code */ #ifdef CONFIG_FW_LOADER_USER_HELPER struct firmware_priv { bool nowait; struct device dev; struct firmware_buf *buf; struct firmware *fw; }; static struct firmware_priv *to_firmware_priv(struct device *dev) { return container_of(dev, struct firmware_priv, dev); } static void __fw_load_abort(struct firmware_buf 
*buf) { /* * There is a small window in which user can write to 'loading' * between loading done and disappearance of 'loading' */ if (test_bit(FW_STATUS_DONE, &buf->status)) return; list_del_init(&buf->pending_list); set_bit(FW_STATUS_ABORT, &buf->status); complete_all(&buf->completion); } static void fw_load_abort(struct firmware_priv *fw_priv) { struct firmware_buf *buf = fw_priv->buf; __fw_load_abort(buf); /* avoid user action after loading abort */ fw_priv->buf = NULL; } #define is_fw_load_aborted(buf) \ test_bit(FW_STATUS_ABORT, &(buf)->status) static LIST_HEAD(pending_fw_head); /* reboot notifier for avoid deadlock with usermode_lock */ static int fw_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { mutex_lock(&fw_lock); while (!list_empty(&pending_fw_head)) __fw_load_abort(list_first_entry(&pending_fw_head, struct firmware_buf, pending_list)); mutex_unlock(&fw_lock); return NOTIFY_DONE; } static struct notifier_block fw_shutdown_nb = { .notifier_call = fw_shutdown_notify, }; static ssize_t timeout_show(struct class *class, struct class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", loading_timeout); } /** * firmware_timeout_store - set number of seconds to wait for firmware * @class: device class pointer * @attr: device attribute pointer * @buf: buffer to scan for timeout value * @count: number of bytes in @buf * * Sets the number of seconds to wait for the firmware. Once * this expires an error will be returned to the driver and no * firmware will be provided. * * Note: zero means 'wait forever'. 
**/ static ssize_t timeout_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { loading_timeout = simple_strtol(buf, NULL, 10); if (loading_timeout < 0) loading_timeout = 0; return count; } static struct class_attribute firmware_class_attrs[] = { __ATTR_RW(timeout), __ATTR_NULL }; static void fw_dev_release(struct device *dev) { struct firmware_priv *fw_priv = to_firmware_priv(dev); kfree(fw_priv); } static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env) { if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id)) return -ENOMEM; if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) return -ENOMEM; if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait)) return -ENOMEM; return 0; } static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int err = 0; mutex_lock(&fw_lock); if (fw_priv->buf) err = do_firmware_uevent(fw_priv, env); mutex_unlock(&fw_lock); return err; } static struct class firmware_class = { .name = "firmware", .class_attrs = firmware_class_attrs, .dev_uevent = firmware_uevent, .dev_release = fw_dev_release, }; static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int loading = 0; mutex_lock(&fw_lock); if (fw_priv->buf) loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); mutex_unlock(&fw_lock); return sprintf(buf, "%d\n", loading); } /* Some architectures don't have PAGE_KERNEL_RO */ #ifndef PAGE_KERNEL_RO #define PAGE_KERNEL_RO PAGE_KERNEL #endif /* one pages buffer should be mapped/unmapped only once */ static int fw_map_pages_buf(struct firmware_buf *buf) { if (!buf->is_paged_buf) return 0; vunmap(buf->data); buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO); if (!buf->data) return -ENOMEM; return 0; } /** * firmware_loading_store - set value in the 'loading' control file * @dev: 
device pointer * @attr: device attribute pointer * @buf: buffer to scan for loading control value * @count: number of bytes in @buf * * The relevant values are: * * 1: Start a load, discarding any previous partial load. * 0: Conclude the load and hand the data to the driver code. * -1: Conclude the load with an error and discard any written data. **/ static ssize_t firmware_loading_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware_buf *fw_buf; ssize_t written = count; int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); fw_buf = fw_priv->buf; if (!fw_buf) goto out; switch (loading) { case 1: /* discarding any previous partial load */ if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) { for (i = 0; i < fw_buf->nr_pages; i++) __free_page(fw_buf->pages[i]); kfree(fw_buf->pages); fw_buf->pages = NULL; fw_buf->page_array_size = 0; fw_buf->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_buf->status); } break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) { int rc; set_bit(FW_STATUS_DONE, &fw_buf->status); clear_bit(FW_STATUS_LOADING, &fw_buf->status); /* * Several loading requests may be pending on * one same firmware buf, so let all requests * see the mapped 'buf->data' once the loading * is completed. * */ rc = fw_map_pages_buf(fw_buf); if (rc) dev_err(dev, "%s: map pages failed\n", __func__); else rc = security_kernel_fw_from_file(NULL, fw_buf->data, fw_buf->size); /* * Same logic as fw_load_abort, only the DONE bit * is ignored and we set ABORT only on failure. 
*/ list_del_init(&fw_buf->pending_list); if (rc) { set_bit(FW_STATUS_ABORT, &fw_buf->status); written = rc; } complete_all(&fw_buf->completion); break; } /* fallthrough */ default: dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); /* fallthrough */ case -1: fw_load_abort(fw_priv); break; } out: mutex_unlock(&fw_lock); return written; } static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware_buf *buf; ssize_t ret_count; mutex_lock(&fw_lock); buf = fw_priv->buf; if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) { ret_count = -ENODEV; goto out; } if (offset > buf->size) { ret_count = 0; goto out; } if (count > buf->size - offset) count = buf->size - offset; ret_count = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE-1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(buf->pages[page_nr]); memcpy(buffer, page_data + page_ofs, page_cnt); kunmap(buf->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } out: mutex_unlock(&fw_lock); return ret_count; } static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { struct firmware_buf *buf = fw_priv->buf; int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT; /* If the array of pages is too small, grow it... 
*/ if (buf->page_array_size < pages_needed) { int new_array_size = max(pages_needed, buf->page_array_size * 2); struct page **new_pages; new_pages = kmalloc(new_array_size * sizeof(void *), GFP_KERNEL); if (!new_pages) { fw_load_abort(fw_priv); return -ENOMEM; } memcpy(new_pages, buf->pages, buf->page_array_size * sizeof(void *)); memset(&new_pages[buf->page_array_size], 0, sizeof(void *) * (new_array_size - buf->page_array_size)); kfree(buf->pages); buf->pages = new_pages; buf->page_array_size = new_array_size; } while (buf->nr_pages < pages_needed) { buf->pages[buf->nr_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!buf->pages[buf->nr_pages]) { fw_load_abort(fw_priv); return -ENOMEM; } buf->nr_pages++; } return 0; } /** * firmware_data_write - write method for firmware * @filp: open sysfs file * @kobj: kobject for the device * @bin_attr: bin_attr structure * @buffer: buffer being written * @offset: buffer offset for write in total data store area * @count: buffer size * * Data written to the 'data' attribute will be later handed to * the driver as a firmware image. 
**/ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware_buf *buf; ssize_t retval; if (!capable(CAP_SYS_RAWIO)) return -EPERM; mutex_lock(&fw_lock); buf = fw_priv->buf; if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) { retval = -ENODEV; goto out; } retval = fw_realloc_buffer(fw_priv, offset + count); if (retval) goto out; retval = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE - 1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(buf->pages[page_nr]); memcpy(page_data + page_ofs, buffer, page_cnt); kunmap(buf->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } buf->size = max_t(size_t, offset, buf->size); out: mutex_unlock(&fw_lock); return retval; } static struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, .read = firmware_data_read, .write = firmware_data_write, }; static struct attribute *fw_dev_attrs[] = { &dev_attr_loading.attr, NULL }; static struct bin_attribute *fw_dev_bin_attrs[] = { &firmware_attr_data, NULL }; static const struct attribute_group fw_dev_attr_group = { .attrs = fw_dev_attrs, .bin_attrs = fw_dev_bin_attrs, }; static const struct attribute_group *fw_dev_attr_groups[] = { &fw_dev_attr_group, NULL }; static struct firmware_priv * fw_create_instance(struct firmware *firmware, const char *fw_name, struct device *device, unsigned int opt_flags) { struct firmware_priv *fw_priv; struct device *f_dev; fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL); if (!fw_priv) { fw_priv = ERR_PTR(-ENOMEM); goto exit; } fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT); fw_priv->fw = firmware; f_dev = &fw_priv->dev; device_initialize(f_dev); dev_set_name(f_dev, "%s", fw_name); f_dev->parent = 
device; f_dev->class = &firmware_class; f_dev->groups = fw_dev_attr_groups; exit: return fw_priv; } /* load a firmware via user helper */ static int _request_firmware_load(struct firmware_priv *fw_priv, unsigned int opt_flags, long timeout) { int retval = 0; struct device *f_dev = &fw_priv->dev; struct firmware_buf *buf = fw_priv->buf; /* fall back on userspace loading */ buf->is_paged_buf = true; dev_set_uevent_suppress(f_dev, true); retval = device_add(f_dev); if (retval) { dev_err(f_dev, "%s: device_register failed\n", __func__); goto err_put_dev; } mutex_lock(&fw_lock); list_add(&buf->pending_list, &pending_fw_head); mutex_unlock(&fw_lock); if (opt_flags & FW_OPT_UEVENT) { buf->need_uevent = true; dev_set_uevent_suppress(f_dev, false); dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id); kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); } else { timeout = MAX_JIFFY_OFFSET; } retval = wait_for_completion_interruptible_timeout(&buf->completion, timeout); if (retval == -ERESTARTSYS || !retval) { mutex_lock(&fw_lock); fw_load_abort(fw_priv); mutex_unlock(&fw_lock); } else if (retval > 0) { retval = 0; } if (is_fw_load_aborted(buf)) retval = -EAGAIN; else if (!buf->data) retval = -ENOMEM; device_del(f_dev); err_put_dev: put_device(f_dev); return retval; } static int fw_load_from_user_helper(struct firmware *firmware, const char *name, struct device *device, unsigned int opt_flags, long timeout) { struct firmware_priv *fw_priv; fw_priv = fw_create_instance(firmware, name, device, opt_flags); if (IS_ERR(fw_priv)) return PTR_ERR(fw_priv); fw_priv->buf = firmware->priv; return _request_firmware_load(fw_priv, opt_flags, timeout); } #ifdef CONFIG_PM_SLEEP /* kill pending requests without uevent to avoid blocking suspend */ static void kill_requests_without_uevent(void) { struct firmware_buf *buf; struct firmware_buf *next; mutex_lock(&fw_lock); list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) { if (!buf->need_uevent) __fw_load_abort(buf); } 
mutex_unlock(&fw_lock); } #endif #else /* CONFIG_FW_LOADER_USER_HELPER */ static inline int fw_load_from_user_helper(struct firmware *firmware, const char *name, struct device *device, unsigned int opt_flags, long timeout) { return -ENOENT; } /* No abort during direct loading */ #define is_fw_load_aborted(buf) false #ifdef CONFIG_PM_SLEEP static inline void kill_requests_without_uevent(void) { } #endif #endif /* CONFIG_FW_LOADER_USER_HELPER */ /* wait until the shared firmware_buf becomes ready (or error) */ static int sync_cached_firmware_buf(struct firmware_buf *buf) { int ret = 0; mutex_lock(&fw_lock); while (!test_bit(FW_STATUS_DONE, &buf->status)) { if (is_fw_load_aborted(buf)) { ret = -ENOENT; break; } mutex_unlock(&fw_lock); ret = wait_for_completion_interruptible(&buf->completion); mutex_lock(&fw_lock); } mutex_unlock(&fw_lock); return ret; } /* prepare firmware and firmware_buf structs; * return 0 if a firmware is already assigned, 1 if need to load one, * or a negative error code */ static int _request_firmware_prepare(struct firmware **firmware_p, const char *name, struct device *device) { struct firmware *firmware; struct firmware_buf *buf; int ret; *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); return -ENOMEM; } if (fw_get_builtin_firmware(firmware, name)) { dev_dbg(device, "firmware: using built-in firmware %s\n", name); return 0; /* assigned */ } ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf); /* * bind with 'buf' now to avoid warning in failure path * of requesting firmware. 
*/ firmware->priv = buf; if (ret > 0) { ret = sync_cached_firmware_buf(buf); if (!ret) { fw_set_page_data(buf, firmware); return 0; /* assigned */ } } if (ret < 0) return ret; return 1; /* need to load */ } static int assign_firmware_buf(struct firmware *fw, struct device *device, unsigned int opt_flags) { struct firmware_buf *buf = fw->priv; mutex_lock(&fw_lock); if (!buf->size || is_fw_load_aborted(buf)) { mutex_unlock(&fw_lock); return -ENOENT; } /* * add firmware name into devres list so that we can auto cache * and uncache firmware for device. * * device may has been deleted already, but the problem * should be fixed in devres or driver core. */ /* don't cache firmware handled without uevent */ if (device && (opt_flags & FW_OPT_UEVENT)) fw_add_devm_name(device, buf->fw_id); /* * After caching firmware image is started, let it piggyback * on request firmware. */ if (buf->fwc->state == FW_LOADER_START_CACHE) { if (fw_cache_piggyback_on_request(buf->fw_id)) kref_get(&buf->ref); } /* pass the pages buffer to driver at the last minute */ fw_set_page_data(buf, fw); mutex_unlock(&fw_lock); return 0; } /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, struct device *device, unsigned int opt_flags) { struct firmware *fw; long timeout; int ret; if (!firmware_p) return -EINVAL; if (!name || name[0] == '\0') return -EINVAL; ret = _request_firmware_prepare(&fw, name, device); if (ret <= 0) /* error or already assigned */ goto out; ret = 0; timeout = firmware_loading_timeout(); if (opt_flags & FW_OPT_NOWAIT) { timeout = usermodehelper_read_lock_wait(timeout); if (!timeout) { dev_dbg(device, "firmware: %s loading timed out\n", name); ret = -EBUSY; goto out; } } else { ret = usermodehelper_read_trylock(); if (WARN_ON(ret)) { dev_err(device, "firmware: %s will not be loaded\n", name); goto out; } } ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { if (!(opt_flags 
& FW_OPT_NO_WARN)) dev_warn(device, "Direct firmware load for %s failed with error %d\n", name, ret); if (opt_flags & FW_OPT_USERHELPER) { dev_warn(device, "Falling back to user helper\n"); ret = fw_load_from_user_helper(fw, name, device, opt_flags, timeout); } } if (!ret) ret = assign_firmware_buf(fw, device, opt_flags); usermodehelper_read_unlock(); out: if (ret < 0) { release_firmware(fw); fw = NULL; } *firmware_p = fw; return ret; } /** * request_firmware: - send firmware request and wait for it * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * @firmware_p will be used to return a firmware image by the name * of @name for device @device. * * Should be called from user context where sleeping is allowed. * * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. * * Caller must hold the reference count of @device. * * The function can be called safely inside device's suspend and * resume callback. **/ int request_firmware(const struct firmware **firmware_p, const char *name, struct device *device) { int ret; /* Need to pin this module until return */ __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT | FW_OPT_FALLBACK); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL(request_firmware); /** * request_firmware_direct: - load firmware directly without usermode helper * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * This function works pretty much like request_firmware(), but this doesn't * fall back to usermode helper even if the firmware couldn't be loaded * directly from fs. Hence it's useful for loading optional firmwares, which * aren't always present, without extra long timeouts of udev. 
**/ int request_firmware_direct(const struct firmware **firmware_p, const char *name, struct device *device) { int ret; __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT | FW_OPT_NO_WARN); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(request_firmware_direct); /** * release_firmware: - release the resource associated with a firmware image * @fw: firmware resource to release **/ void release_firmware(const struct firmware *fw) { if (fw) { if (!fw_is_builtin_firmware(fw)) firmware_free_data(fw); kfree(fw); } } EXPORT_SYMBOL(release_firmware); /* Async support */ struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); unsigned int opt_flags; }; static void request_firmware_work_func(struct work_struct *work) { struct firmware_work *fw_work; const struct firmware *fw; fw_work = container_of(work, struct firmware_work, work); _request_firmware(&fw, fw_work->name, fw_work->device, fw_work->opt_flags); fw_work->cont(fw, fw_work->context); put_device(fw_work->device); /* taken in request_firmware_nowait() */ module_put(fw_work->module); kfree_const(fw_work->name); kfree(fw_work); } /** * request_firmware_nowait - asynchronous version of request_firmware * @module: module requesting the firmware * @uevent: sends uevent to copy the firmware image if this flag * is non-zero else the firmware copy must be done manually. * @name: name of firmware file * @device: device for which firmware is being loaded * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. * @cont: function will be called asynchronously when the firmware * request is over. * * Caller must hold the reference count of @device. 
* * Asynchronous variant of request_firmware() for user contexts: * - sleep for as small periods as possible since it may * increase kernel boot time of built-in device drivers * requesting firmware in their ->probe() methods, if * @gfp is GFP_KERNEL. * * - can't sleep at all if @gfp is GFP_ATOMIC. **/ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { struct firmware_work *fw_work; fw_work = kzalloc(sizeof(struct firmware_work), gfp); if (!fw_work) return -ENOMEM; fw_work->module = module; fw_work->name = kstrdup_const(name, gfp); if (!fw_work->name) { kfree(fw_work); return -ENOMEM; } fw_work->device = device; fw_work->context = context; fw_work->cont = cont; fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); if (!try_module_get(module)) { kfree_const(fw_work->name); kfree(fw_work); return -EFAULT; } get_device(fw_work->device); INIT_WORK(&fw_work->work, request_firmware_work_func); schedule_work(&fw_work->work); return 0; } EXPORT_SYMBOL(request_firmware_nowait); #ifdef CONFIG_PM_SLEEP static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); /** * cache_firmware - cache one firmware image in kernel memory space * @fw_name: the firmware image name * * Cache firmware in kernel memory so that drivers can use it when * system isn't ready for them to request firmware image from userspace. 
* Once it returns successfully, driver can use request_firmware or its * nowait version to get the cached firmware without any interacting * with userspace * * Return 0 if the firmware image has been cached successfully * Return !0 otherwise * */ static int cache_firmware(const char *fw_name) { int ret; const struct firmware *fw; pr_debug("%s: %s\n", __func__, fw_name); ret = request_firmware(&fw, fw_name, NULL); if (!ret) kfree(fw); pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret); return ret; } static struct firmware_buf *fw_lookup_buf(const char *fw_name) { struct firmware_buf *tmp; struct firmware_cache *fwc = &fw_cache; spin_lock(&fwc->lock); tmp = __fw_lookup_buf(fw_name); spin_unlock(&fwc->lock); return tmp; } /** * uncache_firmware - remove one cached firmware image * @fw_name: the firmware image name * * Uncache one firmware image which has been cached successfully * before. * * Return 0 if the firmware cache has been removed successfully * Return !0 otherwise * */ static int uncache_firmware(const char *fw_name) { struct firmware_buf *buf; struct firmware fw; pr_debug("%s: %s\n", __func__, fw_name); if (fw_get_builtin_firmware(&fw, fw_name)) return 0; buf = fw_lookup_buf(fw_name); if (buf) { fw_free_buf(buf); return 0; } return -EINVAL; } static struct fw_cache_entry *alloc_fw_cache_entry(const char *name) { struct fw_cache_entry *fce; fce = kzalloc(sizeof(*fce), GFP_ATOMIC); if (!fce) goto exit; fce->name = kstrdup_const(name, GFP_ATOMIC); if (!fce->name) { kfree(fce); fce = NULL; goto exit; } exit: return fce; } static int __fw_entry_found(const char *name) { struct firmware_cache *fwc = &fw_cache; struct fw_cache_entry *fce; list_for_each_entry(fce, &fwc->fw_names, list) { if (!strcmp(fce->name, name)) return 1; } return 0; } static int fw_cache_piggyback_on_request(const char *name) { struct firmware_cache *fwc = &fw_cache; struct fw_cache_entry *fce; int ret = 0; spin_lock(&fwc->name_lock); if (__fw_entry_found(name)) goto found; fce = 
alloc_fw_cache_entry(name); if (fce) { ret = 1; list_add(&fce->list, &fwc->fw_names); pr_debug("%s: fw: %s\n", __func__, name); } found: spin_unlock(&fwc->name_lock); return ret; } static void free_fw_cache_entry(struct fw_cache_entry *fce) { kfree_const(fce->name); kfree(fce); } static void __async_dev_cache_fw_image(void *fw_entry, async_cookie_t cookie) { struct fw_cache_entry *fce = fw_entry; struct firmware_cache *fwc = &fw_cache; int ret; ret = cache_firmware(fce->name); if (ret) { spin_lock(&fwc->name_lock); list_del(&fce->list); spin_unlock(&fwc->name_lock); free_fw_cache_entry(fce); } } /* called with dev->devres_lock held */ static void dev_create_fw_entry(struct device *dev, void *res, void *data) { struct fw_name_devm *fwn = res; const char *fw_name = fwn->name; struct list_head *head = data; struct fw_cache_entry *fce; fce = alloc_fw_cache_entry(fw_name); if (fce) list_add(&fce->list, head); } static int devm_name_match(struct device *dev, void *res, void *match_data) { struct fw_name_devm *fwn = res; return (fwn->magic == (unsigned long)match_data); } static void dev_cache_fw_image(struct device *dev, void *data) { LIST_HEAD(todo); struct fw_cache_entry *fce; struct fw_cache_entry *fce_next; struct firmware_cache *fwc = &fw_cache; devres_for_each_res(dev, fw_name_devm_release, devm_name_match, &fw_cache, dev_create_fw_entry, &todo); list_for_each_entry_safe(fce, fce_next, &todo, list) { list_del(&fce->list); spin_lock(&fwc->name_lock); /* only one cache entry for one firmware */ if (!__fw_entry_found(fce->name)) { list_add(&fce->list, &fwc->fw_names); } else { free_fw_cache_entry(fce); fce = NULL; } spin_unlock(&fwc->name_lock); if (fce) async_schedule_domain(__async_dev_cache_fw_image, (void *)fce, &fw_cache_domain); } } static void __device_uncache_fw_images(void) { struct firmware_cache *fwc = &fw_cache; struct fw_cache_entry *fce; spin_lock(&fwc->name_lock); while (!list_empty(&fwc->fw_names)) { fce = list_entry(fwc->fw_names.next, struct 
fw_cache_entry, list); list_del(&fce->list); spin_unlock(&fwc->name_lock); uncache_firmware(fce->name); free_fw_cache_entry(fce); spin_lock(&fwc->name_lock); } spin_unlock(&fwc->name_lock); } /** * device_cache_fw_images - cache devices' firmware * * If one device called request_firmware or its nowait version * successfully before, the firmware names are recored into the * device's devres link list, so device_cache_fw_images can call * cache_firmware() to cache these firmwares for the device, * then the device driver can load its firmwares easily at * time when system is not ready to complete loading firmware. */ static void device_cache_fw_images(void) { struct firmware_cache *fwc = &fw_cache; int old_timeout; DEFINE_WAIT(wait); pr_debug("%s\n", __func__); /* cancel uncache work */ cancel_delayed_work_sync(&fwc->work); /* * use small loading timeout for caching devices' firmware * because all these firmware images have been loaded * successfully at lease once, also system is ready for * completing firmware loading now. The maximum size of * firmware in current distributions is about 2M bytes, * so 10 secs should be enough. 
*/ old_timeout = loading_timeout; loading_timeout = 10; mutex_lock(&fw_lock); fwc->state = FW_LOADER_START_CACHE; dpm_for_each_dev(NULL, dev_cache_fw_image); mutex_unlock(&fw_lock); /* wait for completion of caching firmware for all devices */ async_synchronize_full_domain(&fw_cache_domain); loading_timeout = old_timeout; } /** * device_uncache_fw_images - uncache devices' firmware * * uncache all firmwares which have been cached successfully * by device_uncache_fw_images earlier */ static void device_uncache_fw_images(void) { pr_debug("%s\n", __func__); __device_uncache_fw_images(); } static void device_uncache_fw_images_work(struct work_struct *work) { device_uncache_fw_images(); } /** * device_uncache_fw_images_delay - uncache devices firmwares * @delay: number of milliseconds to delay uncache device firmwares * * uncache all devices's firmwares which has been cached successfully * by device_cache_fw_images after @delay milliseconds. */ static void device_uncache_fw_images_delay(unsigned long delay) { queue_delayed_work(system_power_efficient_wq, &fw_cache.work, msecs_to_jiffies(delay)); } static int fw_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: case PM_RESTORE_PREPARE: kill_requests_without_uevent(); device_cache_fw_images(); break; case PM_POST_SUSPEND: case PM_POST_HIBERNATION: case PM_POST_RESTORE: /* * In case that system sleep failed and syscore_suspend is * not called. 
*/ mutex_lock(&fw_lock); fw_cache.state = FW_LOADER_NO_CACHE; mutex_unlock(&fw_lock); device_uncache_fw_images_delay(10 * MSEC_PER_SEC); break; } return 0; } /* stop caching firmware once syscore_suspend is reached */ static int fw_suspend(void) { fw_cache.state = FW_LOADER_NO_CACHE; return 0; } static struct syscore_ops fw_syscore_ops = { .suspend = fw_suspend, }; #else static int fw_cache_piggyback_on_request(const char *name) { return 0; } #endif static void __init fw_cache_init(void) { spin_lock_init(&fw_cache.lock); INIT_LIST_HEAD(&fw_cache.head); fw_cache.state = FW_LOADER_NO_CACHE; #ifdef CONFIG_PM_SLEEP spin_lock_init(&fw_cache.name_lock); INIT_LIST_HEAD(&fw_cache.fw_names); INIT_DELAYED_WORK(&fw_cache.work, device_uncache_fw_images_work); fw_cache.pm_notify.notifier_call = fw_pm_notify; register_pm_notifier(&fw_cache.pm_notify); register_syscore_ops(&fw_syscore_ops); #endif } static int __init firmware_class_init(void) { fw_cache_init(); #ifdef CONFIG_FW_LOADER_USER_HELPER register_reboot_notifier(&fw_shutdown_nb); return class_register(&firmware_class); #else return 0; #endif } static void __exit firmware_class_exit(void) { #ifdef CONFIG_PM_SLEEP unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); #endif #ifdef CONFIG_FW_LOADER_USER_HELPER unregister_reboot_notifier(&fw_shutdown_nb); class_unregister(&firmware_class); #endif } fs_initcall(firmware_class_init); module_exit(firmware_class_exit);
gpl-2.0
iperminov/linux-tion270
drivers/staging/lustre/lnet/lnet/lib-msg.c
161
16969
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
* * lnet/lnet/lib-msg.c * * Message decoding, parsing and finalizing routines */ #define DEBUG_SUBSYSTEM S_LNET #include <linux/lnet/lib-lnet.h> void lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev) { memset(ev, 0, sizeof(*ev)); ev->status = 0; ev->unlinked = 1; ev->type = LNET_EVENT_UNLINK; lnet_md_deconstruct(md, &ev->md); lnet_md2handle(&ev->md_handle, md); } /* * Don't need any lock, must be called after lnet_commit_md */ void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) { lnet_hdr_t *hdr = &msg->msg_hdr; lnet_event_t *ev = &msg->msg_ev; LASSERT(!msg->msg_routing); ev->type = ev_type; if (ev_type == LNET_EVENT_SEND) { /* event for active message */ ev->target.nid = le64_to_cpu(hdr->dest_nid); ev->target.pid = le32_to_cpu(hdr->dest_pid); ev->initiator.nid = LNET_NID_ANY; ev->initiator.pid = the_lnet.ln_pid; ev->sender = LNET_NID_ANY; } else { /* event for passive message */ ev->target.pid = hdr->dest_pid; ev->target.nid = hdr->dest_nid; ev->initiator.pid = hdr->src_pid; ev->initiator.nid = hdr->src_nid; ev->rlength = hdr->payload_length; ev->sender = msg->msg_from; ev->mlength = msg->msg_wanted; ev->offset = msg->msg_offset; } switch (ev_type) { default: LBUG(); case LNET_EVENT_PUT: /* passive PUT */ ev->pt_index = hdr->msg.put.ptl_index; ev->match_bits = hdr->msg.put.match_bits; ev->hdr_data = hdr->msg.put.hdr_data; return; case LNET_EVENT_GET: /* passive GET */ ev->pt_index = hdr->msg.get.ptl_index; ev->match_bits = hdr->msg.get.match_bits; ev->hdr_data = 0; return; case LNET_EVENT_ACK: /* ACK */ ev->match_bits = hdr->msg.ack.match_bits; ev->mlength = hdr->msg.ack.mlength; return; case LNET_EVENT_REPLY: /* REPLY */ return; case LNET_EVENT_SEND: /* active message */ if (msg->msg_type == LNET_MSG_PUT) { ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index); ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits); ev->offset = le32_to_cpu(hdr->msg.put.offset); ev->mlength = ev->rlength = le32_to_cpu(hdr->payload_length); ev->hdr_data = 
le64_to_cpu(hdr->msg.put.hdr_data); } else { LASSERT(msg->msg_type == LNET_MSG_GET); ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index); ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits); ev->mlength = ev->rlength = le32_to_cpu(hdr->msg.get.sink_length); ev->offset = le32_to_cpu(hdr->msg.get.src_offset); ev->hdr_data = 0; } return; } } void lnet_msg_commit(lnet_msg_t *msg, int cpt) { struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt]; lnet_counters_t *counters = the_lnet.ln_counters[cpt]; /* routed message can be committed for both receiving and sending */ LASSERT(!msg->msg_tx_committed); if (msg->msg_sending) { LASSERT(!msg->msg_receiving); msg->msg_tx_cpt = cpt; msg->msg_tx_committed = 1; if (msg->msg_rx_committed) { /* routed message REPLY */ LASSERT(msg->msg_onactivelist); return; } } else { LASSERT(!msg->msg_sending); msg->msg_rx_cpt = cpt; msg->msg_rx_committed = 1; } LASSERT(!msg->msg_onactivelist); msg->msg_onactivelist = 1; list_add(&msg->msg_activelist, &container->msc_active); counters->msgs_alloc++; if (counters->msgs_alloc > counters->msgs_max) counters->msgs_max = counters->msgs_alloc; } static void lnet_msg_decommit_tx(lnet_msg_t *msg, int status) { lnet_counters_t *counters; lnet_event_t *ev = &msg->msg_ev; LASSERT(msg->msg_tx_committed); if (status != 0) goto out; counters = the_lnet.ln_counters[msg->msg_tx_cpt]; switch (ev->type) { default: /* routed message */ LASSERT(msg->msg_routing); LASSERT(msg->msg_rx_committed); LASSERT(ev->type == 0); counters->route_length += msg->msg_len; counters->route_count++; goto out; case LNET_EVENT_PUT: /* should have been decommitted */ LASSERT(!msg->msg_rx_committed); /* overwritten while sending ACK */ LASSERT(msg->msg_type == LNET_MSG_ACK); msg->msg_type = LNET_MSG_PUT; /* fix type */ break; case LNET_EVENT_SEND: LASSERT(!msg->msg_rx_committed); if (msg->msg_type == LNET_MSG_PUT) counters->send_length += msg->msg_len; break; case LNET_EVENT_GET: LASSERT(msg->msg_rx_committed); /* 
overwritten while sending reply, we should never be * here for optimized GET */ LASSERT(msg->msg_type == LNET_MSG_REPLY); msg->msg_type = LNET_MSG_GET; /* fix type */ break; } counters->send_count++; out: lnet_return_tx_credits_locked(msg); msg->msg_tx_committed = 0; } static void lnet_msg_decommit_rx(lnet_msg_t *msg, int status) { lnet_counters_t *counters; lnet_event_t *ev = &msg->msg_ev; LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ LASSERT(msg->msg_rx_committed); if (status != 0) goto out; counters = the_lnet.ln_counters[msg->msg_rx_cpt]; switch (ev->type) { default: LASSERT(ev->type == 0); LASSERT(msg->msg_routing); goto out; case LNET_EVENT_ACK: LASSERT(msg->msg_type == LNET_MSG_ACK); break; case LNET_EVENT_GET: /* type is "REPLY" if it's an optimized GET on passive side, * because optimized GET will never be committed for sending, * so message type wouldn't be changed back to "GET" by * lnet_msg_decommit_tx(), see details in lnet_parse_get() */ LASSERT(msg->msg_type == LNET_MSG_REPLY || msg->msg_type == LNET_MSG_GET); counters->send_length += msg->msg_wanted; break; case LNET_EVENT_PUT: LASSERT(msg->msg_type == LNET_MSG_PUT); break; case LNET_EVENT_REPLY: /* type is "GET" if it's an optimized GET on active side, * see details in lnet_create_reply_msg() */ LASSERT(msg->msg_type == LNET_MSG_GET || msg->msg_type == LNET_MSG_REPLY); break; } counters->recv_count++; if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY) counters->recv_length += msg->msg_wanted; out: lnet_return_rx_credits_locked(msg); msg->msg_rx_committed = 0; } void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status) { int cpt2 = cpt; LASSERT(msg->msg_tx_committed || msg->msg_rx_committed); LASSERT(msg->msg_onactivelist); if (msg->msg_tx_committed) { /* always decommit for sending first */ LASSERT(cpt == msg->msg_tx_cpt); lnet_msg_decommit_tx(msg, status); } if (msg->msg_rx_committed) { /* forwarding msg committed for both receiving and sending */ if (cpt != 
msg->msg_rx_cpt) { lnet_net_unlock(cpt); cpt2 = msg->msg_rx_cpt; lnet_net_lock(cpt2); } lnet_msg_decommit_rx(msg, status); } list_del(&msg->msg_activelist); msg->msg_onactivelist = 0; the_lnet.ln_counters[cpt2]->msgs_alloc--; if (cpt2 != cpt) { lnet_net_unlock(cpt2); lnet_net_lock(cpt); } } void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, unsigned int offset, unsigned int mlen) { /* NB: @offset and @len are only useful for receiving */ /* Here, we attach the MD on lnet_msg and mark it busy and * decrementing its threshold. Come what may, the lnet_msg "owns" * the MD until a call to lnet_msg_detach_md or lnet_finalize() * signals completion. */ LASSERT(!msg->msg_routing); msg->msg_md = md; if (msg->msg_receiving) { /* committed for receiving */ msg->msg_offset = offset; msg->msg_wanted = mlen; } md->md_refcount++; if (md->md_threshold != LNET_MD_THRESH_INF) { LASSERT(md->md_threshold > 0); md->md_threshold--; } /* build umd in event */ lnet_md2handle(&msg->msg_ev.md_handle, md); lnet_md_deconstruct(md, &msg->msg_ev.md); } void lnet_msg_detach_md(lnet_msg_t *msg, int status) { lnet_libmd_t *md = msg->msg_md; int unlink; /* Now it's safe to drop my caller's ref */ md->md_refcount--; LASSERT(md->md_refcount >= 0); unlink = lnet_md_unlinkable(md); if (md->md_eq != NULL) { msg->msg_ev.status = status; msg->msg_ev.unlinked = unlink; lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); } if (unlink) lnet_md_unlink(md); msg->msg_md = NULL; } static int lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) { lnet_handle_wire_t ack_wmd; int rc; int status = msg->msg_ev.status; LASSERT (msg->msg_onactivelist); if (status == 0 && msg->msg_ack) { /* Only send an ACK if the PUT completed successfully */ lnet_msg_decommit(msg, cpt, 0); msg->msg_ack = 0; lnet_net_unlock(cpt); LASSERT(msg->msg_ev.type == LNET_EVENT_PUT); LASSERT(!msg->msg_routing); ack_wmd = msg->msg_hdr.msg.put.ack_wmd; lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0); msg->msg_hdr.msg.ack.dst_wmd = 
ack_wmd; msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits; msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength); /* NB: we probably want to use NID of msg::msg_from as 3rd * parameter (router NID) if it's routed message */ rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY); lnet_net_lock(cpt); /* * NB: message is committed for sending, we should return * on success because LND will finalize this message later. * * Also, there is possibility that message is committed for * sending and also failed before delivering to LND, * i.e: ENOMEM, in that case we can't fall through either * because CPT for sending can be different with CPT for * receiving, so we should return back to lnet_finalize() * to make sure we are locking the correct partition. */ return rc; } else if (status == 0 && /* OK so far */ (msg->msg_routing && !msg->msg_sending)) { /* not forwarded */ LASSERT(!msg->msg_receiving); /* called back recv already */ lnet_net_unlock(cpt); rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY); lnet_net_lock(cpt); /* * NB: message is committed for sending, we should return * on success because LND will finalize this message later. * * Also, there is possibility that message is committed for * sending and also failed before delivering to LND, * i.e: ENOMEM, in that case we can't fall through either: * - The rule is message must decommit for sending first if * the it's committed for both sending and receiving * - CPT for sending can be different with CPT for receiving, * so we should return back to lnet_finalize() to make * sure we are locking the correct partition. 
*/ return rc; } lnet_msg_decommit(msg, cpt, status); lnet_msg_free_locked(msg); return 0; } void lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status) { struct lnet_msg_container *container; int my_slot; int cpt; int rc; int i; LASSERT (!in_interrupt ()); if (msg == NULL) return; #if 0 CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n", lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target), msg->msg_target_is_router ? "t" : "", msg->msg_routing ? "X" : "", msg->msg_ack ? "A" : "", msg->msg_sending ? "S" : "", msg->msg_receiving ? "R" : "", msg->msg_delayed ? "d" : "", msg->msg_txcredit ? "C" : "", msg->msg_peertxcredit ? "c" : "", msg->msg_rtrcredit ? "F" : "", msg->msg_peerrtrcredit ? "f" : "", msg->msg_onactivelist ? "!" : "", msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid), msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); #endif msg->msg_ev.status = status; if (msg->msg_md != NULL) { cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie); lnet_res_lock(cpt); lnet_msg_detach_md(msg, status); lnet_res_unlock(cpt); } again: rc = 0; if (!msg->msg_tx_committed && !msg->msg_rx_committed) { /* not committed to network yet */ LASSERT(!msg->msg_onactivelist); lnet_msg_free(msg); return; } /* * NB: routed message can be committed for both receiving and sending, * we should finalize in LIFO order and keep counters correct. * (finalize sending first then finalize receiving) */ cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt; lnet_net_lock(cpt); container = the_lnet.ln_msg_containers[cpt]; list_add_tail(&msg->msg_list, &container->msc_finalizing); /* Recursion breaker. 
Don't complete the message here if I am (or * enough other threads are) already completing messages */ my_slot = -1; for (i = 0; i < container->msc_nfinalizers; i++) { if (container->msc_finalizers[i] == current) break; if (my_slot < 0 && container->msc_finalizers[i] == NULL) my_slot = i; } if (i < container->msc_nfinalizers || my_slot < 0) { lnet_net_unlock(cpt); return; } container->msc_finalizers[my_slot] = current; while (!list_empty(&container->msc_finalizing)) { msg = list_entry(container->msc_finalizing.next, lnet_msg_t, msg_list); list_del(&msg->msg_list); /* NB drops and regains the lnet lock if it actually does * anything, so my finalizing friends can chomp along too */ rc = lnet_complete_msg_locked(msg, cpt); if (rc != 0) break; } container->msc_finalizers[my_slot] = NULL; lnet_net_unlock(cpt); if (rc != 0) goto again; } EXPORT_SYMBOL(lnet_finalize); void lnet_msg_container_cleanup(struct lnet_msg_container *container) { int count = 0; if (container->msc_init == 0) return; while (!list_empty(&container->msc_active)) { lnet_msg_t *msg = list_entry(container->msc_active.next, lnet_msg_t, msg_activelist); LASSERT(msg->msg_onactivelist); msg->msg_onactivelist = 0; list_del(&msg->msg_activelist); lnet_msg_free(msg); count++; } if (count > 0) CERROR("%d active msg on exit\n", count); if (container->msc_finalizers != NULL) { LIBCFS_FREE(container->msc_finalizers, container->msc_nfinalizers * sizeof(*container->msc_finalizers)); container->msc_finalizers = NULL; } #ifdef LNET_USE_LIB_FREELIST lnet_freelist_fini(&container->msc_freelist); #endif container->msc_init = 0; } int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) { int rc; container->msc_init = 1; INIT_LIST_HEAD(&container->msc_active); INIT_LIST_HEAD(&container->msc_finalizing); #ifdef LNET_USE_LIB_FREELIST memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t)); rc = lnet_freelist_init(&container->msc_freelist, LNET_FL_MAX_MSGS, sizeof(lnet_msg_t)); if (rc != 0) { 
CERROR("Failed to init freelist for message container\n"); lnet_msg_container_cleanup(container); return rc; } #else rc = 0; #endif /* number of CPUs */ container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt, container->msc_nfinalizers * sizeof(*container->msc_finalizers)); if (container->msc_finalizers == NULL) { CERROR("Failed to allocate message finalizers\n"); lnet_msg_container_cleanup(container); return -ENOMEM; } return rc; } void lnet_msg_containers_destroy(void) { struct lnet_msg_container *container; int i; if (the_lnet.ln_msg_containers == NULL) return; cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) lnet_msg_container_cleanup(container); cfs_percpt_free(the_lnet.ln_msg_containers); the_lnet.ln_msg_containers = NULL; } int lnet_msg_containers_create(void) { struct lnet_msg_container *container; int rc; int i; the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*container)); if (the_lnet.ln_msg_containers == NULL) { CERROR("Failed to allocate cpu-partition data for network\n"); return -ENOMEM; } cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) { rc = lnet_msg_container_setup(container, i); if (rc != 0) { lnet_msg_containers_destroy(); return rc; } } return 0; }
gpl-2.0
heybaby2707/VFCore
dep/g3dlite/source/Image3.cpp
673
5016
/** @file Image3.cpp @maintainer Morgan McGuire, http://graphics.cs.williams.edu @created 2007-01-31 @edited 2007-01-31 */ #include "G3D/Image3.h" #include "G3D/Image3uint8.h" #include "G3D/GImage.h" #include "G3D/Color4.h" #include "G3D/Color4uint8.h" #include "G3D/Color1.h" #include "G3D/Color1uint8.h" #include "G3D/ImageFormat.h" namespace G3D { Image3::Image3(int w, int h, WrapMode wrap) : Map2D<Color3, Color3>(w, h, wrap) { setAll(Color3::black()); } Image3::Ref Image3::fromGImage(const GImage& im, WrapMode wrap) { switch (im.channels()) { case 1: return fromArray(im.pixel1(), im.width(), im.height(), wrap); case 3: return fromArray(im.pixel3(), im.width(), im.height(), wrap); case 4: return fromArray(im.pixel4(), im.width(), im.height(), wrap); default: debugAssertM(false, "Input GImage must have 1, 3, or 4 channels."); return NULL; } } Image3::Ref Image3::fromImage3uint8(const ReferenceCountedPointer<Image3uint8>& im) { Ref out = createEmpty(im->wrapMode()); out->resize(im->width(), im->height()); int N = im->width() * im->height(); const Color3uint8* src = reinterpret_cast<Color3uint8*>(im->getCArray()); for (int i = 0; i < N; ++i) { out->data[i] = Color3(src[i]); } return out; } Image3::Ref Image3::createEmpty(int width, int height, WrapMode wrap) { return new Image3(width, height, wrap); } Image3::Ref Image3::createEmpty(WrapMode wrap) { return createEmpty(0, 0, wrap); } Image3::Ref Image3::fromFile(const std::string& filename, WrapMode wrap, GImage::Format fmt) { Ref out = createEmpty(wrap); out->load(filename, fmt); return out; } void Image3::load(const std::string& filename, GImage::Format fmt) { copyGImage(GImage(filename, fmt)); setChanged(true); } Image3::Ref Image3::fromArray(const class Color3uint8* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); return out; } Image3::Ref Image3::fromArray(const class Color1* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); 
return out; } Image3::Ref Image3::fromArray(const class Color1uint8* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); return out; } Image3::Ref Image3::fromArray(const class Color3* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); return out; } Image3::Ref Image3::fromArray(const class Color4uint8* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); return out; } Image3::Ref Image3::fromArray(const class Color4* ptr, int w, int h, WrapMode wrap) { Ref out = createEmpty(wrap); out->copyArray(ptr, w, h); return out; } void Image3::copyGImage(const GImage& im) { switch (im.channels()) { case 1: copyArray(im.pixel1(), im.width(), im.height()); break; case 3: copyArray(im.pixel3(), im.width(), im.height()); break; case 4: copyArray(im.pixel4(), im.width(), im.height()); break; } } void Image3::copyArray(const Color3uint8* src, int w, int h) { resize(w, h); int N = w * h; Color3* dst = data.getCArray(); // Convert int8 -> float for (int i = 0; i < N; ++i) { dst[i] = Color3(src[i]); } } void Image3::copyArray(const Color4uint8* src, int w, int h) { resize(w, h); int N = w * h; Color3* dst = data.getCArray(); // Strip alpha and convert for (int i = 0; i < N; ++i) { dst[i] = Color3(src[i].rgb()); } } void Image3::copyArray(const Color3* src, int w, int h) { resize(w, h); System::memcpy(getCArray(), src, w * h * sizeof(Color3)); } void Image3::copyArray(const Color4* src, int w, int h) { resize(w, h); int N = w * h; Color3* dst = data.getCArray(); // Strip alpha for (int i = 0; i < N; ++i) { dst[i] = src[i].rgb(); } } void Image3::copyArray(const Color1uint8* src, int w, int h) { resize(w, h); int N = w * h; Color3* dst = getCArray(); for (int i = 0; i < N; ++i) { dst[i].r = dst[i].g = dst[i].b = Color1(src[i]).value; } } void Image3::copyArray(const Color1* src, int w, int h) { resize(w, h); int N = w * h; Color3* dst = getCArray(); for (int i = 0; i 
< N; ++i) { dst[i].r = dst[i].g = dst[i].b = src[i].value; } } /** Saves in any of the formats supported by G3D::GImage. */ void Image3::save(const std::string& filename, GImage::Format fmt) { GImage im(width(), height(), 3); int N = im.width() * im.height(); Color3uint8* dst = im.pixel3(); for (int i = 0; i < N; ++i) { dst[i] = Color3uint8(data[i]); } im.save(filename, fmt); } const ImageFormat* Image3::format() const { return ImageFormat::RGB32F(); } } // G3D
gpl-2.0
mythos234/zerolte-kernel-CM
fs/xfs/xfs_inode.c
1441
118832
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/log2.h> #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_attr_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_buf_item.h" #include "xfs_inode_item.h" #include "xfs_btree.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_error.h" #include "xfs_utils.h" #include "xfs_quota.h" #include "xfs_filestream.h" #include "xfs_vnodeops.h" #include "xfs_cksum.h" #include "xfs_trace.h" #include "xfs_icache.h" kmem_zone_t *xfs_ifork_zone; kmem_zone_t *xfs_inode_zone; /* * Used in xfs_itruncate_extents(). This is the maximum number of extents * freed from a file in a single transaction. 
*/ #define XFS_ITRUNC_MAX_EXTENTS 2 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *); STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); /* * helper function to extract extent size hint from inode */ xfs_extlen_t xfs_get_extsz_hint( struct xfs_inode *ip) { if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize) return ip->i_d.di_extsize; if (XFS_IS_REALTIME_INODE(ip)) return ip->i_mount->m_sb.sb_rextsize; return 0; } /* * This is a wrapper routine around the xfs_ilock() routine used to centralize * some grungy code. It is used in places that wish to lock the inode solely * for reading the extents. The reason these places can't just call * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the * extents from disk for a file in b-tree format. If the inode is in b-tree * format, then we need to lock the inode exclusively until the extents are read * in. Locking it exclusively all the time would limit our parallelism * unnecessarily, though. What we do instead is check to see if the extents * have been read in yet, and only lock the inode exclusively if they have not. * * The function returns a value which should be given to the corresponding * xfs_iunlock_map_shared(). This value is the mode in which the lock was * actually taken. */ uint xfs_ilock_map_shared( xfs_inode_t *ip) { uint lock_mode; if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { lock_mode = XFS_ILOCK_EXCL; } else { lock_mode = XFS_ILOCK_SHARED; } xfs_ilock(ip, lock_mode); return lock_mode; } /* * This is simply the unlock routine to go with xfs_ilock_map_shared(). * All it does is call xfs_iunlock() with the given lock_mode. 
*/ void xfs_iunlock_map_shared( xfs_inode_t *ip, unsigned int lock_mode) { xfs_iunlock(ip, lock_mode); } /* * The xfs inode contains 2 locks: a multi-reader lock called the * i_iolock and a multi-reader lock called the i_lock. This routine * allows either or both of the locks to be obtained. * * The 2 locks should always be ordered so that the IO lock is * obtained first in order to prevent deadlock. * * ip -- the inode being locked * lock_flags -- this parameter indicates the inode's locks * to be locked. It can be: * XFS_IOLOCK_SHARED, * XFS_IOLOCK_EXCL, * XFS_ILOCK_SHARED, * XFS_ILOCK_EXCL, * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED, * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL, * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED, * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL */ void xfs_ilock( xfs_inode_t *ip, uint lock_flags) { trace_xfs_ilock(ip, lock_flags, _RET_IP_); /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. */ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); if (lock_flags & XFS_IOLOCK_EXCL) mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); else if (lock_flags & XFS_IOLOCK_SHARED) mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); if (lock_flags & XFS_ILOCK_EXCL) mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); else if (lock_flags & XFS_ILOCK_SHARED) mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); } /* * This is just like xfs_ilock(), except that the caller * is guaranteed not to sleep. It returns 1 if it gets * the requested locks and 0 otherwise. If the IO lock is * obtained but the inode lock cannot be, then the IO lock * is dropped before returning. 
* * ip -- the inode being locked * lock_flags -- this parameter indicates the inode's locks to be * to be locked. See the comment for xfs_ilock() for a list * of valid values. */ int xfs_ilock_nowait( xfs_inode_t *ip, uint lock_flags) { trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. */ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); if (lock_flags & XFS_IOLOCK_EXCL) { if (!mrtryupdate(&ip->i_iolock)) goto out; } else if (lock_flags & XFS_IOLOCK_SHARED) { if (!mrtryaccess(&ip->i_iolock)) goto out; } if (lock_flags & XFS_ILOCK_EXCL) { if (!mrtryupdate(&ip->i_lock)) goto out_undo_iolock; } else if (lock_flags & XFS_ILOCK_SHARED) { if (!mrtryaccess(&ip->i_lock)) goto out_undo_iolock; } return 1; out_undo_iolock: if (lock_flags & XFS_IOLOCK_EXCL) mrunlock_excl(&ip->i_iolock); else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); out: return 0; } /* * xfs_iunlock() is used to drop the inode locks acquired with * xfs_ilock() and xfs_ilock_nowait(). The caller must pass * in the flags given to xfs_ilock() or xfs_ilock_nowait() so * that we know which locks to drop. * * ip -- the inode being unlocked * lock_flags -- this parameter indicates the inode's locks to be * to be unlocked. See the comment for xfs_ilock() for a list * of valid values for this parameter. * */ void xfs_iunlock( xfs_inode_t *ip, uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
*/ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); ASSERT(lock_flags != 0); if (lock_flags & XFS_IOLOCK_EXCL) mrunlock_excl(&ip->i_iolock); else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); if (lock_flags & XFS_ILOCK_EXCL) mrunlock_excl(&ip->i_lock); else if (lock_flags & XFS_ILOCK_SHARED) mrunlock_shared(&ip->i_lock); trace_xfs_iunlock(ip, lock_flags, _RET_IP_); } /* * give up write locks. the i/o lock cannot be held nested * if it is being demoted. */ void xfs_ilock_demote( xfs_inode_t *ip, uint lock_flags) { ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); if (lock_flags & XFS_ILOCK_EXCL) mrdemote(&ip->i_lock); if (lock_flags & XFS_IOLOCK_EXCL) mrdemote(&ip->i_iolock); trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_); } #if defined(DEBUG) || defined(XFS_WARN) int xfs_isilocked( xfs_inode_t *ip, uint lock_flags) { if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) { if (!(lock_flags & XFS_ILOCK_SHARED)) return !!ip->i_lock.mr_writer; return rwsem_is_locked(&ip->i_lock.mr_lock); } if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) { if (!(lock_flags & XFS_IOLOCK_SHARED)) return !!ip->i_iolock.mr_writer; return rwsem_is_locked(&ip->i_iolock.mr_lock); } ASSERT(0); return 0; } #endif void __xfs_iflock( struct xfs_inode *ip) { wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT); DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT); do { prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE); if (xfs_isiflocked(ip)) io_schedule(); } while (!xfs_iflock_nowait(ip)); finish_wait(wq, &wait.wait); } #ifdef DEBUG /* * Make sure that the extents in the given memory buffer * are valid. 
*/ STATIC void xfs_validate_extents( xfs_ifork_t *ifp, int nrecs, xfs_exntfmt_t fmt) { xfs_bmbt_irec_t irec; xfs_bmbt_rec_host_t rec; int i; for (i = 0; i < nrecs; i++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); rec.l0 = get_unaligned(&ep->l0); rec.l1 = get_unaligned(&ep->l1); xfs_bmbt_get_all(&rec, &irec); if (fmt == XFS_EXTFMT_NOSTATE) ASSERT(irec.br_state == XFS_EXT_NORM); } } #else /* DEBUG */ #define xfs_validate_extents(ifp, nrecs, fmt) #endif /* DEBUG */ /* * Check that none of the inode's in the buffer have a next * unlinked field of 0. */ #if defined(DEBUG) void xfs_inobp_check( xfs_mount_t *mp, xfs_buf_t *bp) { int i; int j; xfs_dinode_t *dip; j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; for (i = 0; i < j; i++) { dip = (xfs_dinode_t *)xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize); if (!dip->di_next_unlinked) { xfs_alert(mp, "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.", bp); ASSERT(dip->di_next_unlinked); } } } #endif static void xfs_inode_buf_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; int i; int ni; /* * Validate the magic number and version of every inode in the buffer */ ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock; for (i = 0; i < ni; i++) { int di_ok; xfs_dinode_t *dip; dip = (struct xfs_dinode *)xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) && XFS_DINODE_GOOD_VERSION(dip->di_version); if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, XFS_RANDOM_ITOBP_INOTOBP))) { xfs_buf_ioerror(bp, EFSCORRUPTED); XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH, mp, dip); #ifdef DEBUG xfs_emerg(mp, "bad inode magic/vsn daddr %lld #%d (magic=%x)", (unsigned long long)bp->b_bn, i, be16_to_cpu(dip->di_magic)); ASSERT(0); #endif } } xfs_inobp_check(mp, bp); } static void xfs_inode_buf_read_verify( struct xfs_buf *bp) { xfs_inode_buf_verify(bp); } static void xfs_inode_buf_write_verify( struct xfs_buf 
*bp) { xfs_inode_buf_verify(bp); } const struct xfs_buf_ops xfs_inode_buf_ops = { .verify_read = xfs_inode_buf_read_verify, .verify_write = xfs_inode_buf_write_verify, }; /* * This routine is called to map an inode to the buffer containing the on-disk * version of the inode. It returns a pointer to the buffer containing the * on-disk inode in the bpp parameter, and in the dipp parameter it returns a * pointer to the on-disk inode within that buffer. * * If a non-zero error is returned, then the contents of bpp and dipp are * undefined. */ int xfs_imap_to_bp( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_imap *imap, struct xfs_dinode **dipp, struct xfs_buf **bpp, uint buf_flags, uint iget_flags) { struct xfs_buf *bp; int error; buf_flags |= XBF_UNMAPPED; error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, (int)imap->im_len, buf_flags, &bp, &xfs_inode_buf_ops); if (error) { if (error == EAGAIN) { ASSERT(buf_flags & XBF_TRYLOCK); return error; } if (error == EFSCORRUPTED && (iget_flags & XFS_IGET_UNTRUSTED)) return XFS_ERROR(EINVAL); xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.", __func__, error); return error; } *bpp = bp; *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset); return 0; } /* * Move inode type and inode format specific information from the * on-disk inode to the in-core inode. For fifos, devs, and sockets * this means set if_rdev to the proper value. For files, directories, * and symlinks this means to bring in the in-line data or extent * pointers. For a file in B-tree format, only the root is immediately * brought in-core. The rest will be in-lined in if_extents when it * is first referenced (see xfs_iread_extents()). 
*/ STATIC int xfs_iformat( xfs_inode_t *ip, xfs_dinode_t *dip) { xfs_attr_shortform_t *atp; int size; int error = 0; xfs_fsize_t di_size; if (unlikely(be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) > be64_to_cpu(dip->di_nblocks))) { xfs_warn(ip->i_mount, "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", (unsigned long long)ip->i_ino, (int)(be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents)), (unsigned long long) be64_to_cpu(dip->di_nblocks)); XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.", (unsigned long long)ip->i_ino, dip->di_forkoff); XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && !ip->i_mount->m_rtdev_targp)) { xfs_warn(ip->i_mount, "corrupt dinode %Lu, has realtime flag set.", ip->i_ino); XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } switch (ip->i_d.di_mode & S_IFMT) { case S_IFIFO: case S_IFCHR: case S_IFBLK: case S_IFSOCK: if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } ip->i_d.di_size = 0; ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); break; case S_IFREG: case S_IFLNK: case S_IFDIR: switch (dip->di_format) { case XFS_DINODE_FMT_LOCAL: /* * no local regular files yet */ if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) { xfs_warn(ip->i_mount, "corrupt inode %Lu (local format for regular file).", (unsigned long long) ip->i_ino); XFS_CORRUPTION_ERROR("xfs_iformat(4)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } di_size = be64_to_cpu(dip->di_size); if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 
xfs_warn(ip->i_mount, "corrupt inode %Lu (bad size %Ld for local inode).", (unsigned long long) ip->i_ino, (long long) di_size); XFS_CORRUPTION_ERROR("xfs_iformat(5)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } size = (int)di_size; error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); break; case XFS_DINODE_FMT_BTREE: error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); break; default: XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } break; default: XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } if (error) { return error; } if (!XFS_DFORK_Q(dip)) return 0; ASSERT(ip->i_afp == NULL); ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS); switch (dip->di_aformat) { case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); size = be16_to_cpu(atp->hdr.totsize); if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { xfs_warn(ip->i_mount, "corrupt inode %Lu (bad attr fork size %Ld).", (unsigned long long) ip->i_ino, (long long) size); XFS_CORRUPTION_ERROR("xfs_iformat(8)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); break; case XFS_DINODE_FMT_BTREE: error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); break; default: error = XFS_ERROR(EFSCORRUPTED); break; } if (error) { kmem_zone_free(xfs_ifork_zone, ip->i_afp); ip->i_afp = NULL; xfs_idestroy_fork(ip, XFS_DATA_FORK); } return error; } /* * The file is in-lined in the on-disk inode. * If it fits into if_inline_data, then copy * it there, otherwise allocate a buffer for it * and copy the data there. Either way, set * if_data to point at the data. 
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		/* Small enough to live inside the in-core fork itself. */
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		/* Round up to a multiple of 4, remember the real size. */
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size >
		     XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);
	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		/* Byte-swap each on-disk record into the in-core copy. */
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(mp, dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has less extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork) ||
		     XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, mp, whichfork) ||
		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(mp, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     mp, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

/*
 * Copy the big-endian on-disk inode core into the native-endian in-core
 * copy, field by field.  The version 3 fields are only valid, and only
 * copied, for v3 inodes.
 */
STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t	*to,
	xfs_dinode_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);

	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

/*
 * Copy the native-endian in-core inode back into the big-endian on-disk
 * layout.  Exact mirror of xfs_dinode_from_disk().
 */
void
xfs_dinode_to_disk(
	xfs_dinode_t	*to,
	xfs_icdinode_t	*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

/*
 * Convert on-disk XFS_DIFLAG_* bits into the XFS_XFLAG_* values that are
 * reported to userspace.
 */
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

/* xflags for an in-core inode; attr-fork presence from the in-core fork. */
uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

/* xflags taken straight from an on-disk inode. */
uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ?
XFS_XFLAG_HASATTR : 0); } static bool xfs_dinode_verify( struct xfs_mount *mp, struct xfs_inode *ip, struct xfs_dinode *dip) { if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) return false; /* only version 3 or greater inodes are extensively verified here */ if (dip->di_version < 3) return true; if (!xfs_sb_version_hascrc(&mp->m_sb)) return false; if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize, offsetof(struct xfs_dinode, di_crc))) return false; if (be64_to_cpu(dip->di_ino) != ip->i_ino) return false; if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid)) return false; return true; } void xfs_dinode_calc_crc( struct xfs_mount *mp, struct xfs_dinode *dip) { __uint32_t crc; if (dip->di_version < 3) return; ASSERT(xfs_sb_version_hascrc(&mp->m_sb)); crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize, offsetof(struct xfs_dinode, di_crc)); dip->di_crc = xfs_end_cksum(crc); } /* * Read the disk inode attributes into the in-core inode structure. */ int xfs_iread( xfs_mount_t *mp, xfs_trans_t *tp, xfs_inode_t *ip, uint iget_flags) { xfs_buf_t *bp; xfs_dinode_t *dip; int error; /* * Fill in the location information in the in-core inode. */ error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); if (error) return error; /* * Get pointers to the on-disk inode and the buffer containing it. */ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags); if (error) return error; /* even unallocated inodes are verified */ if (!xfs_dinode_verify(mp, ip, dip)) { xfs_alert(mp, "%s: validation failed for inode %lld failed", __func__, ip->i_ino); XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip); error = XFS_ERROR(EFSCORRUPTED); goto out_brelse; } /* * If the on-disk inode is already linked to a directory * entry, copy all of the inode into the in-core inode. * xfs_iformat() handles copying in the inode format * specific information. * Otherwise, just get the truly permanent information. 
*/ if (dip->di_mode) { xfs_dinode_from_disk(&ip->i_d, dip); error = xfs_iformat(ip, dip); if (error) { #ifdef DEBUG xfs_alert(mp, "%s: xfs_iformat() returned error %d", __func__, error); #endif /* DEBUG */ goto out_brelse; } } else { /* * Partial initialisation of the in-core inode. Just the bits * that xfs_ialloc won't overwrite or relies on being correct. */ ip->i_d.di_magic = be16_to_cpu(dip->di_magic); ip->i_d.di_version = dip->di_version; ip->i_d.di_gen = be32_to_cpu(dip->di_gen); ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); if (dip->di_version == 3) { ip->i_d.di_ino = be64_to_cpu(dip->di_ino); uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid); } /* * Make sure to pull in the mode here as well in * case the inode is released without being used. * This ensures that xfs_inactive() will see that * the inode is already free and not try to mess * with the uninitialized part of it. */ ip->i_d.di_mode = 0; } /* * The inode format changed when we moved the link count and * made it 32 bits long. If this is an old format inode, * convert it in memory to look like a new one. If it gets * flushed to disk we will convert back before flushing or * logging it. We zero out the new projid field and the old link * count field. We'll handle clearing the pad field (the remains * of the old uuid field) when we actually convert the inode to * the new format. We don't change the version number so that we * can distinguish this from a real new format inode. */ if (ip->i_d.di_version == 1) { ip->i_d.di_nlink = ip->i_d.di_onlink; ip->i_d.di_onlink = 0; xfs_set_projid(ip, 0); } ip->i_delayed_blks = 0; /* * Mark the buffer containing the inode as something to keep * around for a while. This helps to keep recently accessed * meta-data in-core longer. */ xfs_buf_set_ref(bp, XFS_INO_REF); /* * Use xfs_trans_brelse() to release the buffer containing the * on-disk inode, because it was acquired with xfs_trans_read_buf() * in xfs_imap_to_bp() above. 
If tp is NULL, this is just a normal * brelse(). If we're within a transaction, then xfs_trans_brelse() * will only release the buffer if it is not dirty within the * transaction. It will be OK to release the buffer in this case, * because inodes on disk are never destroyed and we will be * locking the new in-core inode before putting it in the hash * table where other processes can find it. Thus we don't have * to worry about the inode being changed just because we released * the buffer. */ out_brelse: xfs_trans_brelse(tp, bp); return error; } /* * Read in extents from a btree-format inode. * Allocate and fill in if_extents. Real work is done in xfs_bmap.c. */ int xfs_iread_extents( xfs_trans_t *tp, xfs_inode_t *ip, int whichfork) { int error; xfs_ifork_t *ifp; xfs_extnum_t nextents; if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } nextents = XFS_IFORK_NEXTENTS(ip, whichfork); ifp = XFS_IFORK_PTR(ip, whichfork); /* * We know that the size is valid (it's checked in iformat_btree) */ ifp->if_bytes = ifp->if_real_bytes = 0; ifp->if_flags |= XFS_IFEXTENTS; xfs_iext_add(ifp, 0, nextents); error = xfs_bmap_read_extents(tp, ip, whichfork); if (error) { xfs_iext_destroy(ifp); ifp->if_flags &= ~XFS_IFEXTENTS; return error; } xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip)); return 0; } /* * Allocate an inode on disk and return a copy of its in-core version. * The in-core inode is locked exclusively. Set mode, nlink, and rdev * appropriately within the inode. The uid and gid for the inode are * set according to the contents of the given cred structure. * * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc() * has a free inode available, call xfs_iget() to obtain the in-core * version of the allocated inode. Finally, fill in the inode and * log its initial contents. In this case, ialloc_context would be * set to NULL. 
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	/* Caller must commit and retry when ialloc_context comes back set. */
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	/* Stamp all timestamps with the creation time. */
	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}


	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		/* Inherit per-directory flags from the parent, if any. */
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.
This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	/* Unmap in bounded chunks, rolling the transaction between each. */
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		/* Walk the chain until we find our predecessor's buffer. */
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap returned error %d.",
					 __func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
	"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_UNMAPPED);

		if (!bp)
			return ENOMEM;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk.
That's not important because we are * only using to mark the buffer as stale in the log, and to * attach stale cached inodes on it. That means it will never be * dispatched for IO. If it is, we want to know about it, and we * want it to fail. We can acheive this by adding a write * verifier to the buffer. */ bp->b_ops = &xfs_inode_buf_ops; /* * Walk the inodes already attached to the buffer and mark them * stale. These will all have the flush locks held, so an * in-memory inode walk can't lock them. By marking them all * stale first, we will not attempt to lock them in the loop * below as the XFS_ISTALE flag will be set. */ lip = bp->b_fspriv; while (lip) { if (lip->li_type == XFS_LI_INODE) { iip = (xfs_inode_log_item_t *)lip; ASSERT(iip->ili_logged == 1); lip->li_cb = xfs_istale_done; xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, &iip->ili_item.li_lsn); xfs_iflags_set(iip->ili_inode, XFS_ISTALE); } lip = lip->li_bio_list; } /* * For each inode in memory attempt to add it to the inode * buffer and set it up for being staled on buffer IO * completion. This is safe as we've locked out tail pushing * and flushing by locking the buffer. * * We have already marked every inode that was part of a * transaction stale above, which means there is no point in * even trying to lock them. */ for (i = 0; i < ninodes; i++) { retry: rcu_read_lock(); ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, (inum + i))); /* Inode not in memory, nothing to do */ if (!ip) { rcu_read_unlock(); continue; } /* * because this is an RCU protected lookup, we could * find a recently freed or even reallocated inode * during the lookup. We need to check under the * i_flags_lock for a valid inode here. Skip it if it * is not valid, the wrong inode or stale. 
*/ spin_lock(&ip->i_flags_lock); if (ip->i_ino != inum + i || __xfs_iflags_test(ip, XFS_ISTALE)) { spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); continue; } spin_unlock(&ip->i_flags_lock); /* * Don't try to lock/unlock the current inode, but we * _cannot_ skip the other inodes that we did not find * in the list attached to the buffer and are not * already marked stale. If we can't lock it, back off * and retry. */ if (ip != free_ip && !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { rcu_read_unlock(); delay(1); goto retry; } rcu_read_unlock(); xfs_iflock(ip); xfs_iflags_set(ip, XFS_ISTALE); /* * we don't need to attach clean inodes or those only * with unlogged changes (which we throw away, anyway). */ iip = ip->i_itemp; if (!iip || xfs_inode_clean(ip)) { ASSERT(ip != free_ip); xfs_ifunlock(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); continue; } iip->ili_last_fields = iip->ili_fields; iip->ili_fields = 0; iip->ili_logged = 1; xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, &iip->ili_item.li_lsn); xfs_buf_attach_iodone(bp, xfs_istale_done, &iip->ili_item); if (ip != free_ip) xfs_iunlock(ip, XFS_ILOCK_EXCL); } xfs_trans_stale_inode_buf(tp, bp); xfs_trans_binval(tp, bp); } xfs_perag_put(pag); return 0; } /* * This is called to return an inode to the inode free list. * The inode should already be truncated to 0 length and have * no pages associated with it. This routine also assumes that * the inode is already a part of the transaction. * * The on-disk copy of the inode will have been added to the list * of unlinked inodes in the AGI. We need to remove the inode from * that list atomically with respect to freeing it here. 
*/ int xfs_ifree( xfs_trans_t *tp, xfs_inode_t *ip, xfs_bmap_free_t *flist) { int error; int delete; xfs_ino_t first_ino; xfs_dinode_t *dip; xfs_buf_t *ibp; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_nextents == 0); ASSERT(ip->i_d.di_anextents == 0); ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode)); ASSERT(ip->i_d.di_nblocks == 0); /* * Pull the on-disk inode from the AGI unlinked list. */ error = xfs_iunlink_remove(tp, ip); if (error != 0) { return error; } error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); if (error != 0) { return error; } ip->i_d.di_mode = 0; /* mark incore inode as free */ ip->i_d.di_flags = 0; ip->i_d.di_dmevmask = 0; ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; /* * Bump the generation count so no one will be confused * by reincarnations of this inode. */ ip->i_d.di_gen++; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp, 0, 0); if (error) return error; /* * Clear the on-disk di_mode. This is to prevent xfs_bulkstat * from picking up this inode when it is reclaimed (its incore state * initialzed but not flushed to disk yet). The in-core di_mode is * already cleared and a corresponding transaction logged. * The hack here just synchronizes the in-core to on-disk * di_mode value in advance before the actual inode sync to disk. * This is OK because the inode is already unlinked and would never * change its di_mode again for this inode generation. * This is a temporary hack that would require a proper fix * in the future. */ dip->di_mode = 0; if (delete) { error = xfs_ifree_cluster(ip, tp, first_ino); } return error; } /* * Reallocate the space for if_broot based on the number of records * being added or deleted as indicated in rec_diff. Move the records * and pointers in if_broot to fit the new size. 
 * When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records, one will be allocated.  The caller must also
 * not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * ext_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 */
void
xfs_iroot_realloc(
	xfs_inode_t		*ip,
	int			rec_diff,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			cur_max;
	xfs_ifork_t		*ifp;
	struct xfs_btree_block	*new_broot;
	int			new_max;
	size_t			new_size;
	char			*np;
	char			*op;

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (rec_diff > 0) {
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
			ifp->if_broot_bytes = (int)new_size;
			return;
		}

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location.  The records don't change location because
		 * they are kept butted up against the btree block header.
		 *
		 * Note: the old pointer address (op) is computed against
		 * the OLD if_broot_bytes, the new one (np) against the
		 * new size, before if_broot_bytes is updated - the order
		 * of these statements matters.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
				XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
				KM_SLEEP | KM_NOFS);
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     (int)new_size);
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(ifp->if_broot_bytes <=
			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
		/* regions may overlap within the same buffer: memmove */
		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
		return;
	}

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.  If we go to zero
	 * records, just get rid of the root and clear the status bit.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
	if (new_max > 0)
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
	else
		new_size = 0;
	if (new_size > 0) {
		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
		/*
		 * First copy over the btree block header.
		 */
		memcpy(new_broot, ifp->if_broot,
			XFS_BMBT_BLOCK_LEN(ip->i_mount));
	} else {
		new_broot = NULL;
		ifp->if_flags &= ~XFS_IFBROOT;
	}

	/*
	 * Only copy the records and pointers if there are any.
	 */
	if (new_max > 0) {
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
						     (int)new_size);
		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
	}
	kmem_free(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	ASSERT(ifp->if_broot_bytes <=
		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
	return;
}

/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * If the amount of space needed has decreased below the size of the
 * inline buffer, then switch to using the inline buffer.  Otherwise,
 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
 * to what is needed.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	 requested for the if_data array.
 */
void
xfs_idata_realloc(
	xfs_inode_t	*ip,
	int		byte_diff,
	int		whichfork)
{
	xfs_ifork_t	*ifp;
	int		new_size;
	int		real_size;

	if (byte_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	new_size = (int)ifp->if_bytes + byte_diff;
	ASSERT(new_size >= 0);

	if (new_size == 0) {
		/* Fork is now empty: release any malloc'd buffer. */
		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			kmem_free(ifp->if_u1.if_data);
		}
		ifp->if_u1.if_data = NULL;
		real_size = 0;
	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
		/*
		 * If the valid extents/data can fit in if_inline_ext/data,
		 * copy them from the malloc'd vector and free it.
		 */
		if (ifp->if_u1.if_data == NULL) {
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			ASSERT(ifp->if_real_bytes != 0);
			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
			      new_size);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		}
		real_size = 0;
	} else {
		/*
		 * Stuck with malloc/realloc.
		 * For inline data, the underlying buffer must be
		 * a multiple of 4 bytes in size so that it can be
		 * logged and stay on word boundaries.  We enforce
		 * that here.
		 */
		real_size = roundup(new_size, 4);
		if (ifp->if_u1.if_data == NULL) {
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			/*
			 * Only do the realloc if the underlying size
			 * is really changing.
			 */
			if (ifp->if_real_bytes != real_size) {
				ifp->if_u1.if_data =
					kmem_realloc(ifp->if_u1.if_data,
							real_size,
							ifp->if_real_bytes,
							KM_SLEEP | KM_NOFS);
			}
		} else {
			/* Growing out of the inline buffer: copy it over. */
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
				ifp->if_bytes);
		}
	}
	ifp->if_real_bytes = real_size;
	ifp->if_bytes = new_size;
	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
}

/*
 * Free all in-core resources (broot, local data, extent list) attached
 * to the given fork; for the attr fork, also free the fork structure
 * itself back to its zone.
 */
void
xfs_idestroy_fork(
	xfs_inode_t	*ip,
	int		whichfork)
{
	xfs_ifork_t	*ifp;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (ifp->if_broot != NULL) {
		kmem_free(ifp->if_broot);
		ifp->if_broot = NULL;
	}

	/*
	 * If the format is local, then we can't have an extents
	 * array so just look for an inline data array.  If we're
	 * not local then we may or may not have an extents list,
	 * so check and free it up if we do.
	 */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
		    (ifp->if_u1.if_data != NULL)) {
			ASSERT(ifp->if_real_bytes != 0);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = NULL;
			ifp->if_real_bytes = 0;
		}
	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
		   ((ifp->if_flags & XFS_IFEXTIREC) ||
		    ((ifp->if_u1.if_extents != NULL) &&
		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
		ASSERT(ifp->if_real_bytes != 0);
		xfs_iext_destroy(ifp);
	}
	ASSERT(ifp->if_u1.if_extents == NULL ||
	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
	ASSERT(ifp->if_real_bytes == 0);
	if (whichfork == XFS_ATTR_FORK) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
	}
}

/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);

}

/*
 * Kick off the unpin and then wait, uninterruptibly, for the pin count
 * to drop to zero (rechecked each time we are woken on the pin-bit
 * waitqueue).
 */
static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

/* Wait for the inode to be fully unpinned; fast path when not pinned. */
void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * If there are no delayed allocation extents, then we can just
 * memcpy() the extents into the buffer.
Otherwise, we need to * examine each extent in turn and skip those which are delayed. */ int xfs_iextents_copy( xfs_inode_t *ip, xfs_bmbt_rec_t *dp, int whichfork) { int copied; int i; xfs_ifork_t *ifp; int nrecs; xfs_fsblock_t start_block; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(ifp->if_bytes > 0); nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); ASSERT(nrecs > 0); /* * There are some delayed allocation extents in the * inode, so copy the extents one at a time and skip * the delayed ones. There must be at least one * non-delayed extent. */ copied = 0; for (i = 0; i < nrecs; i++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); start_block = xfs_bmbt_get_startblock(ep); if (isnullstartblock(start_block)) { /* * It's a delayed allocation extent, so skip it. */ continue; } /* Translate to on disk format */ put_unaligned(cpu_to_be64(ep->l0), &dp->l0); put_unaligned(cpu_to_be64(ep->l1), &dp->l1); dp++; copied++; } ASSERT(copied != 0); xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); return (copied * (uint)sizeof(xfs_bmbt_rec_t)); } /* * Each of the following cases stores data into the same region * of the on-disk inode, so only one of them can be valid at * any given time. While it is possible to have conflicting formats * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is * in EXTENTS format, this can only happen when the fork has * changed formats after being modified but before being flushed. * In these cases, the format always takes precedence, because the * format indicates the current state of the fork. 
*/ /*ARGSUSED*/ STATIC void xfs_iflush_fork( xfs_inode_t *ip, xfs_dinode_t *dip, xfs_inode_log_item_t *iip, int whichfork, xfs_buf_t *bp) { char *cp; xfs_ifork_t *ifp; xfs_mount_t *mp; static const short brootflag[2] = { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; static const short dataflag[2] = { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; static const short extflag[2] = { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; if (!iip) return; ifp = XFS_IFORK_PTR(ip, whichfork); /* * This can happen if we gave up in iformat in an error path, * for the attribute fork. */ if (!ifp) { ASSERT(whichfork == XFS_ATTR_FORK); return; } cp = XFS_DFORK_PTR(dip, whichfork); mp = ip->i_mount; switch (XFS_IFORK_FORMAT(ip, whichfork)) { case XFS_DINODE_FMT_LOCAL: if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_u1.if_data != NULL); ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); } break; case XFS_DINODE_FMT_EXTENTS: ASSERT((ifp->if_flags & XFS_IFEXTENTS) || !(iip->ili_fields & extflag[whichfork])); if ((iip->ili_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(xfs_iext_get_ext(ifp, 0)); ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, whichfork); } break; case XFS_DINODE_FMT_BTREE: if ((iip->ili_fields & brootflag[whichfork]) && (ifp->if_broot_bytes > 0)) { ASSERT(ifp->if_broot != NULL); ASSERT(ifp->if_broot_bytes <= (XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip))); xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, (xfs_bmdr_block_t *)cp, XFS_DFORK_SIZE(dip, mp, whichfork)); } break; case XFS_DINODE_FMT_DEV: if (iip->ili_fields & XFS_ILOG_DEV) { ASSERT(whichfork == XFS_DATA_FORK); xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); } break; case XFS_DINODE_FMT_UUID: if (iip->ili_fields & XFS_ILOG_UUID) { ASSERT(whichfork == XFS_DATA_FORK); memcpy(XFS_DFORK_DPTR(dip), &ip->i_df.if_u2.if_uuid, sizeof(uuid_t)); } break; default: ASSERT(0); break; } } STATIC 
int xfs_iflush_cluster( xfs_inode_t *ip, xfs_buf_t *bp) { xfs_mount_t *mp = ip->i_mount; struct xfs_perag *pag; unsigned long first_index, mask; unsigned long inodes_per_cluster; int ilist_size; xfs_inode_t **ilist; xfs_inode_t *iq; int nr_found; int clcount = 0; int bufwasdelwri; int i; pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); if (!ilist) goto out_put; mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; rcu_read_lock(); /* really need a gang lookup range call here */ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, first_index, inodes_per_cluster); if (nr_found == 0) goto out_free; for (i = 0; i < nr_found; i++) { iq = ilist[i]; if (iq == ip) continue; /* * because this is an RCU protected lookup, we could find a * recently freed or even reallocated inode during the lookup. * We need to check under the i_flags_lock for a valid inode * here. Skip it if it is not valid or the wrong inode. */ spin_lock(&ip->i_flags_lock); if (!ip->i_ino || (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) { spin_unlock(&ip->i_flags_lock); continue; } spin_unlock(&ip->i_flags_lock); /* * Do an un-protected check to see if the inode is dirty and * is a candidate for flushing. These checks will be repeated * later after the appropriate locks are acquired. */ if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) continue; /* * Try to get locks. If any are unavailable or it is pinned, * then this inode cannot be flushed and is skipped. 
*/ if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) continue; if (!xfs_iflock_nowait(iq)) { xfs_iunlock(iq, XFS_ILOCK_SHARED); continue; } if (xfs_ipincount(iq)) { xfs_ifunlock(iq); xfs_iunlock(iq, XFS_ILOCK_SHARED); continue; } /* * arriving here means that this inode can be flushed. First * re-check that it's dirty before flushing. */ if (!xfs_inode_clean(iq)) { int error; error = xfs_iflush_int(iq, bp); if (error) { xfs_iunlock(iq, XFS_ILOCK_SHARED); goto cluster_corrupt_out; } clcount++; } else { xfs_ifunlock(iq); } xfs_iunlock(iq, XFS_ILOCK_SHARED); } if (clcount) { XFS_STATS_INC(xs_icluster_flushcnt); XFS_STATS_ADD(xs_icluster_flushinode, clcount); } out_free: rcu_read_unlock(); kmem_free(ilist); out_put: xfs_perag_put(pag); return 0; cluster_corrupt_out: /* * Corruption detected in the clustering loop. Invalidate the * inode buffer and shut down the filesystem. */ rcu_read_unlock(); /* * Clean up the buffer. If it was delwri, just release it -- * brelse can handle it with no problems. If not, shut down the * filesystem before releasing the buffer. */ bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q); if (bufwasdelwri) xfs_buf_relse(bp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); if (!bufwasdelwri) { /* * Just like incore_relse: if we have b_iodone functions, * mark the buffer as an error and call them. Otherwise * mark it as stale and brelse. */ if (bp->b_iodone) { XFS_BUF_UNDONE(bp); xfs_buf_stale(bp); xfs_buf_ioerror(bp, EIO); xfs_buf_ioend(bp, 0); } else { xfs_buf_stale(bp); xfs_buf_relse(bp); } } /* * Unlocks the flush lock */ xfs_iflush_abort(iq, false); kmem_free(ilist); xfs_perag_put(pag); return XFS_ERROR(EFSCORRUPTED); } /* * Flush dirty inode metadata into the backing buffer. * * The caller must have the inode lock and the inode flush lock held. The * inode lock will still be held upon return to the caller, and the inode * flush lock will be released after the inode has reached the disk. 
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	/* fallthrough into abort_out is intentional */
	error = XFS_ERROR(EFSCORRUPTED);
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}

/*
 * Flush the dirty incore state of ip into the on-disk inode inside
 * buffer bp: validate the incore inode, copy the core and the forks
 * out, hand off the logged-field state to ili_last_fields and attach
 * the flush-done callback to the buffer.  The CRC is computed last,
 * after all modifications to the on-disk inode.
 */
STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * bump the flush iteration count, used to detect flushes which
	 * postdate a log record during recovery. This is redundant as we now
	 * log every change and hence this can't happen. Still, it doesn't hurt.
	 */
	ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0,
			      sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* update the lsn in the on disk inode if required */
	if (ip->i_d.di_version == 3)
		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Return a pointer to the extent record at file index idx.
 */
xfs_bmbt_rec_host_t *
xfs_iext_get_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx)		/* index of target extent */
{
	ASSERT(idx >= 0);
	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));

	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
		/* indirection array, first page: avoid the irec lookup */
		return ifp->if_u1.if_ext_irec->er_extbuf;
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_ext_irec_t	*erp;		/* irec pointer */
		int		erp_idx = 0;	/* irec index */
		xfs_extnum_t	page_idx = idx;	/* ext index in target list */

		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
		return &erp->er_extbuf[page_idx];
	} else if (ifp->if_bytes) {
		/* linear (direct or inline) extent list */
		return &ifp->if_u1.if_extents[idx];
	} else {
		return NULL;
	}
}

/*
 * Insert new item(s) into the extent records for incore inode
 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
 */
void
xfs_iext_insert(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* starting index of new items */
	xfs_extnum_t	count,		/* number of inserted items */
	xfs_bmbt_irec_t	*new,		/* items to insert */
	int		state)		/* type of extent conversion */
{
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	i;		/* extent record index */

	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	/* make room first, then fill in the new records */
	xfs_iext_add(ifp, idx, count);
	for (i = idx; i < idx + count; i++, new++)
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
}

/*
 * This is called when the amount of space required for incore file
 * extents needs to be increased.  The ext_diff parameter stores the
 * number of new extents being added and the idx parameter contains
 * the extent index where the new extents will be added.  If the new
 * extents are being appended, then we just need to (re)allocate and
 * initialize the space.  Otherwise, if the new extents are being
 * inserted into the middle of the existing entries, a bit more work
 * is required to make room for the new extents to be inserted.  The
 * caller is responsible for filling in the new extent entries upon
 * return.
 */
void
xfs_iext_add(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin adding exts */
	int		ext_diff)	/* number of extents to add */
{
	int		byte_diff;	/* new bytes being added */
	int		new_size;	/* size of extents after adding */
	xfs_extnum_t	nextents;	/* number of extents in file */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT((idx >= 0) && (idx <= nextents));
	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
	new_size = ifp->if_bytes + byte_diff;
	/*
	 * If the new number of extents (nextents + ext_diff)
	 * fits inside the inode, then continue to use the inline
	 * extent buffer.
	 */
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		if (idx < nextents) {
			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
				&ifp->if_u2.if_inline_ext[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
		}
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		ifp->if_real_bytes = 0;
	}
	/*
	 * Otherwise use a linear (direct) extent list.
	 * If the extents are currently inside the inode,
	 * xfs_iext_realloc_direct will switch us from
	 * inline to direct extent allocation mode.
	 */
	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, new_size);
		if (idx < nextents) {
			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
				&ifp->if_u1.if_extents[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
		}
	}
	/* Indirection array */
	else {
		xfs_ext_irec_t	*erp;
		int		erp_idx = 0;
		int		page_idx = idx;

		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
		if (ifp->if_flags & XFS_IFEXTIREC) {
			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
		} else {
			xfs_iext_irec_init(ifp);
			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
			erp = ifp->if_u1.if_ext_irec;
		}
		/* Extents fit in target extent page */
		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
			if (page_idx < erp->er_extcount) {
				memmove(&erp->er_extbuf[page_idx + ext_diff],
					&erp->er_extbuf[page_idx],
					(erp->er_extcount - page_idx) *
					sizeof(xfs_bmbt_rec_t));
				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
			}
			erp->er_extcount += ext_diff;
			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		}
		/* Insert a new extent page */
		else if (erp) {
			xfs_iext_add_indirect_multi(ifp,
				erp_idx, page_idx, ext_diff);
		}
		/*
		 * If extent(s) are being appended to the last page in
		 * the indirection array and the new extent(s) don't fit
		 * in the page, then erp is NULL and erp_idx is set to
		 * the next index needed in the indirection array.
		 */
		else {
			int	count = ext_diff;

			while (count) {
				erp = xfs_iext_irec_new(ifp, erp_idx);
				erp->er_extcount = count;
				count -= MIN(count, (int)XFS_LINEAR_EXTS);
				if (count) {
					erp_idx++;
				}
			}
		}
	}
	ifp->if_bytes = new_size;
}

/*
 * This is called when incore extents are being added to the indirection
 * array and the new extents do not fit in the target extent list.
The * erp_idx parameter contains the irec index for the target extent list * in the indirection array, and the idx parameter contains the extent * index within the list. The number of extents being added is stored * in the count parameter. * * |-------| |-------| * | | | | idx - number of extents before idx * | idx | | count | * | | | | count - number of extents being inserted at idx * |-------| |-------| * | count | | nex2 | nex2 - number of extents after idx + count * |-------| |-------| */ void xfs_iext_add_indirect_multi( xfs_ifork_t *ifp, /* inode fork pointer */ int erp_idx, /* target extent irec index */ xfs_extnum_t idx, /* index within target list */ int count) /* new extents being added */ { int byte_diff; /* new bytes being added */ xfs_ext_irec_t *erp; /* pointer to irec entry */ xfs_extnum_t ext_diff; /* number of extents to add */ xfs_extnum_t ext_cnt; /* new extents still needed */ xfs_extnum_t nex2; /* extents after idx + count */ xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ int nlists; /* number of irec's (lists) */ ASSERT(ifp->if_flags & XFS_IFEXTIREC); erp = &ifp->if_u1.if_ext_irec[erp_idx]; nex2 = erp->er_extcount - idx; nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; /* * Save second part of target extent list * (all extents past */ if (nex2) { byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); erp->er_extcount -= nex2; xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); memset(&erp->er_extbuf[idx], 0, byte_diff); } /* * Add the new extents to the end of the target * list, then allocate new irec record(s) and * extent buffer(s) as needed to store the rest * of the new extents. 
*/ ext_cnt = count; ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); if (ext_diff) { erp->er_extcount += ext_diff; xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); ext_cnt -= ext_diff; } while (ext_cnt) { erp_idx++; erp = xfs_iext_irec_new(ifp, erp_idx); ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); erp->er_extcount = ext_diff; xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); ext_cnt -= ext_diff; } /* Add nex2 extents back to indirection array */ if (nex2) { xfs_extnum_t ext_avail; int i; byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; i = 0; /* * If nex2 extents fit in the current page, append * nex2_ep after the new extents. */ if (nex2 <= ext_avail) { i = erp->er_extcount; } /* * Otherwise, check if space is available in the * next page. */ else if ((erp_idx < nlists - 1) && (nex2 <= (ext_avail = XFS_LINEAR_EXTS - ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { erp_idx++; erp++; /* Create a hole for nex2 extents */ memmove(&erp->er_extbuf[nex2], erp->er_extbuf, erp->er_extcount * sizeof(xfs_bmbt_rec_t)); } /* * Final choice, create a new extent page for * nex2 extents. */ else { erp_idx++; erp = xfs_iext_irec_new(ifp, erp_idx); } memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); kmem_free(nex2_ep); erp->er_extcount += nex2; xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); } } /* * This is called when the amount of space required for incore file * extents needs to be decreased. The ext_diff parameter stores the * number of extents to be removed and the idx parameter contains * the extent index where the extents will be removed from. * * If the amount of space needed has decreased below the linear * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous * extent array. Otherwise, use kmem_realloc() to adjust the * size to what is needed. 
 */
void
xfs_iext_remove(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff,	/* number of extents to remove */
	int		state)		/* type of extent conversion */
{
	/* BMAP_ATTRFORK in state selects the attribute fork; else data fork. */
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);

	ASSERT(ext_diff > 0);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);

	/*
	 * Dispatch on the fork's current storage format: empty,
	 * indirection array, direct (heap) list, or inline buffer.
	 */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_iext_remove_indirect(ifp, idx, ext_diff);
	} else if (ifp->if_real_bytes) {
		xfs_iext_remove_direct(ifp, idx, ext_diff);
	} else {
		xfs_iext_remove_inline(ifp, idx, ext_diff);
	}
	ifp->if_bytes = new_size;
}

/*
 * This removes ext_diff extents from the inline buffer, beginning
 * at extent index idx.
 */
void
xfs_iext_remove_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	int		nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	ASSERT(idx < XFS_INLINE_EXTS);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(((nextents - ext_diff) > 0) &&
		(nextents - ext_diff) < XFS_INLINE_EXTS);

	if (idx + ext_diff < nextents) {
		/* Close the gap, then zero the now-unused tail slots. */
		memmove(&ifp->if_u2.if_inline_ext[idx],
			&ifp->if_u2.if_inline_ext[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			sizeof(xfs_bmbt_rec_t));
		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
			0, ext_diff * sizeof(xfs_bmbt_rec_t));
	} else {
		/* Removal is at the end of the list: just zero in place. */
		memset(&ifp->if_u2.if_inline_ext[idx], 0,
			ext_diff * sizeof(xfs_bmbt_rec_t));
	}
}

/*
 * This removes ext_diff extents from a linear (direct) extent list,
 * beginning at extent index idx. If the extents are being removed
 * from the end of the list (ie. truncate) then we just need to re-
 * allocate the list to remove the extra space. Otherwise, if the
 * extents are being removed from the middle of the existing extent
 * entries, then we first need to move the extent records beginning
 * at idx + ext_diff up in the list to overwrite the records being
 * removed, then remove the extra space via kmem_realloc.
 */
void
xfs_iext_remove_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	new_size = ifp->if_bytes -
		(ext_diff * sizeof(xfs_bmbt_rec_t));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
		return;
	}
	/* Move extents up in the list (if needed) */
	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u1.if_extents[idx],
			&ifp->if_u1.if_extents[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			sizeof(xfs_bmbt_rec_t));
	}
	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
		0, ext_diff * sizeof(xfs_bmbt_rec_t));
	/*
	 * Reallocate the direct extent list. If the extents
	 * will fit inside the inode then xfs_iext_realloc_direct
	 * will switch from direct to inline extent allocation
	 * mode for us.
	 */
	xfs_iext_realloc_direct(ifp, new_size);
	ifp->if_bytes = new_size;
}

/*
 * This is called when incore extents are being removed from the
 * indirection array and the extents being removed span multiple extent
 * buffers. The idx parameter contains the file extent index where we
 * want to begin removing extents, and the count parameter contains
 * how many extents need to be removed.
 *
 *    |-------|   |-------|
 *    | nex1  |   |       |    nex1 - number of extents before idx
 *    |-------|   | count |
 *    |       |   |       |    count - number of extents being removed at idx
 *    | count |   |-------|
 *    |       |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_remove_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing extents */
	int		count)		/* number of extents to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		erp_idx = 0;	/* indirection array index */
	xfs_extnum_t	ext_cnt;	/* extents left to remove */
	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
	xfs_extnum_t	nex1;		/* number of extents before idx */
	xfs_extnum_t	nex2;		/* extents after idx + count */
	int		page_idx = idx;	/* index in target extent list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	/* Translate the file-wide index into (irec, in-page index). */
	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
	ASSERT(erp != NULL);
	nex1 = page_idx;
	ext_cnt = count;
	while (ext_cnt) {
		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
		/*
		 * Check for deletion of entire list;
		 * xfs_iext_irec_remove() updates extent offsets.
		 */
		if (ext_diff == erp->er_extcount) {
			xfs_iext_irec_remove(ifp, erp_idx);
			ext_cnt -= ext_diff;
			nex1 = 0;
			if (ext_cnt) {
				ASSERT(erp_idx < ifp->if_real_bytes /
					XFS_IEXT_BUFSZ);
				/*
				 * The remove shifted later irecs down into
				 * erp_idx; re-fetch rather than increment.
				 */
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				nex1 = 0;
				continue;
			} else {
				break;
			}
		}
		/* Move extents up (if needed) */
		if (nex2) {
			memmove(&erp->er_extbuf[nex1],
				&erp->er_extbuf[nex1 + ext_diff],
				nex2 * sizeof(xfs_bmbt_rec_t));
		}
		/* Zero out rest of page */
		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
		/* Update remaining counters */
		erp->er_extcount -= ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
		ext_cnt -= ext_diff;
		nex1 = 0;
		erp_idx++;
		erp++;
	}
	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
	xfs_iext_irec_compact(ifp);
}

/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		/* Allocations are kept at power-of-2 sizes to amortize. */
		if (!is_power_of_2(new_size)){
			rnew_size = roundup_pow_of_two(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list. Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}

/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	/* Point the union back at the inline buffer inside the fork. */
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}

/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here. It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* number of extents in file */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		/* Zero the inline buffer so direct_to_inline can rely on it. */
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}

/*
 * Resize an extent indirection array to new_size bytes.
 */
STATIC void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}

/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
STATIC void
xfs_iext_indirect_to_direct(
	 xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	/* Merge everything into the first (single) extent page. */
	xfs_iext_irec_compact_pages(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	/* Steal the page's extent buffer, then free the irec array itself. */
	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}

/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		/* Remove back-to-front so er_extoffs need no fixup. */
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}

/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	/*
	 * No exact hit: if bno lies past the probed extent, advance to
	 * the next record (or NULL when bno is beyond the last extent).
	 */
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	/* Binary search on the first startoff of each extent page. */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
*/ xfs_ext_irec_t * xfs_iext_idx_to_irec( xfs_ifork_t *ifp, /* inode fork pointer */ xfs_extnum_t *idxp, /* extent index (file -> page) */ int *erp_idxp, /* pointer to target irec */ int realloc) /* new bytes were just added */ { xfs_ext_irec_t *prev; /* pointer to previous irec */ xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ int erp_idx; /* indirection array index */ int nlists; /* number of irec's (ex lists) */ int high; /* binary search upper limit */ int low; /* binary search lower limit */ xfs_extnum_t page_idx = *idxp; /* extent index in target list */ ASSERT(ifp->if_flags & XFS_IFEXTIREC); ASSERT(page_idx >= 0); ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc); nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; erp_idx = 0; low = 0; high = nlists - 1; /* Binary search extent irec's */ while (low <= high) { erp_idx = (low + high) >> 1; erp = &ifp->if_u1.if_ext_irec[erp_idx]; prev = erp_idx > 0 ? erp - 1 : NULL; if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { high = erp_idx - 1; } else if (page_idx > erp->er_extoff + erp->er_extcount || (page_idx == erp->er_extoff + erp->er_extcount && !realloc)) { low = erp_idx + 1; } else if (page_idx == erp->er_extoff + erp->er_extcount && erp->er_extcount == XFS_LINEAR_EXTS) { ASSERT(realloc); page_idx = 0; erp_idx++; erp = erp_idx < nlists ? erp + 1 : NULL; break; } else { page_idx -= erp->er_extoff; break; } } *idxp = page_idx; *erp_idxp = erp_idx; return(erp); } /* * Allocate and initialize an indirection array once the space needed * for incore extents increases above XFS_IEXT_BUFSZ. 
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	/* Ensure the existing extents live in a full-page direct buffer. */
	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	/* The direct buffer becomes page 0 of the indirection array. */
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;

	return;
}

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	/* Offset continues from the previous page (0 for the first page). */
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return (&erp[erp_idx]);
}

/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.
 The
 * compaction policy is as follows:
 *
 * Full Compaction:    Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 * No Compaction:      Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			/* Next page fits entirely into the current page. */
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
gpl-2.0
kevinzyuan/ok6410
arch/mn10300/kernel/asm-offsets.c
1441
3286
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kbuild.h>
#include <asm/ucontext.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include "sigframe.h"
#include "mn10300-serial.h"

/*
 * Emit one OFFSET()/DEFINE() line of asm per constant; the kbuild
 * asm-offsets machinery turns these into #defines for .S files.
 * This function is never called at runtime.
 */
void foo(void)
{
	/* sigcontext register save slots */
	OFFSET(SIGCONTEXT_d0, sigcontext, d0);
	OFFSET(SIGCONTEXT_d1, sigcontext, d1);

	BLANK();

	/* thread_info fields used by entry/interrupt assembly */
	OFFSET(TI_task,			thread_info, task);
	OFFSET(TI_exec_domain,		thread_info, exec_domain);
	OFFSET(TI_flags,		thread_info, flags);
	OFFSET(TI_cpu,			thread_info, cpu);
	OFFSET(TI_preempt_count,	thread_info, preempt_count);
	OFFSET(TI_addr_limit,		thread_info, addr_limit);
	OFFSET(TI_restart_block,	thread_info, restart_block);

	BLANK();

	/* pt_regs layout as seen by exception entry code */
	OFFSET(REG_D0,			pt_regs, d0);
	OFFSET(REG_D1,			pt_regs, d1);
	OFFSET(REG_D2,			pt_regs, d2);
	OFFSET(REG_D3,			pt_regs, d3);
	OFFSET(REG_A0,			pt_regs, a0);
	OFFSET(REG_A1,			pt_regs, a1);
	OFFSET(REG_A2,			pt_regs, a2);
	OFFSET(REG_A3,			pt_regs, a3);
	OFFSET(REG_E0,			pt_regs, e0);
	OFFSET(REG_E1,			pt_regs, e1);
	OFFSET(REG_E2,			pt_regs, e2);
	OFFSET(REG_E3,			pt_regs, e3);
	OFFSET(REG_E4,			pt_regs, e4);
	OFFSET(REG_E5,			pt_regs, e5);
	OFFSET(REG_E6,			pt_regs, e6);
	OFFSET(REG_E7,			pt_regs, e7);
	OFFSET(REG_SP,			pt_regs, sp);
	OFFSET(REG_EPSW,		pt_regs, epsw);
	OFFSET(REG_PC,			pt_regs, pc);
	OFFSET(REG_LAR,			pt_regs, lar);
	OFFSET(REG_LIR,			pt_regs, lir);
	OFFSET(REG_MDR,			pt_regs, mdr);
	OFFSET(REG_MCVF,		pt_regs, mcvf);
	OFFSET(REG_MCRL,		pt_regs, mcrl);
	OFFSET(REG_MCRH,		pt_regs, mcrh);
	OFFSET(REG_MDRQ,		pt_regs, mdrq);
	OFFSET(REG_ORIG_D0,		pt_regs, orig_d0);
	OFFSET(REG_NEXT,		pt_regs, next);
	DEFINE(REG__END,		sizeof(struct pt_regs));

	BLANK();

	/* thread_struct fields used by the context-switch assembly */
	OFFSET(THREAD_UREGS,		thread_struct, uregs);
	OFFSET(THREAD_PC,		thread_struct, pc);
	OFFSET(THREAD_SP,		thread_struct, sp);
	OFFSET(THREAD_A3,		thread_struct, a3);
	OFFSET(THREAD_USP,		thread_struct, usp);
	OFFSET(THREAD_FRAME,		thread_struct, __frame);

	BLANK();

	/* clone()/signal constants mirrored for asm use */
	DEFINE(CLONE_VM_asm,		CLONE_VM);
	DEFINE(CLONE_FS_asm,		CLONE_FS);
	DEFINE(CLONE_FILES_asm,		CLONE_FILES);
	DEFINE(CLONE_SIGHAND_asm,	CLONE_SIGHAND);
	DEFINE(CLONE_UNTRACED_asm,	CLONE_UNTRACED);
	DEFINE(SIGCHLD_asm,		SIGCHLD);

	BLANK();

	OFFSET(EXEC_DOMAIN_handler,	exec_domain, handler);
	OFFSET(RT_SIGFRAME_sigcontext,	rt_sigframe, uc.uc_mcontext);
	DEFINE(PAGE_SIZE_asm,		PAGE_SIZE);

	/* mn10300 on-chip serial driver state, accessed from asm ISRs */
	OFFSET(__rx_buffer,		mn10300_serial_port, rx_buffer);
	OFFSET(__rx_inp,		mn10300_serial_port, rx_inp);
	OFFSET(__rx_outp,		mn10300_serial_port, rx_outp);
	OFFSET(__uart_state,		mn10300_serial_port, uart.state);
	OFFSET(__tx_xchar,		mn10300_serial_port, tx_xchar);
	OFFSET(__tx_break,		mn10300_serial_port, tx_break);
	OFFSET(__intr_flags,		mn10300_serial_port, intr_flags);
	OFFSET(__rx_icr,		mn10300_serial_port, rx_icr);
	OFFSET(__tx_icr,		mn10300_serial_port, tx_icr);
	OFFSET(__tm_icr,		mn10300_serial_port, _tmicr);
	OFFSET(__iobase,		mn10300_serial_port, _iobase);

	DEFINE(__UART_XMIT_SIZE,	UART_XMIT_SIZE);
	OFFSET(__xmit_buffer,		uart_state, xmit.buf);
	OFFSET(__xmit_head,		uart_state, xmit.head);
	OFFSET(__xmit_tail,		uart_state, xmit.tail);
}
gpl-2.0
gchild320/flounder
net/netfilter/nf_log.c
1953
9122
#include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/netfilter/nf_log.h> #include "nf_internals.h" /* Internal logging interface, which relies on the real LOG target modules */ #define NF_LOG_PREFIXLEN 128 #define NFLOGGER_NAME_LEN 64 static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *t; list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) { if (!strnicmp(str_logger, t->name, strlen(t->name))) return t; } return NULL; } void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger) { const struct nf_logger *log; if (pf == NFPROTO_UNSPEC) return; mutex_lock(&nf_log_mutex); log = rcu_dereference_protected(net->nf.nf_loggers[pf], lockdep_is_held(&nf_log_mutex)); if (log == NULL) rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_set); void nf_log_unset(struct net *net, const struct nf_logger *logger) { int i; const struct nf_logger *log; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = rcu_dereference_protected(net->nf.nf_loggers[i], lockdep_is_held(&nf_log_mutex)); if (log == logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unset); /* return EEXIST if the same logger is registered, 0 on success. 
 */
/*
 * Add @logger to the global registry.  With pf == NFPROTO_UNSPEC the
 * logger is appended to every family's list; otherwise only to @pf's.
 * Appending (not prepending) preserves "first registered wins" ordering
 * for __find_logger().  Despite the comment above, this version always
 * returns 0 or -EINVAL; duplicate registration is not detected here.
 */
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
	int i;

	if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
		return -EINVAL;

	/* self-init all list links so nf_log_unregister() can
	 * unconditionally list_del() every one of them */
	for (i = 0; i < ARRAY_SIZE(logger->list); i++)
		INIT_LIST_HEAD(&logger->list[i]);

	mutex_lock(&nf_log_mutex);

	if (pf == NFPROTO_UNSPEC) {
		for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
			list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
	} else {
		/* register at end of list to honor first register win */
		list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
	}

	mutex_unlock(&nf_log_mutex);

	return 0;
}
EXPORT_SYMBOL(nf_log_register);

/*
 * Remove @logger from the registry for all families.  list_del() on a
 * never-added entry is safe here because nf_log_register() initialized
 * every link.  Does not clear per-net active slots — callers use
 * nf_log_unset() for that.
 */
void nf_log_unregister(struct nf_logger *logger)
{
	int i;

	mutex_lock(&nf_log_mutex);
	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		list_del(&logger->list[i]);
	mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unregister);

/*
 * Bind @logger (looked up by name in the registry) as the active logger
 * for family @pf in namespace @net.  Returns -ENOENT if no logger of
 * that name is registered for @pf, -EINVAL on a bad family.
 */
int nf_log_bind_pf(struct net *net, u_int8_t pf,
		   const struct nf_logger *logger)
{
	if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
		return -EINVAL;
	mutex_lock(&nf_log_mutex);
	if (__find_logger(pf, logger->name) == NULL) {
		mutex_unlock(&nf_log_mutex);
		return -ENOENT;
	}
	rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
	mutex_unlock(&nf_log_mutex);
	return 0;
}
EXPORT_SYMBOL(nf_log_bind_pf);

/*
 * Clear the active logger for family @pf in namespace @net.
 * Note: no synchronize_rcu() here — in-flight nf_log_packet() calls may
 * still use the old logger briefly (they hold rcu_read_lock()).
 */
void nf_log_unbind_pf(struct net *net, u_int8_t pf)
{
	if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
		return;
	mutex_lock(&nf_log_mutex);
	RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL);
	mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unbind_pf);

/*
 * Log one packet through the logger currently bound for @pf in @net.
 * Silently does nothing when no logger is bound.  The printf-style
 * prefix is formatted into a NF_LOG_PREFIXLEN buffer (vsnprintf
 * truncates); the logger is used under rcu_read_lock().
 */
void nf_log_packet(struct net *net,
		   u_int8_t pf,
		   unsigned int hooknum,
		   const struct sk_buff *skb,
		   const struct net_device *in,
		   const struct net_device *out,
		   const struct nf_loginfo *loginfo,
		   const char *fmt, ...)
{
	va_list args;
	char prefix[NF_LOG_PREFIXLEN];
	const struct nf_logger *logger;

	rcu_read_lock();
	logger = rcu_dereference(net->nf.nf_loggers[pf]);
	if (logger) {
		va_start(args, fmt);
		vsnprintf(prefix, sizeof(prefix), fmt, args);
		va_end(args);
		logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_packet);

#ifdef CONFIG_PROC_FS
/* seq_file iterator over the per-family slots; nf_log_mutex is held for
 * the whole walk (taken in start, dropped in stop) */
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	mutex_lock(&nf_log_mutex);

	if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(s);

	(*pos)++;

	if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
	mutex_unlock(&nf_log_mutex);
}

/*
 * Print one line per family: "<pf> <active-or-NONE> (registered,...)".
 * Runs with nf_log_mutex held (see seq_start), which is what makes the
 * rcu_dereference_protected() and the registry-list walk safe.
 */
static int seq_show(struct seq_file *s, void *v)
{
	loff_t *pos = v;
	const struct nf_logger *logger;
	struct nf_logger *t;
	int ret;
	struct net *net = seq_file_net(s);

	logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
					   lockdep_is_held(&nf_log_mutex));

	if (!logger)
		ret = seq_printf(s, "%2lld NONE (", *pos);
	else
		ret = seq_printf(s, "%2lld %s (", *pos, logger->name);

	if (ret < 0)
		return ret;

	list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
		ret = seq_printf(s, "%s", t->name);
		if (ret < 0)
			return ret;
		/* comma between entries, none after the last one */
		if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
			ret = seq_printf(s, ",");
			if (ret < 0)
				return ret;
		}
	}

	return seq_printf(s, ")\n");
}

static const struct seq_operations nflog_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nflog_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &nflog_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations nflog_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nflog_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

#endif /* PROC_FS */

#ifdef CONFIG_SYSCTL
static char
nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; static int nf_log_proc_dostring(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; size_t size = *lenp; int r = 0; int tindex = (unsigned long)table->extra1; struct net *net = current->nsproxy->net_ns; if (write) { if (size > sizeof(buf)) size = sizeof(buf); if (copy_from_user(buf, buffer, size)) return -EFAULT; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(net, tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); logger = rcu_dereference_protected(net->nf.nf_loggers[tindex], lockdep_is_held(&nf_log_mutex)); if (!logger) table->data = "NONE"; else table->data = logger->name; r = proc_dostring(table, write, buffer, lenp, ppos); mutex_unlock(&nf_log_mutex); } return r; } static int netfilter_log_sysctl_init(struct net *net) { int i; struct ctl_table *table; table = nf_log_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(nf_log_sysctl_table, sizeof(nf_log_sysctl_table), GFP_KERNEL); if (!table) goto err_alloc; } else { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i], 3, "%d", i); nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i]; nf_log_sysctl_table[i].data = NULL; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN * sizeof(char); nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } } net->nf.nf_log_dir_header = register_net_sysctl(net, "net/netfilter/nf_log", table); if (!net->nf.nf_log_dir_header) goto err_reg; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); 
err_alloc: return -ENOMEM; } static void netfilter_log_sysctl_exit(struct net *net) { struct ctl_table *table; table = net->nf.nf_log_dir_header->ctl_table_arg; unregister_net_sysctl_table(net->nf.nf_log_dir_header); if (!net_eq(net, &init_net)) kfree(table); } #else static int netfilter_log_sysctl_init(struct net *net) { return 0; } static void netfilter_log_sysctl_exit(struct net *net) { } #endif /* CONFIG_SYSCTL */ static int __net_init nf_log_net_init(struct net *net) { int ret = -ENOMEM; #ifdef CONFIG_PROC_FS if (!proc_create("nf_log", S_IRUGO, net->nf.proc_netfilter, &nflog_file_ops)) return ret; #endif ret = netfilter_log_sysctl_init(net); if (ret < 0) goto out_sysctl; return 0; out_sysctl: #ifdef CONFIG_PROC_FS /* For init_net: errors will trigger panic, don't unroll on error. */ if (!net_eq(net, &init_net)) remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif return ret; } static void __net_exit nf_log_net_exit(struct net *net) { netfilter_log_sysctl_exit(net); #ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif } static struct pernet_operations nf_log_net_ops = { .init = nf_log_net_init, .exit = nf_log_net_exit, }; int __init netfilter_log_init(void) { int i, ret; ret = register_pernet_subsys(&nf_log_net_ops); if (ret < 0) return ret; for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) INIT_LIST_HEAD(&(nf_loggers_l[i])); return 0; }
gpl-2.0
kogone/android_kernel_oneplus_msm8974
arch/arm/mach-msm/board-8930-camera.c
1953
16952
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/mach-types.h>
#include <linux/gpio.h>
#include <mach/camera.h>
#include <mach/msm_bus_board.h>
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8930.h"

#ifdef CONFIG_MSM_CAMERA
#if (defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)) && \
	defined(CONFIG_I2C)
/* SX1508 GPIO expander on the camera I2C bus (address 0x22) */
static struct i2c_board_info cam_expander_i2c_info[] = {
	{
		I2C_BOARD_INFO("sx1508q", 0x22),
		.platform_data = &msm8930_sx150x_data[SX150X_CAM]
	},
};
static struct msm_cam_expander_info cam_expander_info[] = {
	{
		cam_expander_i2c_info,
		MSM_8930_GSBI4_QUP_I2C_BUS_ID,
	},
};
#endif

/* gpiomux function/drive-strength/pull settings, referenced by index
 * from the config tables below */
static struct gpiomux_setting cam_settings[] = {
	{
		.func = GPIOMUX_FUNC_GPIO, /*suspend*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_DOWN,
	},
	{
		.func = GPIOMUX_FUNC_1, /*active 1*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_NONE,
	},
	{
		.func = GPIOMUX_FUNC_GPIO, /*active 2*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_NONE,
	},
	{
		.func = GPIOMUX_FUNC_1, /*active 3*/
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_NONE,
	},
	{
		.func = GPIOMUX_FUNC_5, /*active 4*/
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_UP,
	},
	{
		.func = GPIOMUX_FUNC_6, /*active 5*/
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_UP,
	},
	{
		.func = GPIOMUX_FUNC_2, /*active 6*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_UP,
	},
	{
		.func = GPIOMUX_FUNC_3, /*active 7*/
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_UP,
	},
	{
		.func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_KEEPER,
	},
	{
		.func = GPIOMUX_FUNC_2, /*active 9*/
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_NONE,
	},
};

/* Pins shared by both cameras: MCLKs, resets, standby */
static struct msm_gpiomux_config msm8930_cam_common_configs[] = {
	{
		.gpio = 2,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[2],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 3,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[1],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 4,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[9],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 5,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[1],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 76,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[2],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 107,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[2],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
	{
		.gpio = 54,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[2],
			[GPIOMUX_SUSPENDED] = &cam_settings[0],
		},
	},
};

/* 2D-sensor interface pins (GPIOs 18-21) */
static struct msm_gpiomux_config msm8930_cam_2d_configs[] = {
	{
		.gpio = 18,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[3],
			[GPIOMUX_SUSPENDED] = &cam_settings[8],
		},
	},
	{
		.gpio = 19,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[3],
			[GPIOMUX_SUSPENDED] = &cam_settings[8],
		},
	},
	{
		.gpio = 20,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[3],
			[GPIOMUX_SUSPENDED] = &cam_settings[8],
		},
	},
	{
		.gpio = 21,
		.settings = {
			[GPIOMUX_ACTIVE]    = &cam_settings[3],
			[GPIOMUX_SUSPENDED] = &cam_settings[8],
		},
	},
};

#define VFE_CAMIF_TIMER1_GPIO 2
#define VFE_CAMIF_TIMER2_GPIO 3
#define VFE_CAMIF_TIMER3_GPIO_INT 4

/* Flash control GPIOs: requested low at init, driven high (with delay,
 * in usecs) when the flash is armed */
static struct gpio flash_init_gpio[] = {
	{VFE_CAMIF_TIMER1_GPIO, GPIOF_OUT_INIT_LOW, "CAMIF_TIMER1"},
	{VFE_CAMIF_TIMER2_GPIO, GPIOF_OUT_INIT_LOW, "CAMIF_TIMER2"},
};

static struct msm_gpio_set_tbl flash_set_gpio[] = {
	{VFE_CAMIF_TIMER1_GPIO, GPIOF_OUT_INIT_HIGH, 2000},
	{VFE_CAMIF_TIMER2_GPIO, GPIOF_OUT_INIT_HIGH, 2000},
};

static struct msm_camera_sensor_strobe_flash_data strobe_flash_xenon = {
	.flash_trigger = VFE_CAMIF_TIMER2_GPIO,
	.flash_charge = VFE_CAMIF_TIMER1_GPIO,
	.flash_charge_done = VFE_CAMIF_TIMER3_GPIO_INT,
	.flash_recharge_duration = 50000,
	.irq = MSM_GPIO_TO_INT(VFE_CAMIF_TIMER3_GPIO_INT),
};

/* External TPS61310 LED flash driver; .led_en/.led_flash_en are
 * re-pointed at expander GPIOs for the CDP variant in msm8930_init_cam().
 * NOTE(review): "MAM_CAMERA_..." looks like a typo for "MSM_CAMERA_..."
 * but the constant is declared elsewhere — confirm against mach/camera.h
 * before renaming. */
static struct msm_camera_sensor_flash_src msm_flash_src = {
	.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
	.init_gpio_tbl = flash_init_gpio,
	.init_gpio_tbl_size = ARRAY_SIZE(flash_init_gpio),
	.set_gpio_tbl = flash_set_gpio,
	.set_gpio_tbl_size = ARRAY_SIZE(flash_set_gpio),
	._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
	._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
	._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_TPS61310,
};

/* Bus-bandwidth (ab = average, ib = instantaneous) vote sets for each
 * camera use case; indexed via cam_bus_client_config below */
static struct msm_bus_vectors cam_init_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
};

static struct msm_bus_vectors cam_preview_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 27648000,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
};

static struct msm_bus_vectors cam_video_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 600000000,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 206807040,
		.ib  = 488816640,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
};

static struct msm_bus_vectors cam_snapshot_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 600000000,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 540000000,
		.ib  = 1350000000,
	},
};

static struct msm_bus_vectors cam_zsl_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 600000000,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 540000000,
		.ib  = 1350000000,
	},
};

static struct msm_bus_vectors cam_video_ls_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 600000000,
		.ib  = 4264000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 206807040,
		.ib  = 488816640,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 540000000,
		.ib  = 1350000000,
	},
};

static struct msm_bus_vectors cam_dual_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 302071680,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 206807040,
		.ib  = 488816640,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 540000000,
		.ib  = 1350000000,
	},
};

static struct msm_bus_vectors cam_adv_video_vectors[] = {
	{
		.src = MSM_BUS_MASTER_VFE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 274406400,
		.ib  = 2656000000UL,
	},
	{
		.src = MSM_BUS_MASTER_VPE,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 206807040,
		.ib  = 488816640,
	},
	{
		.src = MSM_BUS_MASTER_JPEG_ENC,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	},
};

/* Use-case index -> vector set; order defines the client's vote levels */
static struct msm_bus_paths cam_bus_client_config[] = {
	{
		ARRAY_SIZE(cam_init_vectors),
		cam_init_vectors,
	},
	{
		ARRAY_SIZE(cam_preview_vectors),
		cam_preview_vectors,
	},
	{
		ARRAY_SIZE(cam_video_vectors),
		cam_video_vectors,
	},
	{
		ARRAY_SIZE(cam_snapshot_vectors),
		cam_snapshot_vectors,
	},
	{
		ARRAY_SIZE(cam_zsl_vectors),
		cam_zsl_vectors,
	},
	{
		ARRAY_SIZE(cam_video_ls_vectors),
		cam_video_ls_vectors,
	},
	{
		ARRAY_SIZE(cam_dual_vectors),
		cam_dual_vectors,
	},
	{
		ARRAY_SIZE(cam_adv_video_vectors),
		cam_adv_video_vectors,
	},
};

static struct msm_bus_scale_pdata cam_bus_client_pdata = {
	cam_bus_client_config,
	ARRAY_SIZE(cam_bus_client_config),
	.name = "msm_camera",
};

/* CSI PHY/ID core assignments: [0] back camera, [1] front camera */
static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = {
	{
		.csiphy_core = 0,
		.csid_core = 0,
		.is_vpe    = 1,
		.cam_bus_scale_table = &cam_bus_client_pdata,
	},
	{
		.csiphy_core = 1,
		.csid_core = 1,
		.is_vpe    = 1,
		.cam_bus_scale_table = &cam_bus_client_pdata,
	},
};

/* Camera rails: {name, type, min_uV, max_uV, load_uA} */
static struct camera_vreg_t msm_8930_cam_vreg[] = {
	{"cam_vdig", REG_LDO, 1200000, 1200000, 105000},
	{"cam_vio", REG_VS, 0, 0, 0},
	{"cam_vana", REG_LDO, 2800000, 2850000, 85600},
	{"cam_vaf", REG_LDO, 2800000, 2850000, 300000},
};

static struct gpio msm8930_common_cam_gpio[] = {
	{20, GPIOF_DIR_IN, "CAMIF_I2C_DATA"},
	{21, GPIOF_DIR_IN, "CAMIF_I2C_CLK"},
};

static struct gpio msm8930_front_cam_gpio[] = {
	{4, GPIOF_DIR_IN, "CAMIF_MCLK"},
	{76, GPIOF_DIR_OUT, "CAM_RESET"},
};

static struct gpio msm8930_back_cam_gpio[] = {
	{5, GPIOF_DIR_IN, "CAMIF_MCLK"},
	{107, GPIOF_DIR_OUT, "CAM_RESET"},
	{54, GPIOF_DIR_OUT, "CAM_STBY_N"},
};

/* Reset/standby pulse sequences: {gpio, value, delay in usecs} */
static struct msm_gpio_set_tbl msm8930_front_cam_gpio_set_tbl[] = {
	{76, GPIOF_OUT_INIT_LOW, 1000},
	{76, GPIOF_OUT_INIT_HIGH, 4000},
};

static struct msm_gpio_set_tbl msm8930_back_cam_gpio_set_tbl[] = {
	{54, GPIOF_OUT_INIT_LOW, 1000},
	{54, GPIOF_OUT_INIT_HIGH, 4000},
	{107, GPIOF_OUT_INIT_LOW, 1000},
	{107, GPIOF_OUT_INIT_HIGH, 4000},
};

static struct msm_camera_gpio_conf msm_8930_front_cam_gpio_conf = {
	.cam_gpiomux_conf_tbl = msm8930_cam_2d_configs,
	.cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8930_cam_2d_configs),
	.cam_gpio_common_tbl = msm8930_common_cam_gpio,
	.cam_gpio_common_tbl_size = ARRAY_SIZE(msm8930_common_cam_gpio),
	.cam_gpio_req_tbl = msm8930_front_cam_gpio,
	.cam_gpio_req_tbl_size = ARRAY_SIZE(msm8930_front_cam_gpio),
	.cam_gpio_set_tbl = msm8930_front_cam_gpio_set_tbl,
	.cam_gpio_set_tbl_size = ARRAY_SIZE(msm8930_front_cam_gpio_set_tbl),
};

static struct msm_camera_gpio_conf msm_8930_back_cam_gpio_conf = {
	.cam_gpiomux_conf_tbl = msm8930_cam_2d_configs,
	.cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8930_cam_2d_configs),
	.cam_gpio_common_tbl = msm8930_common_cam_gpio,
	.cam_gpio_common_tbl_size = ARRAY_SIZE(msm8930_common_cam_gpio),
	.cam_gpio_req_tbl = msm8930_back_cam_gpio,
	.cam_gpio_req_tbl_size = ARRAY_SIZE(msm8930_back_cam_gpio),
	.cam_gpio_set_tbl = msm8930_back_cam_gpio_set_tbl,
	.cam_gpio_set_tbl_size = ARRAY_SIZE(msm8930_back_cam_gpio_set_tbl),
};

/* Lens actuator (auto-focus) at I2C address 0x11, shared by both
 * back-camera sensor definitions below */
static struct i2c_board_info msm_act_main_cam_i2c_info = {
	I2C_BOARD_INFO("msm_actuator", 0x11),
};

static struct msm_actuator_info msm_act_main_cam_0_info = {
	.board_info     = &msm_act_main_cam_i2c_info,
	.cam_name   = MSM_ACTUATOR_MAIN_CAM_0,
	.bus_id         = MSM_8930_GSBI4_QUP_I2C_BUS_ID,
	.vcm_pwd        = 0,
	.vcm_enable     = 0,
};

/* --- imx074: back camera, Bayer, LED flash + xenon strobe --- */
static struct msm_camera_sensor_flash_data flash_imx074 = {
	.flash_type	= MSM_CAMERA_FLASH_LED,
	.flash_src	= &msm_flash_src
};

static struct msm_camera_csi_lane_params imx074_csi_lane_params = {
	.csi_lane_assign = 0xE4,
	.csi_lane_mask = 0xF,
};

static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = {
	.mount_angle	= 90,
	.cam_vreg = msm_8930_cam_vreg,
	.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
	.gpio_conf = &msm_8930_back_cam_gpio_conf,
	.csi_lane_params = &imx074_csi_lane_params,
};

static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = {
	.sensor_name	= "imx074",
	.pdata	= &msm_camera_csi_device_data[0],
	.flash_data	= &flash_imx074,
	.strobe_flash_data = &strobe_flash_xenon,
	.sensor_platform_info = &sensor_board_info_imx074,
	.csi_if	= 1,
	.camera_type = BACK_CAMERA_2D,
	.sensor_type = BAYER_SENSOR,
	.actuator_info = &msm_act_main_cam_0_info,
};

/* --- mt9m114: front camera, YUV, no flash --- */
static struct msm_camera_sensor_flash_data flash_mt9m114 = {
	.flash_type = MSM_CAMERA_FLASH_NONE
};

static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = {
	.csi_lane_assign = 0xE4,
	.csi_lane_mask = 0x1,
};

static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = {
	.mount_angle = 90,
	.cam_vreg = msm_8930_cam_vreg,
	.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
	.gpio_conf = &msm_8930_front_cam_gpio_conf,
	.csi_lane_params = &mt9m114_csi_lane_params,
};

static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = {
	.sensor_name = "mt9m114",
	.pdata = &msm_camera_csi_device_data[1],
	.flash_data = &flash_mt9m114,
	.sensor_platform_info = &sensor_board_info_mt9m114,
	.csi_if = 1,
	.camera_type = FRONT_CAMERA_2D,
	.sensor_type = YUV_SENSOR,
};

/* --- ov2720: alternate front camera, Bayer, no flash --- */
static struct msm_camera_sensor_flash_data flash_ov2720 = {
	.flash_type	= MSM_CAMERA_FLASH_NONE,
};

static struct msm_camera_csi_lane_params ov2720_csi_lane_params = {
	.csi_lane_assign = 0xE4,
	.csi_lane_mask = 0x3,
};

static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = {
	.mount_angle	= 0,
	.cam_vreg = msm_8930_cam_vreg,
	.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
	.gpio_conf = &msm_8930_front_cam_gpio_conf,
	.csi_lane_params = &ov2720_csi_lane_params,
};

static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = {
	.sensor_name	= "ov2720",
	.pdata	= &msm_camera_csi_device_data[1],
	.flash_data	= &flash_ov2720,
	.sensor_platform_info = &sensor_board_info_ov2720,
	.csi_if	= 1,
	.camera_type = FRONT_CAMERA_2D,
	.sensor_type = BAYER_SENSOR,
};

/* --- s5k3l1yx: alternate back camera, Bayer, TPS61310 LED flash --- */
static struct i2c_board_info tps61310_flash_i2c_info = {
	I2C_BOARD_INFO("tps61310", 0x66),
};

static struct msm_camera_sensor_flash_data flash_s5k3l1yx = {
	.flash_type = MSM_CAMERA_FLASH_LED,
	.flash_src = &msm_flash_src,
	.board_info = &tps61310_flash_i2c_info,
	.bus_id = MSM_8930_GSBI4_QUP_I2C_BUS_ID,
};

static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
	.csi_lane_assign = 0xE4,
	.csi_lane_mask = 0xF,
};

static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = {
	.mount_angle  = 90,
	.cam_vreg = msm_8930_cam_vreg,
	.num_vreg = ARRAY_SIZE(msm_8930_cam_vreg),
	.gpio_conf = &msm_8930_back_cam_gpio_conf,
	.csi_lane_params = &s5k3l1yx_csi_lane_params,
};

static struct msm_actuator_info msm_act_main_cam_2_info = {
	.board_info     = &msm_act_main_cam_i2c_info,
	.cam_name   = MSM_ACTUATOR_MAIN_CAM_2,
	.bus_id         = MSM_8930_GSBI4_QUP_I2C_BUS_ID,
	.vcm_pwd        = 0,
	.vcm_enable     = 0,
};

static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = {
	.sensor_name          = "s5k3l1yx",
	.pdata                = &msm_camera_csi_device_data[0],
	.flash_data           = &flash_s5k3l1yx,
	.sensor_platform_info = &sensor_board_info_s5k3l1yx,
	.csi_if               = 1,
	.camera_type          = BACK_CAMERA_2D,
	.sensor_type          = BAYER_SENSOR,
	.actuator_info    = &msm_act_main_cam_2_info,
};

static struct platform_device msm_camera_server = {
	.name = "msm_cam_server",
	.id = 0,
};

/*
 * Board init: install the common camera pin muxing, apply the CDP
 * variant's overrides (flat mount angle, flash enables routed through
 * the SX1508 expander), then register the camera core devices.
 */
void __init msm8930_init_cam(void)
{
	msm_gpiomux_install(msm8930_cam_common_configs,
			ARRAY_SIZE(msm8930_cam_common_configs));

	if (machine_is_msm8930_cdp()) {
		struct msm_camera_sensor_info *s_info;
		s_info = &msm_camera_sensor_s5k3l1yx_data;
		s_info->sensor_platform_info->mount_angle = 0;
		msm_flash_src._fsrc.ext_driver_src.led_en =
			GPIO_CAM_GP_LED_EN1;
		msm_flash_src._fsrc.ext_driver_src.led_flash_en =
			GPIO_CAM_GP_LED_EN2;
#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
	defined(CONFIG_GPIO_SX150X_MODULE))
		msm_flash_src._fsrc.ext_driver_src.expander_info =
			cam_expander_info;
#endif
	}

	platform_device_register(&msm_camera_server);
	platform_device_register(&msm8960_device_csiphy0);
	platform_device_register(&msm8960_device_csiphy1);
	platform_device_register(&msm8960_device_csid0);
	platform_device_register(&msm8960_device_csid1);
	platform_device_register(&msm8960_device_ispif);
	platform_device_register(&msm8960_device_vfe);
	platform_device_register(&msm8960_device_vpe);
}

#ifdef CONFIG_I2C
/* Sensor driver probe table for the GSBI4 camera I2C bus */
struct i2c_board_info msm8930_camera_i2c_boardinfo[] = {
	{
	I2C_BOARD_INFO("imx074", 0x1A),
	.platform_data = &msm_camera_sensor_imx074_data,
	},
	{
	I2C_BOARD_INFO("ov2720", 0x6C),
	.platform_data = &msm_camera_sensor_ov2720_data,
	},
	{
	I2C_BOARD_INFO("mt9m114", 0x48),
	.platform_data = &msm_camera_sensor_mt9m114_data,
	},
	{
	I2C_BOARD_INFO("s5k3l1yx", 0x20),
	.platform_data = &msm_camera_sensor_s5k3l1yx_data,
	},
};

struct msm_camera_board_info msm8930_camera_board_info = {
	.board_info = msm8930_camera_i2c_boardinfo,
	.num_i2c_board_info = ARRAY_SIZE(msm8930_camera_i2c_boardinfo),
};
#endif
#endif
gpl-2.0
garwedgess/android_kernel_lge_g4
arch/m68k/mvme147/config.c
2209
4823
/* * arch/m68k/mvme147/config.c * * Copyright (C) 1996 Dave Frascone [chaos@mindspring.com] * Cloned from Richard Hirst [richard@sleepie.demon.co.uk] * * Based on: * * Copyright (C) 1993 Hamish Macdonald * * This file is subject to the terms and conditions of the GNU General Public * License. See the file README.legal in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/linkage.h> #include <linux/init.h> #include <linux/major.h> #include <linux/genhd.h> #include <linux/rtc.h> #include <linux/interrupt.h> #include <asm/bootinfo.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/rtc.h> #include <asm/machdep.h> #include <asm/mvme147hw.h> static void mvme147_get_model(char *model); extern void mvme147_sched_init(irq_handler_t handler); extern u32 mvme147_gettimeoffset(void); extern int mvme147_hwclk (int, struct rtc_time *); extern int mvme147_set_clock_mmss (unsigned long); extern void mvme147_reset (void); static int bcd2int (unsigned char b); /* Save tick handler routine pointer, will point to xtime_update() in * kernel/time/timekeeping.c, called via mvme147_process_int() */ irq_handler_t tick_handler; int mvme147_parse_bootinfo(const struct bi_record *bi) { if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) return 0; else return 1; } void mvme147_reset(void) { printk ("\r\n\nCalled mvme147_reset\r\n"); m147_pcc->watchdog = 0x0a; /* Clear timer */ m147_pcc->watchdog = 0xa5; /* Enable watchdog - 100ms to reset */ while (1) ; } static void mvme147_get_model(char *model) { sprintf(model, "Motorola MVME147"); } /* * This function is called during kernel startup to initialize * the mvme147 IRQ handling routines. 
 */
void __init mvme147_init_IRQ(void)
{
	m68k_setup_user_interrupt(VEC_USER, 192);
}

/*
 * Wire up all the machine-specific callbacks for the MVME147 and set
 * the DMA ceiling; called once from the m68k setup code.
 */
void __init config_mvme147(void)
{
	mach_max_dma_address	= 0x01000000;
	mach_sched_init		= mvme147_sched_init;
	mach_init_IRQ		= mvme147_init_IRQ;
	arch_gettimeoffset	= mvme147_gettimeoffset;
	mach_hwclk		= mvme147_hwclk;
	mach_set_clock_mmss	= mvme147_set_clock_mmss;
	mach_reset		= mvme147_reset;
	mach_get_model		= mvme147_get_model;

	/* Board type is only set by newer versions of vmelilo/tftplilo */
	if (!vme_brdtype)
		vme_brdtype = VME_TYPE_MVME147;
}

/* Using pcc tick timer 1 */

/*
 * Tick interrupt: acknowledge/re-enable the PCC timer-1 interrupt, then
 * hand off to the saved kernel tick handler.
 */
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
	return tick_handler(irq, dev_id);
}


/*
 * Install the tick interrupt and program PCC timer 1 as the system
 * tick source (preload, clear, start, then enable its interrupt).
 */
void mvme147_sched_init (irq_handler_t timer_routine)
{
	tick_handler = timer_routine;
	if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
		pr_err("Couldn't register timer interrupt\n");

	/* Init the clock with a value */
	/* our clock goes off every 6.25us */
	m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
	m147_pcc->t1_cntrl = 0x0;	/* clear timer */
	m147_pcc->t1_cntrl = 0x3;	/* start timer */
	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
}

/* This is always executed with interrupts disabled.  */
/* XXX There are race hazards in this code XXX */
/*
 * Offset (ns) since the last tick: read the free-running counter twice
 * until stable (it can change mid-read), subtract the preload, and
 * scale by 6.25us per count (n * 25 / 4 gives microseconds, * 1000 ns).
 */
u32 mvme147_gettimeoffset(void)
{
	volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012;
	unsigned short n;

	n = *cp;
	while (n != *cp)
		n = *cp;

	n -= PCC_TIMER_PRELOAD;
	return ((unsigned long)n * 25 / 4) * 1000;
}

/* Convert one packed-BCD byte (two decimal digits) to binary. */
static int bcd2int (unsigned char b)
{
	return ((b>>4)*10 + (b&15));
}

/*
 * Read (op == 0) the battery-backed RTC into @t; the write path
 * (op != 0) is not implemented and silently reports success — note the
 * #warning below.
 * NOTE(review): tm_year is taken from a two-digit BCD register with no
 * century adjustment, and tm_mon is not converted to the kernel's
 * 0-based convention here — confirm against callers.
 */
int mvme147_hwclk(int op, struct rtc_time *t)
{
#warning check me!
	if (!op) {
		m147_rtc->ctrl = RTC_READ;	/* latch the clock registers */
		t->tm_year = bcd2int (m147_rtc->bcd_year);
		t->tm_mon  = bcd2int (m147_rtc->bcd_mth);
		t->tm_mday = bcd2int (m147_rtc->bcd_dom);
		t->tm_hour = bcd2int (m147_rtc->bcd_hr);
		t->tm_min  = bcd2int (m147_rtc->bcd_min);
		t->tm_sec  = bcd2int (m147_rtc->bcd_sec);
		m147_rtc->ctrl = 0;
	}
	return 0;
}

/* Stub: setting the RTC minutes/seconds is not implemented. */
int mvme147_set_clock_mmss (unsigned long nowtime)
{
	return 0;
}

/*------------------- Serial console stuff ------------------------*/

/* Short software delay between SCC register accesses. */
static void scc_delay (void)
{
	int n;
	volatile int trash;

	for (n = 0; n < 20; n++)
		trash = n;
}

/*
 * Transmit one character on SCC channel A: poll the status register
 * until the transmitter is ready (bit 2), then write the data.
 */
static void scc_write (char ch)
{
	volatile char *p = (volatile char *)M147_SCC_A_ADDR;

	do {
		scc_delay();
	}
	while (!(*p & 4));
	scc_delay();
	*p = 8;
	scc_delay();
	*p = ch;
}


/*
 * Console write: emit the buffer with LF -> CR-LF translation, with
 * interrupts disabled for the duration.
 */
void m147_scc_write (struct console *co, const char *str, unsigned count)
{
	unsigned long flags;

	local_irq_save(flags);

	while (count--)
	{
		if (*str == '\n')
			scc_write ('\r');
		scc_write (*str++);
	}
	local_irq_restore(flags);
}

/* Hook the SCC polled-output routine into the console structure. */
void mvme147_init_console_port (struct console *co, int cflag)
{
	co->write    = m147_scc_write;
}
gpl-2.0
quincykt/android_kernel_samsung_SHV-E170K
fs/ocfs2/dlmglue.c
2209
115760
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmglue.c * * Code which implements an OCFS2 specific interface to our DLM. * * Copyright (C) 2003, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/kthread.h> #include <linux/pagemap.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/time.h> #include <linux/quotaops.h> #define MLOG_MASK_PREFIX ML_DLM_GLUE #include <cluster/masklog.h> #include "ocfs2.h" #include "ocfs2_lockingver.h" #include "alloc.h" #include "dcache.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "heartbeat.h" #include "inode.h" #include "journal.h" #include "stackglue.h" #include "slot_map.h" #include "super.h" #include "uptodate.h" #include "quota.h" #include "refcounttree.h" #include "buffer_head_io.h" struct ocfs2_mask_waiter { struct list_head mw_item; int mw_status; struct completion mw_complete; unsigned long mw_mask; unsigned long mw_goal; #ifdef CONFIG_OCFS2_FS_STATS ktime_t mw_lock_start; #endif }; static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres); static 
struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres); static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres); /* * Return value from ->downconvert_worker functions. * * These control the precise actions of ocfs2_unblock_lock() * and ocfs2_process_blocked_lock() * */ enum ocfs2_unblock_action { UNBLOCK_CONTINUE = 0, /* Continue downconvert */ UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire * ->post_unlock callback */ UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire * ->post_unlock() callback. */ }; struct ocfs2_unblock_ctl { int requeue; enum ocfs2_unblock_action unblock_action; }; /* Lockdep class keys */ struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES]; static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, int new_level); static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres); static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, int blocking); static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, int blocking); static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres); static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres, int new_level); static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres, int blocking); #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres) /* This aids in debugging situations where a bad LVB might be involved. 
*/ static void ocfs2_dump_meta_lvb_info(u64 level, const char *function, unsigned int line, struct ocfs2_lock_res *lockres) { struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); mlog(level, "LVB information for %s (called from %s:%u):\n", lockres->l_name, function, line); mlog(level, "version: %u, clusters: %u, generation: 0x%x\n", lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters), be32_to_cpu(lvb->lvb_igeneration)); mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n", (unsigned long long)be64_to_cpu(lvb->lvb_isize), be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid), be16_to_cpu(lvb->lvb_imode)); mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, " "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink), (long long)be64_to_cpu(lvb->lvb_iatime_packed), (long long)be64_to_cpu(lvb->lvb_ictime_packed), (long long)be64_to_cpu(lvb->lvb_imtime_packed), be32_to_cpu(lvb->lvb_iattr)); } /* * OCFS2 Lock Resource Operations * * These fine tune the behavior of the generic dlmglue locking infrastructure. * * The most basic of lock types can point ->l_priv to their respective * struct ocfs2_super and allow the default actions to manage things. * * Right now, each lock type also needs to implement an init function, * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres() * should be called when the lock is no longer needed (i.e., object * destruction time). */ struct ocfs2_lock_res_ops { /* * Translate an ocfs2_lock_res * into an ocfs2_super *. Define * this callback if ->l_priv is not an ocfs2_super pointer */ struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *); /* * Optionally called in the downconvert thread after a * successful downconvert. The lockres will not be referenced * after this callback is called, so it is safe to free * memory, etc. 
* * The exact semantics of when this is called are controlled * by ->downconvert_worker() */ void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *); /* * Allow a lock type to add checks to determine whether it is * safe to downconvert a lock. Return 0 to re-queue the * downconvert at a later time, nonzero to continue. * * For most locks, the default checks that there are no * incompatible holders are sufficient. * * Called with the lockres spinlock held. */ int (*check_downconvert)(struct ocfs2_lock_res *, int); /* * Allows a lock type to populate the lock value block. This * is called on downconvert, and when we drop a lock. * * Locks that want to use this should set LOCK_TYPE_USES_LVB * in the flags field. * * Called with the lockres spinlock held. */ void (*set_lvb)(struct ocfs2_lock_res *); /* * Called from the downconvert thread when it is determined * that a lock will be downconverted. This is called without * any locks held so the function can do work that might * schedule (syncing out data, etc). * * This should return any one of the ocfs2_unblock_action * values, depending on what it wants the thread to do. */ int (*downconvert_worker)(struct ocfs2_lock_res *, int); /* * LOCK_TYPE_* flags which describe the specific requirements * of a lock type. Descriptions of each individual flag follow. */ int flags; }; /* * Some locks want to "refresh" potentially stale data when a * meaningful (PRMODE or EXMODE) lock level is first obtained. If this * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the * individual lockres l_flags member from the ast function. It is * expected that the locking wrapper will clear the * OCFS2_LOCK_NEEDS_REFRESH flag when done. */ #define LOCK_TYPE_REQUIRES_REFRESH 0x1 /* * Indicate that a lock type makes use of the lock value block. The * ->set_lvb lock type callback must be defined. 
*/ #define LOCK_TYPE_USES_LVB 0x2 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = { .get_osb = ocfs2_get_inode_osb, .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = { .get_osb = ocfs2_get_inode_osb, .check_downconvert = ocfs2_check_meta_downconvert, .set_lvb = ocfs2_set_meta_lvb, .downconvert_worker = ocfs2_data_convert_worker, .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, }; static struct ocfs2_lock_res_ops ocfs2_super_lops = { .flags = LOCK_TYPE_REQUIRES_REFRESH, }; static struct ocfs2_lock_res_ops ocfs2_rename_lops = { .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = { .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = { .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, }; static struct ocfs2_lock_res_ops ocfs2_dentry_lops = { .get_osb = ocfs2_get_dentry_osb, .post_unlock = ocfs2_dentry_post_unlock, .downconvert_worker = ocfs2_dentry_convert_worker, .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = { .get_osb = ocfs2_get_inode_osb, .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_flock_lops = { .get_osb = ocfs2_get_file_osb, .flags = 0, }; static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = { .set_lvb = ocfs2_set_qinfo_lvb, .get_osb = ocfs2_get_qinfo_osb, .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB, }; static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = { .check_downconvert = ocfs2_check_refcount_downconvert, .downconvert_worker = ocfs2_refcount_convert_worker, .flags = 0, }; static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) { return lockres->l_type == OCFS2_LOCK_TYPE_META || lockres->l_type == OCFS2_LOCK_TYPE_RW || lockres->l_type == OCFS2_LOCK_TYPE_OPEN; } static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb) { return container_of(lksb, struct ocfs2_lock_res, l_lksb); } static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) { 
BUG_ON(!ocfs2_is_inode_lock(lockres)); return (struct inode *) lockres->l_priv; } static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres) { BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY); return (struct ocfs2_dentry_lock *)lockres->l_priv; } static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres) { BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO); return (struct ocfs2_mem_dqinfo *)lockres->l_priv; } static inline struct ocfs2_refcount_tree * ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res) { return container_of(res, struct ocfs2_refcount_tree, rf_lockres); } static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres) { if (lockres->l_ops->get_osb) return lockres->l_ops->get_osb(lockres); return (struct ocfs2_super *)lockres->l_priv; } static int ocfs2_lock_create(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres, int level, u32 dlm_flags); static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres, int wanted); static void __ocfs2_cluster_unlock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres, int level, unsigned long caller_ip); static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres, int level) { __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_); } static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres); static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres); static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres); static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level); static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, int convert); #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \ if ((_lockres)->l_type != 
OCFS2_LOCK_TYPE_DENTRY) \ mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \ _err, _func, _lockres->l_name); \ else \ mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \ _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \ (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \ } while (0) static int ocfs2_downconvert_thread(void *arg); static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); static int ocfs2_inode_lock_update(struct inode *inode, struct buffer_head **bh); static void ocfs2_drop_osb_locks(struct ocfs2_super *osb); static inline int ocfs2_highest_compat_lock_level(int level); static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres, int new_level); static int ocfs2_downconvert_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres, int new_level, int lvb, unsigned int generation); static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); static int ocfs2_cancel_convert(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres); static void ocfs2_build_lock_name(enum ocfs2_lock_type type, u64 blkno, u32 generation, char *name) { int len; BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD, (long long)blkno, generation); BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); mlog(0, "built lock resource with name: %s\n", name); } static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, struct ocfs2_dlm_debug *dlm_debug) { mlog(0, "Add tracking for lockres %s\n", res->l_name); spin_lock(&ocfs2_dlm_tracking_lock); list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking); spin_unlock(&ocfs2_dlm_tracking_lock); } static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res) { spin_lock(&ocfs2_dlm_tracking_lock); if (!list_empty(&res->l_debug_list)) 
list_del_init(&res->l_debug_list); spin_unlock(&ocfs2_dlm_tracking_lock); } #ifdef CONFIG_OCFS2_FS_STATS static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) { res->l_lock_refresh = 0; memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats)); memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats)); } static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) { u32 usec; ktime_t kt; struct ocfs2_lock_stats *stats; if (level == LKM_PRMODE) stats = &res->l_lock_prmode; else if (level == LKM_EXMODE) stats = &res->l_lock_exmode; else return; kt = ktime_sub(ktime_get(), mw->mw_lock_start); usec = ktime_to_us(kt); stats->ls_gets++; stats->ls_total += ktime_to_ns(kt); /* overflow */ if (unlikely(stats->ls_gets) == 0) { stats->ls_gets++; stats->ls_total = ktime_to_ns(kt); } if (stats->ls_max < usec) stats->ls_max = usec; if (ret) stats->ls_fail++; } static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) { lockres->l_lock_refresh++; } static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) { mw->mw_lock_start = ktime_get(); } #else static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) { } static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) { } static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) { } static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) { } #endif static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, struct ocfs2_lock_res *res, enum ocfs2_lock_type type, struct ocfs2_lock_res_ops *ops, void *priv) { res->l_type = type; res->l_ops = ops; res->l_priv = priv; res->l_level = DLM_LOCK_IV; res->l_requested = DLM_LOCK_IV; res->l_blocking = DLM_LOCK_IV; res->l_action = OCFS2_AST_INVALID; res->l_unlock_action = OCFS2_UNLOCK_INVALID; res->l_flags = OCFS2_LOCK_INITIALIZED; ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug); 
ocfs2_init_lock_stats(res); #ifdef CONFIG_DEBUG_LOCK_ALLOC if (type != OCFS2_LOCK_TYPE_OPEN) lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type], &lockdep_keys[type], 0); else res->l_lockdep_map.key = NULL; #endif } void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) { /* This also clears out the lock status block */ memset(res, 0, sizeof(struct ocfs2_lock_res)); spin_lock_init(&res->l_lock); init_waitqueue_head(&res->l_event); INIT_LIST_HEAD(&res->l_blocked_list); INIT_LIST_HEAD(&res->l_mask_waiters); } void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, enum ocfs2_lock_type type, unsigned int generation, struct inode *inode) { struct ocfs2_lock_res_ops *ops; switch(type) { case OCFS2_LOCK_TYPE_RW: ops = &ocfs2_inode_rw_lops; break; case OCFS2_LOCK_TYPE_META: ops = &ocfs2_inode_inode_lops; break; case OCFS2_LOCK_TYPE_OPEN: ops = &ocfs2_inode_open_lops; break; default: mlog_bug_on_msg(1, "type: %d\n", type); ops = NULL; /* thanks, gcc */ break; }; ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno, generation, res->l_name); ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode); } static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres) { struct inode *inode = ocfs2_lock_res_inode(lockres); return OCFS2_SB(inode->i_sb); } static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres) { struct ocfs2_mem_dqinfo *info = lockres->l_priv; return OCFS2_SB(info->dqi_gi.dqi_sb); } static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres) { struct ocfs2_file_private *fp = lockres->l_priv; return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb); } static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres) { __be64 inode_blkno_be; memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], sizeof(__be64)); return be64_to_cpu(inode_blkno_be); } static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres) { struct ocfs2_dentry_lock *dl 
= lockres->l_priv; return OCFS2_SB(dl->dl_inode->i_sb); } void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, u64 parent, struct inode *inode) { int len; u64 inode_blkno = OCFS2_I(inode)->ip_blkno; __be64 inode_blkno_be = cpu_to_be64(inode_blkno); struct ocfs2_lock_res *lockres = &dl->dl_lockres; ocfs2_lock_res_init_once(lockres); /* * Unfortunately, the standard lock naming scheme won't work * here because we have two 16 byte values to use. Instead, * we'll stuff the inode number as a binary value. We still * want error prints to show something without garbling the * display, so drop a null byte in there before the inode * number. A future version of OCFS2 will likely use all * binary lock names. The stringified names have been a * tremendous aid in debugging, but now that the debugfs * interface exists, we can mangle things there if need be. * * NOTE: We also drop the standard "pad" value (the total lock * name size stays the same though - the last part is all * zeros due to the memset in ocfs2_lock_res_init_once() */ len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START, "%c%016llx", ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY), (long long)parent); BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1)); memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be, sizeof(__be64)); ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres, OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops, dl); } static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res, struct ocfs2_super *osb) { /* Superblock lockres doesn't come from a slab so we call init * once on it manually. */ ocfs2_lock_res_init_once(res); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO, 0, res->l_name); ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER, &ocfs2_super_lops, osb); } static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res, struct ocfs2_super *osb) { /* Rename lockres doesn't come from a slab so we call init * once on it manually. 
*/ ocfs2_lock_res_init_once(res); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name); ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, &ocfs2_rename_lops, osb); } static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res, struct ocfs2_super *osb) { /* nfs_sync lockres doesn't come from a slab so we call init * once on it manually. */ ocfs2_lock_res_init_once(res); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name); ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC, &ocfs2_nfs_sync_lops, osb); } static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res, struct ocfs2_super *osb) { ocfs2_lock_res_init_once(res); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name); ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN, &ocfs2_orphan_scan_lops, osb); } void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_file_private *fp) { struct inode *inode = fp->fp_file->f_mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); ocfs2_lock_res_init_once(lockres); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno, inode->i_generation, lockres->l_name); ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres, OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops, fp); lockres->l_flags |= OCFS2_LOCK_NOCACHE; } void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_mem_dqinfo *info) { ocfs2_lock_res_init_once(lockres); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type, 0, lockres->l_name); ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres, OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops, info); } void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_super *osb, u64 ref_blkno, unsigned int generation) { ocfs2_lock_res_init_once(lockres); ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno, generation, lockres->l_name); ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT, 
&ocfs2_refcount_block_lops, osb); } void ocfs2_lock_res_free(struct ocfs2_lock_res *res) { if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) return; ocfs2_remove_lockres_tracking(res); mlog_bug_on_msg(!list_empty(&res->l_blocked_list), "Lockres %s is on the blocked list\n", res->l_name); mlog_bug_on_msg(!list_empty(&res->l_mask_waiters), "Lockres %s has mask waiters pending\n", res->l_name); mlog_bug_on_msg(spin_is_locked(&res->l_lock), "Lockres %s is locked\n", res->l_name); mlog_bug_on_msg(res->l_ro_holders, "Lockres %s has %u ro holders\n", res->l_name, res->l_ro_holders); mlog_bug_on_msg(res->l_ex_holders, "Lockres %s has %u ex holders\n", res->l_name, res->l_ex_holders); /* Need to clear out the lock status block for the dlm */ memset(&res->l_lksb, 0, sizeof(res->l_lksb)); res->l_flags = 0UL; } static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, int level) { BUG_ON(!lockres); switch(level) { case DLM_LOCK_EX: lockres->l_ex_holders++; break; case DLM_LOCK_PR: lockres->l_ro_holders++; break; default: BUG(); } } static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, int level) { BUG_ON(!lockres); switch(level) { case DLM_LOCK_EX: BUG_ON(!lockres->l_ex_holders); lockres->l_ex_holders--; break; case DLM_LOCK_PR: BUG_ON(!lockres->l_ro_holders); lockres->l_ro_holders--; break; default: BUG(); } } /* WARNING: This function lives in a world where the only three lock * levels are EX, PR, and NL. It *will* have to be adjusted when more * lock types are added. 
 */
/*
 * Return the highest lock level another node may hold concurrently
 * with @level: EX excludes everything (-> NL), PR coexists with PR,
 * and anything else is compatible with EX (the default).
 */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;
	return new_level;
}

/*
 * Replace l_flags wholesale and complete any mask waiters whose
 * (mask, goal) condition is now satisfied.  Caller holds l_lock.
 */
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}

/* Set bits in l_flags (and wake matching waiters).  Holds no lock itself. */
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}

/* Clear bits in l_flags (and wake matching waiters). */
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}

/*
 * AST handler path for a completed downconvert: adopt the requested
 * level and, if it no longer conflicts with the blocking request,
 * clear BLOCKED.  Always clears BUSY.  Caller holds l_lock.
 */
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	lockres->l_level = lockres->l_requested;

	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}

static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date.
Convert from NL to * *anything* however should mark ourselves as needing an * update */ if (lockres->l_level == DLM_LOCK_NL && lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; /* * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing * the OCFS2_LOCK_BUSY flag to prevent the dc thread from * downconverting the lock before the upconvert has fully completed. */ lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); } static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres) { BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY))); BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); if (lockres->l_requested > DLM_LOCK_NL && !(lockres->l_flags & OCFS2_LOCK_LOCAL) && lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); } static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level) { int needs_downconvert = 0; assert_spin_locked(&lockres->l_lock); if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're * blocking. this also catches the case where we get * duplicate BASTs */ if (ocfs2_highest_compat_lock_level(level) < ocfs2_highest_compat_lock_level(lockres->l_blocking)) needs_downconvert = 1; lockres->l_blocking = level; } mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n", lockres->l_name, level, lockres->l_level, lockres->l_blocking, needs_downconvert); if (needs_downconvert) lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); mlog(0, "needs_downconvert = %d\n", needs_downconvert); return needs_downconvert; } /* * OCFS2_LOCK_PENDING and l_pending_gen. 
* * Why does OCFS2_LOCK_PENDING exist? To close a race between setting * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock() * for more details on the race. * * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock() * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns, * the caller is going to try to clear PENDING again. If nothing else is * happening, __lockres_clear_pending() sees PENDING is unset and does * nothing. * * But what if another path (eg downconvert thread) has just started a * new locking action? The other path has re-set PENDING. Our path * cannot clear PENDING, because that will re-open the original race * window. * * [Example] * * ocfs2_meta_lock() * ocfs2_cluster_lock() * set BUSY * set PENDING * drop l_lock * ocfs2_dlm_lock() * ocfs2_locking_ast() ocfs2_downconvert_thread() * clear PENDING ocfs2_unblock_lock() * take_l_lock * !BUSY * ocfs2_prepare_downconvert() * set BUSY * set PENDING * drop l_lock * take l_lock * clear PENDING * drop l_lock * <window> * ocfs2_dlm_lock() * * So as you can see, we now have a window where l_lock is not held, * PENDING is not set, and ocfs2_dlm_lock() has not been called. * * The core problem is that ocfs2_cluster_lock() has cleared the PENDING * set by ocfs2_prepare_downconvert(). That wasn't nice. * * To solve this we introduce l_pending_gen. A call to * lockres_clear_pending() will only do so when it is passed a generation * number that matches the lockres. lockres_set_pending() will return the * current generation number. When ocfs2_cluster_lock() goes to clear * PENDING, it passes the generation it got from set_pending(). In our * example above, the generation numbers will *not* match. Thus, * ocfs2_cluster_lock() will not clear the PENDING set by * ocfs2_prepare_downconvert(). 
*/ /* Unlocked version for ocfs2_locking_ast() */ static void __lockres_clear_pending(struct ocfs2_lock_res *lockres, unsigned int generation, struct ocfs2_super *osb) { assert_spin_locked(&lockres->l_lock); /* * The ast and locking functions can race us here. The winner * will clear pending, the loser will not. */ if (!(lockres->l_flags & OCFS2_LOCK_PENDING) || (lockres->l_pending_gen != generation)) return; lockres_clear_flags(lockres, OCFS2_LOCK_PENDING); lockres->l_pending_gen++; /* * The downconvert thread may have skipped us because we * were PENDING. Wake it up. */ if (lockres->l_flags & OCFS2_LOCK_BLOCKED) ocfs2_wake_downconvert_thread(osb); } /* Locked version for callers of ocfs2_dlm_lock() */ static void lockres_clear_pending(struct ocfs2_lock_res *lockres, unsigned int generation, struct ocfs2_super *osb) { unsigned long flags; spin_lock_irqsave(&lockres->l_lock, flags); __lockres_clear_pending(lockres, generation, osb); spin_unlock_irqrestore(&lockres->l_lock, flags); } static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres) { assert_spin_locked(&lockres->l_lock); BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); lockres_or_flags(lockres, OCFS2_LOCK_PENDING); return lockres->l_pending_gen; } static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level) { struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); int needs_downconvert; unsigned long flags; BUG_ON(level <= DLM_LOCK_NL); mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, " "type %s\n", lockres->l_name, level, lockres->l_level, ocfs2_lock_type_string(lockres->l_type)); /* * We can skip the bast for locks which don't enable caching - * they'll be dropped at the earliest possible time anyway. 
*/ if (lockres->l_flags & OCFS2_LOCK_NOCACHE) return; spin_lock_irqsave(&lockres->l_lock, flags); needs_downconvert = ocfs2_generic_handle_bast(lockres, level); if (needs_downconvert) ocfs2_schedule_blocked_lock(osb, lockres); spin_unlock_irqrestore(&lockres->l_lock, flags); wake_up(&lockres->l_event); ocfs2_wake_downconvert_thread(osb); } static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb) { struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); unsigned long flags; int status; spin_lock_irqsave(&lockres->l_lock, flags); status = ocfs2_dlm_lock_status(&lockres->l_lksb); if (status == -EAGAIN) { lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); goto out; } if (status) { mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n", lockres->l_name, status); spin_unlock_irqrestore(&lockres->l_lock, flags); return; } mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, " "level %d => %d\n", lockres->l_name, lockres->l_action, lockres->l_unlock_action, lockres->l_level, lockres->l_requested); switch(lockres->l_action) { case OCFS2_AST_ATTACH: ocfs2_generic_handle_attach_action(lockres); lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL); break; case OCFS2_AST_CONVERT: ocfs2_generic_handle_convert_action(lockres); break; case OCFS2_AST_DOWNCONVERT: ocfs2_generic_handle_downconvert_action(lockres); break; default: mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, " "flags 0x%lx, unlock: %u\n", lockres->l_name, lockres->l_action, lockres->l_flags, lockres->l_unlock_action); BUG(); } out: /* set it to something invalid so if we get called again we * can catch it. */ lockres->l_action = OCFS2_AST_INVALID; /* Did we try to cancel this lock? Clear that state */ if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; /* * We may have beaten the locking functions here. 
We certainly * know that dlm_lock() has been called :-) * Because we can't have two lock calls in flight at once, we * can use lockres->l_pending_gen. */ __lockres_clear_pending(lockres, lockres->l_pending_gen, osb); wake_up(&lockres->l_event); spin_unlock_irqrestore(&lockres->l_lock, flags); } static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error) { struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); unsigned long flags; mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", lockres->l_name, lockres->l_unlock_action); spin_lock_irqsave(&lockres->l_lock, flags); if (error) { mlog(ML_ERROR, "Dlm passes error %d for lock %s, " "unlock_action %d\n", error, lockres->l_name, lockres->l_unlock_action); spin_unlock_irqrestore(&lockres->l_lock, flags); return; } switch(lockres->l_unlock_action) { case OCFS2_UNLOCK_CANCEL_CONVERT: mlog(0, "Cancel convert success for %s\n", lockres->l_name); lockres->l_action = OCFS2_AST_INVALID; /* Downconvert thread may have requeued this lock, we * need to wake it. */ if (lockres->l_flags & OCFS2_LOCK_BLOCKED) ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres)); break; case OCFS2_UNLOCK_DROP_LOCK: lockres->l_level = DLM_LOCK_IV; break; default: BUG(); } lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; wake_up(&lockres->l_event); spin_unlock_irqrestore(&lockres->l_lock, flags); } /* * This is the filesystem locking protocol. It provides the lock handling * hooks for the underlying DLM. It has a maximum version number. * The version number allows interoperability with systems running at * the same major number and an equal or smaller minor number. * * Whenever the filesystem does new things with locks (adds or removes a * lock, orders them differently, does different things underneath a lock), * the version must be changed. The protocol is negotiated when joining * the dlm domain. 
 A node may join the domain if its major version is
 * identical to all other nodes and its minor version is greater than
 * or equal to all other nodes.  When its minor version is greater than
 * the other nodes, it will run at the minor version specified by the
 * other nodes.
 *
 * If a locking change is made that will not be compatible with older
 * versions, the major number must be increased and the minor version set
 * to zero.  If a change merely adds a behavior that can be disabled when
 * speaking to older versions, the minor version must be increased.  If a
 * change adds a fully backwards compatible change (eg, LVB changes that
 * are just ignored by older versions), the version does not need to be
 * updated.
 */
static struct ocfs2_locking_protocol lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	.lp_lock_ast		= ocfs2_locking_ast,
	.lp_blocking_ast	= ocfs2_blocking_ast,
	.lp_unlock_ast		= ocfs2_unlock_ast,
};

/* Register our maximum protocol version with the stack glue layer. */
void ocfs2_set_locking_protocol(void)
{
	ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
}

/*
 * Reset lockres state after a failed ocfs2_dlm_lock()/unlock call so
 * that a future request can be attempted.  'convert' selects whether
 * the pending lock action or the pending unlock action is invalidated.
 */
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}

/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{
	int ret = 0;
	unsigned long flags;
	unsigned int gen;

	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	/* Nothing to do if the lock is already attached, or someone
	 * else is mid-request. */
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	/* The pending generation lets the AST detect whether it raced
	 * with this request. */
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn,
			     level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);
	lockres_clear_pending(lockres, gen, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
	return ret;
}

/* Sample a lockres flag under l_lock; used by the wait_event helpers
 * below so the flag read is properly serialized. */
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

/* Sleep on l_event until the BUSY flag clears. */
static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

/* Sleep on l_event until the REFRESHING flag clears. */
static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}

/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it.
 */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}

/* Prepare a mask waiter for use: empty list linkage, unsignalled
 * completion, stats start time. */
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

/* Block (uninterruptibly) until the waiter is completed; returns the
 * status stored by whoever completed it. */
static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return mw->mw_status;
}

/*
 * Queue a waiter that will be completed when (l_flags & mask) == goal.
 * Caller must hold l_lock.
 */
static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

/* Interruptible variant of ocfs2_wait_for_mask(); on signal the waiter
 * is dequeued and the -ERESTARTSYS from the wait is returned. */
static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = wait_for_completion_interruptible(&mw->mw_complete);
	if (ret)
		lockres_remove_mask_waiter(lockres, mw);
	else
		ret = mw->mw_status;
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return ret;
}

/*
 * Core cluster lock acquisition.  Loops taking/upconverting the dlm
 * lock until the granted level satisfies 'level', waiting out BUSY and
 * BLOCKED states via mask waiters.  Increments the holder count on
 * success.
 */
static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level,
				u32 lkm_flags,
				int arg_flags,
				int l_subclass,
				unsigned long caller_ip)
{
	struct ocfs2_mask_waiter mw;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;
	unsigned int gen;
	int noqueue_attempted = 0;

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

again:
	wait = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto unlock;
	}

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
		/*
		 * We've upconverted. If the lock now has a level we can
		 * work with, we take it. If, however, the lock is not at the
		 * required level, we go thru the full cycle. One way this could
		 * happen is if a process requesting an upconvert to PR is
		 * closely followed by another requesting upconvert to an EX.
		 * If the process requesting EX lands here, we want it to
		 * continue attempting to upconvert and let the process
		 * requesting PR take the lock.
		 * If multiple processes request upconvert to PR, the first one
		 * here will take the lock. The others will have to go thru the
		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
		 * downconvert request.
		 */
		if (level <= lockres->l_level)
			goto update_holders;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* is the lock is currently blocked on behalf of
		 * another node */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (noqueue_attempted > 0) {
			/* Second pass of a NOQUEUE request - don't retry. */
			ret = -EAGAIN;
			goto unlock;
		}
		if (lkm_flags & DLM_LKF_NOQUEUE)
			noqueue_attempted = 1;

		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
			lockres->l_action = OCFS2_AST_ATTACH;
			lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
			lkm_flags |= DLM_LKF_CONVERT;
		}

		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		gen = lockres_set_pending(lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		ret = ocfs2_dlm_lock(osb->cconn,
				     level,
				     &lockres->l_lksb,
				     lkm_flags,
				     lockres->l_name,
				     OCFS2_LOCK_ID_MAX_LEN - 1);
		lockres_clear_pending(lockres, gen, osb);
		if (ret) {
			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
			    (ret != -EAGAIN)) {
				ocfs2_log_dlm_error("ocfs2_dlm_lock",
						    ret, lockres);
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}

		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

update_holders:
	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);

	spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks.  One path holds the page lock while calling aops
	 * which block acquiring dlm locks.  The voting thread holds dlm
	 * locks while acquiring page locks while down converting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		if (lockres_remove_mask_waiter(lockres, &mw))
			ret = -EAGAIN;
		else
			goto again;
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}
	ocfs2_update_lock_stats(lockres, level, &mw, ret);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!ret && lockres->l_lockdep_map.key != NULL) {
		if (level == DLM_LOCK_PR)
			rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
				caller_ip);
		else
			rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
				caller_ip);
	}
#endif
	return ret;
}

/* Convenience wrapper: no lockdep subclass, caller IP of our caller. */
static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres,
				     int level,
				     u32 lkm_flags,
				     int arg_flags)
{
	return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags,
				    arg_flags, 0, _RET_IP_);
}

/* Drop one holder reference at 'level' and kick the downconvert thread
 * if that makes a blocked downconvert possible. */
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level,
				   unsigned long caller_ip)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_downconvert_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (lockres->l_lockdep_map.key != NULL)
		rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
#endif
}

static int ocfs2_create_new_lock(struct ocfs2_super *osb, struct
ocfs2_lock_res *lockres, int ex, int local)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned long flags;
	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	/* LOCAL marks this lockres as created via the fast local path;
	 * it is cleared by the attach AST. */
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}

/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);
	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* NOTE: That we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle. Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to.  As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

bail:
	return ret;
}

/* Take the per-inode RW cluster lock: EX for writers, PR for readers.
 * No-op (returns 0) on a locally-mounted (non-clustered) volume. */
int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
				    0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/* Release the per-inode RW cluster lock taken by ocfs2_rw_lock(). */
void ocfs2_rw_unlock(struct inode *inode, int write)
{
	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}

/*
 * ocfs2_open_lock always gets a PR mode lock.
*/ int ocfs2_open_lock(struct inode *inode) { int status = 0; struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); BUG_ON(!inode); mlog(0, "inode %llu take PRMODE open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); if (ocfs2_mount_local(osb)) goto out; lockres = &OCFS2_I(inode)->ip_open_lockres; status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, DLM_LOCK_PR, 0, 0); if (status < 0) mlog_errno(status); out: return status; } int ocfs2_try_open_lock(struct inode *inode, int write) { int status = 0, level; struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); BUG_ON(!inode); mlog(0, "inode %llu try to take %s open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? "EXMODE" : "PRMODE"); if (ocfs2_mount_local(osb)) goto out; lockres = &OCFS2_I(inode)->ip_open_lockres; level = write ? DLM_LOCK_EX : DLM_LOCK_PR; /* * The file system may already holding a PRMODE/EXMODE open lock. * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on * other nodes and the -EAGAIN will indicate to the caller that * this inode is still in use. */ status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, DLM_LKF_NOQUEUE, 0); out: return status; } /* * ocfs2_open_unlock unlock PR and EX mode open locks. 
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	/* Drop whichever levels we are actually holding. */
	if(lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_PR);
	if(lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_EX);

out:
	return;
}

/*
 * A signal interrupted a pending flock conversion.  Cancel the
 * in-flight convert (retrying until the lockres is no longer BUSY) and
 * report 0 if the requested level was granted anyway, -ERESTARTSYS
 * otherwise.
 */
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
				     int level)
{
	int ret;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

retry_cancel:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		if (ret) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			goto retry_cancel;
		}
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_for_mask(&mw);
		goto retry_cancel;
	}

	ret = -ERESTARTSYS;
	/*
	 * We may still have gotten the lock, in which case there's no
	 * point to restarting the syscall.
	 */
	if (lockres->l_level == level)
		ret = 0;

	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
	     lockres->l_flags, lockres->l_level, lockres->l_action);

	spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
	return ret;
}

/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take at exactly
 *   what's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time.
 This also means flock locks never go on
 *   the blocking list.
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving applications flock()
 *   request.
 *
 * - Access to any flock lockres doesn't require concurrency, so we
 *   can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	/* The caller guarantees serialization, so the lockres must be
	 * idle (not BUSY, at most NL) on entry. */
	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
	    (lockres->l_level > DLM_LOCK_NL)) {
		mlog(ML_ERROR,
		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
		     "level: %u\n", lockres->l_name, lockres->l_flags,
		     lockres->l_level);
		return -EINVAL;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/*
		 * Get the lock at NLMODE to start - that way we
		 * can cancel the upconvert request if need be.
		 */
		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_wait_for_mask(&mw);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	lockres->l_action = OCFS2_AST_CONVERT;
	lkm_flags |= DLM_LKF_CONVERT;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);

	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
	if (ret) {
		if (!trylock || (ret != -EAGAIN)) {
			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
			ret = -EINVAL;
		}

		ocfs2_recover_from_dlm_error(lockres, 1);
		lockres_remove_mask_waiter(lockres, &mw);
		goto out;
	}

	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
	if (ret == -ERESTARTSYS) {
		/*
		 * Userspace can cause deadlock itself with
		 * flock(). Current behavior locally is to allow the
		 * deadlock, but abort the system call if a signal is
		 * received. We follow this example, otherwise a
		 * poorly written program could sit in kernel until
		 * reboot.
		 *
		 * Handling this is a bit more complicated for Ocfs2
		 * though. We can't exit this function with an
		 * outstanding lock request, so a cancel convert is
		 * required. We intentionally overwrite 'ret' - if the
		 * cancel fails and the lock was granted, it's easier
		 * to just bubble success back up to the user.
		 */
		ret = ocfs2_flock_handle_signal(lockres, level);
	} else if (!ret && (level > lockres->l_level)) {
		/* Trylock failed asynchronously */
		BUG_ON(!trylock);
		ret = -EAGAIN;
	}

out:
	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
	     lockres->l_name, ex, trylock, ret);
	return ret;
}

/* Drop a flock lock by faking a blocking AST and downconverting to NL. */
void ocfs2_file_unlock(struct file *file)
{
	int ret;
	unsigned int gen;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
		return;

	if (lockres->l_level == DLM_LOCK_NL)
		return;

	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
	     lockres->l_name, lockres->l_flags, lockres->l_level,
	     lockres->l_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	/*
	 * Fake a blocking ast for the downconvert code.
	 */
	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
	lockres->l_blocking = DLM_LOCK_EX;

	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
	if (ret) {
		mlog_errno(ret);
		return;
	}

	ret = ocfs2_wait_for_mask(&mw);
	if (ret)
		mlog_errno(ret);
}

static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	int kick = 0;

	/* If we know that another node is waiting on our lock, kick
	 * the downconvert thread pre-emptively when we reach a release
	 * condition.
	 */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch(lockres->l_blocking) {
		case DLM_LOCK_EX:
			/* An EX waiter is blocked by any holder. */
			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
				kick = 1;
			break;
		case DLM_LOCK_PR:
			/* A PR waiter is only blocked by EX holders. */
			if (!lockres->l_ex_holders)
				kick = 1;
			break;
		default:
			BUG();
		}
	}

	if (kick)
		ocfs2_wake_downconvert_thread(osb);
}

#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - 34)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * now. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
	u64 res;
	u64 sec = spec->tv_sec;
	u32 nsec = spec->tv_nsec;

	/* Seconds in the top OCFS2_SEC_BITS bits, nanoseconds below. */
	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

	return res;
}

/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	/*
	 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * status.
	 */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		lvb->lvb_version = 0;
		goto out;
	}

	/* LVB fields are big-endian on the wire. */
	lvb->lvb_version   = OCFS2_LVB_VERSION;
	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
	lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
	lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
	lvb->lvb_iatime_packed  =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
	lvb->lvb_ictime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
	lvb->lvb_imtime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
	mlog_meta_lvb(0, lockres);
}

/* Inverse of ocfs2_pack_timespec(): split a packed 64-bit value back
 * into seconds and nanoseconds. */
static void ocfs2_unpack_timespec(struct timespec *spec,
				  u64 packed_time)
{
	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}

/* Populate the in-core inode from the metadata LVB, avoiding a disk
 * read.  Caller has verified the LVB is trustable. */
static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_meta_lvb(0, lockres);

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	/* We're safe here without the lockres lock... */
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
	ocfs2_set_inode_flags(inode);

	/* fast-symlinks are a special case */
	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
		inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);

	inode->i_uid     = be32_to_cpu(lvb->lvb_iuid);
	inode->i_gid     = be32_to_cpu(lvb->lvb_igid);
	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
	inode->i_nlink   = be16_to_cpu(lvb->lvb_inlink);
	ocfs2_unpack_timespec(&inode->i_atime,
			      be64_to_cpu(lvb->lvb_iatime_packed));
	ocfs2_unpack_timespec(&inode->i_mtime,
			      be64_to_cpu(lvb->lvb_imtime_packed));
	ocfs2_unpack_timespec(&inode->i_ctime,
			      be64_to_cpu(lvb->lvb_ictime_packed));
	spin_unlock(&oi->ip_lock);
}

/* An LVB is only trustable if the DLM says it is valid, the version
 * matches, and the generation matches our in-core inode. */
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
					      struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
	    && lvb->lvb_version == OCFS2_LVB_VERSION
	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
		return 1;
	return 0;
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	/* Someone else is refreshing; wait for them and re-check. */
	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock.
	 */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	mlog(0, "status %d\n", status);
	return status;
}

/* If status is non zero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
	if (!status)
		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}

/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_mount_local(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0, "Orphaned inode %llu was deleted while we "
		     "were waiting on a lock. ip_flags = 0x%x\n",
		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(INODE_CACHE(inode));

	ocfs2_extent_map_trunc(inode, 0);

	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
		mlog(0, "Trusting LVB on inode %llu\n",
		     (unsigned long long)oi->ip_blkno);
		ocfs2_refresh_inode_from_lvb(inode);
	} else {
		/* Boo, we have to go to disk. */
		/* read bh, cast, ocfs2_refresh_inode */
		status = ocfs2_read_inode_block(inode, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
		fe = (struct ocfs2_dinode *) (*bh)->b_data;

		/* This is a good chance to make sure we're not
		 * locking an invalid object.  ocfs2_read_inode_block()
		 * already checked that the inode block is sane.
		 *
		 * We bug on a stale inode here because we checked
		 * above whether it was wiped from disk. The wiping
		 * node provides a guarantee that we receive that
		 * message and can mark the inode before dropping any
		 * locks associated with it. */
		mlog_bug_on_msg(inode->i_generation !=
				le32_to_cpu(fe->i_generation),
				"Invalid dinode %llu disk generation: %u "
				"inode->i_generation: %u\n",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation),
				inode->i_generation);
		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));

		ocfs2_refresh_inode(inode, fe);
		ocfs2_track_lock_refresh(lockres);
	}

	status = 0;
bail_refresh:
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}

/* Hand the caller a buffer_head: reuse 'passed_bh' (with an extra ref)
 * if the update path already read one, otherwise read from disk. */
static int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{
	int status;

	if (passed_bh) {
		/* Ok, the update went to disk for us, use the
		 * returned bh. */
		*ret_bh = passed_bh;
		get_bh(*ret_bh);

		return 0;
	}

	status = ocfs2_read_inode_block(inode, ret_bh);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_inode_lock_full_nested(struct inode *inode,
				 struct buffer_head **ret_bh,
				 int ex,
				 int arg_flags,
				 int subclass)
{
	int status, level, acquired;
	u32 dlm_flags;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	BUG_ON(!inode);

	mlog(0, "inode %llu, take %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	status = 0;
	acquired = 0;
	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto bail;
	}

	if (ocfs2_mount_local(osb))
		goto local;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	dlm_flags = 0;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		dlm_flags |= DLM_LKF_NOQUEUE;

	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
				      arg_flags, subclass, _RET_IP_);
	if (status < 0) {
		if (status != -EAGAIN && status != -EIOCBRETRY)
			mlog_errno(status);
		goto bail;
	}

	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

local:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	/* This is fun. The caller may want a bh back, or it may
	 * not. ocfs2_inode_lock_update definitely wants one in, but
	 * may or may not read one, depending on what's in the
	 * LVB. The result of all of this is that we've *only* gone to
	 * disk if we have to, so the complexity is worthwhile. */
	status = ocfs2_inode_lock_update(inode, &local_bh);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}

	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (status < 0) {
		/* Error cleanup: release the bh and the cluster lock we
		 * may have acquired above. */
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_inode_unlock(inode, ex);
	}

	if (local_bh)
		brelse(local_bh);

	return status;
}

/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquiry while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread.  In that case we unlock
 * our page so the downconvert thread can make progress.  Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 *
 * We do a blocking lock and immediate unlock before returning, though, so that
 * the lock has a great chance of being cached on this node by the time the VFS
 * calls back to retry the aop.   This has a potential to livelock as nodes
 * ping locks back and forth, but that's a risk we're willing to take to avoid
 * the lock inversion simply.
*/ int ocfs2_inode_lock_with_page(struct inode *inode, struct buffer_head **ret_bh, int ex, struct page *page) { int ret; ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { unlock_page(page); if (ocfs2_inode_lock(inode, ret_bh, ex) == 0) ocfs2_inode_unlock(inode, ex); ret = AOP_TRUNCATED_PAGE; } return ret; } int ocfs2_inode_lock_atime(struct inode *inode, struct vfsmount *vfsmnt, int *level) { int ret; ret = ocfs2_inode_lock(inode, NULL, 0); if (ret < 0) { mlog_errno(ret); return ret; } /* * If we should update atime, we will get EX lock, * otherwise we just get PR lock. */ if (ocfs2_should_update_atime(inode, vfsmnt)) { struct buffer_head *bh = NULL; ocfs2_inode_unlock(inode, 0); ret = ocfs2_inode_lock(inode, &bh, 1); if (ret < 0) { mlog_errno(ret); return ret; } *level = 1; if (ocfs2_should_update_atime(inode, vfsmnt)) ocfs2_update_inode_atime(inode, bh); if (bh) brelse(bh); } else *level = 0; return ret; } void ocfs2_inode_unlock(struct inode *inode, int ex) { int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); mlog(0, "inode %llu drop %s META lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, ex ? 
"EXMODE" : "PRMODE"); if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && !ocfs2_mount_local(osb)) ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); } int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno) { struct ocfs2_lock_res *lockres; struct ocfs2_orphan_scan_lvb *lvb; int status = 0; if (ocfs2_is_hard_readonly(osb)) return -EROFS; if (ocfs2_mount_local(osb)) return 0; lockres = &osb->osb_orphan_scan.os_lockres; status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0); if (status < 0) return status; lvb = ocfs2_dlm_lvb(&lockres->l_lksb); if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) && lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION) *seqno = be32_to_cpu(lvb->lvb_os_seqno); else *seqno = osb->osb_orphan_scan.os_seqno + 1; return status; } void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno) { struct ocfs2_lock_res *lockres; struct ocfs2_orphan_scan_lvb *lvb; if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) { lockres = &osb->osb_orphan_scan.os_lockres; lvb = ocfs2_dlm_lvb(&lockres->l_lksb); lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION; lvb->lvb_os_seqno = cpu_to_be32(seqno); ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX); } } int ocfs2_super_lock(struct ocfs2_super *osb, int ex) { int status = 0; int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; if (ocfs2_is_hard_readonly(osb)) return -EROFS; if (ocfs2_mount_local(osb)) goto bail; status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); if (status < 0) { mlog_errno(status); goto bail; } /* The super block lock path is really in the best position to * know when resources covered by the lock need to be * refreshed, so we do it here. 
Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (status) {
		/* Positive return means a refresh is needed; pull slot
		 * info from disk and publish the result. */
		status = ocfs2_refresh_slot_info(osb);

		ocfs2_complete_lock_res_refresh(lockres, status);

		if (status < 0)
			mlog_errno(status);
		ocfs2_track_lock_refresh(lockres);
	}
bail:
	return status;
}

/* Drop the superblock cluster lock taken by ocfs2_super_lock(). */
void ocfs2_super_unlock(struct ocfs2_super *osb,
			int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}

/* Take the global rename lock at EX to serialize cross-node renames. */
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
	int status;
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/* Drop the global rename lock. */
void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}

/* Take the NFS sync lock at PR or EX depending on @ex. */
int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
	int status;
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres,
				    ex ? LKM_EXMODE : LKM_PRMODE, 0, 0);
	if (status < 0)
		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);

	return status;
}

/* Drop the NFS sync lock; @ex must match the level it was taken at. */
void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres,
				     ex ? LKM_EXMODE : LKM_PRMODE);
}

/*
 * Take the per-dentry cluster lock stored in dentry->d_fsdata.
 * Returns 0 on success (including local/readonly short-circuits) or a
 * negative errno.
 */
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
	int ret;
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	BUG_ON(!dl);

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

/* Drop the per-dentry cluster lock taken by ocfs2_dentry_lock(). */
void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}

/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);

	kfree(dlm_debug);
}

/* Drop a reference on the dlm debug structure; frees it on last put. */
void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
	if (dlm_debug)
		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}

/* Take an additional reference on the dlm debug structure. */
static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
	kref_get(&debug->d_refcnt);
}

/*
 * Allocate and initialize a dlm debug structure with a single
 * reference.  Returns NULL on allocation failure.
 */
struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
	if (!dlm_debug) {
		mlog_errno(-ENOMEM);
		goto out;
	}

	kref_init(&dlm_debug->d_refcnt);
	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
	dlm_debug->d_locking_state = NULL;
out:
	return dlm_debug;
}

/* Access to this is arbitrated for us via seq_file->sem.
 */
/* Per-open state for the debugfs "locking_state" seq_file. */
struct ocfs2_dlm_seq_priv {
	struct ocfs2_dlm_debug *p_dlm_debug;	/* refcounted debug info */
	struct ocfs2_lock_res p_iter_res;	/* dummy cursor in the tracking list */
	struct ocfs2_lock_res p_tmp_res;	/* snapshot handed to ->show */
};

/*
 * Walk forward from @start in the lockres tracking list and return the
 * next real lockres, or NULL at the end of the list.  Caller must hold
 * ocfs2_dlm_tracking_lock.
 */
static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
						 struct ocfs2_dlm_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;

	assert_spin_locked(&ocfs2_dlm_tracking_lock);

	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
		/* discover the head of the list */
		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
			mlog(0, "End of list found, %p\n", ret);
			break;
		}

		/* We track our "dummy" iteration lockres' by a NULL
		 * l_ops field. */
		if (iter->l_ops != NULL) {
			ret = iter;
			break;
		}
	}

	return ret;
}

/* seq_file ->start: find the first lockres after our iteration cursor. */
static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
	if (iter) {
		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in them. */
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}

/* seq_file ->stop: nothing to tear down, state lives in priv. */
static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}

/*
 * seq_file ->next: advance past the current lockres, re-anchor the
 * dummy cursor behind the new position, and return a snapshot of it.
 */
static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(iter, priv);
	list_del_init(&dummy->l_debug_list);
	if (iter) {
		list_add(&dummy->l_debug_list, &iter->l_debug_list);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}

/*
 * Version is used by debugfs.ocfs2 to determine the format being used
 *
 * New in version 2
 *	- Lock stats printed
 * New in version 3
 *	- Max time in lock stats is in usecs (instead of nsecs)
 */
#define OCFS2_DLM_DEBUG_STR_VERSION 3
/*
 * seq_file ->show: emit one tab-separated record describing @v (a
 * lockres snapshot): version, name, level/flags/action fields, the raw
 * LVB bytes, and (version 2+) the lock statistics.
 */
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
	int i;
	char *lvb;
	struct ocfs2_lock_res *lockres = v;

	if (!lockres)
		return -EINVAL;

	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);

	/* Dentry lock names embed an inode number after the prefix;
	 * print it separately in hex. */
	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
		seq_printf(m, "%.*s%08x\t",
			   OCFS2_DENTRY_LOCK_INO_START - 1, lockres->l_name,
			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
	else
		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);

	seq_printf(m, "%d\t"
		   "0x%lx\t"
		   "0x%x\t"
		   "0x%x\t"
		   "%u\t"
		   "%u\t"
		   "%d\t"
		   "%d\t",
		   lockres->l_level,
		   lockres->l_flags,
		   lockres->l_action,
		   lockres->l_unlock_action,
		   lockres->l_ro_holders,
		   lockres->l_ex_holders,
		   lockres->l_requested,
		   lockres->l_blocking);

	/* Dump the raw LVB */
	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	for(i = 0; i < DLM_LVB_LEN; i++)
		seq_printf(m, "0x%x\t", lvb[i]);

	/* Stats compile away to constants without CONFIG_OCFS2_FS_STATS
	 * so the record layout stays identical. */
#ifdef CONFIG_OCFS2_FS_STATS
# define lock_num_prmode(_l)		((_l)->l_lock_prmode.ls_gets)
# define lock_num_exmode(_l)		((_l)->l_lock_exmode.ls_gets)
# define lock_num_prmode_failed(_l)	((_l)->l_lock_prmode.ls_fail)
# define lock_num_exmode_failed(_l)	((_l)->l_lock_exmode.ls_fail)
# define lock_total_prmode(_l)		((_l)->l_lock_prmode.ls_total)
# define lock_total_exmode(_l)		((_l)->l_lock_exmode.ls_total)
# define lock_max_prmode(_l)		((_l)->l_lock_prmode.ls_max)
# define lock_max_exmode(_l)		((_l)->l_lock_exmode.ls_max)
# define lock_refresh(_l)		((_l)->l_lock_refresh)
#else
# define lock_num_prmode(_l)		(0)
# define lock_num_exmode(_l)		(0)
# define lock_num_prmode_failed(_l)	(0)
# define lock_num_exmode_failed(_l)	(0)
# define lock_total_prmode(_l)		(0ULL)
# define lock_total_exmode(_l)		(0ULL)
# define lock_max_prmode(_l)		(0)
# define lock_max_exmode(_l)		(0)
# define lock_refresh(_l)		(0)
#endif
	/* The following seq_print was added in version 2 of this output */
	seq_printf(m, "%u\t"
		   "%u\t"
		   "%u\t"
		   "%u\t"
		   "%llu\t"
		   "%llu\t"
		   "%u\t"
		   "%u\t"
		   "%u\t",
		   lock_num_prmode(lockres),
		   lock_num_exmode(lockres),
		   lock_num_prmode_failed(lockres),
		   lock_num_exmode_failed(lockres),
		   lock_total_prmode(lockres),
		   lock_total_exmode(lockres),
		   lock_max_prmode(lockres),
		   lock_max_exmode(lockres),
		   lock_refresh(lockres));

	/* End the line */
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations ocfs2_dlm_seq_ops = {
	.start =	ocfs2_dlm_seq_start,
	.stop =		ocfs2_dlm_seq_stop,
	.next =		ocfs2_dlm_seq_next,
	.show =		ocfs2_dlm_seq_show,
};

/*
 * ->release for the debugfs file: unhook our iteration cursor, drop
 * the debug refcount taken at open, and free the seq_file private data.
 */
static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ocfs2_dlm_seq_priv *priv = seq->private;
	struct ocfs2_lock_res *res = &priv->p_iter_res;

	ocfs2_remove_lockres_tracking(res);
	ocfs2_put_dlm_debug(priv->p_dlm_debug);
	return seq_release_private(inode, file);
}

/*
 * ->open for the debugfs file: allocate the per-open private data,
 * take a reference on the osb's dlm debug structure, and register the
 * iteration cursor in the tracking list.
 */
static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
	int ret;
	struct ocfs2_dlm_seq_priv *priv;
	struct seq_file *seq;
	struct ocfs2_super *osb;

	priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	osb = inode->i_private;
	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
	priv->p_dlm_debug = osb->osb_dlm_debug;
	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);

	ret = seq_open(file, &ocfs2_dlm_seq_ops);
	if (ret) {
		/* seq_open failed: undo the allocation; the debug ref is
		 * dropped by release only on success paths.
		 * NOTE(review): looks like the ocfs2_get_dlm_debug() ref
		 * is not put back here - verify against upstream. */
		kfree(priv);
		mlog_errno(ret);
		goto out;
	}

	seq = file->private_data;
	seq->private = priv;

	ocfs2_add_lockres_tracking(&priv->p_iter_res,
				   priv->p_dlm_debug);

out:
	return ret;
}

static const struct file_operations ocfs2_dlm_debug_fops = {
	.open =		ocfs2_dlm_debug_open,
	.release =	ocfs2_dlm_debug_release,
	.read =		seq_read,
	.llseek =	seq_lseek,
};

/*
 * Create the debugfs "locking_state" file for @osb and take a debug
 * structure reference for it.
 */
static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
	int ret = 0;
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
							 S_IFREG|S_IRUSR,
							 osb->osb_debug_root,
							 osb,
							 &ocfs2_dlm_debug_fops);
	if (!dlm_debug->d_locking_state) {
		ret = -EINVAL;
		mlog(ML_ERROR,
		     "Unable to create locking state debugfs file.\n");
		goto out;
	}

	ocfs2_get_dlm_debug(dlm_debug);
out:
	return ret;
}

/* Remove the debugfs file and drop the reference taken at init. */
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	if (dlm_debug) {
		debugfs_remove(dlm_debug->d_locking_state);
		ocfs2_put_dlm_debug(dlm_debug);
	}
}

/*
 * Bring up the DLM side of a mount: debugfs state, the downconvert
 * thread, the cluster connection, and the osb-global lock resources.
 * Local mounts skip the cluster pieces entirely.
 */
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_cluster_connection *conn = NULL;

	if (ocfs2_mount_local(osb)) {
		osb->node_num = 0;
		goto local;
	}

	status = ocfs2_dlm_init_debug(osb);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* launch downconvert thread */
	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
	if (IS_ERR(osb->dc_task)) {
		status = PTR_ERR(osb->dc_task);
		osb->dc_task = NULL;
		mlog_errno(status);
		goto bail;
	}

	/* for now, uuid == domain */
	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
				       osb->uuid_str,
				       strlen(osb->uuid_str),
				       &lproto, ocfs2_do_node_down, osb,
				       &conn);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_cluster_this_node(&osb->node_num);
	if (status < 0) {
		mlog_errno(status);
		mlog(ML_ERROR,
		     "could not find this host's node number\n");
		ocfs2_cluster_disconnect(conn, 0);
		goto bail;
	}

local:
	/* Initialize the osb-global lock resources (used on both local
	 * and clustered mounts). */
	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);

	osb->cconn = conn;

	status = 0;
bail:
	if (status < 0) {
		/* Error unwind: tear down debugfs and the downconvert
		 * thread if it was started. */
		ocfs2_dlm_shutdown_debug(osb);
		if (osb->dc_task)
			kthread_stop(osb->dc_task);
	}

	return status;
}

/*
 * Tear down the DLM side of a mount: drop the osb-global locks, stop
 * the downconvert thread, free the lock resources, and disconnect
 * from the cluster.
 */
void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
			int hangup_pending)
{
	ocfs2_drop_osb_locks(osb);

	/*
	 * Now that we have dropped all locks and ocfs2_dismount_volume()
	 * has disabled recovery, the DLM won't be talking to us. It's
	 * safe to tear things down before disconnecting the cluster.
	 */

	if (osb->dc_task) {
		kthread_stop(osb->dc_task);
		osb->dc_task = NULL;
	}

	ocfs2_lock_res_free(&osb->osb_super_lockres);
	ocfs2_lock_res_free(&osb->osb_rename_lockres);
	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);

	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
	osb->cconn = NULL;

	ocfs2_dlm_shutdown_debug(osb);
}

/*
 * Fully release @lockres back to the DLM.  The lockres must already be
 * marked OCFS2_LOCK_FREEING.  Waits out any in-flight operation, sets
 * the LVB one last time if applicable, then issues the final
 * ocfs2_dlm_unlock() and waits for its completion.  Always returns 0.
 */
static int ocfs2_drop_lock(struct ocfs2_super *osb,
			   struct ocfs2_lock_res *lockres)
{
	int ret;
	unsigned long flags;
	u32 lkm_flags = 0;

	/* We didn't get anywhere near actually using this lockres. */
	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
		goto out;

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
			"lockres %s, flags 0x%lx\n",
			lockres->l_name, lockres->l_flags);

	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
		     "%u, unlock_action = %u\n",
		     lockres->l_name, lockres->l_flags, lockres->l_action,
		     lockres->l_unlock_action);

		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/* XXX: Today we just wait on any busy
		 * locks... Perhaps we need to cancel converts in the
		 * future? */
		ocfs2_wait_on_busy_lock(lockres);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		/* Last chance to publish our LVB contents: only valid if
		 * we hold EX and the lock doesn't still need a refresh. */
		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
		    lockres->l_level == DLM_LOCK_EX &&
		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	if (lockres->l_flags & OCFS2_LOCK_BUSY)
		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
		     lockres->l_name);
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto out;
	}

	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);

	/* make sure we never get here while waiting for an ast to
	 * fire. */
	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);

	/* is this necessary? */
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "lock %s\n", lockres->l_name);

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
	if (ret) {
		/* A failed final unlock is unrecoverable - dump state
		 * and crash rather than leak a DLM lock. */
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
		BUG();
	}
	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
	     lockres->l_name);

	ocfs2_wait_on_busy_lock(lockres);

out:
	return 0;
}

/* Mark the lockres as being dropped. It will no longer be
 * queued if blocking, but we still may have to wait on it
 * being dequeued from the downconvert thread before we can consider
 * it safe to drop.
 *
 * You can *not* attempt to call cluster_lock on this lockres anymore.
 */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
{
	int status;
	struct ocfs2_mask_waiter mw;
	unsigned long flags;

	ocfs2_init_mask_waiter(&mw);

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres->l_flags |= OCFS2_LOCK_FREEING;
	/* Wait for the downconvert thread to dequeue this lockres;
	 * QUEUED may be re-set, so loop until it stays clear. */
	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		mlog(0, "Waiting on lockres %s\n", lockres->l_name);

		status = ocfs2_wait_for_mask(&mw);
		if (status)
			mlog_errno(status);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

/* Convenience wrapper: mark @lockres freeing, then drop it. */
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
			       struct ocfs2_lock_res *lockres)
{
	int ret;

	ocfs2_mark_lockres_freeing(lockres);
	ret = ocfs2_drop_lock(osb, lockres);
	if (ret)
		mlog_errno(ret);
}

/* Drop all four osb-global lock resources at unmount. */
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
}

/*
 * Drop the open, inode and rw lock resources of @inode.  All three
 * drops are attempted even if one fails; the first error seen is
 * returned.
 */
int ocfs2_drop_inode_locks(struct inode *inode)
{
	int status, err;

	/* No need to call ocfs2_mark_lockres_freeing here -
	 * ocfs2_clear_inode has done it for us. */
	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_open_lockres);
	if (err < 0)
		mlog_errno(err);

	status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_inode_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_rw_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	return status;
}

/*
 * Stamp @lockres for a downconvert to @new_level and return the
 * pending generation for lockres_clear_pending().  Caller must hold
 * lockres->l_lock; BUGs if the downconvert would not lower the level.
 */
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level)
{
	assert_spin_locked(&lockres->l_lock);

	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	if (lockres->l_level <= new_level) {
		mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
		     "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
		     "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
		     new_level, list_empty(&lockres->l_blocked_list),
		     list_empty(&lockres->l_mask_waiters), lockres->l_type,
		     lockres->l_flags, lockres->l_ro_holders,
		     lockres->l_ex_holders, lockres->l_action,
		     lockres->l_unlock_action, lockres->l_requested,
		     lockres->l_blocking, lockres->l_pending_gen);
		BUG();
	}

	mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
	     lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);

	lockres->l_action = OCFS2_AST_DOWNCONVERT;
	lockres->l_requested = new_level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	return lockres_set_pending(lockres);
}

/*
 * Issue the DLM convert for a downconvert prepared by
 * ocfs2_prepare_downconvert().  @lvb requests DLM_LKF_VALBLK;
 * @generation pairs with lockres_clear_pending().
 */
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation)
{
	int ret;
	u32 dlm_flags = DLM_LKF_CONVERT;

	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
	     lockres->l_level, new_level);

	if (lvb)
		dlm_flags |= DLM_LKF_VALBLK;

	ret = ocfs2_dlm_lock(osb->cconn,
			     new_level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);
	/* Clear PENDING regardless of outcome - dlm_lock() has returned. */
	lockres_clear_pending(lockres, generation, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
		goto bail;
	}

	ret = 0;
bail:
	return ret;
}

/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
				        struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);

	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
		/* If we're already trying to cancel a lock conversion
		 * then just drop the spinlock and allow the caller to
		 * requeue this lock. */
		mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
		return 0;
	}

	/* were we in a convert when we got the bast fire? */
	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
	/* set things up for the unlockast to know to just
	 * clear out the ast_action and unset busy, etc. */
	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
			"lock %s, invalid flags: 0x%lx\n",
			lockres->l_name, lockres->l_flags);

	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);

	return 1;
}

/*
 * Ask the DLM to cancel an in-flight convert on @lockres (set up by
 * ocfs2_prepare_cancel_convert()).  Called without the spinlock held.
 */
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
			       DLM_LKF_CANCEL);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 0);
	}

	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);

	return ret;
}

/*
 * Downconvert-thread workhorse: decide whether @lockres can be
 * downconverted now, and do it, or report via @ctl that the unblock
 * should be requeued.  Runs the per-type downconvert_worker (which may
 * sleep) outside the spinlock and rechecks state afterwards.
 */
static int ocfs2_unblock_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      struct ocfs2_unblock_ctl *ctl)
{
	unsigned long flags;
	int blocking;
	int new_level;
	int level;
	int ret = 0;
	int set_lvb = 0;
	unsigned int gen;

	spin_lock_irqsave(&lockres->l_lock, flags);

recheck:
	/*
	 * Is it still blocking? If not, we have no more work to do.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		ret = 0;
		goto leave;
	}

	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		/* XXX
		 * This is a *big* race. The OCFS2_LOCK_PENDING flag
		 * exists entirely for one reason - another thread has set
		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
		 *
		 * If we do ocfs2_cancel_convert() before the other thread
		 * calls dlm_lock(), our cancel will do nothing. We will
		 * get no ast, and we will have no way of knowing the
		 * cancel failed. Meanwhile, the other thread will call
		 * into dlm_lock() and wait...forever.
		 *
		 * Why forever? Because another node has asked for the
		 * lock first; that's why we're here in unblock_lock().
		 *
		 * The solution is OCFS2_LOCK_PENDING. When PENDING is
		 * set, we just requeue the unblock. Only when the other
		 * thread has called dlm_lock() and cleared PENDING will
		 * we then cancel their request.
		 *
		 * All callers of dlm_lock() must set OCFS2_DLM_PENDING
		 * at the same time they set OCFS2_DLM_BUSY. They must
		 * clear OCFS2_DLM_PENDING after dlm_lock() returns.
		 */
		if (lockres->l_flags & OCFS2_LOCK_PENDING) {
			mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
			     lockres->l_name);
			goto leave_requeue;
		}

		ctl->requeue = 1;
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		if (ret) {
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0)
				mlog_errno(ret);
		}
		goto leave;
	}

	/*
	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
	 * set when the ast is received for an upconvert just before the
	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
	 * on the heels of the ast, we want to delay the downconvert just
	 * enough to allow the up requestor to do its task. Because this
	 * lock is in the blocked queue, the lock will be downconverted
	 * as soon as the requestor is done with the lock.
	 */
	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
		goto leave_requeue;

	/*
	 * How can we block and yet be at NL? We were trying to upconvert
	 * from NL and got canceled. The code comes back here, and now
	 * we notice and clear BLOCKING.
	 */
	if (lockres->l_level == DLM_LOCK_NL) {
		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
		mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto leave;
	}

	/* if we're blocking an exclusive and we have *any* holders,
	 * then requeue. */
	if ((lockres->l_blocking == DLM_LOCK_EX)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
		     lockres->l_name, lockres->l_ex_holders,
		     lockres->l_ro_holders);
		goto leave_requeue;
	}

	/* If it's a PR we're blocking, then only
	 * requeue if we've got any EX holders */
	if (lockres->l_blocking == DLM_LOCK_PR &&
	    lockres->l_ex_holders) {
		mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
		     lockres->l_name, lockres->l_ex_holders);
		goto leave_requeue;
	}

	/*
	 * Can we get a lock in this state if the holder counts are
	 * zero? The meta data unblock code used to check this.
	 */
	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
		mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
		     lockres->l_name);
		goto leave_requeue;
	}

	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);

	if (lockres->l_ops->check_downconvert
	    && !lockres->l_ops->check_downconvert(lockres, new_level)) {
		mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
		     lockres->l_name);
		goto leave_requeue;
	}

	/* If we get here, then we know that there are no more
	 * incompatible holders (and anyone asking for an incompatible
	 * lock is blocked). We can now downconvert the lock */
	if (!lockres->l_ops->downconvert_worker)
		goto downconvert;

	/* Some lockres types want to do a bit of work before
	 * downconverting a lock. Allow that here. The worker function
	 * may sleep, so we save off a copy of what we're blocking as
	 * it may change while we're not holding the spin lock. */
	blocking = lockres->l_blocking;
	level = lockres->l_level;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);

	if (ctl->unblock_action == UNBLOCK_STOP_POST) {
		mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
		     lockres->l_name);
		goto leave;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
		/* If this changed underneath us, then we can't drop
		 * it just yet. */
		mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
		     "Recheck\n", lockres->l_name, blocking,
		     lockres->l_blocking, level, lockres->l_level);
		goto recheck;
	}

downconvert:
	ctl->requeue = 0;

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		if (lockres->l_level == DLM_LOCK_EX)
			set_lvb = 1;

		/*
		 * We only set the lvb if the lock has been fully
		 * refreshed - otherwise we risk setting stale
		 * data. Otherwise, there's no need to actually clear
		 * out the lvb here as it's value is still valid.
		 */
		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	gen = ocfs2_prepare_downconvert(lockres, new_level);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb, gen);
leave:
	if (ret)
		mlog_errno(ret);
	return ret;

leave_requeue:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	ctl->requeue = 1;

	return 0;
}

/*
 * downconvert_worker for inode data locks: bump the dir lock
 * generation for directories; for regular files, unmap, write back
 * and (when blocking EX) truncate the page cache before the level drops.
 */
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking)
{
	struct inode *inode;
	struct address_space *mapping;
	struct ocfs2_inode_info *oi;

       	inode = ocfs2_lock_res_inode(lockres);
	mapping = inode->i_mapping;

	if (S_ISDIR(inode->i_mode)) {
		oi = OCFS2_I(inode);
		oi->ip_dir_lock_gen++;
		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
		goto out;
	}

	if (!S_ISREG(inode->i_mode))
		goto out;

	/*
	 * We need this before the filemap_fdatawrite() so that it can
	 * transfer the dirty bit from the PTE to the
	 * page.
Unfortunately this means that even for EX->PR
	 * downconverts, we'll lose our mappings and have to build
	 * them up again.
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	if (filemap_fdatawrite(mapping)) {
		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
	}
	sync_mapping_buffers(mapping);
	if (blocking == DLM_LOCK_EX) {
		truncate_inode_pages(mapping, 0);
	} else {
		/* We only need to wait on the I/O if we're not also
		 * truncating pages because truncate_inode_pages waits
		 * for us above. We don't truncate pages if we're
		 * blocking anything < EXMODE because we want to keep
		 * them around in that case. */
		filemap_fdatawait(mapping);
	}

out:
	return UNBLOCK_CONTINUE;
}

/*
 * Return 1 if @ci is fully checkpointed in the journal (safe to
 * downconvert); otherwise kick off a checkpoint and return 0 so the
 * downconvert is retried later.
 */
static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
				 struct ocfs2_lock_res *lockres,
				 int new_level)
{
	int checkpointed = ocfs2_ci_fully_checkpointed(ci);

	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);

	if (checkpointed)
		return 1;

	ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
	return 0;
}

/* check_downconvert for inode metadata locks: gate on checkpointing. */
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
}

/* set_lvb for inode metadata locks: publish inode fields into the LVB. */
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	__ocfs2_stuff_meta_lvb(inode);
}

/*
 * Does the final reference drop on our dentry lock. Right now this
 * happens in the downconvert thread, but we could choose to simplify the
 * dlmglue API and push these off to the ocfs2_wq in the future.
 */
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
	ocfs2_dentry_lock_put(osb, dl);
}

/*
 * d_delete() matching dentries before the lock downconvert.
 *
 * At this point, any process waiting to destroy the
 * dentry_lock due to last ref count is stopped by the
 * OCFS2_LOCK_QUEUED flag.
 *
 * We have two potential problems
 *
 * 1) If we do the last reference drop on our dentry_lock (via dput)
 *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
 *    the downconvert to finish. Instead we take an elevated
 *    reference and push the drop until after we've completed our
 *    unblock processing.
 *
 * 2) There might be another process with a final reference,
 *    waiting on us to finish processing. If this is the case, we
 *    detect it and exit out - there's no more dentries anyway.
 */
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
	struct dentry *dentry;
	unsigned long flags;
	int extra_ref = 0;

	/*
	 * This node is blocking another node from getting a read
	 * lock. This happens when we've renamed within a
	 * directory. We've forced the other nodes to d_delete(), but
	 * we never actually dropped our lock because it's still
	 * valid. The downconvert code will retain a PR for this node,
	 * so there's no further work to do.
	 */
	if (blocking == DLM_LOCK_PR)
		return UNBLOCK_CONTINUE;

	/*
	 * Mark this inode as potentially orphaned. The code in
	 * ocfs2_delete_inode() will figure out whether it actually
	 * needs to be freed or not.
	 */
	spin_lock(&oi->ip_lock);
	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
	spin_unlock(&oi->ip_lock);

	/*
	 * Yuck. We need to make sure however that the check of
	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
	 * respect to a reference decrement or the setting of that
	 * flag.
	 */
	spin_lock_irqsave(&lockres->l_lock, flags);
	spin_lock(&dentry_attach_lock);
	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
	    && dl->dl_count) {
		dl->dl_count++;
		extra_ref = 1;
	}
	spin_unlock(&dentry_attach_lock);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "extra_ref = %d\n", extra_ref);

	/*
	 * We have a process waiting on us in ocfs2_dentry_iput(),
	 * which means we can't have any more outstanding
	 * aliases. There's no need to do any more work.
	 */
	if (!extra_ref)
		return UNBLOCK_CONTINUE;

	/* d_delete() every local alias of this inode; the attach lock
	 * is dropped around each d_delete/dput since they may sleep. */
	spin_lock(&dentry_attach_lock);
	while (1) {
		dentry = ocfs2_find_local_alias(dl->dl_inode,
						dl->dl_parent_blkno, 1);
		if (!dentry)
			break;
		spin_unlock(&dentry_attach_lock);

		mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
		     dentry->d_name.name);

		/*
		 * The following dcache calls may do an
		 * iput(). Normally we don't want that from the
		 * downconverting thread, but in this case it's ok
		 * because the requesting node already has an
		 * exclusive lock on the inode, so it can't be queued
		 * for a downconvert.
		 */
		d_delete(dentry);
		dput(dentry);

		spin_lock(&dentry_attach_lock);
	}
	spin_unlock(&dentry_attach_lock);

	/*
	 * If we are the last holder of this dentry lock, there is no
	 * reason to downconvert so skip straight to the unlock.
	 */
	if (dl->dl_count == 1)
		return UNBLOCK_STOP_POST;

	return UNBLOCK_CONTINUE_POST;
}

/* check_downconvert for refcount tree locks: gate on checkpointing. */
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level)
{
	struct ocfs2_refcount_tree *tree =
				ocfs2_lock_res_refcount_tree(lockres);

	return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
}

/* downconvert_worker for refcount tree locks: purge the metadata cache. */
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking)
{
	struct ocfs2_refcount_tree *tree =
				ocfs2_lock_res_refcount_tree(lockres);

	ocfs2_metadata_cache_purge(&tree->rf_ci);

	return UNBLOCK_CONTINUE;
}

/*
 * set_lvb for quota info locks: serialize the in-memory quota info
 * into the LVB (big-endian on the wire).
 */
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_qinfo_lvb *lvb;
	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
					    oinfo->dqi_gi.dqi_type);

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
}

/* Drop the quota info cluster lock taken by ocfs2_qinfo_lock(). */
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
	int level = ex ?
DLM_LOCK_EX : DLM_LOCK_PR; if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) ocfs2_cluster_unlock(osb, lockres, level); } static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) { struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, oinfo->dqi_gi.dqi_type); struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); struct buffer_head *bh = NULL; struct ocfs2_global_disk_dqinfo *gdinfo; int status = 0; if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) && lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) { info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace); info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace); oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms); oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks); oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk); oinfo->dqi_gi.dqi_free_entry = be32_to_cpu(lvb->lvb_free_entry); } else { status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode, oinfo->dqi_giblk, &bh); if (status) { mlog_errno(status); goto bail; } gdinfo = (struct ocfs2_global_disk_dqinfo *) (bh->b_data + OCFS2_GLOBAL_INFO_OFF); info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace); info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace); oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms); oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks); oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk); oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(gdinfo->dqi_free_entry); brelse(bh); ocfs2_track_lock_refresh(lockres); } bail: return status; } /* Lock quota info, this function expects at least shared lock on the quota file * so that we can safely refresh quota info from disk. */ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex) { struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; int status = 0; /* On RO devices, locking really isn't needed... 
*/ if (ocfs2_is_hard_readonly(osb)) { if (ex) status = -EROFS; goto bail; } if (ocfs2_mount_local(osb)) goto bail; status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); if (status < 0) { mlog_errno(status); goto bail; } if (!ocfs2_should_refresh_lock_res(lockres)) goto bail; /* OK, we have the lock but we need to refresh the quota info */ status = ocfs2_refresh_qinfo(oinfo); if (status) ocfs2_qinfo_unlock(oinfo, ex); ocfs2_complete_lock_res_refresh(lockres, status); bail: return status; } int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex) { int status; int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres; struct ocfs2_super *osb = lockres->l_priv; if (ocfs2_is_hard_readonly(osb)) return -EROFS; if (ocfs2_mount_local(osb)) return 0; status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); if (status < 0) mlog_errno(status); return status; } void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex) { int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres; struct ocfs2_super *osb = lockres->l_priv; if (!ocfs2_mount_local(osb)) ocfs2_cluster_unlock(osb, lockres, level); } static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres) { int status; struct ocfs2_unblock_ctl ctl = {0, 0,}; unsigned long flags; /* Our reference to the lockres in this function can be * considered valid until we remove the OCFS2_LOCK_QUEUED * flag. */ BUG_ON(!lockres); BUG_ON(!lockres->l_ops); mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name); /* Detect whether a lock has been marked as going away while * the downconvert thread was processing other things. A lock can * still be marked with OCFS2_LOCK_FREEING after this check, * but short circuiting here will still save us some * performance. 
*/ spin_lock_irqsave(&lockres->l_lock, flags); if (lockres->l_flags & OCFS2_LOCK_FREEING) goto unqueue; spin_unlock_irqrestore(&lockres->l_lock, flags); status = ocfs2_unblock_lock(osb, lockres, &ctl); if (status < 0) mlog_errno(status); spin_lock_irqsave(&lockres->l_lock, flags); unqueue: if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) { lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED); } else ocfs2_schedule_blocked_lock(osb, lockres); mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name, ctl.requeue ? "yes" : "no"); spin_unlock_irqrestore(&lockres->l_lock, flags); if (ctl.unblock_action != UNBLOCK_CONTINUE && lockres->l_ops->post_unlock) lockres->l_ops->post_unlock(osb, lockres); } static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres) { assert_spin_locked(&lockres->l_lock); if (lockres->l_flags & OCFS2_LOCK_FREEING) { /* Do not schedule a lock for downconvert when it's on * the way to destruction - any nodes wanting access * to the resource will get it soon. 
*/ mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n", lockres->l_name, lockres->l_flags); return; } lockres_or_flags(lockres, OCFS2_LOCK_QUEUED); spin_lock(&osb->dc_task_lock); if (list_empty(&lockres->l_blocked_list)) { list_add_tail(&lockres->l_blocked_list, &osb->blocked_lock_list); osb->blocked_lock_count++; } spin_unlock(&osb->dc_task_lock); } static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) { unsigned long processed; struct ocfs2_lock_res *lockres; spin_lock(&osb->dc_task_lock); /* grab this early so we know to try again if a state change and * wake happens part-way through our work */ osb->dc_work_sequence = osb->dc_wake_sequence; processed = osb->blocked_lock_count; while (processed) { BUG_ON(list_empty(&osb->blocked_lock_list)); lockres = list_entry(osb->blocked_lock_list.next, struct ocfs2_lock_res, l_blocked_list); list_del_init(&lockres->l_blocked_list); osb->blocked_lock_count--; spin_unlock(&osb->dc_task_lock); BUG_ON(!processed); processed--; ocfs2_process_blocked_lock(osb, lockres); spin_lock(&osb->dc_task_lock); } spin_unlock(&osb->dc_task_lock); } static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) { int empty = 0; spin_lock(&osb->dc_task_lock); if (list_empty(&osb->blocked_lock_list)) empty = 1; spin_unlock(&osb->dc_task_lock); return empty; } static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb) { int should_wake = 0; spin_lock(&osb->dc_task_lock); if (osb->dc_work_sequence != osb->dc_wake_sequence) should_wake = 1; spin_unlock(&osb->dc_task_lock); return should_wake; } static int ocfs2_downconvert_thread(void *arg) { int status = 0; struct ocfs2_super *osb = arg; /* only quit once we've been asked to stop and there is no more * work available */ while (!(kthread_should_stop() && ocfs2_downconvert_thread_lists_empty(osb))) { wait_event_interruptible(osb->dc_event, ocfs2_downconvert_thread_should_wake(osb) || kthread_should_stop()); mlog(0, "downconvert_thread: awoken\n"); 
ocfs2_downconvert_thread_do_work(osb); } osb->dc_task = NULL; return status; } void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb) { spin_lock(&osb->dc_task_lock); /* make sure the voting thread gets a swipe at whatever changes * the caller may have made to the voting state */ osb->dc_wake_sequence++; spin_unlock(&osb->dc_task_lock); wake_up(&osb->dc_event); }
gpl-2.0
Dee-UK/RK3288_Lollipop_Kernel
drivers/char/ipmi/ipmi_bt_sm.c
2465
20842
/* * ipmi_bt_sm.c * * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part * of the driver architecture at http://sourceforge.net/projects/openipmi * * Author: Rocky Craig <first.last@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> /* For printk. 
*/ #include <linux/string.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" #define BT_DEBUG_OFF 0 /* Used in production */ #define BT_DEBUG_ENABLE 1 /* Generic messages */ #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ /* * BT_DEBUG_OFF must be zero to correspond to the default uninitialized * value */ static int bt_debug; /* 0 == BT_DEBUG_OFF */ module_param(bt_debug, int, 0644); MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, * and 64 byte buffers. However, one HP implementation wants 255 bytes of * buffer (with a documented message of 160 bytes) so go for the max. * Since the Open IPMI architecture is single-message oriented at this * stage, the queue depth of BT is of no concern. */ #define BT_NORMAL_TIMEOUT 5 /* seconds */ #define BT_NORMAL_RETRY_LIMIT 2 #define BT_RESET_DELAY 6 /* seconds after warm reset */ /* * States are written in chronological order and usually cover * multiple rows of the state table discussion in the IPMI spec. */ enum bt_states { BT_STATE_IDLE = 0, /* Order is critical in this list */ BT_STATE_XACTION_START, BT_STATE_WRITE_BYTES, BT_STATE_WRITE_CONSUME, BT_STATE_READ_WAIT, BT_STATE_CLEAR_B2H, BT_STATE_READ_BYTES, BT_STATE_RESET1, /* These must come last */ BT_STATE_RESET2, BT_STATE_RESET3, BT_STATE_RESTART, BT_STATE_PRINTME, BT_STATE_CAPABILITIES_BEGIN, BT_STATE_CAPABILITIES_END, BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ }; /* * Macros seen at the end of state "case" blocks. They help with legibility * and debugging. 
*/ #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ int error_retries; /* end of "common" fields */ int nonzero_status; /* hung BMCs stay all 0 */ enum bt_states complete; /* to divert the state machine */ int BT_CAP_outreqs; long BT_CAP_req2rsp; int BT_CAP_retries; /* Recommended retries */ }; #define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ #define BT_CLR_RD_PTR 0x02 #define BT_H2B_ATN 0x04 #define BT_B2H_ATN 0x08 #define BT_SMS_ATN 0x10 #define BT_OEM0 0x20 #define BT_H_BUSY 0x40 #define BT_B_BUSY 0x80 /* * Some bits are toggled on each write: write once to set it, once * more to clear it; writing a zero does nothing. To absolutely * clear it, check its state and write if set. This avoids the "get * current then use as mask" scheme to modify one bit. Note that the * variable "bt" is hardcoded into these macros. */ #define BT_STATUS bt->io->inputb(bt->io, 0) #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) #define BMC2HOST bt->io->inputb(bt->io, 1) #define HOST2BMC(x) bt->io->outputb(bt->io, 1, x) #define BT_INTMASK_R bt->io->inputb(bt->io, 2) #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) /* * Convenience routines for debugging. These are not multi-open safe! * Note the macros have hardcoded variables in them. 
*/ static char *state2txt(unsigned char state) { switch (state) { case BT_STATE_IDLE: return("IDLE"); case BT_STATE_XACTION_START: return("XACTION"); case BT_STATE_WRITE_BYTES: return("WR_BYTES"); case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); case BT_STATE_READ_WAIT: return("RD_WAIT"); case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); case BT_STATE_READ_BYTES: return("RD_BYTES"); case BT_STATE_RESET1: return("RESET1"); case BT_STATE_RESET2: return("RESET2"); case BT_STATE_RESET3: return("RESET3"); case BT_STATE_RESTART: return("RESTART"); case BT_STATE_LONG_BUSY: return("LONG_BUSY"); case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); case BT_STATE_CAPABILITIES_END: return("CAP_END"); } return("BAD STATE"); } #define STATE2TXT state2txt(bt->state) static char *status2txt(unsigned char status) { /* * This cannot be called by two threads at the same time and * the buffer is always consumed immediately, so the static is * safe to use. */ static char buf[40]; strcpy(buf, "[ "); if (status & BT_B_BUSY) strcat(buf, "B_BUSY "); if (status & BT_H_BUSY) strcat(buf, "H_BUSY "); if (status & BT_OEM0) strcat(buf, "OEM0 "); if (status & BT_SMS_ATN) strcat(buf, "SMS "); if (status & BT_B2H_ATN) strcat(buf, "B2H "); if (status & BT_H2B_ATN) strcat(buf, "H2B "); strcat(buf, "]"); return buf; } #define STATUS2TXT status2txt(status) /* called externally at insmod time, and internally on cleanup */ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) { memset(bt, 0, sizeof(struct si_sm_data)); if (bt->io != io) { /* external: one-time only things */ bt->io = io; bt->seq = 0; } bt->state = BT_STATE_IDLE; /* start here */ bt->complete = BT_STATE_IDLE; /* end here */ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000; bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ return 3; /* We claim 3 bytes of space; ought to check SPMI table */ } /* Jam a completion code (probably an error) into a response */ 
static void force_result(struct si_sm_data *bt, unsigned char completion_code) { bt->read_data[0] = 4; /* # following bytes */ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */ bt->read_data[3] = bt->write_data[3]; /* Command */ bt->read_data[4] = completion_code; bt->read_count = 5; } /* The upper state machine starts here */ static int bt_start_transaction(struct si_sm_data *bt, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > IPMI_MAX_MSG_LENGTH) return IPMI_REQ_LEN_EXCEEDED_ERR; if (bt->state == BT_STATE_LONG_BUSY) return IPMI_NODE_BUSY_ERR; if (bt->state != BT_STATE_IDLE) return IPMI_NOT_IN_MY_STATE_ERR; if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); for (i = 0; i < size; i ++) printk(" %02x", data[i]); printk("\n"); } bt->write_data[0] = size + 1; /* all data plus seq byte */ bt->write_data[1] = *data; /* NetFn/LUN */ bt->write_data[2] = bt->seq++; memcpy(bt->write_data + 3, data + 1, size - 1); bt->write_count = size + 2; bt->error_retries = 0; bt->nonzero_status = 0; bt->truncated = 0; bt->state = BT_STATE_XACTION_START; bt->timeout = bt->BT_CAP_req2rsp; force_result(bt, IPMI_ERR_UNSPECIFIED); return 0; } /* * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE * it calls this. Strip out the length and seq bytes. 
*/ static int bt_get_result(struct si_sm_data *bt, unsigned char *data, unsigned int length) { int i, msg_len; msg_len = bt->read_count - 2; /* account for length & seq */ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { force_result(bt, IPMI_ERR_UNSPECIFIED); msg_len = 3; } data[0] = bt->read_data[1]; data[1] = bt->read_data[3]; if (length < msg_len || bt->truncated) { data[2] = IPMI_ERR_MSG_TRUNCATED; msg_len = 3; } else memcpy(data + 2, bt->read_data + 4, msg_len - 2); if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: result %d bytes:", msg_len); for (i = 0; i < msg_len; i++) printk(" %02x", data[i]); printk("\n"); } return msg_len; } /* This bit's functionality is optional */ #define BT_BMC_HWRST 0x80 static void reset_flags(struct si_sm_data *bt) { if (bt_debug) printk(KERN_WARNING "IPMI BT: flag reset %s\n", status2txt(BT_STATUS)); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* force clear */ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */ BT_CONTROL(BT_SMS_ATN); /* always clear */ BT_INTMASK_W(BT_BMC_HWRST); } /* * Get rid of an unwanted/stale response. This should only be needed for * BMCs that support multiple outstanding requests. 
*/ static void drain_BMC2HOST(struct si_sm_data *bt) { int i, size; if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */ return; BT_CONTROL(BT_H_BUSY); /* now set */ BT_CONTROL(BT_B2H_ATN); /* always clear */ BT_STATUS; /* pause */ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */ if (bt_debug) printk(KERN_WARNING "IPMI BT: stale response %s; ", status2txt(BT_STATUS)); size = BMC2HOST; for (i = 0; i < size ; i++) BMC2HOST; BT_CONTROL(BT_H_BUSY); /* now clear */ if (bt_debug) printk("drained %d bytes\n", size + 1); } static inline void write_all_bytes(struct si_sm_data *bt) { int i; if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", bt->write_count, bt->seq); for (i = 0; i < bt->write_count; i++) printk(" %02x", bt->write_data[i]); printk("\n"); } for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]); } static inline int read_all_bytes(struct si_sm_data *bt) { unsigned int i; /* * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. * Keep layout of first four bytes aligned with write_data[] */ bt->read_data[0] = BMC2HOST; bt->read_count = bt->read_data[0]; if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "BT: bad raw rsp len=%d\n", bt->read_count); bt->truncated = 1; return 1; /* let next XACTION START clean it up */ } for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST; bt->read_count++; /* Account internally for length byte */ if (bt_debug & BT_DEBUG_MSG) { int max = bt->read_count; printk(KERN_WARNING "BT: got %d bytes seq=0x%02X", max, bt->read_data[2]); if (max > 16) max = 16; for (i = 0; i < max; i++) printk(KERN_CONT " %02x", bt->read_data[i]); printk(KERN_CONT "%s\n", bt->read_count == max ? 
"" : " ..."); } /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ if ((bt->read_data[3] == bt->write_data[3]) && (bt->read_data[2] == bt->write_data[2]) && ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) return 1; if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "IPMI BT: bad packet: " "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], bt->read_data[1], bt->read_data[2], bt->read_data[3]); return 0; } /* Restart if retries are left, or return an error completion code */ static enum si_sm_result error_recovery(struct si_sm_data *bt, unsigned char status, unsigned char cCode) { char *reason; bt->timeout = bt->BT_CAP_req2rsp; switch (cCode) { case IPMI_TIMEOUT_ERR: reason = "timeout"; break; default: reason = "internal error"; break; } printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ reason, STATE2TXT, STATUS2TXT); /* * Per the IPMI spec, retries are based on the sequence number * known only to this module, so manage a restart here. */ (bt->error_retries)++; if (bt->error_retries < bt->BT_CAP_retries) { printk("%d retries left\n", bt->BT_CAP_retries - bt->error_retries); bt->state = BT_STATE_RESTART; return SI_SM_CALL_WITHOUT_DELAY; } printk(KERN_WARNING "failed %d retries, sending error response\n", bt->BT_CAP_retries); if (!bt->nonzero_status) printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); /* this is most likely during insmod */ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); bt->state = BT_STATE_RESET1; return SI_SM_CALL_WITHOUT_DELAY; } /* * Concoct a useful error message, set up the next state, and * be done with this sequence. 
*/ bt->state = BT_STATE_IDLE; switch (cCode) { case IPMI_TIMEOUT_ERR: if (status & BT_B_BUSY) { cCode = IPMI_NODE_BUSY_ERR; bt->state = BT_STATE_LONG_BUSY; } break; default: break; } force_result(bt, cCode); return SI_SM_TRANSACTION_COMPLETE; } /* Check status and (usually) take action and change this state machine. */ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) { unsigned char status, BT_CAP[8]; static enum bt_states last_printed = BT_STATE_PRINTME; int i; status = BT_STATUS; bt->nonzero_status |= status; if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", STATE2TXT, STATUS2TXT, bt->timeout, time); last_printed = bt->state; } /* * Commands that time out may still (eventually) provide a response. * This stale response will get in the way of a new response so remove * it if possible (hopefully during IDLE). Even if it comes up later * it will be rejected by its (now-forgotten) seq number. */ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { drain_BMC2HOST(bt); BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); } if ((bt->state != BT_STATE_IDLE) && (bt->state < BT_STATE_PRINTME)) { /* check timeout */ bt->timeout -= time; if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) return error_recovery(bt, status, IPMI_TIMEOUT_ERR); } switch (bt->state) { /* * Idle state first checks for asynchronous messages from another * channel, then does some opportunistic housekeeping. 
*/ case BT_STATE_IDLE: if (status & BT_SMS_ATN) { BT_CONTROL(BT_SMS_ATN); /* clear it */ return SI_SM_ATTN; } if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); /* Read BT capabilities if it hasn't been done yet */ if (!bt->BT_CAP_outreqs) BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, SI_SM_CALL_WITHOUT_DELAY); bt->timeout = bt->BT_CAP_req2rsp; BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* force clear */ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_BYTES: if (status & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* clear */ BT_CONTROL(BT_CLR_WR_PTR); write_all_bytes(bt); BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_CONSUME: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); BT_STATE_CHANGE(BT_STATE_READ_WAIT, SI_SM_CALL_WITHOUT_DELAY); /* Spinning hard can suppress B2H_ATN and force a timeout */ case BT_STATE_READ_WAIT: if (!(status & BT_B2H_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); BT_CONTROL(BT_H_BUSY); /* set */ /* * Uncached, ordered writes should just proceed serially but * some BMCs don't clear B2H_ATN with one hit. Fast-path a * workaround without too much penalty to the general case. 
*/ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_CLEAR_B2H: if (status & BT_B2H_ATN) { /* keep hitting it */ BT_CONTROL(BT_B2H_ATN); BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); } BT_STATE_CHANGE(BT_STATE_READ_BYTES, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_READ_BYTES: if (!(status & BT_H_BUSY)) /* check in case of retry */ BT_CONTROL(BT_H_BUSY); BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ i = read_all_bytes(bt); /* true == packet seq match */ BT_CONTROL(BT_H_BUSY); /* NOW clear */ if (!i) /* Not my message */ BT_STATE_CHANGE(BT_STATE_READ_WAIT, SI_SM_CALL_WITHOUT_DELAY); bt->state = bt->complete; return bt->state == BT_STATE_IDLE ? /* where to next? */ SI_SM_TRANSACTION_COMPLETE : /* normal */ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */ case BT_STATE_LONG_BUSY: /* For example: after FW update */ if (!(status & BT_B_BUSY)) { reset_flags(bt); /* next state is now IDLE */ bt_init_data(bt, bt->io); } return SI_SM_CALL_WITH_DELAY; /* No repeat printing */ case BT_STATE_RESET1: reset_flags(bt); drain_BMC2HOST(bt); BT_STATE_CHANGE(BT_STATE_RESET2, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESET2: /* Send a soft reset */ BT_CONTROL(BT_CLR_WR_PTR); HOST2BMC(3); /* number of bytes following */ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */ HOST2BMC(42); /* Sequence number */ HOST2BMC(3); /* Cmd == Soft reset */ BT_CONTROL(BT_H2B_ATN); bt->timeout = BT_RESET_DELAY * 1000000; BT_STATE_CHANGE(BT_STATE_RESET3, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESET3: /* Hold off everything for a bit */ if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY; drain_BMC2HOST(bt); BT_STATE_CHANGE(BT_STATE_RESTART, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESTART: /* don't reset retries or seq! */ bt->read_count = 0; bt->nonzero_status = 0; bt->timeout = bt->BT_CAP_req2rsp; BT_STATE_CHANGE(BT_STATE_XACTION_START, SI_SM_CALL_WITH_DELAY); /* * Get BT Capabilities, using timing of upper level state machine. 
* Set outreqs to prevent infinite loop on timeout. */ case BT_STATE_CAPABILITIES_BEGIN: bt->BT_CAP_outreqs = 1; { unsigned char GetBT_CAP[] = { 0x18, 0x36 }; bt->state = BT_STATE_IDLE; bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); } bt->complete = BT_STATE_CAPABILITIES_END; BT_STATE_CHANGE(BT_STATE_XACTION_START, SI_SM_CALL_WITH_DELAY); case BT_STATE_CAPABILITIES_END: i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); bt_init_data(bt, bt->io); if ((i == 8) && !BT_CAP[2]) { bt->BT_CAP_outreqs = BT_CAP[3]; bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000; bt->BT_CAP_retries = BT_CAP[7]; } else printk(KERN_WARNING "IPMI BT: using default values\n"); if (!bt->BT_CAP_outreqs) bt->BT_CAP_outreqs = 1; printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries); bt->timeout = bt->BT_CAP_req2rsp; return SI_SM_CALL_WITHOUT_DELAY; default: /* should never occur */ return error_recovery(bt, status, IPMI_ERR_UNSPECIFIED); } return SI_SM_CALL_WITH_DELAY; } static int bt_detect(struct si_sm_data *bt) { /* * It's impossible for the BT status and interrupt registers to be * all 1's, (assuming a properly functioning, self-initialized BMC) * but that's what you get from reading a bogus address, so we * test that first. The calling routine uses negative logic. */ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1; reset_flags(bt); return 0; } static void bt_cleanup(struct si_sm_data *bt) { } static int bt_size(void) { return sizeof(struct si_sm_data); } struct si_sm_handlers bt_smi_handlers = { .init_data = bt_init_data, .start_transaction = bt_start_transaction, .get_result = bt_get_result, .event = bt_event, .detect = bt_detect, .cleanup = bt_cleanup, .size = bt_size, };
gpl-2.0
broodplank/glass-omap-xrr02
drivers/hid/hid-debug.c
2721
36332
/* * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de> * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz> * (c) 2007-2009 Jiri Kosina * * HID debugging support */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/hid.h> #include <linux/hid-debug.h> static struct dentry *hid_debug_root; struct hid_usage_entry { unsigned page; unsigned usage; const char *description; }; static const struct hid_usage_entry hid_usage_table[] = { { 0, 0, "Undefined" }, { 1, 0, "GenericDesktop" }, {0, 0x01, "Pointer"}, {0, 0x02, "Mouse"}, {0, 0x04, "Joystick"}, {0, 0x05, "GamePad"}, {0, 0x06, "Keyboard"}, {0, 0x07, "Keypad"}, {0, 0x08, "MultiAxis"}, {0, 0x30, "X"}, {0, 0x31, "Y"}, {0, 0x32, "Z"}, {0, 0x33, "Rx"}, {0, 0x34, "Ry"}, {0, 0x35, "Rz"}, {0, 0x36, "Slider"}, {0, 0x37, "Dial"}, {0, 0x38, "Wheel"}, {0, 0x39, "HatSwitch"}, {0, 0x3a, "CountedBuffer"}, {0, 0x3b, "ByteCount"}, {0, 0x3c, "MotionWakeup"}, {0, 0x3d, "Start"}, {0, 0x3e, 
"Select"}, {0, 0x40, "Vx"}, {0, 0x41, "Vy"}, {0, 0x42, "Vz"}, {0, 0x43, "Vbrx"}, {0, 0x44, "Vbry"}, {0, 0x45, "Vbrz"}, {0, 0x46, "Vno"}, {0, 0x80, "SystemControl"}, {0, 0x81, "SystemPowerDown"}, {0, 0x82, "SystemSleep"}, {0, 0x83, "SystemWakeUp"}, {0, 0x84, "SystemContextMenu"}, {0, 0x85, "SystemMainMenu"}, {0, 0x86, "SystemAppMenu"}, {0, 0x87, "SystemMenuHelp"}, {0, 0x88, "SystemMenuExit"}, {0, 0x89, "SystemMenuSelect"}, {0, 0x8a, "SystemMenuRight"}, {0, 0x8b, "SystemMenuLeft"}, {0, 0x8c, "SystemMenuUp"}, {0, 0x8d, "SystemMenuDown"}, {0, 0x90, "D-PadUp"}, {0, 0x91, "D-PadDown"}, {0, 0x92, "D-PadRight"}, {0, 0x93, "D-PadLeft"}, { 2, 0, "Simulation" }, {0, 0xb0, "Aileron"}, {0, 0xb1, "AileronTrim"}, {0, 0xb2, "Anti-Torque"}, {0, 0xb3, "Autopilot"}, {0, 0xb4, "Chaff"}, {0, 0xb5, "Collective"}, {0, 0xb6, "DiveBrake"}, {0, 0xb7, "ElectronicCountermeasures"}, {0, 0xb8, "Elevator"}, {0, 0xb9, "ElevatorTrim"}, {0, 0xba, "Rudder"}, {0, 0xbb, "Throttle"}, {0, 0xbc, "FlightCommunications"}, {0, 0xbd, "FlareRelease"}, {0, 0xbe, "LandingGear"}, {0, 0xbf, "ToeBrake"}, { 7, 0, "Keyboard" }, { 8, 0, "LED" }, {0, 0x01, "NumLock"}, {0, 0x02, "CapsLock"}, {0, 0x03, "ScrollLock"}, {0, 0x04, "Compose"}, {0, 0x05, "Kana"}, {0, 0x4b, "GenericIndicator"}, { 9, 0, "Button" }, { 10, 0, "Ordinal" }, { 12, 0, "Consumer" }, {0, 0x238, "HorizontalWheel"}, { 13, 0, "Digitizers" }, {0, 0x01, "Digitizer"}, {0, 0x02, "Pen"}, {0, 0x03, "LightPen"}, {0, 0x04, "TouchScreen"}, {0, 0x05, "TouchPad"}, {0, 0x20, "Stylus"}, {0, 0x21, "Puck"}, {0, 0x22, "Finger"}, {0, 0x30, "TipPressure"}, {0, 0x31, "BarrelPressure"}, {0, 0x32, "InRange"}, {0, 0x33, "Touch"}, {0, 0x34, "UnTouch"}, {0, 0x35, "Tap"}, {0, 0x39, "TabletFunctionKey"}, {0, 0x3a, "ProgramChangeKey"}, {0, 0x3c, "Invert"}, {0, 0x42, "TipSwitch"}, {0, 0x43, "SecondaryTipSwitch"}, {0, 0x44, "BarrelSwitch"}, {0, 0x45, "Eraser"}, {0, 0x46, "TabletPick"}, {0, 0x47, "Confidence"}, {0, 0x48, "Width"}, {0, 0x49, "Height"}, {0, 0x51, "ContactID"}, {0, 0x52, 
"InputMode"}, {0, 0x53, "DeviceIndex"}, {0, 0x54, "ContactCount"}, {0, 0x55, "ContactMaximumNumber"}, { 15, 0, "PhysicalInterfaceDevice" }, {0, 0x00, "Undefined"}, {0, 0x01, "Physical_Interface_Device"}, {0, 0x20, "Normal"}, {0, 0x21, "Set_Effect_Report"}, {0, 0x22, "Effect_Block_Index"}, {0, 0x23, "Parameter_Block_Offset"}, {0, 0x24, "ROM_Flag"}, {0, 0x25, "Effect_Type"}, {0, 0x26, "ET_Constant_Force"}, {0, 0x27, "ET_Ramp"}, {0, 0x28, "ET_Custom_Force_Data"}, {0, 0x30, "ET_Square"}, {0, 0x31, "ET_Sine"}, {0, 0x32, "ET_Triangle"}, {0, 0x33, "ET_Sawtooth_Up"}, {0, 0x34, "ET_Sawtooth_Down"}, {0, 0x40, "ET_Spring"}, {0, 0x41, "ET_Damper"}, {0, 0x42, "ET_Inertia"}, {0, 0x43, "ET_Friction"}, {0, 0x50, "Duration"}, {0, 0x51, "Sample_Period"}, {0, 0x52, "Gain"}, {0, 0x53, "Trigger_Button"}, {0, 0x54, "Trigger_Repeat_Interval"}, {0, 0x55, "Axes_Enable"}, {0, 0x56, "Direction_Enable"}, {0, 0x57, "Direction"}, {0, 0x58, "Type_Specific_Block_Offset"}, {0, 0x59, "Block_Type"}, {0, 0x5A, "Set_Envelope_Report"}, {0, 0x5B, "Attack_Level"}, {0, 0x5C, "Attack_Time"}, {0, 0x5D, "Fade_Level"}, {0, 0x5E, "Fade_Time"}, {0, 0x5F, "Set_Condition_Report"}, {0, 0x60, "CP_Offset"}, {0, 0x61, "Positive_Coefficient"}, {0, 0x62, "Negative_Coefficient"}, {0, 0x63, "Positive_Saturation"}, {0, 0x64, "Negative_Saturation"}, {0, 0x65, "Dead_Band"}, {0, 0x66, "Download_Force_Sample"}, {0, 0x67, "Isoch_Custom_Force_Enable"}, {0, 0x68, "Custom_Force_Data_Report"}, {0, 0x69, "Custom_Force_Data"}, {0, 0x6A, "Custom_Force_Vendor_Defined_Data"}, {0, 0x6B, "Set_Custom_Force_Report"}, {0, 0x6C, "Custom_Force_Data_Offset"}, {0, 0x6D, "Sample_Count"}, {0, 0x6E, "Set_Periodic_Report"}, {0, 0x6F, "Offset"}, {0, 0x70, "Magnitude"}, {0, 0x71, "Phase"}, {0, 0x72, "Period"}, {0, 0x73, "Set_Constant_Force_Report"}, {0, 0x74, "Set_Ramp_Force_Report"}, {0, 0x75, "Ramp_Start"}, {0, 0x76, "Ramp_End"}, {0, 0x77, "Effect_Operation_Report"}, {0, 0x78, "Effect_Operation"}, {0, 0x79, "Op_Effect_Start"}, {0, 0x7A, 
"Op_Effect_Start_Solo"}, {0, 0x7B, "Op_Effect_Stop"}, {0, 0x7C, "Loop_Count"}, {0, 0x7D, "Device_Gain_Report"}, {0, 0x7E, "Device_Gain"}, {0, 0x7F, "PID_Pool_Report"}, {0, 0x80, "RAM_Pool_Size"}, {0, 0x81, "ROM_Pool_Size"}, {0, 0x82, "ROM_Effect_Block_Count"}, {0, 0x83, "Simultaneous_Effects_Max"}, {0, 0x84, "Pool_Alignment"}, {0, 0x85, "PID_Pool_Move_Report"}, {0, 0x86, "Move_Source"}, {0, 0x87, "Move_Destination"}, {0, 0x88, "Move_Length"}, {0, 0x89, "PID_Block_Load_Report"}, {0, 0x8B, "Block_Load_Status"}, {0, 0x8C, "Block_Load_Success"}, {0, 0x8D, "Block_Load_Full"}, {0, 0x8E, "Block_Load_Error"}, {0, 0x8F, "Block_Handle"}, {0, 0x90, "PID_Block_Free_Report"}, {0, 0x91, "Type_Specific_Block_Handle"}, {0, 0x92, "PID_State_Report"}, {0, 0x94, "Effect_Playing"}, {0, 0x95, "PID_Device_Control_Report"}, {0, 0x96, "PID_Device_Control"}, {0, 0x97, "DC_Enable_Actuators"}, {0, 0x98, "DC_Disable_Actuators"}, {0, 0x99, "DC_Stop_All_Effects"}, {0, 0x9A, "DC_Device_Reset"}, {0, 0x9B, "DC_Device_Pause"}, {0, 0x9C, "DC_Device_Continue"}, {0, 0x9F, "Device_Paused"}, {0, 0xA0, "Actuators_Enabled"}, {0, 0xA4, "Safety_Switch"}, {0, 0xA5, "Actuator_Override_Switch"}, {0, 0xA6, "Actuator_Power"}, {0, 0xA7, "Start_Delay"}, {0, 0xA8, "Parameter_Block_Size"}, {0, 0xA9, "Device_Managed_Pool"}, {0, 0xAA, "Shared_Parameter_Blocks"}, {0, 0xAB, "Create_New_Effect_Report"}, {0, 0xAC, "RAM_Pool_Available"}, { 0x84, 0, "Power Device" }, { 0x84, 0x02, "PresentStatus" }, { 0x84, 0x03, "ChangeStatus" }, { 0x84, 0x04, "UPS" }, { 0x84, 0x05, "PowerSupply" }, { 0x84, 0x10, "BatterySystem" }, { 0x84, 0x11, "BatterySystemID" }, { 0x84, 0x12, "Battery" }, { 0x84, 0x13, "BatteryID" }, { 0x84, 0x14, "Charger" }, { 0x84, 0x15, "ChargerID" }, { 0x84, 0x16, "PowerConverter" }, { 0x84, 0x17, "PowerConverterID" }, { 0x84, 0x18, "OutletSystem" }, { 0x84, 0x19, "OutletSystemID" }, { 0x84, 0x1a, "Input" }, { 0x84, 0x1b, "InputID" }, { 0x84, 0x1c, "Output" }, { 0x84, 0x1d, "OutputID" }, { 0x84, 0x1e, "Flow" }, { 
0x84, 0x1f, "FlowID" }, { 0x84, 0x20, "Outlet" }, { 0x84, 0x21, "OutletID" }, { 0x84, 0x22, "Gang" }, { 0x84, 0x24, "PowerSummary" }, { 0x84, 0x25, "PowerSummaryID" }, { 0x84, 0x30, "Voltage" }, { 0x84, 0x31, "Current" }, { 0x84, 0x32, "Frequency" }, { 0x84, 0x33, "ApparentPower" }, { 0x84, 0x35, "PercentLoad" }, { 0x84, 0x40, "ConfigVoltage" }, { 0x84, 0x41, "ConfigCurrent" }, { 0x84, 0x43, "ConfigApparentPower" }, { 0x84, 0x53, "LowVoltageTransfer" }, { 0x84, 0x54, "HighVoltageTransfer" }, { 0x84, 0x56, "DelayBeforeStartup" }, { 0x84, 0x57, "DelayBeforeShutdown" }, { 0x84, 0x58, "Test" }, { 0x84, 0x5a, "AudibleAlarmControl" }, { 0x84, 0x60, "Present" }, { 0x84, 0x61, "Good" }, { 0x84, 0x62, "InternalFailure" }, { 0x84, 0x65, "Overload" }, { 0x84, 0x66, "OverCharged" }, { 0x84, 0x67, "OverTemperature" }, { 0x84, 0x68, "ShutdownRequested" }, { 0x84, 0x69, "ShutdownImminent" }, { 0x84, 0x6b, "SwitchOn/Off" }, { 0x84, 0x6c, "Switchable" }, { 0x84, 0x6d, "Used" }, { 0x84, 0x6e, "Boost" }, { 0x84, 0x73, "CommunicationLost" }, { 0x84, 0xfd, "iManufacturer" }, { 0x84, 0xfe, "iProduct" }, { 0x84, 0xff, "iSerialNumber" }, { 0x85, 0, "Battery System" }, { 0x85, 0x01, "SMBBatteryMode" }, { 0x85, 0x02, "SMBBatteryStatus" }, { 0x85, 0x03, "SMBAlarmWarning" }, { 0x85, 0x04, "SMBChargerMode" }, { 0x85, 0x05, "SMBChargerStatus" }, { 0x85, 0x06, "SMBChargerSpecInfo" }, { 0x85, 0x07, "SMBSelectorState" }, { 0x85, 0x08, "SMBSelectorPresets" }, { 0x85, 0x09, "SMBSelectorInfo" }, { 0x85, 0x29, "RemainingCapacityLimit" }, { 0x85, 0x2c, "CapacityMode" }, { 0x85, 0x42, "BelowRemainingCapacityLimit" }, { 0x85, 0x44, "Charging" }, { 0x85, 0x45, "Discharging" }, { 0x85, 0x4b, "NeedReplacement" }, { 0x85, 0x66, "RemainingCapacity" }, { 0x85, 0x68, "RunTimeToEmpty" }, { 0x85, 0x6a, "AverageTimeToFull" }, { 0x85, 0x83, "DesignCapacity" }, { 0x85, 0x85, "ManufacturerDate" }, { 0x85, 0x89, "iDeviceChemistry" }, { 0x85, 0x8b, "Rechargeable" }, { 0x85, 0x8f, "iOEMInformation" }, { 0x85, 0x8d, 
"CapacityGranularity1" }, { 0x85, 0xd0, "ACPresent" }, /* pages 0xff00 to 0xffff are vendor-specific */ { 0xffff, 0, "Vendor-specific-FF" }, { 0, 0, NULL } }; /* Either output directly into simple seq_file, or (if f == NULL) * allocate a separate buffer that will then be passed to the 'events' * ringbuffer. * * This is because these functions can be called both for "one-shot" * "rdesc" while resolving, or for blocking "events". * * This holds both for resolv_usage_page() and hid_resolv_usage(). */ static char *resolv_usage_page(unsigned page, struct seq_file *f) { const struct hid_usage_entry *p; char *buf = NULL; if (!f) { buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC); if (!buf) return ERR_PTR(-ENOMEM); } for (p = hid_usage_table; p->description; p++) if (p->page == page) { if (!f) { snprintf(buf, HID_DEBUG_BUFSIZE, "%s", p->description); return buf; } else { seq_printf(f, "%s", p->description); return NULL; } } if (!f) snprintf(buf, HID_DEBUG_BUFSIZE, "%04x", page); else seq_printf(f, "%04x", page); return buf; } char *hid_resolv_usage(unsigned usage, struct seq_file *f) { const struct hid_usage_entry *p; char *buf = NULL; int len = 0; buf = resolv_usage_page(usage >> 16, f); if (IS_ERR(buf)) { pr_err("error allocating HID debug buffer\n"); return NULL; } if (!f) { len = strlen(buf); snprintf(buf+len, max(0, HID_DEBUG_BUFSIZE - len), "."); len++; } else { seq_printf(f, "."); } for (p = hid_usage_table; p->description; p++) if (p->page == (usage >> 16)) { for(++p; p->description && p->usage != 0; p++) if (p->usage == (usage & 0xffff)) { if (!f) snprintf(buf + len, max(0,HID_DEBUG_BUFSIZE - len - 1), "%s", p->description); else seq_printf(f, "%s", p->description); return buf; } break; } if (!f) snprintf(buf + len, max(0, HID_DEBUG_BUFSIZE - len - 1), "%04x", usage & 0xffff); else seq_printf(f, "%04x", usage & 0xffff); return buf; } EXPORT_SYMBOL_GPL(hid_resolv_usage); static void tab(int n, struct seq_file *f) { seq_printf(f, "%*s", n, ""); } void 
hid_dump_field(struct hid_field *field, int n, struct seq_file *f) { int j; if (field->physical) { tab(n, f); seq_printf(f, "Physical("); hid_resolv_usage(field->physical, f); seq_printf(f, ")\n"); } if (field->logical) { tab(n, f); seq_printf(f, "Logical("); hid_resolv_usage(field->logical, f); seq_printf(f, ")\n"); } if (field->application) { tab(n, f); seq_printf(f, "Application("); hid_resolv_usage(field->application, f); seq_printf(f, ")\n"); } tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage); for (j = 0; j < field->maxusage; j++) { tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n"); } if (field->logical_minimum != field->logical_maximum) { tab(n, f); seq_printf(f, "Logical Minimum(%d)\n", field->logical_minimum); tab(n, f); seq_printf(f, "Logical Maximum(%d)\n", field->logical_maximum); } if (field->physical_minimum != field->physical_maximum) { tab(n, f); seq_printf(f, "Physical Minimum(%d)\n", field->physical_minimum); tab(n, f); seq_printf(f, "Physical Maximum(%d)\n", field->physical_maximum); } if (field->unit_exponent) { tab(n, f); seq_printf(f, "Unit Exponent(%d)\n", field->unit_exponent); } if (field->unit) { static const char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" }; static const char *units[5][8] = { { "None", "None", "None", "None", "None", "None", "None", "None" }, { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }, { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" } }; int i; int sys; __u32 data = field->unit; /* First nibble tells us which system we're in. 
*/ sys = data & 0xf; data >>= 4; if(sys > 4) { tab(n, f); seq_printf(f, "Unit(Invalid)\n"); } else { int earlier_unit = 0; tab(n, f); seq_printf(f, "Unit(%s : ", systems[sys]); for (i=1 ; i<sizeof(__u32)*2 ; i++) { char nibble = data & 0xf; data >>= 4; if (nibble != 0) { if(earlier_unit++ > 0) seq_printf(f, "*"); seq_printf(f, "%s", units[sys][i]); if(nibble != 1) { /* This is a _signed_ nibble(!) */ int val = nibble & 0x7; if(nibble & 0x08) val = -((0x7 & ~val) +1); seq_printf(f, "^%d", val); } } } seq_printf(f, ")\n"); } } tab(n, f); seq_printf(f, "Report Size(%u)\n", field->report_size); tab(n, f); seq_printf(f, "Report Count(%u)\n", field->report_count); tab(n, f); seq_printf(f, "Report Offset(%u)\n", field->report_offset); tab(n, f); seq_printf(f, "Flags( "); j = field->flags; seq_printf(f, "%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array "); seq_printf(f, "%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute "); seq_printf(f, "%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPreferredState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? 
"BufferedByte " : ""); seq_printf(f, ")\n"); } EXPORT_SYMBOL_GPL(hid_dump_field); void hid_dump_device(struct hid_device *device, struct seq_file *f) { struct hid_report_enum *report_enum; struct hid_report *report; struct list_head *list; unsigned i,k; static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"}; for (i = 0; i < HID_REPORT_TYPES; i++) { report_enum = device->report_enum + i; list = report_enum->report_list.next; while (list != &report_enum->report_list) { report = (struct hid_report *) list; tab(2, f); seq_printf(f, "%s", table[i]); if (report->id) seq_printf(f, "(%d)", report->id); seq_printf(f, "[%s]", table[report->type]); seq_printf(f, "\n"); for (k = 0; k < report->maxfield; k++) { tab(4, f); seq_printf(f, "Field(%d)\n", k); hid_dump_field(report->field[k], 6, f); } list = list->next; } } } EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { int i; struct hid_debug_list *list; list_for_each_entry(list, &hdev->debug_list, node) { for (i = 0; i < strlen(buf); i++) list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = buf[i]; list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; } wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_debug_event); void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 value) { char *buf; int len; buf = hid_resolv_usage(usage->hid, NULL); if (!buf) return; len = strlen(buf); snprintf(buf + len, HID_DEBUG_BUFSIZE - len - 1, " = %d\n", value); hid_debug_event(hdev, buf); kfree(buf); wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); static const char *events[EV_MAX + 1] = { [EV_SYN] = "Sync", [EV_KEY] = "Key", [EV_REL] = "Relative", [EV_ABS] = "Absolute", [EV_MSC] = "Misc", [EV_LED] = "LED", [EV_SND] = "Sound", [EV_REP] = "Repeat", [EV_FF] = "ForceFeedback", [EV_PWR] = "Power", [EV_FF_STATUS] = "ForceFeedbackStatus", }; static const char *syncs[3] = { [SYN_REPORT] = 
"Report", [SYN_CONFIG] = "Config", [SYN_MT_REPORT] = "MT Report", }; static const char *keys[KEY_MAX + 1] = { [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc", [KEY_1] = "1", [KEY_2] = "2", [KEY_3] = "3", [KEY_4] = "4", [KEY_5] = "5", [KEY_6] = "6", [KEY_7] = "7", [KEY_8] = "8", [KEY_9] = "9", [KEY_0] = "0", [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal", [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab", [KEY_Q] = "Q", [KEY_W] = "W", [KEY_E] = "E", [KEY_R] = "R", [KEY_T] = "T", [KEY_Y] = "Y", [KEY_U] = "U", [KEY_I] = "I", [KEY_O] = "O", [KEY_P] = "P", [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace", [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl", [KEY_A] = "A", [KEY_S] = "S", [KEY_D] = "D", [KEY_F] = "F", [KEY_G] = "G", [KEY_H] = "H", [KEY_J] = "J", [KEY_K] = "K", [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon", [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave", [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash", [KEY_Z] = "Z", [KEY_X] = "X", [KEY_C] = "C", [KEY_V] = "V", [KEY_B] = "B", [KEY_N] = "N", [KEY_M] = "M", [KEY_COMMA] = "Comma", [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash", [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk", [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space", [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1", [KEY_F2] = "F2", [KEY_F3] = "F3", [KEY_F4] = "F4", [KEY_F5] = "F5", [KEY_F6] = "F6", [KEY_F7] = "F7", [KEY_F8] = "F8", [KEY_F9] = "F9", [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock", [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7", [KEY_KP8] = "KP8", [KEY_KP9] = "KP9", [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4", [KEY_KP5] = "KP5", [KEY_KP6] = "KP6", [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1", [KEY_KP2] = "KP2", [KEY_KP3] = "KP3", [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot", [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd", [KEY_F11] = "F11", [KEY_F12] = "F12", [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana", [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan", [KEY_KATAKANAHIRAGANA] = 
"Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan", [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter", [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash", [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt", [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home", [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp", [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right", [KEY_END] = "End", [KEY_DOWN] = "Down", [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert", [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro", [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown", [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power", [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus", [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma", [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja", [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta", [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose", [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again", [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo", [KEY_FRONT] = "Front", [KEY_COPY] = "Copy", [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste", [KEY_FIND] = "Find", [KEY_CUT] = "Cut", [KEY_HELP] = "Help", [KEY_MENU] = "Menu", [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup", [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp", [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile", [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer", [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2", [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS", [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction", [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail", [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer", [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward", [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD", [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong", [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong", [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record", [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone", [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config", [KEY_HOMEPAGE] = "HomePage", 
[KEY_REFRESH] = "Refresh", [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move", [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp", [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis", [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New", [KEY_REDO] = "Redo", [KEY_F13] = "F13", [KEY_F14] = "F14", [KEY_F15] = "F15", [KEY_F16] = "F16", [KEY_F17] = "F17", [KEY_F18] = "F18", [KEY_F19] = "F19", [KEY_F20] = "F20", [KEY_F21] = "F21", [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound", [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email", [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search", [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance", [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop", [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel", [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp", [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown", [BTN_0] = "Btn0", [BTN_1] = "Btn1", [BTN_2] = "Btn2", [BTN_3] = "Btn3", [BTN_4] = "Btn4", [BTN_5] = "Btn5", [BTN_6] = "Btn6", [BTN_7] = "Btn7", [BTN_8] = "Btn8", [BTN_9] = "Btn9", [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn", [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn", [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn", [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn", [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn", [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn", [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn", [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2", [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4", [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6", [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA", [BTN_B] = "BtnB", [BTN_C] = "BtnC", [BTN_X] = 
"BtnX", [BTN_Y] = "BtnY", [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL", [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2", [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect", [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode", [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR", [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber", [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil", [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger", [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens", [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus", [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap", [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn", [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok", [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto", [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2", [KEY_OPTION] = "Option", [KEY_INFO] = "Info", [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor", [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program", [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites", [KEY_EPG] = "EPG", [KEY_PVR] = "PVR", [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language", [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle", [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom", [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard", [KEY_SCREEN] = "Screen", [KEY_PC] = "PC", [KEY_TV] = "TV", [KEY_TV2] = "TV2", [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2", [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2", [KEY_CD] = "CD", [KEY_TAPE] = "Tape", [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner", [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text", [KEY_DVD] = "DVD", [KEY_AUX] = "Aux", [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio", [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory", [KEY_LIST] = "List", [KEY_MEMO] = "Memo", [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red", [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow", [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp", [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First", [KEY_LAST] = "Last", [KEY_AB] = "AB", 
[KEY_NEXT] = "Next", [KEY_RESTART] = "Restart", [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle", [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous", [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN", [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL", [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine", [KEY_DEL_LINE] = "DeleteLine", [KEY_SEND] = "Send", [KEY_REPLY] = "Reply", [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save", [KEY_DOCUMENTS] = "Documents", [KEY_SPELLCHECK] = "SpellCheck", [KEY_LOGOFF] = "Logoff", [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC", [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2", [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D", [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F", [KEY_FN_S] = "Fn+S", [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2", [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4", [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6", [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8", [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10", [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12", [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle", [KEY_KBDILLUMDOWN] = "KbdIlluminationDown", [KEY_KBDILLUMUP] = "KbdIlluminationUp", [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode", }; static const char *relatives[REL_MAX + 1] = { [REL_X] = "X", [REL_Y] = "Y", [REL_Z] = "Z", [REL_RX] = "Rx", [REL_RY] = "Ry", [REL_RZ] = "Rz", [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial", [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", }; static const char *absolutes[ABS_CNT] = { [ABS_X] = "X", [ABS_Y] = "Y", [ABS_Z] = "Z", [ABS_RX] = "Rx", [ABS_RY] = "Ry", [ABS_RZ] = "Rz", [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder", [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas", [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X", [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X", [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X", [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X", [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure", [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt", [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth", 
[ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc", [ABS_MT_TOUCH_MAJOR] = "MTMajor", [ABS_MT_TOUCH_MINOR] = "MTMinor", [ABS_MT_WIDTH_MAJOR] = "MTMajorW", [ABS_MT_WIDTH_MINOR] = "MTMinorW", [ABS_MT_ORIENTATION] = "MTOrientation", [ABS_MT_POSITION_X] = "MTPositionX", [ABS_MT_POSITION_Y] = "MTPositionY", [ABS_MT_TOOL_TYPE] = "MTToolType", [ABS_MT_BLOB_ID] = "MTBlobID", }; static const char *misc[MSC_MAX + 1] = { [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled", [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData" }; static const char *leds[LED_MAX + 1] = { [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock", [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose", [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep", [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute", [LED_MISC] = "Misc", }; static const char *repeats[REP_MAX + 1] = { [REP_DELAY] = "Delay", [REP_PERIOD] = "Period" }; static const char *sounds[SND_MAX + 1] = { [SND_CLICK] = "Click", [SND_BELL] = "Bell", [SND_TONE] = "Tone" }; static const char **names[EV_MAX + 1] = { [EV_SYN] = syncs, [EV_KEY] = keys, [EV_REL] = relatives, [EV_ABS] = absolutes, [EV_MSC] = misc, [EV_LED] = leds, [EV_SND] = sounds, [EV_REP] = repeats, }; static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { seq_printf(f, "%s.%s", events[type] ? events[type] : "?", names[type] ? (names[type][code] ? 
names[type][code] : "?") : "?"); } static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) { int i, j, k; struct hid_report *report; struct hid_usage *usage; for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { list_for_each_entry(report, &hid->report_enum[k].report_list, list) { for (i = 0; i < report->maxfield; i++) { for ( j = 0; j < report->field[i]->maxusage; j++) { usage = report->field[i]->usage + j; hid_resolv_usage(usage->hid, f); seq_printf(f, " ---> "); hid_resolv_event(usage->type, usage->code, f); seq_printf(f, "\n"); } } } } } static int hid_debug_rdesc_show(struct seq_file *f, void *p) { struct hid_device *hdev = f->private; int i; /* dump HID report descriptor */ for (i = 0; i < hdev->rsize; i++) seq_printf(f, "%02x ", hdev->rdesc[i]); seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); return 0; } static int hid_debug_rdesc_open(struct inode *inode, struct file *file) { return single_open(file, hid_debug_rdesc_show, inode->i_private); } static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) { err = -ENOMEM; kfree(list); goto out; } list->hdev = (struct hid_device *) inode->i_private; file->private_data = list; mutex_init(&list->read_mutex); list_add_tail(&list->node, &list->hdev->debug_list); out: return err; } static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hdev->debug_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while 
(list->head == list->tail) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!list->hdev || !list->hdev->debug) { ret = -EIO; break; } /* allow O_NONBLOCK from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hdev->debug_wait, &wait); } if (ret) goto out; /* pass the ringbuffer contents to userspace */ copy_rest: if (list->tail == list->head) goto out; if (list->tail > list->head) { len = list->tail - list->head; if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; goto out; } ret += len; list->head += len; } else { len = HID_DEBUG_BUFSIZE - list->head; if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; goto out; } list->head = 0; ret += len; goto copy_rest; } } out: mutex_unlock(&list->read_mutex); return ret; } static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait) { struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); if (list->head != list->tail) return POLLIN | POLLRDNORM; if (!list->hdev->debug) return POLLERR | POLLHUP; return 0; } static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; list_del(&list->node); kfree(list->hid_debug_buf); kfree(list); return 0; } static const struct file_operations hid_debug_rdesc_fops = { .open = hid_debug_rdesc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations hid_debug_events_fops = { .owner = THIS_MODULE, .open = hid_debug_events_open, .read = hid_debug_events_read, .poll = hid_debug_events_poll, .release = hid_debug_events_release, .llseek = noop_llseek, }; void hid_debug_register(struct hid_device *hdev, const char *name) { hdev->debug_dir = 
debugfs_create_dir(name, hid_debug_root); hdev->debug_rdesc = debugfs_create_file("rdesc", 0400, hdev->debug_dir, hdev, &hid_debug_rdesc_fops); hdev->debug_events = debugfs_create_file("events", 0400, hdev->debug_dir, hdev, &hid_debug_events_fops); hdev->debug = 1; } void hid_debug_unregister(struct hid_device *hdev) { hdev->debug = 0; wake_up_interruptible(&hdev->debug_wait); debugfs_remove(hdev->debug_rdesc); debugfs_remove(hdev->debug_events); debugfs_remove(hdev->debug_dir); } void hid_debug_init(void) { hid_debug_root = debugfs_create_dir("hid", NULL); } void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); }
gpl-2.0
AmeriCanAndroid/kernel_htc_shooteru
arch/x86/oprofile/nmi_timer_int.c
2977
1364
/** * @file nmi_timer_int.c * * @remark Copyright 2003 OProfile authors * @remark Read the file COPYING * * @author Zwane Mwaikambo <zwane@linuxpower.ca> */ #include <linux/init.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/oprofile.h> #include <linux/rcupdate.h> #include <linux/kdebug.h> #include <asm/nmi.h> #include <asm/apic.h> #include <asm/ptrace.h> static int profile_timer_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; switch (val) { case DIE_NMI: oprofile_add_sample(args->regs, 0); ret = NOTIFY_STOP; break; default: break; } return ret; } static struct notifier_block profile_timer_exceptions_nb = { .notifier_call = profile_timer_exceptions_notify, .next = NULL, .priority = NMI_LOW_PRIOR, }; static int timer_start(void) { if (register_die_notifier(&profile_timer_exceptions_nb)) return 1; return 0; } static void timer_stop(void) { unregister_die_notifier(&profile_timer_exceptions_nb); synchronize_sched(); /* Allow already-started NMIs to complete. */ } int __init op_nmi_timer_init(struct oprofile_operations *ops) { ops->start = timer_start; ops->stop = timer_stop; ops->cpu_type = "timer"; printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); return 0; }
gpl-2.0
dsb9938/DNA_JB_KERNEL_2
mm/kmemleak.c
3745
53858
/* * mm/kmemleak.c * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * For more information on the algorithm and kmemleak usage, please see * Documentation/kmemleak.txt. * * Notes on locking * ---------------- * * The following locks and mutexes are used by kmemleak: * * - kmemleak_lock (rwlock): protects the object_list modifications and * accesses to the object_tree_root. The object_list is the main list * holding the metadata (struct kmemleak_object) for the allocated memory * blocks. The object_tree_root is a priority search tree used to look-up * metadata based on a pointer to the corresponding memory block. The * kmemleak_object structures are added to the object_list and * object_tree_root in the create_object() function called from the * kmemleak_alloc() callback and removed in delete_object() called from the * kmemleak_free() callback * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to * the metadata (e.g. count) are protected by this lock. Note that some * members of this structure may be protected by other means (atomic or * kmemleak_lock). This lock is also held when scanning the corresponding * memory block to avoid the kernel freeing it via the kmemleak_free() * callback. 
This is less heavyweight than holding a global lock like * kmemleak_lock during scanning * - scan_mutex (mutex): ensures that only one thread may scan the memory for * unreferenced objects at a time. The gray_list contains the objects which * are already referenced or marked as false positives and need to be * scanned. This list is only modified during a scanning episode when the * scan_mutex is held. At the end of a scan, the gray_list is always empty. * Note that the kmemleak_object.use_count is incremented when an object is * added to the gray_list and therefore cannot be freed. This mutex also * prevents multiple users of the "kmemleak" debugfs file together with * modifications to the memory scanning parameters including the scan_thread * pointer * * The kmemleak_object structures have a use_count incremented or decremented * using the get_object()/put_object() functions. When the use_count becomes * 0, this count can no longer be incremented and put_object() schedules the * kmemleak_object freeing via an RCU callback. All calls to the get_object() * function must be protected by rcu_read_lock() to avoid accessing a freed * structure. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/kthread.h> #include <linux/prio_tree.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/stacktrace.h> #include <linux/cache.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/mmzone.h> #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/err.h> #include <linux/uaccess.h> #include <linux/string.h> #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <asm/sections.h> #include <asm/processor.h> #include <linux/atomic.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/memory_hotplug.h> /* * Kmemleak configuration and common defines. */ #define MAX_TRACE 16 /* stack trace length */ #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ #define SECS_FIRST_SCAN 60 /* delay before the first scan */ #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */ #define BYTES_PER_POINTER sizeof(void *) /* GFP bitmask for kmemleak internal allocations */ #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ __GFP_NORETRY | __GFP_NOMEMALLOC | \ __GFP_NOWARN) /* scanning area inside a memory block */ struct kmemleak_scan_area { struct hlist_node node; unsigned long start; size_t size; }; #define KMEMLEAK_GREY 0 #define KMEMLEAK_BLACK -1 /* * Structure holding the metadata for each allocated memory block. * Modifications to such objects should be made while holding the * object->lock. 
Insertions or deletions from object_list, gray_list or * tree_node are already protected by the corresponding locks or mutex (see * the notes on locking above). These objects are reference-counted * (use_count) and freed using the RCU mechanism. */ struct kmemleak_object { spinlock_t lock; unsigned long flags; /* object status flags */ struct list_head object_list; struct list_head gray_list; struct prio_tree_node tree_node; struct rcu_head rcu; /* object_list lockless traversal */ /* object usage count; object freed when use_count == 0 */ atomic_t use_count; unsigned long pointer; size_t size; /* minimum number of a pointers found before it is considered leak */ int min_count; /* the total number of pointers found pointing to this object */ int count; /* checksum for detecting modified objects */ u32 checksum; /* memory ranges to be scanned inside an object (empty for all) */ struct hlist_head area_list; unsigned long trace[MAX_TRACE]; unsigned int trace_len; unsigned long jiffies; /* creation timestamp */ pid_t pid; /* pid of the current task */ char comm[TASK_COMM_LEN]; /* executable name */ }; /* flag representing the memory block allocation status */ #define OBJECT_ALLOCATED (1 << 0) /* flag set after the first reporting of an unreference object */ #define OBJECT_REPORTED (1 << 1) /* flag set to not scan the object */ #define OBJECT_NO_SCAN (1 << 2) /* number of bytes to print per line; must be 16 or 32 */ #define HEX_ROW_SIZE 16 /* number of bytes to print at a time (1, 2, 4, 8) */ #define HEX_GROUP_SIZE 1 /* include ASCII after the hex output */ #define HEX_ASCII 1 /* max number of lines to be printed */ #define HEX_MAX_LINES 2 /* the list of all allocated objects */ static LIST_HEAD(object_list); /* the list of gray-colored objects (see color_gray comment below) */ static LIST_HEAD(gray_list); /* prio search tree for object boundaries */ static struct prio_tree_root object_tree_root; /* rw_lock protecting the access to object_list and prio_tree_root */ 
static DEFINE_RWLOCK(kmemleak_lock); /* allocation caches for kmemleak internal data */ static struct kmem_cache *object_cache; static struct kmem_cache *scan_area_cache; /* set if tracing memory operations is enabled */ static atomic_t kmemleak_enabled = ATOMIC_INIT(0); /* set in the late_initcall if there were no errors */ static atomic_t kmemleak_initialized = ATOMIC_INIT(0); /* enables or disables early logging of the memory operations */ static atomic_t kmemleak_early_log = ATOMIC_INIT(1); /* set if a kmemleak warning was issued */ static atomic_t kmemleak_warning = ATOMIC_INIT(0); /* set if a fatal kmemleak error has occurred */ static atomic_t kmemleak_error = ATOMIC_INIT(0); /* minimum and maximum address that may be valid pointers */ static unsigned long min_addr = ULONG_MAX; static unsigned long max_addr; static struct task_struct *scan_thread; /* used to avoid reporting of recently allocated objects */ static unsigned long jiffies_min_age; static unsigned long jiffies_last_scan; /* delay between automatic memory scannings */ static signed long jiffies_scan_wait; /* enables or disables the task stacks scanning */ static int kmemleak_stack_scan = 1; /* protects the memory scanning, parameters and debug/kmemleak file access */ static DEFINE_MUTEX(scan_mutex); /* setting kmemleak=on, will set this var, skipping the disable */ static int kmemleak_skip_disable; /* * Early object allocation/freeing logging. Kmemleak is initialized after the * kernel allocator. However, both the kernel allocator and kmemleak may * allocate memory blocks which need to be tracked. Kmemleak defines an * arbitrary buffer to hold the allocation/freeing information before it is * fully initialized. 
*/ /* kmemleak operation type for early logging */ enum { KMEMLEAK_ALLOC, KMEMLEAK_ALLOC_PERCPU, KMEMLEAK_FREE, KMEMLEAK_FREE_PART, KMEMLEAK_FREE_PERCPU, KMEMLEAK_NOT_LEAK, KMEMLEAK_IGNORE, KMEMLEAK_SCAN_AREA, KMEMLEAK_NO_SCAN }; /* * Structure holding the information passed to kmemleak callbacks during the * early logging. */ struct early_log { int op_type; /* kmemleak operation type */ const void *ptr; /* allocated/freed memory block */ size_t size; /* memory block size */ int min_count; /* minimum reference count */ unsigned long trace[MAX_TRACE]; /* stack trace */ unsigned int trace_len; /* stack trace length */ }; /* early logging buffer and current position */ static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata; static int crt_early_log __initdata; static void kmemleak_disable(void); /* * Print a warning and dump the stack trace. */ #define kmemleak_warn(x...) do { \ pr_warning(x); \ dump_stack(); \ atomic_set(&kmemleak_warning, 1); \ } while (0) /* * Macro invoked when a serious kmemleak condition occurred and cannot be * recovered from. Kmemleak will be disabled and further allocation/freeing * tracing no longer available. */ #define kmemleak_stop(x...) do { \ kmemleak_warn(x); \ kmemleak_disable(); \ } while (0) /* * Printing of the objects hex dump to the seq file. The number of lines to be * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called * with the object->lock held. 
*/
/*
 * Print up to HEX_MAX_LINES rows (HEX_ROW_SIZE bytes each) of the object's
 * memory as a hex+ASCII dump into the seq file. Must be called with
 * object->lock held so the underlying block cannot be freed while read.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		/* last row may be shorter than HEX_ROW_SIZE */
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age (jiffies_min_age) to avoid false
 * positives caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
*/ static void print_unreferenced(struct seq_file *seq, struct kmemleak_object *object) { int i; unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies); seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", object->pointer, object->size); seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n", object->comm, object->pid, object->jiffies, msecs_age / 1000, msecs_age % 1000); hex_dump_object(seq, object); seq_printf(seq, " backtrace:\n"); for (i = 0; i < object->trace_len; i++) { void *ptr = (void *)object->trace[i]; seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); } } /* * Print the kmemleak_object information. This function is used mainly for * debugging special cases when kmemleak operations. It must be called with * the object->lock held. */ static void dump_object_info(struct kmemleak_object *object) { struct stack_trace trace; trace.nr_entries = object->trace_len; trace.entries = object->trace; pr_notice("Object 0x%08lx (size %zu):\n", object->tree_node.start, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", object->comm, object->pid, object->jiffies); pr_notice(" min_count = %d\n", object->min_count); pr_notice(" count = %d\n", object->count); pr_notice(" flags = 0x%lx\n", object->flags); pr_notice(" checksum = %d\n", object->checksum); pr_notice(" backtrace:\n"); print_stack_trace(&trace, 4); } /* * Look-up a memory block metadata (kmemleak_object) in the priority search * tree based on a pointer value. If alias is 0, only values pointing to the * beginning of the memory block are allowed. The kmemleak_lock must be held * when calling this function. 
*/
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	/* a degenerate [ptr, ptr] interval query against the prio tree */
	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			/* ptr falls inside a block but not at its start */
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object and all its scan areas.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
*/
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/* freeing is deferred to RCU so no allocator recursion occurs here */
	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 * Returns NULL if no object covers ptr or if the object is already on its
 * way to being freed (use_count reached 0).
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	/* quick reject: outside the range of all tracked blocks */
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size. Returns the number
 * of entries recorded.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	/* skip the two kmemleak-internal frames */
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
*/
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		/* cannot track allocations any more: shut kmemleak down */
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	/* index the block as the closed interval [ptr, ptr + size - 1] */
	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		/* overlapping block already tracked: fatal inconsistency */
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
*/
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	/* object is still valid here: we hold a reference from
	 * find_and_get_object(); only the metadata was unlinked above */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

/* Set the object's color; black objects are additionally excluded from
 * scanning. Caller holds object->lock. */
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

/* Color the object that starts exactly at ptr (no alias look-up). */
static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
*/
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	/* reject areas extending past the end of the tracked block */
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
*/
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (atomic_read(&kmemleak_error)) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		/* buffer exhausted: kmemleak cannot be trusted any more */
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Replay an early-logged allocation: create the object and overwrite its
 * stack trace with the one captured at allocation time.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Replay an early-logged __percpu allocation: register the per-CPU copy of
 * the block on each possible CPU.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak.
If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	/* before full initialization, record the call for later replay */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
*/
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	/* before full initialization, record the call for later replay */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
*/ void __ref kmemleak_not_leak(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) make_gray_object((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_not_leak); /** * kmemleak_ignore - ignore an allocated object * @ptr: pointer to beginning of the object * * Calling this function on an object will cause the memory block to be * ignored (not scanned and not reported as a leak). This is usually done when * it is known that the corresponding block is not a leak and does not contain * any references to other allocated memory blocks. */ void __ref kmemleak_ignore(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) make_black_object((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) log_early(KMEMLEAK_IGNORE, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_ignore); /** * kmemleak_scan_area - limit the range to be scanned in an allocated object * @ptr: pointer to beginning or inside the object. This also * represents the start of the scan area * @size: size of the scan area * @gfp: kmalloc() flags used for kmemleak internal memory allocations * * This function is used when it is known that only certain parts of an object * contain references to other objects. Kmemleak will only scan these areas * reducing the number false negatives. */ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) { pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr)) add_scan_area((unsigned long)ptr, size, gfp); else if (atomic_read(&kmemleak_early_log)) log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); } EXPORT_SYMBOL(kmemleak_scan_area); /** * kmemleak_no_scan - do not scan an allocated object * @ptr: pointer to beginning of the object * * This function notifies kmemleak not to scan the given memory block. 
Useful * in situations where it is known that the given object does not contain any * references to other objects. Kmemleak will not scan such objects reducing * the number of false negatives. */ void __ref kmemleak_no_scan(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) object_no_scan((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_no_scan); /* * Update an object's checksum and return true if it was modified. */ static bool update_checksum(struct kmemleak_object *object) { u32 old_csum = object->checksum; if (!kmemcheck_is_obj_initialized(object->pointer, object->size)) return false; object->checksum = crc32(0, (void *)object->pointer, object->size); return object->checksum != old_csum; } /* * Memory scanning is a long process and it needs to be interruptable. This * function checks whether such interrupt condition occurred. */ static int scan_should_stop(void) { if (!atomic_read(&kmemleak_enabled)) return 1; /* * This function may be called from either process or kthread context, * hence the need to check for both stop conditions. */ if (current->mm) return signal_pending(current); else return kthread_should_stop(); return 0; } /* * Scan a memory block (exclusive range) for valid pointers and add those * found to the gray list. 
*/
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	/* stop early enough that every dereference reads a full word */
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		/* does this word look like a pointer into a tracked block? */
		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* the reference taken above keeps it alive on the
			 * gray_list; released later by scan_gray_list() */
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		/* no scan areas registered: scan the whole block in
		 * MAX_SCAN_SIZE chunks, dropping the lock between chunks
		 * to bound IRQ-off latency */
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		/* fetch the successor before unlinking the current entry */
		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		/* objects with min_count == 0 are gray immediately */
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	lock_memory_hotplug();
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	unlock_memory_hotplug();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p),
				   task_stack_page(p) + THREAD_SIZE,
				   NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
		    update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 *
 * NOTE: on success this returns with scan_mutex held and rcu_read_lock()
 * active; both are released in kmemleak_seq_stop().
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		struct kmemleak_object *obj =
			list_entry(n, struct kmemleak_object, object_list);
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

/*
 * Parse an address from @str and dump the corresponding object's details.
 * Returns 0 on success, -EINVAL if no object is found at that address.
 */
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off       - disable kmemleak (irreversible)
 *   stack=on  - enable the task stacks scanning
 *   stack=off - disable the tasks stacks scanning
 *   scan=on   - start the automatic memory scanning thread
 *   scan=off  - stop the automatic memory scanning thread
 *   scan=...  - set the automatic memory scanning period in seconds (0 to
 *               disable it)
 *   scan      - trigger a memory scan
 *   clear     - mark all current reported unreferenced kmemleak objects as
 *               grey to ignore printing them
 *   dump=...  - dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no previous scan thread (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;
	/* only free the internal objects if no scan thread ever ran */
	bool cleanup = scan_thread == NULL;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (cleanup) {
		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list)
			delete_object_full(object->pointer);
		rcu_read_unlock();
	}
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		/* only meaningful with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF */
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/* Print the stack trace saved with an early-boot logged operation. */
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		atomic_set(&kmemleak_early_log, 0);
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	atomic_set(&kmemleak_early_log, 0);
	if (atomic_read(&kmemleak_error)) {
		local_irq_restore(flags);
		return;
	} else
		atomic_set(&kmemleak_enabled, 1);
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (atomic_read(&kmemleak_warning)) {
			print_log_trace(log);
			atomic_set(&kmemleak_warning, 0);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);
gpl-2.0
StarKissed/starkissed-kernel-ardbeg
drivers/char/nwflash.c
4257
13461
/*
 * Flash memory interface rev.5 driver for the Intel
 * Flash chips used on the NetWinder.
 *
 * 20/08/2000	RMK	use __ioremap to map flash into virtual memory
 *			make a few more places use "volatile"
 * 22/05/2001	RMK	- Lock read against write
 *			- merge printk level changes (with mods) from Alan Cox.
 *			- use *ppos as the file position, not file->f_pos.
 *			- fix check for out of range pos and r/w size
 *
 * Please note that we are tampering with the only flash chip in the
 * machine, which contains the bootup code. We therefore have the
 * power to convert these machines into doorstops...
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

#include <asm/hardware/dec21285.h>
#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/uaccess.h>

/*****************************************************************************/
#include <asm/nwflash.h>

#define NWFLASH_VERSION "6.4"

static DEFINE_MUTEX(flash_mutex);
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
static int write_block(unsigned long p, const char __user *buf, int count);

#define KFLASH_SIZE	1024*1024	//1 Meg
#define KFLASH_SIZE4	4*1024*1024	//4 Meg
#define KFLASH_ID	0x89A6		//Intel flash
#define KFLASH_ID4	0xB0D4		//Intel flash 4Meg

static bool flashdebug;		//if set - we will display progress msgs

static int gbWriteEnable;
static int gbWriteBase64Enable;
static volatile unsigned char *FLASH_BASE;
static int gbFlashSize = KFLASH_SIZE;
static DEFINE_MUTEX(nwflash_mutex);

/*
 * Read the flash chip ID and, as a side effect, set gbFlashSize when a
 * 4 Meg device is detected. The magic writes below are Intel flash
 * command-set operations (0x90 = read ID, 0xFF = read array).
 */
static int get_flash_id(void)
{
	volatile unsigned int c1, c2;

	/*
	 * try to get flash chip ID
	 */
	kick_open();
	c2 = inb(0x80);
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
	udelay(15);
	c1 = *(volatile unsigned char *) FLASH_BASE;
	c2 = inb(0x80);

	/*
	 * on 4 Meg flash the second byte is actually at offset 2...
	 */
	if (c1 == 0xB0)
		c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
	else
		c2 = *(volatile unsigned char *) (FLASH_BASE + 1);

	c2 += (c1 << 8);

	/*
	 * set it back to read mode
	 */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;

	if (c2 == KFLASH_ID4)
		gbFlashSize = KFLASH_SIZE4;

	return c2;
}

/*
 * Enable/disable write access; writes to the first 64K (boot block) need
 * the separate CMD_WRITE_BASE64K_ENABLE.
 */
static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	mutex_lock(&flash_mutex);
	switch (cmd) {
	case CMD_WRITE_DISABLE:
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		break;

	case CMD_WRITE_ENABLE:
		gbWriteEnable = 1;
		break;

	case CMD_WRITE_BASE64K_ENABLE:
		gbWriteBase64Enable = 1;
		break;

	default:
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		mutex_unlock(&flash_mutex);
		return -EINVAL;
	}
	mutex_unlock(&flash_mutex);
	return 0;
}

static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
			  loff_t *ppos)
{
	ssize_t ret;

	if (flashdebug)
		printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
		       "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;

	/* the flash is memory mapped, so a plain buffer copy suffices */
	ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE,
				      gbFlashSize);
	mutex_unlock(&nwflash_mutex);

	return ret;
}

/*
 * Erase-and-program the affected 64K blocks with user data. Returns the
 * number of bytes written (possibly short on error) or a negative errno.
 */
static ssize_t flash_write(struct file *file, const char __user *buf,
			   size_t size, loff_t * ppos)
{
	unsigned long p = *ppos;
	unsigned int count = size;
	int written;
	int nBlock, temp, rc;
	int i, j;

	if (flashdebug)
		printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
		       p, buf, count);

	if (!gbWriteEnable)
		return -EINVAL;

	if (p < 64 * 1024 && (!gbWriteBase64Enable))
		return -EINVAL;

	/*
	 * check for out of range pos or count
	 */
	if (p >= gbFlashSize)
		return count ? -ENXIO : 0;

	if (count > gbFlashSize - p)
		count = gbFlashSize - p;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;

	written = 0;

	nBlock = (int) p >> 16;	//block # of 64K bytes

	/*
	 * # of 64K blocks to erase and write
	 */
	temp = ((int) (p + count) >> 16) - nBlock + 1;

	/*
	 * write ends at exactly 64k boundary?
	 */
	if (((int) (p + count) & 0xFFFF) == 0)
		temp -= 1;

	if (flashdebug)
		printk(KERN_DEBUG "flash_write: writing %d block(s) "
		       "starting at %d.\n", temp, nBlock);

	for (; temp; temp--, nBlock++) {
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: erasing block %d.\n",
			       nBlock);

		/*
		 * first we have to erase the block(s), where we will write...
		 */
		i = 0;
		j = 0;
	  RetryBlock:
		do {
			rc = erase_block(nBlock);
			i++;
		} while (rc && i < 10);

		if (rc) {
			printk(KERN_ERR "flash_write: erase error %x\n", rc);
			break;
		}
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: writing offset %lX, "
			       "from buf %p, bytes left %X.\n", p, buf,
			       count - written);

		/*
		 * write_block will limit write to space left in this block
		 */
		rc = write_block(p, buf, count - written);
		j++;

		/*
		 * if somehow write verify failed? Can't happen??
		 */
		if (!rc) {
			/*
			 * retry up to 10 times
			 */
			if (j < 10)
				goto RetryBlock;
			else
				/*
				 * else quit with error...
				 */
				rc = -1;
		}
		if (rc < 0) {
			printk(KERN_ERR "flash_write: write error %X\n", rc);
			break;
		}
		p += rc;
		buf += rc;
		written += rc;
		*ppos += rc;

		if (flashdebug)
			printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n",
			       written);
	}

	mutex_unlock(&nwflash_mutex);

	return written;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
*/ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) { loff_t ret; mutex_lock(&flash_mutex); if (flashdebug) printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", (unsigned int) offset, orig); switch (orig) { case 0: if (offset < 0) { ret = -EINVAL; break; } if ((unsigned int) offset > gbFlashSize) { ret = -EINVAL; break; } file->f_pos = (unsigned int) offset; ret = file->f_pos; break; case 1: if ((file->f_pos + offset) > gbFlashSize) { ret = -EINVAL; break; } if ((file->f_pos + offset) < 0) { ret = -EINVAL; break; } file->f_pos += offset; ret = file->f_pos; break; default: ret = -EINVAL; } mutex_unlock(&flash_mutex); return ret; } /* * assume that main Write routine did the parameter checking... * so just go ahead and erase, what requested! */ static int erase_block(int nBlock) { volatile unsigned int c1; volatile unsigned char *pWritePtr; unsigned long timeout; int temp, temp1; /* * reset footbridge to the correct offset 0 (...0..3) */ *CSR_ROMWRITEREG = 0; /* * dummy ROM read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); kick_open(); /* * reset status if old errors */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * erase a block... * aim at the middle of a current block... 
*/ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16))); /* * dummy read */ c1 = *pWritePtr; kick_open(); /* * erase */ *(volatile unsigned char *) pWritePtr = 0x20; /* * confirm */ *(volatile unsigned char *) pWritePtr = 0xD0; /* * wait 10 ms */ msleep(10); /* * wait while erasing in process (up to 10 sec) */ timeout = jiffies + 10 * HZ; c1 = 0; while (!(c1 & 0x80) && time_before(jiffies, timeout)) { msleep(10); /* * read any address */ c1 = *(volatile unsigned char *) (pWritePtr); // printk("Flash_erase: status=%X.\n",c1); } /* * set flash for normal read access */ kick_open(); // *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF; *(volatile unsigned char *) pWritePtr = 0xFF; //back to normal operation /* * check if erase errors were reported */ if (c1 & 0x20) { printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr); /* * reset error */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; return -2; } /* * just to make sure - verify if erased OK... */ msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16))); for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) { if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) { printk(KERN_ERR "flash_erase: verify err at %p = %X\n", pWritePtr, temp1); return -1; } } return 0; } /* * write_block will limit number of bytes written to the space in this block */ static int write_block(unsigned long p, const char __user *buf, int count) { volatile unsigned int c1; volatile unsigned int c2; unsigned char *pWritePtr; unsigned int uAddress; unsigned int offset; unsigned long timeout; unsigned long timeout1; pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); /* * check if write will end in this block.... 
*/ offset = p & 0xFFFF; if (offset + count > 0x10000) count = 0x10000 - offset; /* * wait up to 30 sec for this block */ timeout = jiffies + 30 * HZ; for (offset = 0; offset < count; offset++, pWritePtr++) { uAddress = (unsigned int) pWritePtr; uAddress &= 0xFFFFFFFC; if (__get_user(c2, buf + offset)) return -EFAULT; WriteRetry: /* * dummy read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * kick open the write gate */ kick_open(); /* * program footbridge to the correct offset...0..3 */ *CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3; /* * write cmd */ *(volatile unsigned char *) (uAddress) = 0x40; /* * data to write */ *(volatile unsigned char *) (uAddress) = c2; /* * get status */ *(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70; c1 = 0; /* * wait up to 1 sec for this byte */ timeout1 = jiffies + 1 * HZ; /* * while not ready... */ while (!(c1 & 0x80) && time_before(jiffies, timeout1)) c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * if timeout getting status */ if (time_after_eq(jiffies, timeout1)) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; goto WriteRetry; } /* * switch on read access, as a default flash operation mode */ kick_open(); /* * read access */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; /* * if hardware reports an error writing, and not timeout - * reset the chip and retry */ if (c1 & 0x10) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * before timeout? 
*/ if (time_before(jiffies, timeout)) { if (flashdebug) printk(KERN_DEBUG "write_block: Retrying write at 0x%X)n", pWritePtr - FLASH_BASE); /* * wait couple ms */ msleep(10); goto WriteRetry; } else { printk(KERN_ERR "write_block: timeout at 0x%X\n", pWritePtr - FLASH_BASE); /* * return error -2 */ return -2; } } } msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); for (offset = 0; offset < count; offset++) { char c, c1; if (__get_user(c, buf)) return -EFAULT; buf++; if ((c1 = *pWritePtr++) != c) { printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n", pWritePtr - FLASH_BASE, c1, c); return 0; } } return count; } static void kick_open(void) { unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ raw_spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus to catch on... 
*/ udelay(25); } static const struct file_operations flash_fops = { .owner = THIS_MODULE, .llseek = flash_llseek, .read = flash_read, .write = flash_write, .unlocked_ioctl = flash_ioctl, }; static struct miscdevice flash_miscdev = { FLASH_MINOR, "nwflash", &flash_fops }; static int __init nwflash_init(void) { int ret = -ENODEV; if (machine_is_netwinder()) { int id; FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4); if (!FLASH_BASE) goto out; id = get_flash_id(); if ((id != KFLASH_ID) && (id != KFLASH_ID4)) { ret = -ENXIO; iounmap((void *)FLASH_BASE); printk("Flash: incorrect ID 0x%04X.\n", id); goto out; } printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n", NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024)); ret = misc_register(&flash_miscdev); if (ret < 0) { iounmap((void *)FLASH_BASE); } } out: return ret; } static void __exit nwflash_exit(void) { misc_deregister(&flash_miscdev); iounmap((void *)FLASH_BASE); } MODULE_LICENSE("GPL"); module_param(flashdebug, bool, 0644); module_init(nwflash_init); module_exit(nwflash_exit);
gpl-2.0
Superbox/D3-Linux
sound/soc/mid-x86/sst_platform.c
5025
13858
/*
 *  sst_platform.c - Intel MID Platform driver
 *
 *  Copyright (C) 2010 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  Author: Harsha Priya <priya.harsha@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "sst_platform.h"

/* single registered SST DSP device, guarded by sst_lock */
static struct sst_device *sst;
static DEFINE_MUTEX(sst_lock);

/*
 * Register the SST DSP backend. Only one device may be registered at a
 * time; a module reference on the DSP driver is held while registered.
 */
int sst_register_dsp(struct sst_device *dev)
{
	BUG_ON(!dev);
	if (!try_module_get(dev->dev->driver->owner))
		return -ENODEV;
	mutex_lock(&sst_lock);
	if (sst) {
		pr_err("we already have a device %s\n", sst->name);
		module_put(dev->dev->driver->owner);
		mutex_unlock(&sst_lock);
		return -EEXIST;
	}
	pr_debug("registering device %s\n", dev->name);
	sst = dev;
	mutex_unlock(&sst_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_register_dsp);

/*
 * Unregister the SST DSP backend and drop the module reference taken at
 * registration time.
 */
int sst_unregister_dsp(struct sst_device *dev)
{
	BUG_ON(!dev);
	if (dev != sst)
		return -EINVAL;

	mutex_lock(&sst_lock);

	if (!sst) {
		mutex_unlock(&sst_lock);
		return -EIO;
	}

	module_put(sst->dev->driver->owner);
	pr_debug("unreg %s\n", sst->name);
	sst = NULL;
	mutex_unlock(&sst_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_unregister_dsp);

static struct snd_pcm_hardware sst_platform_pcm_hw = {
	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
			SNDRV_PCM_INFO_DOUBLE |
			SNDRV_PCM_INFO_PAUSE |
			SNDRV_PCM_INFO_RESUME |
			SNDRV_PCM_INFO_MMAP|
			SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_BLOCK_TRANSFER |
			SNDRV_PCM_INFO_SYNC_START),
	.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
			SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
			SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
	.rates = (SNDRV_PCM_RATE_8000|
			SNDRV_PCM_RATE_44100 |
			SNDRV_PCM_RATE_48000),
	.rate_min = SST_MIN_RATE,
	.rate_max = SST_MAX_RATE,
	.channels_min =	SST_MIN_CHANNEL,
	.channels_max =	SST_MAX_CHANNEL,
	.buffer_bytes_max = SST_MAX_BUFFER,
	.period_bytes_min = SST_MIN_PERIOD_BYTES,
	.period_bytes_max = SST_MAX_PERIOD_BYTES,
	.periods_min = SST_MIN_PERIODS,
	.periods_max = SST_MAX_PERIODS,
	.fifo_size = SST_FIFO_SIZE,
};

/* MFLD - MSIC */
static struct snd_soc_dai_driver sst_platform_dai[] = {
{
	.name = "Headset-cpu-dai",
	.id = 0,
	.playback = {
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 5,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
},
{
	.name = "Speaker-cpu-dai",
	.id = 1,
	.playback = {
		.channels_min = SST_MONO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
},
{
	.name = "Vibra1-cpu-dai",
	.id = 2,
	.playback = {
		.channels_min = SST_MONO,
		.channels_max = SST_MONO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
},
{
	.name = "Vibra2-cpu-dai",
	.id = 3,
	.playback = {
		.channels_min = SST_MONO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
},
};

/* helper functions */

/* Set the stream state under the status spinlock (callable from any context). */
static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
					int state)
{
	unsigned long flags;
	spin_lock_irqsave(&stream->status_lock, flags);
	stream->stream_status = state;
	spin_unlock_irqrestore(&stream->status_lock, flags);
}

/* Read the stream state under the status spinlock. */
static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
{
	int state;
	unsigned long flags;

	spin_lock_irqsave(&stream->status_lock, flags);
	state = stream->stream_status;
	spin_unlock_irqrestore(&stream->status_lock, flags);
	return state;
}

/*
 * Translate the ALSA runtime configuration of @substream into the SST
 * firmware's PCM parameter structure.
 */
static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
				struct sst_pcm_params *param)
{
	param->codec = SST_CODEC_TYPE_PCM;
	param->num_chan = (u8) substream->runtime->channels;
	param->pcm_wd_sz = substream->runtime->sample_bits;
	param->reserved = 0;
	param->sfreq = substream->runtime->rate;
	param->ring_buffer_size = snd_pcm_lib_buffer_bytes(substream);
	param->period_count = substream->runtime->period_size;
	/* the DSP needs the physical address of the DMA ring buffer */
	param->ring_buffer_addr = virt_to_phys(substream->dma_buffer.area);
	pr_debug("period_cnt = %d\n", param->period_count);
	pr_debug("sfreq= %d, wd_sz = %d\n", param->sfreq, param->pcm_wd_sz);
}

/*
 * Open an SST stream for @substream and store the returned stream id.
 * Returns the (non-negative) stream id on success, negative error otherwise.
 */
static int sst_platform_alloc_stream(struct snd_pcm_substream *substream)
{
	struct sst_runtime_stream *stream =
			substream->runtime->private_data;
	struct sst_pcm_params param = {0};
	struct sst_stream_params str_params = {0};
	int ret_val;

	/* set codec params and inform SST driver the same */
	sst_fill_pcm_params(substream, &param);
	substream->runtime->dma_area = substream->dma_buffer.area;
	str_params.sparams = param;
	str_params.codec = param.codec;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		str_params.ops = STREAM_OPS_PLAYBACK;
		/* device 0 maps to SST device 1, etc. */
		str_params.device_type = substream->pcm->device + 1;
		pr_debug("Playbck stream,Device %d\n",
				substream->pcm->device);
	} else {
		str_params.ops = STREAM_OPS_CAPTURE;
		str_params.device_type = SND_SST_DEVICE_CAPTURE;
		pr_debug("Capture stream,Device %d\n",
				substream->pcm->device);
	}
	ret_val = stream->ops->open(&str_params);
	pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
	if (ret_val < 0)
		return ret_val;

	stream->stream_info.str_id = ret_val;
	pr_debug("str id : %d\n", stream->stream_info.str_id);
	return ret_val;
}
static void sst_period_elapsed(void *mad_substream) { struct snd_pcm_substream *substream = mad_substream; struct sst_runtime_stream *stream; int status; if (!substream || !substream->runtime) return; stream = substream->runtime->private_data; if (!stream) return; status = sst_get_stream_status(stream); if (status != SST_PLATFORM_RUNNING) return; snd_pcm_period_elapsed(substream); } static int sst_platform_init_stream(struct snd_pcm_substream *substream) { struct sst_runtime_stream *stream = substream->runtime->private_data; int ret_val; pr_debug("setting buffer ptr param\n"); sst_set_stream_status(stream, SST_PLATFORM_INIT); stream->stream_info.period_elapsed = sst_period_elapsed; stream->stream_info.mad_substream = substream; stream->stream_info.buffer_ptr = 0; stream->stream_info.sfreq = substream->runtime->rate; ret_val = stream->ops->device_control( SST_SND_STREAM_INIT, &stream->stream_info); if (ret_val) pr_err("control_set ret error %d\n", ret_val); return ret_val; } /* end -- helper functions */ static int sst_platform_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct sst_runtime_stream *stream; int ret_val; pr_debug("sst_platform_open called\n"); snd_soc_set_runtime_hwparams(substream, &sst_platform_pcm_hw); ret_val = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret_val < 0) return ret_val; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (!stream) return -ENOMEM; spin_lock_init(&stream->status_lock); /* get the sst ops */ mutex_lock(&sst_lock); if (!sst) { pr_err("no device available to run\n"); mutex_unlock(&sst_lock); kfree(stream); return -ENODEV; } if (!try_module_get(sst->dev->driver->owner)) { mutex_unlock(&sst_lock); kfree(stream); return -ENODEV; } stream->ops = sst->ops; mutex_unlock(&sst_lock); stream->stream_info.str_id = 0; sst_set_stream_status(stream, SST_PLATFORM_INIT); stream->stream_info.mad_substream = substream; /* allocate memory for SST API set */ 
runtime->private_data = stream; return 0; } static int sst_platform_close(struct snd_pcm_substream *substream) { struct sst_runtime_stream *stream; int ret_val = 0, str_id; pr_debug("sst_platform_close called\n"); stream = substream->runtime->private_data; str_id = stream->stream_info.str_id; if (str_id) ret_val = stream->ops->close(str_id); module_put(sst->dev->driver->owner); kfree(stream); return ret_val; } static int sst_platform_pcm_prepare(struct snd_pcm_substream *substream) { struct sst_runtime_stream *stream; int ret_val = 0, str_id; pr_debug("sst_platform_pcm_prepare called\n"); stream = substream->runtime->private_data; str_id = stream->stream_info.str_id; if (stream->stream_info.str_id) { ret_val = stream->ops->device_control( SST_SND_DROP, &str_id); return ret_val; } ret_val = sst_platform_alloc_stream(substream); if (ret_val < 0) return ret_val; snprintf(substream->pcm->id, sizeof(substream->pcm->id), "%d", stream->stream_info.str_id); ret_val = sst_platform_init_stream(substream); if (ret_val) return ret_val; substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER; return ret_val; } static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret_val = 0, str_id; struct sst_runtime_stream *stream; int str_cmd, status; pr_debug("sst_platform_pcm_trigger called\n"); stream = substream->runtime->private_data; str_id = stream->stream_info.str_id; switch (cmd) { case SNDRV_PCM_TRIGGER_START: pr_debug("sst: Trigger Start\n"); str_cmd = SST_SND_START; status = SST_PLATFORM_RUNNING; stream->stream_info.mad_substream = substream; break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("sst: in stop\n"); str_cmd = SST_SND_DROP; status = SST_PLATFORM_DROPPED; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("sst: in pause\n"); str_cmd = SST_SND_PAUSE; status = SST_PLATFORM_PAUSED; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("sst: in pause release\n"); str_cmd = SST_SND_RESUME; status = SST_PLATFORM_RUNNING; break; default: return 
-EINVAL; } ret_val = stream->ops->device_control(str_cmd, &str_id); if (!ret_val) sst_set_stream_status(stream, status); return ret_val; } static snd_pcm_uframes_t sst_platform_pcm_pointer (struct snd_pcm_substream *substream) { struct sst_runtime_stream *stream; int ret_val, status; struct pcm_stream_info *str_info; stream = substream->runtime->private_data; status = sst_get_stream_status(stream); if (status == SST_PLATFORM_INIT) return 0; str_info = &stream->stream_info; ret_val = stream->ops->device_control( SST_SND_BUFFER_POINTER, str_info); if (ret_val) { pr_err("sst: error code = %d\n", ret_val); return ret_val; } return stream->stream_info.buffer_ptr; } static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); return 0; } static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_ops sst_platform_ops = { .open = sst_platform_open, .close = sst_platform_close, .ioctl = snd_pcm_lib_ioctl, .prepare = sst_platform_pcm_prepare, .trigger = sst_platform_pcm_trigger, .pointer = sst_platform_pcm_pointer, .hw_params = sst_platform_pcm_hw_params, .hw_free = sst_platform_pcm_hw_free, }; static void sst_pcm_free(struct snd_pcm *pcm) { pr_debug("sst_pcm_free called\n"); snd_pcm_lib_preallocate_free_for_all(pcm); } static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_pcm *pcm = rtd->pcm; int retval = 0; pr_debug("sst_pcm_new called\n"); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream || pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), SST_MIN_BUFFER, SST_MAX_BUFFER); if (retval) { pr_err("dma buffer allocationf fail\n"); return retval; } } return retval; } static 
struct snd_soc_platform_driver sst_soc_platform_drv = { .ops = &sst_platform_ops, .pcm_new = sst_pcm_new, .pcm_free = sst_pcm_free, }; static int sst_platform_probe(struct platform_device *pdev) { int ret; pr_debug("sst_platform_probe called\n"); sst = NULL; ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv); if (ret) { pr_err("registering soc platform failed\n"); return ret; } ret = snd_soc_register_dais(&pdev->dev, sst_platform_dai, ARRAY_SIZE(sst_platform_dai)); if (ret) { pr_err("registering cpu dais failed\n"); snd_soc_unregister_platform(&pdev->dev); } return ret; } static int sst_platform_remove(struct platform_device *pdev) { snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(sst_platform_dai)); snd_soc_unregister_platform(&pdev->dev); pr_debug("sst_platform_remove success\n"); return 0; } static struct platform_driver sst_platform_driver = { .driver = { .name = "sst-platform", .owner = THIS_MODULE, }, .probe = sst_platform_probe, .remove = sst_platform_remove, }; module_platform_driver(sst_platform_driver); MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver"); MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sst-platform");
gpl-2.0
AODP/android_kernel_asus_moorefield
arch/powerpc/platforms/cell/cbe_thermal.c
7329
10504
/* * thermal support for the cell processor * * This module adds some sysfs attributes to cpu and spu nodes. * Base for measurements are the digital thermal sensors (DTS) * located on the chip. * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius * The attributes can be found under * /sys/devices/system/cpu/cpuX/thermal * /sys/devices/system/spu/spuX/thermal * * The following attributes are added for each node: * temperature: * contains the current temperature measured by the DTS * throttle_begin: * throttling begins when temperature is greater or equal to * throttle_begin. Setting this value to 125 prevents throttling. * throttle_end: * throttling is being ceased, if the temperature is lower than * throttle_end. Due to a delay between applying throttling and * a reduced temperature this value should be less than throttle_begin. * A value equal to throttle_begin provides only a very little hysteresis. * throttle_full_stop: * If the temperatrue is greater or equal to throttle_full_stop, * full throttling is applied to the cpu or spu. This value should be * greater than throttle_begin and throttle_end. Setting this value to * 65 prevents the unit from running code at all. * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Christian Krafft <krafft@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/cpu.h> #include <asm/spu.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/cell-regs.h> #include "spu_priv1_mmio.h" #define TEMP_MIN 65 #define TEMP_MAX 125 #define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ struct device_attribute attr_ ## _prefix ## _ ## _name = { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .show = _prefix ## _show_ ## _name, \ .store = _prefix ## _store_ ## _name, \ }; static inline u8 reg_to_temp(u8 reg_value) { return ((reg_value & 0x3f) << 1) + TEMP_MIN; } static inline u8 temp_to_reg(u8 temp) { return ((temp - TEMP_MIN) >> 1) & 0x3f; } static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) { struct spu *spu; spu = container_of(dev, struct spu, dev); return cbe_get_pmd_regs(spu_devnode(spu)); } /* returns the value for a given spu in a given register */ static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) { union spe_reg value; struct spu *spu; spu = container_of(dev, struct spu, dev); value.val = in_be64(&reg->val); return value.spe[spu->spe_id]; } static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { u8 value; struct cbe_pmd_regs __iomem *pmd_regs; pmd_regs = get_pmd_regs(dev); value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); return sprintf(buf, "%d\n", reg_to_temp(value)); } static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos) { u64 value; value = in_be64(&pmd_regs->tm_tpr.val); /* access the corresponding byte */ value >>= pos; value &= 0x3F; return sprintf(buf, "%d\n", reg_to_temp(value)); } static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) { u64 reg_value; int temp; u64 new_value; int ret; ret = sscanf(buf, "%u", &temp); if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX) return -EINVAL; new_value = temp_to_reg(temp); reg_value = 
in_be64(&pmd_regs->tm_tpr.val); /* zero out bits for new value */ reg_value &= ~(0xffull << pos); /* set bits to new value */ reg_value |= new_value << pos; out_be64(&pmd_regs->tm_tpr.val, reg_value); return size; } static ssize_t spu_show_throttle_end(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 0); } static ssize_t spu_show_throttle_begin(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 8); } static ssize_t spu_show_throttle_full_stop(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 16); } static ssize_t spu_store_throttle_end(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 0); } static ssize_t spu_store_throttle_begin(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 8); } static ssize_t spu_store_throttle_full_stop(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 16); } static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) { struct cbe_pmd_regs __iomem *pmd_regs; u64 value; pmd_regs = cbe_get_cpu_pmd_regs(dev->id); value = in_be64(&pmd_regs->ts_ctsr2); value = (value >> pos) & 0x3f; return sprintf(buf, "%d\n", reg_to_temp(value)); } /* shows the temperature of the DTS on the PPE, * located near the linear thermal sensor */ static ssize_t ppe_show_temp0(struct device *dev, struct device_attribute *attr, char *buf) { return ppe_show_temp(dev, buf, 32); } /* shows the temperature of the second DTS on the PPE */ static ssize_t ppe_show_temp1(struct device *dev, struct device_attribute *attr, char *buf) { return ppe_show_temp(dev, buf, 0); } static ssize_t ppe_show_throttle_end(struct device *dev, struct 
device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); } static ssize_t ppe_show_throttle_begin(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); } static ssize_t ppe_show_throttle_full_stop(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); } static ssize_t ppe_store_throttle_end(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); } static ssize_t ppe_store_throttle_begin(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); } static ssize_t ppe_store_throttle_full_stop(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); } static struct device_attribute attr_spu_temperature = { .attr = {.name = "temperature", .mode = 0400 }, .show = spu_show_temp, }; static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); static struct attribute *spu_attributes[] = { &attr_spu_temperature.attr, &attr_spu_throttle_end.attr, &attr_spu_throttle_begin.attr, &attr_spu_throttle_full_stop.attr, NULL, }; static struct attribute_group spu_attribute_group = { .name = "thermal", .attrs = spu_attributes, }; static struct device_attribute attr_ppe_temperature0 = { .attr = {.name = "temperature0", .mode = 0400 }, .show = ppe_show_temp0, }; static struct device_attribute attr_ppe_temperature1 = { .attr = {.name = "temperature1", .mode = 0400 }, .show = ppe_show_temp1, }; static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); static DEVICE_PREFIX_ATTR(ppe, 
throttle_full_stop, 0600); static struct attribute *ppe_attributes[] = { &attr_ppe_temperature0.attr, &attr_ppe_temperature1.attr, &attr_ppe_throttle_end.attr, &attr_ppe_throttle_begin.attr, &attr_ppe_throttle_full_stop.attr, NULL, }; static struct attribute_group ppe_attribute_group = { .name = "thermal", .attrs = ppe_attributes, }; /* * initialize throttling with default values */ static int __init init_default_values(void) { int cpu; struct cbe_pmd_regs __iomem *pmd_regs; struct device *dev; union ppe_spe_reg tpr; union spe_reg str1; u64 str2; union spe_reg cr1; u64 cr2; /* TPR defaults */ /* ppe * 1F - no full stop * 08 - dynamic throttling starts if over 80 degrees * 03 - dynamic throttling ceases if below 70 degrees */ tpr.ppe = 0x1F0803; /* spe * 10 - full stopped when over 96 degrees * 08 - dynamic throttling starts if over 80 degrees * 03 - dynamic throttling ceases if below 70 degrees */ tpr.spe = 0x100803; /* STR defaults */ /* str1 * 10 - stop 16 of 32 cycles */ str1.val = 0x1010101010101010ull; /* str2 * 10 - stop 16 of 32 cycles */ str2 = 0x10; /* CR defaults */ /* cr1 * 4 - normal operation */ cr1.val = 0x0404040404040404ull; /* cr2 * 4 - normal operation */ cr2 = 0x04; for_each_possible_cpu (cpu) { pr_debug("processing cpu %d\n", cpu); dev = get_cpu_device(cpu); if (!dev) { pr_info("invalid dev pointer for cbe_thermal\n"); return -EINVAL; } pmd_regs = cbe_get_cpu_pmd_regs(dev->id); if (!pmd_regs) { pr_info("invalid CBE regs pointer for cbe_thermal\n"); return -EINVAL; } out_be64(&pmd_regs->tm_str2, str2); out_be64(&pmd_regs->tm_str1.val, str1.val); out_be64(&pmd_regs->tm_tpr.val, tpr.val); out_be64(&pmd_regs->tm_cr1.val, cr1.val); out_be64(&pmd_regs->tm_cr2, cr2); } return 0; } static int __init thermal_init(void) { int rc = init_default_values(); if (rc == 0) { spu_add_dev_attr_group(&spu_attribute_group); cpu_add_dev_attr_group(&ppe_attribute_group); } return rc; } module_init(thermal_init); static void __exit thermal_exit(void) { 
spu_remove_dev_attr_group(&spu_attribute_group); cpu_remove_dev_attr_group(&ppe_attribute_group); } module_exit(thermal_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
gpl-2.0
z8cpaul/lsikernel-3.10
drivers/media/pci/cx18/cx18-fileops.c
7841
25737
/* * cx18 file operation functions * * Derived from ivtv-fileops.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-fileops.h" #include "cx18-i2c.h" #include "cx18-queue.h" #include "cx18-vbi.h" #include "cx18-audio.h" #include "cx18-mailbox.h" #include "cx18-scb.h" #include "cx18-streams.h" #include "cx18-controls.h" #include "cx18-ioctl.h" #include "cx18-cards.h" /* This function tries to claim the stream for a specific file descriptor. If no one else is using this stream then the stream is claimed and associated VBI and IDX streams are also automatically claimed. Possible error returns: -EBUSY if someone else has claimed the stream or 0 on success. */ int cx18_claim_stream(struct cx18_open_id *id, int type) { struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[type]; struct cx18_stream *s_assoc; /* Nothing should ever try to directly claim the IDX stream */ if (type == CX18_ENC_STREAM_TYPE_IDX) { CX18_WARN("MPEG Index stream cannot be claimed " "directly, but something tried.\n"); return -EINVAL; } if (test_and_set_bit(CX18_F_S_CLAIMED, &s->s_flags)) { /* someone already claimed this stream */ if (s->id == id->open_id) { /* yes, this file descriptor did. 
So that's OK. */ return 0; } if (s->id == -1 && type == CX18_ENC_STREAM_TYPE_VBI) { /* VBI is handled already internally, now also assign the file descriptor to this stream for external reading of the stream. */ s->id = id->open_id; CX18_DEBUG_INFO("Start Read VBI\n"); return 0; } /* someone else is using this stream already */ CX18_DEBUG_INFO("Stream %d is busy\n", type); return -EBUSY; } s->id = id->open_id; /* * CX18_ENC_STREAM_TYPE_MPG needs to claim: * CX18_ENC_STREAM_TYPE_VBI, if VBI insertion is on for sliced VBI, or * CX18_ENC_STREAM_TYPE_IDX, if VBI insertion is off for sliced VBI * (We don't yet fix up MPEG Index entries for our inserted packets). * * For all other streams we're done. */ if (type != CX18_ENC_STREAM_TYPE_MPG) return 0; s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (cx->vbi.insert_mpeg && !cx18_raw_vbi(cx)) s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; else if (!cx18_stream_enabled(s_assoc)) return 0; set_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); /* mark that it is used internally */ set_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags); return 0; } EXPORT_SYMBOL(cx18_claim_stream); /* This function releases a previously claimed stream. It will take into account associated VBI streams. */ void cx18_release_stream(struct cx18_stream *s) { struct cx18 *cx = s->cx; struct cx18_stream *s_assoc; s->id = -1; if (s->type == CX18_ENC_STREAM_TYPE_IDX) { /* * The IDX stream is only used internally, and can * only be indirectly unclaimed by unclaiming the MPG stream. */ return; } if (s->type == CX18_ENC_STREAM_TYPE_VBI && test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags)) { /* this stream is still in use internally */ return; } if (!test_and_clear_bit(CX18_F_S_CLAIMED, &s->s_flags)) { CX18_DEBUG_WARN("Release stream %s not in use!\n", s->name); return; } cx18_flush_queues(s); /* * CX18_ENC_STREAM_TYPE_MPG needs to release the * CX18_ENC_STREAM_TYPE_VBI and/or CX18_ENC_STREAM_TYPE_IDX streams. * * For all other streams we're done. 
*/ if (s->type != CX18_ENC_STREAM_TYPE_MPG) return; /* Unclaim the associated MPEG Index stream */ s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags)) { clear_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); cx18_flush_queues(s_assoc); } /* Unclaim the associated VBI stream */ s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; if (test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags)) { if (s_assoc->id == -1) { /* * The VBI stream is not still claimed by a file * descriptor, so completely unclaim it. */ clear_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); cx18_flush_queues(s_assoc); } } } EXPORT_SYMBOL(cx18_release_stream); static void cx18_dualwatch(struct cx18 *cx) { struct v4l2_tuner vt; u32 new_stereo_mode; const u32 dual = 0x0200; new_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode); memset(&vt, 0, sizeof(vt)); cx18_call_all(cx, tuner, g_tuner, &vt); if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2)) new_stereo_mode = dual; if (new_stereo_mode == cx->dualwatch_stereo_mode) return; CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x.\n", cx->dualwatch_stereo_mode, new_stereo_mode); if (v4l2_ctrl_s_ctrl(cx->cxhdl.audio_mode, new_stereo_mode)) CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n"); } static struct cx18_mdl *cx18_get_mdl(struct cx18_stream *s, int non_block, int *err) { struct cx18 *cx = s->cx; struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; struct cx18_mdl *mdl; DEFINE_WAIT(wait); *err = 0; while (1) { if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* Process pending program updates and VBI data */ if (time_after(jiffies, cx->dualwatch_jiffies + msecs_to_jiffies(1000))) { cx->dualwatch_jiffies = jiffies; cx18_dualwatch(cx); } if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) && !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { while ((mdl = cx18_dequeue(s_vbi, &s_vbi->q_full))) { /* byteswap and process VBI data 
*/ cx18_process_vbi_data(cx, mdl, s_vbi->type); cx18_stream_put_mdl_fw(s_vbi, mdl); } } mdl = &cx->vbi.sliced_mpeg_mdl; if (mdl->readpos != mdl->bytesused) return mdl; } /* do we have new data? */ mdl = cx18_dequeue(s, &s->q_full); if (mdl) { if (!test_and_clear_bit(CX18_F_M_NEED_SWAP, &mdl->m_flags)) return mdl; if (s->type == CX18_ENC_STREAM_TYPE_MPG) /* byteswap MPG data */ cx18_mdl_swap(mdl); else { /* byteswap and process VBI data */ cx18_process_vbi_data(cx, mdl, s->type); } return mdl; } /* return if end of stream */ if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) { CX18_DEBUG_INFO("EOS %s\n", s->name); return NULL; } /* return if file was opened with O_NONBLOCK */ if (non_block) { *err = -EAGAIN; return NULL; } /* wait for more data to arrive */ prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); /* New buffers might have become available before we were added to the waitqueue */ if (!atomic_read(&s->q_full.depth)) schedule(); finish_wait(&s->waitq, &wait); if (signal_pending(current)) { /* return if a signal was received */ CX18_DEBUG_INFO("User stopped %s\n", s->name); *err = -EINTR; return NULL; } } } static void cx18_setup_sliced_vbi_mdl(struct cx18 *cx) { struct cx18_mdl *mdl = &cx->vbi.sliced_mpeg_mdl; struct cx18_buffer *buf = &cx->vbi.sliced_mpeg_buf; int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; buf->buf = cx->vbi.sliced_mpeg_data[idx]; buf->bytesused = cx->vbi.sliced_mpeg_size[idx]; buf->readpos = 0; mdl->curr_buf = NULL; mdl->bytesused = cx->vbi.sliced_mpeg_size[idx]; mdl->readpos = 0; } static size_t cx18_copy_buf_to_user(struct cx18_stream *s, struct cx18_buffer *buf, char __user *ubuf, size_t ucount, bool *stop) { struct cx18 *cx = s->cx; size_t len = buf->bytesused - buf->readpos; *stop = false; if (len > ucount) len = ucount; if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG && !cx18_raw_vbi(cx) && buf != &cx->vbi.sliced_mpeg_buf) { /* * Try to find a good splice point in the PS, just before * an MPEG-2 Program Pack 
start code, and provide only * up to that point to the user, so it's easy to insert VBI data * the next time around. * * This will not work for an MPEG-2 TS and has only been * verified by analysis to work for an MPEG-2 PS. Helen Buus * pointed out this works for the CX23416 MPEG-2 DVD compatible * stream, and research indicates both the MPEG 2 SVCD and DVD * stream types use an MPEG-2 PS container. */ /* * An MPEG-2 Program Stream (PS) is a series of * MPEG-2 Program Packs terminated by an * MPEG Program End Code after the last Program Pack. * A Program Pack may hold a PS System Header packet and any * number of Program Elementary Stream (PES) Packets */ const char *start = buf->buf + buf->readpos; const char *p = start + 1; const u8 *q; u8 ch = cx->search_pack_header ? 0xba : 0xe0; int stuffing, i; while (start + len > p) { /* Scan for a 0 to find a potential MPEG-2 start code */ q = memchr(p, 0, start + len - p); if (q == NULL) break; p = q + 1; /* * Keep looking if not a * MPEG-2 Pack header start code: 0x00 0x00 0x01 0xba * or MPEG-2 video PES start code: 0x00 0x00 0x01 0xe0 */ if ((char *)q + 15 >= buf->buf + buf->bytesused || q[1] != 0 || q[2] != 1 || q[3] != ch) continue; /* If expecting the primary video PES */ if (!cx->search_pack_header) { /* Continue if it couldn't be a PES packet */ if ((q[6] & 0xc0) != 0x80) continue; /* Check if a PTS or PTS & DTS follow */ if (((q[7] & 0xc0) == 0x80 && /* PTS only */ (q[9] & 0xf0) == 0x20) || /* PTS only */ ((q[7] & 0xc0) == 0xc0 && /* PTS & DTS */ (q[9] & 0xf0) == 0x30)) { /* DTS follows */ /* Assume we found the video PES hdr */ ch = 0xba; /* next want a Program Pack*/ cx->search_pack_header = 1; p = q + 9; /* Skip this video PES hdr */ } continue; } /* We may have found a Program Pack start code */ /* Get the count of stuffing bytes & verify them */ stuffing = q[13] & 7; /* all stuffing bytes must be 0xff */ for (i = 0; i < stuffing; i++) if (q[14 + i] != 0xff) break; if (i == stuffing && /* right number of 
stuffing bytes*/ (q[4] & 0xc4) == 0x44 && /* marker check */ (q[12] & 3) == 3 && /* marker check */ q[14 + stuffing] == 0 && /* PES Pack or Sys Hdr */ q[15 + stuffing] == 0 && q[16 + stuffing] == 1) { /* We declare we actually found a Program Pack*/ cx->search_pack_header = 0; /* expect vid PES */ len = (char *)q - start; cx18_setup_sliced_vbi_mdl(cx); *stop = true; break; } } } if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) { CX18_DEBUG_WARN("copy %zd bytes to user failed for %s\n", len, s->name); return -EFAULT; } buf->readpos += len; if (s->type == CX18_ENC_STREAM_TYPE_MPG && buf != &cx->vbi.sliced_mpeg_buf) cx->mpg_data_received += len; return len; } static size_t cx18_copy_mdl_to_user(struct cx18_stream *s, struct cx18_mdl *mdl, char __user *ubuf, size_t ucount) { size_t tot_written = 0; int rc; bool stop = false; if (mdl->curr_buf == NULL) mdl->curr_buf = list_first_entry(&mdl->buf_list, struct cx18_buffer, list); if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) { /* * For some reason we've exhausted the buffers, but the MDL * object still said some data was unread. * Fix that and bail out. 
*/ mdl->readpos = mdl->bytesused; return 0; } list_for_each_entry_from(mdl->curr_buf, &mdl->buf_list, list) { if (mdl->curr_buf->readpos >= mdl->curr_buf->bytesused) continue; rc = cx18_copy_buf_to_user(s, mdl->curr_buf, ubuf + tot_written, ucount - tot_written, &stop); if (rc < 0) return rc; mdl->readpos += rc; tot_written += rc; if (stop || /* Forced stopping point for VBI insertion */ tot_written >= ucount || /* Reader request statisfied */ mdl->curr_buf->readpos < mdl->curr_buf->bytesused || mdl->readpos >= mdl->bytesused) /* MDL buffers drained */ break; } return tot_written; } static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf, size_t tot_count, int non_block) { struct cx18 *cx = s->cx; size_t tot_written = 0; int single_frame = 0; if (atomic_read(&cx->ana_capturing) == 0 && s->id == -1) { /* shouldn't happen */ CX18_DEBUG_WARN("Stream %s not initialized before read\n", s->name); return -EIO; } /* Each VBI buffer is one frame, the v4l2 API says that for VBI the frames should arrive one-by-one, so make sure we never output more than one VBI frame at a time */ if (s->type == CX18_ENC_STREAM_TYPE_VBI && !cx18_raw_vbi(cx)) single_frame = 1; for (;;) { struct cx18_mdl *mdl; int rc; mdl = cx18_get_mdl(s, non_block, &rc); /* if there is no data available... 
*/ if (mdl == NULL) { /* if we got data, then return that regardless */ if (tot_written) break; /* EOS condition */ if (rc == 0) { clear_bit(CX18_F_S_STREAMOFF, &s->s_flags); clear_bit(CX18_F_S_APPL_IO, &s->s_flags); cx18_release_stream(s); } /* set errno */ return rc; } rc = cx18_copy_mdl_to_user(s, mdl, ubuf + tot_written, tot_count - tot_written); if (mdl != &cx->vbi.sliced_mpeg_mdl) { if (mdl->readpos == mdl->bytesused) cx18_stream_put_mdl_fw(s, mdl); else cx18_push(s, mdl, &s->q_full); } else if (mdl->readpos == mdl->bytesused) { int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; cx->vbi.sliced_mpeg_size[idx] = 0; cx->vbi.inserted_frame++; cx->vbi_data_inserted += mdl->bytesused; } if (rc < 0) return rc; tot_written += rc; if (tot_written == tot_count || single_frame) break; } return tot_written; } static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf, size_t count, loff_t *pos, int non_block) { ssize_t rc = count ? cx18_read(s, ubuf, count, non_block) : 0; struct cx18 *cx = s->cx; CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc); if (rc > 0) pos += rc; return rc; } int cx18_start_capture(struct cx18_open_id *id) { struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; struct cx18_stream *s_vbi; struct cx18_stream *s_idx; if (s->type == CX18_ENC_STREAM_TYPE_RAD) { /* you cannot read from these stream types. */ return -EPERM; } /* Try to claim this stream. */ if (cx18_claim_stream(id, s->type)) return -EBUSY; /* If capture is already in progress, then we also have to do nothing extra. 
*/ if (test_bit(CX18_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(CX18_F_S_STREAMING, &s->s_flags)) { set_bit(CX18_F_S_APPL_IO, &s->s_flags); return 0; } /* Start associated VBI or IDX stream capture if required */ s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* * The VBI and IDX streams should have been claimed * automatically, if for internal use, when the MPG stream was * claimed. We only need to start these streams capturing. */ if (test_bit(CX18_F_S_INTERNAL_USE, &s_idx->s_flags) && !test_and_set_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) { if (cx18_start_v4l2_encode_stream(s_idx)) { CX18_DEBUG_WARN("IDX capture start failed\n"); clear_bit(CX18_F_S_STREAMING, &s_idx->s_flags); goto start_failed; } CX18_DEBUG_INFO("IDX capture started\n"); } if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) && !test_and_set_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) { if (cx18_start_v4l2_encode_stream(s_vbi)) { CX18_DEBUG_WARN("VBI capture start failed\n"); clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags); goto start_failed; } CX18_DEBUG_INFO("VBI insertion started\n"); } } /* Tell the card to start capturing */ if (!cx18_start_v4l2_encode_stream(s)) { /* We're done */ set_bit(CX18_F_S_APPL_IO, &s->s_flags); /* Resume a possibly paused encoder */ if (test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags)) cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, s->handle); return 0; } start_failed: CX18_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name); /* * The associated VBI and IDX streams for internal use are released * automatically when the MPG stream is released. We only need to stop * the associated stream. 
*/ if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* Stop the IDX stream which is always for internal use */ if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) { cx18_stop_v4l2_encode_stream(s_idx, 0); clear_bit(CX18_F_S_STREAMING, &s_idx->s_flags); } /* Stop the VBI stream, if only running for internal use */ if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) && !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { cx18_stop_v4l2_encode_stream(s_vbi, 0); clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags); } } clear_bit(CX18_F_S_STREAMING, &s->s_flags); cx18_release_stream(s); /* Also releases associated streams */ return -EIO; } ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct cx18_open_id *id = file2id(filp); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int rc; CX18_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name); mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) return rc; if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { return videobuf_read_stream(&s->vbuf_q, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); } unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait) { struct cx18_open_id *id = file2id(filp); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags); /* Start a capture if there is none */ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) { int rc; mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) { CX18_DEBUG_INFO("Could not start capture for %s (%d)\n", s->name, rc); return POLLERR; } CX18_DEBUG_FILE("Encoder poll started capture\n"); } if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { int videobuf_poll = 
videobuf_poll_stream(filp, &s->vbuf_q, wait); if (eof && videobuf_poll == POLLERR) return POLLHUP; else return videobuf_poll; } /* add stream's waitq to the poll list */ CX18_DEBUG_HI_FILE("Encoder poll\n"); poll_wait(filp, &s->waitq, wait); if (atomic_read(&s->q_full.depth)) return POLLIN | POLLRDNORM; if (eof) return POLLHUP; return 0; } int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags); if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { /* Start a capture if there is none */ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) { int rc; mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) { CX18_DEBUG_INFO( "Could not start capture for %s (%d)\n", s->name, rc); return -EINVAL; } CX18_DEBUG_FILE("Encoder mmap started capture\n"); } return videobuf_mmap_mapper(&s->vbuf_q, vma); } return -EINVAL; } void cx18_vb_timeout(unsigned long data) { struct cx18_stream *s = (struct cx18_stream *)data; struct cx18_videobuf_buffer *buf; unsigned long flags; /* Return all of the buffers in error state, so the vbi/vid inode * can return from blocking. 
*/ spin_lock_irqsave(&s->vb_lock, flags); while (!list_empty(&s->vb_capture)) { buf = list_entry(s->vb_capture.next, struct cx18_videobuf_buffer, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); } spin_unlock_irqrestore(&s->vb_lock, flags); } void cx18_stop_capture(struct cx18_open_id *id, int gop_end) { struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; struct cx18_stream *s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; CX18_DEBUG_IOCTL("close() of %s\n", s->name); /* 'Unclaim' this stream */ /* Stop capturing */ if (test_bit(CX18_F_S_STREAMING, &s->s_flags)) { CX18_DEBUG_INFO("close stopping capture\n"); if (id->type == CX18_ENC_STREAM_TYPE_MPG) { /* Stop internal use associated VBI and IDX streams */ if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) && !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { CX18_DEBUG_INFO("close stopping embedded VBI " "capture\n"); cx18_stop_v4l2_encode_stream(s_vbi, 0); } if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) { CX18_DEBUG_INFO("close stopping IDX capture\n"); cx18_stop_v4l2_encode_stream(s_idx, 0); } } if (id->type == CX18_ENC_STREAM_TYPE_VBI && test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags)) /* Also used internally, don't stop capturing */ s->id = -1; else cx18_stop_v4l2_encode_stream(s, gop_end); } if (!gop_end) { clear_bit(CX18_F_S_APPL_IO, &s->s_flags); clear_bit(CX18_F_S_STREAMOFF, &s->s_flags); cx18_release_stream(s); } } int cx18_v4l2_close(struct file *filp) { struct v4l2_fh *fh = filp->private_data; struct cx18_open_id *id = fh2id(fh); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; CX18_DEBUG_IOCTL("close() of %s\n", s->name); mutex_lock(&cx->serialize_lock); /* Stop radio */ if (id->type == CX18_ENC_STREAM_TYPE_RAD && v4l2_fh_is_singular_file(filp)) { /* Closing radio device, return to TV mode */ cx18_mute(cx); /* Mark that the radio is no longer in use 
*/ clear_bit(CX18_F_I_RADIO_USER, &cx->i_flags); /* Switch tuner to TV */ cx18_call_all(cx, core, s_std, cx->std); /* Select correct audio input (i.e. TV tuner or Line in) */ cx18_audio_set_io(cx); if (atomic_read(&cx->ana_capturing) > 0) { /* Undo video mute */ cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle, (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute) | (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8))); } /* Done! Unmute and continue. */ cx18_unmute(cx); } v4l2_fh_del(fh); v4l2_fh_exit(fh); /* 'Unclaim' this stream */ if (s->id == id->open_id) cx18_stop_capture(id, 0); kfree(id); mutex_unlock(&cx->serialize_lock); return 0; } static int cx18_serialized_open(struct cx18_stream *s, struct file *filp) { struct cx18 *cx = s->cx; struct cx18_open_id *item; CX18_DEBUG_FILE("open %s\n", s->name); /* Allocate memory */ item = kzalloc(sizeof(struct cx18_open_id), GFP_KERNEL); if (NULL == item) { CX18_DEBUG_WARN("nomem on v4l2 open\n"); return -ENOMEM; } v4l2_fh_init(&item->fh, s->video_dev); item->cx = cx; item->type = s->type; item->open_id = cx->open_id++; filp->private_data = &item->fh; v4l2_fh_add(&item->fh); if (item->type == CX18_ENC_STREAM_TYPE_RAD && v4l2_fh_is_singular_file(filp)) { if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) { if (atomic_read(&cx->ana_capturing) > 0) { /* switching to radio while capture is in progress is not polite */ v4l2_fh_del(&item->fh); v4l2_fh_exit(&item->fh); kfree(item); return -EBUSY; } } /* Mark that the radio is being used. */ set_bit(CX18_F_I_RADIO_USER, &cx->i_flags); /* We have the radio */ cx18_mute(cx); /* Switch tuner to radio */ cx18_call_all(cx, tuner, s_radio); /* Select the correct audio input (i.e. radio tuner) */ cx18_audio_set_io(cx); /* Done! Unmute and continue. 
*/ cx18_unmute(cx); } return 0; } int cx18_v4l2_open(struct file *filp) { int res; struct video_device *video_dev = video_devdata(filp); struct cx18_stream *s = video_get_drvdata(video_dev); struct cx18 *cx = s->cx; mutex_lock(&cx->serialize_lock); if (cx18_init_on_first_open(cx)) { CX18_ERR("Failed to initialize on %s\n", video_device_node_name(video_dev)); mutex_unlock(&cx->serialize_lock); return -ENXIO; } res = cx18_serialized_open(s, filp); mutex_unlock(&cx->serialize_lock); return res; } void cx18_mute(struct cx18 *cx) { u32 h; if (atomic_read(&cx->ana_capturing)) { h = cx18_find_handle(cx); if (h != CX18_INVALID_TASK_HANDLE) cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 1); else CX18_ERR("Can't find valid task handle for mute\n"); } CX18_DEBUG_INFO("Mute\n"); } void cx18_unmute(struct cx18 *cx) { u32 h; if (atomic_read(&cx->ana_capturing)) { h = cx18_find_handle(cx); if (h != CX18_INVALID_TASK_HANDLE) { cx18_msleep_timeout(100, 0); cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2, h, 12); cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 0); } else CX18_ERR("Can't find valid task handle for unmute\n"); } CX18_DEBUG_INFO("Unmute\n"); }
gpl-2.0
lce67/android_kernel_htc_inc
drivers/usb/gadget/nokia.c
162
6457
/* * nokia.c -- Nokia Composite Gadget Driver * * Copyright (C) 2008-2010 Nokia Corporation * Contact: Felipe Balbi <felipe.balbi@nokia.com> * * This gadget driver borrows from serial.c which is: * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * version 2 of that License. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/device.h> #include "u_serial.h" #include "u_ether.h" #include "u_phonet.h" #include "gadget_chips.h" /* Defines */ #define NOKIA_VERSION_NUM 0x0211 #define NOKIA_LONG_NAME "N900 (PC-Suite Mode)" /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "u_serial.c" #include "f_acm.c" #include "f_ecm.c" #include "f_obex.c" #include "f_serial.c" #include "f_phonet.c" #include "u_ether.c" /*-------------------------------------------------------------------------*/ #define NOKIA_VENDOR_ID 0x0421 /* Nokia */ #define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */ /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 #define STRING_DESCRIPTION_IDX 2 static char manufacturer_nokia[] = "Nokia"; static const char product_nokia[] = NOKIA_LONG_NAME; static const char description_nokia[] = "PC-Suite Configuration"; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer_nokia, [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME, [STRING_DESCRIPTION_IDX].s = description_nokia, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_COMM, .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID), .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID), /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; /*-------------------------------------------------------------------------*/ /* Module */ MODULE_DESCRIPTION("Nokia composite gadget driver for N900"); MODULE_AUTHOR("Felipe Balbi"); MODULE_LICENSE("GPL"); /*-------------------------------------------------------------------------*/ static u8 hostaddr[ETH_ALEN]; static int __init nokia_bind_config(struct usb_configuration *c) { int status = 0; status = phonet_bind_config(c); if (status) printk(KERN_DEBUG "could not bind phonet config\n"); status = 
obex_bind_config(c, 0); if (status) printk(KERN_DEBUG "could not bind obex config %d\n", 0); status = obex_bind_config(c, 1); if (status) printk(KERN_DEBUG "could not bind obex config %d\n", 0); status = acm_bind_config(c, 2); if (status) printk(KERN_DEBUG "could not bind acm config\n"); status = ecm_bind_config(c, hostaddr); if (status) printk(KERN_DEBUG "could not bind ecm config\n"); return status; } static struct usb_configuration nokia_config_500ma_driver = { .label = "Bus Powered", .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_ONE, .bMaxPower = 250, /* 500mA */ }; static struct usb_configuration nokia_config_100ma_driver = { .label = "Self Powered", .bConfigurationValue = 2, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, .bMaxPower = 50, /* 100 mA */ }; static int __init nokia_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; status = gphonet_setup(cdev->gadget); if (status < 0) goto err_phonet; status = gserial_setup(cdev->gadget, 3); if (status < 0) goto err_serial; status = gether_setup(cdev->gadget, hostaddr); if (status < 0) goto err_ether; status = usb_string_id(cdev); if (status < 0) goto err_usb; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto err_usb; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* config description */ status = usb_string_id(cdev); if (status < 0) goto err_usb; strings_dev[STRING_DESCRIPTION_IDX].id = status; nokia_config_500ma_driver.iConfiguration = status; nokia_config_100ma_driver.iConfiguration = status; /* set up other descriptors */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM); else { /* this should only work with hw that supports altsettings * and several endpoints, anything else, panic. 
*/ pr_err("nokia_bind: controller '%s' not recognized\n", gadget->name); goto err_usb; } /* finaly register the configuration */ status = usb_add_config(cdev, &nokia_config_500ma_driver, nokia_bind_config); if (status < 0) goto err_usb; status = usb_add_config(cdev, &nokia_config_100ma_driver, nokia_bind_config); if (status < 0) goto err_usb; dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME); return 0; err_usb: gether_cleanup(); err_ether: gserial_cleanup(); err_serial: gphonet_cleanup(); err_phonet: return status; } static int __exit nokia_unbind(struct usb_composite_dev *cdev) { gphonet_cleanup(); gserial_cleanup(); gether_cleanup(); return 0; } static struct usb_composite_driver nokia_driver = { .name = "g_nokia", .dev = &device_desc, .strings = dev_strings, .unbind = __exit_p(nokia_unbind), }; static int __init nokia_init(void) { return usb_composite_probe(&nokia_driver, nokia_bind); } module_init(nokia_init); static void __exit nokia_cleanup(void) { usb_composite_unregister(&nokia_driver); } module_exit(nokia_cleanup);
gpl-2.0
skitlab/kernel-source-dm8168-3.2.y
drivers/media/video/gspca/t613.c
162
36136
/* * T613 subdriver * * Copyright (C) 2010 Jean-Francois Moine (http://moinejf.free.fr) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * *Notes: * t613 + tas5130A * * Focus to light do not balance well as in win. * Quality in win is not good, but its kinda better. * * Fix some "extraneous bytes", most of apps will show the image anyway * * Gamma table, is there, but its really doing something? * * 7~8 Fps, its ok, max on win its 10. * Costantino Leandro */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "t613" #include <linux/slab.h> #include "gspca.h" #define V4L2_CID_EFFECTS (V4L2_CID_PRIVATE_BASE + 0) MODULE_AUTHOR("Leandro Costantino <le_costantino@pixartargentina.com.ar>"); MODULE_DESCRIPTION("GSPCA/T613 (JPEG Compliance) USB Camera Driver"); MODULE_LICENSE("GPL"); struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ u8 brightness; u8 contrast; u8 colors; u8 autogain; u8 gamma; u8 sharpness; u8 freq; u8 red_gain; u8 blue_gain; u8 green_gain; u8 awb; /* set default r/g/b and activate */ u8 mirror; u8 effect; u8 sensor; }; enum sensors { SENSOR_OM6802, SENSOR_OTHER, SENSOR_TAS5130A, SENSOR_LT168G, /* must verify if this is the actual model */ }; /* V4L2 controls supported by the driver */ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val); static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val); static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setblue_gain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getblue_gain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setred_gain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getred_gain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setmirror(struct gspca_dev *gspca_dev, __s32 val); static int sd_getmirror(struct gspca_dev *gspca_dev, __s32 *val); static 
int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val); static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val); static const struct ctrl sd_ctrls[] = { { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 14, .step = 1, #define BRIGHTNESS_DEF 8 .default_value = BRIGHTNESS_DEF, }, .set = sd_setbrightness, .get = sd_getbrightness, }, { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 0x0d, .step = 1, #define CONTRAST_DEF 0x07 .default_value = CONTRAST_DEF, }, .set = sd_setcontrast, .get = sd_getcontrast, }, { { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Color", .minimum = 0, .maximum = 0x0f, .step = 1, #define COLORS_DEF 0x05 .default_value = COLORS_DEF, }, .set = sd_setcolors, .get = sd_getcolors, }, #define GAMMA_MAX 16 #define GAMMA_DEF 10 { { .id = V4L2_CID_GAMMA, /* (gamma on win) */ .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = 0, .maximum = GAMMA_MAX - 1, .step = 1, .default_value = GAMMA_DEF, }, .set = sd_setgamma, .get = sd_getgamma, }, { { .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activa lowlight, * some apps dont bring up the * backligth_compensation control) */ .type = V4L2_CTRL_TYPE_INTEGER, .name = "Low Light", .minimum = 0, .maximum = 1, .step = 1, #define AUTOGAIN_DEF 0x01 .default_value = AUTOGAIN_DEF, }, .set = sd_setlowlight, .get = sd_getlowlight, }, { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror Image", .minimum = 0, .maximum = 1, .step = 1, #define MIRROR_DEF 0 .default_value = MIRROR_DEF, }, .set = sd_setmirror, .get = sd_getmirror }, { { .id = V4L2_CID_POWER_LINE_FREQUENCY, .type = V4L2_CTRL_TYPE_MENU, .name = "Light Frequency Filter", .minimum = 1, /* 1 -> 0x50, 2->0x60 */ .maximum = 2, .step = 1, #define FREQ_DEF 1 .default_value = FREQ_DEF, }, .set = sd_setfreq, .get = sd_getfreq}, { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, 
.name = "Auto White Balance", .minimum = 0, .maximum = 1, .step = 1, #define AWB_DEF 0 .default_value = AWB_DEF, }, .set = sd_setawb, .get = sd_getawb }, { { .id = V4L2_CID_SHARPNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Sharpness", .minimum = 0, .maximum = 15, .step = 1, #define SHARPNESS_DEF 0x06 .default_value = SHARPNESS_DEF, }, .set = sd_setsharpness, .get = sd_getsharpness, }, { { .id = V4L2_CID_EFFECTS, .type = V4L2_CTRL_TYPE_MENU, .name = "Webcam Effects", .minimum = 0, .maximum = 4, .step = 1, #define EFFECTS_DEF 0 .default_value = EFFECTS_DEF, }, .set = sd_seteffect, .get = sd_geteffect }, { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 0x10, .maximum = 0x40, .step = 1, #define BLUE_GAIN_DEF 0x20 .default_value = BLUE_GAIN_DEF, }, .set = sd_setblue_gain, .get = sd_getblue_gain, }, { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 0x10, .maximum = 0x40, .step = 1, #define RED_GAIN_DEF 0x20 .default_value = RED_GAIN_DEF, }, .set = sd_setred_gain, .get = sd_getred_gain, }, { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0x10, .maximum = 0x40, .step = 1, #define GAIN_DEF 0x20 .default_value = GAIN_DEF, }, .set = sd_setgain, .get = sd_getgain, }, }; static const struct v4l2_pix_format vga_mode_t16[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 4}, {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 3}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, 
V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; /* sensor specific data */ struct additional_sensor_data { const u8 n3[6]; const u8 *n4, n4sz; const u8 reg80, reg8e; const u8 nset8[6]; const u8 data1[10]; const u8 data2[9]; const u8 data3[9]; const u8 data5[6]; const u8 stream[4]; }; static const u8 n4_om6802[] = { 0x09, 0x01, 0x12, 0x04, 0x66, 0x8a, 0x80, 0x3c, 0x81, 0x22, 0x84, 0x50, 0x8a, 0x78, 0x8b, 0x68, 0x8c, 0x88, 0x8e, 0x33, 0x8f, 0x24, 0xaa, 0xb1, 0xa2, 0x60, 0xa5, 0x30, 0xa6, 0x3a, 0xa8, 0xe8, 0xae, 0x05, 0xb1, 0x00, 0xbb, 0x04, 0xbc, 0x48, 0xbe, 0x36, 0xc6, 0x88, 0xe9, 0x00, 0xc5, 0xc0, 0x65, 0x0a, 0xbb, 0x86, 0xaf, 0x58, 0xb0, 0x68, 0x87, 0x40, 0x89, 0x2b, 0x8d, 0xff, 0x83, 0x40, 0xac, 0x84, 0xad, 0x86, 0xaf, 0x46 }; static const u8 n4_other[] = { 0x66, 0x00, 0x7f, 0x00, 0x80, 0xac, 0x81, 0x69, 0x84, 0x40, 0x85, 0x70, 0x86, 0x20, 0x8a, 0x68, 0x8b, 0x58, 0x8c, 0x88, 0x8d, 0xff, 0x8e, 0xb8, 0x8f, 0x28, 0xa2, 0x60, 0xa5, 0x40, 0xa8, 0xa8, 0xac, 0x84, 0xad, 0x84, 0xae, 0x24, 0xaf, 0x56, 0xb0, 0x68, 0xb1, 0x00, 0xb2, 0x88, 0xbb, 0xc5, 0xbc, 0x4a, 0xbe, 0x36, 0xc2, 0x88, 0xc5, 0xc0, 0xc6, 0xda, 0xe9, 0x26, 0xeb, 0x00 }; static const u8 n4_tas5130a[] = { 0x80, 0x3c, 0x81, 0x68, 0x83, 0xa0, 0x84, 0x20, 0x8a, 0x68, 0x8b, 0x58, 0x8c, 0x88, 0x8e, 0xb4, 0x8f, 0x24, 0xa1, 0xb1, 0xa2, 0x30, 0xa5, 0x10, 0xa6, 0x4a, 0xae, 0x03, 0xb1, 0x44, 0xb2, 0x08, 0xb7, 0x06, 0xb9, 0xe7, 0xbb, 0xc4, 0xbc, 0x4a, 0xbe, 0x36, 0xbf, 0xff, 0xc2, 0x88, 0xc5, 0xc8, 0xc6, 0xda }; static const u8 n4_lt168g[] = { 0x66, 0x01, 0x7f, 0x00, 0x80, 0x7c, 0x81, 0x28, 0x83, 0x44, 0x84, 0x20, 0x86, 0x20, 0x8a, 0x70, 0x8b, 0x58, 0x8c, 0x88, 0x8d, 0xa0, 0x8e, 0xb3, 0x8f, 0x24, 0xa1, 0xb0, 0xa2, 0x38, 0xa5, 0x20, 0xa6, 0x4a, 0xa8, 0xe8, 0xaf, 0x38, 0xb0, 0x68, 0xb1, 0x44, 0xb2, 0x88, 0xbb, 0x86, 0xbd, 0x40, 0xbe, 0x26, 0xc1, 0x05, 0xc2, 0x88, 0xc5, 0xc0, 0xda, 0x8e, 0xdb, 0xca, 0xdc, 0xa8, 0xdd, 0x8c, 0xde, 
0x44, 0xdf, 0x0c, 0xe9, 0x80 }; static const struct additional_sensor_data sensor_data[] = { [SENSOR_OM6802] = { .n3 = {0x61, 0x68, 0x65, 0x0a, 0x60, 0x04}, .n4 = n4_om6802, .n4sz = sizeof n4_om6802, .reg80 = 0x3c, .reg8e = 0x33, .nset8 = {0xa8, 0xf0, 0xc6, 0x88, 0xc0, 0x00}, .data1 = {0xc2, 0x28, 0x0f, 0x22, 0xcd, 0x27, 0x2c, 0x06, 0xb3, 0xfc}, .data2 = {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, 0xff}, .data3 = {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, 0xff}, .data5 = /* this could be removed later */ {0x0c, 0x03, 0xab, 0x13, 0x81, 0x23}, .stream = {0x0b, 0x04, 0x0a, 0x78}, }, [SENSOR_OTHER] = { .n3 = {0x61, 0xc2, 0x65, 0x88, 0x60, 0x00}, .n4 = n4_other, .n4sz = sizeof n4_other, .reg80 = 0xac, .reg8e = 0xb8, .nset8 = {0xa8, 0xa8, 0xc6, 0xda, 0xc0, 0x00}, .data1 = {0xc1, 0x48, 0x04, 0x1b, 0xca, 0x2e, 0x33, 0x3a, 0xe8, 0xfc}, .data2 = {0x4e, 0x9c, 0xec, 0x40, 0x80, 0xc0, 0x48, 0x96, 0xd9}, .data3 = {0x4e, 0x9c, 0xec, 0x40, 0x80, 0xc0, 0x48, 0x96, 0xd9}, .data5 = {0x0c, 0x03, 0xab, 0x29, 0x81, 0x69}, .stream = {0x0b, 0x04, 0x0a, 0x00}, }, [SENSOR_TAS5130A] = { .n3 = {0x61, 0xc2, 0x65, 0x0d, 0x60, 0x08}, .n4 = n4_tas5130a, .n4sz = sizeof n4_tas5130a, .reg80 = 0x3c, .reg8e = 0xb4, .nset8 = {0xa8, 0xf0, 0xc6, 0xda, 0xc0, 0x00}, .data1 = {0xbb, 0x28, 0x10, 0x10, 0xbb, 0x28, 0x1e, 0x27, 0xc8, 0xfc}, .data2 = {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0}, .data3 = {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0}, .data5 = {0x0c, 0x03, 0xab, 0x10, 0x81, 0x20}, .stream = {0x0b, 0x04, 0x0a, 0x40}, }, [SENSOR_LT168G] = { .n3 = {0x61, 0xc2, 0x65, 0x68, 0x60, 0x00}, .n4 = n4_lt168g, .n4sz = sizeof n4_lt168g, .reg80 = 0x7c, .reg8e = 0xb3, .nset8 = {0xa8, 0xf0, 0xc6, 0xba, 0xc0, 0x00}, .data1 = {0xc0, 0x38, 0x08, 0x10, 0xc0, 0x30, 0x10, 0x40, 0xb0, 0xf4}, .data2 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, 0xff}, .data3 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, 0xff}, .data5 = {0x0c, 0x03, 0xab, 0x4b, 0x81, 0x2b}, .stream = {0x0b, 0x04, 
0x0a, 0x28}, }, }; #define MAX_EFFECTS 7 /* easily done by soft, this table could be removed, * i keep it here just in case */ static char *effects_control[MAX_EFFECTS] = { "Normal", "Emboss", /* disabled */ "Monochrome", "Sepia", "Sketch", "Sun Effect", /* disabled */ "Negative", }; static const u8 effects_table[MAX_EFFECTS][6] = { {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x00}, /* Normal */ {0xa8, 0xc8, 0xc6, 0x52, 0xc0, 0x04}, /* Repujar */ {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x20}, /* Monochrome */ {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x80}, /* Sepia */ {0xa8, 0xc8, 0xc6, 0x52, 0xc0, 0x02}, /* Croquis */ {0xa8, 0xc8, 0xc6, 0xd2, 0xc0, 0x10}, /* Sun Effect */ {0xa8, 0xc8, 0xc6, 0xd2, 0xc0, 0x40}, /* Negative */ }; static const u8 gamma_table[GAMMA_MAX][17] = { /* gamma table from cam1690.ini */ {0x00, 0x00, 0x01, 0x04, 0x08, 0x0e, 0x16, 0x21, /* 0 */ 0x2e, 0x3d, 0x50, 0x65, 0x7d, 0x99, 0xb8, 0xdb, 0xff}, {0x00, 0x01, 0x03, 0x08, 0x0e, 0x16, 0x21, 0x2d, /* 1 */ 0x3c, 0x4d, 0x60, 0x75, 0x8d, 0xa6, 0xc2, 0xe1, 0xff}, {0x00, 0x01, 0x05, 0x0b, 0x12, 0x1c, 0x28, 0x35, /* 2 */ 0x45, 0x56, 0x69, 0x7e, 0x95, 0xad, 0xc7, 0xe3, 0xff}, {0x00, 0x02, 0x07, 0x0f, 0x18, 0x24, 0x30, 0x3f, /* 3 */ 0x4f, 0x61, 0x73, 0x88, 0x9d, 0xb4, 0xcd, 0xe6, 0xff}, {0x00, 0x04, 0x0b, 0x15, 0x20, 0x2d, 0x3b, 0x4a, /* 4 */ 0x5b, 0x6c, 0x7f, 0x92, 0xa7, 0xbc, 0xd2, 0xe9, 0xff}, {0x00, 0x07, 0x11, 0x15, 0x20, 0x2d, 0x48, 0x58, /* 5 */ 0x68, 0x79, 0x8b, 0x9d, 0xb0, 0xc4, 0xd7, 0xec, 0xff}, {0x00, 0x0c, 0x1a, 0x29, 0x38, 0x47, 0x57, 0x67, /* 6 */ 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, {0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, /* 7 */ 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff}, {0x00, 0x15, 0x27, 0x38, 0x49, 0x59, 0x69, 0x79, /* 8 */ 0x88, 0x97, 0xa7, 0xb6, 0xc4, 0xd3, 0xe2, 0xf0, 0xff}, {0x00, 0x1c, 0x30, 0x43, 0x54, 0x65, 0x75, 0x84, /* 9 */ 0x93, 0xa1, 0xb0, 0xbd, 0xca, 0xd8, 0xe5, 0xf2, 0xff}, {0x00, 0x24, 0x3b, 0x4f, 0x60, 0x70, 0x80, 0x8e, /* 10 */ 0x9c, 0xaa, 0xb7, 0xc4, 0xd0, 
0xdc, 0xe8, 0xf3, 0xff}, {0x00, 0x2a, 0x3c, 0x5d, 0x6e, 0x7e, 0x8d, 0x9b, /* 11 */ 0xa8, 0xb4, 0xc0, 0xcb, 0xd6, 0xe1, 0xeb, 0xf5, 0xff}, {0x00, 0x3f, 0x5a, 0x6e, 0x7f, 0x8e, 0x9c, 0xa8, /* 12 */ 0xb4, 0xbf, 0xc9, 0xd3, 0xdc, 0xe5, 0xee, 0xf6, 0xff}, {0x00, 0x54, 0x6f, 0x83, 0x93, 0xa0, 0xad, 0xb7, /* 13 */ 0xc2, 0xcb, 0xd4, 0xdc, 0xe4, 0xeb, 0xf2, 0xf9, 0xff}, {0x00, 0x6e, 0x88, 0x9a, 0xa8, 0xb3, 0xbd, 0xc6, /* 14 */ 0xcf, 0xd6, 0xdd, 0xe3, 0xe9, 0xef, 0xf4, 0xfa, 0xff}, {0x00, 0x93, 0xa8, 0xb7, 0xc1, 0xca, 0xd2, 0xd8, /* 15 */ 0xde, 0xe3, 0xe8, 0xed, 0xf1, 0xf5, 0xf8, 0xfc, 0xff} }; static const u8 tas5130a_sensor_init[][8] = { {0x62, 0x08, 0x63, 0x70, 0x64, 0x1d, 0x60, 0x09}, {0x62, 0x20, 0x63, 0x01, 0x64, 0x02, 0x60, 0x09}, {0x62, 0x07, 0x63, 0x03, 0x64, 0x00, 0x60, 0x09}, }; static u8 sensor_reset[] = {0x61, 0x68, 0x62, 0xff, 0x60, 0x07}; /* read 1 byte */ static u8 reg_r(struct gspca_dev *gspca_dev, u16 index) { usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 1, 500); return gspca_dev->usb_buf[0]; } static void reg_w(struct gspca_dev *gspca_dev, u16 index) { usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, NULL, 0, 500); } static void reg_w_buf(struct gspca_dev *gspca_dev, const u8 *buffer, u16 len) { if (len <= USB_BUF_SZ) { memcpy(gspca_dev->usb_buf, buffer, len); usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, gspca_dev->usb_buf, len, 500); } else { u8 *tmpbuf; tmpbuf = kmemdup(buffer, len, GFP_KERNEL); if (!tmpbuf) { pr_err("Out of memory\n"); return; } usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, tmpbuf, len, 500); kfree(tmpbuf); } } /* write values to consecutive registers */ static 
void reg_w_ixbuf(struct gspca_dev *gspca_dev, u8 reg, const u8 *buffer, u16 len) { int i; u8 *p, *tmpbuf; if (len * 2 <= USB_BUF_SZ) { p = tmpbuf = gspca_dev->usb_buf; } else { p = tmpbuf = kmalloc(len * 2, GFP_KERNEL); if (!tmpbuf) { pr_err("Out of memory\n"); return; } } i = len; while (--i >= 0) { *p++ = reg++; *p++ = *buffer++; } usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, tmpbuf, len * 2, 500); if (len * 2 > USB_BUF_SZ) kfree(tmpbuf); } static void om6802_sensor_init(struct gspca_dev *gspca_dev) { int i; const u8 *p; u8 byte; u8 val[6] = {0x62, 0, 0x64, 0, 0x60, 0x05}; static const u8 sensor_init[] = { 0xdf, 0x6d, 0xdd, 0x18, 0x5a, 0xe0, 0x5c, 0x07, 0x5d, 0xb0, 0x5e, 0x1e, 0x60, 0x71, 0xef, 0x00, 0xe9, 0x00, 0xea, 0x00, 0x90, 0x24, 0x91, 0xb2, 0x82, 0x32, 0xfd, 0x41, 0x00 /* table end */ }; reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset); msleep(100); i = 4; while (--i > 0) { byte = reg_r(gspca_dev, 0x0060); if (!(byte & 0x01)) break; msleep(100); } byte = reg_r(gspca_dev, 0x0063); if (byte != 0x17) { pr_err("Bad sensor reset %02x\n", byte); /* continue? 
*/ } p = sensor_init; while (*p != 0) { val[1] = *p++; val[3] = *p++; if (*p == 0) reg_w(gspca_dev, 0x3c80); reg_w_buf(gspca_dev, val, sizeof val); i = 4; while (--i >= 0) { msleep(15); byte = reg_r(gspca_dev, 0x60); if (!(byte & 0x01)) break; } } msleep(15); reg_w(gspca_dev, 0x3c80); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode_t16; cam->nmodes = ARRAY_SIZE(vga_mode_t16); sd->brightness = BRIGHTNESS_DEF; sd->contrast = CONTRAST_DEF; sd->colors = COLORS_DEF; sd->gamma = GAMMA_DEF; sd->autogain = AUTOGAIN_DEF; sd->mirror = MIRROR_DEF; sd->freq = FREQ_DEF; sd->awb = AWB_DEF; sd->sharpness = SHARPNESS_DEF; sd->effect = EFFECTS_DEF; sd->red_gain = RED_GAIN_DEF; sd->blue_gain = BLUE_GAIN_DEF; sd->green_gain = GAIN_DEF * 3 - RED_GAIN_DEF - BLUE_GAIN_DEF; return 0; } static void setbrightness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int brightness; u8 set6[4] = { 0x8f, 0x24, 0xc3, 0x00 }; brightness = sd->brightness; if (brightness < 7) { set6[1] = 0x26; set6[3] = 0x70 - brightness * 0x10; } else { set6[3] = 0x00 + ((brightness - 7) * 0x10); } reg_w_buf(gspca_dev, set6, sizeof set6); } static void setcontrast(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int contrast = sd->contrast; u16 reg_to_write; if (contrast < 7) reg_to_write = 0x8ea9 - contrast * 0x200; else reg_to_write = 0x00a9 + (contrast - 7) * 0x200; reg_w(gspca_dev, reg_to_write); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg_to_write; reg_to_write = 0x80bb + sd->colors * 0x100; /* was 0xc0 */ reg_w(gspca_dev, reg_to_write); } static void setgamma(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; PDEBUG(D_CONF, "Gamma: %d", sd->gamma); reg_w_ixbuf(gspca_dev, 0x90, 
gamma_table[sd->gamma], sizeof gamma_table[0]); } static void setRGB(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 all_gain_reg[6] = {0x87, 0x00, 0x88, 0x00, 0x89, 0x00}; all_gain_reg[1] = sd->red_gain; all_gain_reg[3] = sd->blue_gain; all_gain_reg[5] = sd->green_gain; reg_w_buf(gspca_dev, all_gain_reg, sizeof all_gain_reg); } /* Generic fnc for r/b balance, exposure and awb */ static void setawb(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg80; reg80 = (sensor_data[sd->sensor].reg80 << 8) | 0x80; /* on awb leave defaults values */ if (!sd->awb) { /* shoud we wait here.. */ /* update and reset RGB gains with webcam values */ sd->red_gain = reg_r(gspca_dev, 0x0087); sd->blue_gain = reg_r(gspca_dev, 0x0088); sd->green_gain = reg_r(gspca_dev, 0x0089); reg80 &= ~0x0400; /* AWB off */ } reg_w(gspca_dev, reg80); reg_w(gspca_dev, reg80); } static void init_gains(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg80; u8 all_gain_reg[8] = {0x87, 0x00, 0x88, 0x00, 0x89, 0x00, 0x80, 0x00}; all_gain_reg[1] = sd->red_gain; all_gain_reg[3] = sd->blue_gain; all_gain_reg[5] = sd->green_gain; reg80 = sensor_data[sd->sensor].reg80; if (!sd->awb) reg80 &= ~0x04; all_gain_reg[7] = reg80; reg_w_buf(gspca_dev, all_gain_reg, sizeof all_gain_reg); reg_w(gspca_dev, (sd->red_gain << 8) + 0x87); reg_w(gspca_dev, (sd->blue_gain << 8) + 0x88); reg_w(gspca_dev, (sd->green_gain << 8) + 0x89); } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg_to_write; reg_to_write = 0x0aa6 + 0x1000 * sd->sharpness; reg_w(gspca_dev, reg_to_write); } static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 reg66; u8 freq[4] = { 0x66, 0x00, 0xa8, 0xe8 }; switch (sd->sensor) { case SENSOR_LT168G: if (sd->freq != 0) freq[3] = 0xa8; reg66 = 0x41; break; case SENSOR_OM6802: reg66 = 0xca; break; default: reg66 = 0x40; break; } switch 
(sd->freq) { case 0: /* no flicker */ freq[3] = 0xf0; break; case 2: /* 60Hz */ reg66 &= ~0x40; break; } freq[1] = reg66; reg_w_buf(gspca_dev, freq, sizeof freq); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { /* some of this registers are not really neded, because * they are overriden by setbrigthness, setcontrast, etc, * but wont hurt anyway, and can help someone with similar webcam * to see the initial parameters.*/ struct sd *sd = (struct sd *) gspca_dev; const struct additional_sensor_data *sensor; int i; u16 sensor_id; u8 test_byte = 0; static const u8 read_indexs[] = { 0x0a, 0x0b, 0x66, 0x80, 0x81, 0x8e, 0x8f, 0xa5, 0xa6, 0xa8, 0xbb, 0xbc, 0xc6, 0x00 }; static const u8 n1[] = {0x08, 0x03, 0x09, 0x03, 0x12, 0x04}; static const u8 n2[] = {0x08, 0x00}; sensor_id = (reg_r(gspca_dev, 0x06) << 8) | reg_r(gspca_dev, 0x07); switch (sensor_id & 0xff0f) { case 0x0801: PDEBUG(D_PROBE, "sensor tas5130a"); sd->sensor = SENSOR_TAS5130A; break; case 0x0802: PDEBUG(D_PROBE, "sensor lt168g"); sd->sensor = SENSOR_LT168G; break; case 0x0803: PDEBUG(D_PROBE, "sensor 'other'"); sd->sensor = SENSOR_OTHER; break; case 0x0807: PDEBUG(D_PROBE, "sensor om6802"); sd->sensor = SENSOR_OM6802; break; default: pr_err("unknown sensor %04x\n", sensor_id); return -EINVAL; } if (sd->sensor == SENSOR_OM6802) { reg_w_buf(gspca_dev, n1, sizeof n1); i = 5; while (--i >= 0) { reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset); test_byte = reg_r(gspca_dev, 0x0063); msleep(100); if (test_byte == 0x17) break; /* OK */ } if (i < 0) { pr_err("Bad sensor reset %02x\n", test_byte); return -EIO; } reg_w_buf(gspca_dev, n2, sizeof n2); } i = 0; while (read_indexs[i] != 0x00) { test_byte = reg_r(gspca_dev, read_indexs[i]); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", read_indexs[i], test_byte); i++; } sensor = &sensor_data[sd->sensor]; reg_w_buf(gspca_dev, sensor->n3, sizeof sensor->n3); reg_w_buf(gspca_dev, sensor->n4, sensor->n4sz); if (sd->sensor 
== SENSOR_LT168G) { test_byte = reg_r(gspca_dev, 0x80); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, test_byte); reg_w(gspca_dev, 0x6c80); } reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80); reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80); reg_w(gspca_dev, (sensor->reg8e << 8) + 0x8e); setbrightness(gspca_dev); setcontrast(gspca_dev); setgamma(gspca_dev); setcolors(gspca_dev); setsharpness(gspca_dev); init_gains(gspca_dev); setfreq(gspca_dev); reg_w_buf(gspca_dev, sensor->data5, sizeof sensor->data5); reg_w_buf(gspca_dev, sensor->nset8, sizeof sensor->nset8); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); if (sd->sensor == SENSOR_LT168G) { test_byte = reg_r(gspca_dev, 0x80); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, test_byte); reg_w(gspca_dev, 0x6c80); } reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); return 0; } static void setmirror(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 hflipcmd[8] = {0x62, 0x07, 0x63, 0x03, 0x64, 0x00, 0x60, 0x09}; if (sd->mirror) hflipcmd[3] = 0x01; reg_w_buf(gspca_dev, hflipcmd, sizeof hflipcmd); } static void seteffect(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_buf(gspca_dev, effects_table[sd->effect], sizeof effects_table[0]); if (sd->effect == 1 || sd->effect == 5) { PDEBUG(D_CONF, "This effect have been disabled for webcam \"safety\""); return; } if (sd->effect == 1 || sd->effect == 4) reg_w(gspca_dev, 0x4aa6); else reg_w(gspca_dev, 0xfaa6); } /* Is this really needed? 
* i added some module parameters for test with some users */ static void poll_sensor(struct gspca_dev *gspca_dev) { static const u8 poll1[] = {0x67, 0x05, 0x68, 0x81, 0x69, 0x80, 0x6a, 0x82, 0x6b, 0x68, 0x6c, 0x69, 0x72, 0xd9, 0x73, 0x34, 0x74, 0x32, 0x75, 0x92, 0x76, 0x00, 0x09, 0x01, 0x60, 0x14}; static const u8 poll2[] = {0x67, 0x02, 0x68, 0x71, 0x69, 0x72, 0x72, 0xa9, 0x73, 0x02, 0x73, 0x02, 0x60, 0x14}; static const u8 noise03[] = /* (some differences / ms-drv) */ {0xa6, 0x0a, 0xea, 0xcf, 0xbe, 0x26, 0xb1, 0x5f, 0xa1, 0xb1, 0xda, 0x6b, 0xdb, 0x98, 0xdf, 0x0c, 0xc2, 0x80, 0xc3, 0x10}; PDEBUG(D_STREAM, "[Sensor requires polling]"); reg_w_buf(gspca_dev, poll1, sizeof poll1); reg_w_buf(gspca_dev, poll2, sizeof poll2); reg_w_buf(gspca_dev, noise03, sizeof noise03); } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const struct additional_sensor_data *sensor; int i, mode; u8 t2[] = { 0x07, 0x00, 0x0d, 0x60, 0x0e, 0x80 }; static const u8 t3[] = { 0x07, 0x00, 0x88, 0x02, 0x06, 0x00, 0xe7, 0x01 }; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; switch (mode) { case 0: /* 640x480 (0x00) */ break; case 1: /* 352x288 */ t2[1] = 0x40; break; case 2: /* 320x240 */ t2[1] = 0x10; break; case 3: /* 176x144 */ t2[1] = 0x50; break; default: /* case 4: * 160x120 */ t2[1] = 0x20; break; } switch (sd->sensor) { case SENSOR_OM6802: om6802_sensor_init(gspca_dev); break; case SENSOR_TAS5130A: i = 0; for (;;) { reg_w_buf(gspca_dev, tas5130a_sensor_init[i], sizeof tas5130a_sensor_init[0]); if (i >= ARRAY_SIZE(tas5130a_sensor_init) - 1) break; i++; } reg_w(gspca_dev, 0x3c80); /* just in case and to keep sync with logs (for mine) */ reg_w_buf(gspca_dev, tas5130a_sensor_init[i], sizeof tas5130a_sensor_init[0]); reg_w(gspca_dev, 0x3c80); break; } sensor = &sensor_data[sd->sensor]; setfreq(gspca_dev); reg_r(gspca_dev, 0x0012); reg_w_buf(gspca_dev, t2, sizeof t2); reg_w_ixbuf(gspca_dev, 0xb3, t3, sizeof t3); reg_w(gspca_dev, 0x0013); 
msleep(15); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); if (sd->sensor == SENSOR_OM6802) poll_sensor(gspca_dev); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream, sizeof sensor_data[sd->sensor].stream); reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream, sizeof sensor_data[sd->sensor].stream); if (sd->sensor == SENSOR_OM6802) { msleep(20); reg_w(gspca_dev, 0x0309); } } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { int pkt_type; if (data[0] == 0x5a) { /* Control Packet, after this came the header again, * but extra bytes came in the packet before this, * sometimes an EOF arrives, sometimes not... */ return; } data += 2; len -= 2; if (data[0] == 0xff && data[1] == 0xd8) pkt_type = FIRST_PACKET; else if (data[len - 2] == 0xff && data[len - 1] == 0xd9) pkt_type = LAST_PACKET; else pkt_type = INTER_PACKET; gspca_frame_add(gspca_dev, pkt_type, data, len); } static int sd_setblue_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->blue_gain = val; if (gspca_dev->streaming) reg_w(gspca_dev, (val << 8) + 0x88); return 0; } static int sd_getblue_gain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->blue_gain; return 0; } static int sd_setred_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->red_gain = val; if (gspca_dev->streaming) reg_w(gspca_dev, (val << 8) + 0x87); return 0; } static int sd_getred_gain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->red_gain; return 0; } static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u16 psg, nsg; psg = sd->red_gain + sd->blue_gain + sd->green_gain; nsg = 
val * 3; sd->red_gain = sd->red_gain * nsg / psg; if (sd->red_gain > 0x40) sd->red_gain = 0x40; else if (sd->red_gain < 0x10) sd->red_gain = 0x10; sd->blue_gain = sd->blue_gain * nsg / psg; if (sd->blue_gain > 0x40) sd->blue_gain = 0x40; else if (sd->blue_gain < 0x10) sd->blue_gain = 0x10; sd->green_gain = sd->green_gain * nsg / psg; if (sd->green_gain > 0x40) sd->green_gain = 0x40; else if (sd->green_gain < 0x10) sd->green_gain = 0x10; if (gspca_dev->streaming) setRGB(gspca_dev); return 0; } static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = (sd->red_gain + sd->blue_gain + sd->green_gain) / 3; return 0; } static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->brightness = val; if (gspca_dev->streaming) setbrightness(gspca_dev); return 0; } static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->brightness; return *val; } static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->awb = val; if (gspca_dev->streaming) setawb(gspca_dev); return 0; } static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->awb; return *val; } static int sd_setmirror(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->mirror = val; if (gspca_dev->streaming) setmirror(gspca_dev); return 0; } static int sd_getmirror(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->mirror; return *val; } static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->effect = val; if (gspca_dev->streaming) seteffect(gspca_dev); return 0; } static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->effect; return *val; } static int 
sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->contrast = val; if (gspca_dev->streaming) setcontrast(gspca_dev); return 0; } static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->contrast; return *val; } static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->colors = val; if (gspca_dev->streaming) setcolors(gspca_dev); return 0; } static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->colors; return 0; } static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->gamma = val; if (gspca_dev->streaming) setgamma(gspca_dev); return 0; } static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->gamma; return 0; } static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->freq = val; if (gspca_dev->streaming) setfreq(gspca_dev); return 0; } static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->freq; return 0; } static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->sharpness = val; if (gspca_dev->streaming) setsharpness(gspca_dev); return 0; } static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->sharpness; return 0; } /* Low Light set here......*/ static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->autogain = val; if (val != 0) reg_w(gspca_dev, 0xf48e); else reg_w(gspca_dev, 0xb48e); return 0; } static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->autogain; 
return 0; } static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { static const char *freq_nm[3] = {"NoFliker", "50 Hz", "60 Hz"}; switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: if ((unsigned) menu->index >= ARRAY_SIZE(freq_nm)) break; strcpy((char *) menu->name, freq_nm[menu->index]); return 0; case V4L2_CID_EFFECTS: if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) { strlcpy((char *) menu->name, effects_control[menu->index], sizeof menu->name); return 0; } break; } return -EINVAL; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .querymenu = sd_querymenu, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x17a1, 0x0128)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { return usb_register(&sd_driver); } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
netarchy/android-git-kernel
net/netfilter/ipvs/ip_vs_proto.c
162
6158
/* * ip_vs_proto.c: transport protocol load balancing support for IPVS * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Julian Anastasov <ja@ssi.bg> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <linux/in.h> #include <linux/ip.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/udp.h> #include <asm/system.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <net/ip_vs.h> /* * IPVS protocols can only be registered/unregistered when the ipvs * module is loaded/unloaded, so no lock is needed in accessing the * ipvs protocol table. */ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ #define IP_VS_PROTO_HASH(proto) ((proto) & (IP_VS_PROTO_TAB_SIZE-1)) static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; /* * register an ipvs protocol */ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) { unsigned hash = IP_VS_PROTO_HASH(pp->protocol); pp->next = ip_vs_proto_table[hash]; ip_vs_proto_table[hash] = pp; if (pp->init != NULL) pp->init(pp); return 0; } /* * unregister an ipvs protocol */ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) { struct ip_vs_protocol **pp_p; unsigned hash = IP_VS_PROTO_HASH(pp->protocol); pp_p = &ip_vs_proto_table[hash]; for (; *pp_p; pp_p = &(*pp_p)->next) { if (*pp_p == pp) { *pp_p = pp->next; if (pp->exit != NULL) pp->exit(pp); return 0; } } return -ESRCH; } /* * get ip_vs_protocol object by its proto. 
*/ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) { struct ip_vs_protocol *pp; unsigned hash = IP_VS_PROTO_HASH(proto); for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { if (pp->protocol == proto) return pp; } return NULL; } EXPORT_SYMBOL(ip_vs_proto_get); /* * Propagate event for state change to all protocols */ void ip_vs_protocol_timeout_change(int flags) { struct ip_vs_protocol *pp; int i; for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) { if (pp->timeout_change) pp->timeout_change(pp, flags); } } } int * ip_vs_create_timeout_table(int *table, int size) { return kmemdup(table, size, GFP_ATOMIC); } /* * Set timeout value for state specified by name */ int ip_vs_set_state_timeout(int *table, int num, const char *const *names, const char *name, int to) { int i; if (!table || !name || !to) return -EINVAL; for (i = 0; i < num; i++) { if (strcmp(names[i], name)) continue; table[i] = to * HZ; return 0; } return -ENOENT; } const char * ip_vs_state_name(__u16 proto, int state) { struct ip_vs_protocol *pp = ip_vs_proto_get(proto); if (pp == NULL || pp->state_name == NULL) return (IPPROTO_IP == proto) ? 
"NONE" : "ERR!"; return pp->state_name(state); } static void ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { char buf[128]; struct iphdr _iph, *ih; ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); if (ih == NULL) sprintf(buf, "TRUNCATED"); else if (ih->frag_off & htons(IP_OFFSET)) sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr); else { __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, offset + ih->ihl*4, sizeof(_ports), _ports); if (pptr == NULL) sprintf(buf, "TRUNCATED %pI4->%pI4", &ih->saddr, &ih->daddr); else sprintf(buf, "%pI4:%u->%pI4:%u", &ih->saddr, ntohs(pptr[0]), &ih->daddr, ntohs(pptr[1])); } pr_debug("%s: %s %s\n", msg, pp->name, buf); } #ifdef CONFIG_IP_VS_IPV6 static void ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { char buf[192]; struct ipv6hdr _iph, *ih; ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); if (ih == NULL) sprintf(buf, "TRUNCATED"); else if (ih->nexthdr == IPPROTO_FRAGMENT) sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr); else { __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), sizeof(_ports), _ports); if (pptr == NULL) sprintf(buf, "TRUNCATED %pI6->%pI6", &ih->saddr, &ih->daddr); else sprintf(buf, "%pI6:%u->%pI6:%u", &ih->saddr, ntohs(pptr[0]), &ih->daddr, ntohs(pptr[1])); } pr_debug("%s: %s %s\n", msg, pp->name, buf); } #endif void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg); else #endif ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg); } int __init ip_vs_protocol_init(void) { char protocols[64]; #define REGISTER_PROTOCOL(p) \ do { \ register_ip_vs_protocol(p); \ strcat(protocols, ", "); \ strcat(protocols, (p)->name); \ } while (0) protocols[0] = '\0'; 
protocols[2] = '\0'; #ifdef CONFIG_IP_VS_PROTO_TCP REGISTER_PROTOCOL(&ip_vs_protocol_tcp); #endif #ifdef CONFIG_IP_VS_PROTO_UDP REGISTER_PROTOCOL(&ip_vs_protocol_udp); #endif #ifdef CONFIG_IP_VS_PROTO_SCTP REGISTER_PROTOCOL(&ip_vs_protocol_sctp); #endif #ifdef CONFIG_IP_VS_PROTO_AH REGISTER_PROTOCOL(&ip_vs_protocol_ah); #endif #ifdef CONFIG_IP_VS_PROTO_ESP REGISTER_PROTOCOL(&ip_vs_protocol_esp); #endif pr_info("Registered protocols (%s)\n", &protocols[2]); return 0; } void ip_vs_protocol_cleanup(void) { struct ip_vs_protocol *pp; int i; /* unregister all the ipvs protocols */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pp = ip_vs_proto_table[i]) != NULL) unregister_ip_vs_protocol(pp); } }
gpl-2.0
zhuowei/dolphin
Externals/wxWidgets3/src/common/geometry.cpp
162
8961
///////////////////////////////////////////////////////////////////////////// // Name: src/common/geometry.cpp // Purpose: Common Geometry Classes // Author: Stefan Csomor // Modified by: // Created: 08/05/99 // Copyright: (c) 1999 Stefan Csomor // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// // For compilers that support precompilation, includes "wx.h". #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #if wxUSE_GEOMETRY #include "wx/geometry.h" #ifndef WX_PRECOMP #include "wx/log.h" #endif #include <string.h> #include "wx/datstrm.h" // // wxPoint2D // // // wxRect2D // // wxDouble version // for the following calculations always remember // that the right and bottom edges are not part of a rect bool wxRect2DDouble::Intersects( const wxRect2DDouble &rect ) const { wxDouble left,right,bottom,top; left = wxMax ( m_x , rect.m_x ); right = wxMin ( m_x+m_width, rect.m_x + rect.m_width ); top = wxMax ( m_y , rect.m_y ); bottom = wxMin ( m_y+m_height, rect.m_y + rect.m_height ); if ( left < right && top < bottom ) { return true; } return false; } void wxRect2DDouble::Intersect( const wxRect2DDouble &src1 , const wxRect2DDouble &src2 , wxRect2DDouble *dest ) { wxDouble left,right,bottom,top; left = wxMax ( src1.m_x , src2.m_x ); right = wxMin ( src1.m_x+src1.m_width, src2.m_x + src2.m_width ); top = wxMax ( src1.m_y , src2.m_y ); bottom = wxMin ( src1.m_y+src1.m_height, src2.m_y + src2.m_height ); if ( left < right && top < bottom ) { dest->m_x = left; dest->m_y = top; dest->m_width = right - left; dest->m_height = bottom - top; } else { dest->m_width = dest->m_height = 0; } } void wxRect2DDouble::Union( const wxRect2DDouble &src1 , const wxRect2DDouble &src2 , wxRect2DDouble *dest ) { wxDouble left,right,bottom,top; left = wxMin ( src1.m_x , src2.m_x ); right = wxMax ( src1.m_x+src1.m_width, src2.m_x + src2.m_width ); top = wxMin ( src1.m_y , src2.m_y ); bottom = wxMax ( src1.m_y+src1.m_height, 
src2.m_y + src2.m_height ); dest->m_x = left; dest->m_y = top; dest->m_width = right - left; dest->m_height = bottom - top; } void wxRect2DDouble::Union( const wxPoint2DDouble &pt ) { wxDouble x = pt.m_x; wxDouble y = pt.m_y; if ( x < m_x ) { SetLeft( x ); } else if ( x < m_x + m_width ) { // contained } else { SetRight( x ); } if ( y < m_y ) { SetTop( y ); } else if ( y < m_y + m_height ) { // contained } else { SetBottom( y ); } } void wxRect2DDouble::ConstrainTo( const wxRect2DDouble &rect ) { if ( GetLeft() < rect.GetLeft() ) SetLeft( rect.GetLeft() ); if ( GetRight() > rect.GetRight() ) SetRight( rect.GetRight() ); if ( GetBottom() > rect.GetBottom() ) SetBottom( rect.GetBottom() ); if ( GetTop() < rect.GetTop() ) SetTop( rect.GetTop() ); } wxRect2DDouble& wxRect2DDouble::operator=( const wxRect2DDouble &r ) { m_x = r.m_x; m_y = r.m_y; m_width = r.m_width; m_height = r.m_height; return *this; } // integer version // for the following calculations always remember // that the right and bottom edges are not part of a rect // wxPoint2D #if wxUSE_STREAMS void wxPoint2DInt::WriteTo( wxDataOutputStream &stream ) const { stream.Write32( m_x ); stream.Write32( m_y ); } void wxPoint2DInt::ReadFrom( wxDataInputStream &stream ) { m_x = stream.Read32(); m_y = stream.Read32(); } #endif // wxUSE_STREAMS wxDouble wxPoint2DInt::GetVectorAngle() const { if ( m_x == 0 ) { if ( m_y >= 0 ) return 90; else return 270; } if ( m_y == 0 ) { if ( m_x >= 0 ) return 0; else return 180; } // casts needed for MIPSpro compiler under SGI wxDouble deg = atan2( (double)m_y , (double)m_x ) * 180 / M_PI; if ( deg < 0 ) { deg += 360; } return deg; } void wxPoint2DInt::SetVectorAngle( wxDouble degrees ) { wxDouble length = GetVectorLength(); m_x = (int)(length * cos( degrees / 180 * M_PI )); m_y = (int)(length * sin( degrees / 180 * M_PI )); } wxDouble wxPoint2DDouble::GetVectorAngle() const { if ( wxIsNullDouble(m_x) ) { if ( m_y >= 0 ) return 90; else return 270; } if ( wxIsNullDouble(m_y) ) { 
if ( m_x >= 0 ) return 0; else return 180; } wxDouble deg = atan2( m_y , m_x ) * 180 / M_PI; if ( deg < 0 ) { deg += 360; } return deg; } void wxPoint2DDouble::SetVectorAngle( wxDouble degrees ) { wxDouble length = GetVectorLength(); m_x = length * cos( degrees / 180 * M_PI ); m_y = length * sin( degrees / 180 * M_PI ); } // wxRect2D bool wxRect2DInt::Intersects( const wxRect2DInt &rect ) const { wxInt32 left,right,bottom,top; left = wxMax ( m_x , rect.m_x ); right = wxMin ( m_x+m_width, rect.m_x + rect.m_width ); top = wxMax ( m_y , rect.m_y ); bottom = wxMin ( m_y+m_height, rect.m_y + rect.m_height ); if ( left < right && top < bottom ) { return true; } return false; } void wxRect2DInt::Intersect( const wxRect2DInt &src1 , const wxRect2DInt &src2 , wxRect2DInt *dest ) { wxInt32 left,right,bottom,top; left = wxMax ( src1.m_x , src2.m_x ); right = wxMin ( src1.m_x+src1.m_width, src2.m_x + src2.m_width ); top = wxMax ( src1.m_y , src2.m_y ); bottom = wxMin ( src1.m_y+src1.m_height, src2.m_y + src2.m_height ); if ( left < right && top < bottom ) { dest->m_x = left; dest->m_y = top; dest->m_width = right - left; dest->m_height = bottom - top; } else { dest->m_width = dest->m_height = 0; } } void wxRect2DInt::Union( const wxRect2DInt &src1 , const wxRect2DInt &src2 , wxRect2DInt *dest ) { wxInt32 left,right,bottom,top; left = wxMin ( src1.m_x , src2.m_x ); right = wxMax ( src1.m_x+src1.m_width, src2.m_x + src2.m_width ); top = wxMin ( src1.m_y , src2.m_y ); bottom = wxMax ( src1.m_y+src1.m_height, src2.m_y + src2.m_height ); dest->m_x = left; dest->m_y = top; dest->m_width = right - left; dest->m_height = bottom - top; } void wxRect2DInt::Union( const wxPoint2DInt &pt ) { wxInt32 x = pt.m_x; wxInt32 y = pt.m_y; if ( x < m_x ) { SetLeft( x ); } else if ( x < m_x + m_width ) { // contained } else { SetRight( x ); } if ( y < m_y ) { SetTop( y ); } else if ( y < m_y + m_height ) { // contained } else { SetBottom( y ); } } void wxRect2DInt::ConstrainTo( const wxRect2DInt 
&rect ) { if ( GetLeft() < rect.GetLeft() ) SetLeft( rect.GetLeft() ); if ( GetRight() > rect.GetRight() ) SetRight( rect.GetRight() ); if ( GetBottom() > rect.GetBottom() ) SetBottom( rect.GetBottom() ); if ( GetTop() < rect.GetTop() ) SetTop( rect.GetTop() ); } wxRect2DInt& wxRect2DInt::operator=( const wxRect2DInt &r ) { m_x = r.m_x; m_y = r.m_y; m_width = r.m_width; m_height = r.m_height; return *this; } #if wxUSE_STREAMS void wxRect2DInt::WriteTo( wxDataOutputStream &stream ) const { stream.Write32( m_x ); stream.Write32( m_y ); stream.Write32( m_width ); stream.Write32( m_height ); } void wxRect2DInt::ReadFrom( wxDataInputStream &stream ) { m_x = stream.Read32(); m_y = stream.Read32(); m_width = stream.Read32(); m_height = stream.Read32(); } #endif // wxUSE_STREAMS // wxTransform2D void wxTransform2D::Transform( wxRect2DInt* r ) const { wxPoint2DInt a = r->GetLeftTop(), b = r->GetRightBottom(); Transform( &a ); Transform( &b ); *r = wxRect2DInt( a, b ); } wxPoint2DInt wxTransform2D::Transform( const wxPoint2DInt &pt ) const { wxPoint2DInt res = pt; Transform( &res ); return res; } wxRect2DInt wxTransform2D::Transform( const wxRect2DInt &r ) const { wxRect2DInt res = r; Transform( &res ); return res; } void wxTransform2D::InverseTransform( wxRect2DInt* r ) const { wxPoint2DInt a = r->GetLeftTop(), b = r->GetRightBottom(); InverseTransform( &a ); InverseTransform( &b ); *r = wxRect2DInt( a , b ); } wxPoint2DInt wxTransform2D::InverseTransform( const wxPoint2DInt &pt ) const { wxPoint2DInt res = pt; InverseTransform( &res ); return res; } wxRect2DInt wxTransform2D::InverseTransform( const wxRect2DInt &r ) const { wxRect2DInt res = r; InverseTransform( &res ); return res; } #endif // wxUSE_GEOMETRY
gpl-2.0
Zenfone2-Dev/kernel_4.3.y
drivers/mfd/wm831x-irq.c
674
15657
/* * wm831x-irq.c -- Interrupt controller support for Wolfson WM831x PMICs * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/mfd/core.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/gpio.h> #include <linux/mfd/wm831x/irq.h> #include <linux/delay.h> struct wm831x_irq_data { int primary; int reg; int mask; }; static struct wm831x_irq_data wm831x_irqs[] = { [WM831X_IRQ_TEMP_THW] = { .primary = WM831X_TEMP_INT, .reg = 1, .mask = WM831X_TEMP_THW_EINT, }, [WM831X_IRQ_GPIO_1] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP1_EINT, }, [WM831X_IRQ_GPIO_2] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP2_EINT, }, [WM831X_IRQ_GPIO_3] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP3_EINT, }, [WM831X_IRQ_GPIO_4] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP4_EINT, }, [WM831X_IRQ_GPIO_5] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP5_EINT, }, [WM831X_IRQ_GPIO_6] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP6_EINT, }, [WM831X_IRQ_GPIO_7] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP7_EINT, }, [WM831X_IRQ_GPIO_8] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP8_EINT, }, [WM831X_IRQ_GPIO_9] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP9_EINT, }, [WM831X_IRQ_GPIO_10] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP10_EINT, }, [WM831X_IRQ_GPIO_11] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP11_EINT, }, [WM831X_IRQ_GPIO_12] = { .primary 
= WM831X_GP_INT, .reg = 5, .mask = WM831X_GP12_EINT, }, [WM831X_IRQ_GPIO_13] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP13_EINT, }, [WM831X_IRQ_GPIO_14] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP14_EINT, }, [WM831X_IRQ_GPIO_15] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP15_EINT, }, [WM831X_IRQ_GPIO_16] = { .primary = WM831X_GP_INT, .reg = 5, .mask = WM831X_GP16_EINT, }, [WM831X_IRQ_ON] = { .primary = WM831X_ON_PIN_INT, .reg = 1, .mask = WM831X_ON_PIN_EINT, }, [WM831X_IRQ_PPM_SYSLO] = { .primary = WM831X_PPM_INT, .reg = 1, .mask = WM831X_PPM_SYSLO_EINT, }, [WM831X_IRQ_PPM_PWR_SRC] = { .primary = WM831X_PPM_INT, .reg = 1, .mask = WM831X_PPM_PWR_SRC_EINT, }, [WM831X_IRQ_PPM_USB_CURR] = { .primary = WM831X_PPM_INT, .reg = 1, .mask = WM831X_PPM_USB_CURR_EINT, }, [WM831X_IRQ_WDOG_TO] = { .primary = WM831X_WDOG_INT, .reg = 1, .mask = WM831X_WDOG_TO_EINT, }, [WM831X_IRQ_RTC_PER] = { .primary = WM831X_RTC_INT, .reg = 1, .mask = WM831X_RTC_PER_EINT, }, [WM831X_IRQ_RTC_ALM] = { .primary = WM831X_RTC_INT, .reg = 1, .mask = WM831X_RTC_ALM_EINT, }, [WM831X_IRQ_CHG_BATT_HOT] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_BATT_HOT_EINT, }, [WM831X_IRQ_CHG_BATT_COLD] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_BATT_COLD_EINT, }, [WM831X_IRQ_CHG_BATT_FAIL] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_BATT_FAIL_EINT, }, [WM831X_IRQ_CHG_OV] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_OV_EINT, }, [WM831X_IRQ_CHG_END] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_END_EINT, }, [WM831X_IRQ_CHG_TO] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_TO_EINT, }, [WM831X_IRQ_CHG_MODE] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_MODE_EINT, }, [WM831X_IRQ_CHG_START] = { .primary = WM831X_CHG_INT, .reg = 2, .mask = WM831X_CHG_START_EINT, }, [WM831X_IRQ_TCHDATA] = { .primary = WM831X_TCHDATA_INT, .reg = 1, .mask = WM831X_TCHDATA_EINT, }, [WM831X_IRQ_TCHPD] 
= { .primary = WM831X_TCHPD_INT, .reg = 1, .mask = WM831X_TCHPD_EINT, }, [WM831X_IRQ_AUXADC_DATA] = { .primary = WM831X_AUXADC_INT, .reg = 1, .mask = WM831X_AUXADC_DATA_EINT, }, [WM831X_IRQ_AUXADC_DCOMP1] = { .primary = WM831X_AUXADC_INT, .reg = 1, .mask = WM831X_AUXADC_DCOMP1_EINT, }, [WM831X_IRQ_AUXADC_DCOMP2] = { .primary = WM831X_AUXADC_INT, .reg = 1, .mask = WM831X_AUXADC_DCOMP2_EINT, }, [WM831X_IRQ_AUXADC_DCOMP3] = { .primary = WM831X_AUXADC_INT, .reg = 1, .mask = WM831X_AUXADC_DCOMP3_EINT, }, [WM831X_IRQ_AUXADC_DCOMP4] = { .primary = WM831X_AUXADC_INT, .reg = 1, .mask = WM831X_AUXADC_DCOMP4_EINT, }, [WM831X_IRQ_CS1] = { .primary = WM831X_CS_INT, .reg = 2, .mask = WM831X_CS1_EINT, }, [WM831X_IRQ_CS2] = { .primary = WM831X_CS_INT, .reg = 2, .mask = WM831X_CS2_EINT, }, [WM831X_IRQ_HC_DC1] = { .primary = WM831X_HC_INT, .reg = 4, .mask = WM831X_HC_DC1_EINT, }, [WM831X_IRQ_HC_DC2] = { .primary = WM831X_HC_INT, .reg = 4, .mask = WM831X_HC_DC2_EINT, }, [WM831X_IRQ_UV_LDO1] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO1_EINT, }, [WM831X_IRQ_UV_LDO2] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO2_EINT, }, [WM831X_IRQ_UV_LDO3] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO3_EINT, }, [WM831X_IRQ_UV_LDO4] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO4_EINT, }, [WM831X_IRQ_UV_LDO5] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO5_EINT, }, [WM831X_IRQ_UV_LDO6] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO6_EINT, }, [WM831X_IRQ_UV_LDO7] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO7_EINT, }, [WM831X_IRQ_UV_LDO8] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO8_EINT, }, [WM831X_IRQ_UV_LDO9] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO9_EINT, }, [WM831X_IRQ_UV_LDO10] = { .primary = WM831X_UV_INT, .reg = 3, .mask = WM831X_UV_LDO10_EINT, }, [WM831X_IRQ_UV_DC1] = { .primary = WM831X_UV_INT, .reg = 4, .mask = WM831X_UV_DC1_EINT, }, 
[WM831X_IRQ_UV_DC2] = { .primary = WM831X_UV_INT, .reg = 4, .mask = WM831X_UV_DC2_EINT, }, [WM831X_IRQ_UV_DC3] = { .primary = WM831X_UV_INT, .reg = 4, .mask = WM831X_UV_DC3_EINT, }, [WM831X_IRQ_UV_DC4] = { .primary = WM831X_UV_INT, .reg = 4, .mask = WM831X_UV_DC4_EINT, }, }; static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data) { return WM831X_INTERRUPT_STATUS_1 - 1 + irq_data->reg; } static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x, int irq) { return &wm831x_irqs[irq]; } static void wm831x_irq_lock(struct irq_data *data) { struct wm831x *wm831x = irq_data_get_irq_chip_data(data); mutex_lock(&wm831x->irq_lock); } static void wm831x_irq_sync_unlock(struct irq_data *data) { struct wm831x *wm831x = irq_data_get_irq_chip_data(data); int i; for (i = 0; i < ARRAY_SIZE(wm831x->gpio_update); i++) { if (wm831x->gpio_update[i]) { wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + i, WM831X_GPN_INT_MODE | WM831X_GPN_POL, wm831x->gpio_update[i]); wm831x->gpio_update[i] = 0; } } for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { /* If there's been a change in the mask write it back * to the hardware. 
*/ if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) { dev_dbg(wm831x->dev, "IRQ mask sync: %x = %x\n", WM831X_INTERRUPT_STATUS_1_MASK + i, wm831x->irq_masks_cur[i]); wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i]; wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, wm831x->irq_masks_cur[i]); } } mutex_unlock(&wm831x->irq_lock); } static void wm831x_irq_enable(struct irq_data *data) { struct wm831x *wm831x = irq_data_get_irq_chip_data(data); struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, data->hwirq); wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; } static void wm831x_irq_disable(struct irq_data *data) { struct wm831x *wm831x = irq_data_get_irq_chip_data(data); struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, data->hwirq); wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; } static int wm831x_irq_set_type(struct irq_data *data, unsigned int type) { struct wm831x *wm831x = irq_data_get_irq_chip_data(data); int irq; irq = data->hwirq; if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { /* Ignore internal-only IRQs */ if (irq >= 0 && irq < WM831X_NUM_IRQS) return 0; else return -EINVAL; } /* Rebase the IRQ into the GPIO range so we've got a sensible array * index. */ irq -= WM831X_IRQ_GPIO_1; /* We set the high bit to flag that we need an update; don't * do the update here as we can be called with the bus lock * held. 
*/ wm831x->gpio_level_low[irq] = false; wm831x->gpio_level_high[irq] = false; switch (type) { case IRQ_TYPE_EDGE_BOTH: wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE; break; case IRQ_TYPE_EDGE_RISING: wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL; break; case IRQ_TYPE_EDGE_FALLING: wm831x->gpio_update[irq] = 0x10000; break; case IRQ_TYPE_LEVEL_HIGH: wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL; wm831x->gpio_level_high[irq] = true; break; case IRQ_TYPE_LEVEL_LOW: wm831x->gpio_update[irq] = 0x10000; wm831x->gpio_level_low[irq] = true; break; default: return -EINVAL; } return 0; } static struct irq_chip wm831x_irq_chip = { .name = "wm831x", .irq_bus_lock = wm831x_irq_lock, .irq_bus_sync_unlock = wm831x_irq_sync_unlock, .irq_disable = wm831x_irq_disable, .irq_enable = wm831x_irq_enable, .irq_set_type = wm831x_irq_set_type, }; /* The processing of the primary interrupt occurs in a thread so that * we can interact with the device over I2C or SPI. */ static irqreturn_t wm831x_irq_thread(int irq, void *data) { struct wm831x *wm831x = data; unsigned int i; int primary, status_addr, ret; int status_regs[WM831X_NUM_IRQ_REGS] = { 0 }; int read[WM831X_NUM_IRQ_REGS] = { 0 }; int *status; primary = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS); if (primary < 0) { dev_err(wm831x->dev, "Failed to read system interrupt: %d\n", primary); goto out; } /* The touch interrupts are visible in the primary register as * an optimisation; open code this to avoid complicating the * main handling loop and so we can also skip iterating the * descriptors. 
*/ if (primary & WM831X_TCHPD_INT) handle_nested_irq(irq_find_mapping(wm831x->irq_domain, WM831X_IRQ_TCHPD)); if (primary & WM831X_TCHDATA_INT) handle_nested_irq(irq_find_mapping(wm831x->irq_domain, WM831X_IRQ_TCHDATA)); primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT); for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) { int offset = wm831x_irqs[i].reg - 1; if (!(primary & wm831x_irqs[i].primary)) continue; status = &status_regs[offset]; /* Hopefully there should only be one register to read * each time otherwise we ought to do a block read. */ if (!read[offset]) { status_addr = irq_data_to_status_reg(&wm831x_irqs[i]); *status = wm831x_reg_read(wm831x, status_addr); if (*status < 0) { dev_err(wm831x->dev, "Failed to read IRQ status: %d\n", *status); goto out; } read[offset] = 1; /* Ignore any bits that we don't think are masked */ *status &= ~wm831x->irq_masks_cur[offset]; /* Acknowledge now so we don't miss * notifications while we handle. */ wm831x_reg_write(wm831x, status_addr, *status); } if (*status & wm831x_irqs[i].mask) handle_nested_irq(irq_find_mapping(wm831x->irq_domain, i)); /* Simulate an edge triggered IRQ by polling the input * status. This is sucky but improves interoperability. 
*/ if (primary == WM831X_GP_INT && wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) { ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) { handle_nested_irq(irq_find_mapping(wm831x->irq_domain, i)); ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); } } if (primary == WM831X_GP_INT && wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) { ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) { handle_nested_irq(irq_find_mapping(wm831x->irq_domain, i)); ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); } } } out: return IRQ_HANDLED; } static int wm831x_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq); irq_set_nested_thread(virq, 1); irq_set_noprobe(virq); return 0; } static const struct irq_domain_ops wm831x_irq_domain_ops = { .map = wm831x_irq_map, .xlate = irq_domain_xlate_twocell, }; int wm831x_irq_init(struct wm831x *wm831x, int irq) { struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev); struct irq_domain *domain; int i, ret, irq_base; mutex_init(&wm831x->irq_lock); /* Mask the individual interrupt sources */ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { wm831x->irq_masks_cur[i] = 0xffff; wm831x->irq_masks_cache[i] = 0xffff; wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, 0xffff); } /* Try to dynamically allocate IRQs if no base is specified */ if (pdata && pdata->irq_base) { irq_base = irq_alloc_descs(pdata->irq_base, 0, WM831X_NUM_IRQS, 0); if (irq_base < 0) { dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n", irq_base); irq_base = 0; } } else { irq_base = 0; } if (irq_base) domain = irq_domain_add_legacy(wm831x->dev->of_node, ARRAY_SIZE(wm831x_irqs), irq_base, 0, &wm831x_irq_domain_ops, wm831x); else domain = irq_domain_add_linear(wm831x->dev->of_node, ARRAY_SIZE(wm831x_irqs), &wm831x_irq_domain_ops, wm831x); if (!domain) { 
dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n"); return -EINVAL; } if (pdata && pdata->irq_cmos) i = 0; else i = WM831X_IRQ_OD; wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG, WM831X_IRQ_OD, i); wm831x->irq = irq; wm831x->irq_domain = domain; if (irq) { /* Try to flag /IRQ as a wake source; there are a number of * unconditional wake sources in the PMIC so this isn't * conditional but we don't actually care *too* much if it * fails. */ ret = enable_irq_wake(irq); if (ret != 0) { dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n", ret); } ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "wm831x", wm831x); if (ret != 0) { dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", irq, ret); return ret; } } else { dev_warn(wm831x->dev, "No interrupt specified - functionality limited\n"); } /* Enable top level interrupts, we mask at secondary level */ wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); return 0; } void wm831x_irq_exit(struct wm831x *wm831x) { if (wm831x->irq) free_irq(wm831x->irq, wm831x); }
gpl-2.0
necsst-nms/PMAL_TRACE
kernel/sched/cpupri.c
930
6807
/* * kernel/sched/cpupri.c * * CPU priority management * * Copyright (C) 2007-2008 Novell * * Author: Gregory Haskins <ghaskins@novell.com> * * This code tracks the priority of each CPU so that global migration * decisions are easy to calculate. Each CPU can be in a state as follows: * * (INVALID), IDLE, NORMAL, RT1, ... RT99 * * going from the lowest priority to the highest. CPUs in the INVALID state * are not eligible for routing. The system maintains this state with * a 2 dimensional bitmap (the first for priority class, the second for cpus * in that class). Therefore a typical application without affinity * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit * searches). For tasks with affinity restrictions, the algorithm has a * worst case complexity of O(min(102, nr_domcpus)), though the scenario that * yields the worst case search is fairly contrived. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include "cpupri.h" /* Convert between a 140 based task->prio, and our 102 based cpupri */ static int convert_prio(int prio) { int cpupri; if (prio == CPUPRI_INVALID) cpupri = CPUPRI_INVALID; else if (prio == MAX_PRIO) cpupri = CPUPRI_IDLE; else if (prio >= MAX_RT_PRIO) cpupri = CPUPRI_NORMAL; else cpupri = MAX_RT_PRIO - prio + 1; return cpupri; } /** * cpupri_find - find the best (lowest-pri) CPU in the system * @cp: The cpupri context * @p: The task * @lowest_mask: A mask to fill in with selected CPUs (or NULL) * * Note: This function returns the recommended CPUs as calculated during the * current invocation. By the time the call returns, the CPUs may have in * fact changed priorities any number of times. 
While not ideal, it is not * an issue of correctness since the normal rebalancer logic will correct * any discrepancies created by racing against the uncertainty of the current * priority configuration. * * Returns: (int)bool - CPUs were found */ int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) { int idx = 0; int task_pri = convert_prio(p->prio); if (task_pri >= MAX_RT_PRIO) return 0; for (idx = 0; idx < task_pri; idx++) { struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; int skip = 0; if (!atomic_read(&(vec)->count)) skip = 1; /* * When looking at the vector, we need to read the counter, * do a memory barrier, then read the mask. * * Note: This is still all racey, but we can deal with it. * Ideally, we only want to look at masks that are set. * * If a mask is not set, then the only thing wrong is that we * did a little more work than necessary. * * If we read a zero count but the mask is set, because of the * memory barriers, that can only happen when the highest prio * task for a run queue has left the run queue, in which case, * it will be followed by a pull. If the task we are processing * fails to find a proper place to go, that pull request will * pull this task if the run queue is running at a lower * priority. */ smp_rmb(); /* Need to do the rmb for every iteration */ if (skip) continue; if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; if (lowest_mask) { cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); /* * We have to ensure that we have at least one bit * still set in the array, since the map could have * been concurrently emptied between the first and * second reads of vec->mask. If we hit this * condition, simply act as though we never hit this * priority level and continue on. 
*/ if (cpumask_any(lowest_mask) >= nr_cpu_ids) continue; } return 1; } return 0; } /** * cpupri_set - update the cpu priority setting * @cp: The cpupri context * @cpu: The target cpu * @newpri: The priority (INVALID-RT99) to assign to this CPU * * Note: Assumes cpu_rq(cpu)->lock is locked * * Returns: (void) */ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { int *currpri = &cp->cpu_to_pri[cpu]; int oldpri = *currpri; int do_mb = 0; newpri = convert_prio(newpri); BUG_ON(newpri >= CPUPRI_NR_PRIORITIES); if (newpri == oldpri) return; /* * If the cpu was currently mapped to a different value, we * need to map it to the new value then remove the old value. * Note, we must add the new value first, otherwise we risk the * cpu being missed by the priority loop in cpupri_find. */ if (likely(newpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; cpumask_set_cpu(cpu, vec->mask); /* * When adding a new vector, we update the mask first, * do a write memory barrier, and then update the count, to * make sure the vector is visible when count is set. */ smp_mb__before_atomic_inc(); atomic_inc(&(vec)->count); do_mb = 1; } if (likely(oldpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; /* * Because the order of modification of the vec->count * is important, we must make sure that the update * of the new prio is seen before we decrement the * old prio. This makes sure that the loop sees * one or the other when we raise the priority of * the run queue. We don't care about when we lower the * priority, as that will trigger an rt pull anyway. * * We only need to do a memory barrier if we updated * the new priority vec. */ if (do_mb) smp_mb__after_atomic_inc(); /* * When removing from the vector, we decrement the counter first * do a memory barrier and then clear the mask. 
*/ atomic_dec(&(vec)->count); smp_mb__after_atomic_inc(); cpumask_clear_cpu(cpu, vec->mask); } *currpri = newpri; } /** * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * * Returns: -ENOMEM if memory fails. */ int cpupri_init(struct cpupri *cp) { int i; memset(cp, 0, sizeof(*cp)); for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { struct cpupri_vec *vec = &cp->pri_to_cpu[i]; atomic_set(&vec->count, 0); if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } for_each_possible_cpu(i) cp->cpu_to_pri[i] = CPUPRI_INVALID; return 0; cleanup: for (i--; i >= 0; i--) free_cpumask_var(cp->pri_to_cpu[i].mask); return -ENOMEM; } /** * cpupri_cleanup - clean up the cpupri structure * @cp: The cpupri context */ void cpupri_cleanup(struct cpupri *cp) { int i; for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) free_cpumask_var(cp->pri_to_cpu[i].mask); }
gpl-2.0
blazingwolf/Acer-A500-Stock-Kernel-with-touchscreen-fix-for-ICS
net/sched/sch_netem.c
930
15675
/* * net/sched/sch_netem.c Network emulator * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License. * * Many of the algorithms and ideas for this came from * NIST Net which is not copyrighted. * * Authors: Stephen Hemminger <shemminger@osdl.org> * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> #define VERSION "1.2" /* Network Emulation Queuing algorithm. ==================================== Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based Network Emulation Tool [2] Luigi Rizzo, DummyNet for FreeBSD ---------------------------------------------------------------- This started out as a simple way to delay outgoing packets to test TCP but has grown to include most of the functionality of a full blown network emulator like NISTnet. It can delay packets and add random jitter (and correlation). The random distribution can be loaded from a table as well to provide normal, Pareto, or experimental curves. Packet loss, duplication, and reordering can also be emulated. This qdisc does not do classification that can be handled in layering other disciplines. It does not need to do bandwidth control either since that can be handled by using token bucket or other rate control. 
 */

/* Per-qdisc private state for the netem (network emulator) scheduler. */
struct netem_sched_data {
	struct Qdisc	*qdisc;		/* inner tfifo qdisc holding delayed skbs */
	struct qdisc_watchdog watchdog;	/* wakes the qdisc when the next skb is due */

	psched_tdiff_t latency;		/* base delay added to every packet */
	psched_tdiff_t jitter;		/* random variation around 'latency' */

	u32 loss;			/* drop probability: 0 => none, ~0 => all */
	u32 limit;			/* queue limit handed down to the inner fifo */
	u32 counter;			/* packets enqueued since last reorder event */
	u32 gap;			/* reorder window size (see netem_enqueue) */
	u32 duplicate;			/* duplication probability */
	u32 reorder;			/* reorder probability */
	u32 corrupt;			/* single-bit corruption probability */

	/* state for one correlated pseudo-random sequence */
	struct crndstate {
		u32 last;		/* previous output, mixed into the next */
		u32 rho;		/* correlation coefficient scaled to 2^32 */
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-supplied delay distribution table */
	struct disttable {
		u32 size;
		s16 table[0];		/* 'size' signed 16-bit samples */
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* our private cb data must fit behind the generic qdisc cb area */
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	/* fixed-point weighted average of fresh entropy and previous output */
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	/* split sigma into quotient/remainder to avoid overflow in sigma*t;
	 * round the remainder term to nearest */
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		/* dropped: report success (with bypass bit) but bump drop stats */
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit somewhere in the linear data area */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		/* bypass the inner qdisc's enqueue: insert at head and
		 * account backlog/requeue stats by hand */
		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Drop one packet from the inner qdisc, if it supports dropping. */
static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

/* Hand out the next skb whose time_to_send has arrived, or arm the
 * watchdog and return NULL if the head skb is not yet due. */
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* if more time remaining? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	/* swap in the new table under the qdisc tree lock, then free the
	 * old one */
	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

/* Seed the three correlated RNG streams from a TCA_NETEM_CORR attribute. */
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

/* Parse the nested attributes that follow a fixed-size header of 'len'
 * bytes inside 'nla'; zero 'tb' if there are none. */
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
struct fifo_sched_data {
	u32 limit;		/* max queue length */
	psched_time_t oldest;	/* latest time_to_send seen at the tail */
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		/* walk backwards to find the insertion point that keeps
		 * the queue sorted by time_to_send */
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

/* Also used as .change: (re)apply the fifo limit from netlink options. */
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

/* Report current configuration back to userspace via netlink. */
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	/* patch up the enclosing attribute's length now that all nested
	 * attributes have been appended */
	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
gpl-2.0
hajuuk/asuswrt
release/src-rt-6.x.4708/linux/linux-2.6.36/fs/coda/dir.c
930
17339
/*
 * Directory operations for Coda filesystem
 * Original version: (C) 1996 P. Braam and M. Callahan
 * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users to contribute improvements to
 * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

#include <linux/coda.h>
#include <linux/coda_linux.h>
#include <linux/coda_psdev.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_cache.h>

#include "coda_int.h"

/* dir inode-ops */
static int coda_create(struct inode *dir, struct dentry *new, int mode, struct nameidata *nd);
static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd);
static int coda_link(struct dentry *old_dentry, struct inode *dir_inode,
		     struct dentry *entry);
static int coda_unlink(struct inode *dir_inode, struct dentry *entry);
static int coda_symlink(struct inode *dir_inode, struct dentry *entry,
			const char *symname);
static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, int mode);
static int coda_rmdir(struct inode *dir_inode, struct dentry *entry);
static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
		       struct inode *new_inode, struct dentry *new_dentry);

/* dir file-ops */
static int coda_readdir(struct file *file, void *buf, filldir_t filldir);

/* dentry ops */
static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
static int coda_dentry_delete(struct dentry *);

/* support routines */
static int coda_venus_readdir(struct file *coda_file, void *buf,
			      filldir_t filldir);

/* same as fs/bad_inode.c */
static int coda_return_EIO(void)
{
	return -EIO;
}
#define CODA_EIO_ERROR ((void *) (coda_return_EIO))

static const struct dentry_operations coda_dentry_operations =
{
	.d_revalidate	= coda_dentry_revalidate,
	.d_delete	= coda_dentry_delete,
};

const struct inode_operations coda_dir_inode_operations =
{
	.create		= coda_create,
	.lookup		= coda_lookup,
	.link		= coda_link,
	.unlink		= coda_unlink,
	.symlink	= coda_symlink,
	.mkdir		= coda_mkdir,
	.rmdir		= coda_rmdir,
	.mknod		= CODA_EIO_ERROR,	/* mknod unsupported: always -EIO */
	.rename		= coda_rename,
	.permission	= coda_permission,
	.getattr	= coda_getattr,
	.setattr	= coda_setattr,
};

const struct file_operations coda_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= coda_readdir,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
};


/* inode operations for directories */
/* access routines: lookup, readlink, permission */

/* Look up 'entry' in 'dir' via an upcall to Venus; the magic control
 * object in the root directory is synthesized locally instead. */
static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct CodaFid resfid = { { 0, } };
	int type = 0;
	int error = 0;
	const char *name = entry->d_name.name;
	size_t length = entry->d_name.len;

	if (length > CODA_MAXNAMLEN) {
		printk(KERN_ERR "name too long: lookup, %s (%*s)\n",
		       coda_i2s(dir), (int)length, name);
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* control object, create inode on the fly */
	if (coda_isroot(dir) && coda_iscontrol(name, length)) {
		error = coda_cnode_makectl(&inode, dir->i_sb);
		type = CODA_NOCACHE;
		goto exit;
	}

	lock_kernel();

	error = venus_lookup(dir->i_sb, coda_i2f(dir), name, length,
			     &type, &resfid);
	if (!error)
		error = coda_cnode_make(&inode, &resfid, dir->i_sb);

	unlock_kernel();

	/* -ENOENT falls through to create a negative dentry */
	if (error && error != -ENOENT)
		return ERR_PTR(error);

exit:
	entry->d_op = &coda_dentry_operations;

	if (inode && (type & CODA_NOCACHE))
		coda_flag_inode(inode, C_VATTR | C_PURGE);

	return d_splice_alias(inode, entry);
}


/* Check access rights, consulting the local permission cache before
 * making an upcall to Venus. */
int coda_permission(struct inode *inode, int mask)
{
	int error = 0;

	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;

	if (!mask)
		return 0;

	if ((mask & MAY_EXEC) && !execute_ok(inode))
		return -EACCES;

	lock_kernel();

	if (coda_cache_check(inode, mask))
		goto out;

	error = venus_access(inode->i_sb, coda_i2f(inode), mask);

	if (!error)
		coda_cache_enter(inode, mask);

out:
	unlock_kernel();
	return error;
}


static inline void coda_dir_update_mtime(struct inode *dir)
{
#ifdef REQUERY_VENUS_FOR_MTIME
	/* invalidate the directory cnode's attributes so we refetch the
	 * attributes from venus next time the inode is referenced */
	coda_flag_inode(dir, C_VATTR);
#else
	/* optimistically we can also act as if our nose bleeds. The
	 * granularity of the mtime is coarse anyways so we might actually be
	 * right most of the time. Note: we only do this for directories. */
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
#endif
}

/* we have to wrap inc_nlink/drop_nlink because sometimes userspace uses a
 * trick to fool GNU find's optimizations. If we can't be sure of the link
 * (because of volume mount points) we set i_nlink to 1 which forces find
 * to consider every child as a possible directory. We should also never
 * see an increment or decrement for deleted directories where i_nlink == 0 */
static inline void coda_dir_inc_nlink(struct inode *dir)
{
	if (dir->i_nlink >= 2)
		inc_nlink(dir);
}

static inline void coda_dir_drop_nlink(struct inode *dir)
{
	if (dir->i_nlink > 2)
		drop_nlink(dir);
}

/* creation routines: create, mknod, mkdir, link, symlink */
static int coda_create(struct inode *dir, struct dentry *de, int mode, struct nameidata *nd)
{
	int error=0;
	const char *name=de->d_name.name;
	int length=de->d_name.len;
	struct inode *inode;
	struct CodaFid newfid;
	struct coda_vattr attrs;

	lock_kernel();

	/* the control object may not be created or modified */
	if (coda_isroot(dir) && coda_iscontrol(name, length)) {
		unlock_kernel();
		return -EPERM;
	}

	error = venus_create(dir->i_sb, coda_i2f(dir), name, length,
				0, mode, &newfid, &attrs);

	if ( error ) {
		unlock_kernel();
		d_drop(de);
		return error;
	}

	inode = coda_iget(dir->i_sb, &newfid, &attrs);
	if ( IS_ERR(inode) ) {
		unlock_kernel();
		d_drop(de);
		return PTR_ERR(inode);
	}

	/* invalidate the directory cnode's attributes */
	coda_dir_update_mtime(dir);
	unlock_kernel();
	d_instantiate(de, inode);
	return 0;
}

static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
{
	struct inode *inode;
	struct coda_vattr attrs;
	const char *name = de->d_name.name;
	int len = de->d_name.len;
	int error;
	struct CodaFid newfid;

	lock_kernel();

	if (coda_isroot(dir) && coda_iscontrol(name, len)) {
		unlock_kernel();
		return -EPERM;
	}

	attrs.va_mode = mode;
	error = venus_mkdir(dir->i_sb, coda_i2f(dir),
			       name, len, &newfid, &attrs);

	if ( error ) {
		unlock_kernel();
		d_drop(de);
		return error;
	}

	inode = coda_iget(dir->i_sb, &newfid, &attrs);
	if ( IS_ERR(inode) ) {
		unlock_kernel();
		d_drop(de);
		return PTR_ERR(inode);
	}

	/* invalidate the directory cnode's attributes */
	coda_dir_inc_nlink(dir);
	coda_dir_update_mtime(dir);
	unlock_kernel();
	d_instantiate(de, inode);
	return 0;
}

/* try to make de an entry in dir_inodde linked to source_de */
static int coda_link(struct dentry *source_de, struct inode *dir_inode,
	  struct dentry *de)
{
	struct inode *inode = source_de->d_inode;
	const char * name = de->d_name.name;
	int len = de->d_name.len;
	int error;

	lock_kernel();

	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
		unlock_kernel();
		return -EPERM;
	}

	error = venus_link(dir_inode->i_sb, coda_i2f(inode),
			   coda_i2f(dir_inode), (const char *)name, len);

	if (error) {
		d_drop(de);
		goto out;
	}

	coda_dir_update_mtime(dir_inode);
	atomic_inc(&inode->i_count);
	d_instantiate(de, inode);
	inc_nlink(inode);

out:
	unlock_kernel();
	return(error);
}


static int coda_symlink(struct inode *dir_inode, struct dentry *de,
			const char *symname)
{
	const char *name = de->d_name.name;
	int len = de->d_name.len;
	int symlen;
	int error = 0;

	lock_kernel();

	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
		unlock_kernel();
		return -EPERM;
	}

	symlen = strlen(symname);
	if ( symlen > CODA_MAXPATHLEN ) {
		unlock_kernel();
		return -ENAMETOOLONG;
	}

	/*
	 * This entry is now negative. Since we do not create
	 * an inode for the entry we have to drop it.
	 */
	d_drop(de);
	error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len,
			      symname, symlen);

	/* mtime is no good anymore */
	if ( !error )
		coda_dir_update_mtime(dir_inode);

	unlock_kernel();
	return error;
}

/* destruction routines: unlink, rmdir */
static int coda_unlink(struct inode *dir, struct dentry *de)
{
	int error;
	const char *name = de->d_name.name;
	int len = de->d_name.len;

	lock_kernel();

	error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
	if ( error ) {
		unlock_kernel();
		return error;
	}

	coda_dir_update_mtime(dir);
	drop_nlink(de->d_inode);
	unlock_kernel();
	return 0;
}

static int coda_rmdir(struct inode *dir, struct dentry *de)
{
	const char *name = de->d_name.name;
	int len = de->d_name.len;
	int error;

	lock_kernel();

	error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
	if (!error) {
		/* VFS may delete the child */
		if (de->d_inode)
			de->d_inode->i_nlink = 0;

		/* fix the link count of the parent */
		coda_dir_drop_nlink(dir);
		coda_dir_update_mtime(dir);
	}
	unlock_kernel();
	return error;
}

/* rename */
static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	const char *old_name = old_dentry->d_name.name;
	const char *new_name = new_dentry->d_name.name;
	int old_length = old_dentry->d_name.len;
	int new_length = new_dentry->d_name.len;
	int error;

	lock_kernel();

	error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
			     coda_i2f(new_dir), old_length, new_length,
			     (const char *) old_name, (const char *)new_name);

	if ( !error ) {
		if ( new_dentry->d_inode ) {
			if ( S_ISDIR(new_dentry->d_inode->i_mode) ) {
				coda_dir_drop_nlink(old_dir);
				coda_dir_inc_nlink(new_dir);
			}
			coda_dir_update_mtime(old_dir);
			coda_dir_update_mtime(new_dir);
			coda_flag_inode(new_dentry->d_inode, C_VATTR);
		} else {
			/* target did not exist: just invalidate both dirs */
			coda_flag_inode(old_dir, C_VATTR);
			coda_flag_inode(new_dir, C_VATTR);
		}
	}
	unlock_kernel();

	return error;
}


/* file operations for directories */
static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
{
	struct coda_file_info *cfi;
	struct file *host_file;
	int ret;

	cfi = CODA_FTOC(coda_file);
	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
	host_file = cfi->cfi_container;

	if (!host_file->f_op)
		return -ENOTDIR;

	if (host_file->f_op->readdir) {
		/* potemkin case: we were handed a directory inode.
		 * We can't use vfs_readdir because we have to keep the file
		 * position in sync between the coda_file and the host_file.
		 * and as such we need grab the inode mutex. */
		struct inode *host_inode = host_file->f_path.dentry->d_inode;

		mutex_lock(&host_inode->i_mutex);
		host_file->f_pos = coda_file->f_pos;

		ret = -ENOENT;
		if (!IS_DEADDIR(host_inode)) {
			ret = host_file->f_op->readdir(host_file, buf, filldir);
			file_accessed(host_file);
		}

		coda_file->f_pos = host_file->f_pos;
		mutex_unlock(&host_inode->i_mutex);
	} else /* Venus: we must read Venus dirents from a file */
		ret = coda_venus_readdir(coda_file, buf, filldir);

	return ret;
}

/* Map a Coda dirent type (CDT_*) to the VFS DT_* encoding. */
static inline unsigned int CDT2DT(unsigned char cdt)
{
	unsigned int dt;

	switch(cdt) {
	case CDT_UNKNOWN: dt = DT_UNKNOWN; break;
	case CDT_FIFO:	  dt = DT_FIFO;    break;
	case CDT_CHR:	  dt = DT_CHR;     break;
	case CDT_DIR:	  dt = DT_DIR;     break;
	case CDT_BLK:	  dt = DT_BLK;     break;
	case CDT_REG:	  dt = DT_REG;     break;
	case CDT_LNK:	  dt = DT_LNK;     break;
	case CDT_SOCK:	  dt = DT_SOCK;    break;
	case CDT_WHT:	  dt = DT_WHT;     break;
	default:	  dt = DT_UNKNOWN; break;
	}
	return dt;
}

/* support routines */
static int coda_venus_readdir(struct file *coda_file, void *buf,
			      filldir_t filldir)
{
	int result = 0; /* # of entries returned */
	struct coda_file_info *cfi;
	struct coda_inode_info *cii;
	struct file *host_file;
	struct dentry *de;
	struct venus_dirent *vdir;
	/* offset of d_name within a venus_dirent == size of the fixed header */
	unsigned long vdir_size =
	    (unsigned long)(&((struct venus_dirent *)0)->d_name);
	unsigned int type;
	struct qstr name;
	ino_t ino;
	int ret;

	cfi = CODA_FTOC(coda_file);
	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
	host_file = cfi->cfi_container;

	de = coda_file->f_path.dentry;
	cii = ITOC(de->d_inode);

	vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
	if (!vdir) return -ENOMEM;

	/* f_pos 0 and 1 are reserved for the synthetic "." and ".." entries;
	 * the real directory file is read from offset f_pos - 2 */
	if (coda_file->f_pos == 0) {
		ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR);
		if (ret < 0) goto out;
		result++;
		coda_file->f_pos++;
	}
	if (coda_file->f_pos == 1) {
		ret = filldir(buf, "..", 2, 1, de->d_parent->d_inode->i_ino, DT_DIR);
		if (ret < 0) goto out;
		result++;
		coda_file->f_pos++;
	}
	while (1) {
		/* read entries from the directory file */
		ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir,
				  sizeof(*vdir));
		if (ret < 0) {
			printk(KERN_ERR "coda readdir: read dir %s failed %d\n",
			       coda_f2s(&cii->c_fid), ret);
			break;
		}
		if (ret == 0) break; /* end of directory file reached */

		/* catch truncated reads */
		if (ret < vdir_size || ret < vdir_size + vdir->d_namlen) {
			printk(KERN_ERR "coda readdir: short read on %s\n",
			       coda_f2s(&cii->c_fid));
			ret = -EBADF;
			break;
		}
		/* validate whether the directory file actually makes sense */
		if (vdir->d_reclen < vdir_size + vdir->d_namlen) {
			printk(KERN_ERR "coda readdir: invalid dir %s\n",
			       coda_f2s(&cii->c_fid));
			ret = -EBADF;
			break;
		}

		name.len = vdir->d_namlen;
		name.name = vdir->d_name;

		/* Make sure we skip '.' and '..', we already got those */
		if (name.name[0] == '.' && (name.len == 1 ||
		    (vdir->d_name[1] == '.' && name.len == 2)))
			vdir->d_fileno = name.len = 0;

		/* skip null entries */
		if (vdir->d_fileno && name.len) {
			/* try to look up this entry in the dcache, that way
			 * userspace doesn't have to worry about breaking
			 * getcwd by having mismatched inode numbers for
			 * internal volume mountpoints. */
			ino = find_inode_number(de, &name);
			if (!ino) ino = vdir->d_fileno;

			type = CDT2DT(vdir->d_type);
			ret = filldir(buf, name.name, name.len,
				      coda_file->f_pos, ino, type);
			/* failure means no space for filling in this round */
			if (ret < 0) break;
			result++;
		}
		/* we'll always have progress because d_reclen is unsigned and
		 * we've already established it is non-zero. */
		coda_file->f_pos += vdir->d_reclen;
	}
out:
	kfree(vdir);
	/* report entries delivered this round, or the error if none were */
	return result ? result : ret;
}

/* called when a cache lookup succeeds */
static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
{
	struct inode *inode = de->d_inode;
	struct coda_inode_info *cii;

	if (!inode)
		return 1;
	lock_kernel();
	if (coda_isroot(inode))
		goto out;
	if (is_bad_inode(inode))
		goto bad;

	cii = ITOC(de->d_inode);
	if (!(cii->c_flags & (C_PURGE | C_FLUSH)))
		goto out;

	shrink_dcache_parent(de);

	/* propagate for a flush */
	if (cii->c_flags & C_FLUSH)
		coda_flag_inode_children(inode, C_FLUSH);

	if (atomic_read(&de->d_count) > 1)
		/* pretend it's valid, but don't change the flags */
		goto out;

	/* clear the flags. */
	cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);

bad:
	unlock_kernel();
	return 0;
out:
	unlock_kernel();
	return 1;
}

/*
 * This is the callback from dput() when d_count is going to 0.
 * We use this to unhash dentries with bad inodes.
 */
static int coda_dentry_delete(struct dentry * dentry)
{
	int flags;

	if (!dentry->d_inode)
		return 0;

	flags = (ITOC(dentry->d_inode)->c_flags) & C_PURGE;
	if (is_bad_inode(dentry->d_inode) || flags) {
		return 1;
	}
	return 0;
}



/*
 * This is called when we want to check if the inode has
 * changed on the server.  Coda makes this easy since the
 * cache manager Venus issues a downcall to the kernel when this
 * happens
 */
int coda_revalidate_inode(struct dentry *dentry)
{
	struct coda_vattr attr;
	int error = 0;
	int old_mode;
	ino_t old_ino;
	struct inode *inode = dentry->d_inode;
	struct coda_inode_info *cii = ITOC(inode);

	lock_kernel();
	if ( !cii->c_flags )
		goto ok;

	if (cii->c_flags & (C_VATTR | C_PURGE | C_FLUSH)) {
		error = venus_getattr(inode->i_sb, &(cii->c_fid), &attr);
		if ( error )
			goto return_bad;

		/* this inode may be lost if:
		   - it's ino changed
		   - type changes must be permitted for repair and
		   missing mount points.
		*/
		old_mode = inode->i_mode;
		old_ino = inode->i_ino;
		coda_vattr_to_iattr(inode, &attr);

		if ((old_mode & S_IFMT) != (inode->i_mode & S_IFMT)) {
			printk("Coda: inode %ld, fid %s changed type!\n",
			       inode->i_ino, coda_f2s(&(cii->c_fid)));
		}

		/* the following can happen when a local fid is replaced
		   with a global one, here we lose and declare the inode bad */
		if (inode->i_ino != old_ino)
			goto return_bad;

		coda_flag_inode_children(inode, C_FLUSH);
		cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
	}

ok:
	unlock_kernel();
	return 0;

return_bad:
	unlock_kernel();
	return -EIO;
}
gpl-2.0
thanhphat11/Android_kernel_xiaomi_ALL
arch/arm/mach-msm/board-8960-gpiomux.c
1954
24746
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include "devices.h" #include "board-8960.h" /* The SPI configurations apply to GSBI 1*/ static struct gpiomux_setting spi_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting spi_suspended_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting spi_active_config2 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting spi_suspended_config2 = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting gsbi3_suspended_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }; static struct gpiomux_setting gsbi3_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi6_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi6_suspended_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting external_vfr[] = { /* Suspended state */ { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, /* Active state */ { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_2MA, 
.pull = GPIOMUX_PULL_KEEPER, }, }; static struct gpiomux_setting gsbi_uart = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi8_uartdm_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi8_uartdm_suspended_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi9_active_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi9_suspended_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi10 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi12 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting cdc_mclk = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting audio_auxpcm[] = { /* Suspended state */ { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, /* Active state */ { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, }; #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) static struct gpiomux_setting gpio_eth_config = { .pull = GPIOMUX_PULL_NONE, .drv = GPIOMUX_DRV_8MA, .func = GPIOMUX_FUNC_GPIO, }; #endif static struct gpiomux_setting slimbus = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_KEEPER, }; static struct gpiomux_setting wcnss_5wire_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting wcnss_5wire_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting cyts_resout_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = 
GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting cyts_resout_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting cyts_sleep_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting cyts_sleep_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting cyts_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting cyts_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #ifdef CONFIG_USB_EHCI_MSM_HSIC static struct gpiomux_setting hsic_act_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting hsic_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_OUT_LOW, }; static struct gpiomux_setting hsic_hub_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; #endif static struct gpiomux_setting hap_lvl_shft_suspended_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hap_lvl_shft_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting ap2mdm_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_4MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mdm2ap_status_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting mdm2ap_errfatal_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting ap2mdm_kpdpwr_n_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_4MA, .pull = GPIOMUX_PULL_DOWN, }; static struct 
gpiomux_setting usbsw_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mdp_vsync_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mdp_vsync_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static struct gpiomux_setting hdmi_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hdmi_active_1_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting hdmi_active_2_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #if defined(CONFIG_FB_MSM_HDMI_MHL_8334) || defined(CONFIG_FB_MSM_HDMI_MHL_9244) static struct gpiomux_setting hdmi_active_3_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting hdmi_active_4_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_OUT_HIGH, }; #endif #endif #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) static struct msm_gpiomux_config msm8960_ethernet_configs[] = { { .gpio = 90, .settings = { [GPIOMUX_SUSPENDED] = &gpio_eth_config, } }, { .gpio = 89, .settings = { [GPIOMUX_SUSPENDED] = &gpio_eth_config, } }, }; #endif /* GSBI8 UART GPIOs for Atheros Bluetooth */ static struct msm_gpiomux_config msm8960_gsbi8_uartdm_configs[] = { { .gpio = 34, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg, } }, { .gpio = 35, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg, } }, { .gpio = 36, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg, } }, { .gpio = 
37, .settings = { [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg, } }, }; static struct msm_gpiomux_config msm8960_fusion_gsbi_configs[] = { { .gpio = 93, .settings = { [GPIOMUX_SUSPENDED] = &gsbi9_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi9_active_cfg, } }, { .gpio = 94, .settings = { [GPIOMUX_SUSPENDED] = &gsbi9_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi9_active_cfg, } }, { .gpio = 95, .settings = { [GPIOMUX_SUSPENDED] = &gsbi9_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi9_active_cfg, } }, { .gpio = 96, .settings = { [GPIOMUX_SUSPENDED] = &gsbi9_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi9_active_cfg, } }, }; static struct msm_gpiomux_config msm8960_gsbi_configs[] __initdata = { { .gpio = 6, /* GSBI1 QUP SPI_DATA_MOSI */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 7, /* GSBI1 QUP SPI_DATA_MISO */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 8, /* GSBI1 QUP SPI_CS_N */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 9, /* GSBI1 QUP SPI_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 14, /* GSBI1 SPI_CS_1 */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config2, [GPIOMUX_ACTIVE] = &spi_active_config2, }, }, { .gpio = 16, /* GSBI3 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi3_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi3_active_cfg, }, }, { .gpio = 17, /* GSBI3 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi3_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi3_active_cfg, }, }, { .gpio = 26, /* GSBI6 WLAN_PWD_L for AR6004 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi6_active_cfg, }, }, { .gpio = 27, /* GSBI6 BT_INT2AP_N for AR3002 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi6_active_cfg, }, }, { 
.gpio = 28, /* GSBI6 BT_EN for AR3002 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi6_active_cfg, }, }, { .gpio = 29, /* GSBI6 BT_WAKE for AR3002 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi6_active_cfg, }, }, { .gpio = 44, /* GSBI12 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, }, { .gpio = 45, /* GSBI12 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, }, { .gpio = 73, /* GSBI10 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, }, { .gpio = 74, /* GSBI10 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, }, }; static struct msm_gpiomux_config msm8960_gsbi5_uart_configs[] __initdata = { { .gpio = 22, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 23, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 24, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 25, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, }; static struct msm_gpiomux_config msm8960_external_vfr_configs[] __initdata = { { .gpio = 23, /* EXTERNAL VFR */ .settings = { [GPIOMUX_SUSPENDED] = &external_vfr[0], [GPIOMUX_ACTIVE] = &external_vfr[1], }, }, }; static struct msm_gpiomux_config msm8960_gsbi8_uart_configs[] __initdata = { { .gpio = 34, /* GSBI8 UART3 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 35, /* GSBI8 UART3 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 36, /* GSBI8 UART3 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, { .gpio = 37, /* GSBI8 UART3 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi_uart, }, }, }; static struct msm_gpiomux_config msm8960_slimbus_config[] __initdata = { { .gpio = 60, /* slimbus data */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, { .gpio = 61, /* slimbus clk */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, }; static struct msm_gpiomux_config 
msm8960_audio_codec_configs[] __initdata = { { .gpio = 59, .settings = { [GPIOMUX_SUSPENDED] = &cdc_mclk, }, }, }; static struct msm_gpiomux_config msm8960_audio_auxpcm_configs[] __initdata = { { .gpio = 63, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 64, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 65, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 66, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, }; static struct msm_gpiomux_config wcnss_5wire_interface[] = { { .gpio = 84, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 85, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 86, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 87, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 88, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, }; static struct msm_gpiomux_config msm8960_cyts_configs[] __initdata = { { /* TS INTERRUPT */ .gpio = 11, .settings = { [GPIOMUX_ACTIVE] = &cyts_int_act_cfg, [GPIOMUX_SUSPENDED] = &cyts_int_sus_cfg, }, }, { /* TS SLEEP */ .gpio = 50, .settings = { [GPIOMUX_ACTIVE] = &cyts_sleep_act_cfg, [GPIOMUX_SUSPENDED] = &cyts_sleep_sus_cfg, }, }, { /* TS RESOUT */ .gpio = 52, .settings = { [GPIOMUX_ACTIVE] = &cyts_resout_act_cfg, [GPIOMUX_SUSPENDED] = &cyts_resout_sus_cfg, }, }, }; #ifdef CONFIG_USB_EHCI_MSM_HSIC static struct msm_gpiomux_config msm8960_hsic_configs[] = { { .gpio = 150, /*HSIC_STROBE */ .settings = { [GPIOMUX_ACTIVE] = &hsic_act_cfg, [GPIOMUX_SUSPENDED] 
= &hsic_sus_cfg, }, }, { .gpio = 151, /* HSIC_DATA */ .settings = { [GPIOMUX_ACTIVE] = &hsic_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_sus_cfg, }, }, }; static struct msm_gpiomux_config msm8960_hsic_hub_configs[] = { { .gpio = 91, /* HSIC_HUB_RESET */ .settings = { [GPIOMUX_ACTIVE] = &hsic_hub_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_sus_cfg, }, }, }; #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT static struct gpiomux_setting sdcc4_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting sdcc4_cmd_data_0_3_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting sdcc4_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting sdcc4_data_1_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config msm8960_sdcc4_configs[] __initdata = { { /* SDC4_DATA_3 */ .gpio = 83, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_suspend_cfg, }, }, { /* SDC4_DATA_2 */ .gpio = 84, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_suspend_cfg, }, }, { /* SDC4_DATA_1 */ .gpio = 85, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_data_1_suspend_cfg, }, }, { /* SDC4_DATA_0 */ .gpio = 86, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_suspend_cfg, }, }, { /* SDC4_CMD */ .gpio = 87, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_suspend_cfg, }, }, { /* SDC4_CLK */ .gpio = 88, .settings = { [GPIOMUX_ACTIVE] = &sdcc4_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc4_suspend_cfg, }, }, }; #endif static struct msm_gpiomux_config hap_lvl_shft_config[] __initdata = { { .gpio = 47, .settings = { [GPIOMUX_SUSPENDED] = 
&hap_lvl_shft_suspended_config, [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config, }, }, }; static struct msm_gpiomux_config hap_lvl_shft_config_sglte[] __initdata = { { .gpio = 89, .settings = { [GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config, [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config, }, }, }; static struct msm_gpiomux_config sglte_configs[] __initdata = { /* AP2MDM_STATUS */ { .gpio = 77, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* MDM2AP_STATUS */ { .gpio = 24, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg, } }, /* MDM2AP_ERRFATAL */ { .gpio = 40, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_errfatal_cfg, } }, /* AP2MDM_ERRFATAL */ { .gpio = 80, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* AP2MDM_KPDPWR_N */ { .gpio = 79, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } }, /* AP2MDM_PMIC_PWR_EN */ { .gpio = 22, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } }, /* AP2MDM_SOFT_RESET */ { .gpio = 78, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* USB_SW */ { .gpio = 25, .settings = { [GPIOMUX_SUSPENDED] = &usbsw_cfg, } } }; static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = { { .gpio = 0, .settings = { [GPIOMUX_ACTIVE] = &mdp_vsync_active_cfg, [GPIOMUX_SUSPENDED] = &mdp_vsync_suspend_cfg, }, } }; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static struct msm_gpiomux_config msm8960_hdmi_configs[] __initdata = { { .gpio = 99, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 100, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 101, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 102, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_2_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, #ifdef CONFIG_FB_MSM_HDMI_MHL_9244 { .gpio = 15, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_3_cfg, 
[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 66, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_4_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, #endif #ifdef CONFIG_FB_MSM_HDMI_MHL_8334 { .gpio = 4, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_3_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 15, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_4_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, #endif /* CONFIG_FB_MSM_HDMI_MHL */ }; #endif #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT static struct gpiomux_setting sdcc2_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting sdcc2_cmd_data_0_3_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting sdcc2_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting sdcc2_data_1_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config msm8960_sdcc2_configs[] __initdata = { { /* DATA_3 */ .gpio = 92, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg, }, }, { /* DATA_2 */ .gpio = 91, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg, }, }, { /* DATA_1 */ .gpio = 90, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_data_1_suspend_cfg, }, }, { /* DATA_0 */ .gpio = 89, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg, }, }, { /* CMD */ .gpio = 97, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg, }, }, { /* CLK */ .gpio = 98, .settings = { [GPIOMUX_ACTIVE] = &sdcc2_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdcc2_suspend_cfg, }, }, }; #endif int __init msm8960_init_gpiomux(void) { int rc 
= msm_gpiomux_init(NR_GPIO_IRQS); if (rc) { pr_err(KERN_ERR "msm_gpiomux_init failed %d\n", rc); return rc; } #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE) msm_gpiomux_install(msm8960_ethernet_configs, ARRAY_SIZE(msm8960_ethernet_configs)); #endif msm_gpiomux_install(msm8960_gsbi_configs, ARRAY_SIZE(msm8960_gsbi_configs)); msm_gpiomux_install(msm8960_cyts_configs, ARRAY_SIZE(msm8960_cyts_configs)); msm_gpiomux_install(msm8960_slimbus_config, ARRAY_SIZE(msm8960_slimbus_config)); msm_gpiomux_install(msm8960_audio_codec_configs, ARRAY_SIZE(msm8960_audio_codec_configs)); msm_gpiomux_install(msm8960_audio_auxpcm_configs, ARRAY_SIZE(msm8960_audio_auxpcm_configs)); msm_gpiomux_install(wcnss_5wire_interface, ARRAY_SIZE(wcnss_5wire_interface)); #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT msm_gpiomux_install(msm8960_sdcc4_configs, ARRAY_SIZE(msm8960_sdcc4_configs)); #endif if (machine_is_msm8960_mtp() || machine_is_msm8960_fluid() || machine_is_msm8960_liquid() || machine_is_msm8960_cdp()) { if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) msm_gpiomux_install(hap_lvl_shft_config_sglte, ARRAY_SIZE(hap_lvl_shft_config_sglte)); else msm_gpiomux_install(hap_lvl_shft_config, ARRAY_SIZE(hap_lvl_shft_config)); } #ifdef CONFIG_USB_EHCI_MSM_HSIC if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 1) && machine_is_msm8960_liquid()) msm_gpiomux_install(msm8960_hsic_configs, ARRAY_SIZE(msm8960_hsic_configs)); if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 1) && machine_is_msm8960_liquid()) msm_gpiomux_install(msm8960_hsic_hub_configs, ARRAY_SIZE(msm8960_hsic_hub_configs)); #endif #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL msm_gpiomux_install(msm8960_hdmi_configs, ARRAY_SIZE(msm8960_hdmi_configs)); #endif msm_gpiomux_install(msm8960_mdp_vsync_configs, ARRAY_SIZE(msm8960_mdp_vsync_configs)); if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE) msm_gpiomux_install(msm8960_gsbi8_uartdm_configs, 
ARRAY_SIZE(msm8960_gsbi8_uartdm_configs)); if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) msm_gpiomux_install(msm8960_gsbi8_uart_configs, ARRAY_SIZE(msm8960_gsbi8_uart_configs)); else msm_gpiomux_install(msm8960_gsbi5_uart_configs, ARRAY_SIZE(msm8960_gsbi5_uart_configs)); if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) { /* For 8960 Fusion 2.2 Primary IPC */ msm_gpiomux_install(msm8960_fusion_gsbi_configs, ARRAY_SIZE(msm8960_fusion_gsbi_configs)); /* For SGLTE 8960 Fusion External VFR */ msm_gpiomux_install(msm8960_external_vfr_configs, ARRAY_SIZE(msm8960_external_vfr_configs)); } #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT msm_gpiomux_install(msm8960_sdcc2_configs, ARRAY_SIZE(msm8960_sdcc2_configs)); #endif if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) msm_gpiomux_install(sglte_configs, ARRAY_SIZE(sglte_configs)); return 0; }
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
drivers/regulator/dummy.c
2722
2201
/* * dummy.c * * Copyright 2010 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This is useful for systems with mixed controllable and * non-controllable regulators, as well as for allowing testing on * systems with no controllable regulators. */ #include <linux/err.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include "dummy.h" struct regulator_dev *dummy_regulator_rdev; static struct regulator_init_data dummy_initdata; static struct regulator_ops dummy_ops; static struct regulator_desc dummy_desc = { .name = "regulator-dummy", .id = -1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .ops = &dummy_ops, }; static int dummy_regulator_probe(struct platform_device *pdev) { struct regulator_config config = { }; int ret; config.dev = &pdev->dev; config.init_data = &dummy_initdata; dummy_regulator_rdev = regulator_register(&dummy_desc, &config); if (IS_ERR(dummy_regulator_rdev)) { ret = PTR_ERR(dummy_regulator_rdev); pr_err("Failed to register regulator: %d\n", ret); return ret; } return 0; } static struct platform_driver dummy_regulator_driver = { .probe = dummy_regulator_probe, .driver = { .name = "reg-dummy", .owner = THIS_MODULE, }, }; static struct platform_device *dummy_pdev; void __init regulator_dummy_init(void) { int ret; dummy_pdev = platform_device_alloc("reg-dummy", -1); if (!dummy_pdev) { pr_err("Failed to allocate dummy regulator device\n"); return; } ret = platform_device_add(dummy_pdev); if (ret != 0) { pr_err("Failed to register dummy regulator device: %d\n", ret); platform_device_put(dummy_pdev); return; } ret = platform_driver_register(&dummy_regulator_driver); if (ret 
!= 0) { pr_err("Failed to register dummy regulator driver: %d\n", ret); platform_device_unregister(dummy_pdev); } }
gpl-2.0
TeamWin/android_kernel_oneplus_msm8974
kernel/trace/trace_irqsoff.c
3490
15890
/* * trace irqs off critical timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * From code in the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/kallsyms.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> #include <linux/fs.h> #include "trace.h" static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; static DEFINE_PER_CPU(int, tracing_cpu); static DEFINE_RAW_SPINLOCK(max_trace_lock); enum { TRACER_IRQS_OFF = (1 << 1), TRACER_PREEMPT_OFF = (1 << 2), }; static int trace_type __read_mostly; static int save_lat_flag; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) { return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); } #else # define preempt_trace() (0) #endif #ifdef CONFIG_IRQSOFF_TRACER static inline int irq_trace(void) { return ((trace_type & TRACER_IRQS_OFF) && irqs_disabled()); } #else # define irq_trace() (0) #endif #define TRACE_DISPLAY_GRAPH 1 static struct tracer_opt trace_opts[] = { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* display latency trace as call graph */ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, #endif { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { .val = 0, .opts = trace_opts, }; #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) /* * Sequence count - we record it when starting a measurement and * skip the latency if the sequence has changed - some other section * did a maximum and could disturb our measurement with serial console * printouts, etc. 
Truly coinciding maximum latencies should be rare * and what happens together happens separately as well, so this doesn't * decrease the validity of the maximum found: */ static __cacheline_aligned_in_smp unsigned long max_sequence; #ifdef CONFIG_FUNCTION_TRACER /* * Prologue for the preempt and irqs off function tracers. * * Returns 1 if it is OK to continue, and data->disabled is * incremented. * 0 if the trace is to be ignored, and data->disabled * is kept the same. * * Note, this function is also used outside this ifdef but * inside the #ifdef of the function graph tracer below. * This is OK, since the function graph tracer is * dependent on the function tracer. */ static int func_prolog_dec(struct trace_array *tr, struct trace_array_cpu **data, unsigned long *flags) { long disabled; int cpu; /* * Does not matter if we preempt. We test the flags * afterward, to see if irqs are disabled or not. * If we preempt and get a false positive, the flags * test will fail. */ cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) return 0; local_save_flags(*flags); /* slight chance to get a false positive on tracing_cpu */ if (!irqs_disabled_flags(*flags)) return 0; *data = tr->data[cpu]; disabled = atomic_inc_return(&(*data)->disabled); if (likely(disabled == 1)) return 1; atomic_dec(&(*data)->disabled); return 0; } /* * irqsoff uses its own tracer function to keep the overhead down: */ static void irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; if (!func_prolog_dec(tr, &data, &flags)) return; trace_function(tr, ip, parent_ip, flags, preempt_count()); atomic_dec(&data->disabled); } static struct ftrace_ops trace_ops __read_mostly = { .func = irqsoff_tracer_call, .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { int cpu; if (!(bit & 
TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_irqsoff_tracer(irqsoff_trace, !set); for_each_possible_cpu(cpu) per_cpu(tracing_cpu, cpu) = 0; tracing_max_latency = 0; tracing_reset_online_cpus(irqsoff_trace); return start_irqsoff_tracer(irqsoff_trace, set); } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; int ret; int pc; if (!func_prolog_dec(tr, &data, &flags)) return 0; pc = preempt_count(); ret = __trace_graph_entry(tr, trace, flags, pc); atomic_dec(&data->disabled); return ret; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_dec(tr, &data, &flags)) return; pc = preempt_count(); __trace_graph_return(tr, trace, flags, pc); atomic_dec(&data->disabled); } static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void irqsoff_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ TRACE_GRAPH_PRINT_PROC | \ TRACE_GRAPH_PRINT_ABS_TIME | \ TRACE_GRAPH_PRINT_DURATION) static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void irqsoff_print_header(struct seq_file *s) { if (is_graph()) print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); else trace_default_header(s); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (is_graph()) trace_graph_function(tr, ip, parent_ip, flags, pc); else trace_function(tr, ip, parent_ip, flags, pc); } #else #define 
__trace_function trace_function static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } static void irqsoff_trace_open(struct trace_iterator *iter) { } static void irqsoff_trace_close(struct trace_iterator *iter) { } #ifdef CONFIG_FUNCTION_TRACER static void irqsoff_print_header(struct seq_file *s) { trace_default_header(s); } #else static void irqsoff_print_header(struct seq_file *s) { trace_latency_header(s); } #endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* * Should this new latency be reported/recorded? */ static int report_latency(cycle_t delta) { if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } static void check_critical_timing(struct trace_array *tr, struct trace_array_cpu *data, unsigned long parent_ip, int cpu) { cycle_t T0, T1, delta; unsigned long flags; int pc; T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); delta = T1-T0; local_save_flags(flags); pc = preempt_count(); if (!report_latency(delta)) goto out; raw_spin_lock_irqsave(&max_trace_lock, flags); /* check if we are still the max latency */ if (!report_latency(delta)) goto out_unlock; __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); /* Skip 5 functions to get to the irq/preempt enable function */ __trace_stack(tr, flags, 5, pc); if (data->critical_sequence != max_sequence) goto out_unlock; data->critical_end = parent_ip; if (likely(!is_tracing_stopped())) { tracing_max_latency = delta; update_max_tr_single(tr, current, cpu); } max_sequence++; out_unlock: raw_spin_unlock_irqrestore(&max_trace_lock, flags); out: data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); 
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); } static inline void start_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; if (likely(!tracer_enabled)) return; cpu = raw_smp_processor_id(); if (per_cpu(tracing_cpu, cpu)) return; data = tr->data[cpu]; if (unlikely(!data) || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); data->critical_start = parent_ip ? : ip; local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); per_cpu(tracing_cpu, cpu) = 1; atomic_dec(&data->disabled); } static inline void stop_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; cpu = raw_smp_processor_id(); /* Always clear the tracing cpu on stopping the trace */ if (unlikely(per_cpu(tracing_cpu, cpu))) per_cpu(tracing_cpu, cpu) = 0; else return; if (!tracer_enabled) return; data = tr->data[cpu]; if (unlikely(!data) || !data->critical_start || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); check_critical_timing(tr, data, parent_ip ? 
: ip, cpu); data->critical_start = 0; atomic_dec(&data->disabled); } /* start and stop critical timings used to for stoppage (in idle) */ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(stop_critical_timings); #ifdef CONFIG_IRQSOFF_TRACER #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } void time_hardirqs_off(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); } #else /* !CONFIG_PROVE_LOCKING */ /* * Stubs: */ void trace_softirqs_on(unsigned long ip) { } void trace_softirqs_off(unsigned long ip) { } inline void print_irqtrace_events(struct task_struct *curr) { } /* * We are only interested in hardirq on/off events: */ void trace_hardirqs_on(void) { if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_on); void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_off); void trace_hardirqs_on_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_on_caller); void trace_hardirqs_off_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_off_caller); #endif /* CONFIG_PROVE_LOCKING */ #endif /* CONFIG_IRQSOFF_TRACER */ #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { if (preempt_trace() && !irq_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned 
long a0, unsigned long a1) { if (preempt_trace() && !irq_trace()) start_critical_timing(a0, a1); } #endif /* CONFIG_PREEMPT_TRACER */ static int start_irqsoff_tracer(struct trace_array *tr, int graph) { int ret = 0; if (!graph) ret = register_ftrace_function(&trace_ops); else ret = register_ftrace_graph(&irqsoff_graph_return, &irqsoff_graph_entry); if (!ret && tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return ret; } static void stop_irqsoff_tracer(struct trace_array *tr, int graph) { tracer_enabled = 0; if (!graph) unregister_ftrace_function(&trace_ops); else unregister_ftrace_graph(); } static void __irqsoff_tracer_init(struct trace_array *tr) { save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; tracing_max_latency = 0; irqsoff_trace = tr; /* make sure that the tracer is visible */ smp_wmb(); tracing_reset_online_cpus(tr); if (start_irqsoff_tracer(tr, is_graph())) printk(KERN_ERR "failed to start irqsoff tracer\n"); } static void irqsoff_tracer_reset(struct trace_array *tr) { stop_irqsoff_tracer(tr, is_graph()); if (!save_lat_flag) trace_flags &= ~TRACE_ITER_LATENCY_FMT; } static void irqsoff_tracer_start(struct trace_array *tr) { tracer_enabled = 1; } static void irqsoff_tracer_stop(struct trace_array *tr) { tracer_enabled = 0; } #ifdef CONFIG_IRQSOFF_TRACER static int irqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer irqsoff_tracer __read_mostly = { .name = "irqsoff", .init = irqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_irqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_irqsoff(trace) 
register_tracer(&trace) #else # define register_irqsoff(trace) do { } while (0) #endif #ifdef CONFIG_PREEMPT_TRACER static int preemptoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer preemptoff_tracer __read_mostly = { .name = "preemptoff", .init = preemptoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptoff(trace) register_tracer(&trace) #else # define register_preemptoff(trace) do { } while (0) #endif #if defined(CONFIG_IRQSOFF_TRACER) && \ defined(CONFIG_PREEMPT_TRACER) static int preemptirqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer preemptirqsoff_tracer __read_mostly = { .name = "preemptirqsoff", .init = preemptirqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptirqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptirqsoff(trace) register_tracer(&trace) #else # define register_preemptirqsoff(trace) do { } while (0) #endif __init static int init_irqsoff_tracer(void) { register_irqsoff(irqsoff_tracer); register_preemptoff(preemptoff_tracer); register_preemptirqsoff(preemptirqsoff_tracer); return 0; } device_initcall(init_irqsoff_tracer);
gpl-2.0
crysehillmes/android_kernel_samsung_klimtlte
drivers/gpu/drm/i915/intel_panel.c
4002
10465
/* * Copyright © 2006-2010 Intel Corporation * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> * Chris Wilson <chris@chris-wilson.co.uk> */ #include "intel_drv.h" #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) { adjusted_mode->hdisplay = fixed_mode->hdisplay; adjusted_mode->hsync_start = fixed_mode->hsync_start; adjusted_mode->hsync_end = fixed_mode->hsync_end; adjusted_mode->htotal = fixed_mode->htotal; adjusted_mode->vdisplay = fixed_mode->vdisplay; adjusted_mode->vsync_start = fixed_mode->vsync_start; adjusted_mode->vsync_end = fixed_mode->vsync_end; adjusted_mode->vtotal = fixed_mode->vtotal; adjusted_mode->clock = fixed_mode->clock; } /* adjusted_mode has been preset to be the panel's fixed mode */ void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_i915_private *dev_priv = dev->dev_private; int x, y, width, height; x = y = width = height = 0; /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && adjusted_mode->vdisplay == mode->vdisplay) goto done; switch (fitting_mode) { case DRM_MODE_SCALE_CENTER: width = mode->hdisplay; height = mode->vdisplay; x = (adjusted_mode->hdisplay - width + 1)/2; y = (adjusted_mode->vdisplay - height + 1)/2; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; if (scaled_width > scaled_height) { /* pillar */ width = scaled_height / mode->vdisplay; if (width & 1) width++; x = (adjusted_mode->hdisplay - width + 1) / 2; y = 0; height = adjusted_mode->vdisplay; } else if (scaled_width < scaled_height) { /* letter */ height = scaled_width / mode->hdisplay; if (height & 1) height++; y = (adjusted_mode->vdisplay - 
height + 1) / 2; x = 0; width = adjusted_mode->hdisplay; } else { x = y = 0; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; } } break; default: case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; break; } done: dev_priv->pch_pf_pos = (x << 16) | y; dev_priv->pch_pf_size = (width << 16) | height; } static int is_backlight_combination_mode(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 4) return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; if (IS_GEN2(dev)) return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; return 0; } static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) { u32 val; /* Restore the CTL value if it lost, e.g. GPU reset */ if (HAS_PCH_SPLIT(dev_priv->dev)) { val = I915_READ(BLC_PWM_PCH_CTL2); if (dev_priv->saveBLC_PWM_CTL2 == 0) { dev_priv->saveBLC_PWM_CTL2 = val; } else if (val == 0) { I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); val = dev_priv->saveBLC_PWM_CTL2; } } else { val = I915_READ(BLC_PWM_CTL); if (dev_priv->saveBLC_PWM_CTL == 0) { dev_priv->saveBLC_PWM_CTL = val; dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); } else if (val == 0) { I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); val = dev_priv->saveBLC_PWM_CTL; } } return val; } u32 intel_panel_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 max; max = i915_read_blc_pwm_ctl(dev_priv); if (max == 0) { /* XXX add code here to query mode clock or hardware clock * and program max PWM appropriately. 
*/ printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); return 1; } if (HAS_PCH_SPLIT(dev)) { max >>= 16; } else { if (INTEL_INFO(dev)->gen < 4) max >>= 17; else max >>= 16; if (is_backlight_combination_mode(dev)) max *= 0xff; } DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); return max; } u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; if (HAS_PCH_SPLIT(dev)) { val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } else { val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (INTEL_INFO(dev)->gen < 4) val >>= 1; if (is_backlight_combination_mode(dev)) { u8 lbpc; pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); val *= lbpc; } } DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); return val; } static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CPU_CTL, val | level); } static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); if (is_backlight_combination_mode(dev)) { u32 max = intel_panel_get_max_backlight(dev); u8 lbpc; lbpc = level * 0xfe / max + 1; level /= lbpc; pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); } tmp = I915_READ(BLC_PWM_CTL); if (INTEL_INFO(dev)->gen < 4) level <<= 1; tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CTL, tmp | level); } void intel_panel_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->backlight_level = level; if (dev_priv->backlight_enabled) intel_panel_actually_set_backlight(dev, level); } void intel_panel_disable_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = 
dev->dev_private; dev_priv->backlight_enabled = false; intel_panel_actually_set_backlight(dev, 0); } void intel_panel_enable_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); dev_priv->backlight_enabled = true; intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->backlight_level = intel_panel_get_backlight(dev); dev_priv->backlight_enabled = dev_priv->backlight_level != 0; } enum drm_connector_status intel_panel_detect(struct drm_device *dev) { #if 0 struct drm_i915_private *dev_priv = dev->dev_private; #endif if (i915_panel_ignore_lid) return i915_panel_ignore_lid > 0 ? connector_status_connected : connector_status_disconnected; /* opregion lid state on HP 2540p is wrong at boot up, * appears to be either the BIOS or Linux ACPI fault */ #if 0 /* Assume that the BIOS does not lie through the OpRegion... */ if (dev_priv->opregion.lid_state) return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 
connector_status_connected : connector_status_disconnected; #endif return connector_status_unknown; } #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE static int intel_panel_update_status(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); intel_panel_set_backlight(dev, bd->props.brightness); return 0; } static int intel_panel_get_brightness(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->backlight_level; } static const struct backlight_ops intel_panel_bl_ops = { .update_status = intel_panel_update_status, .get_brightness = intel_panel_get_brightness, }; int intel_panel_setup_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct backlight_properties props; struct drm_connector *connector; intel_panel_init_backlight(dev); if (dev_priv->int_lvds_connector) connector = dev_priv->int_lvds_connector; else if (dev_priv->int_edp_connector) connector = dev_priv->int_edp_connector; else return -ENODEV; props.type = BACKLIGHT_RAW; props.max_brightness = intel_panel_get_max_backlight(dev); dev_priv->backlight = backlight_device_register("intel_backlight", &connector->kdev, dev, &intel_panel_bl_ops, &props); if (IS_ERR(dev_priv->backlight)) { DRM_ERROR("Failed to register backlight: %ld\n", PTR_ERR(dev_priv->backlight)); dev_priv->backlight = NULL; return -ENODEV; } dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev); return 0; } void intel_panel_destroy_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->backlight) backlight_device_unregister(dev_priv->backlight); } #else int intel_panel_setup_backlight(struct drm_device *dev) { intel_panel_init_backlight(dev); return 0; } void intel_panel_destroy_backlight(struct drm_device *dev) { return; } #endif
gpl-2.0
GalaxyTab4/android_kernel_motorola_msm8226
drivers/power/da9030_battery.c
5026
16063
/* * Battery charger driver for Dialog Semiconductor DA9030 * * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/da903x.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #define DA9030_FAULT_LOG 0x0a #define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) #define DA9030_FAULT_LOG_VBAT_OVER (1 << 4) #define DA9030_CHARGE_CONTROL 0x28 #define DA9030_CHRG_CHARGER_ENABLE (1 << 7) #define DA9030_ADC_MAN_CONTROL 0x30 #define DA9030_ADC_TBATREF_ENABLE (1 << 5) #define DA9030_ADC_LDO_INT_ENABLE (1 << 4) #define DA9030_ADC_AUTO_CONTROL 0x31 #define DA9030_ADC_TBAT_ENABLE (1 << 5) #define DA9030_ADC_VBAT_IN_TXON (1 << 4) #define DA9030_ADC_VCH_ENABLE (1 << 3) #define DA9030_ADC_ICH_ENABLE (1 << 2) #define DA9030_ADC_VBAT_ENABLE (1 << 1) #define DA9030_ADC_AUTO_SLEEP_ENABLE (1 << 0) #define DA9030_VBATMON 0x32 #define DA9030_VBATMONTXON 0x33 #define DA9030_TBATHIGHP 0x34 #define DA9030_TBATHIGHN 0x35 #define DA9030_TBATLOW 0x36 #define DA9030_VBAT_RES 0x41 #define DA9030_VBATMIN_RES 0x42 #define DA9030_VBATMINTXON_RES 0x43 #define DA9030_ICHMAX_RES 0x44 #define DA9030_ICHMIN_RES 0x45 #define DA9030_ICHAVERAGE_RES 0x46 #define DA9030_VCHMAX_RES 0x47 #define DA9030_VCHMIN_RES 0x48 #define DA9030_TBAT_RES 0x49 struct da9030_adc_res { uint8_t vbat_res; uint8_t vbatmin_res; uint8_t vbatmintxon; uint8_t ichmax_res; uint8_t ichmin_res; uint8_t ichaverage_res; uint8_t vchmax_res; uint8_t vchmin_res; uint8_t tbat_res; uint8_t adc_in4_res; uint8_t adc_in5_res; }; struct da9030_battery_thresholds { int tbat_low; int 
tbat_high; int tbat_restart; int vbat_low; int vbat_crit; int vbat_charge_start; int vbat_charge_stop; int vbat_charge_restart; int vcharge_min; int vcharge_max; }; struct da9030_charger { struct power_supply psy; struct device *master; struct da9030_adc_res adc; struct delayed_work work; unsigned int interval; struct power_supply_info *battery_info; struct da9030_battery_thresholds thresholds; unsigned int charge_milliamp; unsigned int charge_millivolt; /* charger status */ bool chdet; uint8_t fault; int mA; int mV; bool is_on; struct notifier_block nb; /* platform callbacks for battery low and critical events */ void (*battery_low)(void); void (*battery_critical)(void); struct dentry *debug_file; }; static inline int da9030_reg_to_mV(int reg) { return ((reg * 2650) >> 8) + 2650; } static inline int da9030_millivolt_to_reg(int mV) { return ((mV - 2650) << 8) / 2650; } static inline int da9030_reg_to_mA(int reg) { return ((reg * 24000) >> 8) / 15; } #ifdef CONFIG_DEBUG_FS static int bat_debug_show(struct seq_file *s, void *data) { struct da9030_charger *charger = s->private; seq_printf(s, "charger is %s\n", charger->is_on ? 
"on" : "off"); if (charger->chdet) { seq_printf(s, "iset = %dmA, vset = %dmV\n", charger->mA, charger->mV); } seq_printf(s, "vbat_res = %d (%dmV)\n", charger->adc.vbat_res, da9030_reg_to_mV(charger->adc.vbat_res)); seq_printf(s, "vbatmin_res = %d (%dmV)\n", charger->adc.vbatmin_res, da9030_reg_to_mV(charger->adc.vbatmin_res)); seq_printf(s, "vbatmintxon = %d (%dmV)\n", charger->adc.vbatmintxon, da9030_reg_to_mV(charger->adc.vbatmintxon)); seq_printf(s, "ichmax_res = %d (%dmA)\n", charger->adc.ichmax_res, da9030_reg_to_mV(charger->adc.ichmax_res)); seq_printf(s, "ichmin_res = %d (%dmA)\n", charger->adc.ichmin_res, da9030_reg_to_mA(charger->adc.ichmin_res)); seq_printf(s, "ichaverage_res = %d (%dmA)\n", charger->adc.ichaverage_res, da9030_reg_to_mA(charger->adc.ichaverage_res)); seq_printf(s, "vchmax_res = %d (%dmV)\n", charger->adc.vchmax_res, da9030_reg_to_mA(charger->adc.vchmax_res)); seq_printf(s, "vchmin_res = %d (%dmV)\n", charger->adc.vchmin_res, da9030_reg_to_mV(charger->adc.vchmin_res)); return 0; } static int debug_open(struct inode *inode, struct file *file) { return single_open(file, bat_debug_show, inode->i_private); } static const struct file_operations bat_debug_fops = { .open = debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { charger->debug_file = debugfs_create_file("charger", 0666, 0, charger, &bat_debug_fops); return charger->debug_file; } static void da9030_bat_remove_debugfs(struct da9030_charger *charger) { debugfs_remove(charger->debug_file); } #else static inline struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { return NULL; } static inline void da9030_bat_remove_debugfs(struct da9030_charger *charger) { } #endif static inline void da9030_read_adc(struct da9030_charger *charger, struct da9030_adc_res *adc) { da903x_reads(charger->master, DA9030_VBAT_RES, sizeof(*adc), (uint8_t *)adc); } static void 
da9030_charger_update_state(struct da9030_charger *charger) { uint8_t val; da903x_read(charger->master, DA9030_CHARGE_CONTROL, &val); charger->is_on = (val & DA9030_CHRG_CHARGER_ENABLE) ? 1 : 0; charger->mA = ((val >> 3) & 0xf) * 100; charger->mV = (val & 0x7) * 50 + 4000; da9030_read_adc(charger, &charger->adc); da903x_read(charger->master, DA9030_FAULT_LOG, &charger->fault); charger->chdet = da903x_query_status(charger->master, DA9030_STATUS_CHDET); } static void da9030_set_charge(struct da9030_charger *charger, int on) { uint8_t val; if (on) { val = DA9030_CHRG_CHARGER_ENABLE; val |= (charger->charge_milliamp / 100) << 3; val |= (charger->charge_millivolt - 4000) / 50; charger->is_on = 1; } else { val = 0; charger->is_on = 0; } da903x_write(charger->master, DA9030_CHARGE_CONTROL, val); power_supply_changed(&charger->psy); } static void da9030_charger_check_state(struct da9030_charger *charger) { da9030_charger_update_state(charger); /* we wake or boot with external power on */ if (!charger->is_on) { if ((charger->chdet) && (charger->adc.vbat_res < charger->thresholds.vbat_charge_start)) { da9030_set_charge(charger, 1); } } else { /* Charger has been pulled out */ if (!charger->chdet) { da9030_set_charge(charger, 0); return; } if (charger->adc.vbat_res >= charger->thresholds.vbat_charge_stop) { da9030_set_charge(charger, 0); da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_charge_restart); } else if (charger->adc.vbat_res > charger->thresholds.vbat_low) { /* we are charging and passed LOW_THRESH, so upate DA9030 VBAT threshold */ da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_low); } if (charger->adc.vchmax_res > charger->thresholds.vcharge_max || charger->adc.vchmin_res < charger->thresholds.vcharge_min || /* Tempreture readings are negative */ charger->adc.tbat_res < charger->thresholds.tbat_high || charger->adc.tbat_res > charger->thresholds.tbat_low) { /* disable charger */ da9030_set_charge(charger, 0); } } } 
static void da9030_charging_monitor(struct work_struct *work) { struct da9030_charger *charger; charger = container_of(work, struct da9030_charger, work.work); da9030_charger_check_state(charger); /* reschedule for the next time */ schedule_delayed_work(&charger->work, charger->interval); } static enum power_supply_property da9030_battery_props[] = { POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, }; static void da9030_battery_check_status(struct da9030_charger *charger, union power_supply_propval *val) { if (charger->chdet) { if (charger->is_on) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; } else { val->intval = POWER_SUPPLY_STATUS_DISCHARGING; } } static void da9030_battery_check_health(struct da9030_charger *charger, union power_supply_propval *val) { if (charger->fault & DA9030_FAULT_LOG_OVER_TEMP) val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; else if (charger->fault & DA9030_FAULT_LOG_VBAT_OVER) val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; else val->intval = POWER_SUPPLY_HEALTH_GOOD; } static int da9030_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct da9030_charger *charger; charger = container_of(psy, struct da9030_charger, psy); switch (psp) { case POWER_SUPPLY_PROP_STATUS: da9030_battery_check_status(charger, val); break; case POWER_SUPPLY_PROP_HEALTH: da9030_battery_check_health(charger, val); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = charger->battery_info->technology; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = charger->battery_info->voltage_max_design; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = charger->battery_info->voltage_min_design; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: 
val->intval = da9030_reg_to_mV(charger->adc.vbat_res) * 1000; break; case POWER_SUPPLY_PROP_CURRENT_AVG: val->intval = da9030_reg_to_mA(charger->adc.ichaverage_res) * 1000; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = charger->battery_info->name; break; default: break; } return 0; } static void da9030_battery_vbat_event(struct da9030_charger *charger) { da9030_read_adc(charger, &charger->adc); if (charger->is_on) return; if (charger->adc.vbat_res < charger->thresholds.vbat_low) { /* set VBAT threshold for critical */ da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_crit); if (charger->battery_low) charger->battery_low(); } else if (charger->adc.vbat_res < charger->thresholds.vbat_crit) { /* notify the system of battery critical */ if (charger->battery_critical) charger->battery_critical(); } } static int da9030_battery_event(struct notifier_block *nb, unsigned long event, void *data) { struct da9030_charger *charger = container_of(nb, struct da9030_charger, nb); switch (event) { case DA9030_EVENT_CHDET: cancel_delayed_work_sync(&charger->work); schedule_work(&charger->work.work); break; case DA9030_EVENT_VBATMON: da9030_battery_vbat_event(charger); break; case DA9030_EVENT_CHIOVER: case DA9030_EVENT_TBAT: da9030_set_charge(charger, 0); break; } return 0; } static void da9030_battery_convert_thresholds(struct da9030_charger *charger, struct da9030_battery_info *pdata) { charger->thresholds.tbat_low = pdata->tbat_low; charger->thresholds.tbat_high = pdata->tbat_high; charger->thresholds.tbat_restart = pdata->tbat_restart; charger->thresholds.vbat_low = da9030_millivolt_to_reg(pdata->vbat_low); charger->thresholds.vbat_crit = da9030_millivolt_to_reg(pdata->vbat_crit); charger->thresholds.vbat_charge_start = da9030_millivolt_to_reg(pdata->vbat_charge_start); charger->thresholds.vbat_charge_stop = da9030_millivolt_to_reg(pdata->vbat_charge_stop); charger->thresholds.vbat_charge_restart = 
da9030_millivolt_to_reg(pdata->vbat_charge_restart); charger->thresholds.vcharge_min = da9030_millivolt_to_reg(pdata->vcharge_min); charger->thresholds.vcharge_max = da9030_millivolt_to_reg(pdata->vcharge_max); } static void da9030_battery_setup_psy(struct da9030_charger *charger) { struct power_supply *psy = &charger->psy; struct power_supply_info *info = charger->battery_info; psy->name = info->name; psy->use_for_apm = info->use_for_apm; psy->type = POWER_SUPPLY_TYPE_BATTERY; psy->get_property = da9030_battery_get_property; psy->properties = da9030_battery_props; psy->num_properties = ARRAY_SIZE(da9030_battery_props); }; static int da9030_battery_charger_init(struct da9030_charger *charger) { char v[5]; int ret; v[0] = v[1] = charger->thresholds.vbat_low; v[2] = charger->thresholds.tbat_high; v[3] = charger->thresholds.tbat_restart; v[4] = charger->thresholds.tbat_low; ret = da903x_writes(charger->master, DA9030_VBATMON, 5, v); if (ret) return ret; /* * Enable reference voltage supply for ADC from the LDO_INTERNAL * regulator. Must be set before ADC measurements can be made. 
*/ ret = da903x_write(charger->master, DA9030_ADC_MAN_CONTROL, DA9030_ADC_LDO_INT_ENABLE | DA9030_ADC_TBATREF_ENABLE); if (ret) return ret; /* enable auto ADC measuremnts */ return da903x_write(charger->master, DA9030_ADC_AUTO_CONTROL, DA9030_ADC_TBAT_ENABLE | DA9030_ADC_VBAT_IN_TXON | DA9030_ADC_VCH_ENABLE | DA9030_ADC_ICH_ENABLE | DA9030_ADC_VBAT_ENABLE | DA9030_ADC_AUTO_SLEEP_ENABLE); } static int da9030_battery_probe(struct platform_device *pdev) { struct da9030_charger *charger; struct da9030_battery_info *pdata = pdev->dev.platform_data; int ret; if (pdata == NULL) return -EINVAL; if (pdata->charge_milliamp >= 1500 || pdata->charge_millivolt < 4000 || pdata->charge_millivolt > 4350) return -EINVAL; charger = kzalloc(sizeof(*charger), GFP_KERNEL); if (charger == NULL) return -ENOMEM; charger->master = pdev->dev.parent; /* 10 seconds between monitor runs unless platform defines other interval */ charger->interval = msecs_to_jiffies( (pdata->batmon_interval ? : 10) * 1000); charger->charge_milliamp = pdata->charge_milliamp; charger->charge_millivolt = pdata->charge_millivolt; charger->battery_info = pdata->battery_info; charger->battery_low = pdata->battery_low; charger->battery_critical = pdata->battery_critical; da9030_battery_convert_thresholds(charger, pdata); ret = da9030_battery_charger_init(charger); if (ret) goto err_charger_init; INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor); schedule_delayed_work(&charger->work, charger->interval); charger->nb.notifier_call = da9030_battery_event; ret = da903x_register_notifier(charger->master, &charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); if (ret) goto err_notifier; da9030_battery_setup_psy(charger); ret = power_supply_register(&pdev->dev, &charger->psy); if (ret) goto err_ps_register; charger->debug_file = da9030_bat_create_debugfs(charger); platform_set_drvdata(pdev, charger); return 0; err_ps_register: da903x_unregister_notifier(charger->master, 
&charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); err_notifier: cancel_delayed_work(&charger->work); err_charger_init: kfree(charger); return ret; } static int da9030_battery_remove(struct platform_device *dev) { struct da9030_charger *charger = platform_get_drvdata(dev); da9030_bat_remove_debugfs(charger); da903x_unregister_notifier(charger->master, &charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); cancel_delayed_work_sync(&charger->work); da9030_set_charge(charger, 0); power_supply_unregister(&charger->psy); kfree(charger); return 0; } static struct platform_driver da903x_battery_driver = { .driver = { .name = "da903x-battery", .owner = THIS_MODULE, }, .probe = da9030_battery_probe, .remove = da9030_battery_remove, }; module_platform_driver(da903x_battery_driver); MODULE_DESCRIPTION("DA9030 battery charger driver"); MODULE_AUTHOR("Mike Rapoport, CompuLab"); MODULE_LICENSE("GPL");
gpl-2.0
stupaq/linux-file-tracer
drivers/isdn/i4l/isdn_v110.c
5026
16733
/* $Id: isdn_v110.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
 *
 * Linux ISDN subsystem, V.110 related functions (linklevel).
 *
 * Copyright by Thomas Pfeiffer (pfeiffer@pds.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/isdn.h>
#include "isdn_v110.h"

#undef ISDN_V110_DEBUG

char *isdn_v110_revision = "$Revision: 1.1.2.2 $";

/* Key values: a set bit marks a bit position in an on-wire byte that
 * carries payload.  The popcount of the key is the number of payload
 * bits per byte (nbits), hence 8/nbits on-wire bytes per payload byte. */
#define V110_38400 255
#define V110_19200  15
#define V110_9600    3

/*
 * The following data are precoded matrices, online and offline matrix
 * for 9600, 19200 und 38400, respectively
 */
static unsigned char V110_OnMatrix_9600[] =
{0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff,
 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd,
 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff,
 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd};

static unsigned char V110_OffMatrix_9600[] =
{0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static unsigned char V110_OnMatrix_19200[] =
{0xf0, 0xf0, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7,
 0xfd, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7};

static unsigned char V110_OffMatrix_19200[] =
{0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static unsigned char V110_OnMatrix_38400[] =
{0x00, 0x7f, 0x7f, 0x7f, 0x7f, 0xfd, 0x7f, 0x7f, 0x7f, 0x7f};

static unsigned char V110_OffMatrix_38400[] =
{0x00, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff};

/*
 * FlipBits reorders sequences of keylen bits in one byte.
 * E.g. source order 7654321 will be converted to 45670123 when keylen = 4,
 * and to 67452301 when keylen = 2. This is necessary because ordering on
 * the isdn line is the other way.
 */
static inline unsigned char
FlipBits(unsigned char c, int keylen)
{
	unsigned char b = c;		/* saved input byte */
	unsigned char bit = 128;	/* mask of the current keylen-sized hunk's MSB */
	int i;
	int j;
	int hunks = (8 / keylen);	/* number of keylen-bit groups in a byte */

	c = 0;
	for (i = 0; i < hunks; i++) {
		/* mirror the j-th bit of this hunk within the hunk */
		for (j = 0; j < keylen; j++) {
			if (b & (bit >> j))
				c |= bit >> (keylen - j - 1);
		}
		bit >>= keylen;		/* advance to the next hunk */
	}
	return c;
}


/* isdn_v110_open allocates and initializes private V.110 data
 * structures and returns a pointer to these.
 *
 * key:     one of V110_9600 / V110_19200 / V110_38400 (payload-bit mask)
 * hdrlen:  hard-header length the driver needs reserved in each skb
 * maxsize: maximum skb payload size the driver accepts
 *
 * Returns NULL on allocation failure.  Caller owns the returned stream
 * and must release it with isdn_v110_close().
 */
static isdn_v110_stream *
isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
{
	int i;
	isdn_v110_stream *v;

	if ((v = kzalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
		return NULL;
	v->key = key;
	/* nbits = number of contiguous low bits set in key = payload bits
	 * per on-wire byte */
	v->nbits = 0;
	for (i = 0; key & (1 << i); i++)
		v->nbits++;
	v->nbytes = 8 / v->nbits;	/* on-wire bytes per payload byte */
	v->decodelen = 0;
	switch (key) {
	case V110_38400:
		v->OnlineFrame = V110_OnMatrix_38400;
		v->OfflineFrame = V110_OffMatrix_38400;
		break;
	case V110_19200:
		v->OnlineFrame = V110_OnMatrix_19200;
		v->OfflineFrame = V110_OffMatrix_19200;
		break;
	default:
		/* any other key falls back to the 9600 matrices */
		v->OnlineFrame = V110_OnMatrix_9600;
		v->OfflineFrame = V110_OffMatrix_9600;
		break;
	}
	v->framelen = v->nbytes * 10;	/* one V.110 frame = 10 matrix lines */
	v->SyncInit = 5;		/* number of sync frames to send at start */
	v->introducer = 0;		/* decoder state: inside 10-intro sequence */
	v->dbit = 1;			/* decoder state: next data bit to fill */
	v->b = 0;			/* decoder state: partially decoded byte */
	v->skbres = hdrlen;
	v->maxsize = maxsize - hdrlen;
	if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
		kfree(v);
		return NULL;
	}
	return v;
}


/* isdn_v110_close frees private V.110 data structures */
void
isdn_v110_close(isdn_v110_stream * v)
{
	if (v == NULL)
		return;
#ifdef ISDN_V110_DEBUG
	printk(KERN_DEBUG "v110 close\n");
#endif
	kfree(v->encodebuf);
	kfree(v);
}


/*
 * ValidHeaderBytes return the number of valid bytes in v->decodebuf
 *
 * A "valid" (sync) byte has all key bits clear; scanning stops at the
 * first byte that carries payload bits, or after nbytes bytes.
 */
static int
ValidHeaderBytes(isdn_v110_stream * v)
{
	int i;
	for (i = 0; (i < v->decodelen) && (i < v->nbytes); i++)
		if ((v->decodebuf[i] & v->key) != 0)
			break;
	return i;
}

/*
 * SyncHeader moves the decodebuf ptr to the next valid header
 */
static void
SyncHeader(isdn_v110_stream * v)
{
	unsigned char *rbuf = v->decodebuf;
	int len = v->decodelen;

	if (len == 0)
		return;

	for (rbuf++, len--; len > 0; len--, rbuf++)	/* scan buf for the sync header */
		if ((*rbuf & v->key) == 0)	/* found the first sync byte? */
			break;	/* yes! */

	if (len)
		memcpy(v->decodebuf, rbuf, len);

	v->decodelen = len;
#ifdef ISDN_V110_DEBUG
	printk(KERN_DEBUG "isdn_v110: Header resync\n");
#endif
}

/* DecodeMatrix takes n (n>=1) matrices (v110 frames, 10 bytes)
   where len is the number of matrix-lines. len must be a multiple of 10, i.e.
   only complete matices must be given.
   From these, netto data is extracted and returned in buf. The return-value
   is the bytecount of the decoded data.

   Decoder state (introducer/dbit/b) is carried across calls in *v so a
   payload byte may straddle two frames. */
static int
DecodeMatrix(isdn_v110_stream * v, unsigned char *m, int len, unsigned char *buf)
{
	int line = 0;
	int buflen = 0;
	int mbit = 64;		/* current matrix bit; data occupies bits 64..2 */
	int introducer = v->introducer;
	int dbit = v->dbit;
	unsigned char b = v->b;

	while (line < len) {	/* Are we done with all lines of the matrix? */
		if ((line % 10) == 0) {	/* the 0. line of the matrix is always 0 ! */
			if (m[line] != 0x00) {	/* not 0 ? -> error! */
#ifdef ISDN_V110_DEBUG
				printk(KERN_DEBUG "isdn_v110: DecodeMatrix, V110 Bad Header\n");
				/* returning now is not the right thing, though :-( */
#endif
			}
			line++;	/* next line of matrix */
			continue;
		} else if ((line % 10) == 5) {	/* in line 5 there's only e-bits ! */
			if ((m[line] & 0x70) != 0x30) {	/* 011 has to be at the beginning! */
#ifdef ISDN_V110_DEBUG
				printk(KERN_DEBUG "isdn_v110: DecodeMatrix, V110 Bad 5th line\n");
				/* returning now is not the right thing, though :-( */
#endif
			}
			line++;	/* next line */
			continue;
		} else if (!introducer) {	/* every byte starts with 10 (stopbit, startbit) */
			introducer = (m[line] & mbit) ? 0 : 1;	/* current bit of the matrix */
		      next_byte:
			if (mbit > 2) {	/* was it the last bit in this line ? */
				mbit >>= 1;	/* no -> take next */
				continue;
			}	/* otherwise start with leftmost bit in the next line */
			mbit = 64;
			line++;
			continue;
		} else {	/* otherwise we need to set a data bit */
			if (m[line] & mbit)	/* was that bit set in the matrix ? */
				b |= dbit;	/* yes -> set it in the data byte */
			else
				b &= dbit - 1;	/* no -> clear it in the data byte */
			if (dbit < 128)	/* is that data byte done ? */
				dbit <<= 1;	/* no, got the next bit */
			else {	/* data byte is done */
				buf[buflen++] = b;	/* copy byte into the output buffer */
				introducer = b = 0;	/* init of the intro sequence and of the data byte */
				dbit = 1;	/* next we look for the 0th bit */
			}
			goto next_byte;	/* look for next bit in the matrix */
		}
	}
	/* preserve partial-byte state for the next call */
	v->introducer = introducer;
	v->dbit = dbit;
	v->b = b;
	return buflen;	/* return number of bytes in the output buffer */
}

/*
 * DecodeStream receives V.110 coded data from the input stream. It recovers the
 * original frames.
 * The input stream doesn't need to be framed
 *
 * Consumes skb in all paths: either returns it trimmed to the decoded
 * payload, or frees it and returns NULL when no complete frame is
 * available yet (data is cached in v->decodebuf for the next call).
 */
struct sk_buff *
isdn_v110_decode(isdn_v110_stream * v, struct sk_buff *skb)
{
	int i;
	int j;
	int len;
	unsigned char *v110_buf;
	unsigned char *rbuf;

	if (!skb) {
		printk(KERN_WARNING "isdn_v110_decode called with NULL skb!\n");
		return NULL;
	}
	rbuf = skb->data;
	len = skb->len;
	if (v == NULL) {	/* invalid handle, no chance to proceed */
		printk(KERN_WARNING "isdn_v110_decode called with NULL stream!\n");
		dev_kfree_skb(skb);
		return NULL;
	}
	if (v->decodelen == 0)	/* cache empty? */
		for (; len > 0; len--, rbuf++)	/* scan for SyncHeader in buf */
			if ((*rbuf & v->key) == 0)
				break;	/* found first byte */
	if (len == 0) {
		dev_kfree_skb(skb);
		return NULL;
	}
	/* copy new data to decode-buffer */
	/* NOTE(review): no bound check against sizeof(v->decodebuf) here —
	 * presumably the caller's maxsize guarantees it fits; verify. */
	memcpy(&(v->decodebuf[v->decodelen]), rbuf, len);
	v->decodelen += len;
ReSync:
	if (v->decodelen < v->nbytes) {	/* got a new header ? */
		dev_kfree_skb(skb);
		return NULL;	/* no, try later */
	}
	if (ValidHeaderBytes(v) != v->nbytes) {	/* is that a valid header? */
		SyncHeader(v);	/* no -> look for header */
		goto ReSync;
	}
	/* len = number of condensed on-wire bytes covered by complete
	 * 10-line frames (nbytes wire bytes -> 1 condensed byte) */
	len = (v->decodelen - (v->decodelen % (10 * v->nbytes))) / v->nbytes;
	if ((v110_buf = kmalloc(len, GFP_ATOMIC)) == NULL) {
		printk(KERN_WARNING "isdn_v110_decode: Couldn't allocate v110_buf\n");
		dev_kfree_skb(skb);
		return NULL;
	}
	/* condense: collect the key-masked payload bits of each group of
	 * nbytes wire bytes into one byte, then flip to line bit order */
	for (i = 0; i < len; i++) {
		v110_buf[i] = 0;
		for (j = 0; j < v->nbytes; j++)
			v110_buf[i] |= (v->decodebuf[(i * v->nbytes) + j] & v->key) << (8 - ((j + 1) * v->nbits));
		v110_buf[i] = FlipBits(v110_buf[i], v->nbits);
	}
	/* keep the incomplete-frame remainder for the next call */
	v->decodelen = (v->decodelen % (10 * v->nbytes));
	memcpy(v->decodebuf, &(v->decodebuf[len * v->nbytes]), v->decodelen);

	skb_trim(skb, DecodeMatrix(v, v110_buf, len, skb->data));
	kfree(v110_buf);
	if (skb->len)
		return skb;
	else {
		/* NOTE(review): this path uses kfree_skb while every other
		 * path in this function uses dev_kfree_skb — confirm whether
		 * the distinction is intentional here. */
		kfree_skb(skb);
		return NULL;
	}
}

/* EncodeMatrix takes input data in buf, len is the bytecount.
   Data is encoded into v110 frames in m. Return value is the number of
   matrix-lines generated.

   Each payload byte is emitted LSB-first, preceded by a 0-1-1 introducer
   (start bit plus two stop/fill bits of the async framing). */
static int
EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
{
	int line = 0;
	int i = 0;
	int mbit = 128;
	int dbit = 1;
	int introducer = 3;		/* countdown through ibit[] below */
	int ibit[] = {0, 1, 1};		/* emitted in reverse: 0, then 1, then 1 */

	while ((i < len) && (line < mlen)) {	/* while we still have input data */
		switch (line % 10) {	/* in which line of the matrix are we? */
		case 0:
			m[line++] = 0x00;	/* line 0 is always 0 */
			mbit = 128;	/* go on with the 7th bit */
			break;
		case 5:
			m[line++] = 0xbf;	/* line 5 is always 10111111 */
			mbit = 128;	/* go on with the 7th bit */
			break;
		}
		if (line >= mlen) {
			printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
			return line;
		}
	next_bit:
		switch (mbit) {	/* leftmost or rightmost bit ? */
		case 1:
			line++;	/* rightmost -> go to next line */
			if (line >= mlen) {
				printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
				return line;
			}
			/* fallthrough: start the new line like a fresh one */
		case 128:
			m[line] = 128;	/* leftmost -> set byte to 1000000 */
			mbit = 64;	/* current bit in the matrix line */
			continue;
		}
		if (introducer) {	/* set 110 sequence ? */
			introducer--;	/* set on digit less */
			m[line] |= ibit[introducer] ? mbit : 0;	/* set corresponding bit */
			mbit >>= 1;	/* bit of matrix line >> 1 */
			goto next_bit;	/* and go on there */
		}	/* else push data bits into the matrix! */
		m[line] |= (buf[i] & dbit) ? mbit : 0;	/* set data bit in matrix */
		if (dbit == 128) {	/* was it the last one? */
			dbit = 1;	/* then go on with first bit of */
			i++;	/* next byte in input buffer */
			if (i < len)	/* input buffer done ? */
				introducer = 3;	/* no, write introducer 110 */
			else {	/* input buffer done ! */
				m[line] |= (mbit - 1) & 0xfe;	/* set remaining bits in line to 1 */
				break;
			}
		} else	/* not the last data bit */
			dbit <<= 1;	/* then go to next data bit */
		mbit >>= 1;	/* go to next bit of matrix */
		goto next_bit;

	}
	/* if necessary, generate remaining lines of the matrix... */
	/* (deliberate fallthrough: every case below the entry point runs) */
	if ((line) && ((line + 10) < mlen))
		switch (++line % 10) {
		case 1:
			m[line++] = 0xfe;
		case 2:
			m[line++] = 0xfe;
		case 3:
			m[line++] = 0xfe;
		case 4:
			m[line++] = 0xfe;
		case 5:
			m[line++] = 0xbf;
		case 6:
			m[line++] = 0xfe;
		case 7:
			m[line++] = 0xfe;
		case 8:
			m[line++] = 0xfe;
		case 9:
			m[line++] = 0xfe;
		}
	return line;	/* that's how many lines we have */
}

/*
 * Build a sync frame.
 *
 * Returns a fresh skb holding one precoded OfflineFrame (or NULL if
 * allocation fails); caller owns the skb.
 */
static struct sk_buff *
isdn_v110_sync(isdn_v110_stream *v)
{
	struct sk_buff *skb;

	if (v == NULL) {
		/* invalid handle, no chance to proceed */
		printk(KERN_WARNING "isdn_v110_sync called with NULL stream!\n");
		return NULL;
	}
	if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
		skb_reserve(skb, v->skbres);
		memcpy(skb_put(skb, v->framelen), v->OfflineFrame, v->framelen);
	}
	return skb;
}

/*
 * Build an idle frame.
 *
 * Same as isdn_v110_sync() but copies the precoded OnlineFrame.
 */
static struct sk_buff *
isdn_v110_idle(isdn_v110_stream *v)
{
	struct sk_buff *skb;

	if (v == NULL) {
		/* invalid handle, no chance to proceed */
		/* NOTE(review): message says "_sync" — copy/paste remnant,
		 * kept byte-identical since it is runtime output. */
		printk(KERN_WARNING "isdn_v110_sync called with NULL stream!\n");
		return NULL;
	}
	if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
		skb_reserve(skb, v->skbres);
		memcpy(skb_put(skb, v->framelen), v->OnlineFrame, v->framelen);
	}
	return skb;
}

/*
 * isdn_v110_encode - encode a payload skb into V.110 frames
 *
 * Returns a new skb containing the on-wire byte stream, with the number
 * of consumed input bytes stored in a leading int (pushed in front of
 * the data); the input skb is NOT consumed.  Returns NULL on error.
 */
struct sk_buff *
isdn_v110_encode(isdn_v110_stream * v, struct sk_buff *skb)
{
	int i;
	int j;
	int rlen;
	int mlen;
	int olen;
	int size;
	int sval1;
	int sval2;
	int nframes;
	unsigned char *v110buf;
	unsigned char *rbuf;
	struct sk_buff *nskb;

	if (v == NULL) {	/* invalid handle, no chance to proceed */
		printk(KERN_WARNING "isdn_v110_encode called with NULL stream!\n");
		return NULL;
	}
	if (!skb) {		/* invalid skb, no chance to proceed */
		printk(KERN_WARNING "isdn_v110_encode called with NULL skb!\n");
		return NULL;
	}
	rlen = skb->len;
	nframes = (rlen + 3) / 4;	/* 4 payload bytes per 40-byte frame */
	v110buf = v->encodebuf;
	if ((nframes * 40) > v->maxsize) {
		size = v->maxsize;
		/* NOTE(review): rlen elsewhere counts payload bytes, but here
		 * it is set to a frame count (maxsize/40, not *4) — looks
		 * suspicious; confirm against the V.110 framing before use. */
		rlen = v->maxsize / 40;
	} else
		size = nframes * 40;
	if (!(nskb = dev_alloc_skb(size + v->skbres + sizeof(int)))) {
		printk(KERN_WARNING "isdn_v110_encode: Couldn't alloc skb\n");
		return NULL;
	}
	skb_reserve(nskb, v->skbres + sizeof(int));
	if (skb->len == 0) {
		/* empty input: send a single idle (online) frame */
		memcpy(skb_put(nskb, v->framelen), v->OnlineFrame, v->framelen);
		*((int *)skb_push(nskb, sizeof(int))) = 0;
		return nskb;
	}
	mlen = EncodeMatrix(skb->data, rlen, v110buf, size);
	/* now distribute 2 or 4 bits each to the output stream! */
	rbuf = skb_put(nskb, size);
	olen = 0;
	sval1 = 8 - v->nbits;		/* shift to align a hunk at the LSBs */
	sval2 = v->key << sval1;	/* mask selecting the top hunk */
	for (i = 0; i < mlen; i++) {
		v110buf[i] = FlipBits(v110buf[i], v->nbits);
		for (j = 0; j < v->nbytes; j++) {
			if (size--)
				*rbuf++ = ~v->key | (((v110buf[i] << (j * v->nbits)) & sval2) >> sval1);
			else {
				printk(KERN_WARNING "isdn_v110_encode: buffers full!\n");
				goto buffer_full;
			}
			olen++;
		}
	}
buffer_full:
	skb_trim(nskb, olen);
	/* prepend the count of input bytes actually encoded */
	*((int *)skb_push(nskb, sizeof(int))) = rlen;
	return nskb;
}

/*
 * isdn_v110_stat_callback - react to link-level status events
 *
 * idx: channel index into dev->v110[]; c: the status event.
 * Opens/closes the per-channel V.110 stream on BCONN/BHUP/DHUP and keeps
 * the driver's send queue primed with sync/idle frames on BSENT.
 * Returns 1 from BSENT when only internally generated (idle) frames were
 * acknowledged, 0 otherwise.
 */
int
isdn_v110_stat_callback(int idx, isdn_ctrl *c)
{
	isdn_v110_stream *v = NULL;
	int i;
	int ret = 0;

	if (idx < 0)
		return 0;
	switch (c->command) {
	case ISDN_STAT_BSENT:
		/* Keep the send-queue of the driver filled
		 * with frames:
		 * If number of outstanding frames < 3,
		 * send down an Idle-Frame (or an Sync-Frame, if
		 * v->SyncInit != 0).
		 */
		if (!(v = dev->v110[idx]))
			return 0;
		atomic_inc(&dev->v110use[idx]);
		/* account each acknowledged frame against idle or user count */
		for (i = 0; i * v->framelen < c->parm.length; i++) {
			if (v->skbidle > 0) {
				v->skbidle--;
				ret = 1;
			} else {
				if (v->skbuser > 0)
					v->skbuser--;
				ret = 0;
			}
		}
		/* top the queue back up to 2 outstanding frames */
		for (i = v->skbuser + v->skbidle; i < 2; i++) {
			struct sk_buff *skb;
			if (v->SyncInit > 0)
				skb = isdn_v110_sync(v);
			else
				skb = isdn_v110_idle(v);
			if (skb) {
				if (dev->drv[c->driver]->interface->writebuf_skb(c->driver, c->arg, 1, skb) <= 0) {
					dev_kfree_skb(skb);
					break;
				} else {
					if (v->SyncInit)
						v->SyncInit--;
					v->skbidle++;
				}
			} else
				break;
		}
		atomic_dec(&dev->v110use[idx]);
		return ret;
	case ISDN_STAT_DHUP:
	case ISDN_STAT_BHUP:
		/* spin until we are the only user, then tear the stream down */
		while (1) {
			atomic_inc(&dev->v110use[idx]);
			if (atomic_dec_and_test(&dev->v110use[idx])) {
				isdn_v110_close(dev->v110[idx]);
				dev->v110[idx] = NULL;
				break;
			}
			mdelay(1);
		}
		break;
	case ISDN_STAT_BCONN:
		if (dev->v110emu[idx] && (dev->v110[idx] == NULL)) {
			int hdrlen = dev->drv[c->driver]->interface->hl_hdrlen;
			int maxsize = dev->drv[c->driver]->interface->maxbufsize;
			atomic_inc(&dev->v110use[idx]);
			switch (dev->v110emu[idx]) {
			case ISDN_PROTO_L2_V11096:
				dev->v110[idx] = isdn_v110_open(V110_9600, hdrlen, maxsize);
				break;
			case ISDN_PROTO_L2_V11019:
				dev->v110[idx] = isdn_v110_open(V110_19200, hdrlen, maxsize);
				break;
			case ISDN_PROTO_L2_V11038:
				dev->v110[idx] = isdn_v110_open(V110_38400, hdrlen, maxsize);
				break;
			default:;
			}
			if ((v = dev->v110[idx])) {
				/* prime the link with the initial sync frames */
				while (v->SyncInit) {
					struct sk_buff *skb = isdn_v110_sync(v);
					if (dev->drv[c->driver]->interface->writebuf_skb(c->driver, c->arg, 1, skb) <= 0) {
						dev_kfree_skb(skb);
						/* Unable to send, try later */
						break;
					}
					v->SyncInit--;
					v->skbidle++;
				}
			} else
				printk(KERN_WARNING "isdn_v110: Couldn't open stream for chan %d\n", idx);
			atomic_dec(&dev->v110use[idx]);
		}
		break;
	default:
		return 0;
	}
	return 0;
}
gpl-2.0
byeonggonlee/lynx-jb
drivers/scsi/aic94xx/aic94xx_seq.c
8098
47441
/* * Aic94xx SAS/SATA driver sequencer interface. * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * Parts of this code adapted from David Chaw's adp94xx_seq.c. * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/delay.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include "aic94xx_reg.h" #include "aic94xx_hwi.h" #include "aic94xx_seq.h" #include "aic94xx_dump.h" /* It takes no more than 0.05 us for an instruction * to complete. So waiting for 1 us should be more than * plenty. */ #define PAUSE_DELAY 1 #define PAUSE_TRIES 1000 static const struct firmware *sequencer_fw; static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task, cseq_idle_loop, lseq_idle_loop; static const u8 *cseq_code, *lseq_code; static u32 cseq_code_size, lseq_code_size; static u16 first_scb_site_no = 0xFFFF; static u16 last_scb_site_no; /* ---------- Pause/Unpause CSEQ/LSEQ ---------- */ /** * asd_pause_cseq - pause the central sequencer * @asd_ha: pointer to host adapter structure * * Return 0 on success, negative on failure. 
*/ static int asd_pause_cseq(struct asd_ha_struct *asd_ha) { int count = PAUSE_TRIES; u32 arp2ctl; arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (arp2ctl & PAUSED) return 0; asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (arp2ctl & PAUSED) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't pause CSEQ\n"); return -1; } /** * asd_unpause_cseq - unpause the central sequencer. * @asd_ha: pointer to host adapter structure. * * Return 0 on success, negative on error. */ static int asd_unpause_cseq(struct asd_ha_struct *asd_ha) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (!(arp2ctl & PAUSED)) return 0; asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); if (!(arp2ctl & PAUSED)) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't unpause the CSEQ\n"); return -1; } /** * asd_seq_pause_lseq - pause a link sequencer * @asd_ha: pointer to a host adapter structure * @lseq: link sequencer of interest * * Return 0 on success, negative on error. */ static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (arp2ctl & PAUSED) return 0; asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (arp2ctl & PAUSED) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq); return -1; } /** * asd_pause_lseq - pause the link sequencer(s) * @asd_ha: pointer to host adapter structure * @lseq_mask: mask of link sequencers of interest * * Return 0 on success, negative on failure. 
*/ static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) { int lseq; int err = 0; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_seq_pause_lseq(asd_ha, lseq); if (err) return err; } return err; } /** * asd_seq_unpause_lseq - unpause a link sequencer * @asd_ha: pointer to host adapter structure * @lseq: link sequencer of interest * * Return 0 on success, negative on error. */ static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) { u32 arp2ctl; int count = PAUSE_TRIES; arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (!(arp2ctl & PAUSED)) return 0; asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE); do { arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); if (!(arp2ctl & PAUSED)) return 0; udelay(PAUSE_DELAY); } while (--count > 0); ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq); return 0; } /* ---------- Downloading CSEQ/LSEQ microcode ---------- */ static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size) { u32 addr = CSEQ_RAM_REG_BASE_ADR; const u32 *prog = (u32 *) _prog; u32 i; for (i = 0; i < size; i += 4, prog++, addr += 4) { u32 val = asd_read_reg_dword(asd_ha, addr); if (le32_to_cpu(*prog) != val) { asd_printk("%s: cseq verify failed at %u " "read:0x%x, wanted:0x%x\n", pci_name(asd_ha->pcidev), i, val, le32_to_cpu(*prog)); return -1; } } ASD_DPRINTK("verified %d bytes, passed\n", size); return 0; } /** * asd_verify_lseq - verify the microcode of a link sequencer * @asd_ha: pointer to host adapter structure * @_prog: pointer to the microcode * @size: size of the microcode in bytes * @lseq: link sequencer of interest * * The link sequencer code is accessed in 4 KB pages, which are selected * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register. * The 10 KB LSEQm instruction code is mapped, page at a time, at * LmSEQRAM address. 
*/ static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size, int lseq) { #define LSEQ_CODEPAGE_SIZE 4096 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; u32 page; const u32 *prog = (u32 *) _prog; for (page = 0; page < pages; page++) { u32 i; asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq), page << LmRAMPAGE_LSHIFT); for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE; i += 4, prog++, size-=4) { u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i); if (le32_to_cpu(*prog) != val) { asd_printk("%s: LSEQ%d verify failed " "page:%d, offs:%d\n", pci_name(asd_ha->pcidev), lseq, page, i); return -1; } } } ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq, (int)((u8 *)prog-_prog)); return 0; } /** * asd_verify_seq -- verify CSEQ/LSEQ microcode * @asd_ha: pointer to host adapter structure * @prog: pointer to microcode * @size: size of the microcode * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest * * Return 0 if microcode is correct, negative on mismatch. 
*/ static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog, u32 size, u8 lseq_mask) { if (lseq_mask == 0) return asd_verify_cseq(asd_ha, prog, size); else { int lseq, err; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_verify_lseq(asd_ha, prog, size, lseq); if (err) return err; } } return 0; } #define ASD_DMA_MODE_DOWNLOAD #ifdef ASD_DMA_MODE_DOWNLOAD /* This is the size of the CSEQ Mapped instruction page */ #define MAX_DMA_OVLY_COUNT ((1U << 14)-1) static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 * const prog, u32 size, u8 lseq_mask) { u32 comstaten; u32 reg; int page; const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; struct asd_dma_tok *token; int err = 0; if (size % 4) { asd_printk("sequencer program not multiple of 4\n"); return -1; } asd_pause_cseq(asd_ha); asd_pause_lseq(asd_ha, 0xFF); /* save, disable and clear interrupts */ comstaten = asd_read_reg_dword(asd_ha, COMSTATEN); asd_write_reg_dword(asd_ha, COMSTATEN, 0); asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK); asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK); token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL); if (!token) { asd_printk("out of memory for dma SEQ download\n"); err = -ENOMEM; goto out; } ASD_DPRINTK("dma-ing %d bytes\n", size); for (page = 0; page < pages; page++) { int i; u32 left = min(size-page*MAX_DMA_OVLY_COUNT, (u32)MAX_DMA_OVLY_COUNT); memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left); asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle); asd_write_reg_dword(asd_ha, OVLYDMACNT, left); reg = !page ? RESETOVLYDMA : 0; reg |= (STARTOVLYDMA | OVLYHALTERR); reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); /* Start DMA. 
*/ asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); for (i = PAUSE_TRIES*100; i > 0; i--) { u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL); if (!(dmadone & OVLYDMAACT)) break; udelay(PAUSE_DELAY); } } reg = asd_read_reg_dword(asd_ha, COMSTAT); if (!(reg & OVLYDMADONE) || (reg & OVLYERR) || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){ asd_printk("%s: error DMA-ing sequencer code\n", pci_name(asd_ha->pcidev)); err = -ENODEV; } asd_free_coherent(asd_ha, token); out: asd_write_reg_dword(asd_ha, COMSTATEN, comstaten); return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask); } #else /* ASD_DMA_MODE_DOWNLOAD */ static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog, u32 size, u8 lseq_mask) { int i; u32 reg = 0; const u32 *prog = (u32 *) _prog; if (size % 4) { asd_printk("sequencer program not multiple of 4\n"); return -1; } asd_pause_cseq(asd_ha); asd_pause_lseq(asd_ha, 0xFF); reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); reg |= PIOCMODE; asd_write_reg_dword(asd_ha, OVLYDMACNT, size); asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n", lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : ""); for (i = 0; i < size; i += 4, prog++) asd_write_reg_dword(asd_ha, SPIODATA, *prog); reg = (reg & ~PIOCMODE) | OVLYHALTERR; asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); return asd_verify_seq(asd_ha, _prog, size, lseq_mask); } #endif /* ASD_DMA_MODE_DOWNLOAD */ /** * asd_seq_download_seqs - download the sequencer microcode * @asd_ha: pointer to host adapter structure * * Download the central and link sequencer microcode. 
*/ static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha) { int err; if (!asd_ha->hw_prof.enabled_phys) { asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev)); return -ENODEV; } /* Download the CSEQ */ ASD_DPRINTK("downloading CSEQ...\n"); err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0); if (err) { asd_printk("CSEQ download failed:%d\n", err); return err; } /* Download the Link Sequencers code. All of the Link Sequencers * microcode can be downloaded at the same time. */ ASD_DPRINTK("downloading LSEQs...\n"); err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, asd_ha->hw_prof.enabled_phys); if (err) { /* Try it one at a time */ u8 lseq; u8 lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, 1<<lseq); if (err) break; } } if (err) asd_printk("LSEQs download failed:%d\n", err); return err; } /* ---------- Initializing the chip, chip memory, etc. ---------- */ /** * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7 * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha) { /* CSEQ Mode Independent, page 4 setup. 
*/ asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_REG0, 0); asd_write_reg_word(asd_ha, CSEQ_REG1, 0); asd_write_reg_dword(asd_ha, CSEQ_REG2, 0); asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0); { u8 con = asd_read_reg_byte(asd_ha, CCONEXIST); u8 val = hweight8(con); asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val); } asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0); /* CSEQ Mode independent, page 5 setup. */ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0); asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0); asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0); asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0); /* CSEQ Mode independent, page 6 setup. 
*/ asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0); asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0); asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0); asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0); asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0); asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0); asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0); asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF); /* Calculate the free scb mask. */ { u16 cmdctx = asd_get_cmdctx_size(asd_ha); cmdctx = (~((cmdctx/128)-1)) >> 8; asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx); } asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD, first_scb_site_no); asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL, last_scb_site_no); asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF); /* CSEQ Mode independent, page 7 setup. */ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0); asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0); asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0); asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0); asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0); asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0); } /** * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha) { int i; int moffs; moffs = CSEQ_PAGE_SIZE * 2; /* CSEQ Mode dependent, modes 0-7, page 0 setup. 
*/ for (i = 0; i < 8; i++) { asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0); asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0); asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF); asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF); asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0); } /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */ /* CSEQ Mode dependent, mode 8, page 0 setup. */ asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF); asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0); asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0); asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0); asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0); asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0); asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0); asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0); asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0); asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0); asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE, (u16)last_scb_site_no+1); asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE, (u16)asd_ha->hw_prof.max_ddbs); /* CSEQ Mode dependent, mode 8, page 1 setup. */ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0); asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0); /* CSEQ Mode dependent, mode 8, page 2 setup. */ /* Tell the sequencer the bus address of the first SCB. */ asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER, asd_ha->seq.next_scb.dma_handle); ASD_DPRINTK("First SCB dma_handle: 0x%llx\n", (unsigned long long)asd_ha->seq.next_scb.dma_handle); /* Tell the sequencer the first Done List entry address. 
*/ asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE, asd_ha->seq.actual_dl->dma_handle); /* Initialize the Q_DONE_POINTER with the least significant * 4 bytes of the first Done List address. */ asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER, ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle)); asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE); /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */ } /** * asd_init_cseq_scratch -- setup and init CSEQ * @asd_ha: pointer to host adapter structure * * Setup and initialize Central sequencers. Initialize the mode * independent and dependent scratch page to the default settings. */ static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha) { asd_init_cseq_mip(asd_ha); asd_init_cseq_mdp(asd_ha); } /** * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3 * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq) { int i; /* LSEQ Mode independent page 0 setup. */ asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq); asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq), ASD_NOTIFY_ENABLE_SPINUP); asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000); asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0); /* LSEQ Mode independent page 1 setup. 
*/ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0); /* LSEQ Mode Independent page 2 setup. */ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0); for (i = 0; i < 12; i += 4) asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0); /* LSEQ Mode Independent page 3 setup. 
*/ /* Device present timer timeout */ asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq), ASD_DEV_PRESENT_TIMEOUT); /* SATA interlock timer disabled */ asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq), ASD_SATA_INTERLOCK_TIMEOUT); /* STP shutdown timer timeout constant, IGNORED by the sequencer, * always 0. */ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq), ASD_STP_SHUTDOWN_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq), ASD_SRST_ASSERT_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq), ASD_RCV_FIS_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq), ASD_ONE_MILLISEC_TIMEOUT); /* COM_INIT timer */ asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq), ASD_TEN_MILLISEC_TIMEOUT); asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq), ASD_SMP_RCV_TIMEOUT); } /** * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages. * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) { int i; u32 moffs; u16 ret_addr[] = { 0xFFFF, /* mode 0 */ 0xFFFF, /* mode 1 */ mode2_task, /* mode 2 */ 0, 0xFFFF, /* mode 4/5 */ 0xFFFF, /* mode 4/5 */ }; /* * Mode 0,1,2 and 4/5 have common field on page 0 for the first * 14 bytes. */ for (i = 0; i < 3; i++) { moffs = i * LSEQ_MODE_SCRATCH_SIZE; asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs, ret_addr[i]); asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0); asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0); asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0); } /* * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3. 
*/ asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET, ret_addr[5]); asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); /* LSEQ Mode dependent 0, page 0 setup. */ asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq), (u16)asd_ha->hw_prof.max_ddbs); asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq), (u16)last_scb_site_no+1); asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq), (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16)); asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2, (u16) LmM0INTEN_MASK & 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0); /* LSEQ mode dependent, mode 1, page 0 setup. 
*/ asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF); asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0); /* LSEQ Mode dependent mode 2, page 0 setup */ asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0); /* LSEQ Mode dependent, mode 4/5, page 0 setup. */ asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0); /* * Set the desired interval between transmissions of the NOTIFY * (ENABLE SPINUP) primitive. Must be initialized to val - 1. */ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq), ASD_NOTIFY_TIMEOUT - 1); /* No delay for the first NOTIFY to be sent to the attached target. 
*/ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), ASD_NOTIFY_DOWN_COUNT); asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq), ASD_NOTIFY_DOWN_COUNT); /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ for (i = 0; i < 2; i++) { int j; /* Start from Page 1 of Mode 0 and 1. */ moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; /* All the fields of page 1 can be initialized to 0. */ for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); } /* LSEQ Mode dependent, mode 2, page 1 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0); /* LSEQ Mode dependent, mode 4/5, page 1. */ for (i = 0; i < LSEQ_PAGE_SIZE; i+=4) asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0); asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF); asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF); asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF); /* LSEQ Mode dependent, mode 0, page 2 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0); asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0); asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0); /* LSEQ Mode Dependent 1, page 2 setup. 
*/ asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0); asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0); /* LSEQ Mode Dependent 2, page 2 setup. */ /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer, * i.e. always 0. */ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0); asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0); asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0); /* LSEQ Mode Dependent 4/5, page 2 setup. */ asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0); asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0); } /** * asd_init_lseq_scratch -- setup and init link sequencers * @asd_ha: pointer to host adapter struct */ static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha) { u8 lseq; u8 lseq_mask; lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { asd_init_lseq_mip(asd_ha, lseq); asd_init_lseq_mdp(asd_ha, lseq); } } /** * asd_init_scb_sites -- initialize sequencer SCB sites (memory). * @asd_ha: pointer to host adapter structure * * This should be done before initializing common CSEQ and LSEQ * scratch since those areas depend on some computed values here, * last_scb_site_no, etc. 
*/ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) { u16 site_no; u16 max_scbs = 0; for (site_no = asd_ha->hw_prof.max_scbs-1; site_no != (u16) -1; site_no--) { u16 i; /* Initialize all fields in the SCB site to 0. */ for (i = 0; i < ASD_SCB_SIZE; i += 4) asd_scbsite_write_dword(asd_ha, site_no, i, 0); /* Initialize SCB Site Opcode field to invalid. */ asd_scbsite_write_byte(asd_ha, site_no, offsetof(struct scb_header, opcode), 0xFF); /* Initialize SCB Site Flags field to mean a response * frame has been received. This means inadvertent * frames received to be dropped. */ asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); /* Workaround needed by SEQ to fix a SATA issue is to exclude * certain SCB sites from the free list. */ if (!SCB_SITE_VALID(site_no)) continue; if (last_scb_site_no == 0) last_scb_site_no = site_no; /* For every SCB site, we need to initialize the * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS, * and SG Element Flag. */ /* Q_NEXT field of the last SCB is invalidated. */ asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); first_scb_site_no = site_no; max_scbs++; } asd_ha->hw_prof.max_scbs = max_scbs; ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs); ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no); ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no); } /** * asd_init_cseq_cio - initialize CSEQ CIO registers * @asd_ha: pointer to host adapter structure */ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) { int i; asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0); asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS); asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0); asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0); asd_ha->seq.scbpro = 0; asd_write_reg_dword(asd_ha, SCBPRO, 0); asd_write_reg_dword(asd_ha, CSEQCON, 0); /* Initialize CSEQ Mode 11 Interrupt Vectors. * The addresses are 16 bit wide and in dword units. * The values of their macros are in byte units. * Thus we have to divide by 4. 
*/ asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]); asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]); asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]); /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC); /* Initialize CSEQ Scratch Page to 0x04. */ asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04); /* Initialize CSEQ Mode[0-8] Dependent registers. */ /* Initialize Scratch Page to 0. */ for (i = 0; i < 9; i++) asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0); /* Reset the ARP2 Program Count. */ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); for (i = 0; i < 8; i++) { /* Initialize Mode n Link m Interrupt Enable. */ asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); /* Initialize Mode n Request Mailbox. */ asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); } } /** * asd_init_lseq_cio -- initialize LmSEQ CIO registers * @asd_ha: pointer to host adapter structure */ static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq) { u8 *sas_addr; int i; /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC); asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0); /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */ for (i = 0; i < 3; i++) asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0); /* Initialize Mode 5 SCRATCHPAGE to 0. */ asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0); asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0); /* Initialize Mode 0,1,2 and 5 Interrupt Enable and * Interrupt registers. 
*/ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF); /* Mode 1 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF); /* Mode 2 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF); /* Mode 5 */ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK); asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF); /* Enable HW Timer status. */ asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK); /* Enable Primitive Status 0 and 1. */ asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK); asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK); /* Enable Frame Error. */ asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK); asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50); /* Initialize Mode 0 Transfer Level to 512. */ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512); /* Initialize Mode 1 Transfer Level to 256. */ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256); /* Initialize Program Count. */ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); /* Enable Blind SG Move. */ asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48); asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq), ASD_SATA_INTERLOCK_TIMEOUT); (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq)); /* Clear Primitive Status 0 and 1. */ asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF); asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF); /* Clear HW Timer status. */ asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF); /* Clear DMA Errors for Mode 0 and 1. */ asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF); asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF); /* Clear SG DMA Errors for Mode 0 and 1. 
*/ asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF); asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF); /* Clear Mode 0 Buffer Parity Error. */ asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR); /* Clear Mode 0 Frame Error register. */ asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF); /* Reset LSEQ external interrupt arbiter. */ asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL); /* Set the Phy SAS for the LmSEQ WWN. */ sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr; for (i = 0; i < SAS_ADDR_SIZE; i++) asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]); /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */ asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0); /* Set the Bus Inactivity Time Limit Timer. */ asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9); /* Enable SATA Port Multiplier. */ asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80); /* Initialize Interrupt Vector[0-10] address in Mode 3. * See the comment on CSEQ_INT_* */ asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]); asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]); asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]); asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]); asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]); asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]); asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]); asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]); asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]); asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]); asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]); /* * Program the Link LED control, applicable only for * Chip Rev. B or later. */ asd_write_reg_dword(asd_ha, LmCONTROL(lseq), (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms)); /* Set the Align Rate for SAS and STP mode. 
*/ asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT); asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT); } /** * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox * @asd_ha: pointer to host adapter struct */ static void asd_post_init_cseq(struct asd_ha_struct *asd_ha) { int i; for (i = 0; i < 8; i++) asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF); for (i = 0; i < 8; i++) asd_read_reg_dword(asd_ha, CMnRSPMBX(i)); /* Reset the external interrupt arbiter. */ asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL); } /** * asd_init_ddb_0 -- initialize DDB 0 * @asd_ha: pointer to host adapter structure * * Initialize DDB site 0 which is used internally by the sequencer. */ static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) { int i; /* Zero out the DDB explicitly */ for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4) asd_ddbsite_write_dword(asd_ha, 0, i, 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail), asd_ha->hw_prof.max_ddbs-1); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0); asd_ddbsite_write_word(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh), asd_ha->hw_prof.num_phys * 2); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, 
conn_not_active), 0xFF); asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00); /* DDB 0 is reserved */ set_bit(0, asd_ha->hw_prof.ddb_bitmap); } static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha) { unsigned int i; unsigned int ddb_site; for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++) for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0); } /** * asd_seq_setup_seqs -- setup and initialize central and link sequencers * @asd_ha: pointer to host adapter structure */ static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) { int lseq; u8 lseq_mask; /* Initialize DDB sites */ asd_seq_init_ddb_sites(asd_ha); /* Initialize SCB sites. Done first to compute some values which * the rest of the init code depends on. */ asd_init_scb_sites(asd_ha); /* Initialize CSEQ Scratch RAM registers. */ asd_init_cseq_scratch(asd_ha); /* Initialize LmSEQ Scratch RAM registers. */ asd_init_lseq_scratch(asd_ha); /* Initialize CSEQ CIO registers. */ asd_init_cseq_cio(asd_ha); asd_init_ddb_0(asd_ha); /* Initialize LmSEQ CIO registers. */ lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) asd_init_lseq_cio(asd_ha, lseq); asd_post_init_cseq(asd_ha); } /** * asd_seq_start_cseq -- start the central sequencer, CSEQ * @asd_ha: pointer to host adapter structure */ static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha) { /* Reset the ARP2 instruction to location zero. */ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); /* Unpause the CSEQ */ return asd_unpause_cseq(asd_ha); } /** * asd_seq_start_lseq -- start a link sequencer * @asd_ha: pointer to host adapter structure * @lseq: the link sequencer of interest */ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) { /* Reset the ARP2 instruction to location zero. 
*/ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); /* Unpause the LmSEQ */ return asd_seq_unpause_lseq(asd_ha, lseq); } int asd_release_firmware(void) { if (sequencer_fw) release_firmware(sequencer_fw); return 0; } static int asd_request_firmware(struct asd_ha_struct *asd_ha) { int err, i; struct sequencer_file_header header; const struct sequencer_file_header *hdr_ptr; u32 csum = 0; u16 *ptr_cseq_vecs, *ptr_lseq_vecs; if (sequencer_fw) /* already loaded */ return 0; err = request_firmware(&sequencer_fw, SAS_RAZOR_SEQUENCER_FW_FILE, &asd_ha->pcidev->dev); if (err) return err; hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data; header.csum = le32_to_cpu(hdr_ptr->csum); header.major = le32_to_cpu(hdr_ptr->major); header.minor = le32_to_cpu(hdr_ptr->minor); header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset); header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size); header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset); header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size); header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset); header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size); header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset); header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size); header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task); header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop); header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop); for (i = sizeof(header.csum); i < sequencer_fw->size; i++) csum += sequencer_fw->data[i]; if (csum != header.csum) { asd_printk("Firmware file checksum mismatch\n"); return -EINVAL; } if (header.cseq_table_size != CSEQ_NUM_VECS || header.lseq_table_size != LSEQ_NUM_VECS) { asd_printk("Firmware file table size mismatch\n"); return -EINVAL; } asd_printk("Found sequencer Firmware version %d.%d (%s)\n", header.major, header.minor, hdr_ptr->version); if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) { 
asd_printk("Firmware Major Version Mismatch;" "driver requires version %d.X", SAS_RAZOR_SEQUENCER_FW_MAJOR); return -EINVAL; } ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset]; ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset]; mode2_task = header.mode2_task; cseq_idle_loop = header.cseq_idle_loop; lseq_idle_loop = header.lseq_idle_loop; for (i = 0; i < CSEQ_NUM_VECS; i++) cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]); for (i = 0; i < LSEQ_NUM_VECS; i++) lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]); cseq_code = &sequencer_fw->data[header.cseq_code_offset]; cseq_code_size = header.cseq_code_size; lseq_code = &sequencer_fw->data[header.lseq_code_offset]; lseq_code_size = header.lseq_code_size; return 0; } int asd_init_seqs(struct asd_ha_struct *asd_ha) { int err; err = asd_request_firmware(asd_ha); if (err) { asd_printk("Failed to load sequencer firmware file %s, error %d\n", SAS_RAZOR_SEQUENCER_FW_FILE, err); return err; } err = asd_seq_download_seqs(asd_ha); if (err) { asd_printk("couldn't download sequencers for %s\n", pci_name(asd_ha->pcidev)); return err; } asd_seq_setup_seqs(asd_ha); return 0; } int asd_start_seqs(struct asd_ha_struct *asd_ha) { int err; u8 lseq_mask; int lseq; err = asd_seq_start_cseq(asd_ha); if (err) { asd_printk("couldn't start CSEQ for %s\n", pci_name(asd_ha->pcidev)); return err; } lseq_mask = asd_ha->hw_prof.enabled_phys; for_each_sequencer(lseq_mask, lseq_mask, lseq) { err = asd_seq_start_lseq(asd_ha, lseq); if (err) { asd_printk("coudln't start LSEQ %d for %s\n", lseq, pci_name(asd_ha->pcidev)); return err; } } return 0; } /** * asd_update_port_links -- update port_map_by_links and phy_is_up * @sas_phy: pointer to the phy which has been added to a port * * 1) When a link reset has completed and we got BYTES DMAED with a * valid frame we call this function for that phy, to indicate that * the phy is up, i.e. we update the phy_is_up in DDB 0. 
The * sequencer checks phy_is_up when pending SCBs are to be sent, and * when an open address frame has been received. * * 2) When we know of ports, we call this function to update the map * of phys participaing in that port, i.e. we update the * port_map_by_links in DDB 0. When a HARD_RESET primitive has been * received, the sequencer disables all phys in that port. * port_map_by_links is also used as the conn_mask byte in the * initiator/target port DDB. */ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy) { const u8 phy_mask = (u8) phy->asd_port->phy_mask; u8 phy_is_up; u8 mask; int i, err; unsigned long flags; spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); for_each_phy(phy_mask, mask, i) asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, port_map_by_links)+i,phy_mask); for (i = 0; i < 12; i++) { phy_is_up = asd_ddbsite_read_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up)); err = asd_ddbsite_update_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, phy_is_up), phy_is_up, phy_is_up | phy_mask); if (!err) break; else if (err == -EFAULT) { asd_printk("phy_is_up: parity error in DDB 0\n"); break; } } spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); if (err) asd_printk("couldn't update DDB 0:error:%d\n", err); } MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
gpl-2.0
percy-g2/bbbandroid-kernel
arch/alpha/kernel/sys_eiger.c
8098
5516
/* * linux/arch/alpha/kernel/sys_eiger.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996, 1999 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * Copyright (C) 1999 Iain Grant * * Code supporting the EIGER (EV6+TSUNAMI). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pci.h> #include <asm/pgtable.h> #include <asm/core_tsunami.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note that this interrupt code is identical to TAKARA. */ /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; static inline void eiger_update_irq_hw(unsigned long irq, unsigned long mask) { int regaddr; mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); outl(mask & 0xffff0000UL, regaddr); } static inline void eiger_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); eiger_update_irq_hw(irq, mask); } static void eiger_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); eiger_update_irq_hw(irq, mask); } static struct irq_chip eiger_irq_type = { .name = "EIGER", .irq_unmask = eiger_enable_irq, .irq_mask = eiger_disable_irq, .irq_mask_ack = eiger_disable_irq, }; static void eiger_device_interrupt(unsigned long vector) { unsigned intstatus; /* * The PALcode will have passed us vectors 0x800 or 0x810, * which are fairly arbitrary values and serve only to tell * us whether an interrupt has come in on IRQ0 or IRQ1. 
If * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's * probably ISA, but PCI interrupts can come through IRQ0 * as well if the interrupt controller isn't in accelerated * mode. * * OTOH, the accelerator thing doesn't seem to be working * overly well, so what we'll do instead is try directly * examining the Master Interrupt Register to see if it's a * PCI interrupt, and if _not_ then we'll pass it on to the * ISA handler. */ intstatus = inw(0x500) & 15; if (intstatus) { /* * This is a PCI interrupt. Check each bit and * despatch an interrupt if it's set. */ if (intstatus & 8) handle_irq(16+3); if (intstatus & 4) handle_irq(16+2); if (intstatus & 2) handle_irq(16+1); if (intstatus & 1) handle_irq(16+0); } else { isa_device_interrupt(vector); } } static void eiger_srm_device_interrupt(unsigned long vector) { int irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init eiger_init_irq(void) { long i; outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = eiger_srm_device_interrupt; for (i = 16; i < 128; i += 16) eiger_update_irq_hw(i, -1); init_i8259a_irqs(); for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } static int __init eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u8 irq_orig; /* The SRM console has already calculated out the IRQ value's for option cards. As this works lets just read in the value already set and change it to a useable value by Linux. All the IRQ values generated by the console are greater than 90, so we subtract 80 because it is (90 - allocated ISA IRQ's). */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig); return irq_orig - 0x80; } static u8 __init eiger_swizzle(struct pci_dev *dev, u8 *pinp) { struct pci_controller *hose = dev->sysdata; int slot, pin = *pinp; int bridge_count = 0; /* Find the number of backplane bridges. 
*/ int backplane = inw(0x502) & 0x0f; switch (backplane) { case 0x00: bridge_count = 0; break; /* No bridges */ case 0x01: bridge_count = 1; break; /* 1 */ case 0x03: bridge_count = 2; break; /* 2 */ case 0x07: bridge_count = 3; break; /* 3 */ case 0x0f: bridge_count = 4; break; /* 4 */ }; slot = PCI_SLOT(dev->devfn); while (dev->bus->self) { /* Check for built-in bridges on hose 0. */ if (hose->index == 0 && (PCI_SLOT(dev->bus->self->devfn) > 20 - bridge_count)) { slot = PCI_SLOT(dev->devfn); break; } /* Must be a card-based bridge. */ pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; } *pinp = pin; return slot; } /* * The System Vectors */ struct alpha_machine_vector eiger_mv __initmv = { .vector_name = "Eiger", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 128, .device_interrupt = eiger_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = eiger_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = eiger_map_irq, .pci_swizzle = eiger_swizzle, }; ALIAS_MV(eiger)
gpl-2.0
chrisc93/android_kernel_samsung_jf
drivers/input/keyboard/xtkbd.c
9890
4779
/*
 * Copyright (c) 1999-2001 Vojtech Pavlik
 */

/*
 * XT keyboard driver for Linux
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/init.h>
#include <linux/serio.h>

#define DRIVER_DESC	"XT keyboard driver"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/*
 * XT scancode protocol: 0xe0/0xe1 are extended-code prefix bytes,
 * the low 7 bits select the key, and bit 7 set marks a release.
 */
#define XTKBD_EMUL0	0xe0
#define XTKBD_EMUL1	0xe1
#define XTKBD_KEY	0x7f
#define XTKBD_RELEASE	0x80

/* Scancode (0..255) -> Linux input keycode; 0 means "no key mapped". */
static unsigned char xtkbd_keycode[256] = {
	  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
	 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
	 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
	 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	 80, 81, 82, 83,  0,  0,  0, 87, 88,  0,  0,  0,  0,  0,  0,  0,
	  0,  0,  0,  0,  0, 87, 88,  0,  0,  0,  0,110,111,103,108,105,
	106
};

/* Per-keyboard instance state, allocated in xtkbd_connect(). */
struct xtkbd {
	unsigned char keycode[256];	/* private copy, exposed via input_dev->keycode */
	struct input_dev *dev;
	struct serio *serio;
	char phys[32];			/* "<serio phys>/input0" */
};

/*
 * Serio byte-receive callback: translate one scancode byte into an
 * input key event.  Prefix bytes (0xe0/0xe1) are silently skipped --
 * the extended code that follows is looked up like a plain scancode.
 */
static irqreturn_t xtkbd_interrupt(struct serio *serio,
	unsigned char data, unsigned int flags)
{
	struct xtkbd *xtkbd = serio_get_drvdata(serio);

	switch (data) {
	case XTKBD_EMUL0:
	case XTKBD_EMUL1:
		break;
	default:

		if (xtkbd->keycode[data & XTKBD_KEY]) {
			input_report_key(xtkbd->dev,
					 xtkbd->keycode[data & XTKBD_KEY],
					 !(data & XTKBD_RELEASE));
			input_sync(xtkbd->dev);
		} else {
			printk(KERN_WARNING "xtkbd.c: Unknown key (scancode %#x) %s.\n",
				data & XTKBD_KEY,
				data & XTKBD_RELEASE ? "released" : "pressed");
		}
	}
	return IRQ_HANDLED;
}

/*
 * Bind a new XT keyboard port: allocate driver state and an input
 * device, populate the keymap and capability bits, then register.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released again.
 */
static int xtkbd_connect(struct serio *serio, struct serio_driver *drv)
{
	struct xtkbd *xtkbd;
	struct input_dev *input_dev;
	int err = -ENOMEM;
	int i;

	xtkbd = kmalloc(sizeof(struct xtkbd), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!xtkbd || !input_dev)
		goto fail1;

	xtkbd->serio = serio;
	xtkbd->dev = input_dev;
	snprintf(xtkbd->phys, sizeof(xtkbd->phys), "%s/input0", serio->phys);
	memcpy(xtkbd->keycode, xtkbd_keycode, sizeof(xtkbd->keycode));

	input_dev->name = "XT Keyboard";
	input_dev->phys = xtkbd->phys;
	input_dev->id.bustype = BUS_XTKBD;
	input_dev->id.vendor  = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &serio->dev;
	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);

	input_dev->keycode = xtkbd->keycode;
	input_dev->keycodesize = sizeof(unsigned char);
	input_dev->keycodemax = ARRAY_SIZE(xtkbd_keycode);

	/*
	 * Walk the whole table; the old code stopped at 254 even though
	 * keycodemax above is 256, silently dropping the last entry.
	 */
	for (i = 0; i < ARRAY_SIZE(xtkbd_keycode); i++)
		set_bit(xtkbd->keycode[i], input_dev->keybit);
	clear_bit(0, input_dev->keybit);

	serio_set_drvdata(serio, xtkbd);

	err = serio_open(serio, drv);
	if (err)
		goto fail2;

	err = input_register_device(xtkbd->dev);
	if (err)
		goto fail3;

	return 0;

 fail3:	serio_close(serio);
 fail2:	serio_set_drvdata(serio, NULL);
 fail1:	input_free_device(input_dev);	/* both helpers tolerate NULL */
	kfree(xtkbd);
	return err;
}

/* Tear down a keyboard port: close serio, unregister and free state. */
static void xtkbd_disconnect(struct serio *serio)
{
	struct xtkbd *xtkbd = serio_get_drvdata(serio);

	serio_close(serio);
	serio_set_drvdata(serio, NULL);
	input_unregister_device(xtkbd->dev);
	kfree(xtkbd);
}

/* Match any device on an XT-protocol serio port. */
static struct serio_device_id xtkbd_serio_ids[] = {
	{
		.type	= SERIO_XT,
		.proto	= SERIO_ANY,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(serio, xtkbd_serio_ids);

static struct serio_driver xtkbd_drv = {
	.driver		= {
		.name	= "xtkbd",
	},
	.description	= DRIVER_DESC,
	.id_table	= xtkbd_serio_ids,
	.interrupt	= xtkbd_interrupt,
	.connect	= xtkbd_connect,
	.disconnect	= xtkbd_disconnect,
};

static int __init xtkbd_init(void)
{
	return serio_register_driver(&xtkbd_drv);
}

static void __exit xtkbd_exit(void)
{
	serio_unregister_driver(&xtkbd_drv);
}

module_init(xtkbd_init);
module_exit(xtkbd_exit);
gpl-2.0
zefie/nxt_andx86_kernel
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
163
6782
/*
 * Copyright 2015 Freescale Semiconductor, Inc.
 *
 * Freescale DCU drm device driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/regmap.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "fsl_dcu_drm_drv.h"
#include "fsl_dcu_drm_plane.h"

/*
 * Map a DRM plane index to a DCU layer number.  DCU layers are numbered
 * in the opposite order from DRM plane indices, hence the reversal.
 * Returns the layer index, or -EINVAL when the index is out of range.
 */
static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
{
	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
	unsigned int total_layer = fsl_dev->soc->total_layer;
	unsigned int index;

	index = drm_plane_index(plane);
	if (index < total_layer)
		return total_layer - index - 1;

	dev_err(fsl_dev->dev, "No more layer left\n");
	return -EINVAL;
}

/*
 * Validate a plane state: only pixel formats the hardware layer
 * controller understands are accepted.  The format list here must stay
 * in sync with fsl_dcu_drm_plane_formats[] below.
 */
static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
{
	struct drm_framebuffer *fb = state->fb;

	/* A disable request carries no framebuffer - nothing to check. */
	if (!fb)
		return 0;

	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_ARGB8888:
	/* was DRM_FORMAT_BGRA4444: mismatched the advertised format list,
	 * so the advertised ARGB4444 was always rejected with -EINVAL */
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_YUV422:
		return 0;
	default:
		return -EINVAL;
	}
}

/* Clear the layer-enable bit in CTRLDESCLN(index, 4) to hide the plane. */
static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
					     struct drm_plane_state *old_state)
{
	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
	unsigned int value;
	int index, ret;

	index = fsl_dcu_drm_plane_index(plane);
	if (index < 0)
		return;

	ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
	if (ret) {
		/* was: continued with an uninitialized 'value' (UB) and
		 * reported the wrong register name */
		dev_err(fsl_dev->dev, "read DCU_CTRLDESCLN failed\n");
		return;
	}
	value &= ~DCU_LAYER_EN;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
	if (ret)
		dev_err(fsl_dev->dev, "set DCU register failed\n");
}

/*
 * Program the DCU layer descriptor registers for the new plane state:
 * geometry, scanout address, pixel format, chroma key range, then latch
 * everything with a READREG update.
 */
static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)

{
	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = plane->state->fb;
	struct drm_gem_cma_object *gem;
	unsigned int alpha, bpp;
	int index, ret;

	if (!fb)
		return;

	index = fsl_dcu_drm_plane_index(plane);
	if (index < 0)
		return;

	gem = drm_fb_cma_get_gem_obj(fb, 0);

	/* All supported formats are scanned out fully opaque. */
	alpha = 0xff;
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
		bpp = FSL_DCU_RGB565;
		break;
	case DRM_FORMAT_RGB888:
		bpp = FSL_DCU_RGB888;
		break;
	case DRM_FORMAT_ARGB8888:
		bpp = FSL_DCU_ARGB8888;
		break;
	case DRM_FORMAT_ARGB4444:	/* was BGRA4444 - see atomic_check */
		bpp = FSL_DCU_ARGB4444;
		break;
	case DRM_FORMAT_ARGB1555:
		bpp = FSL_DCU_ARGB1555;
		break;
	case DRM_FORMAT_YUV422:
		bpp = FSL_DCU_YUV422;
		break;
	default:
		return;
	}

	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
			   DCU_LAYER_HEIGHT(state->crtc_h) |
			   DCU_LAYER_WIDTH(state->crtc_w));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
			   DCU_LAYER_POSY(state->crtc_y) |
			   DCU_LAYER_POSX(state->crtc_x));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap,
			   DCU_CTRLDESCLN(index, 3), gem->paddr);
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
			   DCU_LAYER_EN |
			   DCU_LAYER_TRANS(alpha) |
			   DCU_LAYER_BPP(bpp) |
			   DCU_LAYER_AB(0));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
			   DCU_LAYER_CKMAX_R(0xFF) |
			   DCU_LAYER_CKMAX_G(0xFF) |
			   DCU_LAYER_CKMAX_B(0xFF));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
			   DCU_LAYER_CKMIN_R(0) |
			   DCU_LAYER_CKMIN_G(0) |
			   DCU_LAYER_CKMIN_B(0));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
			   DCU_LAYER_FG_FCOLOR(0));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
			   DCU_LAYER_BG_BCOLOR(0));
	if (ret)
		goto set_failed;

	/* ls1021a has two extra per-layer skip registers. */
	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
		ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
				   DCU_LAYER_POST_SKIP(0) |
				   DCU_LAYER_PRE_SKIP(0));
		if (ret)
			goto set_failed;
	}
	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
				 DCU_MODE_DCU_MODE_MASK,
				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
	if (ret)
		goto set_failed;
	ret = regmap_write(fsl_dev->regmap,
			   DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
	if (ret)
		goto set_failed;
	return;

set_failed:
	dev_err(fsl_dev->dev, "set DCU register failed\n");
}

/* CMA framebuffers need no per-commit teardown. */
static void
fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
			     const struct drm_plane_state *new_state)
{
}

/* CMA framebuffers need no per-commit preparation. */
static int
fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
			     const struct drm_plane_state *new_state)
{
	return 0;
}

static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
	.atomic_check = fsl_dcu_drm_plane_atomic_check,
	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
	.atomic_update = fsl_dcu_drm_plane_atomic_update,
	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
};

static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.destroy = fsl_dcu_drm_plane_destroy,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = drm_atomic_helper_plane_reset,
	.update_plane = drm_atomic_helper_update_plane,
};

/* Formats advertised to userspace; must match atomic_check above. */
static const u32 fsl_dcu_drm_plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_YUV422,
};

/*
 * Allocate and register the primary plane.  Returns the plane, or NULL
 * on allocation or init failure.
 */
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
{
	struct drm_plane *primary;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
		return NULL;
	}

	/* possible_crtc's will be filled in later by crtc_init */
	ret = drm_universal_plane_init(dev, primary, 0,
				       &fsl_dcu_drm_plane_funcs,
				       fsl_dcu_drm_plane_formats,
				       ARRAY_SIZE(fsl_dcu_drm_plane_formats),
				       DRM_PLANE_TYPE_PRIMARY);
	if (ret) {
		/* was: fell through and called drm_plane_helper_add()
		 * on the just-freed, NULLed pointer -> NULL deref */
		kfree(primary);
		return NULL;
	}
	drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);

	return primary;
}
gpl-2.0
Motorhead1991/android_kernel_samsung_geim
drivers/net/sunbmac.c
163
34117
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
 *
 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/auxio.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/idprom.h>
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#include "sunbmac.h"

#define DRV_NAME	"sunbmac"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
MODULE_LICENSE("GPL");

#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ

#ifdef DEBUG_PROBE
#define DP(x)  printk x
#else
#define DP(x)
#endif

#ifdef DEBUG_TX
#define DTX(x)  printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)  printk x
#else
#define DIRQ(x)
#endif

#define DEFAULT_JAMSIZE    4 /* Toe jam */

#define QEC_RESET_TRIES 200

/* Reset the shared QEC front-end; poll until the self-clearing reset
 * bit drops.  Returns 0 on success, -1 if the chip never comes back.
 */
static int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
	return -1;
}

/* Program QEC burst size, packet size and local-memory split (all of
 * the QEC SRAM goes to the BigMAC, half TX / half RX).
 */
static void qec_init(struct bigmac *bp)
{
	struct platform_device *qec_op = bp->qec_op;
	void __iomem *gregs = bp->gregs;
	u8 bsizes = bp->bigmac_bursts;
	u32 regval;

	/* 64byte bursts do not work at the moment, do
	 * not even try to enable them.  -DaveM
	 */
	if (bsizes & DMA_BURST32)
		regval = GLOB_CTRL_B32;
	else
		regval = GLOB_CTRL_B16;
	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);

	/* All of memsize is given to bigmac. */
	sbus_writel(resource_size(&qec_op->resource[1]),
		    gregs + GLOB_MSIZE);

	/* Half to the transmitter, half to the receiver. */
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_TSIZE);
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_RSIZE);
}

#define TX_RESET_TRIES     32
#define RX_RESET_TRIES     32

/* Reset the transmitter by clearing TXCFG and polling it back to zero
 * (ignoring the sticky FIFO-threshold bits).
 */
static void bigmac_tx_reset(void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_TXCFG);

	/* The fifo threshold bit is read-only and does
	 * not clear.  -DaveM
	 */
	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
	       --tries != 0)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_TXCFG));
	}
}

/* Reset the receiver by clearing RXCFG and polling it back to zero. */
static void bigmac_rx_reset(void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_RXCFG);
	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_RXCFG));
	}
}

/* Reset the transmitter and receiver. */
static void bigmac_stop(struct bigmac *bp)
{
	bigmac_tx_reset(bp->bregs);
	bigmac_rx_reset(bp->bregs);
}

/* Fold the hardware error counters into net_device_stats and clear
 * them (the counters are clear-on-write).
 */
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
	struct net_device_stats *stats = &bp->enet_stats;

	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
	sbus_writel(0, bregs + BMAC_RCRCECTR);

	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
	sbus_writel(0, bregs + BMAC_UNALECTR);

	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
	sbus_writel(0, bregs + BMAC_GLECTR);

	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);

	stats->collisions +=
		(sbus_readl(bregs + BMAC_EXCTR) +
		 sbus_readl(bregs + BMAC_LTCTR));
	sbus_writel(0, bregs + BMAC_EXCTR);
	sbus_writel(0, bregs + BMAC_LTCTR);
}

/* Release every skb still referenced by the RX and TX rings. */
static void bigmac_clean_rings(struct bigmac *bp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (bp->rx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->rx_skbs[i]);
			bp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (bp->tx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->tx_skbs[i]);
			bp->tx_skbs[i] = NULL;
		}
	}
}

/* (Re)build the descriptor rings: drop old buffers, allocate and
 * DMA-map fresh RX skbs, zero the TX descriptors.
 */
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	struct net_device *dev = bp->dev;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	bigmac_clean_rings(bp);

	/* Now get new skbufs for the receive ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;
		skb->dev = dev;

		/* Because we reserve afterwards. */
		/* NOTE(review): 34-byte headroom presumably keeps the IP
		 * header aligned past the 14-byte ethernet header -- confirm
		 * against sunbmac.h before relying on it. */
		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			dma_map_single(&bp->bigmac_op->dev,
				       skb->data,
				       RX_BUF_ALLOC_SIZE - 34,
				       DMA_FROM_DEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}

/* MII management-PAL bit patterns: clock high / clock low with both
 * MDIO lines released and output enabled.
 */
#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)

/* Clock the management interface idle (20 clock cycles) to resync the
 * bit-banged MII state machine.
 */
static void idle_transceiver(void __iomem *tregs)
{
	int i = 20;

	while (i--) {
		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	}
}

/* Bit-bang one MDIO output bit; the bit position and companion MDIO
 * line depend on whether the internal or external PHY is in use.  The
 * readback after each write flushes the SBUS posted write.
 */
static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
{
	if (bp->tcvr_type == internal) {
		bit = (bit & 1) << 3;
		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		bit = (bit & 1) << 2;
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
	}
}

/* Bit-bang one MDIO input bit, sampling after the clock edge. */
static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
	} else {
		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
	}
	return retval;
}

/* Like read_tcvr_bit(), but samples before the clock edge instead of
 * after -- used for the external-PHY read timing in bigmac_tcvr_read().
 */
static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
	}
	return retval;
}

/* Shift out the low 5 bits of 'byte', MSB first (PHY/register fields
 * of the MII management frame are 5 bits wide).
 */
static void put_tcvr_byte(struct bigmac *bp,
			  void __iomem *tregs,
			  unsigned int byte)
{
	int shift = 4;

	do {
		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-bang a full MII write frame (start, opcode 01, phy, reg,
 * turnaround, 16 data bits) to the selected transceiver.
 */
static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
			      int reg, unsigned short val)
{
	int shift;

	reg &= 0xff;
	val &= 0xffff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		/* NOTE(review): message says "tcvr_read" but this is the
		 * write path -- kept byte-identical here. */
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	shift = 15;
	do {
		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-bang a full MII read frame (start, opcode 10, phy, reg) and
 * clock in the 16 data bits; the two read-bit helpers differ only in
 * sampling edge, chosen per transceiver type.
 */
static unsigned short bigmac_tcvr_read(struct bigmac *bp,
				       void __iomem *tregs,
				       int reg)
{
	unsigned short retval = 0;

	reg &= 0xff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return 0xffff;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	if (bp->tcvr_type == external) {
		int shift = 15;

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit2(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
	} else {
		int shift = 15;

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
	}
	return retval;
}

/* Detect whether the internal or external transceiver is present and
 * configure the transceiver PAL accordingly.
 */
static void bigmac_tcvr_init(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	u32 mpal;

	idle_transceiver(tregs);
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
		    tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);

	/* Only the bit for the present transceiver (internal or
	 * external) will stick, set them both and see what stays.
	 */
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);
	udelay(20);

	mpal = sbus_readl(tregs + TCVR_MPAL);
	if (mpal & MGMT_PAL_EXT_MDIO) {
		bp->tcvr_type = external;
		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE |
			      TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else if (mpal & MGMT_PAL_INT_MDIO) {
		bp->tcvr_type = internal;
		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else {
		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
		       "external MDIO available!\n");
		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
		       sbus_readl(tregs + TCVR_MPAL),
		       sbus_readl(tregs + TCVR_TPAL));
	}
}

static int bigmac_init_hw(struct bigmac *, int);

/* Link auto-sensing: if we were trying 100baseT, reset the PHY and
 * fall back to 10baseT.  Returns 0 if another speed remains to try,
 * -1 when all permutations are exhausted.
 */
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
	if (bp->sw_bmcr & BMCR_SPEED100) {
		int timeout;

		/* Reset the PHY. */
		bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		bp->sw_bmcr	= (BMCR_RESET);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

		timeout = 64;
		while (--timeout) {
			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
			if ((bp->sw_bmcr & BMCR_RESET) == 0)
				break;
			udelay(20);
		}
		if (timeout == 0)
			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

		/* Now we try 10baseT. */
		bp->sw_bmcr &= ~(BMCR_SPEED100);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		return 0;
	}

	/* We've tried them all. */
	return -1;
}

/* Link-sense timer: poll BMSR for link-up while in ltrywait state,
 * stepping down through speeds via try_next_permutation() every 4
 * ticks; re-arms itself (1.2s period) until link or exhaustion.
 */
static void bigmac_timer(unsigned long data)
{
	struct bigmac *bp = (struct bigmac *) data;
	void __iomem *tregs = bp->tregs;
	int restart_timer = 0;

	bp->timer_ticks++;
	if (bp->timer_state == ltrywait) {
		bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if (bp->sw_bmsr & BMSR_LSTATUS) {
			printk(KERN_INFO "%s: Link is now up at %s.\n",
			       bp->dev->name,
			       (bp->sw_bmcr & BMCR_SPEED100) ?
			       "100baseT" : "10baseT");
			bp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (bp->timer_ticks >= 4) {
				int ret;

				ret = try_next_permutation(bp, tregs);
				if (ret == -1) {
					printk(KERN_ERR "%s: Link down, cable problem?\n",
					       bp->dev->name);
					ret = bigmac_init_hw(bp, 0);
					if (ret) {
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "BigMAC.\n", bp->dev->name);
					}
					return;
				}
				bp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
	} else {
		/* Can't happens.... */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       bp->dev->name);
		restart_timer = 0;
		bp->timer_ticks = 0;
		bp->timer_state = asleep; /* foo on you */
	}

	if (restart_timer != 0) {
		bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&bp->bigmac_timer);
	}
}

/* Well, really we just force the chip into 100baseT then
 * 10baseT, each time checking for a link status.
 */
static void bigmac_begin_auto_negotiation(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	int timeout;

	/* Grab new software copies of PHY registers. */
	bp->sw_bmsr	= bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
	bp->sw_bmcr	= bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

	/* Reset the PHY. */
	bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
	bp->sw_bmcr	= (BMCR_RESET);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	timeout = 64;
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

	/* First we try 100baseT. */
	bp->sw_bmcr |= BMCR_SPEED100;
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	bp->timer_state = ltrywait;
	bp->timer_ticks = 0;
	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
	bp->bigmac_timer.data = (unsigned long) bp;
	bp->bigmac_timer.function = bigmac_timer;
	add_timer(&bp->bigmac_timer);
}

/* Full hardware (re)initialization: reset QEC + BigMAC, rebuild the
 * rings, program MAC address / hash table / FIFO pointers / IRQ masks,
 * enable TX+RX and kick off link auto-sensing.  Returns 0.
 */
static int bigmac_init_hw(struct bigmac *bp, int from_irq)
{
	void __iomem *gregs        = bp->gregs;
	void __iomem *cregs        = bp->creg;
	void __iomem *bregs        = bp->bregs;
	unsigned char *e = &bp->dev->dev_addr[0];

	/* Latch current counters into statistics. */
	bigmac_get_counters(bp, bregs);

	/* Reset QEC. */
	qec_global_reset(gregs);

	/* Init QEC. */
	qec_init(bp);

	/* Alloc and reset the tx/rx descriptor chains. */
	bigmac_init_rings(bp, from_irq);

	/* Initialize the PHY. */
	bigmac_tcvr_init(bp);

	/* Stop transmitter and receiver. */
	bigmac_stop(bp);

	/* Set hardware ethernet address. */
	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);

	/* Clear the hash table until mc upload occurs. */
	sbus_writel(0, bregs + BMAC_HTABLE3);
	sbus_writel(0, bregs + BMAC_HTABLE2);
	sbus_writel(0, bregs + BMAC_HTABLE1);
	sbus_writel(0, bregs + BMAC_HTABLE0);

	/* Enable Big Mac hash table filter. */
	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
		    bregs + BMAC_RXCFG);
	udelay(20);

	/* Ok, configure the Big Mac transmitter. */
	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);

	/* The HME docs recommend to use the 10LSB of our MAC here. */
	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
		    bregs + BMAC_RSEED);

	/* Enable the output drivers no matter what. */
	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
		    bregs + BMAC_XIFCFG);

	/* Tell the QEC where the ring descriptors are. */
	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
		    cregs + CREG_RXDS);
	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
		    cregs + CREG_TXDS);

	/* Setup the FIFO pointers into QEC local memory. */
	sbus_writel(0, cregs + CREG_RXRBUFPTR);
	sbus_writel(0, cregs + CREG_RXWBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXRBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXWBUFPTR);

	/* Tell bigmac what interrupts we don't want to hear about. */
	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
		    bregs + BMAC_IMASK);

	/* Enable the various other irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(0, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(0, cregs + CREG_BMASK);

	/* Set jam size to a reasonable default. */
	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);

	/* Clear collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* Enable transmitter and receiver. */
	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
		    bregs + BMAC_TXCFG);
	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
		    bregs + BMAC_RXCFG);

	/* Ok, start detecting link speed/duplex. */
	bigmac_begin_auto_negotiation(bp);

	/* Success. */
	return 0;
}

/* Error interrupts get sent here. */
/* Decode and log QEC/BigMAC error status bits, then recover with a
 * full hardware re-init.
 */
static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
{
	printk(KERN_ERR "bigmac_is_medium_rare: ");
	if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
		if (qec_status & GLOB_STAT_ER)
			printk("QEC_ERROR, ");
		if (qec_status & GLOB_STAT_BM)
			printk("QEC_BMAC_ERROR, ");
	}
	if (bmac_status & CREG_STAT_ERRORS) {
		if (bmac_status & CREG_STAT_BERROR)
			printk("BMAC_ERROR, ");
		if (bmac_status & CREG_STAT_TXDERROR)
			printk("TXD_ERROR, ");
		if (bmac_status & CREG_STAT_TXLERR)
			printk("TX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_TXPERR)
			printk("TX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_TXSERR)
			printk("TX_SBUS_ERROR, ");
		if (bmac_status & CREG_STAT_RXDROP)
			printk("RX_DROP_ERROR, ");
		if (bmac_status & CREG_STAT_RXSMALL)
			printk("RX_SMALL_ERROR, ");
		if (bmac_status & CREG_STAT_RXLERR)
			printk("RX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_RXPERR)
			printk("RX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_RXSERR)
			printk("RX_SBUS_ERROR, ");
	}
	printk(" RESET\n");
	bigmac_init_hw(bp, 1);
}

/* BigMAC transmit complete service routines. */
/* Reclaim completed TX descriptors: unmap, count, free the skbs and
 * wake the queue if space opened up.  Runs under bp->lock from IRQ.
 */
static void bigmac_tx(struct bigmac *bp)
{
	struct be_txd *txbase = &bp->bmac_block->be_txd[0];
	struct net_device *dev = bp->dev;
	int elem;

	spin_lock(&bp->lock);

	elem = bp->tx_old;
	DTX(("bigmac_tx: tx_old[%d] ", elem));
	while (elem != bp->tx_new) {
		struct sk_buff *skb;
		struct be_txd *this = &txbase[elem];

		DTX(("this(%p) [flags(%08x)addr(%08x)]",
		     this, this->tx_flags, this->tx_addr));

		if (this->tx_flags & TXD_OWN)
			break;
		skb = bp->tx_skbs[elem];
		bp->enet_stats.tx_packets++;
		bp->enet_stats.tx_bytes += skb->len;
		dma_unmap_single(&bp->bigmac_op->dev,
				 this->tx_addr, skb->len,
				 DMA_TO_DEVICE);

		DTX(("skb(%p) ", skb));
		bp->tx_skbs[elem] = NULL;
		dev_kfree_skb_irq(skb);

		elem = NEXT_TX(elem);
	}
	DTX((" DONE, tx_old=%d\n", elem));
	bp->tx_old = elem;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(bp) > 0)
		netif_wake_queue(bp->dev);

	spin_unlock(&bp->lock);
}

/* BigMAC receive complete service routines. */
/* Drain the RX ring: large frames get a fresh ring buffer, small ones
 * are copied into a tight skb and the ring buffer is reused; errored
 * or unrefillable frames are returned to the chip and counted dropped.
 */
static void bigmac_rx(struct bigmac *bp)
{
	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
	struct be_rxd *this;
	int elem = bp->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		int len = (flags & RXD_LENGTH); /* FCS not included */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			bp->enet_stats.rx_errors++;
			bp->enet_stats.rx_length_errors++;

	drop_it:
			/* Return it to the BigMAC. */
			bp->enet_stats.rx_dropped++;
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
			goto next;
		}
		skb = bp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(&bp->bigmac_op->dev,
					 this->rx_addr,
					 RX_BUF_ALLOC_SIZE - 34,
					 DMA_FROM_DEVICE);
			bp->rx_skbs[elem] = new_skb;
			new_skb->dev = bp->dev;
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);
			this->rx_addr =
				dma_map_single(&bp->bigmac_op->dev,
					       new_skb->data,
					       RX_BUF_ALLOC_SIZE - 34,
					       DMA_FROM_DEVICE);
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
						this->rx_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
			dma_sync_single_for_device(&bp->bigmac_op->dev,
						   this->rx_addr, len,
						   DMA_FROM_DEVICE);

			/* Reuse original ring buffer. */
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums done by the BigMAC ;-( */
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_rx(skb);
		bp->enet_stats.rx_packets++;
		bp->enet_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	bp->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
		       bp->dev->name);
}

/* Main IRQ handler: latch QEC + BigMAC status, dispatch to error
 * recovery, TX reclaim and RX service as indicated.
 */
static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
{
	struct bigmac *bp = (struct bigmac *) dev_id;
	u32 qec_status, bmac_status;

	DIRQ(("bigmac_interrupt: "));

	/* Latch status registers now. */
	bmac_status = sbus_readl(bp->creg + CREG_STAT);
	qec_status = sbus_readl(bp->gregs + GLOB_STAT);

	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
	   (bmac_status & CREG_STAT_ERRORS))
		bigmac_is_medium_rare(bp, qec_status, bmac_status);

	if (bmac_status & CREG_STAT_TXIRQ)
		bigmac_tx(bp);

	if (bmac_status & CREG_STAT_RXIRQ)
		bigmac_rx(bp);

	return IRQ_HANDLED;
}

/* net_device open: grab the IRQ and bring the hardware up. */
static int bigmac_open(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
	if (ret) {
		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
		return ret;
	}
	init_timer(&bp->bigmac_timer);
	ret = bigmac_init_hw(bp, 0);
	if (ret)
		free_irq(dev->irq, bp);
	return ret;
}

/* net_device stop: kill the link timer, quiesce hardware, free
 * buffers and the IRQ.
 */
static int bigmac_close(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	del_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bigmac_stop(bp);
	bigmac_clean_rings(bp);
	free_irq(dev->irq, bp);
	return 0;
}

/* Watchdog: a TX hang is recovered by a full hardware re-init. */
static void bigmac_tx_timeout(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_init_hw(bp, 0);
	netif_wake_queue(dev);
}

/* Put a packet on the wire. */
static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int len, entry;
	u32 mapping;

	len = skb->len;
	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
				 len, DMA_TO_DEVICE);

	/* Avoid a race... */
	spin_lock_irq(&bp->lock);
	entry = bp->tx_new;
	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
	bp->tx_skbs[entry] = skb;
	bp->bmac_block->be_txd[entry].tx_addr = mapping;
	/* Ownership handoff to the chip happens last. */
	bp->bmac_block->be_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	bp->tx_new = NEXT_TX(entry);
	if (TX_BUFFS_AVAIL(bp) <= 0)
		netif_stop_queue(dev);
	spin_unlock_irq(&bp->lock);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);

	return NETDEV_TX_OK;
}

/* Refresh the hardware counters and hand back the stats struct. */
static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_get_counters(bp, bp->bregs);
	return &bp->enet_stats;
}

/* Program the RX filter: all-ones hash for allmulti / >64 groups,
 * promiscuous bit for IFF_PROMISC, otherwise a CRC-based 64-bit
 * multicast hash.  Receiver is paused around the update.
 */
static void bigmac_set_multicast(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	void __iomem *bregs = bp->bregs;
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	u32 tmp, crc;

	/* Disable the receiver.  The bit self-clears when
	 * the operation is complete.
	 */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp &= ~(BIGMAC_RXCFG_ENABLE);
	sbus_writel(tmp, bregs + BMAC_RXCFG);
	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
		udelay(20);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
	} else if (dev->flags & IFF_PROMISC) {
		tmp = sbus_readl(bregs + BMAC_RXCFG);
		tmp |= BIGMAC_RXCFG_PMISC;
		sbus_writel(tmp, bregs + BMAC_RXCFG);
	} else {
		u16 hash_table[4];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
	}

	/* Re-enable the receiver. */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp |= BIGMAC_RXCFG_ENABLE;
	sbus_writel(tmp, bregs + BMAC_RXCFG);
}

/* Ethtool support...
*/ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "sunbmac"); strcpy(info->version, "2.0"); } static u32 bigmac_get_link(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); spin_lock_irq(&bp->lock); bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR); spin_unlock_irq(&bp->lock); return (bp->sw_bmsr & BMSR_LSTATUS); } static const struct ethtool_ops bigmac_ethtool_ops = { .get_drvinfo = bigmac_get_drvinfo, .get_link = bigmac_get_link, }; static const struct net_device_ops bigmac_ops = { .ndo_open = bigmac_open, .ndo_stop = bigmac_close, .ndo_start_xmit = bigmac_start_xmit, .ndo_get_stats = bigmac_get_stats, .ndo_set_multicast_list = bigmac_set_multicast, .ndo_tx_timeout = bigmac_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit bigmac_ether_init(struct platform_device *op, struct platform_device *qec_op) { static int version_printed; struct net_device *dev; u8 bsizes, bsizes_more; struct bigmac *bp; int i; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; if (version_printed++ == 0) printk(KERN_INFO "%s", version); for (i = 0; i < 6; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ bp = netdev_priv(dev); bp->qec_op = qec_op; bp->bigmac_op = op; SET_NETDEV_DEV(dev, &op->dev); spin_lock_init(&bp->lock); /* Map in QEC global control registers. */ bp->gregs = of_ioremap(&qec_op->resource[0], 0, GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); if (!bp->gregs) { printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); goto fail_and_cleanup; } /* Make sure QEC is in BigMAC mode. 
*/ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); goto fail_and_cleanup; } /* Reset the QEC. */ if (qec_global_reset(bp->gregs)) goto fail_and_cleanup; /* Get supported SBUS burst sizes. */ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes &= 0xff; if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32) == 0) bsizes = (DMA_BURST32 - 1); bp->bigmac_bursts = bsizes; /* Perform QEC initialization. */ qec_init(bp); /* Map in the BigMAC channel registers. */ bp->creg = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "BigMAC QEC Channel Regs"); if (!bp->creg) { printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC control registers. */ bp->bregs = of_ioremap(&op->resource[1], 0, BMAC_REG_SIZE, "BigMAC Primary Regs"); if (!bp->bregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC transceiver registers, this is how you poke at * the BigMAC's PHY. */ bp->tregs = of_ioremap(&op->resource[2], 0, TCVR_REG_SIZE, "BigMAC Transceiver Regs"); if (!bp->tregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); goto fail_and_cleanup; } /* Stop the BigMAC. */ bigmac_stop(bp); /* Allocate transmit/receive descriptor DVMA block. */ bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, PAGE_SIZE, &bp->bblock_dvma, GFP_ATOMIC); if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); goto fail_and_cleanup; } /* Get the board revision of this BigMAC. */ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, "board-version", 1); /* Init auto-negotiation timer state. 
*/ init_timer(&bp->bigmac_timer); bp->timer_state = asleep; bp->timer_ticks = 0; /* Backlink to generic net device struct. */ bp->dev = dev; /* Set links to our BigMAC open and close routines. */ dev->ethtool_ops = &bigmac_ethtool_ops; dev->netdev_ops = &bigmac_ops; dev->watchdog_timeo = 5*HZ; /* Finish net device registration. */ dev->irq = bp->bigmac_op->archdata.irqs[0]; dev->dma = 0; if (register_netdev(dev)) { printk(KERN_ERR "BIGMAC: Cannot register device.\n"); goto fail_and_cleanup; } dev_set_drvdata(&bp->bigmac_op->dev, bp); printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", dev->name, dev->dev_addr); return 0; fail_and_cleanup: /* Something went wrong, undo whatever we did so far. */ /* Free register mappings if any. */ if (bp->gregs) of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); if (bp->creg) of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); if (bp->bregs) of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); if (bp->tregs) of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); if (bp->bmac_block) dma_free_coherent(&bp->bigmac_op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); /* This also frees the co-located private data */ free_netdev(dev); return -ENODEV; } /* QEC can be the parent of either QuadEthernet or a BigMAC. We want * the latter. 
*/ static int __devinit bigmac_sbus_probe(struct platform_device *op, const struct of_device_id *match) { struct device *parent = op->dev.parent; struct platform_device *qec_op; qec_op = to_platform_device(parent); return bigmac_ether_init(op, qec_op); } static int __devexit bigmac_sbus_remove(struct platform_device *op) { struct bigmac *bp = dev_get_drvdata(&op->dev); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct platform_device *qec_op; qec_op = to_platform_device(parent); unregister_netdev(net_dev); of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id bigmac_sbus_match[] = { { .name = "be", }, {}, }; MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct of_platform_driver bigmac_sbus_driver = { .driver = { .name = "sunbmac", .owner = THIS_MODULE, .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, .remove = __devexit_p(bigmac_sbus_remove), }; static int __init bigmac_init(void) { return of_register_platform_driver(&bigmac_sbus_driver); } static void __exit bigmac_exit(void) { of_unregister_platform_driver(&bigmac_sbus_driver); } module_init(bigmac_init); module_exit(bigmac_exit);
gpl-2.0
MrApocalypse/Immortality_kernel
arch/mips/sibyte/sb1250/setup.c
675
5817
/* * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/string.h> #include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/io.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_scd.h> unsigned int sb1_pass; unsigned int soc_pass; unsigned int soc_type; EXPORT_SYMBOL(soc_type); unsigned int periph_rev; unsigned int zbbus_mhz; EXPORT_SYMBOL(zbbus_mhz); static char *soc_str; static char *pass_str; static unsigned int war_pass; /* XXXKW don't overload PASS defines? */ static int __init setup_bcm1250(void) { int ret = 0; switch (soc_pass) { case K_SYS_REVISION_BCM1250_PASS1: periph_rev = 1; pass_str = "Pass 1"; break; case K_SYS_REVISION_BCM1250_A10: periph_rev = 2; pass_str = "A8/A10"; /* XXXKW different war_pass? 
*/ war_pass = K_SYS_REVISION_BCM1250_PASS2; break; case K_SYS_REVISION_BCM1250_PASS2_2: periph_rev = 2; pass_str = "B1"; break; case K_SYS_REVISION_BCM1250_B2: periph_rev = 2; pass_str = "B2"; war_pass = K_SYS_REVISION_BCM1250_PASS2_2; break; case K_SYS_REVISION_BCM1250_PASS3: periph_rev = 3; pass_str = "C0"; break; case K_SYS_REVISION_BCM1250_C1: periph_rev = 3; pass_str = "C1"; break; default: if (soc_pass < K_SYS_REVISION_BCM1250_PASS2_2) { periph_rev = 2; pass_str = "A0-A6"; war_pass = K_SYS_REVISION_BCM1250_PASS2; } else { printk("Unknown BCM1250 rev %x\n", soc_pass); ret = 1; } break; } return ret; } static int __init setup_bcm112x(void) { int ret = 0; switch (soc_pass) { case 0: /* Early build didn't have revid set */ periph_rev = 3; pass_str = "A1"; war_pass = K_SYS_REVISION_BCM112x_A1; break; case K_SYS_REVISION_BCM112x_A1: periph_rev = 3; pass_str = "A1"; break; case K_SYS_REVISION_BCM112x_A2: periph_rev = 3; pass_str = "A2"; break; case K_SYS_REVISION_BCM112x_A3: periph_rev = 3; pass_str = "A3"; break; case K_SYS_REVISION_BCM112x_A4: periph_rev = 3; pass_str = "A4"; break; case K_SYS_REVISION_BCM112x_B0: periph_rev = 3; pass_str = "B0"; break; default: printk("Unknown %s rev %x\n", soc_str, soc_pass); ret = 1; } return ret; } /* Setup code likely to be common to all SiByte platforms */ static int __init sys_rev_decode(void) { int ret = 0; war_pass = soc_pass; switch (soc_type) { case K_SYS_SOC_TYPE_BCM1250: case K_SYS_SOC_TYPE_BCM1250_ALT: case K_SYS_SOC_TYPE_BCM1250_ALT2: soc_str = "BCM1250"; ret = setup_bcm1250(); break; case K_SYS_SOC_TYPE_BCM1120: soc_str = "BCM1120"; ret = setup_bcm112x(); break; case K_SYS_SOC_TYPE_BCM1125: soc_str = "BCM1125"; ret = setup_bcm112x(); break; case K_SYS_SOC_TYPE_BCM1125H: soc_str = "BCM1125H"; ret = setup_bcm112x(); break; default: printk("Unknown SOC type %x\n", soc_type); ret = 1; break; } return ret; } void __init sb1250_setup(void) { uint64_t sys_rev; int plldiv; int bad_config = 0; sb1_pass = read_c0_prid() & 
0xff; sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); soc_type = SYS_SOC_TYPE(sys_rev); soc_pass = G_SYS_REVISION(sys_rev); if (sys_rev_decode()) { printk("Restart after failure to identify SiByte chip\n"); machine_restart(NULL); } plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25); printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n", soc_str, pass_str, zbbus_mhz * 2, sb1_pass); printk("Board type: %s\n", get_system_type()); switch (war_pass) { case K_SYS_REVISION_BCM1250_PASS1: #ifndef CONFIG_SB1_PASS_1_WORKAROUNDS printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, " "and the kernel doesn't have the proper " "workarounds compiled in. @@@@\n"); bad_config = 1; #endif break; case K_SYS_REVISION_BCM1250_PASS2: /* Pass 2 - easiest as default for now - so many numbers */ #if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \ !defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) printk("@@@@ This is a BCM1250 A3-A10 board, and the " "kernel doesn't have the proper workarounds " "compiled in. @@@@\n"); bad_config = 1; #endif #ifdef CONFIG_CPU_HAS_PREFETCH printk("@@@@ Prefetches may be enabled in this kernel, " "but are buggy on this board. @@@@\n"); bad_config = 1; #endif break; case K_SYS_REVISION_BCM1250_PASS2_2: #ifndef CONFIG_SB1_PASS_2_WORKAROUNDS printk("@@@@ This is a BCM1250 B1/B2. board, and the " "kernel doesn't have the proper workarounds " "compiled in. @@@@\n"); bad_config = 1; #endif #if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \ !defined(CONFIG_CPU_HAS_PREFETCH) printk("@@@@ This is a BCM1250 B1/B2, but the kernel is " "conservatively configured for an 'A' stepping. " "@@@@\n"); #endif break; default: break; } if (bad_config) { printk("Invalid configuration for this chip.\n"); machine_restart(NULL); } }
gpl-2.0
ISTweak/android_kernel_nec_msm7x30
drivers/uio/uio.c
931
20300
/* * drivers/uio/uio.c * * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> * * Userspace IO * * Base Functions * * Licensed under the GPLv2 only. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/uio_driver.h> #define UIO_MAX_DEVICES 255 struct uio_device { struct module *owner; struct device *dev; int minor; atomic_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; int vma_count; struct uio_info *info; struct kobject *map_dir; struct kobject *portio_dir; }; static int uio_major; static DEFINE_IDR(uio_idr); static const struct file_operations uio_fops; /* UIO class infrastructure */ static struct uio_class { struct kref kref; struct class *class; } *uio_class; /* Protect idr accesses */ static DEFINE_MUTEX(minor_lock); /* * attributes */ struct uio_map { struct kobject kobj; struct uio_mem *mem; }; #define to_map(map) container_of(map, struct uio_map, kobj) static ssize_t map_name_show(struct uio_mem *mem, char *buf) { if (unlikely(!mem->name)) mem->name = ""; return sprintf(buf, "%s\n", mem->name); } static ssize_t map_addr_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%lx\n", mem->addr); } static ssize_t map_size_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%lx\n", mem->size); } static ssize_t map_offset_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%lx\n", mem->addr & ~PAGE_MASK); } struct map_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_mem *, char *); ssize_t (*store)(struct uio_mem *, const char *, size_t); }; static struct map_sysfs_entry name_attribute = __ATTR(name, S_IRUGO, 
map_name_show, NULL); static struct map_sysfs_entry addr_attribute = __ATTR(addr, S_IRUGO, map_addr_show, NULL); static struct map_sysfs_entry size_attribute = __ATTR(size, S_IRUGO, map_size_show, NULL); static struct map_sysfs_entry offset_attribute = __ATTR(offset, S_IRUGO, map_offset_show, NULL); static struct attribute *attrs[] = { &name_attribute.attr, &addr_attribute.attr, &size_attribute.attr, &offset_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static void map_release(struct kobject *kobj) { struct uio_map *map = to_map(kobj); kfree(map); } static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_map *map = to_map(kobj); struct uio_mem *mem = map->mem; struct map_sysfs_entry *entry; entry = container_of(attr, struct map_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(mem, buf); } static const struct sysfs_ops map_sysfs_ops = { .show = map_type_show, }; static struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, .default_attrs = attrs, }; struct uio_portio { struct kobject kobj; struct uio_port *port; }; #define to_portio(portio) container_of(portio, struct uio_portio, kobj) static ssize_t portio_name_show(struct uio_port *port, char *buf) { if (unlikely(!port->name)) port->name = ""; return sprintf(buf, "%s\n", port->name); } static ssize_t portio_start_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->start); } static ssize_t portio_size_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->size); } static ssize_t portio_porttype_show(struct uio_port *port, char *buf) { const char *porttypes[] = {"none", "x86", "gpio", "other"}; if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER)) return -EINVAL; return sprintf(buf, "port_%s\n", porttypes[port->porttype]); } struct portio_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_port *, char *); ssize_t 
(*store)(struct uio_port *, const char *, size_t); }; static struct portio_sysfs_entry portio_name_attribute = __ATTR(name, S_IRUGO, portio_name_show, NULL); static struct portio_sysfs_entry portio_start_attribute = __ATTR(start, S_IRUGO, portio_start_show, NULL); static struct portio_sysfs_entry portio_size_attribute = __ATTR(size, S_IRUGO, portio_size_show, NULL); static struct portio_sysfs_entry portio_porttype_attribute = __ATTR(porttype, S_IRUGO, portio_porttype_show, NULL); static struct attribute *portio_attrs[] = { &portio_name_attribute.attr, &portio_start_attribute.attr, &portio_size_attribute.attr, &portio_porttype_attribute.attr, NULL, }; static void portio_release(struct kobject *kobj) { struct uio_portio *portio = to_portio(kobj); kfree(portio); } static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_portio *portio = to_portio(kobj); struct uio_port *port = portio->port; struct portio_sysfs_entry *entry; entry = container_of(attr, struct portio_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(port, buf); } static const struct sysfs_ops portio_sysfs_ops = { .show = portio_type_show, }; static struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, .default_attrs = portio_attrs, }; static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); if (idev) return sprintf(buf, "%s\n", idev->info->name); else return -ENODEV; } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static ssize_t show_version(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); if (idev) return sprintf(buf, "%s\n", idev->info->version); else return -ENODEV; } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_event(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = 
dev_get_drvdata(dev); if (idev) return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); else return -ENODEV; } static DEVICE_ATTR(event, S_IRUGO, show_event, NULL); static struct attribute *uio_attrs[] = { &dev_attr_name.attr, &dev_attr_version.attr, &dev_attr_event.attr, NULL, }; static struct attribute_group uio_attr_grp = { .attrs = uio_attrs, }; /* * device functions */ static int uio_dev_add_attributes(struct uio_device *idev) { int ret; int mi, pi; int map_found = 0; int portio_found = 0; struct uio_mem *mem; struct uio_map *map; struct uio_port *port; struct uio_portio *portio; ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp); if (ret) goto err_group; for (mi = 0; mi < MAX_UIO_MAPS; mi++) { mem = &idev->info->mem[mi]; if (mem->size == 0) break; if (!map_found) { map_found = 1; idev->map_dir = kobject_create_and_add("maps", &idev->dev->kobj); if (!idev->map_dir) goto err_map; } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) goto err_map; kobject_init(&map->kobj, &map_attr_type); map->mem = mem; mem->map = map; ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); if (ret) goto err_map; ret = kobject_uevent(&map->kobj, KOBJ_ADD); if (ret) goto err_map; } for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { port = &idev->info->port[pi]; if (port->size == 0) break; if (!portio_found) { portio_found = 1; idev->portio_dir = kobject_create_and_add("portio", &idev->dev->kobj); if (!idev->portio_dir) goto err_portio; } portio = kzalloc(sizeof(*portio), GFP_KERNEL); if (!portio) goto err_portio; kobject_init(&portio->kobj, &portio_attr_type); portio->port = port; port->portio = portio; ret = kobject_add(&portio->kobj, idev->portio_dir, "port%d", pi); if (ret) goto err_portio; ret = kobject_uevent(&portio->kobj, KOBJ_ADD); if (ret) goto err_portio; } return 0; err_portio: for (pi--; pi >= 0; pi--) { port = &idev->info->port[pi]; portio = port->portio; kobject_put(&portio->kobj); } kobject_put(idev->portio_dir); err_map: for (mi--; mi>=0; 
mi--) { mem = &idev->info->mem[mi]; map = mem->map; kobject_put(&map->kobj); } kobject_put(idev->map_dir); sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp); err_group: dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); return ret; } static void uio_dev_del_attributes(struct uio_device *idev) { int i; struct uio_mem *mem; struct uio_port *port; for (i = 0; i < MAX_UIO_MAPS; i++) { mem = &idev->info->mem[i]; if (mem->size == 0) break; kobject_put(&mem->map->kobj); } kobject_put(idev->map_dir); for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) { port = &idev->info->port[i]; if (port->size == 0) break; kobject_put(&port->portio->kobj); } kobject_put(idev->portio_dir); sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp); } static int uio_get_minor(struct uio_device *idev) { int retval = -ENOMEM; int id; mutex_lock(&minor_lock); if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0) goto exit; retval = idr_get_new(&uio_idr, idev, &id); if (retval < 0) { if (retval == -EAGAIN) retval = -ENOMEM; goto exit; } idev->minor = id & MAX_ID_MASK; exit: mutex_unlock(&minor_lock); return retval; } static void uio_free_minor(struct uio_device *idev) { mutex_lock(&minor_lock); idr_remove(&uio_idr, idev->minor); mutex_unlock(&minor_lock); } /** * uio_event_notify - trigger an interrupt event * @info: UIO device capabilities */ void uio_event_notify(struct uio_info *info) { struct uio_device *idev = info->uio_dev; atomic_inc(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } EXPORT_SYMBOL_GPL(uio_event_notify); /** * uio_interrupt - hardware interrupt handler * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer * @dev_id: Pointer to the devices uio_device structure */ static irqreturn_t uio_interrupt(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; irqreturn_t ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) uio_event_notify(idev->info); return ret; } struct uio_listener { struct 
uio_device *dev; s32 event_count; }; static int uio_open(struct inode *inode, struct file *filep) { struct uio_device *idev; struct uio_listener *listener; int ret = 0; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; goto out; } if (!try_module_get(idev->owner)) { ret = -ENODEV; goto out; } listener = kmalloc(sizeof(*listener), GFP_KERNEL); if (!listener) { ret = -ENOMEM; goto err_alloc_listener; } listener->dev = idev; listener->event_count = atomic_read(&idev->event); filep->private_data = listener; if (idev->info->open) { ret = idev->info->open(idev->info, inode); if (ret) goto err_infoopen; } return 0; err_infoopen: kfree(listener); err_alloc_listener: module_put(idev->owner); out: return ret; } static int uio_fasync(int fd, struct file *filep, int on) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; return fasync_helper(fd, filep, on, &idev->async_queue); } static int uio_release(struct inode *inode, struct file *filep) { int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (idev->info->release) ret = idev->info->release(idev->info, inode); module_put(idev->owner); kfree(listener); return ret; } static unsigned int uio_poll(struct file *filep, poll_table *wait) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; if (idev->info->irq == UIO_IRQ_NONE) return -EIO; poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) return POLLIN | POLLRDNORM; return 0; } static ssize_t uio_read(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; DECLARE_WAITQUEUE(wait, current); ssize_t retval; s32 event_count; if (idev->info->irq == UIO_IRQ_NONE) return -EIO; if (count != sizeof(s32)) return -EINVAL; 
add_wait_queue(&idev->wait, &wait); do { set_current_state(TASK_INTERRUPTIBLE); event_count = atomic_read(&idev->event); if (event_count != listener->event_count) { if (copy_to_user(buf, &event_count, count)) retval = -EFAULT; else { listener->event_count = event_count; retval = count; } break; } if (filep->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } while (1); __set_current_state(TASK_RUNNING); remove_wait_queue(&idev->wait, &wait); return retval; } static ssize_t uio_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; if (idev->info->irq == UIO_IRQ_NONE) return -EIO; if (count != sizeof(s32)) return -EINVAL; if (!idev->info->irqcontrol) return -ENOSYS; if (copy_from_user(&irq_on, buf, count)) return -EFAULT; retval = idev->info->irqcontrol(idev->info, irq_on); return retval ? retval : sizeof(s32); } static int uio_find_mem_index(struct vm_area_struct *vma) { int mi; struct uio_device *idev = vma->vm_private_data; for (mi = 0; mi < MAX_UIO_MAPS; mi++) { if (idev->info->mem[mi].size == 0) return -1; if (vma->vm_pgoff == mi) return mi; } return -1; } static void uio_vma_open(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; idev->vma_count++; } static void uio_vma_close(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; idev->vma_count--; } static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct uio_device *idev = vma->vm_private_data; struct page *page; unsigned long offset; int mi = uio_find_mem_index(vma); if (mi < 0) return VM_FAULT_SIGBUS; /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE * to use mem[N]. 
*/ offset = (vmf->pgoff - mi) << PAGE_SHIFT; if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) page = virt_to_page(idev->info->mem[mi].addr + offset); else page = vmalloc_to_page((void *)idev->info->mem[mi].addr + offset); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct uio_vm_ops = { .open = uio_vma_open, .close = uio_vma_close, .fault = uio_vma_fault, }; static int uio_mmap_physical(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; vma->vm_flags |= VM_IO | VM_RESERVED; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, idev->info->mem[mi].addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } static int uio_mmap_logical(struct vm_area_struct *vma) { vma->vm_flags |= VM_RESERVED; vma->vm_ops = &uio_vm_ops; uio_vma_open(vma); return 0; } static int uio_mmap(struct file *filep, struct vm_area_struct *vma) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; int mi; unsigned long requested_pages, actual_pages; int ret = 0; if (vma->vm_end < vma->vm_start) return -EINVAL; vma->vm_private_data = idev; mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; if (requested_pages > actual_pages) return -EINVAL; if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); return ret; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_PHYS: return uio_mmap_physical(vma); case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: return uio_mmap_logical(vma); default: return -EINVAL; } } static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, .release = uio_release, .read = uio_read, .write = uio_write, .mmap = uio_mmap, .poll = uio_poll, 
.fasync = uio_fasync, }; static int uio_major_init(void) { uio_major = register_chrdev(0, "uio", &uio_fops); if (uio_major < 0) return uio_major; return 0; } static void uio_major_cleanup(void) { unregister_chrdev(uio_major, "uio"); } static int init_uio_class(void) { int ret = 0; if (uio_class != NULL) { kref_get(&uio_class->kref); goto exit; } /* This is the first time in here, set everything up properly */ ret = uio_major_init(); if (ret) goto exit; uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL); if (!uio_class) { ret = -ENOMEM; goto err_kzalloc; } kref_init(&uio_class->kref); uio_class->class = class_create(THIS_MODULE, "uio"); if (IS_ERR(uio_class->class)) { ret = IS_ERR(uio_class->class); printk(KERN_ERR "class_create failed for uio\n"); goto err_class_create; } return 0; err_class_create: kfree(uio_class); uio_class = NULL; err_kzalloc: uio_major_cleanup(); exit: return ret; } static void release_uio_class(struct kref *kref) { /* Ok, we cheat as we know we only have one uio_class */ class_destroy(uio_class->class); kfree(uio_class); uio_major_cleanup(); uio_class = NULL; } static void uio_class_destroy(void) { if (uio_class) kref_put(&uio_class->kref, release_uio_class); } /** * uio_register_device - register a new userspace IO device * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities * * returns zero on success or a negative error code. 
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	/* name and version are mandatory driver-supplied fields */
	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	/* Take (or create) the shared "uio" class and chardev major. */
	ret = init_uio_class();
	if (ret)
		return ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	/* Allocate the per-device minor number. */
	ret = uio_get_minor(idev);
	if (ret)
		goto err_get_minor;

	idev->dev = device_create(uio_class->class, parent,
				  MKDEV(uio_major, idev->minor), idev,
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	/* Negative irq values (e.g. UIO_IRQ_CUSTOM) mean "no kernel IRQ". */
	if (idev->info->irq >= 0) {
		ret = request_irq(idev->info->irq, uio_interrupt,
				  idev->info->irq_flags, idev->info->name, idev);
		if (ret)
			goto err_request_irq;
	}

	return 0;

	/* Unwind in strict reverse order of the setup steps above. */
err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
err_get_minor:
	kfree(idev);
err_kzalloc:
	uio_class_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

/**
 * uio_unregister_device - unregister an industrial IO device
 * @info:	UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	if (info->irq >= 0)
		free_irq(info->irq, idev);

	uio_dev_del_attributes(idev);

	dev_set_drvdata(idev->dev, NULL);
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
	kfree(idev);
	/* Drop our reference on the shared class (frees it at zero). */
	uio_class_destroy();

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

/* Nothing to do at module load: class/major are created lazily on
 * first __uio_register_device() call. */
static int __init uio_init(void)
{
	return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");
gpl-2.0
Druboo666/android_kernel_asus_moorefield
scripts/mod/devicetable-offsets.c
1187
5216
#include <linux/kbuild.h>
#include <linux/mod_devicetable.h>

/*
 * Build-time helper: emits SIZE_<id> and OFF_<id>_<field> assembler
 * constants for every device-ID table type, consumed by scripts/mod/
 * file2alias.c when generating module aliases. Not linked into the
 * kernel — only its compiler output (asm offsets) is used.
 */
#define DEVID(devid) DEFINE(SIZE_##devid, sizeof(struct devid))
#define DEVID_FIELD(devid, field) \
	DEFINE(OFF_##devid##_##field, offsetof(struct devid, field))

int main(void)
{
	DEVID(usb_device_id);
	DEVID_FIELD(usb_device_id, match_flags);
	DEVID_FIELD(usb_device_id, idVendor);
	DEVID_FIELD(usb_device_id, idProduct);
	DEVID_FIELD(usb_device_id, bcdDevice_lo);
	DEVID_FIELD(usb_device_id, bcdDevice_hi);
	DEVID_FIELD(usb_device_id, bDeviceClass);
	DEVID_FIELD(usb_device_id, bDeviceSubClass);
	DEVID_FIELD(usb_device_id, bDeviceProtocol);
	DEVID_FIELD(usb_device_id, bInterfaceClass);
	DEVID_FIELD(usb_device_id, bInterfaceSubClass);
	DEVID_FIELD(usb_device_id, bInterfaceProtocol);
	DEVID_FIELD(usb_device_id, bInterfaceNumber);

	DEVID(hid_device_id);
	DEVID_FIELD(hid_device_id, bus);
	DEVID_FIELD(hid_device_id, group);
	DEVID_FIELD(hid_device_id, vendor);
	DEVID_FIELD(hid_device_id, product);

	DEVID(ieee1394_device_id);
	DEVID_FIELD(ieee1394_device_id, match_flags);
	DEVID_FIELD(ieee1394_device_id, vendor_id);
	DEVID_FIELD(ieee1394_device_id, model_id);
	DEVID_FIELD(ieee1394_device_id, specifier_id);
	DEVID_FIELD(ieee1394_device_id, version);

	DEVID(pci_device_id);
	DEVID_FIELD(pci_device_id, vendor);
	DEVID_FIELD(pci_device_id, device);
	DEVID_FIELD(pci_device_id, subvendor);
	DEVID_FIELD(pci_device_id, subdevice);
	DEVID_FIELD(pci_device_id, class);
	DEVID_FIELD(pci_device_id, class_mask);

	DEVID(ccw_device_id);
	DEVID_FIELD(ccw_device_id, match_flags);
	DEVID_FIELD(ccw_device_id, cu_type);
	DEVID_FIELD(ccw_device_id, cu_model);
	DEVID_FIELD(ccw_device_id, dev_type);
	DEVID_FIELD(ccw_device_id, dev_model);

	DEVID(ap_device_id);
	DEVID_FIELD(ap_device_id, dev_type);

	DEVID(css_device_id);
	DEVID_FIELD(css_device_id, type);

	DEVID(serio_device_id);
	DEVID_FIELD(serio_device_id, type);
	DEVID_FIELD(serio_device_id, proto);
	DEVID_FIELD(serio_device_id, id);
	DEVID_FIELD(serio_device_id, extra);

	DEVID(acpi_device_id);
	DEVID_FIELD(acpi_device_id, id);

	DEVID(pnp_device_id);
	DEVID_FIELD(pnp_device_id, id);

	DEVID(pnp_card_device_id);
	DEVID_FIELD(pnp_card_device_id, devs);

	DEVID(pcmcia_device_id);
	DEVID_FIELD(pcmcia_device_id, match_flags);
	DEVID_FIELD(pcmcia_device_id, manf_id);
	DEVID_FIELD(pcmcia_device_id, card_id);
	DEVID_FIELD(pcmcia_device_id, func_id);
	DEVID_FIELD(pcmcia_device_id, function);
	DEVID_FIELD(pcmcia_device_id, device_no);
	DEVID_FIELD(pcmcia_device_id, prod_id_hash);

	DEVID(of_device_id);
	DEVID_FIELD(of_device_id, name);
	DEVID_FIELD(of_device_id, type);
	DEVID_FIELD(of_device_id, compatible);

	DEVID(vio_device_id);
	DEVID_FIELD(vio_device_id, type);
	DEVID_FIELD(vio_device_id, compat);

	DEVID(input_device_id);
	DEVID_FIELD(input_device_id, flags);
	DEVID_FIELD(input_device_id, bustype);
	DEVID_FIELD(input_device_id, vendor);
	DEVID_FIELD(input_device_id, product);
	DEVID_FIELD(input_device_id, version);
	DEVID_FIELD(input_device_id, evbit);
	DEVID_FIELD(input_device_id, keybit);
	DEVID_FIELD(input_device_id, relbit);
	DEVID_FIELD(input_device_id, absbit);
	DEVID_FIELD(input_device_id, mscbit);
	DEVID_FIELD(input_device_id, ledbit);
	DEVID_FIELD(input_device_id, sndbit);
	DEVID_FIELD(input_device_id, ffbit);
	DEVID_FIELD(input_device_id, swbit);

	DEVID(eisa_device_id);
	DEVID_FIELD(eisa_device_id, sig);

	DEVID(parisc_device_id);
	DEVID_FIELD(parisc_device_id, hw_type);
	DEVID_FIELD(parisc_device_id, hversion);
	DEVID_FIELD(parisc_device_id, hversion_rev);
	DEVID_FIELD(parisc_device_id, sversion);

	DEVID(sdio_device_id);
	DEVID_FIELD(sdio_device_id, class);
	DEVID_FIELD(sdio_device_id, vendor);
	DEVID_FIELD(sdio_device_id, device);

	DEVID(ssb_device_id);
	DEVID_FIELD(ssb_device_id, vendor);
	DEVID_FIELD(ssb_device_id, coreid);
	DEVID_FIELD(ssb_device_id, revision);

	DEVID(bcma_device_id);
	DEVID_FIELD(bcma_device_id, manuf);
	DEVID_FIELD(bcma_device_id, id);
	DEVID_FIELD(bcma_device_id, rev);
	DEVID_FIELD(bcma_device_id, class);

	DEVID(virtio_device_id);
	DEVID_FIELD(virtio_device_id, device);
	DEVID_FIELD(virtio_device_id, vendor);

	DEVID(hv_vmbus_device_id);
	DEVID_FIELD(hv_vmbus_device_id, guid);

	DEVID(i2c_device_id);
	DEVID_FIELD(i2c_device_id, name);

	DEVID(spi_device_id);
	DEVID_FIELD(spi_device_id, name);

	DEVID(dmi_system_id);
	DEVID_FIELD(dmi_system_id, matches);

	DEVID(platform_device_id);
	DEVID_FIELD(platform_device_id, name);

	DEVID(mdio_device_id);
	DEVID_FIELD(mdio_device_id, phy_id);
	DEVID_FIELD(mdio_device_id, phy_id_mask);

	DEVID(zorro_device_id);
	DEVID_FIELD(zorro_device_id, id);

	DEVID(isapnp_device_id);
	DEVID_FIELD(isapnp_device_id, vendor);
	DEVID_FIELD(isapnp_device_id, function);

	DEVID(ipack_device_id);
	DEVID_FIELD(ipack_device_id, format);
	DEVID_FIELD(ipack_device_id, vendor);
	DEVID_FIELD(ipack_device_id, device);

	DEVID(amba_id);
	DEVID_FIELD(amba_id, id);
	DEVID_FIELD(amba_id, mask);

	DEVID(x86_cpu_id);
	DEVID_FIELD(x86_cpu_id, feature);
	DEVID_FIELD(x86_cpu_id, family);
	DEVID_FIELD(x86_cpu_id, model);
	DEVID_FIELD(x86_cpu_id, vendor);

	DEVID(cpu_feature);
	DEVID_FIELD(cpu_feature, feature);

	DEVID(mei_cl_device_id);
	DEVID_FIELD(mei_cl_device_id, name);

	return 0;
}
gpl-2.0
chongzi865458/android4.04_kernel
arch/powerpc/sysdev/tsi108_pci.c
2979
11403
/* * Common routines for Tundra Semiconductor TSI108 host bridge. * * 2004-2005 (c) Tundra Semiconductor Corp. * Author: Alex Bounine (alexandreb@tundra.com) * Author: Roy Zang (tie-fei.zang@freescale.com) * Add pci interrupt router host * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/tsi108.h> #include <asm/tsi108_pci.h> #include <asm/tsi108_irq.h> #include <asm/prom.h> #undef DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) 
#endif

/*
 * Build the memory-mapped config-space address for (bus, devfn, offset);
 * the low two offset bits are masked off here and OR'd back in by the
 * accessors to select the byte lane.
 */
#define tsi_mk_config_addr(bus, devfunc, offset) \
	((((bus)<<16) | ((devfunc)<<8) | (offset & 0xfc)) + tsi108_pci_cfg_base)

u32 tsi108_pci_cfg_base;		/* virtual base of PCI config window */
static u32 tsi108_pci_cfg_phys;		/* physical base, for error matching */
u32 tsi108_csr_vir_base;
static struct irq_host *pci_irq_host;

extern u32 get_vir_csrbase(void);
extern u32 tsi108_read_reg(u32 reg_offset);
extern void tsi108_write_reg(u32 reg_offset, u32 val);

/*
 * Write @len bytes (@val) to PCI config space of @devfunc on @bus.
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_DEVICE_NOT_FOUND when the
 * platform excludes the device.
 */
int
tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfunc, int offset,
			   int len, u32 val)
{
	volatile unsigned char *cfg_addr;
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfunc))
			return PCIBIOS_DEVICE_NOT_FOUND;

	/* OR back the byte-lane bits masked off by tsi_mk_config_addr() */
	cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
							devfunc, offset) |
				     (offset & 0x03));

#ifdef DEBUG
	printk("PCI CFG write : ");
	printk("%d:0x%x:0x%x ", bus->number, devfunc, offset);
	printk("%d ADDR=0x%08x ", len, (uint) cfg_addr);
	printk("data = 0x%08x\n", val);
#endif

	/* PCI config space is little-endian; use LE accessors for 16/32-bit */
	switch (len) {
	case 1:
		out_8((u8 *) cfg_addr, val);
		break;
	case 2:
		out_le16((u16 *) cfg_addr, val);
		break;
	default:
		out_le32((u32 *) cfg_addr, val);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Clear processor-bus / PCI error state latched by failed config
 * accesses, so probing for absent devices does not leave errors pending.
 */
void tsi108_clear_pci_error(u32 pci_cfg_base)
{
	u32 err_stat, err_addr, pci_stat;

	/*
	 * Quietly clear PB and PCI error flags set as result
	 * of PCI/X configuration read requests.
	 */

	/* Read PB Error Log Registers */
	err_stat = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS);
	err_addr = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_AERR);

	if (err_stat & TSI108_PB_ERRCS_ES) {
		/* Clear error flag */
		tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS,
				 TSI108_PB_ERRCS_ES);

		/* Clear read error reported in PB_ISR */
		tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ISR,
				 TSI108_PB_ISR_PBS_RD_ERR);

		/* Clear PCI/X bus cfg errors if applicable */
		if ((err_addr & 0xFF000000) == pci_cfg_base) {
			pci_stat =
			    tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR);
			tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR,
					 pci_stat);
		}
	}

	return;
}

/*
 * Config-space load with a machine-check fixup: if the access faults
 * (device absent), the exception-table entry makes the result -1
 * (all-ones), matching PCI master-abort semantics. "op" selects the
 * byte-reversed load instruction for the access width.
 */
#define __tsi108_read_pci_config(x, addr, op)		\
	__asm__ __volatile__(				\
		"	"op" %0,0,%1\n"		\
		"1:	eieio\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,-1\n"			\
		"	b 2b\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,3b\n"			\
		".text"					\
		: "=r"(x) : "r"(addr))

/*
 * Read @len bytes of PCI config space into *@val. Mirrors
 * tsi108_direct_write_config() but tolerates faulting accesses via the
 * fixup in __tsi108_read_pci_config().
 */
int
tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
			  int len, u32 * val)
{
	volatile unsigned char *cfg_addr;
	struct pci_controller *hose = pci_bus_to_host(bus);
	u32 temp;

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
							devfn, offset) |
				     (offset & 0x03));

	switch (len) {
	case 1:
		__tsi108_read_pci_config(temp, cfg_addr, "lbzx");
		break;
	case 2:
		__tsi108_read_pci_config(temp, cfg_addr, "lhbrx");
		break;
	default:
		__tsi108_read_pci_config(temp, cfg_addr, "lwbrx");
		break;
	}

	*val = temp;

#ifdef DEBUG
	if ((0xFFFFFFFF != temp) && (0xFFFF != temp) && (0xFF != temp)) {
		printk("PCI CFG read : ");
		printk("%d:0x%x:0x%x ", bus->number, devfn, offset);
		printk("%d ADDR=0x%08x ", len, (uint) cfg_addr);
		printk("data = 0x%x\n", *val);
	}
#endif
	return PCIBIOS_SUCCESSFUL;
}

/* Convenience wrapper using the cached physical config base. */
void tsi108_clear_pci_cfg_error(void)
{
	tsi108_clear_pci_error(tsi108_pci_cfg_phys);
}

static struct pci_ops tsi108_direct_pci_ops = {
	.read = tsi108_direct_read_config,
	.write = tsi108_direct_write_config,
};

/*
 * Map the config window, read the device-tree "bus-range", and set up a
 * pci_controller using the direct config accessors above.
 * Returns 0 on success, -ENOMEM if hose allocation fails.
 */
int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	int has_address = 0;

	/* PCI Config mapping */
	tsi108_pci_cfg_base = (u32)ioremap(cfg_phys, TSI108_PCI_CFG_SIZE);
	tsi108_pci_cfg_phys = cfg_phys;
	DBG("TSI_PCI: %s tsi108_pci_cfg_base=0x%x\n", __func__,
	    tsi108_pci_cfg_base);

	/* Fetch host bridge registers address */
	has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %s, assume"
		       " bus 0\n", dev->full_name);
	}

	hose = pcibios_alloc_controller(dev);

	if (!hose) {
		printk("PCI Host bridge init failed\n");
		return -ENOMEM;
	}

	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	(hose)->ops = &tsi108_direct_pci_ops;

	printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
	       "Firmware bus number: %d->%d\n",
	       rsrc.start, hose->first_busno, hose->last_busno);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, primary);

	return 0;
}

/*
 * Low level utility functions
 */

/* Mask one PCI INTx line in the bridge's IRP config register. */
static void tsi108_pci_int_mask(u_int irq)
{
	u_int irp_cfg;
	int int_line = (irq - IRQ_PCI_INTAD_BASE);

	irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
	mb();
	irp_cfg |= (1 << int_line);	/* INTx_DIR = output */
	irp_cfg &= ~(3 << (8 + (int_line * 2)));	/* INTx_TYPE = unused */
	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg);
	mb();
	/* read-back to push the write out to the device */
	irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
}

/* Unmask one PCI INTx line (inverse of tsi108_pci_int_mask()). */
static void tsi108_pci_int_unmask(u_int irq)
{
	u_int irp_cfg;
	int int_line = (irq - IRQ_PCI_INTAD_BASE);

	irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
	mb();
	irp_cfg &= ~(1 << int_line);
	irp_cfg |= (3 << (8 + (int_line * 2)));
	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg);
	mb();
}

/* One-time enable of the PCI block's INTA#-INTD# reporting. */
static void init_pci_source(void)
{
	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL,
			0x0000ff00);
	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
			TSI108_PCI_IRP_ENABLE_P_INT);
	mb();
}

/*
 * Decode which INTA#-INTD# line fired. The static 'mask' rotates the
 * starting line each call so the four inputs are serviced fairly.
 * Returns the virq number, or -1 (== NO_IRQ path in the cascade) if no
 * PCI interrupt is pending.
 */
static inline unsigned int get_pci_source(void)
{
	u_int temp = 0;
	int irq = -1;
	int i;
	u_int pci_irp_stat;
	static int mask = 0;

	/* Read PCI/X block interrupt status register */
	pci_irp_stat = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);
	mb();

	if (pci_irp_stat & TSI108_PCI_IRP_STAT_P_INT) {
		/* Process Interrupt from PCI bus INTA# - INTD# lines */
		temp =
		    tsi108_read_reg(TSI108_PCI_OFFSET +
				    TSI108_PCI_IRP_INTAD) & 0xf;
		mb();
		for (i = 0; i < 4; i++, mask++) {
			if (temp & (1 << mask % 4)) {
				irq = IRQ_PCI_INTA + mask % 4;
				mask++;
				break;
			}
		}

		/* Disable interrupts from PCI block */
		temp = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);
		tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
				temp & ~TSI108_PCI_IRP_ENABLE_P_INT);
		mb();
		/* discard read; it only flushes the preceding write */
		(void)tsi108_read_reg(TSI108_PCI_OFFSET +
				      TSI108_PCI_IRP_ENABLE);
		mb();
	}
#ifdef DEBUG
	else {
		printk("TSI108_PIC: error in TSI108_PCI_IRP_STAT\n");
		pci_irp_stat =
		    tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);
		temp =
		    tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_INTAD);
		mb();
		printk(">> stat=0x%08x intad=0x%08x ", pci_irp_stat, temp);
		temp =
		    tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
		mb();
		printk("cfg_ctl=0x%08x ", temp);
		temp =
		    tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);
		mb();
		printk("irp_enable=0x%08x\n", temp);
	}
#endif	/* end of DEBUG */

	return irq;
}

/*
 * Linux descriptor level callbacks
 */

static void tsi108_pci_irq_unmask(struct irq_data *d)
{
	tsi108_pci_int_unmask(d->irq);

	/* Enable interrupts from PCI block */
	tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
			 tsi108_read_reg(TSI108_PCI_OFFSET +
					 TSI108_PCI_IRP_ENABLE) |
			 TSI108_PCI_IRP_ENABLE_P_INT);
	mb();
}

static void tsi108_pci_irq_mask(struct irq_data *d)
{
	tsi108_pci_int_mask(d->irq);
}

/* ack == mask here; the cascade handler re-enables via irq_eoi */
static void tsi108_pci_irq_ack(struct irq_data *d)
{
	tsi108_pci_int_mask(d->irq);
}

/*
 * Interrupt controller descriptor for cascaded PCI interrupt controller.
 */
static struct irq_chip tsi108_pci_irq = {
	.name = "tsi108_PCI_int",
	.irq_mask = tsi108_pci_irq_mask,
	.irq_ack = tsi108_pci_irq_ack,
	.irq_unmask = tsi108_pci_irq_unmask,
};

/* Device-tree interrupt specifier -> hwirq translation (level-high). */
static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
			    const u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_HIGH;
	return 0;
}

/* Wire virqs 1-4 (legacy INTA#-INTD#) to the tsi108_pci_irq chip. */
static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{	unsigned int irq;
	DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
	if ((virq >= 1) && (virq <= 4)){
		irq = virq + IRQ_PCI_INTAD_BASE - 1;
		irq_set_status_flags(irq, IRQ_LEVEL);
		irq_set_chip(irq, &tsi108_pci_irq);
	}
	return 0;
}

static struct irq_host_ops pci_irq_host_ops = {
	.map = pci_irq_host_map,
	.xlate = pci_irq_host_xlate,
};

/*
 * Exported functions
 */

/*
 * The Tsi108 PCI interrupts initialization routine.
 *
 * The INTA# - INTD# interrupts on the PCI bus are reported by the PCI block
 * to the MPIC using single interrupt source (IRQ_TSI108_PCI). Therefore the
 * PCI block has to be treated as a cascaded interrupt controller connected
 * to the MPIC.
 */

void __init tsi108_pci_int_init(struct device_node *node)
{
	DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");

	pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
				      0, &pci_irq_host_ops, 0);
	if (pci_irq_host == NULL) {
		printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
		return;
	}

	init_pci_source();
}

/* Cascade handler invoked on IRQ_TSI108_PCI: demux and dispatch. */
void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = get_pci_source();

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}
gpl-2.0
eagleeyetom/android_kernel_mtk_mt6572
drivers/tty/serial/of_serial.c
4259
5320
/*
 * Serial Port driver for Open Firmware platform devices
 *
 *    Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
 *
 *    This program is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU General Public License
 *    as published by the Free Software Foundation; either version
 *    2 of the License, or (at your option) any later version.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>

/* Per-device bookkeeping so remove() knows which backend registered it. */
struct of_serial_info {
	int type;	/* PORT_* constant from the match-table data */
	int line;	/* line number returned by the backend register call */
};

/*
 * Fill a struct uart_port for a given device node
 */
static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
					int type, struct uart_port *port)
{
	struct resource resource;
	struct device_node *np = ofdev->dev.of_node;
	u32 clk, spd, prop;
	int ret;

	memset(port, 0, sizeof *port);
	/* clock-frequency is mandatory: without it baud divisors are
	 * meaningless */
	if (of_property_read_u32(np, "clock-frequency", &clk)) {
		dev_warn(&ofdev->dev,
			 "no clock-frequency property set\n");
		return -ENODEV;
	}

	/* If current-speed was set, then try not to change it. */
	/* NOTE(review): a "current-speed" of 0 would divide by zero here —
	 * assumes firmware never writes 0; verify against platforms */
	if (of_property_read_u32(np, "current-speed", &spd) == 0)
		port->custom_divisor = clk / (16 * spd);

	ret = of_address_to_resource(np, 0, &resource);
	if (ret) {
		dev_warn(&ofdev->dev, "invalid address\n");
		return ret;
	}

	spin_lock_init(&port->lock);
	port->mapbase = resource.start;

	/* Check for shifted address mapping */
	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
		port->mapbase += prop;

	/* Check for registers offset within the devices address range */
	if (of_property_read_u32(np, "reg-shift", &prop) == 0)
		port->regshift = prop;

	port->irq = irq_of_parse_and_map(np, 0);
	port->iotype = UPIO_MEM;
	/* Only 1-byte and 4-byte register accesses are supported. */
	if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
		switch (prop) {
		case 1:
			port->iotype = UPIO_MEM;
			break;
		case 4:
			port->iotype = UPIO_MEM32;
			break;
		default:
			dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
				 prop);
			return -EINVAL;
		}
	}

	port->type = type;
	port->uartclk = clk;
	port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
		| UPF_FIXED_PORT | UPF_FIXED_TYPE;
	port->dev = &ofdev->dev;

	return 0;
}

/*
 * Try to register a serial port
 */
static struct of_device_id of_platform_serial_table[];
static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct of_serial_info *info;
	struct uart_port port;
	int port_type;
	int ret;

	match = of_match_device(of_platform_serial_table, &ofdev->dev);
	if (!match)
		return -EINVAL;

	/* Ports owned by RTAS firmware must not be touched by Linux. */
	if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
		return -EBUSY;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return -ENOMEM;

	/* The PORT_* type is smuggled through the match table's .data. */
	port_type = (unsigned long)match->data;
	ret = of_platform_serial_setup(ofdev, port_type, &port);
	if (ret)
		goto out;

	switch (port_type) {
#ifdef CONFIG_SERIAL_8250
	case PORT_8250 ... PORT_MAX_8250:
		ret = serial8250_register_port(&port);
		break;
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	case PORT_NWPSERIAL:
		ret = nwpserial_register_port(&port);
		break;
#endif
	default:
		/* need to add code for these */
	case PORT_UNKNOWN:
		dev_info(&ofdev->dev, "Unknown serial port found, ignored\n");
		ret = -ENODEV;
		break;
	}
	if (ret < 0)
		goto out;

	info->type = port_type;
	info->line = ret;
	dev_set_drvdata(&ofdev->dev, info);
	return 0;
out:
	kfree(info);
	irq_dispose_mapping(port.irq);
	return ret;
}

/*
 * Release a line
 */
static int of_platform_serial_remove(struct platform_device *ofdev)
{
	struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
	/* Unregister with whichever backend probe() picked. */
	switch (info->type) {
#ifdef CONFIG_SERIAL_8250
	case PORT_8250 ... PORT_MAX_8250:
		serial8250_unregister_port(info->line);
		break;
#endif
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	case PORT_NWPSERIAL:
		nwpserial_unregister_port(info->line);
		break;
#endif
	default:
		/* need to add code for these */
		break;
	}
	kfree(info);
	return 0;
}

/*
 * A few common types, add more as needed.
 */
static struct of_device_id __devinitdata of_platform_serial_table[] = {
	{ .compatible = "ns8250",   .data = (void *)PORT_8250, },
	{ .compatible = "ns16450",  .data = (void *)PORT_16450, },
	{ .compatible = "ns16550a", .data = (void *)PORT_16550A, },
	{ .compatible = "ns16550",  .data = (void *)PORT_16550, },
	{ .compatible = "ns16750",  .data = (void *)PORT_16750, },
	{ .compatible = "ns16850",  .data = (void *)PORT_16850, },
	{ .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
	{ .compatible = "ibm,qpace-nwp-serial",
		.data = (void *)PORT_NWPSERIAL, },
#endif
	{ .type = "serial",         .data = (void *)PORT_UNKNOWN, },
	{ /* end of list */ },
};

static struct platform_driver of_platform_serial_driver = {
	.driver = {
		.name = "of_serial",
		.owner = THIS_MODULE,
		.of_match_table = of_platform_serial_table,
	},
	.probe = of_platform_serial_probe,
	.remove = of_platform_serial_remove,
};

module_platform_driver(of_platform_serial_driver);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial Port driver for Open Firmware platform devices");
gpl-2.0
MrColdbird/android_kernel_oukitel_k4000pro
drivers/ide/cs5530.c
5027
8338
/*
 * Copyright (C) 2000			Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2000			Mark Lord <mlord@pobox.com>
 * Copyright (C) 2007			Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * Development of this chipset driver was funded
 * by the nice folks at National Semiconductor.
 *
 * Documentation:
 *	CS5530 documentation available from National Semiconductor.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/io.h>

#define DRV_NAME "cs5530"

/*
 * Here are the standard PIO mode 0-4 timings for each "format".
 * Format-0 uses fast data reg timings, with slower command reg timings.
 * Format-1 uses fast timings for all registers, but won't work with all drives.
 */
static unsigned int cs5530_pio_timings[2][5] = {
	{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
	{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
};

/*
 * After chip reset, the PIO timings are set to 0x0000e132, which is not valid.
 */
#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
/* Per-channel timing register block: primary at +0x20, secondary at +0x30 */
#define CS5530_BASEREG(hwif)	(((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))

/**
 *	cs5530_set_pio_mode	-	set host controller for PIO mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Handles setting of PIO mode for the chipset.
 *
 *	The init_hwif_cs5530() routine guarantees that all drives
 *	will have valid default PIO timings set up before we get here.
 */

static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	unsigned long basereg = CS5530_BASEREG(hwif);
	/* bit 31 of the drive0 config register selects the timing format */
	unsigned int format = (inl(basereg + 4) >> 31) & 1;
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	/* drive0 timings live at +0, drive1 at +8 */
	outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
}

/**
 *	cs5530_udma_filter	-	UDMA filter
 *	@drive: drive
 *
 *	cs5530_udma_filter() does UDMA mask filtering for the given drive
 *	taking into the consideration capabilities of the mate device.
 *
 *	The CS5530 specifies that two drives sharing a cable cannot mix
 *	UDMA/MDMA.  It has to be one or the other, for the pair, though
 *	different timings can still be chosen for each drive.  We could
 *	set the appropriate timing bits on the fly, but that might be
 *	a bit confusing.  So, for now we statically handle this requirement
 *	by looking at our mate drive to see what it is capable of, before
 *	choosing a mode for our own drive.
 *
 *	Note: This relies on the fact we never fail from UDMA to MWDMA2
 *	but instead drop to PIO.
 */

static u8 cs5530_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_drive_t *mate = ide_get_pair_dev(drive);
	u16 *mateid;
	u8 mask = hwif->ultra_mask;

	if (mate == NULL)
		goto out;
	mateid = mate->id;

	if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) {
		/* mate does UDMA itself: no restriction */
		if ((mateid[ATA_ID_FIELD_VALID] & 4) &&
		    (mateid[ATA_ID_UDMA_MODES] & 7))
			goto out;
		/* mate is MWDMA-only: forbid UDMA on this drive */
		if (mateid[ATA_ID_MWDMA_MODES] & 7)
			mask = 0;
	}
out:
	return mask;
}

/* Program UDMA/MWDMA timings; the UDMA-enable bit is shared by both
 * drives on the channel, so drive1 updates touch both registers. */
static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	unsigned long basereg;
	unsigned int reg, timings = 0;

	switch (drive->dma_mode) {
		case XFER_UDMA_0:	timings = 0x00921250; break;
		case XFER_UDMA_1:	timings = 0x00911140; break;
		case XFER_UDMA_2:	timings = 0x00911030; break;
		case XFER_MW_DMA_0:	timings = 0x00077771; break;
		case XFER_MW_DMA_1:	timings = 0x00012121; break;
		case XFER_MW_DMA_2:	timings = 0x00002020; break;
	}
	basereg = CS5530_BASEREG(hwif);
	reg = inl(basereg + 4);			/* get drive0 config register */
	timings |= reg & 0x80000000;		/* preserve PIO format bit */
	if ((drive-> dn & 1) == 0) {		/* are we configuring drive0? */
		outl(timings, basereg + 4);	/* write drive0 config register */
	} else {
		if (timings & 0x00100000)
			reg |=  0x00100000;	/* enable UDMA timings for both drives */
		else
			reg &= ~0x00100000;	/* disable UDMA timings for both drives */
		outl(reg, basereg + 4);		/* write drive0 config register */
		outl(timings, basereg + 12);	/* write drive1 config register */
	}
}

/**
 *	init_chipset_5530	-	set up 5530 bridge
 *	@dev: PCI device
 *
 *	Initialize the cs5530 bridge for reliable IDE DMA operation.
 */

static int init_chipset_cs5530(struct pci_dev *dev)
{
	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;

	if (pci_resource_start(dev, 4) == 0)
		return -EFAULT;

	/* Locate the two sibling Cyrix functions we must configure. */
	dev = NULL;
	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
		switch (dev->device) {
			case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
				master_0 = pci_dev_get(dev);
				break;
			case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
				cs5530_0 = pci_dev_get(dev);
				break;
		}
	}
	if (!master_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
		goto out;
	}
	if (!cs5530_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
		goto out;
	}

	/*
	 * Enable BusMaster and MemoryWriteAndInvalidate for the cs5530:
	 * -->  OR 0x14 into 16-bit PCI COMMAND reg of function 0 of the cs5530
	 */

	pci_set_master(cs5530_0);
	pci_try_set_mwi(cs5530_0);

	/*
	 * Set PCI CacheLineSize to 16-bytes:
	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
	 */

	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);

	/*
	 * Disable trapping of UDMA register accesses (Win98 hack):
	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
	 */

	pci_write_config_word(cs5530_0, 0xd0, 0x5006);

	/*
	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
	 * The other settings are what is necessary to get the register
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x40, 0x1e);

	/*
	 * Set max PCI burst size (16-bytes seems to work best):
	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
	 *	all others: clear bit-1 at 0x41, and do:
	 *	  128bytes: OR 0x00 at 0x41
	 *	  256bytes: OR 0x04 at 0x41
	 *	  512bytes: OR 0x08 at 0x41
	 *	 1024bytes: OR 0x0c at 0x41
	 */

	pci_write_config_byte(master_0, 0x41, 0x14);

	/*
	 * These settings are necessary to get the chip
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x42, 0x00);
	pci_write_config_byte(master_0, 0x43, 0xc1);

out:
	/* pci_dev_put(NULL) is a no-op; drop the refs taken above. */
	pci_dev_put(master_0);
	pci_dev_put(cs5530_0);
	return 0;
}

/**
 *	init_hwif_cs5530	-	initialise an IDE channel
 *	@hwif: IDE to initialize
 *
 *	This gets invoked by the IDE driver once for each channel. It
 *	performs channel-specific pre-initialization before drive probing.
 */

static void init_hwif_cs5530 (ide_hwif_t *hwif)
{
	unsigned long basereg;
	u32 d0_timings;

	basereg = CS5530_BASEREG(hwif);
	d0_timings = inl(basereg + 0);
	/* Replace the invalid post-reset timings with PIO mode 0 values. */
	if (CS5530_BAD_PIO(d0_timings))
		outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
	if (CS5530_BAD_PIO(inl(basereg + 8)))
		outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
}

static const struct ide_port_ops cs5530_port_ops = {
	.set_pio_mode		= cs5530_set_pio_mode,
	.set_dma_mode		= cs5530_set_dma_mode,
	.udma_filter		= cs5530_udma_filter,
};

static const struct ide_port_info cs5530_chipset = {
	.name		= DRV_NAME,
	.init_chipset	= init_chipset_cs5530,
	.init_hwif	= init_hwif_cs5530,
	.port_ops	= &cs5530_port_ops,
	.host_flags	= IDE_HFLAG_SERIALIZE |
			  IDE_HFLAG_POST_SET_MODE,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA2,
};

static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &cs5530_chipset, NULL);
}

static const struct pci_device_id cs5530_pci_tbl[] = {
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, cs5530_pci_tbl);

static struct pci_driver cs5530_pci_driver = {
	.name		= "CS5530 IDE",
	.id_table	= cs5530_pci_tbl,
	.probe		= cs5530_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init cs5530_ide_init(void)
{
	return ide_pci_register_driver(&cs5530_pci_driver);
}

static void __exit cs5530_ide_exit(void)
{
	pci_unregister_driver(&cs5530_pci_driver);
}

module_init(cs5530_ide_init);
module_exit(cs5530_ide_exit);

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE");
MODULE_LICENSE("GPL");
gpl-2.0
victormlourenco/kernel_msm
drivers/media/dvb/frontends/dvb-pll.c
5027
20509
/* * descriptions + helper functions for simple dvb plls. * * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <asm/types.h> #include "dvb-pll.h" struct dvb_pll_priv { /* pll number */ int nr; /* i2c details */ int pll_i2c_address; struct i2c_adapter *i2c; /* the PLL descriptor */ struct dvb_pll_desc *pll_desc; /* cached frequency/bandwidth */ u32 frequency; u32 bandwidth; }; #define DVB_PLL_MAX 64 static unsigned int dvb_pll_devcount; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); static unsigned int id[DVB_PLL_MAX] = { [ 0 ... 
(DVB_PLL_MAX-1) ] = DVB_PLL_UNDEFINED }; module_param_array(id, int, NULL, 0644); MODULE_PARM_DESC(id, "force pll id to use (DEBUG ONLY)"); /* ----------------------------------------------------------- */ struct dvb_pll_desc { char *name; u32 min; u32 max; u32 iffreq; void (*set)(struct dvb_frontend *fe, u8 *buf); u8 *initdata; u8 *initdata2; u8 *sleepdata; int count; struct { u32 limit; u32 stepsize; u8 config; u8 cb; } entries[12]; }; /* ----------------------------------------------------------- */ /* descriptions */ static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { .name = "Thomson dtt7579", .min = 177000000, .max = 858000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, .count = 4, .entries = { { 443250000, 166667, 0xb4, 0x02 }, { 542000000, 166667, 0xb4, 0x08 }, { 771000000, 166667, 0xbc, 0x08 }, { 999999999, 166667, 0xf4, 0x08 }, }, }; static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x10; } static struct dvb_pll_desc dvb_pll_thomson_dtt759x = { .name = "Thomson dtt759x", .min = 177000000, .max = 896000000, .set = thomson_dtt759x_bw, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0x84, 0x03 }, .count = 5, .entries = { { 264000000, 166667, 0xb4, 0x02 }, { 470000000, 166667, 0xbc, 0x02 }, { 735000000, 166667, 0xbc, 0x08 }, { 835000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; static struct dvb_pll_desc dvb_pll_lg_z201 = { .name = "LG z201", .min = 174000000, .max = 862000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, .count = 5, .entries = { { 157500000, 166667, 0xbc, 0x01 }, { 443250000, 166667, 0xbc, 0x02 }, { 542000000, 166667, 0xbc, 0x04 }, { 830000000, 166667, 0xf4, 0x04 }, { 999999999, 166667, 0xfc, 0x04 }, }, }; static struct dvb_pll_desc dvb_pll_unknown_1 = { .name = "unknown 1", /* used by dntv live dvb-t */ .min = 174000000, .max = 862000000, .iffreq= 36166667, .count = 9, .entries = { { 150000000, 166667, 
0xb4, 0x01 }, { 173000000, 166667, 0xbc, 0x01 }, { 250000000, 166667, 0xb4, 0x02 }, { 400000000, 166667, 0xbc, 0x02 }, { 420000000, 166667, 0xf4, 0x02 }, { 470000000, 166667, 0xfc, 0x02 }, { 600000000, 166667, 0xbc, 0x08 }, { 730000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; /* Infineon TUA6010XS * used in Thomson Cable Tuner */ static struct dvb_pll_desc dvb_pll_tua6010xs = { .name = "Infineon TUA6010XS", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 3, .entries = { { 115750000, 62500, 0x8e, 0x03 }, { 403250000, 62500, 0x8e, 0x06 }, { 999999999, 62500, 0x8e, 0x85 }, }, }; /* Panasonic env57h1xd5 (some Philips PLL ?) */ static struct dvb_pll_desc dvb_pll_env57h1xd5 = { .name = "Panasonic ENV57H1XD5", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 4, .entries = { { 153000000, 166667, 0xc2, 0x41 }, { 470000000, 166667, 0xc2, 0x42 }, { 526000000, 166667, 0xc2, 0x84 }, { 999999999, 166667, 0xc2, 0xa4 }, }, }; /* Philips TDA6650/TDA6651 * used in Panasonic ENV77H11D5 */ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tda665x = { .name = "Philips TDA6650/TDA6651", .min = 44250000, .max = 858000000, .set = tda665x_bw, .iffreq= 36166667, .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, .count = 12, .entries = { { 93834000, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ }, { 123834000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 161000000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 163834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 253834000, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ }, { 383834000, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ }, { 443834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 444000000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, { 583834000, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ }, { 793834000, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ }, { 444834000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ 
}, { 861000000, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ }, } }; /* Infineon TUA6034 * used in LG TDTP E102P */ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tua6034 = { .name = "Infineon TUA6034", .min = 44250000, .max = 858000000, .iffreq= 36166667, .count = 3, .set = tua6034_bw, .entries = { { 174500000, 62500, 0xce, 0x01 }, { 230000000, 62500, 0xce, 0x02 }, { 999999999, 62500, 0xce, 0x04 }, }, }; /* ALPS TDED4 * used in Nebula-Cards and USB boxes */ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x04; } static struct dvb_pll_desc dvb_pll_tded4 = { .name = "ALPS TDED4", .min = 47000000, .max = 863000000, .iffreq= 36166667, .set = tded4_bw, .count = 4, .entries = { { 153000000, 166667, 0x85, 0x01 }, { 470000000, 166667, 0x85, 0x02 }, { 823000000, 166667, 0x85, 0x08 }, { 999999999, 166667, 0x85, 0x88 }, } }; /* ALPS TDHU2 * used in AverTVHD MCE A180 */ static struct dvb_pll_desc dvb_pll_tdhu2 = { .name = "ALPS TDHU2", .min = 54000000, .max = 864000000, .iffreq= 44000000, .count = 4, .entries = { { 162000000, 62500, 0x85, 0x01 }, { 426000000, 62500, 0x85, 0x02 }, { 782000000, 62500, 0x85, 0x08 }, { 999999999, 62500, 0x85, 0x88 }, } }; /* Samsung TBMV30111IN / TBMV30712IN1 * used in Air2PC ATSC - 2nd generation (nxt2002) */ static struct dvb_pll_desc dvb_pll_samsung_tbmv = { .name = "Samsung TBMV30111IN / TBMV30712IN1", .min = 54000000, .max = 860000000, .iffreq= 44000000, .count = 6, .entries = { { 172000000, 166667, 0xb4, 0x01 }, { 214000000, 166667, 0xb4, 0x02 }, { 467000000, 166667, 0xbc, 0x02 }, { 721000000, 166667, 0xbc, 0x08 }, { 841000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x02 }, } }; /* * Philips SD1878 Tuner. 
*/ static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { .name = "Philips SD1878", .min = 950000, .max = 2150000, .iffreq= 249, /* zero-IF, offset 249 is to round up */ .count = 4, .entries = { { 1250000, 500, 0xc4, 0x00}, { 1450000, 500, 0xc4, 0x40}, { 2050000, 500, 0xc4, 0x80}, { 2150000, 500, 0xc4, 0xc0}, }, }; static void opera1_bw(struct dvb_frontend *fe, u8 *buf) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; u32 b_w = (c->symbol_rate * 27) / 32000; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 }; int result; u8 lpf; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); if (b_w <= 10000) lpf = 0xc; else if (b_w <= 12000) lpf = 0x2; else if (b_w <= 14000) lpf = 0xa; else if (b_w <= 16000) lpf = 0x6; else if (b_w <= 18000) lpf = 0xe; else if (b_w <= 20000) lpf = 0x1; else if (b_w <= 22000) lpf = 0x9; else if (b_w <= 24000) lpf = 0x5; else if (b_w <= 26000) lpf = 0xd; else if (b_w <= 28000) lpf = 0x3; else lpf = 0xb; buf[2] ^= 0x1c; /* Flip bits 3-5 */ /* Set lpf */ buf[2] |= ((lpf >> 2) & 0x3) << 3; buf[3] |= (lpf & 0x3) << 2; return; } static struct dvb_pll_desc dvb_pll_opera1 = { .name = "Opera Tuner", .min = 900000, .max = 2250000, .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, .iffreq= 0, .set = opera1_bw, .count = 8, .entries = { { 1064000, 500, 0xf9, 0xc2 }, { 1169000, 500, 0xf9, 0xe2 }, { 1299000, 500, 0xf9, 0x20 }, { 1444000, 500, 0xf9, 0x40 }, { 1606000, 500, 0xf9, 0x60 }, { 1777000, 500, 0xf9, 0x80 }, { 1941000, 500, 0xf9, 0xa0 }, { 2250000, 500, 0xf9, 0xc0 }, } }; static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) { struct dvb_pll_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 
}; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); buf[2] = 0x9e; buf[3] = 0x90; return; } /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { .name = "Samsung DTOS403IH102A", .min = 44250000, .max = 858000000, .iffreq = 36125000, .count = 8, .set = samsung_dtos403ih102a_set, .entries = { { 135000000, 62500, 0xbe, 0x01 }, { 177000000, 62500, 0xf6, 0x01 }, { 370000000, 62500, 0xbe, 0x02 }, { 450000000, 62500, 0xf6, 0x02 }, { 466000000, 62500, 0xfe, 0x02 }, { 538000000, 62500, 0xbe, 0x08 }, { 826000000, 62500, 0xf6, 0x08 }, { 999999999, 62500, 0xfe, 0x08 }, } }; /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { .name = "Samsung TDTC9251DH0", .min = 48000000, .max = 863000000, .iffreq = 36166667, .count = 3, .entries = { { 157500000, 166667, 0xcc, 0x09 }, { 443000000, 166667, 0xcc, 0x0a }, { 863000000, 166667, 0xcc, 0x08 }, } }; /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { .name = "Samsung TBDU18132", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1550000, 125, 0x84, 0x82 }, { 4095937, 125, 0x84, 0x80 }, } /* TSA5059 PLL has a 17 bit divisor rather than the 15 bits supported * by this driver. The two extra bits are 0x60 in the third byte. 15 * bits is enough for over 4 GHz, which is enough to cover the range * of this tuner. We could use the additional divisor bits by adding * more entries, e.g. 
{ 0x0ffff * 125 + 125/2, 125, 0x84 | 0x20, }, { 0x17fff * 125 + 125/2, 125, 0x84 | 0x40, }, { 0x1ffff * 125 + 125/2, 125, 0x84 | 0x60, }, */ }; /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { .name = "Samsung TBMU24112", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1500000, 125, 0x84, 0x18 }, { 9999999, 125, 0x84, 0x08 }, } }; /* Alps TDEE4 DVB-C NIM, used on Cablestar 2 */ /* byte 4 : 1 * * AGD R3 R2 R1 R0 * byte 5 : C1 * RE RTS BS4 BS3 BS2 BS1 * AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 * Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5 * 47 - 153 0 * 0 0 0 0 0 1 0x01 * 153 - 430 0 * 0 0 0 0 1 0 0x02 * 430 - 822 0 * 0 0 1 0 0 0 0x08 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ static struct dvb_pll_desc dvb_pll_alps_tdee4 = { .name = "ALPS TDEE4", .min = 47000000, .max = 862000000, .iffreq = 36125000, .count = 4, .entries = { { 153000000, 62500, 0x95, 0x01 }, { 430000000, 62500, 0x95, 0x02 }, { 822000000, 62500, 0x95, 0x08 }, { 999999999, 62500, 0x95, 0x88 }, } }; /* ----------------------------------------------------------- */ static struct dvb_pll_desc *pll_list[] = { [DVB_PLL_UNDEFINED] = NULL, [DVB_PLL_THOMSON_DTT7579] = &dvb_pll_thomson_dtt7579, [DVB_PLL_THOMSON_DTT759X] = &dvb_pll_thomson_dtt759x, [DVB_PLL_LG_Z201] = &dvb_pll_lg_z201, [DVB_PLL_UNKNOWN_1] = &dvb_pll_unknown_1, [DVB_PLL_TUA6010XS] = &dvb_pll_tua6010xs, [DVB_PLL_ENV57H1XD5] = &dvb_pll_env57h1xd5, [DVB_PLL_TUA6034] = &dvb_pll_tua6034, [DVB_PLL_TDA665X] = &dvb_pll_tda665x, [DVB_PLL_TDED4] = &dvb_pll_tded4, [DVB_PLL_TDEE4] = &dvb_pll_alps_tdee4, [DVB_PLL_TDHU2] = &dvb_pll_tdhu2, [DVB_PLL_SAMSUNG_TBMV] = &dvb_pll_samsung_tbmv, [DVB_PLL_PHILIPS_SD1878_TDA8261] = &dvb_pll_philips_sd1878_tda8261, [DVB_PLL_OPERA1] = &dvb_pll_opera1, [DVB_PLL_SAMSUNG_DTOS403IH102A] = &dvb_pll_samsung_dtos403ih102a, [DVB_PLL_SAMSUNG_TDTC9251DH0] = &dvb_pll_samsung_tdtc9251dh0, [DVB_PLL_SAMSUNG_TBDU18132] = 
&dvb_pll_samsung_tbdu18132, [DVB_PLL_SAMSUNG_TBMU24112] = &dvb_pll_samsung_tbmu24112, }; /* ----------------------------------------------------------- */ /* code */ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf, const u32 frequency) { struct dvb_pll_priv *priv = fe->tuner_priv; struct dvb_pll_desc *desc = priv->pll_desc; u32 div; int i; if (frequency && (frequency < desc->min || frequency > desc->max)) return -EINVAL; for (i = 0; i < desc->count; i++) { if (frequency > desc->entries[i].limit) continue; break; } if (debug) printk("pll: %s: freq=%d | i=%d/%d\n", desc->name, frequency, i, desc->count); if (i == desc->count) return -EINVAL; div = (frequency + desc->iffreq + desc->entries[i].stepsize/2) / desc->entries[i].stepsize; buf[0] = div >> 8; buf[1] = div & 0xff; buf[2] = desc->entries[i].config; buf[3] = desc->entries[i].cb; if (desc->set) desc->set(fe, buf); if (debug) printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n", desc->name, div, buf[0], buf[1], buf[2], buf[3]); // calculate the frequency we set it to return (div * desc->entries[i].stepsize) - desc->iffreq; } static int dvb_pll_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int dvb_pll_sleep(struct dvb_frontend *fe) { struct dvb_pll_priv *priv = fe->tuner_priv; if (priv->i2c == NULL) return -EINVAL; if (priv->pll_desc->sleepdata) { struct i2c_msg msg = { .flags = 0, .addr = priv->pll_i2c_address, .buf = priv->pll_desc->sleepdata + 1, .len = priv->pll_desc->sleepdata[0] }; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) { return result; } return 0; } /* Shouldn't be called when initdata is NULL, maybe BUG()? 
*/ return -EINVAL; } static int dvb_pll_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; u8 buf[4]; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = sizeof(buf) }; int result; u32 frequency = 0; if (priv->i2c == NULL) return -EINVAL; result = dvb_pll_configure(fe, buf, c->frequency); if (result < 0) return result; else frequency = result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) { return result; } priv->frequency = frequency; priv->bandwidth = c->bandwidth_hz; return 0; } static int dvb_pll_calc_regs(struct dvb_frontend *fe, u8 *buf, int buf_len) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; int result; u32 frequency = 0; if (buf_len < 5) return -EINVAL; result = dvb_pll_configure(fe, buf + 1, c->frequency); if (result < 0) return result; else frequency = result; buf[0] = priv->pll_i2c_address; priv->frequency = frequency; priv->bandwidth = c->bandwidth_hz; return 5; } static int dvb_pll_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct dvb_pll_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int dvb_pll_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct dvb_pll_priv *priv = fe->tuner_priv; *bandwidth = priv->bandwidth; return 0; } static int dvb_pll_init(struct dvb_frontend *fe) { struct dvb_pll_priv *priv = fe->tuner_priv; if (priv->i2c == NULL) return -EINVAL; if (priv->pll_desc->initdata) { struct i2c_msg msg = { .flags = 0, .addr = priv->pll_i2c_address, .buf = priv->pll_desc->initdata + 1, .len = priv->pll_desc->initdata[0] }; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) return result; if (priv->pll_desc->initdata2) { msg.buf = priv->pll_desc->initdata2 + 1; msg.len 
= priv->pll_desc->initdata2[0]; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) return result; } return 0; } /* Shouldn't be called when initdata is NULL, maybe BUG()? */ return -EINVAL; } static struct dvb_tuner_ops dvb_pll_tuner_ops = { .release = dvb_pll_release, .sleep = dvb_pll_sleep, .init = dvb_pll_init, .set_params = dvb_pll_set_params, .calc_regs = dvb_pll_calc_regs, .get_frequency = dvb_pll_get_frequency, .get_bandwidth = dvb_pll_get_bandwidth, }; struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, unsigned int pll_desc_id) { u8 b1 [] = { 0 }; struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }; struct dvb_pll_priv *priv = NULL; int ret; struct dvb_pll_desc *desc; if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) pll_desc_id = id[dvb_pll_devcount]; BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list)); desc = pll_list[pll_desc_id]; if (i2c != NULL) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer (i2c, &msg, 1); if (ret != 1) return NULL; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->pll_i2c_address = pll_addr; priv->i2c = i2c; priv->pll_desc = desc; priv->nr = dvb_pll_devcount++; memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, sizeof(struct dvb_tuner_ops)); strncpy(fe->ops.tuner_ops.info.name, desc->name, sizeof(fe->ops.tuner_ops.info.name)); fe->ops.tuner_ops.info.frequency_min = desc->min; fe->ops.tuner_ops.info.frequency_max = desc->max; if (!desc->initdata) fe->ops.tuner_ops.init = NULL; if (!desc->sleepdata) fe->ops.tuner_ops.sleep = NULL; fe->tuner_priv = priv; if ((debug) || (id[priv->nr] == pll_desc_id)) { printk("dvb-pll[%d]", priv->nr); if (i2c != NULL) printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr); printk(": 
id# %d (%s) attached, %s\n", pll_desc_id, desc->name, id[priv->nr] == pll_desc_id ? "insmod option" : "autodetected"); } return fe; } EXPORT_SYMBOL(dvb_pll_attach); MODULE_DESCRIPTION("dvb pll library"); MODULE_AUTHOR("Gerd Knorr"); MODULE_LICENSE("GPL");
gpl-2.0
BanBxda/GoogleEdition_4.3
fs/afs/proc.c
8611
17455
/* /proc interface for AFS * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <asm/uaccess.h> #include "internal.h" static struct proc_dir_entry *proc_afs; static int afs_proc_cells_open(struct inode *inode, struct file *file); static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos); static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos); static void afs_proc_cells_stop(struct seq_file *p, void *v); static int afs_proc_cells_show(struct seq_file *m, void *v); static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos); static const struct seq_operations afs_proc_cells_ops = { .start = afs_proc_cells_start, .next = afs_proc_cells_next, .stop = afs_proc_cells_stop, .show = afs_proc_cells_show, }; static const struct file_operations afs_proc_cells_fops = { .open = afs_proc_cells_open, .read = seq_read, .write = afs_proc_cells_write, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; static int afs_proc_rootcell_open(struct inode *inode, struct file *file); static int afs_proc_rootcell_release(struct inode *inode, struct file *file); static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, size_t size, loff_t *_pos); static ssize_t afs_proc_rootcell_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos); static const struct file_operations afs_proc_rootcell_fops = { .open = afs_proc_rootcell_open, .read = afs_proc_rootcell_read, .write = afs_proc_rootcell_write, .llseek = no_llseek, .release = 
afs_proc_rootcell_release, .owner = THIS_MODULE, }; static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file); static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file); static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos); static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *pos); static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v); static int afs_proc_cell_volumes_show(struct seq_file *m, void *v); static const struct seq_operations afs_proc_cell_volumes_ops = { .start = afs_proc_cell_volumes_start, .next = afs_proc_cell_volumes_next, .stop = afs_proc_cell_volumes_stop, .show = afs_proc_cell_volumes_show, }; static const struct file_operations afs_proc_cell_volumes_fops = { .open = afs_proc_cell_volumes_open, .read = seq_read, .llseek = seq_lseek, .release = afs_proc_cell_volumes_release, .owner = THIS_MODULE, }; static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file); static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file); static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos); static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *pos); static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v); static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v); static const struct seq_operations afs_proc_cell_vlservers_ops = { .start = afs_proc_cell_vlservers_start, .next = afs_proc_cell_vlservers_next, .stop = afs_proc_cell_vlservers_stop, .show = afs_proc_cell_vlservers_show, }; static const struct file_operations afs_proc_cell_vlservers_fops = { .open = afs_proc_cell_vlservers_open, .read = seq_read, .llseek = seq_lseek, .release = afs_proc_cell_vlservers_release, .owner = THIS_MODULE, }; static int afs_proc_cell_servers_open(struct inode *inode, struct file *file); static int afs_proc_cell_servers_release(struct inode *inode, struct file *file); 
static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos); static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *pos); static void afs_proc_cell_servers_stop(struct seq_file *p, void *v); static int afs_proc_cell_servers_show(struct seq_file *m, void *v); static const struct seq_operations afs_proc_cell_servers_ops = { .start = afs_proc_cell_servers_start, .next = afs_proc_cell_servers_next, .stop = afs_proc_cell_servers_stop, .show = afs_proc_cell_servers_show, }; static const struct file_operations afs_proc_cell_servers_fops = { .open = afs_proc_cell_servers_open, .read = seq_read, .llseek = seq_lseek, .release = afs_proc_cell_servers_release, .owner = THIS_MODULE, }; /* * initialise the /proc/fs/afs/ directory */ int afs_proc_init(void) { struct proc_dir_entry *p; _enter(""); proc_afs = proc_mkdir("fs/afs", NULL); if (!proc_afs) goto error_dir; p = proc_create("cells", 0, proc_afs, &afs_proc_cells_fops); if (!p) goto error_cells; p = proc_create("rootcell", 0, proc_afs, &afs_proc_rootcell_fops); if (!p) goto error_rootcell; _leave(" = 0"); return 0; error_rootcell: remove_proc_entry("cells", proc_afs); error_cells: remove_proc_entry("fs/afs", NULL); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * clean up the /proc/fs/afs/ directory */ void afs_proc_cleanup(void) { remove_proc_entry("rootcell", proc_afs); remove_proc_entry("cells", proc_afs); remove_proc_entry("fs/afs", NULL); } /* * open "/proc/fs/afs/cells" which provides a summary of extant cells */ static int afs_proc_cells_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; ret = seq_open(file, &afs_proc_cells_ops); if (ret < 0) return ret; m = file->private_data; m->private = PDE(inode)->data; return 0; } /* * set up the iterator to start reading from the cells list and return the * first item */ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) { /* lock the list against modification */ down_read(&afs_proc_cells_sem); 
return seq_list_start_head(&afs_proc_cells, *_pos); } /* * move to next cell in cells list */ static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos) { return seq_list_next(v, &afs_proc_cells, pos); } /* * clean up after reading from the cells list */ static void afs_proc_cells_stop(struct seq_file *p, void *v) { up_read(&afs_proc_cells_sem); } /* * display a header line followed by a load of cell lines */ static int afs_proc_cells_show(struct seq_file *m, void *v) { struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); if (v == &afs_proc_cells) { /* display header on line 1 */ seq_puts(m, "USE NAME\n"); return 0; } /* display one cell per line on subsequent lines */ seq_printf(m, "%3d %s\n", atomic_read(&cell->usage), cell->name); return 0; } /* * handle writes to /proc/fs/afs/cells * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" */ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos) { char *kbuf, *name, *args; int ret; /* start by dragging the command into memory */ if (size <= 1 || size >= PAGE_SIZE) return -EINVAL; kbuf = kmalloc(size + 1, GFP_KERNEL); if (!kbuf) return -ENOMEM; ret = -EFAULT; if (copy_from_user(kbuf, buf, size) != 0) goto done; kbuf[size] = 0; /* trim to first NL */ name = memchr(kbuf, '\n', size); if (name) *name = 0; /* split into command, name and argslist */ name = strchr(kbuf, ' '); if (!name) goto inval; do { *name++ = 0; } while(*name == ' '); if (!*name) goto inval; args = strchr(name, ' '); if (!args) goto inval; do { *args++ = 0; } while(*args == ' '); if (!*args) goto inval; /* determine command to perform */ _debug("cmd=%s name=%s args=%s", kbuf, name, args); if (strcmp(kbuf, "add") == 0) { struct afs_cell *cell; cell = afs_cell_create(name, strlen(name), args, false); if (IS_ERR(cell)) { ret = PTR_ERR(cell); goto done; } afs_put_cell(cell); printk("kAFS: Added new cell '%s'\n", name); } else { goto inval; } ret = size; done: 
kfree(kbuf); _leave(" = %d", ret); return ret; inval: ret = -EINVAL; printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n"); goto done; } /* * Stubs for /proc/fs/afs/rootcell */ static int afs_proc_rootcell_open(struct inode *inode, struct file *file) { return 0; } static int afs_proc_rootcell_release(struct inode *inode, struct file *file) { return 0; } static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, size_t size, loff_t *_pos) { return 0; } /* * handle writes to /proc/fs/afs/rootcell * - to initialize rootcell: echo "cell.name:192.168.231.14" */ static ssize_t afs_proc_rootcell_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos) { char *kbuf, *s; int ret; /* start by dragging the command into memory */ if (size <= 1 || size >= PAGE_SIZE) return -EINVAL; ret = -ENOMEM; kbuf = kmalloc(size + 1, GFP_KERNEL); if (!kbuf) goto nomem; ret = -EFAULT; if (copy_from_user(kbuf, buf, size) != 0) goto infault; kbuf[size] = 0; /* trim to first NL */ s = memchr(kbuf, '\n', size); if (s) *s = 0; /* determine command to perform */ _debug("rootcell=%s", kbuf); ret = afs_cell_init(kbuf); if (ret >= 0) ret = size; /* consume everything, always */ infault: kfree(kbuf); nomem: _leave(" = %d", ret); return ret; } /* * initialise /proc/fs/afs/<cell>/ */ int afs_proc_cell_setup(struct afs_cell *cell) { struct proc_dir_entry *p; _enter("%p{%s}", cell, cell->name); cell->proc_dir = proc_mkdir(cell->name, proc_afs); if (!cell->proc_dir) goto error_dir; p = proc_create_data("servers", 0, cell->proc_dir, &afs_proc_cell_servers_fops, cell); if (!p) goto error_servers; p = proc_create_data("vlservers", 0, cell->proc_dir, &afs_proc_cell_vlservers_fops, cell); if (!p) goto error_vlservers; p = proc_create_data("volumes", 0, cell->proc_dir, &afs_proc_cell_volumes_fops, cell); if (!p) goto error_volumes; _leave(" = 0"); return 0; error_volumes: remove_proc_entry("vlservers", cell->proc_dir); error_vlservers: remove_proc_entry("servers", 
cell->proc_dir); error_servers: remove_proc_entry(cell->name, proc_afs); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * remove /proc/fs/afs/<cell>/ */ void afs_proc_cell_remove(struct afs_cell *cell) { _enter(""); remove_proc_entry("volumes", cell->proc_dir); remove_proc_entry("vlservers", cell->proc_dir); remove_proc_entry("servers", cell->proc_dir); remove_proc_entry(cell->name, proc_afs); _leave(""); } /* * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant cells */ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file) { struct afs_cell *cell; struct seq_file *m; int ret; cell = PDE(inode)->data; if (!cell) return -ENOENT; ret = seq_open(file, &afs_proc_cell_volumes_ops); if (ret < 0) return ret; m = file->private_data; m->private = cell; return 0; } /* * close the file and release the ref to the cell */ static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file) { return seq_release(inode, file); } /* * set up the iterator to start reading from the cells list and return the * first item */ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) { struct afs_cell *cell = m->private; _enter("cell=%p pos=%Ld", cell, *_pos); /* lock the list against modification */ down_read(&cell->vl_sem); return seq_list_start_head(&cell->vl_list, *_pos); } /* * move to next cell in cells list */ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *_pos) { struct afs_cell *cell = p->private; _enter("cell=%p pos=%Ld", cell, *_pos); return seq_list_next(v, &cell->vl_list, _pos); } /* * clean up after reading from the cells list */ static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v) { struct afs_cell *cell = p->private; up_read(&cell->vl_sem); } static const char afs_vlocation_states[][4] = { [AFS_VL_NEW] = "New", [AFS_VL_CREATING] = "Crt", [AFS_VL_VALID] = "Val", [AFS_VL_NO_VOLUME] = "NoV", [AFS_VL_UPDATING] = "Upd", [AFS_VL_VOLUME_DELETED] = "Del", 
[AFS_VL_UNCERTAIN] = "Unc", }; /* * display a header line followed by a load of volume lines */ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) { struct afs_cell *cell = m->private; struct afs_vlocation *vlocation = list_entry(v, struct afs_vlocation, link); /* display header on line 1 */ if (v == &cell->vl_list) { seq_puts(m, "USE STT VLID[0] VLID[1] VLID[2] NAME\n"); return 0; } /* display one cell per line on subsequent lines */ seq_printf(m, "%3d %s %08x %08x %08x %s\n", atomic_read(&vlocation->usage), afs_vlocation_states[vlocation->state], vlocation->vldb.vid[0], vlocation->vldb.vid[1], vlocation->vldb.vid[2], vlocation->vldb.name); return 0; } /* * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume * location server */ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file) { struct afs_cell *cell; struct seq_file *m; int ret; cell = PDE(inode)->data; if (!cell) return -ENOENT; ret = seq_open(file, &afs_proc_cell_vlservers_ops); if (ret<0) return ret; m = file->private_data; m->private = cell; return 0; } /* * close the file and release the ref to the cell */ static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file) { return seq_release(inode, file); } /* * set up the iterator to start reading from the cells list and return the * first item */ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) { struct afs_cell *cell = m->private; loff_t pos = *_pos; _enter("cell=%p pos=%Ld", cell, *_pos); /* lock the list against modification */ down_read(&cell->vl_sem); /* allow for the header line */ if (!pos) return (void *) 1; pos--; if (pos >= cell->vl_naddrs) return NULL; return &cell->vl_addrs[pos]; } /* * move to next cell in cells list */ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *_pos) { struct afs_cell *cell = p->private; loff_t pos; _enter("cell=%p{nad=%u} pos=%Ld", cell, cell->vl_naddrs, *_pos); pos = *_pos; 
(*_pos)++; if (pos >= cell->vl_naddrs) return NULL; return &cell->vl_addrs[pos]; } /* * clean up after reading from the cells list */ static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v) { struct afs_cell *cell = p->private; up_read(&cell->vl_sem); } /* * display a header line followed by a load of volume lines */ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) { struct in_addr *addr = v; /* display header on line 1 */ if (v == (struct in_addr *) 1) { seq_puts(m, "ADDRESS\n"); return 0; } /* display one cell per line on subsequent lines */ seq_printf(m, "%pI4\n", &addr->s_addr); return 0; } /* * open "/proc/fs/afs/<cell>/servers" which provides a summary of active * servers */ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file) { struct afs_cell *cell; struct seq_file *m; int ret; cell = PDE(inode)->data; if (!cell) return -ENOENT; ret = seq_open(file, &afs_proc_cell_servers_ops); if (ret < 0) return ret; m = file->private_data; m->private = cell; return 0; } /* * close the file and release the ref to the cell */ static int afs_proc_cell_servers_release(struct inode *inode, struct file *file) { return seq_release(inode, file); } /* * set up the iterator to start reading from the cells list and return the * first item */ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) __acquires(m->private->servers_lock) { struct afs_cell *cell = m->private; _enter("cell=%p pos=%Ld", cell, *_pos); /* lock the list against modification */ read_lock(&cell->servers_lock); return seq_list_start_head(&cell->servers, *_pos); } /* * move to next cell in cells list */ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *_pos) { struct afs_cell *cell = p->private; _enter("cell=%p pos=%Ld", cell, *_pos); return seq_list_next(v, &cell->servers, _pos); } /* * clean up after reading from the cells list */ static void afs_proc_cell_servers_stop(struct seq_file *p, void *v) 
__releases(p->private->servers_lock) { struct afs_cell *cell = p->private; read_unlock(&cell->servers_lock); } /* * display a header line followed by a load of volume lines */ static int afs_proc_cell_servers_show(struct seq_file *m, void *v) { struct afs_cell *cell = m->private; struct afs_server *server = list_entry(v, struct afs_server, link); char ipaddr[20]; /* display header on line 1 */ if (v == &cell->servers) { seq_puts(m, "USE ADDR STATE\n"); return 0; } /* display one cell per line on subsequent lines */ sprintf(ipaddr, "%pI4", &server->addr); seq_printf(m, "%3d %-15.15s %5d\n", atomic_read(&server->usage), ipaddr, server->fs_state); return 0; }
gpl-2.0
NoelMacwan/android_kernel_sony_c2005
drivers/media/video/cx23885/cx23885-f300.c
9379
4188
/* * Driver for Silicon Labs C8051F300 microcontroller. * * It is used for LNB power control in TeVii S470, * TBS 6920 PCIe DVB-S2 cards. * * Microcontroller connected to cx23885 GPIO pins: * GPIO0 - data - P0.3 F300 * GPIO1 - reset - P0.2 F300 * GPIO2 - clk - P0.1 F300 * GPIO3 - busy - P0.0 F300 * * Copyright (C) 2009 Igor M. Liplianin <liplianin@me.by> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include "cx23885.h" #define F300_DATA GPIO_0 #define F300_RESET GPIO_1 #define F300_CLK GPIO_2 #define F300_BUSY GPIO_3 static void f300_set_line(struct cx23885_dev *dev, u32 line, u8 lvl) { cx23885_gpio_enable(dev, line, 1); if (lvl == 1) cx23885_gpio_set(dev, line); else cx23885_gpio_clear(dev, line); } static u8 f300_get_line(struct cx23885_dev *dev, u32 line) { cx23885_gpio_enable(dev, line, 0); return cx23885_gpio_get(dev, line); } static void f300_send_byte(struct cx23885_dev *dev, u8 dta) { u8 i; for (i = 0; i < 8; i++) { f300_set_line(dev, F300_CLK, 0); udelay(30); f300_set_line(dev, F300_DATA, (dta & 0x80) >> 7);/* msb first */ udelay(30); dta <<= 1; f300_set_line(dev, F300_CLK, 1); udelay(30); } } static u8 f300_get_byte(struct cx23885_dev *dev) { u8 i, dta = 0; for (i = 0; i < 8; i++) { f300_set_line(dev, F300_CLK, 0); udelay(30); dta <<= 1; f300_set_line(dev, F300_CLK, 1); udelay(30); dta |= f300_get_line(dev, F300_DATA);/* msb first */ } return dta; } static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf) { struct cx23885_tsport *port = fe->dvb->priv; struct cx23885_dev *dev = port->dev; u8 i, temp, ret = 0; temp = buf[0]; for (i = 0; i < buf[0]; i++) temp += buf[i + 1]; temp = (~temp + 1);/* get check sum */ buf[1 + buf[0]] = temp; f300_set_line(dev, F300_RESET, 1); f300_set_line(dev, F300_CLK, 1); udelay(30); f300_set_line(dev, F300_DATA, 1); msleep(1); /* question: */ f300_set_line(dev, F300_RESET, 0);/* begin to send data */ msleep(1); f300_send_byte(dev, 0xe0);/* the slave address is 0xe0, write */ msleep(1); temp = buf[0]; temp += 2; for (i = 0; i < temp; i++) f300_send_byte(dev, buf[i]); f300_set_line(dev, F300_RESET, 1);/* sent data over */ f300_set_line(dev, F300_DATA, 1); /* answer: */ temp = 0; for (i = 0; ((i < 8) & (temp == 0)); i++) { msleep(1); if (f300_get_line(dev, F300_BUSY) == 0) temp = 1; } if (i > 7) { printk(KERN_ERR "%s: timeout, the slave no response\n", __func__); ret = 1; /* timeout, the slave no response */ } else { /* the 
slave not busy, prepare for getting data */ f300_set_line(dev, F300_RESET, 0);/*ready...*/ msleep(1); f300_send_byte(dev, 0xe1);/* 0xe1 is Read */ msleep(1); temp = f300_get_byte(dev);/*get the data length */ if (temp > 14) temp = 14; for (i = 0; i < (temp + 1); i++) f300_get_byte(dev);/* get data to empty buffer */ f300_set_line(dev, F300_RESET, 1);/* received data over */ f300_set_line(dev, F300_DATA, 1); } return ret; } int f300_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { u8 buf[16]; buf[0] = 0x05; buf[1] = 0x38;/* write port */ buf[2] = 0x01;/* A port, lnb power */ switch (voltage) { case SEC_VOLTAGE_13: buf[3] = 0x01;/* power on */ buf[4] = 0x02;/* B port, H/V */ buf[5] = 0x00;/*13V v*/ break; case SEC_VOLTAGE_18: buf[3] = 0x01; buf[4] = 0x02; buf[5] = 0x01;/* 18V h*/ break; case SEC_VOLTAGE_OFF: buf[3] = 0x00;/* power off */ buf[4] = 0x00; buf[5] = 0x00; break; } return f300_xfer(fe, buf); }
gpl-2.0
AOKP/kernel_motorola_msm8960dt
drivers/media/video/cx23885/cx23885-f300.c
9379
4188
/* * Driver for Silicon Labs C8051F300 microcontroller. * * It is used for LNB power control in TeVii S470, * TBS 6920 PCIe DVB-S2 cards. * * Microcontroller connected to cx23885 GPIO pins: * GPIO0 - data - P0.3 F300 * GPIO1 - reset - P0.2 F300 * GPIO2 - clk - P0.1 F300 * GPIO3 - busy - P0.0 F300 * * Copyright (C) 2009 Igor M. Liplianin <liplianin@me.by> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/

#include "cx23885.h"

/* GPIO lines wiring the cx23885 to the C8051F300 (see header comment) */
#define F300_DATA	GPIO_0
#define F300_RESET	GPIO_1
#define F300_CLK	GPIO_2
#define F300_BUSY	GPIO_3

/* Drive @line as an output and set it to @lvl (0 or 1). */
static void f300_set_line(struct cx23885_dev *dev, u32 line, u8 lvl)
{
	cx23885_gpio_enable(dev, line, 1);
	if (lvl == 1)
		cx23885_gpio_set(dev, line);
	else
		cx23885_gpio_clear(dev, line);
}

/* Switch @line to input and sample its current level. */
static u8 f300_get_line(struct cx23885_dev *dev, u32 line)
{
	cx23885_gpio_enable(dev, line, 0);
	return cx23885_gpio_get(dev, line);
}

/* Bit-bang one byte out to the F300, MSB first, ~30us per clock phase. */
static void f300_send_byte(struct cx23885_dev *dev, u8 dta)
{
	u8 i;

	for (i = 0; i < 8; i++) {
		f300_set_line(dev, F300_CLK, 0);
		udelay(30);
		f300_set_line(dev, F300_DATA, (dta & 0x80) >> 7);/* msb first */
		udelay(30);
		dta <<= 1;
		f300_set_line(dev, F300_CLK, 1);
		udelay(30);
	}
}

/* Bit-bang one byte in from the F300, MSB first. */
static u8 f300_get_byte(struct cx23885_dev *dev)
{
	u8 i, dta = 0;

	for (i = 0; i < 8; i++) {
		f300_set_line(dev, F300_CLK, 0);
		udelay(30);
		dta <<= 1;
		f300_set_line(dev, F300_CLK, 1);
		udelay(30);
		dta |= f300_get_line(dev, F300_DATA);/* msb first */
	}

	return dta;
}

/*
 * Transfer one command frame (buf[0] = payload length, buf[1..] = payload,
 * checksum appended at buf[1 + buf[0]]) to the microcontroller, then wait
 * for the slave and drain its reply.  Returns 0 on success, 1 on timeout.
 */
static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf)
{
	struct cx23885_tsport *port = fe->dvb->priv;
	struct cx23885_dev *dev = port->dev;
	u8 i, temp, ret = 0;

	temp = buf[0];
	for (i = 0; i < buf[0]; i++)
		temp += buf[i + 1];
	temp = (~temp + 1);/* get check sum */
	buf[1 + buf[0]] = temp;

	f300_set_line(dev, F300_RESET, 1);
	f300_set_line(dev, F300_CLK, 1);
	udelay(30);
	f300_set_line(dev, F300_DATA, 1);
	msleep(1);

	/* question: */
	f300_set_line(dev, F300_RESET, 0);/* begin to send data */
	msleep(1);

	f300_send_byte(dev, 0xe0);/* the slave address is 0xe0, write */
	msleep(1);

	temp = buf[0];
	temp += 2;
	for (i = 0; i < temp; i++)
		f300_send_byte(dev, buf[i]);

	f300_set_line(dev, F300_RESET, 1);/* sent data over */
	f300_set_line(dev, F300_DATA, 1);

	/* answer: poll BUSY for up to 8ms.
	 * NOTE(review): bitwise '&' used where '&&' was meant; harmless here
	 * because both operands are always 0 or 1 */
	temp = 0;
	for (i = 0; ((i < 8) & (temp == 0)); i++) {
		msleep(1);
		if (f300_get_line(dev, F300_BUSY) == 0)
			temp = 1;
	}

	if (i > 7) {
		printk(KERN_ERR "%s: timeout, the slave no response\n",
								__func__);
		ret = 1; /* timeout, the slave no response */
	} else { /* the slave not busy, prepare for getting data */
		f300_set_line(dev, F300_RESET, 0);/*ready...*/
		msleep(1);
		f300_send_byte(dev, 0xe1);/* 0xe1 is Read */
		msleep(1);
		temp = f300_get_byte(dev);/*get the data length */
		if (temp > 14)
			temp = 14;

		for (i = 0; i < (temp + 1); i++)
			f300_get_byte(dev);/* get data to empty buffer */

		f300_set_line(dev, F300_RESET, 1);/* received data over */
		f300_set_line(dev, F300_DATA, 1);
	}

	return ret;
}

/*
 * Set the LNB supply voltage (13V vertical / 18V horizontal) or switch
 * the LNB power off.  Returns the f300_xfer() result.
 * NOTE(review): an fe_sec_voltage_t value outside the three handled cases
 * leaves buf[3..5] uninitialized - worth adding a default branch.
 */
int f300_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	u8 buf[16];

	buf[0] = 0x05;
	buf[1] = 0x38;/* write port */
	buf[2] = 0x01;/* A port, lnb power */

	switch (voltage) {
	case SEC_VOLTAGE_13:
		buf[3] = 0x01;/* power on */
		buf[4] = 0x02;/* B port, H/V */
		buf[5] = 0x00;/*13V v*/
		break;
	case SEC_VOLTAGE_18:
		buf[3] = 0x01;
		buf[4] = 0x02;
		buf[5] = 0x01;/* 18V h*/
		break;
	case SEC_VOLTAGE_OFF:
		buf[3] = 0x00;/* power off */
		buf[4] = 0x00;
		buf[5] = 0x00;
		break;
	}

	return f300_xfer(fe, buf);
}
gpl-2.0
tempesta-tech/linux-4.1-tfw
fs/logfs/compr.c
12707
1816
/*
 * fs/logfs/compr.c - compression routines
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#define COMPR_LEVEL 3

/* One shared zlib stream for the whole filesystem; compr_mutex
 * serializes every user of it. */
static DEFINE_MUTEX(compr_mutex);
static struct z_stream_s stream;

/*
 * Deflate @inlen bytes from @in into @out (capacity @outlen).
 * Returns the compressed length on success, or -EIO on any zlib
 * failure or when the output did not end up smaller than the input.
 */
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
	int result = -EIO;
	int zerr;

	mutex_lock(&compr_mutex);

	zerr = zlib_deflateInit(&stream, COMPR_LEVEL);
	if (zerr != Z_OK)
		goto out;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	zerr = zlib_deflate(&stream, Z_FINISH);
	if (zerr != Z_STREAM_END)
		goto out;

	zerr = zlib_deflateEnd(&stream);
	if (zerr != Z_OK)
		goto out;

	/* Compression is only worthwhile if the data actually shrank. */
	if (stream.total_out >= stream.total_in)
		goto out;

	result = stream.total_out;
out:
	mutex_unlock(&compr_mutex);
	return result;
}

/*
 * Inflate @inlen bytes from @in into @out (capacity @outlen).
 * Returns 0 on success or -EIO on any zlib failure.
 */
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
	int result = -EIO;
	int zerr;

	mutex_lock(&compr_mutex);

	zerr = zlib_inflateInit(&stream);
	if (zerr != Z_OK)
		goto out;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	zerr = zlib_inflate(&stream, Z_FINISH);
	if (zerr != Z_STREAM_END)
		goto out;

	zerr = zlib_inflateEnd(&stream);
	if (zerr != Z_OK)
		goto out;

	result = 0;
out:
	mutex_unlock(&compr_mutex);
	return result;
}

/*
 * Allocate a workspace big enough for both deflate and inflate.
 * Returns 0 on success, -ENOMEM if the vmalloc fails.
 */
int __init logfs_compr_init(void)
{
	size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());

	stream.workspace = vmalloc(size);
	if (!stream.workspace)
		return -ENOMEM;
	return 0;
}

/* Release the shared zlib workspace. */
void logfs_compr_exit(void)
{
	vfree(stream.workspace);
}
gpl-2.0
Blechd0se/mako_kernel
fs/logfs/compr.c
12707
1816
/*
 * fs/logfs/compr.c - compression routines
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#define COMPR_LEVEL 3

/* Single shared zlib stream; compr_mutex serializes all users. */
static DEFINE_MUTEX(compr_mutex);
static struct z_stream_s stream;

/*
 * Deflate @inlen bytes from @in into @out (capacity @outlen).
 * Returns the compressed length, or -EIO on zlib failure or when the
 * output would not be smaller than the input.
 */
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	mutex_lock(&compr_mutex);
	err = zlib_deflateInit(&stream, COMPR_LEVEL);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	err = zlib_deflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_deflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	/* only worthwhile if the data actually shrank */
	if (stream.total_out >= stream.total_in)
		goto error;

	ret = stream.total_out;
error:
	mutex_unlock(&compr_mutex);
	return ret;
}

/*
 * Inflate @inlen bytes from @in into @out (capacity @outlen).
 * Returns 0 on success or -EIO on zlib failure.
 */
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	mutex_lock(&compr_mutex);
	err = zlib_inflateInit(&stream);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	err = zlib_inflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_inflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	ret = 0;
error:
	mutex_unlock(&compr_mutex);
	return ret;
}

/*
 * Allocate a workspace big enough for both deflate and inflate.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int __init logfs_compr_init(void)
{
	size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	stream.workspace = vmalloc(size);
	if (!stream.workspace)
		return -ENOMEM;
	return 0;
}

/* Release the shared zlib workspace. */
void logfs_compr_exit(void)
{
	vfree(stream.workspace);
}
gpl-2.0
sim0629/linux-openwrt
arch/parisc/lib/memset.c
14243
2442
/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ #include <linux/types.h> #include <asm/string.h> #define OPSIZ (BITS_PER_LONG/8) typedef unsigned long op_t; void * memset (void *dstpp, int sc, size_t len) { unsigned int c = sc; long int dstp = (long int) dstpp; if (len >= 8) { size_t xlen; op_t cccc; cccc = (unsigned char) c; cccc |= cccc << 8; cccc |= cccc << 16; if (OPSIZ > 4) /* Do the shift in two steps to avoid warning if long has 32 bits. */ cccc |= (cccc << 16) << 16; /* There are at least some bytes to set. No need to test for LEN == 0 in this alignment loop. */ while (dstp % OPSIZ != 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */ xlen = len / (OPSIZ * 8); while (xlen > 0) { ((op_t *) dstp)[0] = cccc; ((op_t *) dstp)[1] = cccc; ((op_t *) dstp)[2] = cccc; ((op_t *) dstp)[3] = cccc; ((op_t *) dstp)[4] = cccc; ((op_t *) dstp)[5] = cccc; ((op_t *) dstp)[6] = cccc; ((op_t *) dstp)[7] = cccc; dstp += 8 * OPSIZ; xlen -= 1; } len %= OPSIZ * 8; /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. 
*/ xlen = len / OPSIZ; while (xlen > 0) { ((op_t *) dstp)[0] = cccc; dstp += OPSIZ; xlen -= 1; } len %= OPSIZ; } /* Write the last few bytes. */ while (len > 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } return dstpp; }
gpl-2.0
iyahman/samsung-kernel-aries
arch/parisc/lib/memset.c
14243
2442
/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ #include <linux/types.h> #include <asm/string.h> #define OPSIZ (BITS_PER_LONG/8) typedef unsigned long op_t; void * memset (void *dstpp, int sc, size_t len) { unsigned int c = sc; long int dstp = (long int) dstpp; if (len >= 8) { size_t xlen; op_t cccc; cccc = (unsigned char) c; cccc |= cccc << 8; cccc |= cccc << 16; if (OPSIZ > 4) /* Do the shift in two steps to avoid warning if long has 32 bits. */ cccc |= (cccc << 16) << 16; /* There are at least some bytes to set. No need to test for LEN == 0 in this alignment loop. */ while (dstp % OPSIZ != 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */ xlen = len / (OPSIZ * 8); while (xlen > 0) { ((op_t *) dstp)[0] = cccc; ((op_t *) dstp)[1] = cccc; ((op_t *) dstp)[2] = cccc; ((op_t *) dstp)[3] = cccc; ((op_t *) dstp)[4] = cccc; ((op_t *) dstp)[5] = cccc; ((op_t *) dstp)[6] = cccc; ((op_t *) dstp)[7] = cccc; dstp += 8 * OPSIZ; xlen -= 1; } len %= OPSIZ * 8; /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. 
*/ xlen = len / OPSIZ; while (xlen > 0) { ((op_t *) dstp)[0] = cccc; dstp += OPSIZ; xlen -= 1; } len %= OPSIZ; } /* Write the last few bytes. */ while (len > 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } return dstpp; }
gpl-2.0
Entropy512/android_kernel_motorola_msm8226
drivers/tc/tc-driver.c
15011
3027
/*
 * TURBOchannel driver services.
 *
 * Copyright (c) 2005  James Simmons
 * Copyright (c) 2006  Maciej W. Rozycki
 *
 * Loosely based on drivers/dio/dio-driver.c and
 * drivers/pci/pci-driver.c.
 *
 * This file is subject to the terms and conditions of the GNU
 * General Public License.  See the file "COPYING" in the main
 * directory of this archive for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tc.h>

/**
 * tc_register_driver - register a new TC driver
 * @tdrv: the driver structure to register
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int tc_register_driver(struct tc_driver *tdrv)
{
	return driver_register(&tdrv->driver);
}
EXPORT_SYMBOL(tc_register_driver);

/**
 * tc_unregister_driver - unregister a TC driver
 * @tdrv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered TC drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void tc_unregister_driver(struct tc_driver *tdrv)
{
	driver_unregister(&tdrv->driver);
}
EXPORT_SYMBOL(tc_unregister_driver);

/**
 * tc_match_device - tell if a TC device structure has a matching
 *                   TC device ID structure
 * @tdrv: the TC driver to search for matching TC device ID strings
 * @tdev: the TC device structure to match against
 *
 * Used by a driver to check whether a TC device present in the
 * system is in its list of supported devices.  Returns the matching
 * tc_device_id structure or %NULL if there is no match.
 */
const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
					   struct tc_dev *tdev)
{
	const struct tc_device_id *id = tdrv->id_table;

	/* A driver without an ID table matches nothing. */
	if (!id)
		return NULL;

	/* The table is terminated by an entry with empty name and vendor. */
	for (; id->name[0] || id->vendor[0]; id++)
		if (strcmp(tdev->name, id->name) == 0 &&
		    strcmp(tdev->vendor, id->vendor) == 0)
			return id;

	return NULL;
}
EXPORT_SYMBOL(tc_match_device);

/**
 * tc_bus_match - Tell if a device structure has a matching
 *                TC device ID structure
 * @dev: the device structure to match against
 * @drv: the device driver to search for matching TC device ID strings
 *
 * Used by the driver core to check whether a TC device present in the
 * system is supported by @drv.  Returns 1 if there is a match or 0
 * otherwise.
 */
static int tc_bus_match(struct device *dev, struct device_driver *drv)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	struct tc_driver *tdrv = to_tc_driver(drv);

	/* was: if (id) return 1; return 0; -- fold to a direct boolean */
	return tc_match_device(tdrv, tdev) != NULL;
}

struct bus_type tc_bus_type = {
	.name	= "tc",
	.match	= tc_bus_match,
};
EXPORT_SYMBOL(tc_bus_type);

static int __init tc_driver_init(void)
{
	return bus_register(&tc_bus_type);
}

postcore_initcall(tc_driver_init);
gpl-2.0
Excito/community-b3-kernel
sound/isa/cs423x/cs4236.c
164
22577
/* * Driver for generic CS4232/CS4235/CS4236/CS4236B/CS4237B/CS4238B/CS4239 chips * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cirrus Logic CS4232-9"); MODULE_SUPPORTED_DEVICE("{{Turtle Beach,TBS-2000}," "{Turtle Beach,Tropez Plus}," "{SIC CrystalWave 32}," "{Hewlett Packard,Omnibook 5500}," "{TerraTec,Maestro 32/96}," "{Philips,PCA70PS}}," "{{Crystal Semiconductors,CS4235}," "{Crystal Semiconductors,CS4236}," "{Crystal Semiconductors,CS4237}," "{Crystal Semiconductors,CS4238}," "{Crystal Semiconductors,CS4239}," "{Acer,AW37}," "{Acer,AW35/Pro}," "{Crystal,3D}," "{Crystal Computer,TidalWave128}," "{Dell,Optiplex GX1}," "{Dell,Workstation 400 sound}," "{EliteGroup,P5TX-LA sound}," "{Gallant,SC-70P}," "{Gateway,E1000 Onboard CS4236B}," "{Genius,Sound Maker 3DJ}," "{Hewlett Packard,HP6330 sound}," "{IBM,PC 300PL sound}," "{IBM,Aptiva 2137 E24}," "{IBM,IntelliStation M Pro}," "{Intel,Marlin Spike Mobo CS4235}," "{Intel PR440FX Onboard}," 
"{Guillemot,MaxiSound 16 PnP}," "{NewClear,3D}," "{TerraTec,AudioSystem EWS64L/XL}," "{Typhoon Soundsystem,CS4236B}," "{Turtle Beach,Malibu}," "{Unknown,Digital PC 5000 Onboard}}"); MODULE_ALIAS("snd_cs4232"); #define IDENT "CS4232+" #define DEV_NAME "cs4232+" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ #ifdef CONFIG_PNP static int isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; #endif static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long cport[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;/* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long sb_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,11,12,15 */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 9,11,12,15 */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " IDENT " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " IDENT " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable " IDENT " soundcard."); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard."); #endif module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " IDENT " driver."); module_param_array(cport, long, NULL, 0444); MODULE_PARM_DESC(cport, "Control port # for " IDENT " driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " IDENT " driver."); 
module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port # for " IDENT " driver."); module_param_array(sb_port, long, NULL, 0444); MODULE_PARM_DESC(sb_port, "SB port # for " IDENT " driver (optional)."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for " IDENT " driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " IDENT " driver."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA1 # for " IDENT " driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for " IDENT " driver."); #ifdef CONFIG_PNP static int isa_registered; static int pnpc_registered; static int pnp_registered; #endif /* CONFIG_PNP */ struct snd_card_cs4236 { struct snd_wss *chip; struct resource *res_sb_port; #ifdef CONFIG_PNP struct pnp_dev *wss; struct pnp_dev *ctrl; struct pnp_dev *mpu; #endif }; #ifdef CONFIG_PNP /* * PNP BIOS */ static const struct pnp_device_id snd_cs423x_pnpbiosids[] = { { .id = "CSC0100" }, { .id = "CSC0000" }, /* Guillemot Turtlebeach something appears to be cs4232 compatible * (untested) */ { .id = "GIM0100" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, snd_cs423x_pnpbiosids); #define CS423X_ISAPNP_DRIVER "cs4232_isapnp" static struct pnp_card_device_id snd_cs423x_pnpids[] = { /* Philips PCA70PS */ { .id = "CSC0d32", .devs = { { "CSC0000" }, { "CSC0010" }, { "PNPb006" } } }, /* TerraTec Maestro 32/96 (CS4232) */ { .id = "CSC1a32", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* HP Omnibook 5500 onboard */ { .id = "CSC4232", .devs = { { "CSC0000" }, { "CSC0002" }, { "CSC0003" } } }, /* Unnamed CS4236 card (Made in Taiwan) */ { .id = "CSC4236", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Turtle Beach TBS-2000 (CS4232) */ { .id = "CSC7532", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSCb006" } } }, /* Turtle Beach Tropez Plus (CS4232) */ { .id = "CSC7632", .devs = { { "CSC0000" }, { "CSC0010" }, { 
"PNPb006" } } }, /* SIC CrystalWave 32 (CS4232) */ { .id = "CSCf032", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Netfinity 3000 on-board soundcard */ { .id = "CSCe825", .devs = { { "CSC0100" }, { "CSC0110" }, { "CSC010f" } } }, /* Intel Marlin Spike Motherboard - CS4235 */ { .id = "CSC0225", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Intel Marlin Spike Motherboard (#2) - CS4235 */ { .id = "CSC0225", .devs = { { "CSC0100" }, { "CSC0110" }, { "CSC0103" } } }, /* Unknown Intel mainboard - CS4235 */ { .id = "CSC0225", .devs = { { "CSC0100" }, { "CSC0110" } } }, /* Genius Sound Maker 3DJ - CS4237B */ { .id = "CSC0437", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Digital PC 5000 Onboard - CS4236B */ { .id = "CSC0735", .devs = { { "CSC0000" }, { "CSC0010" } } }, /* some unknown CS4236B */ { .id = "CSC0b35", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Intel PR440FX Onboard sound */ { .id = "CSC0b36", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* CS4235 on mainboard without MPU */ { .id = "CSC1425", .devs = { { "CSC0100" }, { "CSC0110" } } }, /* Gateway E1000 Onboard CS4236B */ { .id = "CSC1335", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* HP 6330 Onboard sound */ { .id = "CSC1525", .devs = { { "CSC0100" }, { "CSC0110" }, { "CSC0103" } } }, /* Crystal Computer TidalWave128 */ { .id = "CSC1e37", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* ACER AW37 - CS4235 */ { .id = "CSC4236", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* build-in soundcard in EliteGroup P5TX-LA motherboard - CS4237B */ { .id = "CSC4237", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Crystal 3D - CS4237B */ { .id = "CSC4336", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Typhoon Soundsystem PnP - CS4236B */ { .id = "CSC4536", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Crystal CX4235-XQ3 EP - CS4235 */ { .id = 
"CSC4625", .devs = { { "CSC0100" }, { "CSC0110" }, { "CSC0103" } } }, /* Crystal Semiconductors CS4237B */ { .id = "CSC4637", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* NewClear 3D - CX4237B-XQ3 */ { .id = "CSC4837", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Dell Optiplex GX1 - CS4236B */ { .id = "CSC6835", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Dell P410 motherboard - CS4236B */ { .id = "CSC6835", .devs = { { "CSC0000" }, { "CSC0010" } } }, /* Dell Workstation 400 Onboard - CS4236B */ { .id = "CSC6836", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Turtle Beach Malibu - CS4237B */ { .id = "CSC7537", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* CS4235 - onboard */ { .id = "CSC8025", .devs = { { "CSC0100" }, { "CSC0110" }, { "CSC0103" } } }, /* IBM Aptiva 2137 E24 Onboard - CS4237B */ { .id = "CSC8037", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* IBM IntelliStation M Pro motherboard */ { .id = "CSCc835", .devs = { { "CSC0000" }, { "CSC0010" } } }, /* Guillemot MaxiSound 16 PnP - CS4236B */ { .id = "CSC9836", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Gallant SC-70P */ { .id = "CSC9837", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Techmakers MF-4236PW */ { .id = "CSCa736", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* TerraTec AudioSystem EWS64XL - CS4236B */ { .id = "CSCa836", .devs = { { "CSCa800" }, { "CSCa810" }, { "CSCa803" } } }, /* TerraTec AudioSystem EWS64XL - CS4236B */ { .id = "CSCa836", .devs = { { "CSCa800" }, { "CSCa810" } } }, /* ACER AW37/Pro - CS4235 */ { .id = "CSCd925", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* ACER AW35/Pro - CS4237B */ { .id = "CSCd937", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* CS4235 without MPU401 */ { .id = "CSCe825", .devs = { { "CSC0100" }, { "CSC0110" } } }, /* Unknown SiS530 - CS4235 */ { .id = "CSC4825", .devs = { { 
"CSC0100" }, { "CSC0110" } } }, /* IBM IntelliStation M Pro 6898 11U - CS4236B */ { .id = "CSCe835", .devs = { { "CSC0000" }, { "CSC0010" } } }, /* IBM PC 300PL Onboard - CS4236B */ { .id = "CSCe836", .devs = { { "CSC0000" }, { "CSC0010" } } }, /* Some noname CS4236 based card */ { .id = "CSCe936", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* CS4236B */ { .id = "CSCf235", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* CS4236B */ { .id = "CSCf238", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* --- */ { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_cs423x_pnpids); /* WSS initialization */ static int __devinit snd_cs423x_pnp_init_wss(int dev, struct pnp_dev *pdev) { if (pnp_activate_dev(pdev) < 0) { printk(KERN_ERR IDENT " WSS PnP configure failed for WSS (out of resources?)\n"); return -EBUSY; } port[dev] = pnp_port_start(pdev, 0); if (fm_port[dev] > 0) fm_port[dev] = pnp_port_start(pdev, 1); sb_port[dev] = pnp_port_start(pdev, 2); irq[dev] = pnp_irq(pdev, 0); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1) == 4 ? 
-1 : (int)pnp_dma(pdev, 1); snd_printdd("isapnp WSS: wss port=0x%lx, fm port=0x%lx, sb port=0x%lx\n", port[dev], fm_port[dev], sb_port[dev]); snd_printdd("isapnp WSS: irq=%i, dma1=%i, dma2=%i\n", irq[dev], dma1[dev], dma2[dev]); return 0; } /* CTRL initialization */ static int __devinit snd_cs423x_pnp_init_ctrl(int dev, struct pnp_dev *pdev) { if (pnp_activate_dev(pdev) < 0) { printk(KERN_ERR IDENT " CTRL PnP configure failed for WSS (out of resources?)\n"); return -EBUSY; } cport[dev] = pnp_port_start(pdev, 0); snd_printdd("isapnp CTRL: control port=0x%lx\n", cport[dev]); return 0; } /* MPU initialization */ static int __devinit snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev) { if (pnp_activate_dev(pdev) < 0) { printk(KERN_ERR IDENT " MPU401 PnP configure failed for WSS (out of resources?)\n"); mpu_port[dev] = SNDRV_AUTO_PORT; mpu_irq[dev] = SNDRV_AUTO_IRQ; } else { mpu_port[dev] = pnp_port_start(pdev, 0); if (mpu_irq[dev] >= 0 && pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) { mpu_irq[dev] = pnp_irq(pdev, 0); } else { mpu_irq[dev] = -1; /* disable interrupt */ } } snd_printdd("isapnp MPU: port=0x%lx, irq=%i\n", mpu_port[dev], mpu_irq[dev]); return 0; } static int __devinit snd_card_cs423x_pnp(int dev, struct snd_card_cs4236 *acard, struct pnp_dev *pdev, struct pnp_dev *cdev) { acard->wss = pdev; if (snd_cs423x_pnp_init_wss(dev, acard->wss) < 0) return -EBUSY; if (cdev) cport[dev] = pnp_port_start(cdev, 0); else cport[dev] = -1; return 0; } static int __devinit snd_card_cs423x_pnpc(int dev, struct snd_card_cs4236 *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->wss == NULL) return -EBUSY; acard->ctrl = pnp_request_card_device(card, id->devs[1].id, NULL); if (acard->ctrl == NULL) return -EBUSY; if (id->devs[2].id[0]) { acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL); if (acard->mpu == NULL) return -EBUSY; } /* WSS initialization */ if 
(snd_cs423x_pnp_init_wss(dev, acard->wss) < 0) return -EBUSY; /* CTRL initialization */ if (acard->ctrl && cport[dev] > 0) { if (snd_cs423x_pnp_init_ctrl(dev, acard->ctrl) < 0) return -EBUSY; } /* MPU initialization */ if (acard->mpu && mpu_port[dev] > 0) { if (snd_cs423x_pnp_init_mpu(dev, acard->mpu) < 0) return -EBUSY; } return 0; } #endif /* CONFIG_PNP */ #ifdef CONFIG_PNP #define is_isapnp_selected(dev) isapnp[dev] #else #define is_isapnp_selected(dev) 0 #endif static void snd_card_cs4236_free(struct snd_card *card) { struct snd_card_cs4236 *acard = card->private_data; release_and_free_resource(acard->res_sb_port); } static int snd_cs423x_card_new(int dev, struct snd_card **cardp) { struct snd_card *card; int err; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_cs4236), &card); if (err < 0) return err; card->private_free = snd_card_cs4236_free; *cardp = card; return 0; } static int __devinit snd_cs423x_probe(struct snd_card *card, int dev) { struct snd_card_cs4236 *acard; struct snd_pcm *pcm; struct snd_wss *chip; struct snd_opl3 *opl3; int err; acard = card->private_data; if (sb_port[dev] > 0 && sb_port[dev] != SNDRV_AUTO_PORT) if ((acard->res_sb_port = request_region(sb_port[dev], 16, IDENT " SB")) == NULL) { printk(KERN_ERR IDENT ": unable to register SB port at 0x%lx\n", sb_port[dev]); return -EBUSY; } err = snd_cs4236_create(card, port[dev], cport[dev], irq[dev], dma1[dev], dma2[dev], WSS_HW_DETECT3, 0, &chip); if (err < 0) return err; acard->chip = chip; if (chip->hardware & WSS_HW_CS4236B_MASK) { err = snd_cs4236_pcm(chip, 0, &pcm); if (err < 0) return err; err = snd_cs4236_mixer(chip); if (err < 0) return err; } else { err = snd_wss_pcm(chip, 0, &pcm); if (err < 0) return err; err = snd_wss_mixer(chip); if (err < 0) return err; } strcpy(card->driver, pcm->name); strcpy(card->shortname, pcm->name); sprintf(card->longname, "%s at 0x%lx, irq %i, dma %i", pcm->name, chip->port, irq[dev], dma1[dev]); if (dma2[dev] >= 0) 
sprintf(card->longname + strlen(card->longname), "&%d", dma2[dev]); err = snd_wss_timer(chip, 0, NULL); if (err < 0) return err; if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3_CS, 0, &opl3) < 0) { printk(KERN_WARNING IDENT ": OPL3 not detected\n"); } else { if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) return err; } } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (mpu_irq[dev] == SNDRV_AUTO_IRQ) mpu_irq[dev] = -1; if (snd_mpu401_uart_new(card, 0, MPU401_HW_CS4232, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) printk(KERN_WARNING IDENT ": MPU401 not detected\n"); } return snd_card_register(card); } static int __devinit snd_cs423x_isa_match(struct device *pdev, unsigned int dev) { if (!enable[dev] || is_isapnp_selected(dev)) return 0; if (port[dev] == SNDRV_AUTO_PORT) { dev_err(pdev, "please specify port\n"); return 0; } if (cport[dev] == SNDRV_AUTO_PORT) { dev_err(pdev, "please specify cport\n"); return 0; } if (irq[dev] == SNDRV_AUTO_IRQ) { dev_err(pdev, "please specify irq\n"); return 0; } if (dma1[dev] == SNDRV_AUTO_DMA) { dev_err(pdev, "please specify dma1\n"); return 0; } return 1; } static int __devinit snd_cs423x_isa_probe(struct device *pdev, unsigned int dev) { struct snd_card *card; int err; err = snd_cs423x_card_new(dev, &card); if (err < 0) return err; snd_card_set_dev(card, pdev); if ((err = snd_cs423x_probe(card, dev)) < 0) { snd_card_free(card); return err; } dev_set_drvdata(pdev, card); return 0; } static int __devexit snd_cs423x_isa_remove(struct device *pdev, unsigned int dev) { snd_card_free(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int snd_cs423x_suspend(struct snd_card *card) { struct snd_card_cs4236 *acard = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); acard->chip->suspend(acard->chip); return 0; } static int snd_cs423x_resume(struct snd_card *card) { struct 
snd_card_cs4236 *acard = card->private_data; acard->chip->resume(acard->chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static int snd_cs423x_isa_suspend(struct device *dev, unsigned int n, pm_message_t state) { return snd_cs423x_suspend(dev_get_drvdata(dev)); } static int snd_cs423x_isa_resume(struct device *dev, unsigned int n) { return snd_cs423x_resume(dev_get_drvdata(dev)); } #endif static struct isa_driver cs423x_isa_driver = { .match = snd_cs423x_isa_match, .probe = snd_cs423x_isa_probe, .remove = __devexit_p(snd_cs423x_isa_remove), #ifdef CONFIG_PM .suspend = snd_cs423x_isa_suspend, .resume = snd_cs423x_isa_resume, #endif .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int __devinit snd_cs423x_pnpbios_detect(struct pnp_dev *pdev, const struct pnp_device_id *id) { static int dev; int err; struct snd_card *card; struct pnp_dev *cdev; char cid[PNP_ID_LEN]; if (pnp_device_is_isapnp(pdev)) return -ENOENT; /* we have another procedure - card */ for (; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; /* prepare second id */ strcpy(cid, pdev->id[0].id); cid[5] = '1'; cdev = NULL; list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) { if (!strcmp(cdev->id[0].id, cid)) break; } err = snd_cs423x_card_new(dev, &card); if (err < 0) return err; err = snd_card_cs423x_pnp(dev, card->private_data, pdev, cdev); if (err < 0) { printk(KERN_ERR "PnP BIOS detection failed for " IDENT "\n"); snd_card_free(card); return err; } snd_card_set_dev(card, &pdev->dev); if ((err = snd_cs423x_probe(card, dev)) < 0) { snd_card_free(card); return err; } pnp_set_drvdata(pdev, card); dev++; return 0; } static void __devexit snd_cs423x_pnp_remove(struct pnp_dev *pdev) { snd_card_free(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM static int snd_cs423x_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) { return snd_cs423x_suspend(pnp_get_drvdata(pdev)); } 
static int snd_cs423x_pnp_resume(struct pnp_dev *pdev) { return snd_cs423x_resume(pnp_get_drvdata(pdev)); } #endif static struct pnp_driver cs423x_pnp_driver = { .name = "cs423x-pnpbios", .id_table = snd_cs423x_pnpbiosids, .probe = snd_cs423x_pnpbios_detect, .remove = __devexit_p(snd_cs423x_pnp_remove), #ifdef CONFIG_PM .suspend = snd_cs423x_pnp_suspend, .resume = snd_cs423x_pnp_resume, #endif }; static int __devinit snd_cs423x_pnpc_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int dev; struct snd_card *card; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; res = snd_cs423x_card_new(dev, &card); if (res < 0) return res; if ((res = snd_card_cs423x_pnpc(dev, card->private_data, pcard, pid)) < 0) { printk(KERN_ERR "isapnp detection failed and probing for " IDENT " is not supported\n"); snd_card_free(card); return res; } snd_card_set_dev(card, &pcard->card->dev); if ((res = snd_cs423x_probe(card, dev)) < 0) { snd_card_free(card); return res; } pnp_set_card_drvdata(pcard, card); dev++; return 0; } static void __devexit snd_cs423x_pnpc_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_cs423x_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state) { return snd_cs423x_suspend(pnp_get_card_drvdata(pcard)); } static int snd_cs423x_pnpc_resume(struct pnp_card_link *pcard) { return snd_cs423x_resume(pnp_get_card_drvdata(pcard)); } #endif static struct pnp_card_driver cs423x_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = CS423X_ISAPNP_DRIVER, .id_table = snd_cs423x_pnpids, .probe = snd_cs423x_pnpc_detect, .remove = __devexit_p(snd_cs423x_pnpc_remove), #ifdef CONFIG_PM .suspend = snd_cs423x_pnpc_suspend, .resume = snd_cs423x_pnpc_resume, #endif }; #endif /* CONFIG_PNP */ static int __init alsa_card_cs423x_init(void) { int err; err = 
isa_register_driver(&cs423x_isa_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_driver(&cs423x_pnp_driver); if (!err) pnp_registered = 1; err = pnp_register_card_driver(&cs423x_pnpc_driver); if (!err) pnpc_registered = 1; if (pnp_registered) err = 0; if (isa_registered) err = 0; #endif return err; } static void __exit alsa_card_cs423x_exit(void) { #ifdef CONFIG_PNP if (pnpc_registered) pnp_unregister_card_driver(&cs423x_pnpc_driver); if (pnp_registered) pnp_unregister_driver(&cs423x_pnp_driver); if (isa_registered) #endif isa_unregister_driver(&cs423x_isa_driver); } module_init(alsa_card_cs423x_init) module_exit(alsa_card_cs423x_exit)
gpl-2.0
abhinavaggarwal/MuseScore
thirdparty/freetype/src/gxvalid/gxvmorx1.c
164
9719
/***************************************************************************/
/*                                                                         */
/*  gxvmorx1.c                                                             */
/*                                                                         */
/*    TrueTypeGX/AAT morx table validation                                 */
/*    body for type1 (Contextual Substitution) subtable.                   */
/*                                                                         */
/*  Copyright 2005-2015 by                                                 */
/*  suzuki toshiya, Masatake YAMATO, Red Hat K.K.,                         */
/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
/*                                                                         */
/*  This file is part of the FreeType project, and may only be used,       */
/*  modified, and distributed under the terms of the FreeType project      */
/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
/*  this file you indicate that you have read the license and              */
/*  understand and accept it fully.                                        */
/*                                                                         */
/***************************************************************************/

/***************************************************************************/
/*                                                                         */
/* gxvalid is derived from both gxlayout module and otvalid module.        */
/* Development of gxlayout is supported by the Information-technology      */
/* Promotion Agency(IPA), Japan.                                           */
/*                                                                         */
/***************************************************************************/


#include "gxvmorx.h"


  /* FT_COMPONENT is an implicit parameter of FT_TRACE()/FT_ERROR(); */
  /* it selects the trace channel used for messages from this file.  */
#undef  FT_COMPONENT
#define FT_COMPONENT  trace_gxvmorx


  /* Per-subtable scratch data: the substitution-table offset/length  */
  /* plus the number of lookup tables implied by the entry indices.   */
  typedef struct  GXV_morx_subtable_type1_StateOptRec_
  {
    FT_ULong   substitutionTable;
    FT_ULong   substitutionTable_length;
    FT_UShort  substitutionTable_num_lookupTables;

  } GXV_morx_subtable_type1_StateOptRec,
   *GXV_morx_subtable_type1_StateOptRecData;


#define GXV_MORX_SUBTABLE_TYPE1_HEADER_SIZE \
          ( GXV_STATETABLE_HEADER_SIZE + 2 )


  /* Read the substitution-table offset that follows the state-table */
  /* header into the optional-data record.                           */
  static void
  gxv_morx_subtable_type1_substitutionTable_load( FT_Bytes       table,
                                                  FT_Bytes       limit,
                                                  GXV_Validator  gxvalid )
  {
    FT_Bytes  p = table;

    GXV_morx_subtable_type1_StateOptRecData  optdata =
      (GXV_morx_subtable_type1_StateOptRecData)gxvalid->xstatetable.optdata;


    GXV_LIMIT_CHECK( 2 );
    optdata->substitutionTable = FT_NEXT_USHORT( p );
  }


  /* Derive the length of each sub-part (class table, state array,   */
  /* entry table, substitution table) from the sorted offsets.       */
  static void
  gxv_morx_subtable_type1_subtable_setup( FT_ULong       table_size,
                                          FT_ULong       classTable,
                                          FT_ULong       stateArray,
                                          FT_ULong       entryTable,
                                          FT_ULong*      classTable_length_p,
                                          FT_ULong*      stateArray_length_p,
                                          FT_ULong*      entryTable_length_p,
                                          GXV_Validator  gxvalid )
  {
    FT_ULong   o[4];
    FT_ULong*  l[4];
    FT_ULong   buff[5];

    GXV_morx_subtable_type1_StateOptRecData  optdata =
      (GXV_morx_subtable_type1_StateOptRecData)gxvalid->xstatetable.optdata;


    o[0] = classTable;
    o[1] = stateArray;
    o[2] = entryTable;
    o[3] = optdata->substitutionTable;
    l[0] = classTable_length_p;
    l[1] = stateArray_length_p;
    l[2] = entryTable_length_p;
    l[3] = &( optdata->substitutionTable_length );

    gxv_set_length_by_ulong_offset( o, l, buff, 4, table_size, gxvalid );
  }


  /* Validate one state-table entry: check the reserved flag bits and */
  /* grow the lookup-table count to cover mark/current indices.       */
  static void
  gxv_morx_subtable_type1_entry_validate(
    FT_UShort                       state,
    FT_UShort                       flags,
    GXV_StateTable_GlyphOffsetCPtr  glyphOffset_p,
    FT_Bytes                        table,
    FT_Bytes                        limit,
    GXV_Validator                   gxvalid )
  {
#ifdef GXV_LOAD_TRACE_VARS
    FT_UShort  setMark;
    FT_UShort  dontAdvance;
#endif
    FT_UShort  reserved;
    FT_Short   markIndex;
    FT_Short   currentIndex;

    GXV_morx_subtable_type1_StateOptRecData  optdata =
      (GXV_morx_subtable_type1_StateOptRecData)gxvalid->xstatetable.optdata;

    FT_UNUSED( state );
    FT_UNUSED( table );
    FT_UNUSED( limit );


#ifdef GXV_LOAD_TRACE_VARS
    setMark     = (FT_UShort)( ( flags >> 15 ) & 1 );
    dontAdvance = (FT_UShort)( ( flags >> 14 ) & 1 );
#endif

    reserved = (FT_UShort)( flags & 0x3FFF );

    /* the glyphOffset packs markIndex (high word) and currentIndex */
    markIndex    = (FT_Short)( glyphOffset_p->ul >> 16 );
    currentIndex = (FT_Short)( glyphOffset_p->ul );

    GXV_TRACE(( " setMark=%01d dontAdvance=%01d\n", setMark, dontAdvance ));

    if ( 0 < reserved )
    {
      GXV_TRACE(( " non-zero bits found in reserved range\n" ));
      GXV_SET_ERR_IF_PARANOID( FT_INVALID_DATA );
    }

    GXV_TRACE(( "markIndex = %d, currentIndex = %d\n",
                markIndex, currentIndex ));

    if ( optdata->substitutionTable_num_lookupTables < markIndex + 1 )
      optdata->substitutionTable_num_lookupTables =
        (FT_UShort)( markIndex + 1 );

    if ( optdata->substitutionTable_num_lookupTables < currentIndex + 1 )
      optdata->substitutionTable_num_lookupTables =
        (FT_UShort)( currentIndex + 1 );
  }


  /* A substituted value must be a valid glyph index for the face. */
  static void
  gxv_morx_subtable_type1_LookupValue_validate( FT_UShort            glyph,
                                                GXV_LookupValueCPtr  value_p,
                                                GXV_Validator        gxvalid )
  {
    FT_UNUSED( glyph ); /* for the non-debugging case */

    GXV_TRACE(( "morx subtable type1 subst.: %d -> %d\n",
                glyph, value_p->u ));

    if ( value_p->u > gxvalid->face->num_glyphs )
      FT_INVALID_GLYPH_ID;
  }


  /* Fetch the looked-up value for format-4 lookup tables, where the */
  /* base value is an offset to an array of per-glyph values.        */
  static GXV_LookupValueDesc
  gxv_morx_subtable_type1_LookupFmt4_transit(
    FT_UShort            relative_gindex,
    GXV_LookupValueCPtr  base_value_p,
    FT_Bytes             lookuptbl_limit,
    GXV_Validator        gxvalid )
  {
    FT_Bytes             p;
    FT_Bytes             limit;
    FT_UShort            offset;
    GXV_LookupValueDesc  value;

    /* XXX: check range? */
    offset = (FT_UShort)( base_value_p->u +
                          relative_gindex * sizeof ( FT_UShort ) );

    p     = gxvalid->lookuptbl_head + offset;
    limit = lookuptbl_limit;

    GXV_LIMIT_CHECK ( 2 );
    value.u = FT_NEXT_USHORT( p );

    return value;
  }


  /*
   * TODO: length should be limit?
   */
  static void
  gxv_morx_subtable_type1_substitutionTable_validate( FT_Bytes       table,
                                                      FT_Bytes       limit,
                                                      GXV_Validator  gxvalid )
  {
    FT_Bytes   p = table;
    FT_UShort  i;

    GXV_morx_subtable_type1_StateOptRecData  optdata =
      (GXV_morx_subtable_type1_StateOptRecData)gxvalid->xstatetable.optdata;


    /* TODO: calculate offset/length for each lookupTables */
    gxvalid->lookupval_sign   = GXV_LOOKUPVALUE_UNSIGNED;
    gxvalid->lookupval_func   = gxv_morx_subtable_type1_LookupValue_validate;
    gxvalid->lookupfmt4_trans = gxv_morx_subtable_type1_LookupFmt4_transit;

    for ( i = 0; i < optdata->substitutionTable_num_lookupTables; i++ )
    {
      FT_ULong  offset;


      GXV_LIMIT_CHECK( 4 );
      offset = FT_NEXT_ULONG( p );

      gxv_LookupTable_validate( table + offset, limit, gxvalid );
    }

    /* TODO: overlapping of lookupTables in substitutionTable */
  }


  /*
   * subtable for Contextual glyph substitution is a modified StateTable.
   * In addition to classTable, stateArray, entryTable, the field
   * `substitutionTable' is added.
   */
  FT_LOCAL_DEF( void )
  gxv_morx_subtable_type1_validate( FT_Bytes       table,
                                    FT_Bytes       limit,
                                    GXV_Validator  gxvalid )
  {
    FT_Bytes  p = table;

    GXV_morx_subtable_type1_StateOptRec  st_rec;


    GXV_NAME_ENTER( "morx chain subtable type1 (Contextual Glyph Subst)" );

    GXV_LIMIT_CHECK( GXV_MORX_SUBTABLE_TYPE1_HEADER_SIZE );

    st_rec.substitutionTable_num_lookupTables = 0;

    gxvalid->xstatetable.optdata =
      &st_rec;
    gxvalid->xstatetable.optdata_load_func =
      gxv_morx_subtable_type1_substitutionTable_load;
    gxvalid->xstatetable.subtable_setup_func =
      gxv_morx_subtable_type1_subtable_setup;
    gxvalid->xstatetable.entry_glyphoffset_fmt =
      GXV_GLYPHOFFSET_ULONG;
    gxvalid->xstatetable.entry_validate_func =
      gxv_morx_subtable_type1_entry_validate;

    gxv_XStateTable_validate( p, limit, gxvalid );

    gxv_morx_subtable_type1_substitutionTable_validate(
      table + st_rec.substitutionTable,
      table + st_rec.substitutionTable + st_rec.substitutionTable_length,
      gxvalid );

    GXV_EXIT;
  }


/* END */
gpl-2.0
LimKyungWoo/linux-2.6.39
drivers/scsi/libsas/sas_expander.c
164
51769
/* * Serial Attached SCSI (SAS) Expander discovery and configuration * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "sas_internal.h" #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include); static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ static void smp_task_timedout(unsigned long _task) { struct sas_task *task = (void *) _task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); complete(&task->completion); } static void smp_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) return; complete(&task->completion); } /* Give it some long enough 
timeout. In seconds. */ #define SMP_TIMEOUT 10 static int smp_execute_task(struct domain_device *dev, void *req, int req_size, void *resp, int resp_size) { int res, retry; struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); for (retry = 0; retry < 3; retry++) { task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; sg_init_one(&task->smp_task.smp_req, req, req_size); sg_init_one(&task->smp_task.smp_resp, resp, resp_size); task->task_done = smp_task_done; task->timer.data = (unsigned long) task; task->timer.function = smp_task_timedout; task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->timer); res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); if (res) { del_timer(&task->timer); SAS_DPRINTK("executing SMP task failed:%d\n", res); goto ex_err; } wait_for_completion(&task->completion); res = -ECOMM; if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { SAS_DPRINTK("smp task timed out or aborted\n"); i->dft->lldd_abort_task(task); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { SAS_DPRINTK("SMP task aborted and not done\n"); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { res = -EMSGSIZE; break; } else { SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " "status 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); if (task != NULL) { sas_free_task(task); } return res; } /* ---------- Allocations ---------- */ static inline void 
*alloc_smp_req(int size) { u8 *p = kzalloc(size, GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; } static inline void *alloc_smp_resp(int size) { return kzalloc(size, GFP_KERNEL); } /* ---------- Expander configuration ---------- */ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *disc_resp) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct smp_resp *resp = disc_resp; struct discover_resp *dr = &resp->disc; struct sas_rphy *rphy = dev->rphy; int rediscover = (phy->phy != NULL); if (!rediscover) { phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ BUG_ON(!phy->phy); } switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; break; default: phy->phy_state = PHY_NOT_PRESENT; break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; } phy->phy_id = phy_id; phy->attached_dev_type = dr->attached_dev_type; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; phy->attached_sata_dev = dr->attached_sata_dev; phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; phy->virtual = dr->virtual; phy->last_da_index = -1; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; phy->phy->identify.phy_identifier = phy_id; phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; if (!rediscover) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); return; } SAS_DPRINTK("ex %016llx phy%02d:%c 
attached: %016llx\n", SAS_ADDR(dev->sas_addr), phy->phy_id, phy->routing_attr == TABLE_ROUTING ? 'T' : phy->routing_attr == DIRECT_ROUTING ? 'D' : phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?', SAS_ADDR(phy->attached_sas_addr)); return; } #define DISCOVER_REQ_SIZE 16 #define DISCOVER_RESP_SIZE 56 static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, u8 *disc_resp, int single) { int i, res; disc_req[9] = single; for (i = 1 ; i < 3; i++) { struct discover_resp *dr; res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) return res; /* This is detecting a failure to transmit initial * dev to host FIS as described in section G.5 of * sas-2 r 04b */ dr = &((struct smp_resp *)disc_resp)->disc; if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { sas_printk("Found loopback topology, just ignore it!\n"); return 0; } if (!(dr->attached_dev_type == 0 && dr->attached_sata_dev)) break; /* In order to generate the dev to host FIS, we * send a link reset to the expander port */ sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL); /* Wait for the reset to trigger the negotiation */ msleep(500); } sas_set_ex_phy(dev, single, disc_resp); return 0; } static int sas_ex_phy_discover(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int res = 0; u8 *disc_req; u8 *disc_resp; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; } disc_req[1] = SMP_DISCOVER; if (0 <= single && single < ex->num_phys) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single); } else { int i; for (i = 0; i < ex->num_phys; i++) { res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, i); if (res) goto out_err; } } out_err: kfree(disc_resp); kfree(disc_req); return res; } static int sas_expander_discover(struct domain_device *dev) { struct 
expander_device *ex = &dev->ex_dev; int res = -ENOMEM; ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL); if (!ex->ex_phy) return -ENOMEM; res = sas_ex_phy_discover(dev, -1); if (res) goto out_err; return 0; out_err: kfree(ex->ex_phy); ex->ex_phy = NULL; return res; } #define MAX_EXPANDER_PHYS 128 static void ex_assign_report_general(struct domain_device *dev, struct smp_resp *resp) { struct report_general_resp *rg = &resp->rg; dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); } #define RG_REQ_SIZE 8 #define RG_RESP_SIZE 32 static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; } static void ex_assign_manuf_info(struct domain_device *dev, void *_mi_resp) { u8 *mi_resp = _mi_resp; struct sas_rphy *rphy = dev->rphy; struct sas_expander_device *edev = rphy_to_expander_device(rphy); 
memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); memcpy(edev->product_rev, mi_resp + 36, SAS_EXPANDER_PRODUCT_REV_LEN); if (mi_resp[8] & 1) { memcpy(edev->component_vendor_id, mi_resp + 40, SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); edev->component_id = mi_resp[48] << 8 | mi_resp[49]; edev->component_revision_id = mi_resp[50]; } } #define MI_REQ_SIZE 8 #define MI_RESP_SIZE 64 static int sas_ex_manuf_info(struct domain_device *dev) { u8 *mi_req; u8 *mi_resp; int res; mi_req = alloc_smp_req(MI_REQ_SIZE); if (!mi_req) return -ENOMEM; mi_resp = alloc_smp_resp(MI_RESP_SIZE); if (!mi_resp) { kfree(mi_req); return -ENOMEM; } mi_req[1] = SMP_REPORT_MANUF_INFO; res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); if (res) { SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), mi_resp[2]); goto out; } ex_assign_manuf_info(dev, mi_resp); out: kfree(mi_req); kfree(mi_resp); return res; } #define PC_REQ_SIZE 44 #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, enum phy_func phy_func, struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; int res; pc_req = alloc_smp_req(PC_REQ_SIZE); if (!pc_req) return -ENOMEM; pc_resp = alloc_smp_resp(PC_RESP_SIZE); if (!pc_resp) { kfree(pc_req); return -ENOMEM; } pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; if (rates) { pc_req[32] = rates->minimum_linkrate << 4; pc_req[33] = rates->maximum_linkrate << 4; } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); kfree(pc_resp); kfree(pc_req); return res; } static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, 
PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) sas_ex_disable_phy(dev, i); } } static int sas_dev_present_in_domain(struct asd_sas_port *port, u8 *sas_addr) { struct domain_device *dev; if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) return 1; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) return 1; } return 0; } #define RPEL_REQ_SIZE 16 #define RPEL_RESP_SIZE 32 int sas_smp_get_phy_events(struct sas_phy *phy) { int res; u8 *req; u8 *resp; struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *dev = sas_find_dev_by_rphy(rphy); req = alloc_smp_req(RPEL_REQ_SIZE); if (!req) return -ENOMEM; resp = alloc_smp_resp(RPEL_RESP_SIZE); if (!resp) { kfree(req); return -ENOMEM; } req[1] = SMP_REPORT_PHY_ERR_LOG; req[9] = phy->number; res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); if (!res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); phy->running_disparity_error_count = scsi_to_u32(&resp[16]); phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: kfree(resp); return res; } #ifdef CONFIG_SCSI_SAS_ATA #define RPS_REQ_SIZE 16 #define RPS_RESP_SIZE 60 static int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_resp *rps_resp) { int res; u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); u8 *resp = (u8 *)rps_resp; if (!rps_req) return -ENOMEM; rps_req[1] = SMP_REPORT_PHY_SATA; rps_req[9] = phy_id; res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, rps_resp, RPS_RESP_SIZE); /* 0x34 is the FIS type for the D2H fis. 
There's a potential * standards cockup here. sas-2 explicitly specifies the FIS * should be encoded so that FIS type is in resp[24]. * However, some expanders endian reverse this. Undo the * reversal here */ if (!res && resp[27] == 0x34 && resp[24] != 0x34) { int i; for (i = 0; i < 5; i++) { int j = 24 + (i*4); u8 a, b; a = resp[j + 0]; b = resp[j + 1]; resp[j + 0] = resp[j + 3]; resp[j + 1] = resp[j + 2]; resp[j + 2] = b; resp[j + 3] = a; } } kfree(rps_req); return res; } #endif static void sas_ex_get_linkrate(struct domain_device *parent, struct domain_device *child, struct ex_phy *parent_phy) { struct expander_device *parent_ex = &parent->ex_dev; struct sas_port *port; int i; child->pathways = 0; port = parent_phy->port; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *phy = &parent_ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, phy->linkrate); child->pathways++; sas_port_add_phy(port, phy->phy); } } child->linkrate = min(parent_phy->linkrate, child->max_linkrate); child->pathways = min(child->pathways, parent->pathways); } static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) return NULL; child = kzalloc(sizeof(*child), GFP_KERNEL); if (!child) return NULL; child->parent = parent; child->port = parent->port; child->iproto = phy->attached_iproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); if (!phy->port) { phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (unlikely(!phy->port)) 
goto out_err; if (unlikely(sas_port_add(phy->port) != 0)) { sas_port_free(phy->port); goto out_err; } } sas_ex_get_linkrate(parent, child, phy); #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { child->dev_type = SATA_DEV; if (phy->attached_tproto & SAS_PROTOCOL_STP) child->tproto = phy->attached_tproto; if (phy->attached_sata_dev) child->tproto |= SATA_DEV; res = sas_get_report_phy_sata(parent, phy_id, &child->sata_dev.rps_resp); if (res) { SAS_DPRINTK("report phy sata to %016llx:0x%x returned " "0x%x\n", SAS_ADDR(parent->sas_addr), phy_id, res); goto out_free; } memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, sizeof(struct dev_to_host_fis)); rphy = sas_end_device_alloc(phy->port); if (unlikely(!rphy)) goto out_free; sas_init_dev(child); child->rphy = rphy; spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_sata(child); if (res) { SAS_DPRINTK("sas_discover_sata() for device %16llx at " "%016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { child->dev_type = SAS_END_DEV; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) goto out_free; child->tproto = phy->attached_tproto; sas_init_dev(child); child->rphy = rphy; sas_fill_in_rphy(child, rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_end_dev(child); if (res) { SAS_DPRINTK("sas_discover_end_dev() for device %16llx " "at %016llx:0x%x returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; } } else { SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, 
SAS_ADDR(parent->sas_addr), phy_id); goto out_free; } list_add_tail(&child->siblings, &parent_ex->children); return child; out_list_del: sas_rphy_free(child->rphy); child->rphy = NULL; list_del(&child->dev_list_node); out_free: sas_port_delete(phy->port); out_err: phy->port = NULL; kfree(child); return NULL; } /* See if this phy is part of a wide port */ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; for (i = 0; i < parent->ex_dev.num_phys; i++) { struct ex_phy *ephy = &parent->ex_dev.ex_phy[i]; if (ephy == phy) continue; if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, SAS_ADDR_SIZE) && ephy->port) { sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; return 0; } } return -ENODEV; } static struct domain_device *sas_ex_discover_expander( struct domain_device *parent, int phy_id) { struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; struct domain_device *child = NULL; struct sas_rphy *rphy; struct sas_expander_device *edev; struct asd_sas_port *port; int res; if (phy->routing_attr == DIRECT_ROUTING) { SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " "allowed\n", SAS_ADDR(parent->sas_addr), phy_id, SAS_ADDR(phy->attached_sas_addr), phy->attached_phy_id); return NULL; } child = kzalloc(sizeof(*child), GFP_KERNEL); if (!child) return NULL; phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); /* FIXME: better error handling */ BUG_ON(sas_port_add(phy->port) != 0); switch (phy->attached_dev_type) { case EDGE_DEV: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; case FANOUT_DEV: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: rphy = NULL; /* shut gcc up */ BUG(); } port = parent->port; child->rphy = rphy; edev = rphy_to_expander_device(rphy); child->dev_type = 
phy->attached_dev_type; child->parent = parent; child->port = port; child->iproto = phy->attached_iproto; child->tproto = phy->attached_tproto; memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_ex_get_linkrate(parent, child, phy); edev->level = parent_ex->level + 1; parent->port->disc.max_level = max(parent->port->disc.max_level, edev->level); sas_init_dev(child); sas_fill_in_rphy(child, rphy); sas_rphy_add(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_add_tail(&child->dev_list_node, &parent->port->dev_list); spin_unlock_irq(&parent->port->dev_list_lock); res = sas_discover_expander(child); if (res) { kfree(child); return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); return child; } static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; struct domain_device *child = NULL; int res = 0; /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; } /* Parent and domain coherency */ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->port->sas_addr))) { sas_add_parent_port(dev, phy_id); return 0; } if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == SAS_ADDR(dev->parent->sas_addr))) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); return 0; } if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); if (ex_phy->attached_dev_type == NO_DEVICE) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; } else if (ex_phy->linkrate == 
SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEV && ex_phy->attached_dev_type != FANOUT_DEV && ex_phy->attached_dev_type != EDGE_DEV) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), phy_id); return 0; } res = sas_configure_routing(dev, ex_phy->attached_sas_addr); if (res) { SAS_DPRINTK("configure routing for dev %016llx " "reported 0x%x. Forgotten\n", SAS_ADDR(ex_phy->attached_sas_addr), res); sas_disable_routing(dev, ex_phy->attached_sas_addr); return res; } res = sas_ex_join_wide_port(dev, phy_id); if (!res) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; } switch (ex_phy->attached_dev_type) { case SAS_END_DEV: child = sas_ex_discover_end_dev(dev, phy_id); break; case FANOUT_DEV: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", SAS_ADDR(ex_phy->attached_sas_addr), ex_phy->attached_phy_id, SAS_ADDR(dev->sas_addr), phy_id); sas_ex_disable_phy(dev, phy_id); break; } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ case EDGE_DEV: child = sas_ex_discover_expander(dev, phy_id); break; default: break; } if (child) { int i; for (i = 0; i < ex->num_phys; i++) { if (ex->ex_phy[i].phy_state == PHY_VACANT || ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) continue; /* * Due to races, the phy might not get added to the * wide port, so we add the phy to the wide port here. 
*/ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; res = sas_ex_join_wide_port(dev, i); if (!res) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); } } } return res; } static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) { struct expander_device *ex = &dev->ex_dev; int i; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == EDGE_DEV || phy->attached_dev_type == FANOUT_DEV) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); return 1; } } return 0; } static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != EDGE_DEV && child->dev_type != FANOUT_DEV) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); continue; } else { u8 s2[8]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " "diverges from subtractive " "boundary %016llx\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(child->sas_addr), SAS_ADDR(s2), SAS_ADDR(sub_addr)); sas_ex_disable_port(child, s2); } } } return 0; } /** * sas_ex_discover_devices -- discover devices attached to this expander * dev: pointer to the expander domain device * single: if you want to do a single phy, else set to -1; * * Configure this expander for use with its devices and register the * devices of this expander. 
*/ static int sas_ex_discover_devices(struct domain_device *dev, int single) { struct expander_device *ex = &dev->ex_dev; int i = 0, end = ex->num_phys; int res = 0; if (0 <= single && single < end) { i = single; end = i+1; } for ( ; i < end; i++) { struct ex_phy *ex_phy = &ex->ex_phy[i]; if (ex_phy->phy_state == PHY_VACANT || ex_phy->phy_state == PHY_NOT_PRESENT || ex_phy->phy_state == PHY_DEVICE_DISCOVERED) continue; switch (ex_phy->linkrate) { case SAS_PHY_DISABLED: case SAS_PHY_RESET_PROBLEM: case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); if (res) break; continue; } } if (!res) sas_check_level_subtractive_boundary(dev); return res; } static int sas_check_ex_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; int i; u8 *sub_sas_addr = NULL; if (dev->dev_type != EDGE_DEV) return 0; for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (phy->phy_state == PHY_VACANT || phy->phy_state == PHY_NOT_PRESENT) continue; if ((phy->attached_dev_type == FANOUT_DEV || phy->attached_dev_type == EDGE_DEV) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) sub_sas_addr = &phy->attached_sas_addr[0]; else if (SAS_ADDR(sub_sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x " "diverges(%016llx) on subtractive " "boundary(%016llx). 
Disabled\n", SAS_ADDR(dev->sas_addr), i, SAS_ADDR(phy->attached_sas_addr), SAS_ADDR(sub_sas_addr)); sas_ex_disable_phy(dev, i); } } } return 0; } static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { static const char ra_char[] = { [DIRECT_ROUTING] = 'D', [SUBTRACTIVE_ROUTING] = 'S', [TABLE_ROUTING] = 'T', }; static const char *ex_type[] = { [EDGE_DEV] = "edge", [FANOUT_DEV] = "fanout", }; struct domain_device *parent = child->parent; sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x " "has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), child_phy->phy_id, ra_char[parent_phy->routing_attr], ra_char[child_phy->routing_attr]); } static int sas_check_eeds(struct domain_device *child, struct ex_phy *parent_phy, struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " "phy S:0x%x, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, SAS_ADDR(parent->port->disc.fanout_sas_addr)); } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { memcpy(parent->port->disc.eeds_a, parent->sas_addr, SAS_ADDR_SIZE); memcpy(parent->port->disc.eeds_b, child->sas_addr, SAS_ADDR_SIZE); } else if (((SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_a) == SAS_ADDR(child->sas_addr))) && ((SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(parent->sas_addr)) || (SAS_ADDR(parent->port->disc.eeds_b) == SAS_ADDR(child->sas_addr)))) ; else { res = -ENODEV; SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " "phy 0x%x link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, 
SAS_ADDR(child->sas_addr), child_phy->phy_id); } return res; } /* Here we spill over 80 columns. It is intentional. */ static int sas_check_parent_topology(struct domain_device *child) { struct expander_device *child_ex = &child->ex_dev; struct expander_device *parent_ex; int i; int res = 0; if (!child->parent) return 0; if (child->parent->dev_type != EDGE_DEV && child->parent->dev_type != FANOUT_DEV) return 0; parent_ex = &child->parent->ex_dev; for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) continue; if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { case EDGE_DEV: if (child->dev_type == FANOUT_DEV) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { res = sas_check_eeds(child, parent_phy, child_phy); } else if (child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } } else if (parent_phy->routing_attr == TABLE_ROUTING && child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; case FANOUT_DEV: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } break; default: break; } } return res; } #define RRI_REQ_SIZE 16 #define RRI_RESP_SIZE 44 static int sas_configure_present(struct domain_device *dev, int phy_id, u8 *sas_addr, int *index, int *present) { int i, res = 0; struct 
expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 *rri_req; u8 *rri_resp; *present = 0; *index = 0; rri_req = alloc_smp_req(RRI_REQ_SIZE); if (!rri_req) return -ENOMEM; rri_resp = alloc_smp_resp(RRI_RESP_SIZE); if (!rri_resp) { kfree(rri_req); return -ENOMEM; } rri_req[1] = SMP_REPORT_ROUTE_INFO; rri_req[9] = phy_id; for (i = 0; i < ex->max_route_indexes ; i++) { *(__be16 *)(rri_req+6) = cpu_to_be16(i); res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, RRI_RESP_SIZE); if (res) goto out; res = rri_resp[2]; if (res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx " "phy 0x%x index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, i); goto out; } else if (res != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " "result 0x%x\n", __func__, SAS_ADDR(dev->sas_addr), phy_id, i, res); goto out; } if (SAS_ADDR(sas_addr) != 0) { if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { *index = i; if ((rri_resp[12] & 0x80) == 0x80) *present = 0; else *present = 1; goto out; } else if (SAS_ADDR(rri_resp+16) == 0) { *index = i; *present = 0; goto out; } } else if (SAS_ADDR(rri_resp+16) == 0 && phy->last_da_index < i) { phy->last_da_index = i; *index = i; *present = 0; goto out; } } res = -1; out: kfree(rri_req); kfree(rri_resp); return res; } #define CRI_REQ_SIZE 44 #define CRI_RESP_SIZE 8 static int sas_configure_set(struct domain_device *dev, int phy_id, u8 *sas_addr, int index, int include) { int res; u8 *cri_req; u8 *cri_resp; cri_req = alloc_smp_req(CRI_REQ_SIZE); if (!cri_req) return -ENOMEM; cri_resp = alloc_smp_resp(CRI_RESP_SIZE); if (!cri_resp) { kfree(cri_req); return -ENOMEM; } cri_req[1] = SMP_CONF_ROUTE_INFO; *(__be16 *)(cri_req+6) = cpu_to_be16(index); cri_req[9] = phy_id; if (SAS_ADDR(sas_addr) == 0 || !include) cri_req[12] |= 0x80; memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, CRI_RESP_SIZE); if (res) goto out; res = cri_resp[2]; if 
(res == SMP_RESP_NO_INDEX) { SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " "index 0x%x\n", SAS_ADDR(dev->sas_addr), phy_id, index); } out: kfree(cri_req); kfree(cri_resp); return res; } static int sas_configure_phy(struct domain_device *dev, int phy_id, u8 *sas_addr, int include) { int index; int present; int res; res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); if (res) return res; if (include ^ present) return sas_configure_set(dev, phy_id, sas_addr, index,include); return res; } /** * sas_configure_parent -- configure routing table of parent * parent: parent expander * child: child expander * sas_addr: SAS port identifier of device directly attached to child */ static int sas_configure_parent(struct domain_device *parent, struct domain_device *child, u8 *sas_addr, int include) { struct expander_device *ex_parent = &parent->ex_dev; int res = 0; int i; if (parent->parent) { res = sas_configure_parent(parent->parent, parent, sas_addr, include); if (res) return res; } if (ex_parent->conf_route_table == 0) { SAS_DPRINTK("ex %016llx has self-configuring routing table\n", SAS_ADDR(parent->sas_addr)); return 0; } for (i = 0; i < ex_parent->num_phys; i++) { struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr))) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; } } return res; } /** * sas_configure_routing -- configure routing * dev: expander device * sas_addr: port identifier of device directly attached to the expander device */ static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 1); return 0; } static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) { if (dev->parent) return sas_configure_parent(dev->parent, dev, sas_addr, 0); return 0; } /** * sas_discover_expander -- expander discovery * @ex: pointer 
to expander domain device * * See comment in sas_discover_sata(). */ static int sas_discover_expander(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) return res; res = sas_ex_general(dev); if (res) goto out_err; res = sas_ex_manuf_info(dev); if (res) goto out_err; res = sas_expander_discover(dev); if (res) { SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", SAS_ADDR(dev->sas_addr), res); goto out_err; } sas_check_ex_subtractive_boundary(dev); res = sas_check_parent_topology(dev); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); return res; } static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) { int res = 0; struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); if (level == ex->level) res = sas_ex_discover_devices(dev, -1); else if (level > 0) res = sas_ex_discover_devices(port->port_dev, -1); } } return res; } static int sas_ex_bfs_disc(struct asd_sas_port *port) { int res; int level; do { level = port->disc.max_level; res = sas_ex_level_discovery(port, level); mb(); } while (level < port->disc.max_level); return res; } int sas_discover_root_expander(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); res = sas_rphy_add(dev->rphy); if (res) goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); if (res) goto out_err2; sas_ex_bfs_disc(dev->port); return res; out_err2: sas_rphy_remove(dev->rphy); out_err: return res; } /* ---------- Domain revalidation ---------- */ static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_resp *disc_resp) { int res; u8 *disc_req; disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); if (!disc_req) return -ENOMEM; disc_req[1] = SMP_DISCOVER; disc_req[9] = phy_id; res = 
smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, disc_resp, DISCOVER_RESP_SIZE); if (res) goto out; else if (disc_resp->result != SMP_RESP_FUNC_ACC) { res = disc_resp->result; goto out; } out: kfree(disc_req); return res; } static int sas_get_phy_change_count(struct domain_device *dev, int phy_id, int *pcc) { int res; struct smp_resp *disc_resp; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) *pcc = disc_resp->disc.change_count; kfree(disc_resp); return res; } static int sas_get_phy_attached_sas_addr(struct domain_device *dev, int phy_id, u8 *attached_sas_addr) { int res; struct smp_resp *disc_resp; struct discover_resp *dr; disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) return -ENOMEM; dr = &disc_resp->disc; res = sas_get_phy_discover(dev, phy_id, disc_resp); if (!res) { memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8); if (dr->attached_dev_type == 0) memset(attached_sas_addr, 0, 8); } kfree(disc_resp); return res; } static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int from_phy, bool update) { struct expander_device *ex = &dev->ex_dev; int res = 0; int i; for (i = from_phy; i < ex->num_phys; i++) { int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); if (res) goto out; else if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = phy_change_count; *phy_id = i; return 0; } } out: return res; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) { int res; u8 *rg_req; struct smp_resp *rg_resp; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) goto out; if (rg_resp->result != SMP_RESP_FUNC_ACC) { res = 
rg_resp->result; goto out; } *ecc = be16_to_cpu(rg_resp->rg.change_count); out: kfree(rg_resp); kfree(rg_req); return res; } /** * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE). * @dev:domain device to be detect. * @src_dev: the device which originated BROADCAST(CHANGE). * * Add self-configuration expander suport. Suppose two expander cascading, * when the first level expander is self-configuring, hotplug the disks in * second level expander, BROADCAST(CHANGE) will not only be originated * in the second level expander, but also be originated in the first level * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say, * expander changed count in two level expanders will all increment at least * once, but the phy which chang count has changed is the source device which * we concerned. */ static int sas_find_bcast_dev(struct domain_device *dev, struct domain_device **src_dev) { struct expander_device *ex = &dev->ex_dev; int ex_change_count = -1; int phy_id = -1; int res; struct domain_device *ch; res = sas_get_ex_change_count(dev, &ex_change_count); if (res) goto out; if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { /* Just detect if this expander phys phy change count changed, * in order to determine if this expander originate BROADCAST, * and do not update phy change count field in our structure. 
*/ res = sas_find_bcast_phy(dev, &phy_id, 0, false); if (phy_id != -1) { *src_dev = dev; ex->ex_change_count = ex_change_count; SAS_DPRINTK("Expander phy change count has changed\n"); return res; } else SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { res = sas_find_bcast_dev(ch, src_dev); if (src_dev) return res; } } out: return res; } static void sas_unregister_ex_tree(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(child); else sas_unregister_dev(child); } sas_unregister_dev(dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, int phy_id, bool last) { struct expander_device *ex_dev = &parent->ex_dev; struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; struct domain_device *child, *n; if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(child); else sas_unregister_dev(child); break; } } parent->gone = 1; sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_port_delete_phy(phy->port, phy->phy); if (phy->port->num_phys == 0) sas_port_delete(phy->port); phy->port = NULL; } static int sas_discover_bfs_by_root_level(struct domain_device *root, const int level) { struct expander_device *ex_root = &root->ex_dev; struct domain_device *child; int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); if (level > 
ex->level) res = sas_discover_bfs_by_root_level(child, level); else if (level == ex->level) res = sas_ex_discover_devices(child, -1); } } return res; } static int sas_discover_bfs_by_root(struct domain_device *dev) { int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); int level = ex->level+1; res = sas_ex_discover_devices(dev, -1); if (res) goto out; do { res = sas_discover_bfs_by_root_level(dev, level); mb(); level += 1; } while (level <= dev->port->disc.max_level); out: return res; } static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; bool found = false; int res, i; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) goto out; /* to support the wide port inserted */ for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { found = true; break; } } if (found) { sas_ex_join_wide_port(dev, phy_id); return 0; } res = sas_ex_discover_devices(dev, phy_id); if (!res) goto out; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) res = sas_discover_bfs_by_root(child); break; } } out: return res; } static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; u8 attached_sas_addr[8]; int res; res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); goto out; break; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; 
sas_unregister_devs_sas_addr(dev, phy_id, last); goto out; break; case SMP_RESP_FUNC_ACC: break; } if (SAS_ADDR(attached_sas_addr) == 0) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); } else if (SAS_ADDR(attached_sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", SAS_ADDR(dev->sas_addr), phy_id); sas_ex_phy_discover(dev, phy_id); } else res = sas_discover_new(dev, phy_id); out: return res; } /** * sas_rediscover - revalidate the domain. * @dev:domain device to be detect. * @phy_id: the phy id will be detected. * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain.For plugging out, we un-register the device only when it is * the last phy in the port, for other phys in this port, we just delete it * from the port.For inserting, we do discovery when it is the * first phy,for other phys in this port, we add it to the port to * forming the wide-port. 
*/ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; int res = 0; int i; bool last = true; /* is this the last phy of the port */ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", SAS_ADDR(dev->sas_addr), phy_id); if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { for (i = 0; i < ex->num_phys; i++) { struct ex_phy *phy = &ex->ex_phy[i]; if (i == phy_id) continue; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(changed_phy->attached_sas_addr)) { SAS_DPRINTK("phy%d part of wide port with " "phy%d\n", phy_id, i); last = false; break; } } res = sas_rediscover_dev(dev, phy_id, last); } else res = sas_discover_new(dev, phy_id); return res; } /** * sas_revalidate_domain -- revalidate the domain * @port: port to the domain of interest * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain. 
*/ int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); if (res) goto out; if (dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; do { phy_id = -1; res = sas_find_bcast_phy(dev, &phy_id, i, true); if (phy_id == -1) break; res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); } out: return res; } int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct request *req) { struct domain_device *dev; int ret, type; struct request *rsp = req->next_rq; if (!rsp) { printk("%s: space for a smp response is missing\n", __func__); return -EINVAL; } /* no rphy means no smp target support (ie aic94xx host) */ if (!rphy) return sas_smp_host_handler(shost, req, rsp); type = rphy->identify.device_type; if (type != SAS_EDGE_EXPANDER_DEVICE && type != SAS_FANOUT_EXPANDER_DEVICE) { printk("%s: can we send a smp request to a device?\n", __func__); return -EINVAL; } dev = sas_find_dev_by_rphy(rphy); if (!dev) { printk("%s: fail to find a domain_device?\n", __func__); return -EINVAL; } /* do we need to support multiple segments? */ if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { printk("%s: multiple segments req %u %u, rsp %u %u\n", __func__, req->bio->bi_vcnt, blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); return -EINVAL; } ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req), bio_data(rsp->bio), blk_rq_bytes(rsp)); if (ret > 0) { /* positive number is the untransferred residual */ rsp->resid_len = ret; req->resid_len = 0; ret = 0; } else if (ret == 0) { rsp->resid_len = 0; req->resid_len = 0; } return ret; }
gpl-2.0
stepsongit/linux
drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
420
5377
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "hdmi.h"

/* Per-PHY state: the generic hdmi_phy base plus a back-pointer to the
 * owning hdmi device, needed for register access via hdmi_write().
 */
struct hdmi_phy_8x60 {
	struct hdmi_phy base;
	struct hdmi *hdmi;
};
#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)

static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
{
	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
	kfree(phy_8x60);
}

/*
 * Power up the 8x60 HDMI PHY for the given pixel clock.
 *
 * Sequence: program de-serializer/output-swing, force the PHY into full
 * power-down, then release the power-down bits in stages (PowerGen ->
 * PLL -> drivers), enable the PLL and lock detect, and finally clear the
 * remaining test/config registers.
 */
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
		unsigned long int pixclock)
{
	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
	struct hdmi *hdmi = phy_8x60->hdmi;

	/* De-serializer delay D/C for non-lbk mode: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
			HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));

	if (pixclock == 27000000) {
		/* video_format == HDMI_VFRMT_720x480p60_16_9 */
		hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
	} else {
		hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
	}

	/* No matter what, start from the power down mode: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_PD_PWRGEN |
			HDMI_8x60_PHY_REG2_PD_PLL |
			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
			HDMI_8x60_PHY_REG2_PD_DESER);

	/* Turn PowerGen on: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_PD_PLL |
			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
			HDMI_8x60_PHY_REG2_PD_DESER);

	/* Turn PLL power on: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
			HDMI_8x60_PHY_REG2_PD_DESER);

	/* Write to HIGH after PLL power down de-assert: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
			HDMI_8x60_PHY_REG3_PLL_ENABLE);

	/* ASIC power on; PHY REG9 = 0 */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);

	/* Enable PLL lock detect, PLL lock det will go high after lock
	 * Enable the re-time logic
	 */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
			HDMI_8x60_PHY_REG12_RETIMING_EN |
			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);

	/* Drivers are on: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_PD_DESER);

	/* If the RX detector is needed: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
			HDMI_8x60_PHY_REG2_PD_DESER);

	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);

	/* If we want to use lock enable based on counting: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
			HDMI_8x60_PHY_REG12_RETIMING_EN |
			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
			HDMI_8x60_PHY_REG12_FORCE_LOCK);
}

/*
 * Power down the PHY: pulse the controller's software reset, power off
 * the line drivers and PLL, then leave everything powered down except
 * RX-sense (so hot-plug detect keeps working).
 */
static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
{
	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
	struct hdmi *hdmi = phy_8x60->hdmi;

	/* Assert RESET PHY from controller */
	hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
			HDMI_PHY_CTRL_SW_RESET);
	udelay(10);
	/* De-assert RESET PHY from controller */
	hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
	/* Turn off Driver */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
			HDMI_8x60_PHY_REG2_PD_DESER);
	udelay(10);
	/* Disable PLL */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
	/* Power down PHY, but keep RX-sense: */
	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
			HDMI_8x60_PHY_REG2_PD_PWRGEN |
			HDMI_8x60_PHY_REG2_PD_PLL |
			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
			HDMI_8x60_PHY_REG2_PD_DESER);
}

static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
		.destroy = hdmi_phy_8x60_destroy,
		.powerup = hdmi_phy_8x60_powerup,
		.powerdown = hdmi_phy_8x60_powerdown,
};

/*
 * Allocate and initialize an 8x60 HDMI PHY instance.
 *
 * Returns the embedded struct hdmi_phy on success, or ERR_PTR(-ENOMEM)
 * on allocation failure.
 *
 * NOTE(review): the previous version jumped to a `fail:` label that
 * tested `if (phy) hdmi_phy_8x60_destroy(phy)`, but the only failure
 * path occurred before `phy` was assigned, so the destroy call was dead
 * code.  With a single point of failure we can return directly.
 */
struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
{
	struct hdmi_phy_8x60 *phy_8x60;

	phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
	if (!phy_8x60)
		return ERR_PTR(-ENOMEM);

	phy_8x60->base.funcs = &hdmi_phy_8x60_funcs;
	phy_8x60->hdmi = hdmi;

	return &phy_8x60->base;
}
gpl-2.0
rafyvitto/HTC-Vivid-ICS-GPU-CPU-OC
drivers/pci/hotplug/acpiphp_glue.c
1444
36213
/* * ACPI PCI HotPlug glue functions to ACPI CA subsystem * * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com) * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com) * Copyright (C) 2002,2003 NEC Corporation * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com) * Copyright (C) 2003-2005 Hewlett Packard * Copyright (C) 2005 Rajesh Shah (rajesh.shah@intel.com) * Copyright (C) 2005 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <kristen.c.accardi@intel.com> * */ /* * Lifetime rules for pci_dev: * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() * when the bridge is scanned and it loses a refcount when the bridge * is removed. * - When a P2P bridge is present, we elevate the refcount on the subordinate * bus. It loses the refcount when the the driver unloads. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/pci-acpi.h> #include <linux/mutex.h> #include <linux/slab.h> #include "../pci.h" #include "acpiphp.h" static LIST_HEAD(bridge_list); #define MY_NAME "acpiphp_glue" static void handle_hotplug_event_bridge (acpi_handle, u32, void *); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(struct pci_bus *bus); static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); /* callback routine to check for the existence of a pci dock device */ static acpi_status is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv) { int *count = (int *)context; if (is_dock_device(handle)) { (*count)++; return AE_CTRL_TERMINATE; } else { return AE_OK; } } /* * the _DCK method can do funny things... and sometimes not * hah-hah funny. * * TBD - figure out a way to only call fixups for * systems that require them. 
*/ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, void *v) { struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb); struct pci_bus *bus = func->slot->bridge->pci_bus; u32 buses; if (!bus->self) return NOTIFY_OK; /* fixup bad _DCK function that rewrites * secondary bridge on slot */ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses); if (((buses >> 8) & 0xff) != bus->secondary) { buses = (buses & 0xff000000) | ((unsigned int)(bus->primary) << 0) | ((unsigned int)(bus->secondary) << 8) | ((unsigned int)(bus->subordinate) << 16); pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); } return NOTIFY_OK; } static struct acpi_dock_ops acpiphp_dock_ops = { .handler = handle_hotplug_event_func, }; /* callback routine to register each ACPI PCI slot object */ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) { struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context; struct acpiphp_slot *slot; struct acpiphp_func *newfunc; acpi_handle tmp; acpi_status status = AE_OK; unsigned long long adr, sun; int device, function, retval; struct pci_bus *pbus = bridge->pci_bus; struct pci_dev *pdev; if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) return AE_OK; acpi_evaluate_integer(handle, "_ADR", NULL, &adr); device = (adr >> 16) & 0xffff; function = adr & 0xffff; newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL); if (!newfunc) return AE_NO_MEMORY; INIT_LIST_HEAD(&newfunc->sibling); newfunc->handle = handle; newfunc->function = function; if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) newfunc->flags = FUNC_HAS_EJ0; if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) newfunc->flags |= FUNC_HAS_STA; if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp))) newfunc->flags |= FUNC_HAS_PS0; if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp))) newfunc->flags |= FUNC_HAS_PS3; if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp))) newfunc->flags 
|= FUNC_HAS_DCK; status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) { /* * use the count of the number of slots we've found * for the number of the slot */ sun = bridge->nr_slots+1; } /* search for objects that share the same slot */ for (slot = bridge->slots; slot; slot = slot->next) if (slot->device == device) { if (slot->sun != sun) warn("sibling found, but _SUN doesn't match!\n"); break; } if (!slot) { slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); if (!slot) { kfree(newfunc); return AE_NO_MEMORY; } slot->bridge = bridge; slot->device = device; slot->sun = sun; INIT_LIST_HEAD(&slot->funcs); mutex_init(&slot->crit_sect); slot->next = bridge->slots; bridge->slots = slot; bridge->nr_slots++; dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", slot->sun, pci_domain_nr(pbus), pbus->number, device); retval = acpiphp_register_hotplug_slot(slot); if (retval) { if (retval == -EBUSY) warn("Slot %llu already registered by another " "hotplug driver\n", slot->sun); else warn("acpiphp_register_hotplug_slot failed " "(err code = 0x%x)\n", retval); goto err_exit; } } newfunc->slot = slot; list_add_tail(&newfunc->sibling, &slot->funcs); pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (pdev) { pdev->current_state = PCI_D0; slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); pci_dev_put(pdev); } if (is_dock_device(handle)) { /* we don't want to call this device's _EJ0 * because we want the dock notify handler * to call it after it calls _DCK */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, &acpiphp_dock_ops, newfunc)) dbg("failed to register dock device\n"); /* we need to be notified when dock events happen * outside of the hotplug operation, since we may * need to do fixups before we can hotplug. 
*/ newfunc->nb.notifier_call = post_dock_fixups; if (register_dock_notifier(&newfunc->nb)) dbg("failed to register a dock notifier"); } /* install notify handler */ if (!(newfunc->flags & FUNC_HAS_DCK)) { status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func, newfunc); if (ACPI_FAILURE(status)) err("failed to register interrupt notify handler\n"); } else status = AE_OK; return status; err_exit: bridge->nr_slots--; bridge->slots = slot->next; kfree(slot); kfree(newfunc); return AE_OK; } /* see if it's worth looking at this bridge */ static int detect_ejectable_slots(acpi_handle handle) { int found = acpi_pci_detect_ejectable(handle); if (!found) { acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, is_pci_dock_device, NULL, (void *)&found, NULL); } return found; } /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ static void init_bridge_misc(struct acpiphp_bridge *bridge) { acpi_status status; /* must be added to the list prior to calling register_slot */ list_add(&bridge->list, &bridge_list); /* register all slot objects under this bridge */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, register_slot, NULL, bridge, NULL); if (ACPI_FAILURE(status)) { list_del(&bridge->list); return; } /* install notify handler */ if (bridge->type != BRIDGE_TYPE_HOST) { if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) { status = acpi_remove_notify_handler(bridge->func->handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func); if (ACPI_FAILURE(status)) err("failed to remove notify handler\n"); } status = acpi_install_notify_handler(bridge->handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_bridge, bridge); if (ACPI_FAILURE(status)) { err("failed to register interrupt notify handler\n"); } } } /* find acpiphp_func from acpiphp_bridge */ static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle) { struct acpiphp_bridge *bridge; struct acpiphp_slot *slot; struct acpiphp_func 
*func; list_for_each_entry(bridge, &bridge_list, list) { for (slot = bridge->slots; slot; slot = slot->next) { list_for_each_entry(func, &slot->funcs, sibling) { if (func->handle == handle) return func; } } } return NULL; } static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge) { acpi_handle dummy_handle; if (ACPI_SUCCESS(acpi_get_handle(bridge->handle, "_STA", &dummy_handle))) bridge->flags |= BRIDGE_HAS_STA; if (ACPI_SUCCESS(acpi_get_handle(bridge->handle, "_EJ0", &dummy_handle))) bridge->flags |= BRIDGE_HAS_EJ0; if (ACPI_SUCCESS(acpi_get_handle(bridge->handle, "_PS0", &dummy_handle))) bridge->flags |= BRIDGE_HAS_PS0; if (ACPI_SUCCESS(acpi_get_handle(bridge->handle, "_PS3", &dummy_handle))) bridge->flags |= BRIDGE_HAS_PS3; /* is this ejectable p2p bridge? */ if (bridge->flags & BRIDGE_HAS_EJ0) { struct acpiphp_func *func; dbg("found ejectable p2p bridge\n"); /* make link between PCI bridge and PCI function */ func = acpiphp_bridge_handle_to_function(bridge->handle); if (!func) return; bridge->func = func; func->bridge = bridge; } } /* allocate and initialize host bridge data structure */ static void add_host_bridge(acpi_handle *handle) { struct acpiphp_bridge *bridge; struct acpi_pci_root *root = acpi_pci_find_root(handle); bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); if (bridge == NULL) return; bridge->type = BRIDGE_TYPE_HOST; bridge->handle = handle; bridge->pci_bus = root->bus; spin_lock_init(&bridge->res_lock); init_bridge_misc(bridge); } /* allocate and initialize PCI-to-PCI bridge data structure */ static void add_p2p_bridge(acpi_handle *handle) { struct acpiphp_bridge *bridge; bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); if (bridge == NULL) { err("out of memory\n"); return; } bridge->type = BRIDGE_TYPE_P2P; bridge->handle = handle; config_p2p_bridge_flags(bridge); bridge->pci_dev = acpi_get_pci_dev(handle); bridge->pci_bus = bridge->pci_dev->subordinate; if (!bridge->pci_bus) { err("This is not a 
PCI-to-PCI bridge!\n"); goto err; } /* * Grab a ref to the subordinate PCI bus in case the bus is * removed via PCI core logical hotplug. The ref pins the bus * (which we access during module unload). */ get_device(&bridge->pci_bus->dev); spin_lock_init(&bridge->res_lock); init_bridge_misc(bridge); return; err: pci_dev_put(bridge->pci_dev); kfree(bridge); return; } /* callback routine to find P2P bridges */ static acpi_status find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; struct pci_dev *dev; dev = acpi_get_pci_dev(handle); if (!dev || !dev->subordinate) goto out; /* check if this bridge has ejectable slots */ if ((detect_ejectable_slots(handle) > 0)) { dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); add_p2p_bridge(handle); } /* search P2P bridges under this p2p bridge */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, find_p2p_bridge, NULL, NULL, NULL); if (ACPI_FAILURE(status)) warn("find_p2p_bridge failed (error code = 0x%x)\n", status); out: pci_dev_put(dev); return AE_OK; } /* find hot-pluggable slots, and then find P2P bridge */ static int add_bridge(acpi_handle handle) { acpi_status status; unsigned long long tmp; acpi_handle dummy_handle; /* if the bridge doesn't have _STA, we assume it is always there */ status = acpi_get_handle(handle, "_STA", &dummy_handle); if (ACPI_SUCCESS(status)) { status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp); if (ACPI_FAILURE(status)) { dbg("%s: _STA evaluation failure\n", __func__); return 0; } if ((tmp & ACPI_STA_FUNCTIONING) == 0) /* don't register this object */ return 0; } /* check if this bridge has ejectable slots */ if (detect_ejectable_slots(handle) > 0) { dbg("found PCI host-bus bridge with hot-pluggable slots\n"); add_host_bridge(handle); } /* search P2P bridges under this host bridge */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, find_p2p_bridge, NULL, NULL, NULL); if (ACPI_FAILURE(status)) warn("find_p2p_bridge 
failed (error code = 0x%x)\n", status); return 0; } static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) { struct acpiphp_bridge *bridge; list_for_each_entry(bridge, &bridge_list, list) if (bridge->handle == handle) return bridge; return NULL; } static void cleanup_bridge(struct acpiphp_bridge *bridge) { struct acpiphp_slot *slot, *next; struct acpiphp_func *func, *tmp; acpi_status status; acpi_handle handle = bridge->handle; status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_bridge); if (ACPI_FAILURE(status)) err("failed to remove notify handler\n"); if ((bridge->type != BRIDGE_TYPE_HOST) && ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)) { status = acpi_install_notify_handler(bridge->func->handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func, bridge->func); if (ACPI_FAILURE(status)) err("failed to install interrupt notify handler\n"); } slot = bridge->slots; while (slot) { next = slot->next; list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) { if (is_dock_device(func->handle)) { unregister_hotplug_dock_device(func->handle); unregister_dock_notifier(&func->nb); } if (!(func->flags & FUNC_HAS_DCK)) { status = acpi_remove_notify_handler(func->handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func); if (ACPI_FAILURE(status)) err("failed to remove notify handler\n"); } list_del(&func->sibling); kfree(func); } acpiphp_unregister_hotplug_slot(slot); list_del(&slot->funcs); kfree(slot); slot = next; } /* * Only P2P bridges have a pci_dev */ if (bridge->pci_dev) put_device(&bridge->pci_bus->dev); pci_dev_put(bridge->pci_dev); list_del(&bridge->list); kfree(bridge); } static acpi_status cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) { struct acpiphp_bridge *bridge; /* cleanup p2p bridges under this P2P bridge in a depth-first manner */ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL); bridge = acpiphp_handle_to_bridge(handle); if 
(bridge) cleanup_bridge(bridge); return AE_OK; } static void remove_bridge(acpi_handle handle) { struct acpiphp_bridge *bridge; /* cleanup p2p bridges under this host bridge in a depth-first manner */ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL); /* * On root bridges with hotplug slots directly underneath (ie, * no p2p bridge between), we call cleanup_bridge(). * * The else clause cleans up root bridges that either had no * hotplug slots at all, or had a p2p bridge underneath. */ bridge = acpiphp_handle_to_bridge(handle); if (bridge) cleanup_bridge(bridge); else acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_bridge); } static int power_on_slot(struct acpiphp_slot *slot) { acpi_status status; struct acpiphp_func *func; int retval = 0; /* if already enabled, just skip */ if (slot->flags & SLOT_POWEREDON) goto err_exit; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_PS0) { dbg("%s: executing _PS0\n", __func__); status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL); if (ACPI_FAILURE(status)) { warn("%s: _PS0 failed\n", __func__); retval = -1; goto err_exit; } else break; } } /* TBD: evaluate _STA to check if the slot is enabled */ slot->flags |= SLOT_POWEREDON; err_exit: return retval; } static int power_off_slot(struct acpiphp_slot *slot) { acpi_status status; struct acpiphp_func *func; int retval = 0; /* if already disabled, just skip */ if ((slot->flags & SLOT_POWEREDON) == 0) goto err_exit; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_PS3) { status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); if (ACPI_FAILURE(status)) { warn("%s: _PS3 failed\n", __func__); retval = -1; goto err_exit; } else break; } } /* TBD: evaluate _STA to check if the slot is disabled */ slot->flags &= (~SLOT_POWEREDON); err_exit: return retval; } /** * acpiphp_max_busnr - return the highest reserved bus number under the given bus. 
* @bus: bus to start search with */ static unsigned char acpiphp_max_busnr(struct pci_bus *bus) { struct list_head *tmp; unsigned char max, n; /* * pci_bus_max_busnr will return the highest * reserved busnr for all these children. * that is equivalent to the bus->subordinate * value. We don't want to use the parent's * bus->subordinate value because it could have * padding in it. */ max = bus->secondary; list_for_each(tmp, &bus->children) { n = pci_bus_max_busnr(pci_bus_b(tmp)); if (n > max) max = n; } return max; } /** * acpiphp_bus_add - add a new bus to acpi subsystem * @func: acpiphp_func of the bridge */ static int acpiphp_bus_add(struct acpiphp_func *func) { acpi_handle phandle; struct acpi_device *device, *pdevice; int ret_val; acpi_get_parent(func->handle, &phandle); if (acpi_bus_get_device(phandle, &pdevice)) { dbg("no parent device, assuming NULL\n"); pdevice = NULL; } if (!acpi_bus_get_device(func->handle, &device)) { dbg("bus exists... trim\n"); /* this shouldn't be in here, so remove * the bus then re-add it... 
*/ ret_val = acpi_bus_trim(device, 1); dbg("acpi_bus_trim return %x\n", ret_val); } ret_val = acpi_bus_add(&device, pdevice, func->handle, ACPI_BUS_TYPE_DEVICE); if (ret_val) { dbg("error adding bus, %x\n", -ret_val); goto acpiphp_bus_add_out; } ret_val = acpi_bus_start(device); acpiphp_bus_add_out: return ret_val; } /** * acpiphp_bus_trim - trim a bus from acpi subsystem * @handle: handle to acpi namespace */ static int acpiphp_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { dbg("acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) err("cannot remove from acpi list\n"); return retval; } static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) { struct acpiphp_func *func; union acpi_object params[2]; struct acpi_object_list arg_list; list_for_each_entry(func, &slot->funcs, sibling) { arg_list.count = 2; arg_list.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = ACPI_ADR_SPACE_PCI_CONFIG; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 1; /* _REG is optional, we don't care about if there is failure */ acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL); } } /** * enable_device - enable, configure a slot * @slot: slot to be enabled * * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. 
*/ static int __ref enable_device(struct acpiphp_slot *slot) { struct pci_dev *dev; struct pci_bus *bus = slot->bridge->pci_bus; struct acpiphp_func *func; int retval = 0; int num, max, pass; acpi_status status; if (slot->flags & SLOT_ENABLED) goto err_exit; /* sanity check: dev should be NULL when hot-plugged in */ dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0)); if (dev) { /* This case shouldn't happen */ err("pci_dev structure already exists.\n"); pci_dev_put(dev); retval = -1; goto err_exit; } num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0)); if (num == 0) { err("No new device found\n"); retval = -1; goto err_exit; } max = acpiphp_max_busnr(bus); for (pass = 0; pass < 2; pass++) { list_for_each_entry(dev, &bus->devices, bus_list) { if (PCI_SLOT(dev->devfn) != slot->device) continue; if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) pci_bus_size_bridges(dev->subordinate); } } } list_for_each_entry(func, &slot->funcs, sibling) acpiphp_bus_add(func); pci_bus_assign_resources(bus); acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); acpiphp_set_acpi_region(slot); pci_enable_bridges(bus); list_for_each_entry(dev, &bus->devices, bus_list) { /* Assume that newly added devices are powered on already. 
*/ if (!dev->is_added) dev->current_state = PCI_D0; } pci_bus_add_devices(bus); list_for_each_entry(func, &slot->funcs, sibling) { dev = pci_get_slot(bus, PCI_DEVFN(slot->device, func->function)); if (!dev) continue; if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) { pci_dev_put(dev); continue; } status = find_p2p_bridge(func->handle, (u32)1, bus, NULL); if (ACPI_FAILURE(status)) warn("find_p2p_bridge failed (error code = 0x%x)\n", status); pci_dev_put(dev); } slot->flags |= SLOT_ENABLED; err_exit: return retval; } static void disable_bridges(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) { disable_bridges(dev->subordinate); pci_disable_device(dev); } } } /** * disable_device - disable a slot * @slot: ACPI PHP slot */ static int disable_device(struct acpiphp_slot *slot) { struct acpiphp_func *func; struct pci_dev *pdev; /* is this slot already disabled? */ if (!(slot->flags & SLOT_ENABLED)) goto err_exit; list_for_each_entry(func, &slot->funcs, sibling) { if (func->bridge) { /* cleanup p2p bridges under this P2P bridge */ cleanup_p2p_bridge(func->bridge->handle, (u32)1, NULL, NULL); func->bridge = NULL; } pdev = pci_get_slot(slot->bridge->pci_bus, PCI_DEVFN(slot->device, func->function)); if (pdev) { pci_stop_bus_device(pdev); if (pdev->subordinate) { disable_bridges(pdev->subordinate); pci_disable_device(pdev); } pci_remove_bus_device(pdev); pci_dev_put(pdev); } } list_for_each_entry(func, &slot->funcs, sibling) { acpiphp_bus_trim(func->handle); } slot->flags &= (~SLOT_ENABLED); err_exit: return 0; } /** * get_slot_status - get ACPI slot status * @slot: ACPI PHP slot * * If a slot has _STA for each function and if any one of them * returned non-zero status, return it. * * If a slot doesn't have _STA and if any one of its functions' * configuration space is configured, return 0x0f as a _STA. * * Otherwise return 0. 
*/ static unsigned int get_slot_status(struct acpiphp_slot *slot) { acpi_status status; unsigned long long sta = 0; u32 dvid; struct acpiphp_func *func; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_STA) { status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); if (ACPI_SUCCESS(status) && sta) break; } else { pci_bus_read_config_dword(slot->bridge->pci_bus, PCI_DEVFN(slot->device, func->function), PCI_VENDOR_ID, &dvid); if (dvid != 0xffffffff) { sta = ACPI_STA_ALL; break; } } } return (unsigned int)sta; } /** * acpiphp_eject_slot - physically eject the slot * @slot: ACPI PHP slot */ int acpiphp_eject_slot(struct acpiphp_slot *slot) { acpi_status status; struct acpiphp_func *func; struct acpi_object_list arg_list; union acpi_object arg; list_for_each_entry(func, &slot->funcs, sibling) { /* We don't want to call _EJ0 on non-existing functions. */ if ((func->flags & FUNC_HAS_EJ0)) { /* _EJ0 method take one argument */ arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = 1; status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL); if (ACPI_FAILURE(status)) { warn("%s: _EJ0 failed\n", __func__); return -1; } else break; } } return 0; } /** * acpiphp_check_bridge - re-enumerate devices * @bridge: where to begin re-enumeration * * Iterate over all slots under this bridge and make sure that if a * card is present they are enabled, and if not they are disabled. 
*/ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge) { struct acpiphp_slot *slot; int retval = 0; int enabled, disabled; enabled = disabled = 0; for (slot = bridge->slots; slot; slot = slot->next) { unsigned int status = get_slot_status(slot); if (slot->flags & SLOT_ENABLED) { if (status == ACPI_STA_ALL) continue; retval = acpiphp_disable_slot(slot); if (retval) { err("Error occurred in disabling\n"); goto err_exit; } else { acpiphp_eject_slot(slot); } disabled++; } else { if (status != ACPI_STA_ALL) continue; retval = acpiphp_enable_slot(slot); if (retval) { err("Error occurred in enabling\n"); goto err_exit; } enabled++; } } dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled); err_exit: return retval; } static void acpiphp_set_hpp_values(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) pci_configure_slot(dev); } /* * Remove devices for which we could not assign resources, call * arch specific code to fix-up the bus */ static void acpiphp_sanitize_bus(struct pci_bus *bus) { struct pci_dev *dev; int i; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM; list_for_each_entry(dev, &bus->devices, bus_list) { for (i=0; i<PCI_BRIDGE_RESOURCES; i++) { struct resource *res = &dev->resource[i]; if ((res->flags & type_mask) && !res->start && res->end) { /* Could not assign a required resources * for this device, remove it */ pci_remove_bus_device(dev); break; } } } } /* Program resources in newly inserted bridge */ static int acpiphp_configure_bridge (acpi_handle handle) { struct pci_bus *bus; if (acpi_is_root_bridge(handle)) { struct acpi_pci_root *root = acpi_pci_find_root(handle); bus = root->bus; } else { struct pci_dev *pdev = acpi_get_pci_dev(handle); bus = pdev->subordinate; pci_dev_put(pdev); } pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); pci_enable_bridges(bus); return 0; } static void handle_bridge_insertion(acpi_handle 
handle, u32 type) { struct acpi_device *device, *pdevice; acpi_handle phandle; if ((type != ACPI_NOTIFY_BUS_CHECK) && (type != ACPI_NOTIFY_DEVICE_CHECK)) { err("unexpected notification type %d\n", type); return; } acpi_get_parent(handle, &phandle); if (acpi_bus_get_device(phandle, &pdevice)) { dbg("no parent device, assuming NULL\n"); pdevice = NULL; } if (acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE)) { err("cannot add bridge to acpi list\n"); return; } if (!acpiphp_configure_bridge(handle) && !acpi_bus_start(device)) add_bridge(handle); else err("cannot configure and start bridge\n"); } /* * ACPI event handlers */ static acpi_status count_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) { int *count = (int *)context; struct acpiphp_bridge *bridge; bridge = acpiphp_handle_to_bridge(handle); if (bridge) (*count)++; return AE_OK ; } static acpi_status check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) { struct acpiphp_bridge *bridge; char objname[64]; struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname }; bridge = acpiphp_handle_to_bridge(handle); if (bridge) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); dbg("%s: re-enumerating slots under %s\n", __func__, objname); acpiphp_check_bridge(bridge); } return AE_OK ; } /** * handle_hotplug_event_bridge - handle ACPI event on bridges * @handle: Notify()'ed acpi_handle * @type: Notify code * @context: pointer to acpiphp_bridge structure * * Handles ACPI event notification on {host,p2p} bridges. 
 */
static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context)
{
	struct acpiphp_bridge *bridge;
	char objname[64];
	struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname };
	struct acpi_device *device;
	int num_sub_bridges = 0;

	if (acpi_bus_get_device(handle, &device)) {
		/* This bridge must have just been physically inserted */
		handle_bridge_insertion(handle, type);
		return;
	}

	bridge = acpiphp_handle_to_bridge(handle);
	/* num_sub_bridges is only counted for BUS_CHECK; for every other
	 * notify type it stays 0, so the guard below guarantees that
	 * 'bridge' is non-NULL in the non-BUS_CHECK switch cases. */
	if (type == ACPI_NOTIFY_BUS_CHECK) {
		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX, count_sub_bridges, NULL, &num_sub_bridges, NULL);
	}

	if (!bridge && !num_sub_bridges) {
		err("cannot get bridge info\n");
		return;
	}

	/* resolve the full ACPI pathname once, for all log messages below */
	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);

	switch (type) {
	case ACPI_NOTIFY_BUS_CHECK:
		/* bus re-enumerate */
		dbg("%s: Bus check notify on %s\n", __func__, objname);
		if (bridge) {
			dbg("%s: re-enumerating slots under %s\n", __func__, objname);
			acpiphp_check_bridge(bridge);
		}
		if (num_sub_bridges)
			acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
		break;

	case ACPI_NOTIFY_DEVICE_CHECK:
		/* device check (bridge is non-NULL here, see guard above) */
		dbg("%s: Device check notify on %s\n", __func__, objname);
		acpiphp_check_bridge(bridge);
		break;

	case ACPI_NOTIFY_DEVICE_WAKE:
		/* wake event */
		dbg("%s: Device wake notify on %s\n", __func__, objname);
		break;

	case ACPI_NOTIFY_EJECT_REQUEST:
		/* request device eject; only honored for non-host bridges
		 * that actually expose an _EJ0 method */
		dbg("%s: Device eject notify on %s\n", __func__, objname);
		if ((bridge->type != BRIDGE_TYPE_HOST) && (bridge->flags & BRIDGE_HAS_EJ0)) {
			struct acpiphp_slot *slot;
			slot = bridge->func->slot;
			/* only eject after the slot was cleanly powered down */
			if (!acpiphp_disable_slot(slot))
				acpiphp_eject_slot(slot);
		}
		break;

	case ACPI_NOTIFY_FREQUENCY_MISMATCH:
		printk(KERN_ERR "Device %s cannot be configured due" " to a frequency mismatch\n", objname);
		break;

	case ACPI_NOTIFY_BUS_MODE_MISMATCH:
		printk(KERN_ERR "Device %s cannot be configured due" " to a bus mode mismatch\n", objname);
		break;

	case ACPI_NOTIFY_POWER_FAULT:
		printk(KERN_ERR "Device %s has suffered a power fault\n", objname);
		break;

	default:
		warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
		break;
	}
}

/**
 * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
 * @handle: Notify()'ed acpi_handle
 * @type: Notify code
 * @context: pointer to acpiphp_func structure
 *
 * Handles ACPI event notification on slots.
 */
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context)
{
	struct acpiphp_func *func;
	char objname[64];
	struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname };

	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);

	/* context was registered as the acpiphp_func for this handle */
	func = (struct acpiphp_func *)context;

	switch (type) {
	case ACPI_NOTIFY_BUS_CHECK:
		/* bus re-enumerate */
		dbg("%s: Bus check notify on %s\n", __func__, objname);
		acpiphp_enable_slot(func->slot);
		break;

	case ACPI_NOTIFY_DEVICE_CHECK:
		/* device check : re-enumerate from parent bus */
		dbg("%s: Device check notify on %s\n", __func__, objname);
		acpiphp_check_bridge(func->slot->bridge);
		break;

	case ACPI_NOTIFY_DEVICE_WAKE:
		/* wake event */
		dbg("%s: Device wake notify on %s\n", __func__, objname);
		break;

	case ACPI_NOTIFY_EJECT_REQUEST:
		/* request device eject; only eject after a clean disable */
		dbg("%s: Device eject notify on %s\n", __func__, objname);
		if (!(acpiphp_disable_slot(func->slot)))
			acpiphp_eject_slot(func->slot);
		break;

	default:
		warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
		break;
	}
}

/*
 * find_root_bridges - namespace-walk callback that installs the bridge
 * hotplug notify handler on every ACPI root bridge found, counting them
 * via @context.  NOTE(review): the acpi_install_notify_handler() status
 * is ignored here, so a failed install still bumps the count.
 */
static acpi_status find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	int *count = (int *)context;

	if (acpi_is_root_bridge(handle)) {
		acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_bridge, NULL);
		(*count)++;
	}
	return AE_OK;
}

/* hooks into the ACPI PCI driver chain for bridge add/remove */
static struct acpi_pci_driver acpi_pci_hp_driver = {
	.add = add_bridge,
	.remove = remove_bridge,
};

/**
 * acpiphp_glue_init - initializes all PCI hotplug - ACPI glue data structures
 */
int __init acpiphp_glue_init(void)
{
	int num = 0;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);

	/* no root bridges means there is nothing for this driver to manage */
	if (num <= 0)
		return -1;
	else
		acpi_pci_register_driver(&acpi_pci_hp_driver);

	return 0;
}

/**
 * acpiphp_glue_exit - terminates all PCI hotplug - ACPI glue data structures
 *
 * This function frees all data allocated in acpiphp_glue_init().
 */
void acpiphp_glue_exit(void)
{
	acpi_pci_unregister_driver(&acpi_pci_hp_driver);
}

/**
 * acpiphp_get_num_slots - count number of slots in a system
 */
int __init acpiphp_get_num_slots(void)
{
	struct acpiphp_bridge *bridge;
	int num_slots = 0;

	list_for_each_entry(bridge, &bridge_list, list) {
		dbg("Bus %04x:%02x has %d slot%s\n", pci_domain_nr(bridge->pci_bus), bridge->pci_bus->number, bridge->nr_slots, bridge->nr_slots == 1 ? "" : "s");
		num_slots += bridge->nr_slots;
	}

	dbg("Total %d slots\n", num_slots);
	return num_slots;
}

#if 0
/**
 * acpiphp_for_each_slot - call function for each slot
 * @fn: callback function
 * @data: context to be passed to callback function
 */
static int acpiphp_for_each_slot(acpiphp_callback fn, void *data)
{
	struct list_head *node;
	struct acpiphp_bridge *bridge;
	struct acpiphp_slot *slot;
	int retval = 0;

	list_for_each (node, &bridge_list) {
		bridge = (struct acpiphp_bridge *)node;
		for (slot = bridge->slots; slot; slot = slot->next) {
			retval = fn(slot, data);
			if (!retval)
				goto err_exit;
		}
	}

 err_exit:
	return retval;
}
#endif

/**
 * acpiphp_enable_slot - power on slot
 * @slot: ACPI PHP slot
 *
 * Returns 0 on success or the error from power_on_slot()/enable_device().
 * Note: if the slot powers on but its status is not ACPI_STA_ALL, the slot
 * is powered back off yet 0 (power_on_slot's success) is still returned.
 */
int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
	int retval;

	mutex_lock(&slot->crit_sect);

	/* wake up all functions */
	retval = power_on_slot(slot);
	if (retval)
		goto err_exit;

	if (get_slot_status(slot) == ACPI_STA_ALL) {
		/* configure all functions */
		retval = enable_device(slot);
		if (retval)
			power_off_slot(slot);
	} else {
		dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__);
		power_off_slot(slot);
	}

 err_exit:
	mutex_unlock(&slot->crit_sect);
	return retval;
}

/**
 * acpiphp_disable_slot - power off slot
 * @slot: ACPI PHP slot
 *
 * Unconfigures every function on the slot, then powers it down.
 * Returns 0 on success; stops at the first failing step.
 */
int acpiphp_disable_slot(struct acpiphp_slot *slot)
{
	int retval = 0;

	mutex_lock(&slot->crit_sect);

	/* unconfigure all functions */
	retval = disable_device(slot);
	if (retval)
		goto err_exit;

	/* power off all functions */
	retval = power_off_slot(slot);
	if (retval)
		goto err_exit;

 err_exit:
	mutex_unlock(&slot->crit_sect);
	return retval;
}

/*
 * slot enabled:  1
 * slot disabled: 0
 */
u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
{
	return (slot->flags & SLOT_POWEREDON);
}

/*
 * latch   open: 1
 * latch closed: 0
 *
 * Derived from the _STA "show in UI" bit: visible in UI == latch closed.
 */
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
{
	unsigned int sta;

	sta = get_slot_status(slot);

	return (sta & ACPI_STA_SHOW_IN_UI) ? 0 : 1;
}

/*
 * adapter presence : 1
 *         absence  : 0
 *
 * Any non-zero _STA is treated as "adapter present".
 */
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
{
	unsigned int sta;

	sta = get_slot_status(slot);

	return (sta == 0) ? 0 : 1;
}
gpl-2.0
BORETS24/common.git-android-3.18
net/wireless/debugfs.c
1956
3100
/*
 * cfg80211 debugfs
 *
 * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include "core.h"
#include "debugfs.h"

/*
 * DEBUGFS_READONLY_FILE - define a read() handler plus file_operations
 * for a read-only debugfs file that formats a single wiphy field.
 * @name: base name for the generated <name>_read() and <name>_ops
 * @buflen: size of the on-stack formatting buffer
 * @fmt: printf-style format for the value
 * @value...: expression(s) evaluated with 'wiphy' in scope
 *
 * The expansion already ends in ';', so invocations take no trailing
 * semicolon (previously half the invocations had a redundant one).
 */
#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)		\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct wiphy *wiphy = file->private_data;			\
	char buf[buflen];						\
	int res;							\
									\
	res = scnprintf(buf, buflen, fmt "\n", ##value);		\
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);	\
}									\
									\
static const struct file_operations name## _ops = {			\
	.read = name## _read,						\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold)
DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", wiphy->frag_threshold)
DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", wiphy->retry_short)
DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long)

/*
 * Format one channel's HT40 allowance into buf at the given offset.
 * Returns the number of characters added (0 if offset is already past
 * buf_size).
 */
static int ht_print_chan(struct ieee80211_channel *chan,
			 char *buf, int buf_size, int offset)
{
	if (WARN_ON(offset > buf_size))
		return 0;

	if (chan->flags & IEEE80211_CHAN_DISABLED)
		return scnprintf(buf + offset, buf_size - offset,
				 "%d Disabled\n", chan->center_freq);

	return scnprintf(buf + offset, buf_size - offset,
			 "%d HT40 %c%c\n", chan->center_freq,
			 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
			 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
}

/* debugfs read(): dump the HT40+/- allowance map for every known channel */
static ssize_t ht40allow_map_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;
	char *buf;
	unsigned int offset = 0, buf_size = PAGE_SIZE, i;
	/*
	 * Fix: simple_read_from_buffer() returns ssize_t and reports
	 * failures as negative errno values.  This used to be stored in
	 * an unsigned int, which mangled errors into large positive
	 * "byte counts" when returned to the VFS.
	 */
	ssize_t r;
	enum ieee80211_band band;
	struct ieee80211_supported_band *sband;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the band/channel tables are only stable under RTNL */
	rtnl_lock();

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++)
			offset += ht_print_chan(&sband->channels[i],
						buf, buf_size, offset);
	}

	rtnl_unlock();

	r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);

	kfree(buf);

	return r;
}

static const struct file_operations ht40allow_map_ops = {
	.read = ht40allow_map_read,
	.open = simple_open,
	.llseek = default_llseek,
};

#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);

/* Create the per-wiphy debugfs files under the device's debugfs dir. */
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
	struct dentry *phyd = rdev->wiphy.debugfsdir;

	DEBUGFS_ADD(rts_threshold);
	DEBUGFS_ADD(fragmentation_threshold);
	DEBUGFS_ADD(short_retry_limit);
	DEBUGFS_ADD(long_retry_limit);
	DEBUGFS_ADD(ht40allow_map);
}
gpl-2.0
Sricharanti/sricharan
drivers/staging/sbe-2t3e3/netdev.c
2724
3542
/*
 * SBE 2T3E3 synchronous serial card driver for Linux
 *
 * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This code is based on a driver written by SBE Inc.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include "2t3e3.h"

/*
 * Private ioctl (SIOCDEVPRIVATE + 15): user memory at ifr_data holds
 * [int cmd_2t3e3][int len][len bytes of t3e3_param_t], and the response
 * is written back over the parameter area.  SIOCWANDEV is forwarded to
 * the generic HDLC layer; everything else requires CAP_SYS_ADMIN.
 */
static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct channel *sc = dev_to_priv(dev);
	int cmd_2t3e3, len, rlen;
	t3e3_param_t param;
	t3e3_resp_t resp;
	/* payload follows the two leading ints in the user buffer */
	void __user *data = ifr->ifr_data + sizeof(cmd_2t3e3) + sizeof(len);

	if (cmd == SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (cmd != SIOCDEVPRIVATE + 15)
		return -EINVAL;

	if (copy_from_user(&cmd_2t3e3, ifr->ifr_data, sizeof(cmd_2t3e3)))
		return -EFAULT;
	if (copy_from_user(&len, ifr->ifr_data + sizeof(cmd_2t3e3), sizeof(len)))
		return -EFAULT;

	/* a negative len converts to a huge size_t here and is rejected too */
	if (len > sizeof(param))
		return -EFAULT;

	if (len)
		if (copy_from_user(&param, data, len))
			return -EFAULT;

	/* NOTE(review): rlen is passed uninitialized; this assumes
	 * t3e3_if_config() always stores a response length through its
	 * last argument — confirm against its definition. */
	t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);

	if (rlen)
		if (copy_to_user(data, &resp, rlen))
			return -EFAULT;

	return 0;
}

/*
 * .ndo_get_stats: translate the card's private counters into the
 * generic net_device_stats embedded in the device.
 */
static struct net_device_stats *t3e3_get_stats(struct net_device *dev)
{
	struct net_device_stats *nstats = &dev->stats;
	struct channel *sc = dev_to_priv(dev);
	t3e3_stats_t *stats = &sc->s;

	/* rebuild the whole structure from scratch on every call */
	memset(nstats, 0, sizeof(struct net_device_stats));
	nstats->rx_packets = stats->in_packets;
	nstats->tx_packets = stats->out_packets;
	nstats->rx_bytes = stats->in_bytes;
	nstats->tx_bytes = stats->out_bytes;

	nstats->rx_errors = stats->in_errors;
	nstats->tx_errors = stats->out_errors;
	nstats->rx_crc_errors = stats->in_error_crc;
	nstats->rx_dropped = stats->in_dropped;
	nstats->tx_dropped = stats->out_dropped;
	nstats->tx_carrier_errors = stats->out_error_lost_carr + stats->out_error_no_carr;

	return nstats;
}

/*
 * .ndo_open: open the HDLC layer first; only on success mark the link
 * up, start the DMA controller and the TX queue, and pin the module.
 */
static int t3e3_open(struct net_device *dev)
{
	struct channel *sc = dev_to_priv(dev);
	int ret = hdlc_open(dev);

	if (ret)
		return ret;

	sc->r.flags |= SBE_2T3E3_FLAG_NETWORK_UP;
	dc_start(dev_to_priv(dev));
	netif_start_queue(dev);
	try_module_get(THIS_MODULE);
	return 0;
}

/* .ndo_stop: reverse of t3e3_open(), in tear-down order */
static int t3e3_close(struct net_device *dev)
{
	struct channel *sc = dev_to_priv(dev);
	hdlc_close(dev);
	netif_stop_queue(dev);
	dc_stop(sc);
	sc->r.flags &= ~SBE_2T3E3_FLAG_NETWORK_UP;
	module_put(THIS_MODULE);
	return 0;
}

/* generic-HDLC attach callback; this hardware needs no per-mode setup */
static int t3e3_attach(struct net_device *dev, unsigned short foo1, unsigned short foo2)
{
	return 0;
}

static const struct net_device_ops t3e3_ops = {
	.ndo_open = t3e3_open,
	.ndo_stop = t3e3_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl = t3e3_ioctl,
	.ndo_get_stats = t3e3_get_stats,
};

/*
 * setup_device - wire a freshly allocated net_device to the PCI channel
 * and register it with the generic HDLC layer.
 * Returns 0 on success or the negative error from register_hdlc_device().
 */
int setup_device(struct net_device *dev, struct channel *sc)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int retval;

	dev->base_addr = pci_resource_start(sc->pdev, 0);
	dev->irq = sc->pdev->irq;
	dev->netdev_ops = &t3e3_ops;
	dev->tx_queue_len = 100;
	hdlc->xmit = t3e3_if_start_xmit;
	hdlc->attach = t3e3_attach;
	retval = register_hdlc_device(dev);
	if (retval) {
		dev_err(&sc->pdev->dev, "error registering HDLC device\n");
		return retval;
	}
	return 0;
}
gpl-2.0
JijonHyuni/HyperKernel-JB
drivers/staging/ath6kl/htc2/htc_services.c
2980
18495
//------------------------------------------------------------------------------
// <copyright file="htc_services.c" company="Atheros">
//    Copyright (c) 2007-2010 Atheros Corporation.  All rights reserved.
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//------------------------------------------------------------------------------
//==============================================================================
// Author(s): ="Atheros"
//==============================================================================
#include "htc_internal.h"

/* TX-complete callback for the control endpoint; intentionally unreachable */
void HTCControlTxComplete(void *Context, struct htc_packet *pPacket)
{
	/* not implemented
	 * we do not send control TX frames during normal runtime, only during setup */
	AR_DEBUG_ASSERT(false);
}

/* callback when a control message arrives on this endpoint */
void HTCControlRecv(void *Context, struct htc_packet *pPacket)
{
	AR_DEBUG_ASSERT(pPacket->Endpoint == ENDPOINT_0);

	if (pPacket->Status == A_ECANCELED) {
		/* this is a flush operation, return the control packet back to the pool */
		HTC_FREE_CONTROL_RX((struct htc_target*)Context, pPacket);
		return;
	}

	/* the only control messages we are expecting are NULL messages (credit resports) */
	if (pPacket->ActualLength > 0) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("HTCControlRecv, got message with length:%d \n",
				pPacket->ActualLength + (u32)HTC_HDR_LENGTH));

#ifdef ATH_DEBUG_MODULE
		/* dump header and message */
		DebugDumpBytes(pPacket->pBuffer - HTC_HDR_LENGTH,
			       pPacket->ActualLength + HTC_HDR_LENGTH,
			       "Unexpected ENDPOINT 0 Message");
#endif
	}

	/* recycle the packet back onto endpoint 0's receive queue */
	HTC_RECYCLE_RX_PKT((struct htc_target*)Context, pPacket,
			   &((struct htc_target*)Context)->EndPoint[0]);
}

/*
 * HTCSendSetupComplete - tell the target that host-side setup is done.
 * Picks the extended message format for targets >= HTC_VERSION_2P1 (which
 * can also negotiate receive-bundling), the legacy format otherwise.
 * Sent synchronously; the control packet is always returned to the pool.
 * Returns 0 on success or an A_* error code.
 */
int HTCSendSetupComplete(struct htc_target *target)
{
	struct htc_packet *pSendPacket = NULL;
	int status;

	do {
		/* allocate a packet to send to the target */
		pSendPacket = HTC_ALLOC_CONTROL_TX(target);

		if (NULL == pSendPacket) {
			status = A_NO_MEMORY;
			break;
		}

		if (target->HTCTargetVersion >= HTC_VERSION_2P1) {
			HTC_SETUP_COMPLETE_EX_MSG *pSetupCompleteEx;
			u32 setupFlags = 0;

			pSetupCompleteEx = (HTC_SETUP_COMPLETE_EX_MSG *)pSendPacket->pBuffer;
			A_MEMZERO(pSetupCompleteEx, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
			pSetupCompleteEx->MessageID = HTC_MSG_SETUP_COMPLETE_EX_ID;
			if (target->MaxMsgPerBundle > 0) {
				/* host can do HTC bundling, indicate this to the target */
				setupFlags |= HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV;
				pSetupCompleteEx->MaxMsgsPerBundledRecv = target->MaxMsgPerBundle;
			}
			/* memcpy rather than assignment: the message field may be unaligned */
			memcpy(&pSetupCompleteEx->SetupFlags, &setupFlags,
			       sizeof(pSetupCompleteEx->SetupFlags));
			SET_HTC_PACKET_INFO_TX(pSendPacket,
					       NULL,
					       (u8 *)pSetupCompleteEx,
					       sizeof(HTC_SETUP_COMPLETE_EX_MSG),
					       ENDPOINT_0,
					       HTC_SERVICE_TX_PACKET_TAG);
		} else {
			HTC_SETUP_COMPLETE_MSG *pSetupComplete;
			/* assemble setup complete message */
			pSetupComplete = (HTC_SETUP_COMPLETE_MSG *)pSendPacket->pBuffer;
			A_MEMZERO(pSetupComplete, sizeof(HTC_SETUP_COMPLETE_MSG));
			pSetupComplete->MessageID = HTC_MSG_SETUP_COMPLETE_ID;
			SET_HTC_PACKET_INFO_TX(pSendPacket,
					       NULL,
					       (u8 *)pSetupComplete,
					       sizeof(HTC_SETUP_COMPLETE_MSG),
					       ENDPOINT_0,
					       HTC_SERVICE_TX_PACKET_TAG);
		}

		/* we want synchronous operation */
		pSendPacket->Completion = NULL;
		HTC_PREPARE_SEND_PKT(pSendPacket, 0, 0, 0);
		/* send the message */
		status = HTCIssueSend(target, pSendPacket);
	} while (false);

	if (pSendPacket != NULL) {
		HTC_FREE_CONTROL_TX(target, pSendPacket);
	}

	return status;
}

/*
 * HTCConnectService - connect a service to an HTC endpoint.
 * For the pseudo control service the endpoint is fixed (ENDPOINT_0);
 * otherwise a CONNECT_SERVICE message (optionally carrying caller meta
 * data) is exchanged synchronously with the target, and the endpoint
 * assigned by the target's response is configured from the request:
 * callbacks, queue depth, max message length and credit-distribution
 * parameters.  Uses the do/while(false) break-out style for staged
 * error handling; control packets are always released at the end.
 * Returns 0 on success or an A_* error code.
 */
int HTCConnectService(HTC_HANDLE HTCHandle,
		      struct htc_service_connect_req *pConnectReq,
		      struct htc_service_connect_resp *pConnectResp)
{
	struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
	int status = 0;
	struct htc_packet *pRecvPacket = NULL;
	struct htc_packet *pSendPacket = NULL;
	HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg;
	HTC_CONNECT_SERVICE_MSG *pConnectMsg;
	HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX;
	struct htc_endpoint *pEndpoint;
	unsigned int maxMsgSize = 0;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("+HTCConnectService, target:0x%lX SvcID:0x%X \n",
			(unsigned long)target, pConnectReq->ServiceID));

	do {

		AR_DEBUG_ASSERT(pConnectReq->ServiceID != 0);

		if (HTC_CTRL_RSVD_SVC == pConnectReq->ServiceID) {
			/* special case for pseudo control service */
			assignedEndpoint = ENDPOINT_0;
			maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH;
		} else {
			/* allocate a packet to send to the target */
			pSendPacket = HTC_ALLOC_CONTROL_TX(target);

			if (NULL == pSendPacket) {
				AR_DEBUG_ASSERT(false);
				status = A_NO_MEMORY;
				break;
			}
			/* assemble connect service message */
			pConnectMsg = (HTC_CONNECT_SERVICE_MSG *)pSendPacket->pBuffer;
			AR_DEBUG_ASSERT(pConnectMsg != NULL);
			A_MEMZERO(pConnectMsg, sizeof(HTC_CONNECT_SERVICE_MSG));
			pConnectMsg->MessageID = HTC_MSG_CONNECT_SERVICE_ID;
			pConnectMsg->ServiceID = pConnectReq->ServiceID;
			pConnectMsg->ConnectionFlags = pConnectReq->ConnectionFlags;
			/* check caller if it wants to transfer meta data */
			if ((pConnectReq->pMetaData != NULL) &&
			    (pConnectReq->MetaDataLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
				/* copy meta data into message buffer (after header ) */
				memcpy((u8 *)pConnectMsg + sizeof(HTC_CONNECT_SERVICE_MSG),
				       pConnectReq->pMetaData,
				       pConnectReq->MetaDataLength);
				pConnectMsg->ServiceMetaLength = pConnectReq->MetaDataLength;
			}

			SET_HTC_PACKET_INFO_TX(pSendPacket,
					       NULL,
					       (u8 *)pConnectMsg,
					       sizeof(HTC_CONNECT_SERVICE_MSG) + pConnectMsg->ServiceMetaLength,
					       ENDPOINT_0,
					       HTC_SERVICE_TX_PACKET_TAG);

			/* we want synchronous operation */
			pSendPacket->Completion = NULL;
			HTC_PREPARE_SEND_PKT(pSendPacket, 0, 0, 0);
			status = HTCIssueSend(target, pSendPacket);

			if (status) {
				break;
			}
			/* wait for response */
			status = HTCWaitforControlMessage(target, &pRecvPacket);

			if (status) {
				break;
			}
			/* we controlled the buffer creation so it has to be properly aligned */
			pResponseMsg = (HTC_CONNECT_SERVICE_RESPONSE_MSG *)pRecvPacket->pBuffer;

			if ((pResponseMsg->MessageID != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID) ||
			    (pRecvPacket->ActualLength < sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) {
				/* this message is not valid */
				AR_DEBUG_ASSERT(false);
				status = A_EPROTO;
				break;
			}

			pConnectResp->ConnectRespCode = pResponseMsg->Status;
			/* check response status */
			if (pResponseMsg->Status != HTC_SERVICE_SUCCESS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					(" Target failed service 0x%X connect request (status:%d)\n",
					pResponseMsg->ServiceID, pResponseMsg->Status));
				status = A_EPROTO;
				break;
			}

			assignedEndpoint = (HTC_ENDPOINT_ID) pResponseMsg->EndpointID;
			maxMsgSize = pResponseMsg->MaxMsgSize;

			if ((pConnectResp->pMetaData != NULL) &&
			    (pResponseMsg->ServiceMetaLength > 0) &&
			    (pResponseMsg->ServiceMetaLength <= HTC_SERVICE_META_DATA_MAX_LENGTH)) {
				/* caller supplied a buffer and the target responded with data */
				int copyLength = min((int)pConnectResp->BufferLength,
						     (int)pResponseMsg->ServiceMetaLength);
				/* copy the meta data */
				memcpy(pConnectResp->pMetaData,
				       ((u8 *)pResponseMsg) + sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG),
				       copyLength);
				pConnectResp->ActualLength = copyLength;
			}

		}

		/* the rest of these are parameter checks so set the error status */
		status = A_EPROTO;

		if (assignedEndpoint >= ENDPOINT_MAX) {
			AR_DEBUG_ASSERT(false);
			break;
		}

		if (0 == maxMsgSize) {
			AR_DEBUG_ASSERT(false);
			break;
		}

		pEndpoint = &target->EndPoint[assignedEndpoint];
		pEndpoint->Id = assignedEndpoint;
		if (pEndpoint->ServiceID != 0) {
			/* endpoint already in use! */
			AR_DEBUG_ASSERT(false);
			break;
		}

		/* return assigned endpoint to caller */
		pConnectResp->Endpoint = assignedEndpoint;
		pConnectResp->MaxMsgLength = maxMsgSize;

		/* setup the endpoint */
		pEndpoint->ServiceID = pConnectReq->ServiceID; /* this marks the endpoint in use */
		pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth;
		pEndpoint->MaxMsgLength = maxMsgSize;
		/* copy all the callbacks */
		pEndpoint->EpCallBacks = pConnectReq->EpCallbacks;
		/* set the credit distribution info for this endpoint, this information is
		 * passed back to the credit distribution callback function */
		pEndpoint->CreditDist.ServiceID = pConnectReq->ServiceID;
		pEndpoint->CreditDist.pHTCReserved = pEndpoint;
		pEndpoint->CreditDist.Endpoint = assignedEndpoint;
		pEndpoint->CreditDist.TxCreditSize = target->TargetCreditSize;

		if (pConnectReq->MaxSendMsgSize != 0) {
			/* override TxCreditsPerMaxMsg calculation, this optimizes the credit-low indications
			 * since the host will actually issue smaller messages in the Send path */
			if (pConnectReq->MaxSendMsgSize > maxMsgSize) {
				/* can't be larger than the maximum the target can support */
				AR_DEBUG_ASSERT(false);
				break;
			}
			pEndpoint->CreditDist.TxCreditsPerMaxMsg =
				pConnectReq->MaxSendMsgSize / target->TargetCreditSize;
		} else {
			pEndpoint->CreditDist.TxCreditsPerMaxMsg =
				maxMsgSize / target->TargetCreditSize;
		}

		if (0 == pEndpoint->CreditDist.TxCreditsPerMaxMsg) {
			/* a max message always costs at least one credit */
			pEndpoint->CreditDist.TxCreditsPerMaxMsg = 1;
		}

		/* save local connection flags */
		pEndpoint->LocalConnectionFlags = pConnectReq->LocalConnectionFlags;

		status = 0;

	} while (false);

	if (pSendPacket != NULL) {
		HTC_FREE_CONTROL_TX(target, pSendPacket);
	}

	if (pRecvPacket != NULL) {
		HTC_FREE_CONTROL_RX(target, pRecvPacket);
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCConnectService \n"));

	return status;
}

/*
 * Append pEpDist to the target's doubly-linked credit-distribution list.
 * O(n) tail walk is fine here: the list is only built at startup.
 */
static void AddToEndpointDistList(struct htc_target *target,
				  struct htc_endpoint_credit_dist *pEpDist)
{
	struct htc_endpoint_credit_dist *pCurEntry, *pLastEntry;

	if (NULL == target->EpCreditDistributionListHead) {
		target->EpCreditDistributionListHead = pEpDist;
		pEpDist->pNext = NULL;
		pEpDist->pPrev = NULL;
		return;
	}

	/* queue to the end of the list, this does not have to be very
	 * fast since this list is built at startup time */
	pCurEntry = target->EpCreditDistributionListHead;

	while (pCurEntry) {
		pLastEntry = pCurEntry;
		pCurEntry = pCurEntry->pNext;
	}

	pLastEntry->pNext = pEpDist;
	pEpDist->pPrev = pLastEntry;
	pEpDist->pNext = NULL;
}

/* default credit init callback */
static void HTCDefaultCreditInit(void *Context,
				 struct htc_endpoint_credit_dist *pEPList,
				 int TotalCredits)
{
	struct htc_endpoint_credit_dist *pCurEpDist;
	int totalEps = 0;
	int creditsPerEndpoint;

	pCurEpDist = pEPList;
	/* first run through the list and figure out how many endpoints we are dealing with */
	while (pCurEpDist != NULL) {
		pCurEpDist = pCurEpDist->pNext;
		totalEps++;
	}

	/* even distribution.
	 * NOTE(review): divides by totalEps, so this assumes a non-empty
	 * list — HTCSetCreditDistribution() always adds ENDPOINT_0 before
	 * the callback can run, but confirm no other caller passes an
	 * empty list. */
	creditsPerEndpoint = TotalCredits/totalEps;

	pCurEpDist = pEPList;
	/* run through the list and set minimum and normal credits and
	 * provide the endpoint with some credits to start */
	while (pCurEpDist != NULL) {

		if (creditsPerEndpoint < pCurEpDist->TxCreditsPerMaxMsg) {
			/* too many endpoints and not enough credits */
			AR_DEBUG_ASSERT(false);
			break;
		}
		/* our minimum is set for at least 1 max message */
		pCurEpDist->TxCreditsMin = pCurEpDist->TxCreditsPerMaxMsg;
		/* this value is ignored by our credit alg, since we do
		 * not dynamically adjust credits, this is the policy of
		 * the "default" credit distribution, something simple and easy */
		pCurEpDist->TxCreditsNorm = 0xFFFF;
		/* give the endpoint minimum credits */
		pCurEpDist->TxCredits = creditsPerEndpoint;
		pCurEpDist->TxCreditsAssigned = creditsPerEndpoint;
		pCurEpDist = pCurEpDist->pNext;
	}
}

/* default credit distribution callback, NOTE, this callback holds the TX lock */
void HTCDefaultCreditDist(void *Context,
			  struct htc_endpoint_credit_dist *pEPDistList,
			  HTC_CREDIT_DIST_REASON Reason)
{
	struct htc_endpoint_credit_dist *pCurEpDist;

	if (Reason == HTC_CREDIT_DIST_SEND_COMPLETE) {
		pCurEpDist = pEPDistList;
		/* simple distribution */
		while (pCurEpDist != NULL) {
			if (pCurEpDist->TxCreditsToDist > 0) {
				/* just give the endpoint back the credits */
				pCurEpDist->TxCredits += pCurEpDist->TxCreditsToDist;
				pCurEpDist->TxCreditsToDist = 0;
			}
			pCurEpDist = pCurEpDist->pNext;
		}
	}

	/* note we do not need to handle the other reason codes as this is a very
	 * simple distribution scheme, no need to seek for more credits or handle inactivity */
}

/*
 * HTCSetCreditDistribution - choose the credit-distribution policy.
 * Either installs the caller-supplied init/dist callback pair, or the
 * simple defaults above, then builds the distribution list: the HTC
 * control endpoint (ENDPOINT_0) first, followed by the endpoints whose
 * ServiceID matches ServicePriorityOrder[], in the caller's order.
 */
void HTCSetCreditDistribution(HTC_HANDLE HTCHandle,
			      void *pCreditDistContext,
			      HTC_CREDIT_DIST_CALLBACK CreditDistFunc,
			      HTC_CREDIT_INIT_CALLBACK CreditInitFunc,
			      HTC_SERVICE_ID ServicePriorityOrder[],
			      int ListLength)
{
	struct htc_target *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
	int i;
	int ep;

	if (CreditInitFunc != NULL) {
		/* caller has supplied their own distribution functions */
		target->InitCredits = CreditInitFunc;
		AR_DEBUG_ASSERT(CreditDistFunc != NULL);
		target->DistributeCredits = CreditDistFunc;
		target->pCredDistContext = pCreditDistContext;
	} else {
		/* caller wants HTC to do distribution */
		/* if caller wants service to handle distributions then
		 * it must set both of these to NULL! */
		AR_DEBUG_ASSERT(CreditDistFunc == NULL);
		target->InitCredits = HTCDefaultCreditInit;
		target->DistributeCredits = HTCDefaultCreditDist;
		target->pCredDistContext = target;
	}

	/* always add HTC control endpoint first, we only expose the list after the
	 * first one, this is added for TX queue checking */
	AddToEndpointDistList(target, &target->EndPoint[ENDPOINT_0].CreditDist);

	/* build the list of credit distribution structures in priority order
	 * supplied by the caller, these will follow endpoint 0 */
	for (i = 0; i < ListLength; i++) {
		/* match services with endpoints and add the endpoints to the distribution list
		 * in FIFO order */
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			if (target->EndPoint[ep].ServiceID == ServicePriorityOrder[i]) {
				/* queue this one to the list */
				AddToEndpointDistList(target, &target->EndPoint[ep].CreditDist);
				break;
			}
		}
		AR_DEBUG_ASSERT(ep < ENDPOINT_MAX);
	}
}
gpl-2.0