repo_name
string
path
string
copies
string
size
string
content
string
license
string
hephaex/kernel
drivers/ata/pata_atp867x.c
398
14662
/* * pata_atp867x.c - ARTOP 867X 64bit 4-channel UDMA133 ATA controller driver * * (C) 2009 Google Inc. John(Jung-Ik) Lee <jilee@google.com> * * Per Atp867 data sheet rev 1.2, Acard. * Based in part on early ide code from * 2003-2004 by Eric Uhrhane, Google, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * TODO: * 1. RAID features [comparison, XOR, striping, mirroring, etc.] 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_atp867x" #define DRV_VERSION "0.7.5" /* * IO Registers * Note that all runtime hot priv ports are cached in ap private_data */ enum { ATP867X_IO_CHANNEL_OFFSET = 0x10, /* * IO Register Bitfields */ ATP867X_IO_PIOSPD_ACTIVE_SHIFT = 4, ATP867X_IO_PIOSPD_RECOVER_SHIFT = 0, ATP867X_IO_DMAMODE_MSTR_SHIFT = 0, ATP867X_IO_DMAMODE_MSTR_MASK = 0x07, ATP867X_IO_DMAMODE_SLAVE_SHIFT = 4, ATP867X_IO_DMAMODE_SLAVE_MASK = 0x70, ATP867X_IO_DMAMODE_UDMA_6 = 0x07, ATP867X_IO_DMAMODE_UDMA_5 = 0x06, ATP867X_IO_DMAMODE_UDMA_4 = 0x05, ATP867X_IO_DMAMODE_UDMA_3 = 0x04, ATP867X_IO_DMAMODE_UDMA_2 = 0x03, ATP867X_IO_DMAMODE_UDMA_1 = 0x02, ATP867X_IO_DMAMODE_UDMA_0 = 0x01, ATP867X_IO_DMAMODE_DISABLE = 0x00, ATP867X_IO_SYS_INFO_66MHZ = 0x04, ATP867X_IO_SYS_INFO_SLOW_UDMA5 = 0x02, ATP867X_IO_SYS_MASK_RESERVED = (~0xf1), ATP867X_IO_PORTSPD_VAL = 0x1143, ATP867X_PREREAD_VAL = 0x0200, ATP867X_NUM_PORTS = 4, ATP867X_BAR_IOBASE = 0, ATP867X_BAR_ROMBASE = 6, }; #define ATP867X_IOBASE(ap) ((ap)->host->iomap[0]) #define ATP867X_SYS_INFO(ap) (0x3F + ATP867X_IOBASE(ap)) #define ATP867X_IO_PORTBASE(ap, port) (0x00 + ATP867X_IOBASE(ap) + \ (port) * ATP867X_IO_CHANNEL_OFFSET) #define ATP867X_IO_DMABASE(ap, port) (0x40 + \ ATP867X_IO_PORTBASE((ap), (port))) #define ATP867X_IO_STATUS(ap, port) (0x07 + \ ATP867X_IO_PORTBASE((ap), (port))) #define ATP867X_IO_ALTSTATUS(ap, port) (0x0E + \ ATP867X_IO_PORTBASE((ap), (port))) /* * hot priv ports */ #define ATP867X_IO_MSTRPIOSPD(ap, port) (0x08 + \ ATP867X_IO_DMABASE((ap), (port))) #define ATP867X_IO_SLAVPIOSPD(ap, port) (0x09 + \ ATP867X_IO_DMABASE((ap), (port))) #define ATP867X_IO_8BPIOSPD(ap, port) (0x0A + \ ATP867X_IO_DMABASE((ap), (port))) #define ATP867X_IO_DMAMODE(ap, port) (0x0B + \ 
ATP867X_IO_DMABASE((ap), (port))) #define ATP867X_IO_PORTSPD(ap, port) (0x4A + \ ATP867X_IO_PORTBASE((ap), (port))) #define ATP867X_IO_PREREAD(ap, port) (0x4C + \ ATP867X_IO_PORTBASE((ap), (port))) struct atp867x_priv { void __iomem *dma_mode; void __iomem *mstr_piospd; void __iomem *slave_piospd; void __iomem *eightb_piospd; int pci66mhz; }; static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct atp867x_priv *dp = ap->private_data; u8 speed = adev->dma_mode; u8 b; u8 mode = speed - XFER_UDMA_0 + 1; /* * Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed * on 66MHz bus * rev-A: UDMA_1~4 (5, 6 no change) * rev-B: all UDMA modes * UDMA_0 stays not to disable UDMA */ if (dp->pci66mhz && mode > ATP867X_IO_DMAMODE_UDMA_0 && (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B || mode < ATP867X_IO_DMAMODE_UDMA_5)) mode--; b = ioread8(dp->dma_mode); if (adev->devno & 1) { b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK) | (mode << ATP867X_IO_DMAMODE_SLAVE_SHIFT); } else { b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK) | (mode << ATP867X_IO_DMAMODE_MSTR_SHIFT); } iowrite8(b, dp->dma_mode); } static int atp867x_get_active_clocks_shifted(struct ata_port *ap, unsigned int clk) { struct atp867x_priv *dp = ap->private_data; unsigned char clocks = clk; /* * Doc 6.6.9: increase the clock value by 1 for safer PIO speed * on 66MHz bus */ if (dp->pci66mhz) clocks++; switch (clocks) { case 0: clocks = 1; break; case 1 ... 6: break; default: printk(KERN_WARNING "ATP867X: active %dclk is invalid. " "Using 12clk.\n", clk); case 9 ... 12: clocks = 7; /* 12 clk */ break; case 7: case 8: /* default 8 clk */ clocks = 0; goto active_clock_shift_done; } active_clock_shift_done: return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT; } static int atp867x_get_recover_clocks_shifted(unsigned int clk) { unsigned char clocks = clk; switch (clocks) { case 0: clocks = 1; break; case 1 ... 
11: break; case 13: case 14: --clocks; /* by the spec */ break; case 15: break; default: printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " "Using default 12clk.\n", clk); case 12: /* default 12 clk */ clocks = 0; break; } return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT; } static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ata_device *peer = ata_dev_pair(adev); struct atp867x_priv *dp = ap->private_data; u8 speed = adev->pio_mode; struct ata_timing t, p; int T, UT; u8 b; T = 1000000000 / 33333; UT = T / 4; ata_timing_compute(adev, speed, &t, T, UT); if (peer && peer->pio_mode) { ata_timing_compute(peer, peer->pio_mode, &p, T, UT); ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT); } b = ioread8(dp->dma_mode); if (adev->devno & 1) b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK); else b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK); iowrite8(b, dp->dma_mode); b = atp867x_get_active_clocks_shifted(ap, t.active) | atp867x_get_recover_clocks_shifted(t.recover); if (adev->devno & 1) iowrite8(b, dp->slave_piospd); else iowrite8(b, dp->mstr_piospd); b = atp867x_get_active_clocks_shifted(ap, t.act8b) | atp867x_get_recover_clocks_shifted(t.rec8b); iowrite8(b, dp->eightb_piospd); } static int atp867x_cable_override(struct pci_dev *pdev) { if (pdev->subsystem_vendor == PCI_VENDOR_ID_ARTOP && (pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867A || pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867B)) { return 1; } return 0; } static int atp867x_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (atp867x_cable_override(pdev)) return ATA_CBL_PATA40_SHORT; return ATA_CBL_PATA_UNK; } static struct scsi_host_template atp867x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations atp867x_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = atp867x_cable_detect, .set_piomode = atp867x_set_piomode, .set_dmamode = atp867x_set_dmamode, }; #ifdef ATP867X_DEBUG static void atp867x_check_res(struct pci_dev 
*pdev) { int i; unsigned long start, len; /* Check the PCI resources for this channel are enabled */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { start = pci_resource_start(pdev, i); len = pci_resource_len(pdev, i); printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n", start, len); } } static void atp867x_check_ports(struct ata_port *ap, int port) { struct ata_ioports *ioaddr = &ap->ioaddr; struct atp867x_priv *dp = ap->private_data; printk(KERN_DEBUG "ATP867X: port[%d] addresses\n" " cmd_addr =0x%llx, 0x%llx\n" " ctl_addr =0x%llx, 0x%llx\n" " bmdma_addr =0x%llx, 0x%llx\n" " data_addr =0x%llx\n" " error_addr =0x%llx\n" " feature_addr =0x%llx\n" " nsect_addr =0x%llx\n" " lbal_addr =0x%llx\n" " lbam_addr =0x%llx\n" " lbah_addr =0x%llx\n" " device_addr =0x%llx\n" " status_addr =0x%llx\n" " command_addr =0x%llx\n" " dp->dma_mode =0x%llx\n" " dp->mstr_piospd =0x%llx\n" " dp->slave_piospd =0x%llx\n" " dp->eightb_piospd =0x%llx\n" " dp->pci66mhz =0x%lx\n", port, (unsigned long long)ioaddr->cmd_addr, (unsigned long long)ATP867X_IO_PORTBASE(ap, port), (unsigned long long)ioaddr->ctl_addr, (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port), (unsigned long long)ioaddr->bmdma_addr, (unsigned long long)ATP867X_IO_DMABASE(ap, port), (unsigned long long)ioaddr->data_addr, (unsigned long long)ioaddr->error_addr, (unsigned long long)ioaddr->feature_addr, (unsigned long long)ioaddr->nsect_addr, (unsigned long long)ioaddr->lbal_addr, (unsigned long long)ioaddr->lbam_addr, (unsigned long long)ioaddr->lbah_addr, (unsigned long long)ioaddr->device_addr, (unsigned long long)ioaddr->status_addr, (unsigned long long)ioaddr->command_addr, (unsigned long long)dp->dma_mode, (unsigned long long)dp->mstr_piospd, (unsigned long long)dp->slave_piospd, (unsigned long long)dp->eightb_piospd, (unsigned long)dp->pci66mhz); } #endif static int atp867x_set_priv(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct atp867x_priv *dp; int port = ap->port_no; dp = 
ap->private_data = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); if (dp == NULL) return -ENOMEM; dp->dma_mode = ATP867X_IO_DMAMODE(ap, port); dp->mstr_piospd = ATP867X_IO_MSTRPIOSPD(ap, port); dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port); dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port); dp->pci66mhz = ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ; return 0; } static void atp867x_fixup(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct ata_port *ap = host->ports[0]; int i; u8 v; /* * Broken BIOS might not set latency high enough */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &v); if (v < 0x80) { v = 0x80; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v); printk(KERN_DEBUG "ATP867X: set latency timer of device %s" " to %d\n", pci_name(pdev), v); } /* * init 8bit io ports speed(0aaarrrr) to 43h and * init udma modes of master/slave to 0/0(11h) */ for (i = 0; i < ATP867X_NUM_PORTS; i++) iowrite16(ATP867X_IO_PORTSPD_VAL, ATP867X_IO_PORTSPD(ap, i)); /* * init PreREAD counts */ for (i = 0; i < ATP867X_NUM_PORTS; i++) iowrite16(ATP867X_PREREAD_VAL, ATP867X_IO_PREREAD(ap, i)); v = ioread8(ATP867X_IOBASE(ap) + 0x28); v &= 0xcf; /* Enable INTA#: bit4=0 means enable */ v |= 0xc0; /* Enable PCI burst, MRM & not immediate interrupts */ iowrite8(v, ATP867X_IOBASE(ap) + 0x28); /* * Turn off the over clocked udma5 mode, only for Rev-B */ v = ioread8(ATP867X_SYS_INFO(ap)); v &= ATP867X_IO_SYS_MASK_RESERVED; if (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B) v |= ATP867X_IO_SYS_INFO_SLOW_UDMA5; iowrite8(v, ATP867X_SYS_INFO(ap)); } static int atp867x_ata_pci_sff_init_host(struct ata_host *host) { struct device *gdev = host->dev; struct pci_dev *pdev = to_pci_dev(gdev); unsigned int mask = 0; int i, rc; /* * do not map rombase */ rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); #ifdef ATP867X_DEBUG 
atp867x_check_res(pdev); for (i = 0; i < PCI_ROM_RESOURCE; i++) printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i, (unsigned long long)(host->iomap[i])); #endif /* * request, iomap BARs and init port addresses accordingly */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; struct ata_ioports *ioaddr = &ap->ioaddr; ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i); ioaddr->ctl_addr = ioaddr->altstatus_addr = ATP867X_IO_ALTSTATUS(ap, i); ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i); ata_sff_std_ports(ioaddr); rc = atp867x_set_priv(ap); if (rc) return rc; #ifdef ATP867X_DEBUG atp867x_check_ports(ap, i); #endif ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)ioaddr->cmd_addr, (unsigned long)ioaddr->ctl_addr); ata_port_desc(ap, "bmdma 0x%lx", (unsigned long)ioaddr->bmdma_addr); mask |= 1 << i; } if (!mask) { dev_err(gdev, "no available native port\n"); return -ENODEV; } atp867x_fixup(host); rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); return rc; } static int atp867x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info_867x = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &atp867x_ops, }; struct ata_host *host; const struct ata_port_info *ppi[] = { &info_867x, NULL }; int rc; ata_print_version_once(&pdev->dev, DRV_VERSION); rc = pcim_enable_device(pdev); if (rc) return rc; printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)", pdev->device); host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS); if (!host) { dev_err(&pdev->dev, "failed to allocate ATA host\n"); rc = -ENOMEM; goto err_out; } rc = atp867x_ata_pci_sff_init_host(host); if (rc) { dev_err(&pdev->dev, "failed to init host\n"); goto err_out; } pci_set_master(pdev); rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &atp867x_sht); if (rc) dev_err(&pdev->dev, "failed to 
activate host\n"); err_out: return rc; } #ifdef CONFIG_PM static int atp867x_reinit_one(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; atp867x_fixup(host); ata_host_resume(host); return 0; } #endif static struct pci_device_id atp867x_pci_tbl[] = { { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 }, { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 }, { }, }; static struct pci_driver atp867x_driver = { .name = DRV_NAME, .id_table = atp867x_pci_tbl, .probe = atp867x_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = atp867x_reinit_one, #endif }; module_pci_driver(atp867x_driver); MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc."); MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
fritsch/linux
drivers/ata/pata_sl82c105.c
398
9488
/* * pata_sl82c105.c - SL82C105 PATA for new ATA layer * (C) 2005 Red Hat Inc * (C) 2011 Bartlomiej Zolnierkiewicz * * Based in part on linux/drivers/ide/pci/sl82c105.c * SL82C105/Winbond 553 IDE driver * * and in part on the documentation and errata sheet * * * Note: The controller like many controllers has shared timings for * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back * in the dma_stop function. Thus we actually don't need a set_dmamode * method as the PIO method is always called and will set the right PIO * timing parameters. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_sl82c105" #define DRV_VERSION "0.3.3" enum { /* * SL82C105 PCI config register 0x40 bits. */ CTRL_IDE_IRQB = (1 << 30), CTRL_IDE_IRQA = (1 << 28), CTRL_LEGIRQ = (1 << 11), CTRL_P1F16 = (1 << 5), CTRL_P1EN = (1 << 4), CTRL_P0F16 = (1 << 1), CTRL_P0EN = (1 << 0) }; /** * sl82c105_pre_reset - probe begin * @link: ATA link * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits sl82c105_enable_bits[] = { { 0x40, 1, 0x01, 0x01 }, { 0x40, 1, 0x10, 0x10 } }; struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * sl82c105_configure_piomode - set chip PIO timing * @ap: ATA interface * @adev: ATA device * @pio: PIO mode * * Called to do the PIO mode setup. 
Our timing registers are shared * so a configure_dmamode call will undo any work we do here and vice * versa */ static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static u16 pio_timing[5] = { 0x50D, 0x407, 0x304, 0x242, 0x240 }; u16 dummy; int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno); pci_write_config_word(pdev, timing, pio_timing[pio]); /* Can we lose this oddity of the old driver */ pci_read_config_word(pdev, timing, &dummy); } /** * sl82c105_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Called to do the PIO mode setup. Our timing registers are shared * but we want to set the PIO timing by default. */ static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev) { sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); } /** * sl82c105_configure_dmamode - set DMA mode in chip * @ap: ATA interface * @adev: ATA device * * Load DMA cycle times into the chip ready for a DMA transfer * to occur. */ static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static u16 dma_timing[3] = { 0x707, 0x201, 0x200 }; u16 dummy; int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno); int dma = adev->dma_mode - XFER_MW_DMA_0; pci_write_config_word(pdev, timing, dma_timing[dma]); /* Can we lose this oddity of the old driver */ pci_read_config_word(pdev, timing, &dummy); } /** * sl82c105_reset_engine - Reset the DMA engine * @ap: ATA interface * * The sl82c105 has some serious problems with the DMA engine * when transfers don't run as expected or ATAPI is used. The * recommended fix is to reset the engine each use using a chip * test register. 
*/ static void sl82c105_reset_engine(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u16 val; pci_read_config_word(pdev, 0x7E, &val); pci_write_config_word(pdev, 0x7E, val | 4); pci_write_config_word(pdev, 0x7E, val & ~4); } /** * sl82c105_bmdma_start - DMA engine begin * @qc: ATA command * * Reset the DMA engine each use as recommended by the errata * document. * * FIXME: if we switch clock at BMDMA start/end we might get better * PIO performance on DMA capable devices. */ static void sl82c105_bmdma_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; udelay(100); sl82c105_reset_engine(ap); udelay(100); /* Set the clocks for DMA */ sl82c105_configure_dmamode(ap, qc->dev); /* Activate DMA */ ata_bmdma_start(qc); } /** * sl82c105_bmdma_end - DMA engine stop * @qc: ATA command * * Reset the DMA engine each use as recommended by the errata * document. * * This function is also called to turn off DMA when a timeout occurs * during DMA operation. In both cases we need to reset the engine, * so no actual eng_timeout handler is required. * * We assume bmdma_stop is always called if bmdma_start as called. If * not then we may need to wrap qc_issue. */ static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; ata_bmdma_stop(qc); sl82c105_reset_engine(ap); udelay(100); /* This will redo the initial setup of the DMA device to matching PIO timings */ sl82c105_set_piomode(ap, qc->dev); } /** * sl82c105_qc_defer - implement serialization * @qc: command * * We must issue one command per host not per channel because * of the reset bug. * * Q: is the scsi host lock sufficient ? */ static int sl82c105_qc_defer(struct ata_queued_cmd *qc) { struct ata_host *host = qc->ap->host; struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; int rc; /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; /* Now apply serialization rules. 
Only allow a command if the other channel state machine is idle */ if (alt && alt->qc_active) return ATA_DEFER_PORT; return 0; } static bool sl82c105_sff_irq_check(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; pci_read_config_dword(pdev, 0x40, &val); return val & mask; } static struct scsi_host_template sl82c105_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations sl82c105_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_defer = sl82c105_qc_defer, .bmdma_start = sl82c105_bmdma_start, .bmdma_stop = sl82c105_bmdma_stop, .cable_detect = ata_cable_40wire, .set_piomode = sl82c105_set_piomode, .prereset = sl82c105_pre_reset, .sff_irq_check = sl82c105_sff_irq_check, }; /** * sl82c105_bridge_revision - find bridge version * @pdev: PCI device for the ATA function * * Locates the PCI bridge associated with the ATA function and * providing it is a Winbond 553 reports the revision. If it cannot * find a revision or the right device it returns -1 */ static int sl82c105_bridge_revision(struct pci_dev *pdev) { struct pci_dev *bridge; /* * The bridge should be part of the same device, but function 0. */ bridge = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); if (!bridge) return -1; /* * Make sure it is a Winbond 553 and is an ISA bridge. 
*/ if (bridge->vendor != PCI_VENDOR_ID_WINBOND || bridge->device != PCI_DEVICE_ID_WINBOND_83C553 || bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) { pci_dev_put(bridge); return -1; } /* * We need to find function 0's revision, not function 1 */ pci_dev_put(bridge); return bridge->revision; } static void sl82c105_fixup(struct pci_dev *pdev) { u32 val; pci_read_config_dword(pdev, 0x40, &val); val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; pci_write_config_dword(pdev, 0x40, val); } static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info_dma = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &sl82c105_port_ops }; static const struct ata_port_info info_early = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &sl82c105_port_ops }; /* for now use only the first port */ const struct ata_port_info *ppi[] = { &info_early, NULL }; int rev; int rc; rc = pcim_enable_device(dev); if (rc) return rc; rev = sl82c105_bridge_revision(dev); if (rev == -1) dev_warn(&dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA\n"); else if (rev <= 5) dev_warn(&dev->dev, "pata_sl82c105: Early bridge revision, no DMA available\n"); else ppi[0] = &info_dma; sl82c105_fixup(dev); return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); } #ifdef CONFIG_PM static int sl82c105_reinit_one(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; sl82c105_fixup(pdev); ata_host_resume(host); return 0; } #endif static const struct pci_device_id sl82c105[] = { { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), }, { }, }; static struct pci_driver sl82c105_pci_driver = { .name = DRV_NAME, .id_table = sl82c105, .probe = sl82c105_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = sl82c105_reinit_one, #endif }; 
module_pci_driver(sl82c105_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Sl82c105"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sl82c105); MODULE_VERSION(DRV_VERSION);
gpl-2.0
radiohap/prd
drivers/platform/chrome/cros_ec_lpc.c
398
9604
/* * cros_ec_lpc - LPC access to the Chrome OS Embedded Controller * * Copyright (C) 2012-2015 Google, Inc * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This driver uses the Chrome OS EC byte-level message-based protocol for * communicating the keyboard state (which keys are pressed) from a keyboard EC * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, * but everything else (including deghosting) is done here. The main * motivation for this is to keep the EC firmware as simple as possible, since * it cannot be easily upgraded and EC flash/IRAM space is relatively * expensive. 
*/ #include <linux/dmi.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/mfd/cros_ec.h> #include <linux/mfd/cros_ec_commands.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/printk.h> #define DRV_NAME "cros_ec_lpc" static int ec_response_timed_out(void) { unsigned long one_second = jiffies + HZ; usleep_range(200, 300); do { if (!(inb(EC_LPC_ADDR_HOST_CMD) & EC_LPC_STATUS_BUSY_MASK)) return 0; usleep_range(100, 200); } while (time_before(jiffies, one_second)); return 1; } static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { struct ec_host_request *request; struct ec_host_response response; u8 sum = 0; int i; int ret = 0; u8 *dout; ret = cros_ec_prepare_tx(ec, msg); /* Write buffer */ for (i = 0; i < ret; i++) outb(ec->dout[i], EC_LPC_ADDR_HOST_PACKET + i); request = (struct ec_host_request *)ec->dout; /* Here we go */ outb(EC_COMMAND_PROTOCOL_3, EC_LPC_ADDR_HOST_CMD); if (ec_response_timed_out()) { dev_warn(ec->dev, "EC responsed timed out\n"); ret = -EIO; goto done; } /* Check result */ msg->result = inb(EC_LPC_ADDR_HOST_DATA); ret = cros_ec_check_result(ec, msg); if (ret) goto done; /* Read back response */ dout = (u8 *)&response; for (i = 0; i < sizeof(response); i++) { dout[i] = inb(EC_LPC_ADDR_HOST_PACKET + i); sum += dout[i]; } msg->result = response.result; if (response.data_len > msg->insize) { dev_err(ec->dev, "packet too long (%d bytes, expected %d)", response.data_len, msg->insize); ret = -EMSGSIZE; goto done; } /* Read response and process checksum */ for (i = 0; i < response.data_len; i++) { msg->data[i] = inb(EC_LPC_ADDR_HOST_PACKET + sizeof(response) + i); sum += msg->data[i]; } if (sum) { dev_err(ec->dev, "bad packet checksum %02x\n", response.checksum); ret = -EBADMSG; goto done; } /* Return actual amount of data received */ ret = response.data_len; done: return ret; } static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { struct 
ec_lpc_host_args args; int csum; int i; int ret = 0; if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE || msg->insize > EC_PROTO2_MAX_PARAM_SIZE) { dev_err(ec->dev, "invalid buffer sizes (out %d, in %d)\n", msg->outsize, msg->insize); return -EINVAL; } /* Now actually send the command to the EC and get the result */ args.flags = EC_HOST_ARGS_FLAG_FROM_HOST; args.command_version = msg->version; args.data_size = msg->outsize; /* Initialize checksum */ csum = msg->command + args.flags + args.command_version + args.data_size; /* Copy data and update checksum */ for (i = 0; i < msg->outsize; i++) { outb(msg->data[i], EC_LPC_ADDR_HOST_PARAM + i); csum += msg->data[i]; } /* Finalize checksum and write args */ args.checksum = csum & 0xFF; outb(args.flags, EC_LPC_ADDR_HOST_ARGS); outb(args.command_version, EC_LPC_ADDR_HOST_ARGS + 1); outb(args.data_size, EC_LPC_ADDR_HOST_ARGS + 2); outb(args.checksum, EC_LPC_ADDR_HOST_ARGS + 3); /* Here we go */ outb(msg->command, EC_LPC_ADDR_HOST_CMD); if (ec_response_timed_out()) { dev_warn(ec->dev, "EC responsed timed out\n"); ret = -EIO; goto done; } /* Check result */ msg->result = inb(EC_LPC_ADDR_HOST_DATA); switch (msg->result) { case EC_RES_SUCCESS: break; case EC_RES_IN_PROGRESS: ret = -EAGAIN; dev_dbg(ec->dev, "command 0x%02x in progress\n", msg->command); goto done; default: dev_dbg(ec->dev, "command 0x%02x returned %d\n", msg->command, msg->result); } /* Read back args */ args.flags = inb(EC_LPC_ADDR_HOST_ARGS); args.command_version = inb(EC_LPC_ADDR_HOST_ARGS + 1); args.data_size = inb(EC_LPC_ADDR_HOST_ARGS + 2); args.checksum = inb(EC_LPC_ADDR_HOST_ARGS + 3); if (args.data_size > msg->insize) { dev_err(ec->dev, "packet too long (%d bytes, expected %d)", args.data_size, msg->insize); ret = -ENOSPC; goto done; } /* Start calculating response checksum */ csum = msg->command + args.flags + args.command_version + args.data_size; /* Read response and update checksum */ for (i = 0; i < args.data_size; i++) { msg->data[i] = 
inb(EC_LPC_ADDR_HOST_PARAM + i); csum += msg->data[i]; } /* Verify checksum */ if (args.checksum != (csum & 0xFF)) { dev_err(ec->dev, "bad packet checksum, expected %02x, got %02x\n", args.checksum, csum & 0xFF); ret = -EBADMSG; goto done; } /* Return actual amount of data received */ ret = args.data_size; done: return ret; } /* Returns num bytes read, or negative on error. Doesn't need locking. */ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset, unsigned int bytes, void *dest) { int i = offset; char *s = dest; int cnt = 0; if (offset >= EC_MEMMAP_SIZE - bytes) return -EINVAL; /* fixed length */ if (bytes) { for (; cnt < bytes; i++, s++, cnt++) *s = inb(EC_LPC_ADDR_MEMMAP + i); return cnt; } /* string */ for (; i < EC_MEMMAP_SIZE; i++, s++) { *s = inb(EC_LPC_ADDR_MEMMAP + i); cnt++; if (!*s) break; } return cnt; } static int cros_ec_lpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct cros_ec_device *ec_dev; int ret; if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE, dev_name(dev))) { dev_err(dev, "couldn't reserve memmap region\n"); return -EBUSY; } if ((inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID) != 'E') || (inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID + 1) != 'C')) { dev_err(dev, "EC ID not detected\n"); return -ENODEV; } if (!devm_request_region(dev, EC_HOST_CMD_REGION0, EC_HOST_CMD_REGION_SIZE, dev_name(dev))) { dev_err(dev, "couldn't reserve region0\n"); return -EBUSY; } if (!devm_request_region(dev, EC_HOST_CMD_REGION1, EC_HOST_CMD_REGION_SIZE, dev_name(dev))) { dev_err(dev, "couldn't reserve region1\n"); return -EBUSY; } ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL); if (!ec_dev) return -ENOMEM; platform_set_drvdata(pdev, ec_dev); ec_dev->dev = dev; ec_dev->phys_name = dev_name(dev); ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc; ec_dev->pkt_xfer = cros_ec_pkt_xfer_lpc; ec_dev->cmd_readmem = cros_ec_lpc_readmem; ec_dev->din_size = sizeof(struct ec_host_response) + sizeof(struct 
ec_response_get_protocol_info); ec_dev->dout_size = sizeof(struct ec_host_request); ret = cros_ec_register(ec_dev); if (ret) { dev_err(dev, "couldn't register ec_dev (%d)\n", ret); return ret; } return 0; } static int cros_ec_lpc_remove(struct platform_device *pdev) { struct cros_ec_device *ec_dev; ec_dev = platform_get_drvdata(pdev); cros_ec_remove(ec_dev); return 0; } static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { { /* * Today all Chromebooks/boxes ship with Google_* as version and * coreboot as bios vendor. No other systems with this * combination are known to date. */ .matches = { DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), DMI_MATCH(DMI_BIOS_VERSION, "Google_"), }, }, { /* x86-link, the Chromebook Pixel. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Link"), }, }, { /* x86-peppy, the Acer C720 Chromebook. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"), }, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table); static struct platform_driver cros_ec_lpc_driver = { .driver = { .name = DRV_NAME, }, .probe = cros_ec_lpc_probe, .remove = cros_ec_lpc_remove, }; static struct platform_device cros_ec_lpc_device = { .name = DRV_NAME }; static int __init cros_ec_lpc_init(void) { int ret; if (!dmi_check_system(cros_ec_lpc_dmi_table)) { pr_err(DRV_NAME ": unsupported system.\n"); return -ENODEV; } /* Register the driver */ ret = platform_driver_register(&cros_ec_lpc_driver); if (ret) { pr_err(DRV_NAME ": can't register driver: %d\n", ret); return ret; } /* Register the device, and it'll get hooked up automatically */ ret = platform_device_register(&cros_ec_lpc_device); if (ret) { pr_err(DRV_NAME ": can't register device: %d\n", ret); platform_driver_unregister(&cros_ec_lpc_driver); return ret; } return 0; } static void __exit cros_ec_lpc_exit(void) { platform_device_unregister(&cros_ec_lpc_device); platform_driver_unregister(&cros_ec_lpc_driver); } 
/* Module entry/exit points and metadata for the ChromeOS EC LPC driver. */
module_init(cros_ec_lpc_init);
module_exit(cros_ec_lpc_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS EC LPC driver");
gpl-2.0
Asure/Dropad-kernel-2.6.35.7
arch/mips/math-emu/dp_modf.c
1678
2064
/* IEEE754 floating point arithmetic
 * double precision: common utilities
 */
/*
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 * http://www.algor.co.uk
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754dp.h"

/*
 * ieee754dp_modf - split a double into integral and fractional parts.
 * @x:  value to split
 * @ip: out-parameter receiving the integral part (truncated toward zero)
 *
 * Returns the fractional part; *ip receives the integral part, both with
 * the sign of @x.  modf is always exact for a finite number, so no
 * exception flags are raised for normal inputs (CLEARCX below resets the
 * soft-float exception state before classification).
 *
 * COMPXDP/EXPLODEXDP are ieee754dp.h macros that declare and fill the
 * working sign/exponent/mantissa variables xs/xe/xm and the class xc.
 */

/* modf function is always exact for a finite number */
ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp * ip)
{
	COMPXDP;

	CLEARCX;

	EXPLODEXDP;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
	case IEEE754_CLASS_QNAN:
	case IEEE754_CLASS_INF:
	case IEEE754_CLASS_ZERO:
		/* NaN/Inf/0: both parts are the input itself (NaN propagates;
		 * for zero both integral and fractional parts are zero). */
		*ip = x;
		return x;
	case IEEE754_CLASS_DNORM:
		/* far too small: |x| < 1, so integral part is signed zero
		 * and the fractional part is x unchanged */
		*ip = ieee754dp_zero(xs);
		return x;
	case IEEE754_CLASS_NORM:
		break;
	}
	if (xe < 0) {
		/* normal but |x| < 1: integral part is signed zero */
		*ip = ieee754dp_zero(xs);
		return x;
	}
	if (xe >= DP_MBITS) {
		/* exponent beyond mantissa width: x is already integral,
		 * fractional part is signed zero */
		*ip = x;
		return ieee754dp_zero(xs);
	}
	/* generate ipart mantissa by clearing bottom (DP_MBITS - xe)
	 * fraction bits; hidden bit is stripped before rebuilding */
	*ip = builddp(xs, xe + DP_EBIAS,
		      ((xm >> (DP_MBITS - xe)) << (DP_MBITS - xe)) &
		      ~DP_HIDDEN_BIT);

	/* generate fpart mantissa by clearing top bits
	 * and normalizing (must be able to normalize)
	 */
	xm = (xm << (64 - (DP_MBITS - xe))) >> (64 - (DP_MBITS - xe));
	if (xm == 0)
		/* no fraction bits survive: fractional part is signed zero */
		return ieee754dp_zero(xs);

	/* left-normalize until the implicit hidden bit reaches DP_MBITS,
	 * decrementing the (unbiased) exponent to compensate */
	while ((xm >> DP_MBITS) == 0) {
		xm <<= 1;
		xe--;
	}
	return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
}
gpl-2.0
rukin5197/android_kernel_htc_msm7x30
drivers/platform/x86/wmi.c
2190
23561
/* * ACPI-WMI mapping driver * * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk> * * GUID parsing code from ldm.c is: * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org> * Copyright (c) 2001-2007 Anton Altaparmakov * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/list.h> #include <linux/acpi.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> ACPI_MODULE_NAME("wmi"); MODULE_AUTHOR("Carlos Corbacho"); MODULE_DESCRIPTION("ACPI-WMI Mapping Driver"); MODULE_LICENSE("GPL"); #define ACPI_WMI_CLASS "wmi" static DEFINE_MUTEX(wmi_data_lock); static LIST_HEAD(wmi_block_list); struct guid_block { char guid[16]; union { char object_id[2]; struct { unsigned char notify_id; unsigned char reserved; }; }; u8 instance_count; u8 flags; }; struct wmi_block { struct list_head list; struct guid_block gblock; acpi_handle handle; wmi_notify_handler handler; void *handler_data; struct device dev; }; /* * If the GUID data block is marked as expensive, we must enable and * explicitily disable data collection. 
*/ #define ACPI_WMI_EXPENSIVE 0x1 #define ACPI_WMI_METHOD 0x2 /* GUID is a method */ #define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */ #define ACPI_WMI_EVENT 0x8 /* GUID is an event */ static int debug_event; module_param(debug_event, bool, 0444); MODULE_PARM_DESC(debug_event, "Log WMI Events [0/1]"); static int debug_dump_wdg; module_param(debug_dump_wdg, bool, 0444); MODULE_PARM_DESC(debug_dump_wdg, "Dump available WMI interfaces [0/1]"); static int acpi_wmi_remove(struct acpi_device *device, int type); static int acpi_wmi_add(struct acpi_device *device); static void acpi_wmi_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id wmi_device_ids[] = { {"PNP0C14", 0}, {"pnp0c14", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, wmi_device_ids); static struct acpi_driver acpi_wmi_driver = { .name = "wmi", .class = ACPI_WMI_CLASS, .ids = wmi_device_ids, .ops = { .add = acpi_wmi_add, .remove = acpi_wmi_remove, .notify = acpi_wmi_notify, }, }; /* * GUID parsing functions */ /** * wmi_parse_hexbyte - Convert a ASCII hex number to a byte * @src: Pointer to at least 2 characters to convert. * * Convert a two character ASCII hex string to a number. 
* * Return: 0-255 Success, the byte was parsed correctly * -1 Error, an invalid character was supplied */ static int wmi_parse_hexbyte(const u8 *src) { int h; int value; /* high part */ h = value = hex_to_bin(src[0]); if (value < 0) return -1; /* low part */ value = hex_to_bin(src[1]); if (value >= 0) return (h << 4) | value; return -1; } /** * wmi_swap_bytes - Rearrange GUID bytes to match GUID binary * @src: Memory block holding binary GUID (16 bytes) * @dest: Memory block to hold byte swapped binary GUID (16 bytes) * * Byte swap a binary GUID to match it's real GUID value */ static void wmi_swap_bytes(u8 *src, u8 *dest) { int i; for (i = 0; i <= 3; i++) memcpy(dest + i, src + (3 - i), 1); for (i = 0; i <= 1; i++) memcpy(dest + 4 + i, src + (5 - i), 1); for (i = 0; i <= 1; i++) memcpy(dest + 6 + i, src + (7 - i), 1); memcpy(dest + 8, src + 8, 8); } /** * wmi_parse_guid - Convert GUID from ASCII to binary * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba * @dest: Memory block to hold binary GUID (16 bytes) * * N.B. The GUID need not be NULL terminated. 
* * Return: 'true' @dest contains binary GUID * 'false' @dest contents are undefined */ static bool wmi_parse_guid(const u8 *src, u8 *dest) { static const int size[] = { 4, 2, 2, 2, 6 }; int i, j, v; if (src[8] != '-' || src[13] != '-' || src[18] != '-' || src[23] != '-') return false; for (j = 0; j < 5; j++, src++) { for (i = 0; i < size[j]; i++, src += 2, *dest++ = v) { v = wmi_parse_hexbyte(src); if (v < 0) return false; } } return true; } /* * Convert a raw GUID to the ACII string representation */ static int wmi_gtoa(const char *in, char *out) { int i; for (i = 3; i >= 0; i--) out += sprintf(out, "%02X", in[i] & 0xFF); out += sprintf(out, "-"); out += sprintf(out, "%02X", in[5] & 0xFF); out += sprintf(out, "%02X", in[4] & 0xFF); out += sprintf(out, "-"); out += sprintf(out, "%02X", in[7] & 0xFF); out += sprintf(out, "%02X", in[6] & 0xFF); out += sprintf(out, "-"); out += sprintf(out, "%02X", in[8] & 0xFF); out += sprintf(out, "%02X", in[9] & 0xFF); out += sprintf(out, "-"); for (i = 10; i <= 15; i++) out += sprintf(out, "%02X", in[i] & 0xFF); *out = '\0'; return 0; } static bool find_guid(const char *guid_string, struct wmi_block **out) { char tmp[16], guid_input[16]; struct wmi_block *wblock; struct guid_block *block; struct list_head *p; wmi_parse_guid(guid_string, tmp); wmi_swap_bytes(tmp, guid_input); list_for_each(p, &wmi_block_list) { wblock = list_entry(p, struct wmi_block, list); block = &wblock->gblock; if (memcmp(block->guid, guid_input, 16) == 0) { if (out) *out = wblock; return 1; } } return 0; } static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) { struct guid_block *block = NULL; char method[5]; struct acpi_object_list input; union acpi_object params[1]; acpi_status status; acpi_handle handle; block = &wblock->gblock; handle = wblock->handle; if (!block) return AE_NOT_EXIST; input.count = 1; input.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = enable; snprintf(method, 5, "WE%02X", 
block->notify_id); status = acpi_evaluate_object(handle, method, &input, NULL); if (status != AE_OK && status != AE_NOT_FOUND) return status; else return AE_OK; } /* * Exported WMI functions */ /** * wmi_evaluate_method - Evaluate a WMI method * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba * @instance: Instance index * @method_id: Method ID to call * &in: Buffer containing input for the method call * &out: Empty buffer to return the method results * * Call an ACPI-WMI method */ acpi_status wmi_evaluate_method(const char *guid_string, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) { struct guid_block *block = NULL; struct wmi_block *wblock = NULL; acpi_handle handle; acpi_status status; struct acpi_object_list input; union acpi_object params[3]; char method[5] = "WM"; if (!find_guid(guid_string, &wblock)) return AE_ERROR; block = &wblock->gblock; handle = wblock->handle; if (!(block->flags & ACPI_WMI_METHOD)) return AE_BAD_DATA; if (block->instance_count < instance) return AE_BAD_PARAMETER; input.count = 2; input.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = instance; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = method_id; if (in) { input.count = 3; if (block->flags & ACPI_WMI_STRING) { params[2].type = ACPI_TYPE_STRING; } else { params[2].type = ACPI_TYPE_BUFFER; } params[2].buffer.length = in->length; params[2].buffer.pointer = in->pointer; } strncat(method, block->object_id, 2); status = acpi_evaluate_object(handle, method, &input, out); return status; } EXPORT_SYMBOL_GPL(wmi_evaluate_method); /** * wmi_query_block - Return contents of a WMI block * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba * @instance: Instance index * &out: Empty buffer to return the contents of the data block to * * Return the contents of an ACPI-WMI data block to a buffer */ acpi_status wmi_query_block(const char *guid_string, u8 instance, 
struct acpi_buffer *out) { struct guid_block *block = NULL; struct wmi_block *wblock = NULL; acpi_handle handle, wc_handle; acpi_status status, wc_status = AE_ERROR; struct acpi_object_list input, wc_input; union acpi_object wc_params[1], wq_params[1]; char method[5]; char wc_method[5] = "WC"; if (!guid_string || !out) return AE_BAD_PARAMETER; if (!find_guid(guid_string, &wblock)) return AE_ERROR; block = &wblock->gblock; handle = wblock->handle; if (block->instance_count < instance) return AE_BAD_PARAMETER; /* Check GUID is a data block */ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD)) return AE_ERROR; input.count = 1; input.pointer = wq_params; wq_params[0].type = ACPI_TYPE_INTEGER; wq_params[0].integer.value = instance; /* * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to * enable collection. */ if (block->flags & ACPI_WMI_EXPENSIVE) { wc_input.count = 1; wc_input.pointer = wc_params; wc_params[0].type = ACPI_TYPE_INTEGER; wc_params[0].integer.value = 1; strncat(wc_method, block->object_id, 2); /* * Some GUIDs break the specification by declaring themselves * expensive, but have no corresponding WCxx method. So we * should not fail if this happens. */ wc_status = acpi_get_handle(handle, wc_method, &wc_handle); if (ACPI_SUCCESS(wc_status)) wc_status = acpi_evaluate_object(handle, wc_method, &wc_input, NULL); } strcpy(method, "WQ"); strncat(method, block->object_id, 2); status = acpi_evaluate_object(handle, method, &input, out); /* * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if * the WQxx method failed - we should disable collection anyway. 
*/ if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { wc_params[0].integer.value = 0; status = acpi_evaluate_object(handle, wc_method, &wc_input, NULL); } return status; } EXPORT_SYMBOL_GPL(wmi_query_block); /** * wmi_set_block - Write to a WMI block * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba * @instance: Instance index * &in: Buffer containing new values for the data block * * Write the contents of the input buffer to an ACPI-WMI data block */ acpi_status wmi_set_block(const char *guid_string, u8 instance, const struct acpi_buffer *in) { struct guid_block *block = NULL; struct wmi_block *wblock = NULL; acpi_handle handle; struct acpi_object_list input; union acpi_object params[2]; char method[5] = "WS"; if (!guid_string || !in) return AE_BAD_DATA; if (!find_guid(guid_string, &wblock)) return AE_ERROR; block = &wblock->gblock; handle = wblock->handle; if (block->instance_count < instance) return AE_BAD_PARAMETER; /* Check GUID is a data block */ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD)) return AE_ERROR; input.count = 2; input.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = instance; if (block->flags & ACPI_WMI_STRING) { params[1].type = ACPI_TYPE_STRING; } else { params[1].type = ACPI_TYPE_BUFFER; } params[1].buffer.length = in->length; params[1].buffer.pointer = in->pointer; strncat(method, block->object_id, 2); return acpi_evaluate_object(handle, method, &input, NULL); } EXPORT_SYMBOL_GPL(wmi_set_block); static void wmi_dump_wdg(const struct guid_block *g) { char guid_string[37]; wmi_gtoa(g->guid, guid_string); pr_info("%s:\n", guid_string); pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]); pr_info("\tnotify_id: %02X\n", g->notify_id); pr_info("\treserved: %02X\n", g->reserved); pr_info("\tinstance_count: %d\n", g->instance_count); pr_info("\tflags: %#x", g->flags); if (g->flags) { if (g->flags & ACPI_WMI_EXPENSIVE) pr_cont(" ACPI_WMI_EXPENSIVE"); 
if (g->flags & ACPI_WMI_METHOD) pr_cont(" ACPI_WMI_METHOD"); if (g->flags & ACPI_WMI_STRING) pr_cont(" ACPI_WMI_STRING"); if (g->flags & ACPI_WMI_EVENT) pr_cont(" ACPI_WMI_EVENT"); } pr_cont("\n"); } static void wmi_notify_debug(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_info("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj) return; pr_info("DEBUG Event "); switch(obj->type) { case ACPI_TYPE_BUFFER: pr_cont("BUFFER_TYPE - length %d\n", obj->buffer.length); break; case ACPI_TYPE_STRING: pr_cont("STRING_TYPE - %s\n", obj->string.pointer); break; case ACPI_TYPE_INTEGER: pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value); break; case ACPI_TYPE_PACKAGE: pr_cont("PACKAGE_TYPE - %d elements\n", obj->package.count); break; default: pr_cont("object type 0x%X\n", obj->type); } kfree(obj); } /** * wmi_install_notify_handler - Register handler for WMI events * @handler: Function to handle notifications * @data: Data to be returned to handler when event is fired * * Register a handler for events sent to the ACPI-WMI mapper device. 
*/ acpi_status wmi_install_notify_handler(const char *guid, wmi_notify_handler handler, void *data) { struct wmi_block *block; acpi_status status = AE_NOT_EXIST; char tmp[16], guid_input[16]; struct list_head *p; if (!guid || !handler) return AE_BAD_PARAMETER; wmi_parse_guid(guid, tmp); wmi_swap_bytes(tmp, guid_input); list_for_each(p, &wmi_block_list) { acpi_status wmi_status; block = list_entry(p, struct wmi_block, list); if (memcmp(block->gblock.guid, guid_input, 16) == 0) { if (block->handler && block->handler != wmi_notify_debug) return AE_ALREADY_ACQUIRED; block->handler = handler; block->handler_data = data; wmi_status = wmi_method_enable(block, 1); if ((wmi_status != AE_OK) || ((wmi_status == AE_OK) && (status == AE_NOT_EXIST))) status = wmi_status; } } return status; } EXPORT_SYMBOL_GPL(wmi_install_notify_handler); /** * wmi_uninstall_notify_handler - Unregister handler for WMI events * * Unregister handler for events sent to the ACPI-WMI mapper device. */ acpi_status wmi_remove_notify_handler(const char *guid) { struct wmi_block *block; acpi_status status = AE_NOT_EXIST; char tmp[16], guid_input[16]; struct list_head *p; if (!guid) return AE_BAD_PARAMETER; wmi_parse_guid(guid, tmp); wmi_swap_bytes(tmp, guid_input); list_for_each(p, &wmi_block_list) { acpi_status wmi_status; block = list_entry(p, struct wmi_block, list); if (memcmp(block->gblock.guid, guid_input, 16) == 0) { if (!block->handler || block->handler == wmi_notify_debug) return AE_NULL_ENTRY; if (debug_event) { block->handler = wmi_notify_debug; status = AE_OK; } else { wmi_status = wmi_method_enable(block, 0); block->handler = NULL; block->handler_data = NULL; if ((wmi_status != AE_OK) || ((wmi_status == AE_OK) && (status == AE_NOT_EXIST))) status = wmi_status; } } } return status; } EXPORT_SYMBOL_GPL(wmi_remove_notify_handler); /** * wmi_get_event_data - Get WMI data associated with an event * * @event: Event to find * @out: Buffer to hold event data. 
out->pointer should be freed with kfree() * * Returns extra data associated with an event in WMI. */ acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out) { struct acpi_object_list input; union acpi_object params[1]; struct guid_block *gblock; struct wmi_block *wblock; struct list_head *p; input.count = 1; input.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = event; list_for_each(p, &wmi_block_list) { wblock = list_entry(p, struct wmi_block, list); gblock = &wblock->gblock; if ((gblock->flags & ACPI_WMI_EVENT) && (gblock->notify_id == event)) return acpi_evaluate_object(wblock->handle, "_WED", &input, out); } return AE_NOT_FOUND; } EXPORT_SYMBOL_GPL(wmi_get_event_data); /** * wmi_has_guid - Check if a GUID is available * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba * * Check if a given GUID is defined by _WDG */ bool wmi_has_guid(const char *guid_string) { return find_guid(guid_string, NULL); } EXPORT_SYMBOL_GPL(wmi_has_guid); /* * sysfs interface */ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { char guid_string[37]; struct wmi_block *wblock; wblock = dev_get_drvdata(dev); if (!wblock) return -ENOMEM; wmi_gtoa(wblock->gblock.guid, guid_string); return sprintf(buf, "wmi:%s\n", guid_string); } static struct device_attribute wmi_dev_attrs[] = { __ATTR_RO(modalias), __ATTR_NULL }; static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) { char guid_string[37]; struct wmi_block *wblock; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; wblock = dev_get_drvdata(dev); if (!wblock) return -ENOMEM; wmi_gtoa(wblock->gblock.guid, guid_string); strcpy(&env->buf[env->buflen - 1], "wmi:"); memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36); env->buflen += 40; return 0; } static void wmi_dev_free(struct device *dev) { struct wmi_block *wmi_block = container_of(dev, struct wmi_block, dev); kfree(wmi_block); } static struct class 
wmi_class = { .name = "wmi", .dev_release = wmi_dev_free, .dev_uevent = wmi_dev_uevent, .dev_attrs = wmi_dev_attrs, }; static int wmi_create_device(const struct guid_block *gblock, struct wmi_block *wblock, acpi_handle handle) { char guid_string[37]; wblock->dev.class = &wmi_class; wmi_gtoa(gblock->guid, guid_string); dev_set_name(&wblock->dev, guid_string); dev_set_drvdata(&wblock->dev, wblock); return device_register(&wblock->dev); } static void wmi_free_devices(void) { struct wmi_block *wblock, *next; /* Delete devices for all the GUIDs */ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) { list_del(&wblock->list); if (wblock->dev.class) device_unregister(&wblock->dev); else kfree(wblock); } } static bool guid_already_parsed(const char *guid_string) { struct wmi_block *wblock; list_for_each_entry(wblock, &wmi_block_list, list) if (memcmp(wblock->gblock.guid, guid_string, 16) == 0) return true; return false; } /* * Parse the _WDG method for the GUID data blocks */ static acpi_status parse_wdg(acpi_handle handle) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; const struct guid_block *gblock; struct wmi_block *wblock; acpi_status status; int retval; u32 i, total; status = acpi_evaluate_object(handle, "_WDG", NULL, &out); if (ACPI_FAILURE(status)) return -ENXIO; obj = (union acpi_object *) out.pointer; if (!obj) return -ENXIO; if (obj->type != ACPI_TYPE_BUFFER) { retval = -ENXIO; goto out_free_pointer; } gblock = (const struct guid_block *)obj->buffer.pointer; total = obj->buffer.length / sizeof(struct guid_block); for (i = 0; i < total; i++) { if (debug_dump_wdg) wmi_dump_wdg(&gblock[i]); wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); if (!wblock) return AE_NO_MEMORY; wblock->handle = handle; wblock->gblock = gblock[i]; /* Some WMI devices, like those for nVidia hooks, have a duplicate GUID. It's not clear what we should do in this case yet, so for now, we'll just ignore the duplicate for device creation. 
*/ if (!guid_already_parsed(gblock[i].guid)) { retval = wmi_create_device(&gblock[i], wblock, handle); if (retval) { wmi_free_devices(); goto out_free_pointer; } } list_add_tail(&wblock->list, &wmi_block_list); if (debug_event) { wblock->handler = wmi_notify_debug; wmi_method_enable(wblock, 1); } } retval = 0; out_free_pointer: kfree(out.pointer); return retval; } /* * WMI can have EmbeddedControl access regions. In which case, we just want to * hand these off to the EC driver. */ static acpi_status acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, void *region_context) { int result = 0, i = 0; u8 temp = 0; if ((address > 0xFF) || !value) return AE_BAD_PARAMETER; if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; if (bits != 8) return AE_BAD_PARAMETER; if (function == ACPI_READ) { result = ec_read(address, &temp); (*value) |= ((u64)temp) << i; } else { temp = 0xff & ((*value) >> i); result = ec_write(address, temp); } switch (result) { case -EINVAL: return AE_BAD_PARAMETER; break; case -ENODEV: return AE_NOT_FOUND; break; case -ETIME: return AE_TIME; break; default: return AE_OK; } } static void acpi_wmi_notify(struct acpi_device *device, u32 event) { struct guid_block *block; struct wmi_block *wblock; struct list_head *p; char guid_string[37]; list_for_each(p, &wmi_block_list) { wblock = list_entry(p, struct wmi_block, list); block = &wblock->gblock; if ((block->flags & ACPI_WMI_EVENT) && (block->notify_id == event)) { if (wblock->handler) wblock->handler(event, wblock->handler_data); if (debug_event) { wmi_gtoa(wblock->gblock.guid, guid_string); pr_info("DEBUG Event GUID: %s\n", guid_string); } acpi_bus_generate_netlink_event( device->pnp.device_class, dev_name(&device->dev), event, 0); break; } } } static int acpi_wmi_remove(struct acpi_device *device, int type) { acpi_remove_address_space_handler(device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); 
wmi_free_devices(); return 0; } static int acpi_wmi_add(struct acpi_device *device) { acpi_status status; int error; status = acpi_install_address_space_handler(device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler, NULL, NULL); if (ACPI_FAILURE(status)) { pr_err("Error installing EC region handler\n"); return -ENODEV; } error = parse_wdg(device->handle); if (error) { acpi_remove_address_space_handler(device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); pr_err("Failed to parse WDG method\n"); return error; } return 0; } static int __init acpi_wmi_init(void) { int error; if (acpi_disabled) return -ENODEV; error = class_register(&wmi_class); if (error) return error; error = acpi_bus_register_driver(&acpi_wmi_driver); if (error) { pr_err("Error loading mapper\n"); class_unregister(&wmi_class); return error; } pr_info("Mapper loaded\n"); return 0; } static void __exit acpi_wmi_exit(void) { acpi_bus_unregister_driver(&acpi_wmi_driver); class_unregister(&wmi_class); pr_info("Mapper unloaded\n"); } subsys_initcall(acpi_wmi_init); module_exit(acpi_wmi_exit);
gpl-2.0
peterzhu0503/kernel_rk3168_86v_yk
drivers/net/enic/vnic_intr.c
2702
1995
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include "vnic_dev.h" #include "vnic_intr.h" void vnic_intr_free(struct vnic_intr *intr) { intr->ctrl = NULL; } int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, unsigned int index) { intr->index = index; intr->vdev = vdev; intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { pr_err("Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } return 0; } void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, unsigned int coalescing_type, unsigned int mask_on_assertion) { vnic_intr_coalescing_timer_set(intr, coalescing_timer); iowrite32(coalescing_type, &intr->ctrl->coalescing_type); iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); iowrite32(0, &intr->ctrl->int_credits); } void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, unsigned int coalescing_timer) { iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); } void vnic_intr_clean(struct vnic_intr *intr) { iowrite32(0, &intr->ctrl->int_credits); }
gpl-2.0
kim6515516/fastmodel-on-kvm
fs/ext2/xattr_user.c
8590
1571
/*
 * linux/fs/ext2/xattr_user.c
 * Handler for extended user attributes.
 *
 * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

#include <linux/init.h>
#include <linux/string.h>
#include "ext2.h"
#include "xattr.h"

/*
 * ->list callback: emit "user.<name>\0" into @list for listxattr(2).
 *
 * Returns the space the entry needs (including the terminating NUL) even
 * when @list is NULL or too small, so callers can size their buffer; a
 * filesystem mounted without the "user_xattr" option contributes nothing.
 */
static size_t
ext2_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
		const char *name, size_t name_len, int type)
{
	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
	const size_t total_len = prefix_len + name_len + 1;

	if (!test_opt(dentry->d_sb, XATTR_USER))
		return 0;

	/* only write if the caller provided enough room; length is
	 * reported either way */
	if (list && total_len <= list_size) {
		memcpy(list, XATTR_USER_PREFIX, prefix_len);
		memcpy(list+prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}

/*
 * ->get callback for the "user." namespace.
 *
 * @name arrives with the "user." prefix already stripped by the VFS, so
 * an empty remainder is invalid.  Returns -EOPNOTSUPP unless the fs was
 * mounted with user_xattr; otherwise delegates to ext2_xattr_get().
 */
static int
ext2_xattr_user_get(struct dentry *dentry, const char *name,
		void *buffer, size_t size, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, XATTR_USER))
		return -EOPNOTSUPP;
	return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_USER,
			      name, buffer, size);
}

/*
 * ->set callback for the "user." namespace; mirrors the checks in
 * ext2_xattr_user_get() and delegates to ext2_xattr_set() (which also
 * handles removal when @value is NULL).
 */
static int
ext2_xattr_user_set(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, XATTR_USER))
		return -EOPNOTSUPP;
	return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_USER,
			      name, value, size, flags);
}

/* Handler registered for all attributes in the "user." namespace. */
const struct xattr_handler ext2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.list	= ext2_xattr_user_list,
	.get	= ext2_xattr_user_get,
	.set	= ext2_xattr_user_set,
};
gpl-2.0
F4uzan/mono_hima
fs/ext2/xattr_user.c
8590
1571
/*
 * linux/fs/ext2/xattr_user.c
 * Handler for extended user attributes.
 *
 * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

#include <linux/init.h>
#include <linux/string.h>
#include "ext2.h"
#include "xattr.h"

/*
 * ->list callback: emit "user.<name>\0" into @list for listxattr(2).
 *
 * Returns the space the entry needs (including the terminating NUL) even
 * when @list is NULL or too small, so callers can size their buffer; a
 * filesystem mounted without the "user_xattr" option contributes nothing.
 */
static size_t
ext2_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
		const char *name, size_t name_len, int type)
{
	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
	const size_t total_len = prefix_len + name_len + 1;

	if (!test_opt(dentry->d_sb, XATTR_USER))
		return 0;

	/* only write if the caller provided enough room; length is
	 * reported either way */
	if (list && total_len <= list_size) {
		memcpy(list, XATTR_USER_PREFIX, prefix_len);
		memcpy(list+prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}

/*
 * ->get callback for the "user." namespace.
 *
 * @name arrives with the "user." prefix already stripped by the VFS, so
 * an empty remainder is invalid.  Returns -EOPNOTSUPP unless the fs was
 * mounted with user_xattr; otherwise delegates to ext2_xattr_get().
 */
static int
ext2_xattr_user_get(struct dentry *dentry, const char *name,
		void *buffer, size_t size, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, XATTR_USER))
		return -EOPNOTSUPP;
	return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_USER,
			      name, buffer, size);
}

/*
 * ->set callback for the "user." namespace; mirrors the checks in
 * ext2_xattr_user_get() and delegates to ext2_xattr_set() (which also
 * handles removal when @value is NULL).
 */
static int
ext2_xattr_user_set(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags, int type)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, XATTR_USER))
		return -EOPNOTSUPP;
	return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_USER,
			      name, value, size, flags);
}

/* Handler registered for all attributes in the "user." namespace. */
const struct xattr_handler ext2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.list	= ext2_xattr_user_list,
	.get	= ext2_xattr_user_get,
	.set	= ext2_xattr_user_set,
};
gpl-2.0
omnirom/android_kernel_sony_apq8064
drivers/char/snsc_event.c
11662
7425
/*
 * SN Platform system controller communication support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
 */

/*
 * System controller event handler
 *
 * These routines deal with environmental events arriving from the
 * system controllers.
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/sn/sn_sal.h>
#include <asm/unaligned.h>
#include "snsc.h"

/* Single, module-wide event subchannel set up by scdrv_event_init(). */
static struct subch_data_s *event_sd;

void scdrv_event(unsigned long);
DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);

/*
 * scdrv_event_interrupt
 *
 * Pull incoming environmental events off the physical link to the
 * system controller and put them in a temporary holding area in SAL.
 * Schedule scdrv_event() to move them along to their ultimate
 * destination.
 */
static irqreturn_t
scdrv_event_interrupt(int irq, void *subch_data)
{
	struct subch_data_s *sd = subch_data;
	unsigned long flags;
	int status;

	/* sd_rlock serializes subchannel access against the tasklet */
	spin_lock_irqsave(&sd->sd_rlock, flags);
	status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);

	/* only schedule the tasklet when SAL reports data to receive */
	if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
		tasklet_schedule(&sn_sysctl_event);
	}
	spin_unlock_irqrestore(&sd->sd_rlock, flags);
	return IRQ_HANDLED;
}

/*
 * scdrv_parse_event
 *
 * Break an event (as read from SAL) into useful pieces so we can decide
 * what to do with it.
 *
 * @event:    raw packet; mutated in place (NUL-terminated at CHUNKSIZE-1)
 * @src:      out: big-endian event source address
 * @code:     out: system controller event code
 * @esp_code: out: ESP code argument
 * @desc:     out: text description, trailing CR/LF stripped; must hold at
 *            least CHUNKSIZE bytes (callers pass a CHUNKSIZE buffer)
 *
 * Returns 0 on success, -1 if the packet does not have the expected
 * two-argument (int, ASCII) layout.
 */
static int
scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
{
	char *desc_end;

	/* record event source address */
	*src = get_unaligned_be32(event);
	event += 4; 			/* move on to event code */

	/* record the system controller's event code */
	*code = get_unaligned_be32(event);
	event += 4;			/* move on to event arguments */

	/* how many arguments are in the packet? */
	if (*event++ != 2) {
		/* if not 2, give up */
		return -1;
	}

	/* parse out the ESP code */
	if (*event++ != IR_ARG_INT) {
		/* not an integer argument, so give up */
		return -1;
	}
	*esp_code = get_unaligned_be32(event);
	event += 4;

	/* parse out the event description */
	if (*event++ != IR_ARG_ASCII) {
		/* not an ASCII string, so give up */
		return -1;
	}
	event[CHUNKSIZE-1] = '\0';	/* ensure this string ends! */
	event += 2; 			/* skip leading CR/LF */
	desc_end = desc + sprintf(desc, "%s", event);

	/* strip trailing CR/LF (if any) */
	for (desc_end--;
	     (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
	     desc_end--) {
		*desc_end = '\0';
	}

	return 0;
}

/*
 * scdrv_event_severity
 *
 * Figure out how urgent a message we should write to the console/syslog
 * via printk.  Maps the event's class/severity bits to a printk level
 * string; anything unrecognized falls through as KERN_NOTICE.
 */
static char *
scdrv_event_severity(int code)
{
	int ev_class = (code & EV_CLASS_MASK);
	int ev_severity = (code & EV_SEVERITY_MASK);
	char *pk_severity = KERN_NOTICE;

	switch (ev_class) {
	case EV_CLASS_POWER:
		switch (ev_severity) {
		case EV_SEVERITY_POWER_LOW_WARNING:
		case EV_SEVERITY_POWER_HIGH_WARNING:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_POWER_HIGH_FAULT:
		case EV_SEVERITY_POWER_LOW_FAULT:
			pk_severity = KERN_ALERT;
			break;
		}
		break;
	case EV_CLASS_FAN:
		switch (ev_severity) {
		case EV_SEVERITY_FAN_WARNING:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_FAN_FAULT:
			pk_severity = KERN_CRIT;
			break;
		}
		break;
	case EV_CLASS_TEMP:
		switch (ev_severity) {
		case EV_SEVERITY_TEMP_ADVISORY:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_TEMP_CRITICAL:
			pk_severity = KERN_CRIT;
			break;
		case EV_SEVERITY_TEMP_FAULT:
			pk_severity = KERN_ALERT;
			break;
		}
		break;
	case EV_CLASS_ENV:
		pk_severity = KERN_ALERT;
		break;
	case EV_CLASS_TEST_FAULT:
		pk_severity = KERN_ALERT;
		break;
	case EV_CLASS_TEST_WARNING:
		pk_severity = KERN_WARNING;
		break;
	case EV_CLASS_PWRD_NOTIFY:
		pk_severity = KERN_ALERT;
		break;
	}

	return pk_severity;
}

/*
 * scdrv_dispatch_event
 *
 * Do the right thing with an incoming event.  That's often nothing
 * more than printing it to the system log.  For power-down notifications
 * we start a graceful shutdown by signalling init with SIGPWR (exactly
 * once -- snsc_shutting_down latches so repeated events are ignored).
 */
static void
scdrv_dispatch_event(char *event, int len)
{
	static int snsc_shutting_down = 0;
	int code, esp_code, src, class;
	char desc[CHUNKSIZE];
	char *severity;

	if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
		/* ignore uninterpretible event */
		return;
	}

	/* how urgent is the message? */
	severity = scdrv_event_severity(code);

	class = (code & EV_CLASS_MASK);

	if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
		if (snsc_shutting_down)
			return;

		snsc_shutting_down = 1;

		/* give a message for each type of event */
		if (class == EV_CLASS_PWRD_NOTIFY)
			printk(KERN_NOTICE "Power off indication received."
			       " Sending SIGPWR to init...\n");
		else if (code == ENV_PWRDN_PEND)
			printk(KERN_CRIT "WARNING: Shutting down the system"
			       " due to a critical environmental condition."
			       " Sending SIGPWR to init...\n");

		/* give a SIGPWR signal to init proc */
		kill_cad_pid(SIGPWR, 0);
	} else {
		/* print to system log */
		printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
	}
}

/*
 * scdrv_event
 *
 * Called as a tasklet when an event arrives from the L1.  Read the event
 * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
 * to send it on its way.  Keep trying to read events until SAL indicates
 * that there are no more immediately available.
 *
 * The lock is dropped around each dispatch so the (possibly slow) printk
 * path does not run with sd_rlock held.
 */
void
scdrv_event(unsigned long dummy)
{
	int status;
	int len;
	unsigned long flags;
	struct subch_data_s *sd = event_sd;

	/* anything to read? */
	len = CHUNKSIZE;
	spin_lock_irqsave(&sd->sd_rlock, flags);
	status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
				   sd->sd_rb, &len);

	while (!(status < 0)) {
		spin_unlock_irqrestore(&sd->sd_rlock, flags);

		/* process it */
		scdrv_dispatch_event(sd->sd_rb, len);

		len = CHUNKSIZE;
		spin_lock_irqsave(&sd->sd_rlock, flags);
		status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
					   sd->sd_rb, &len);
	}
	spin_unlock_irqrestore(&sd->sd_rlock, flags);
}

/*
 * scdrv_event_init
 *
 * Sets up a system controller subchannel to begin receiving event
 * messages.  This is sort of a specialized version of scdrv_open()
 * in drivers/char/sn_sysctl.c.
 *
 * On any failure the function logs a warning and returns with event
 * monitoring simply disabled (event_sd freed/never set); callers do not
 * receive an error code.
 */
void
scdrv_event_init(struct sysctl_data_s *scd)
{
	int rv;

	event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
	if (event_sd == NULL) {
		printk(KERN_WARNING "%s: couldn't allocate subchannel info"
		       " for event monitoring\n", __func__);
		return;
	}

	/* initialize subch_data_s fields */
	event_sd->sd_nasid = scd->scd_nasid;
	spin_lock_init(&event_sd->sd_rlock);

	/* ask the system controllers to send events to this node */
	event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);

	if (event_sd->sd_subch < 0) {
		kfree(event_sd);
		printk(KERN_WARNING "%s: couldn't open event subchannel\n",
		       __func__);
		return;
	}

	/* hook event subchannel up to the system controller interrupt */
	/* NOTE(review): IRQF_DISABLED is a no-op/removed in modern kernels;
	 * harmless here but worth dropping on a kernel update */
	rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
			 IRQF_SHARED | IRQF_DISABLED,
			 "system controller events", event_sd);
	if (rv) {
		printk(KERN_WARNING "%s: irq request failed (%d)\n",
		       __func__, rv);
		ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
		kfree(event_sd);
		return;
	}
}
gpl-2.0
NeoPhyTe-x360/i9300-S3-JB-kernel
drivers/infiniband/core/mad_rmpp.c
12942
27619
/* * Copyright (c) 2005 Intel Inc. All rights reserved. * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/slab.h> #include "mad_priv.h" #include "mad_rmpp.h" enum rmpp_state { RMPP_STATE_ACTIVE, RMPP_STATE_TIMEOUT, RMPP_STATE_COMPLETE, RMPP_STATE_CANCELING }; struct mad_rmpp_recv { struct ib_mad_agent_private *agent; struct list_head list; struct delayed_work timeout_work; struct delayed_work cleanup_work; struct completion comp; enum rmpp_state state; spinlock_t lock; atomic_t refcount; struct ib_ah *ah; struct ib_mad_recv_wc *rmpp_wc; struct ib_mad_recv_buf *cur_seg_buf; int last_ack; int seg_num; int newwin; int repwin; __be64 tid; u32 src_qp; u16 slid; u8 mgmt_class; u8 class_version; u8 method; }; static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { if (atomic_dec_and_test(&rmpp_recv->refcount)) complete(&rmpp_recv->comp); } static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { deref_rmpp_recv(rmpp_recv); wait_for_completion(&rmpp_recv->comp); ib_destroy_ah(rmpp_recv->ah); kfree(rmpp_recv); } void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) { struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->state != RMPP_STATE_COMPLETE) ib_free_recv_mad(rmpp_recv->rmpp_wc); rmpp_recv->state = RMPP_STATE_CANCELING; } spin_unlock_irqrestore(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { cancel_delayed_work(&rmpp_recv->timeout_work); cancel_delayed_work(&rmpp_recv->cleanup_work); } flush_workqueue(agent->qp_info->port_priv->wq); list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, &agent->rmpp_list, list) { list_del(&rmpp_recv->list); destroy_rmpp_recv(rmpp_recv); } } static void format_ack(struct ib_mad_send_buf *msg, struct ib_rmpp_mad *data, struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *ack = msg->mad; unsigned long flags; memcpy(ack, &data->mad_hdr, msg->hdr_len); ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ack->rmpp_hdr.rmpp_type = 
IB_MGMT_RMPP_TYPE_ACK; ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); spin_lock_irqsave(&rmpp_recv->lock, flags); rmpp_recv->last_ack = rmpp_recv->seg_num; ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); spin_unlock_irqrestore(&rmpp_recv->lock, flags); } static void ack_recv(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; int ret, hdr_len; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); if (IS_ERR(msg)) return; format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); msg->ah = rmpp_recv->ah; ret = ib_post_send_mad(msg, NULL); if (ret) ib_free_send_mad(msg); } static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_ah *ah; int hdr_len; ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, recv_wc->recv_buf.grh, agent->port_num); if (IS_ERR(ah)) return (void *) ah; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL); if (IS_ERR(msg)) ib_destroy_ah(ah); else { msg->ah = ah; msg->context[0] = ah; } return msg; } static void ack_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); ret = ib_post_send_mad(msg, NULL); if 
(ret) { ib_destroy_ah(msg->ah); ib_free_send_mad(msg); } } void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah) ib_destroy_ah(mad_send_wc->send_buf->ah); ib_free_send_mad(mad_send_wc->send_buf); } static void nack_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = 0; ret = ib_post_send_mad(msg, NULL); if (ret) { ib_destroy_ah(msg->ah); ib_free_send_mad(msg); } } static void recv_timeout_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, timeout_work.work); struct ib_mad_recv_wc *rmpp_wc; unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state != RMPP_STATE_ACTIVE) { spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } rmpp_recv->state = RMPP_STATE_TIMEOUT; list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); rmpp_wc = rmpp_recv->rmpp_wc; nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); destroy_rmpp_recv(rmpp_recv); ib_free_recv_mad(rmpp_wc); } static void recv_cleanup_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, cleanup_work.work); unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state == RMPP_STATE_CANCELING) { 
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); destroy_rmpp_recv(rmpp_recv); } static struct mad_rmpp_recv * create_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr; rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL); if (!rmpp_recv) return NULL; rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, agent->agent.port_num); if (IS_ERR(rmpp_recv->ah)) goto error; rmpp_recv->agent = agent; init_completion(&rmpp_recv->comp); INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); spin_lock_init(&rmpp_recv->lock); rmpp_recv->state = RMPP_STATE_ACTIVE; atomic_set(&rmpp_recv->refcount, 1); rmpp_recv->rmpp_wc = mad_recv_wc; rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf; rmpp_recv->newwin = 1; rmpp_recv->seg_num = 1; rmpp_recv->last_ack = 0; rmpp_recv->repwin = 1; mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; rmpp_recv->tid = mad_hdr->tid; rmpp_recv->src_qp = mad_recv_wc->wc->src_qp; rmpp_recv->slid = mad_recv_wc->wc->slid; rmpp_recv->mgmt_class = mad_hdr->mgmt_class; rmpp_recv->class_version = mad_hdr->class_version; rmpp_recv->method = mad_hdr->method; return rmpp_recv; error: kfree(rmpp_recv); return NULL; } static struct mad_rmpp_recv * find_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid == mad_hdr->tid && rmpp_recv->src_qp == mad_recv_wc->wc->src_qp && rmpp_recv->slid == mad_recv_wc->wc->slid && rmpp_recv->mgmt_class == mad_hdr->mgmt_class && rmpp_recv->class_version == mad_hdr->class_version && rmpp_recv->method == mad_hdr->method) return 
rmpp_recv; } return NULL; } static struct mad_rmpp_recv * acquire_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv) atomic_inc(&rmpp_recv->refcount); spin_unlock_irqrestore(&agent->lock, flags); return rmpp_recv; } static struct mad_rmpp_recv * insert_rmpp_recv(struct ib_mad_agent_private *agent, struct mad_rmpp_recv *rmpp_recv) { struct mad_rmpp_recv *cur_rmpp_recv; cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc); if (!cur_rmpp_recv) list_add_tail(&rmpp_recv->list, &agent->rmpp_list); return cur_rmpp_recv; } static inline int get_last_flag(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST; } static inline int get_seg_num(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); } static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list, struct ib_mad_recv_buf *seg) { if (seg->list.next == rmpp_list) return NULL; return container_of(seg->list.next, struct ib_mad_recv_buf, list); } static inline int window_size(struct ib_mad_agent_private *agent) { return max(agent->qp_info->recv_queue.max_active >> 3, 1); } static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, int seg_num) { struct ib_mad_recv_buf *seg_buf; int cur_seg_num; list_for_each_entry_reverse(seg_buf, rmpp_list, list) { cur_seg_num = get_seg_num(seg_buf); if (seg_num > cur_seg_num) return seg_buf; if (seg_num == cur_seg_num) break; } return NULL; } static void update_seg_num(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_buf *new_buf) { struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list; while (new_buf && (get_seg_num(new_buf) == 
rmpp_recv->seg_num + 1)) { rmpp_recv->cur_seg_buf = new_buf; rmpp_recv->seg_num++; new_buf = get_next_seg(rmpp_list, new_buf); } } static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *rmpp_mad; int hdr_size, data_size, pad; rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); data_size = sizeof(struct ib_rmpp_mad) - hdr_size; pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > IB_MGMT_RMPP_DATA || pad < 0) pad = 0; return hdr_size + rmpp_recv->seg_num * data_size - pad; } static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv) { struct ib_mad_recv_wc *rmpp_wc; ack_recv(rmpp_recv, rmpp_recv->rmpp_wc); if (rmpp_recv->seg_num > 1) cancel_delayed_work(&rmpp_recv->timeout_work); rmpp_wc = rmpp_recv->rmpp_wc; rmpp_wc->mad_len = get_mad_len(rmpp_recv); /* 10 seconds until we can find the packet lifetime */ queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq, &rmpp_recv->cleanup_work, msecs_to_jiffies(10000)); return rmpp_wc; } static struct ib_mad_recv_wc * continue_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_recv_buf *prev_buf; struct ib_mad_recv_wc *done_wc; int seg_num; unsigned long flags; rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) goto drop1; seg_num = get_seg_num(&mad_recv_wc->recv_buf); spin_lock_irqsave(&rmpp_recv->lock, flags); if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) || (seg_num > rmpp_recv->newwin)) goto drop3; if ((seg_num <= rmpp_recv->last_ack) || (rmpp_recv->state == RMPP_STATE_COMPLETE)) { spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto drop2; } prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num); if (!prev_buf) goto drop3; done_wc = NULL; list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list); if (rmpp_recv->cur_seg_buf == 
prev_buf) { update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf); if (get_last_flag(rmpp_recv->cur_seg_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&rmpp_recv->lock, flags); done_wc = complete_rmpp(rmpp_recv); goto out; } else if (rmpp_recv->seg_num == rmpp_recv->newwin) { rmpp_recv->newwin += window_size(agent); spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto out; } } spin_unlock_irqrestore(&rmpp_recv->lock, flags); out: deref_rmpp_recv(rmpp_recv); return done_wc; drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags); drop2: deref_rmpp_recv(rmpp_recv); drop1: ib_free_recv_mad(mad_recv_wc); return NULL; } static struct ib_mad_recv_wc * start_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; rmpp_recv = create_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) { ib_free_recv_mad(mad_recv_wc); return NULL; } spin_lock_irqsave(&agent->lock, flags); if (insert_rmpp_recv(agent, rmpp_recv)) { spin_unlock_irqrestore(&agent->lock, flags); /* duplicate first MAD */ destroy_rmpp_recv(rmpp_recv); return continue_rmpp(agent, mad_recv_wc); } atomic_inc(&rmpp_recv->refcount); if (get_last_flag(&mad_recv_wc->recv_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&agent->lock, flags); complete_rmpp(rmpp_recv); } else { spin_unlock_irqrestore(&agent->lock, flags); /* 40 seconds until we can find the packet lifetimes */ queue_delayed_work(agent->qp_info->port_priv->wq, &rmpp_recv->timeout_work, msecs_to_jiffies(40000)); rmpp_recv->newwin += window_size(agent); ack_recv(rmpp_recv, mad_recv_wc); mad_recv_wc = NULL; } deref_rmpp_recv(rmpp_recv); return mad_recv_wc; } static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int timeout; u32 paylen = 0; rmpp_mad = mad_send_wr->send_buf.mad; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 
cpu_to_be32(++mad_send_wr->seg_num); if (mad_send_wr->seg_num == 1) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA - mad_send_wr->pad; } if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad; } rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen); /* 2 seconds for an ACK until we can find the packet lifetime */ timeout = mad_send_wr->send_buf.timeout_ms; if (!timeout || timeout > 2000) mad_send_wr->timeout = msecs_to_jiffies(2000); return ib_send_mad(mad_send_wr); } static void abort_send(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc wc; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) goto out; /* Unmatched send */ if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_REM_ABORT_ERR; wc.vendor_err = rmpp_status; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; out: spin_unlock_irqrestore(&agent->lock, flags); } static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, int seg_num) { struct list_head *list; wr->last_ack = seg_num; list = &wr->last_ack_seg->list; list_for_each_entry(wr->last_ack_seg, list, list) if (wr->last_ack_seg->num == seg_num) break; } static void process_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, int newwin) { struct mad_rmpp_recv *rmpp_recv; rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) rmpp_recv->repwin = 
newwin; } static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_rmpp_mad *rmpp_mad; unsigned long flags; int seg_num, newwin, ret; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; } seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (newwin < seg_num) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); return; } spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) { if (!seg_num) process_ds_ack(agent, mad_recv_wc, newwin); goto out; /* Unmatched or DS RMPP ACK */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && (mad_send_wr->timeout)) { spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; /* Repeated ACK for DS RMPP transaction */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ if (seg_num > mad_send_wr->send_buf.seg_count || seg_num > mad_send_wr->newwin) { spin_unlock_irqrestore(&agent->lock, flags); abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); return; } if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) goto out; /* Old ACK */ if (seg_num > mad_send_wr->last_ack) { adjust_last_ack(mad_send_wr, seg_num); mad_send_wr->retries_left = mad_send_wr->max_retries; } mad_send_wr->newwin = newwin; if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { /* If no response is expected, the ACK completes the send */ if (!mad_send_wr->send_buf.timeout_ms) 
{ struct ib_mad_send_wc wc; ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_SUCCESS; wc.vendor_err = 0; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; } if (mad_send_wr->refcount == 1) ib_reset_mad_timeout(mad_send_wr, mad_send_wr->send_buf.timeout_ms); spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; } else if (mad_send_wr->refcount == 1 && mad_send_wr->seg_num < mad_send_wr->newwin && mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { /* Send failure will just result in a timeout/retry */ ret = send_next_seg(mad_send_wr); if (ret) goto out; mad_send_wr->refcount++; list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } out: spin_unlock_irqrestore(&agent->lock, flags); } static struct ib_mad_recv_wc * process_rmpp_data(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_hdr *rmpp_hdr; u8 rmpp_status; rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; if (rmpp_hdr->rmpp_status) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; goto bad; } if (rmpp_hdr->seg_num == cpu_to_be32(1)) { if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return start_rmpp(agent, mad_recv_wc); } else { if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return continue_rmpp(agent, mad_recv_wc); } bad: nack_recv(agent, mad_recv_wc, rmpp_status); ib_free_recv_mad(mad_recv_wc); return NULL; } static void process_rmpp_stop(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, 
IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } static void process_rmpp_abort(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } struct ib_mad_recv_wc * ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) return mad_recv_wc; if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; } switch (rmpp_mad->rmpp_hdr.rmpp_type) { case IB_MGMT_RMPP_TYPE_DATA: return process_rmpp_data(agent, mad_recv_wc); case IB_MGMT_RMPP_TYPE_ACK: process_rmpp_ack(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_STOP: process_rmpp_stop(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_ABORT: process_rmpp_abort(agent, mad_recv_wc); break; default: abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } out: ib_free_recv_mad(mad_recv_wc); return NULL; } static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; struct mad_rmpp_recv *rmpp_recv; struct ib_ah_attr ah_attr; unsigned long flags; int newwin = 1; if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) goto out; 
spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid != mad_hdr->tid || rmpp_recv->mgmt_class != mad_hdr->mgmt_class || rmpp_recv->class_version != mad_hdr->class_version || (rmpp_recv->method & IB_MGMT_METHOD_RESP)) continue; if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) continue; if (rmpp_recv->slid == ah_attr.dlid) { newwin = rmpp_recv->repwin; break; } } spin_unlock_irqrestore(&agent->lock, flags); out: return newwin; } int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { mad_send_wr->seg_num = 1; return IB_RMPP_RESULT_INTERNAL; } mad_send_wr->newwin = init_newwin(mad_send_wr); /* We need to wait for the final ACK even if there isn't a response */ mad_send_wr->refcount += (mad_send_wr->timeout == 0); ret = send_next_seg(mad_send_wr); if (!ret) return IB_RMPP_RESULT_CONSUMED; return ret; } int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ if (mad_send_wc->status != IB_WC_SUCCESS || mad_send_wr->status != IB_WC_SUCCESS) return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */ if (!mad_send_wr->timeout) return IB_RMPP_RESULT_PROCESSED; /* Response received */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); return IB_RMPP_RESULT_PROCESSED; /* Send done */ } if 
(mad_send_wr->seg_num == mad_send_wr->newwin || mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */ ret = send_next_seg(mad_send_wr); if (ret) { mad_send_wc->status = IB_WC_GENERAL_ERR; return IB_RMPP_RESULT_PROCESSED; } return IB_RMPP_RESULT_CONSUMED; } int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; mad_send_wr->seg_num = mad_send_wr->last_ack; mad_send_wr->cur_seg = mad_send_wr->last_ack_seg; ret = send_next_seg(mad_send_wr); if (ret) return IB_RMPP_RESULT_PROCESSED; return IB_RMPP_RESULT_CONSUMED; }
gpl-2.0
zrafa/linuxkernel
linux-2.6.17.new/net/ipv4/ipvs/ip_vs_wlc.c
143
3961
/* * IPVS: Weighted Least-Connection Scheduling module * * Version: $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $ * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Peter Kese <peter.kese@ijs.si> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * Wensong Zhang : changed the ip_vs_wlc_schedule to return dest * Wensong Zhang : changed to use the inactconns in scheduling * Wensong Zhang : changed some comestics things for debugging * Wensong Zhang : changed for the d-linked destination list * Wensong Zhang : added the ip_vs_wlc_update_svc * Wensong Zhang : added any dest with weight=0 is quiesced * */ #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> static int ip_vs_wlc_init_svc(struct ip_vs_service *svc) { return 0; } static int ip_vs_wlc_done_svc(struct ip_vs_service *svc) { return 0; } static int ip_vs_wlc_update_svc(struct ip_vs_service *svc) { return 0; } static inline unsigned int ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) { /* * We think the overhead of processing active connections is 256 * times higher than that of inactive connections in average. (This * 256 times might not be accurate, we will change it later) We * use the following formula to estimate the overhead now: * dest->activeconns*256 + dest->inactconns */ return (atomic_read(&dest->activeconns) << 8) + atomic_read(&dest->inactconns); } /* * Weighted Least Connection scheduling */ static struct ip_vs_dest * ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest, *least; unsigned int loh, doh; IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); /* * We calculate the load of each dest server as follows: * (dest overhead) / dest->weight * * Remember -- no floats in kernel mode!!! 
* The comparison of h1*w2 > h2*w1 is equivalent to that of * h1/w1 > h2/w2 * if every weight is larger than zero. * * The server with weight=0 is quiesced and will not receive any * new connections. */ list_for_each_entry(dest, &svc->destinations, n_list) { if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && atomic_read(&dest->weight) > 0) { least = dest; loh = ip_vs_wlc_dest_overhead(least); goto nextstage; } } return NULL; /* * Find the destination with the least load. */ nextstage: list_for_each_entry_continue(dest, &svc->destinations, n_list) { if (dest->flags & IP_VS_DEST_F_OVERLOAD) continue; doh = ip_vs_wlc_dest_overhead(dest); if (loh * atomic_read(&dest->weight) > doh * atomic_read(&least->weight)) { least = dest; loh = doh; } } IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u " "activeconns %d refcnt %d weight %d overhead %d\n", NIPQUAD(least->addr), ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); return least; } static struct ip_vs_scheduler ip_vs_wlc_scheduler = { .name = "wlc", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .init_service = ip_vs_wlc_init_svc, .done_service = ip_vs_wlc_done_svc, .update_service = ip_vs_wlc_update_svc, .schedule = ip_vs_wlc_schedule, }; static int __init ip_vs_wlc_init(void) { INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_wlc_scheduler); } static void __exit ip_vs_wlc_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_wlc_scheduler); } module_init(ip_vs_wlc_init); module_exit(ip_vs_wlc_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
cm-a7lte/device_samsung_a7lte
drivers/gator/gator_main.c
143
40318
/** * Copyright (C) ARM Limited 2010-2014. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* This version must match the gator daemon version */ #define PROTOCOL_VERSION 20 static unsigned long gator_protocol_version = PROTOCOL_VERSION; #include <linux/slab.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/suspend.h> #include <linux/module.h> #include <linux/perf_event.h> #include <linux/utsname.h> #include <linux/kthread.h> #include <asm/stacktrace.h> #include <linux/uaccess.h> #include "gator.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) #error kernels prior to 2.6.32 are not supported #endif #if defined(MODULE) && !defined(CONFIG_MODULES) #error Cannot build a module against a kernel that does not support modules. To resolve, either rebuild the kernel to support modules or build gator as part of the kernel. 
#endif #if !defined(CONFIG_GENERIC_TRACER) && !defined(CONFIG_TRACING) #error gator requires the kernel to have CONFIG_GENERIC_TRACER or CONFIG_TRACING defined #endif #ifndef CONFIG_PROFILING #error gator requires the kernel to have CONFIG_PROFILING defined #endif #ifndef CONFIG_HIGH_RES_TIMERS #error gator requires the kernel to have CONFIG_HIGH_RES_TIMERS defined to support PC sampling #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && defined(__arm__) && defined(CONFIG_SMP) && !defined(CONFIG_LOCAL_TIMERS) #error gator requires the kernel to have CONFIG_LOCAL_TIMERS defined on SMP systems #endif #if (GATOR_PERF_SUPPORT) && (!(GATOR_PERF_PMU_SUPPORT)) #ifndef CONFIG_PERF_EVENTS #error gator requires the kernel to have CONFIG_PERF_EVENTS defined to support pmu hardware counters #elif !defined CONFIG_HW_PERF_EVENTS #error gator requires the kernel to have CONFIG_HW_PERF_EVENTS defined to support pmu hardware counters #endif #endif /****************************************************************************** * DEFINES ******************************************************************************/ #define SUMMARY_BUFFER_SIZE (1*1024) #define BACKTRACE_BUFFER_SIZE (128*1024) #define NAME_BUFFER_SIZE (64*1024) #define COUNTER_BUFFER_SIZE (64*1024) /* counters have the core as part of the data and the core value in the frame header may be discarded */ #define BLOCK_COUNTER_BUFFER_SIZE (128*1024) #define ANNOTATE_BUFFER_SIZE (128*1024) /* annotate counters have the core as part of the data and the core value in the frame header may be discarded */ #define SCHED_TRACE_BUFFER_SIZE (128*1024) #define IDLE_BUFFER_SIZE (32*1024) /* idle counters have the core as part of the data and the core value in the frame header may be discarded */ #define ACTIVITY_BUFFER_SIZE (128*1024) #define NO_COOKIE 0U #define UNRESOLVED_COOKIE ~0U #define FRAME_SUMMARY 1 #define FRAME_BACKTRACE 2 #define FRAME_NAME 3 #define FRAME_COUNTER 4 #define FRAME_BLOCK_COUNTER 5 #define 
FRAME_ANNOTATE 6 #define FRAME_SCHED_TRACE 7 #define FRAME_IDLE 9 #define FRAME_ACTIVITY 13 #define MESSAGE_END_BACKTRACE 1 /* Name Frame Messages */ #define MESSAGE_COOKIE 1 #define MESSAGE_THREAD_NAME 2 #define MESSAGE_LINK 4 /* Scheduler Trace Frame Messages */ #define MESSAGE_SCHED_SWITCH 1 #define MESSAGE_SCHED_EXIT 2 /* Idle Frame Messages */ #define MESSAGE_IDLE_ENTER 1 #define MESSAGE_IDLE_EXIT 2 /* Summary Frame Messages */ #define MESSAGE_SUMMARY 1 #define MESSAGE_CORE_NAME 3 /* Activity Frame Messages */ #define MESSAGE_SWITCH 2 #define MESSAGE_EXIT 3 #define MAXSIZE_PACK32 5 #define MAXSIZE_PACK64 10 #define FRAME_HEADER_SIZE 3 #if defined(__arm__) #define PC_REG regs->ARM_pc #elif defined(__aarch64__) #define PC_REG regs->pc #else #define PC_REG regs->ip #endif enum { SUMMARY_BUF, BACKTRACE_BUF, NAME_BUF, COUNTER_BUF, BLOCK_COUNTER_BUF, ANNOTATE_BUF, SCHED_TRACE_BUF, IDLE_BUF, ACTIVITY_BUF, NUM_GATOR_BUFS }; /****************************************************************************** * Globals ******************************************************************************/ static unsigned long gator_cpu_cores; /* Size of the largest buffer. 
Effectively constant, set in gator_op_create_files */ static unsigned long userspace_buffer_size; static unsigned long gator_backtrace_depth; /* How often to commit the buffers for live in nanoseconds */ static u64 gator_live_rate; static unsigned long gator_started; static u64 gator_monotonic_started; static u64 gator_sync_time; static u64 gator_hibernate_time; static unsigned long gator_buffer_opened; static unsigned long gator_timer_count; static unsigned long gator_response_type; static DEFINE_MUTEX(start_mutex); static DEFINE_MUTEX(gator_buffer_mutex); bool event_based_sampling; static DECLARE_WAIT_QUEUE_HEAD(gator_buffer_wait); static DECLARE_WAIT_QUEUE_HEAD(gator_annotate_wait); static struct timer_list gator_buffer_wake_up_timer; static bool gator_buffer_wake_run; /* Initialize semaphore unlocked to initialize memory values */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) static DECLARE_MUTEX(gator_buffer_wake_sem); #else static DEFINE_SEMAPHORE(gator_buffer_wake_sem); #endif static struct task_struct *gator_buffer_wake_thread; static LIST_HEAD(gator_events); static DEFINE_PER_CPU(u64, last_timestamp); static bool printed_monotonic_warning; static u32 gator_cpuids[NR_CPUS]; static bool sent_core_name[NR_CPUS]; static DEFINE_PER_CPU(bool, in_scheduler_context); /****************************************************************************** * Prototypes ******************************************************************************/ static u64 gator_get_time(void); static void gator_emit_perf_time(u64 time); static void gator_op_create_files(struct super_block *sb, struct dentry *root); /* gator_buffer is protected by being per_cpu and by having IRQs * disabled when writing to it. Most marshal_* calls take care of this * except for marshal_cookie*, marshal_backtrace* and marshal_frame * where the caller is responsible for doing so. No synchronization is * needed with the backtrace buffer as it is per cpu and is only used * from the hrtimer. 
The annotate_lock must be held when using the * annotation buffer as it is not per cpu. collect_counters which is * the sole writer to the block counter frame is additionally * protected by the per cpu collecting flag. */ /* Size of the buffer, must be a power of 2. Effectively constant, set in gator_op_setup. */ static uint32_t gator_buffer_size[NUM_GATOR_BUFS]; /* gator_buffer_size - 1, bitwise and with pos to get offset into the array. Effectively constant, set in gator_op_setup. */ static uint32_t gator_buffer_mask[NUM_GATOR_BUFS]; /* Read position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are read by userspace in userspace_buffer_read */ static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_read); /* Write position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are written to the buffer */ static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_write); /* Commit position in the buffer. Initialized to zero in gator_op_setup and incremented after a frame is ready to be read by userspace */ static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_commit); /* If set to false, decreases the number of bytes returned by * buffer_bytes_available. Set in buffer_check_space if no space is * remaining. Initialized to true in gator_op_setup. This means that * if we run out of space, continue to report that no space is * available until bytes are read by userspace */ static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], buffer_space_available); /* The buffer. 
Allocated in gator_op_setup */ static DEFINE_PER_CPU(char *[NUM_GATOR_BUFS], gator_buffer); /* The time after which the buffer should be committed for live display */ static DEFINE_PER_CPU(u64, gator_buffer_commit_time); /* List of all gator events - new events must be added to this list */ #define GATOR_EVENTS_LIST \ GATOR_EVENT(gator_events_armv6_init) \ GATOR_EVENT(gator_events_armv7_init) \ GATOR_EVENT(gator_events_block_init) \ GATOR_EVENT(gator_events_ccn504_init) \ GATOR_EVENT(gator_events_irq_init) \ GATOR_EVENT(gator_events_l2c310_init) \ GATOR_EVENT(gator_events_mali_init) \ GATOR_EVENT(gator_events_mali_midgard_hw_init) \ GATOR_EVENT(gator_events_mali_midgard_init) \ GATOR_EVENT(gator_events_meminfo_init) \ GATOR_EVENT(gator_events_mmapped_init) \ GATOR_EVENT(gator_events_net_init) \ GATOR_EVENT(gator_events_perf_pmu_init) \ GATOR_EVENT(gator_events_sched_init) \ GATOR_EVENT(gator_events_scorpion_init) \ #define GATOR_EVENT(EVENT_INIT) __weak int EVENT_INIT(void); GATOR_EVENTS_LIST #undef GATOR_EVENT static int (*gator_events_list[])(void) = { #define GATOR_EVENT(EVENT_INIT) EVENT_INIT, GATOR_EVENTS_LIST #undef GATOR_EVENT }; /****************************************************************************** * Application Includes ******************************************************************************/ #include "gator_fs.c" #include "gator_buffer_write.c" #include "gator_buffer.c" #include "gator_marshaling.c" #include "gator_hrtimer_gator.c" #include "gator_cookies.c" #include "gator_annotate.c" #include "gator_trace_sched.c" #include "gator_trace_power.c" #include "gator_trace_gpu.c" #include "gator_backtrace.c" /****************************************************************************** * Misc ******************************************************************************/ static const struct gator_cpu gator_cpus[] = { { .cpuid = ARM1136, .core_name = "ARM1136", .pmnc_name = "ARM_ARM11", .dt_name = "arm,arm1136", .pmnc_counters = 3, }, { .cpuid = 
ARM1156, .core_name = "ARM1156", .pmnc_name = "ARM_ARM11", .dt_name = "arm,arm1156", .pmnc_counters = 3, }, { .cpuid = ARM1176, .core_name = "ARM1176", .pmnc_name = "ARM_ARM11", .dt_name = "arm,arm1176", .pmnc_counters = 3, }, { .cpuid = ARM11MPCORE, .core_name = "ARM11MPCore", .pmnc_name = "ARM_ARM11MPCore", .dt_name = "arm,arm11mpcore", .pmnc_counters = 3, }, { .cpuid = CORTEX_A5, .core_name = "Cortex-A5", .pmnc_name = "ARMv7_Cortex_A5", .dt_name = "arm,cortex-a5", .pmnc_counters = 2, }, { .cpuid = CORTEX_A7, .core_name = "Cortex-A7", .pmnc_name = "ARMv7_Cortex_A7", .dt_name = "arm,cortex-a7", .pmnc_counters = 4, }, { .cpuid = CORTEX_A8, .core_name = "Cortex-A8", .pmnc_name = "ARMv7_Cortex_A8", .dt_name = "arm,cortex-a8", .pmnc_counters = 4, }, { .cpuid = CORTEX_A9, .core_name = "Cortex-A9", .pmnc_name = "ARMv7_Cortex_A9", .dt_name = "arm,cortex-a9", .pmnc_counters = 6, }, { .cpuid = CORTEX_A15, .core_name = "Cortex-A15", .pmnc_name = "ARMv7_Cortex_A15", .dt_name = "arm,cortex-a15", .pmnc_counters = 6, }, { .cpuid = CORTEX_A17, .core_name = "Cortex-A17", .pmnc_name = "ARMv7_Cortex_A17", .dt_name = "arm,cortex-a17", .pmnc_counters = 6, }, { .cpuid = SCORPION, .core_name = "Scorpion", .pmnc_name = "Scorpion", .pmnc_counters = 4, }, { .cpuid = SCORPIONMP, .core_name = "ScorpionMP", .pmnc_name = "ScorpionMP", .pmnc_counters = 4, }, { .cpuid = KRAITSIM, .core_name = "KraitSIM", .pmnc_name = "Krait", .pmnc_counters = 4, }, { .cpuid = KRAIT, .core_name = "Krait", .pmnc_name = "Krait", .pmnc_counters = 4, }, { .cpuid = KRAIT_S4_PRO, .core_name = "Krait S4 Pro", .pmnc_name = "Krait", .pmnc_counters = 4, }, { .cpuid = CORTEX_A53, .core_name = "Cortex-A53", .pmnc_name = "ARM_Cortex-A53", .dt_name = "arm,cortex-a53", .pmnc_counters = 6, }, { .cpuid = CORTEX_A57, .core_name = "Cortex-A57", .pmnc_name = "ARM_Cortex-A57", .dt_name = "arm,cortex-a57", .pmnc_counters = 6, }, { .cpuid = AARCH64, .core_name = "AArch64", .pmnc_name = "ARM_AArch64", .pmnc_counters = 6, }, { .cpuid = 
OTHER, .core_name = "Other", .pmnc_name = "Other", .pmnc_counters = 6, }, {} }; const struct gator_cpu *gator_find_cpu_by_cpuid(const u32 cpuid) { int i; for (i = 0; gator_cpus[i].cpuid != 0; ++i) { const struct gator_cpu *const gator_cpu = &gator_cpus[i]; if (gator_cpu->cpuid == cpuid) return gator_cpu; } return NULL; } static const char OLD_PMU_PREFIX[] = "ARMv7 Cortex-"; static const char NEW_PMU_PREFIX[] = "ARMv7_Cortex_"; const struct gator_cpu *gator_find_cpu_by_pmu_name(const char *const name) { int i; for (i = 0; gator_cpus[i].cpuid != 0; ++i) { const struct gator_cpu *const gator_cpu = &gator_cpus[i]; if (gator_cpu->pmnc_name != NULL && /* Do the names match exactly? */ (strcasecmp(gator_cpu->pmnc_name, name) == 0 || /* Do these names match but have the old vs new prefix? */ ((strncasecmp(name, OLD_PMU_PREFIX, sizeof(OLD_PMU_PREFIX) - 1) == 0 && strncasecmp(gator_cpu->pmnc_name, NEW_PMU_PREFIX, sizeof(NEW_PMU_PREFIX) - 1) == 0 && strcasecmp(name + sizeof(OLD_PMU_PREFIX) - 1, gator_cpu->pmnc_name + sizeof(NEW_PMU_PREFIX) - 1) == 0)))) return gator_cpu; } return NULL; } u32 gator_cpuid(void) { #if defined(__arm__) || defined(__aarch64__) u32 val; #if !defined(__aarch64__) asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (val)); #else asm volatile("mrs %0, midr_el1" : "=r" (val)); #endif return (val >> 4) & 0xfff; #else return OTHER; #endif } static void gator_buffer_wake_up(unsigned long data) { wake_up(&gator_buffer_wait); } static int gator_buffer_wake_func(void *data) { for (;;) { if (down_killable(&gator_buffer_wake_sem)) break; /* Eat up any pending events */ while (!down_trylock(&gator_buffer_wake_sem)) ; if (!gator_buffer_wake_run) break; gator_buffer_wake_up(0); } return 0; } /****************************************************************************** * Commit interface ******************************************************************************/ static bool buffer_commit_ready(int *cpu, int *buftype) { int cpu_x, x; for_each_present_cpu(cpu_x) { 
for (x = 0; x < NUM_GATOR_BUFS; x++) if (per_cpu(gator_buffer_commit, cpu_x)[x] != per_cpu(gator_buffer_read, cpu_x)[x]) { *cpu = cpu_x; *buftype = x; return true; } } *cpu = -1; *buftype = -1; return false; } /****************************************************************************** * hrtimer interrupt processing ******************************************************************************/ static void gator_timer_interrupt(void) { struct pt_regs *const regs = get_irq_regs(); gator_backtrace_handler(regs); } void gator_backtrace_handler(struct pt_regs *const regs) { u64 time = gator_get_time(); int cpu = get_physical_cpu(); /* Output backtrace */ gator_add_sample(cpu, regs, time); /* Collect counters */ if (!per_cpu(collecting, cpu)) collect_counters(time, current, false); /* No buffer flushing occurs during sched switch for RT-Preempt full. The block counter frame will be flushed by collect_counters, but the sched buffer needs to be explicitly flushed */ #ifdef CONFIG_PREEMPT_RT_FULL buffer_check(cpu, SCHED_TRACE_BUF, time); #endif } static int gator_running; /* This function runs in interrupt context and on the appropriate core */ static void gator_timer_offline(void *migrate) { struct gator_interface *gi; int i, len, cpu = get_physical_cpu(); int *buffer; u64 time; gator_trace_sched_offline(); gator_trace_power_offline(); if (!migrate) gator_hrtimer_offline(); /* Offline any events and output counters */ time = gator_get_time(); if (marshal_event_header(time)) { list_for_each_entry(gi, &gator_events, list) { if (gi->offline) { len = gi->offline(&buffer, migrate); marshal_event(len, buffer); } } /* Only check after writing all counters so that time and corresponding counters appear in the same frame */ buffer_check(cpu, BLOCK_COUNTER_BUF, time); } /* Flush all buffers on this core */ for (i = 0; i < NUM_GATOR_BUFS; i++) gator_commit_buffer(cpu, i, time); } /* This function runs in interrupt context and may be running on a core other than core 'cpu' */ 
static void gator_timer_offline_dispatch(int cpu, bool migrate) { struct gator_interface *gi; list_for_each_entry(gi, &gator_events, list) { if (gi->offline_dispatch) gi->offline_dispatch(cpu, migrate); } } static void gator_timer_stop(void) { int cpu; if (gator_running) { on_each_cpu(gator_timer_offline, NULL, 1); for_each_online_cpu(cpu) { gator_timer_offline_dispatch(lcpu_to_pcpu(cpu), false); } gator_running = 0; gator_hrtimer_shutdown(); } } static void gator_send_core_name(const int cpu, const u32 cpuid) { #if defined(__arm__) || defined(__aarch64__) if (!sent_core_name[cpu] || (cpuid != gator_cpuids[cpu])) { const struct gator_cpu *const gator_cpu = gator_find_cpu_by_cpuid(cpuid); const char *core_name = NULL; char core_name_buf[32]; /* Save off this cpuid */ gator_cpuids[cpu] = cpuid; if (gator_cpu != NULL) { core_name = gator_cpu->core_name; } else { if (cpuid == -1) snprintf(core_name_buf, sizeof(core_name_buf), "Unknown"); else snprintf(core_name_buf, sizeof(core_name_buf), "Unknown (0x%.3x)", cpuid); core_name = core_name_buf; } marshal_core_name(cpu, cpuid, core_name); sent_core_name[cpu] = true; } #endif } static void gator_read_cpuid(void *arg) { gator_cpuids[get_physical_cpu()] = gator_cpuid(); } /* This function runs in interrupt context and on the appropriate core */ static void gator_timer_online(void *migrate) { struct gator_interface *gi; int len, cpu = get_physical_cpu(); int *buffer; u64 time; /* Send what is currently running on this core */ marshal_sched_trace_switch(current->pid, 0); gator_trace_power_online(); /* online any events and output counters */ time = gator_get_time(); if (marshal_event_header(time)) { list_for_each_entry(gi, &gator_events, list) { if (gi->online) { len = gi->online(&buffer, migrate); marshal_event(len, buffer); } } /* Only check after writing all counters so that time and corresponding counters appear in the same frame */ buffer_check(cpu, BLOCK_COUNTER_BUF, time); } if (!migrate) gator_hrtimer_online(); 
gator_send_core_name(cpu, gator_cpuid()); } /* This function runs in interrupt context and may be running on a core other than core 'cpu' */ static void gator_timer_online_dispatch(int cpu, bool migrate) { struct gator_interface *gi; list_for_each_entry(gi, &gator_events, list) { if (gi->online_dispatch) gi->online_dispatch(cpu, migrate); } } #include "gator_iks.c" static int gator_timer_start(unsigned long sample_rate) { int cpu; if (gator_running) { pr_notice("gator: already running\n"); return 0; } gator_running = 1; /* event based sampling trumps hr timer based sampling */ if (event_based_sampling) sample_rate = 0; if (gator_hrtimer_init(sample_rate, gator_timer_interrupt) == -1) return -1; /* Send off the previously saved cpuids */ for_each_present_cpu(cpu) { preempt_disable(); gator_send_core_name(cpu, gator_cpuids[cpu]); preempt_enable(); } gator_send_iks_core_names(); for_each_online_cpu(cpu) { gator_timer_online_dispatch(lcpu_to_pcpu(cpu), false); } on_each_cpu(gator_timer_online, NULL, 1); return 0; } static u64 gator_get_time(void) { struct timespec ts; u64 timestamp; u64 prev_timestamp; u64 delta; int cpu = smp_processor_id(); /* Match clock_gettime(CLOCK_MONOTONIC_RAW, &ts) from userspace */ getrawmonotonic(&ts); timestamp = timespec_to_ns(&ts); /* getrawmonotonic is not monotonic on all systems. Detect and * attempt to correct these cases. up to 0.5ms delta has been seen * on some systems, which can skew Streamline data when viewing at * high resolution. 
This doesn't work well with interrupts, but that * it's OK - the real concern is to catch big jumps in time */ prev_timestamp = per_cpu(last_timestamp, cpu); if (prev_timestamp <= timestamp) { per_cpu(last_timestamp, cpu) = timestamp; } else { delta = prev_timestamp - timestamp; /* Log the error once */ if (!printed_monotonic_warning && delta > 500000) { pr_err("%s: getrawmonotonic is not monotonic cpu: %i delta: %lli\nSkew in Streamline data may be present at the fine zoom levels\n", __func__, cpu, delta); printed_monotonic_warning = true; } timestamp = prev_timestamp; } return timestamp - gator_monotonic_started; } static void gator_emit_perf_time(u64 time) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) if (time >= gator_sync_time) { int cpu = get_physical_cpu(); marshal_event_single64(0, -1, local_clock()); gator_sync_time += NSEC_PER_SEC; gator_commit_buffer(cpu, COUNTER_BUF, time); } #endif } /****************************************************************************** * cpu hotplug and pm notifiers ******************************************************************************/ static int __cpuinit gator_hotcpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = lcpu_to_pcpu((long)hcpu); switch (action) { case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: smp_call_function_single(cpu, gator_timer_offline, NULL, 1); gator_timer_offline_dispatch(cpu, false); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: gator_timer_online_dispatch(cpu, false); smp_call_function_single(cpu, gator_timer_online, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block __refdata gator_hotcpu_notifier = { .notifier_call = gator_hotcpu_notify, }; /* n.b. calling "on_each_cpu" only runs on those that are online. 
* Registered linux events are not disabled, so their counters will * continue to collect */ static int gator_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy) { int cpu; struct timespec ts; switch (event) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: unregister_hotcpu_notifier(&gator_hotcpu_notifier); unregister_scheduler_tracepoints(); on_each_cpu(gator_timer_offline, NULL, 1); for_each_online_cpu(cpu) { gator_timer_offline_dispatch(lcpu_to_pcpu(cpu), false); } /* Record the wallclock hibernate time */ getnstimeofday(&ts); gator_hibernate_time = timespec_to_ns(&ts) - gator_get_time(); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: /* Adjust gator_monotonic_started for the time spent sleeping, as gator_get_time does not account for it */ if (gator_hibernate_time > 0) { getnstimeofday(&ts); gator_monotonic_started += gator_hibernate_time + gator_get_time() - timespec_to_ns(&ts); gator_hibernate_time = 0; } for_each_online_cpu(cpu) { gator_timer_online_dispatch(lcpu_to_pcpu(cpu), false); } on_each_cpu(gator_timer_online, NULL, 1); register_scheduler_tracepoints(); register_hotcpu_notifier(&gator_hotcpu_notifier); break; } return NOTIFY_OK; } static struct notifier_block gator_pm_notifier = { .notifier_call = gator_pm_notify, }; static int gator_notifier_start(void) { int retval; retval = register_hotcpu_notifier(&gator_hotcpu_notifier); if (retval == 0) retval = register_pm_notifier(&gator_pm_notifier); return retval; } static void gator_notifier_stop(void) { unregister_pm_notifier(&gator_pm_notifier); unregister_hotcpu_notifier(&gator_hotcpu_notifier); } /****************************************************************************** * Main ******************************************************************************/ static void gator_summary(void) { u64 timestamp, uptime; struct timespec ts; char uname_buf[512]; snprintf(uname_buf, sizeof(uname_buf), "%s %s %s %s %s GNU/Linux", utsname()->sysname, utsname()->nodename, 
utsname()->release, utsname()->version, utsname()->machine); getnstimeofday(&ts); timestamp = timespec_to_ns(&ts); /* Similar to reading /proc/uptime from fs/proc/uptime.c, calculate uptime */ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) { void (*m2b)(struct timespec *ts); do_posix_clock_monotonic_gettime(&ts); /* monotonic_to_bootbased is not defined for some versions of Android */ m2b = symbol_get(monotonic_to_bootbased); if (m2b) m2b(&ts); } #else get_monotonic_boottime(&ts); #endif uptime = timespec_to_ns(&ts); /* Disable preemption as gator_get_time calls smp_processor_id to verify time is monotonic */ preempt_disable(); /* Set monotonic_started to zero as gator_get_time is uptime minus monotonic_started */ gator_monotonic_started = 0; gator_monotonic_started = gator_get_time(); marshal_summary(timestamp, uptime, gator_monotonic_started, uname_buf); gator_sync_time = 0; gator_emit_perf_time(gator_monotonic_started); preempt_enable(); } int gator_events_install(struct gator_interface *interface) { list_add_tail(&interface->list, &gator_events); return 0; } int gator_events_get_key(void) { /* key 0 is reserved as a timestamp. key 1 is reserved as the marker * for thread specific counters. key 2 is reserved as the marker for * core. Odd keys are assigned by the driver, even keys by the * daemon. 
*/ static int key = 3; const int ret = key; key += 2; return ret; } static int gator_init(void) { int i; calc_first_cluster_size(); /* events sources */ for (i = 0; i < ARRAY_SIZE(gator_events_list); i++) if (gator_events_list[i]) gator_events_list[i](); gator_trace_sched_init(); gator_trace_power_init(); return 0; } static void gator_exit(void) { struct gator_interface *gi; list_for_each_entry(gi, &gator_events, list) if (gi->shutdown) gi->shutdown(); } static int gator_start(void) { unsigned long cpu, i; struct gator_interface *gi; gator_buffer_wake_run = true; gator_buffer_wake_thread = kthread_run(gator_buffer_wake_func, NULL, "gator_bwake"); if (IS_ERR(gator_buffer_wake_thread)) goto bwake_failure; if (gator_migrate_start()) goto migrate_failure; /* Initialize the buffer with the frame type and core */ for_each_present_cpu(cpu) { for (i = 0; i < NUM_GATOR_BUFS; i++) marshal_frame(cpu, i); per_cpu(last_timestamp, cpu) = 0; } printed_monotonic_warning = false; /* Capture the start time */ gator_summary(); /* start all events */ list_for_each_entry(gi, &gator_events, list) { if (gi->start && gi->start() != 0) { struct list_head *ptr = gi->list.prev; while (ptr != &gator_events) { gi = list_entry(ptr, struct gator_interface, list); if (gi->stop) gi->stop(); ptr = ptr->prev; } goto events_failure; } } /* cookies shall be initialized before trace_sched_start() and gator_timer_start() */ if (cookies_initialize()) goto cookies_failure; if (gator_annotate_start()) goto annotate_failure; if (gator_trace_sched_start()) goto sched_failure; if (gator_trace_power_start()) goto power_failure; if (gator_trace_gpu_start()) goto gpu_failure; if (gator_timer_start(gator_timer_count)) goto timer_failure; if (gator_notifier_start()) goto notifier_failure; return 0; notifier_failure: gator_timer_stop(); timer_failure: gator_trace_gpu_stop(); gpu_failure: gator_trace_power_stop(); power_failure: gator_trace_sched_stop(); sched_failure: gator_annotate_stop(); annotate_failure: 
cookies_release(); cookies_failure: /* stop all events */ list_for_each_entry(gi, &gator_events, list) if (gi->stop) gi->stop(); events_failure: gator_migrate_stop(); migrate_failure: gator_buffer_wake_run = false; up(&gator_buffer_wake_sem); gator_buffer_wake_thread = NULL; bwake_failure: return -1; } static void gator_stop(void) { struct gator_interface *gi; gator_annotate_stop(); gator_trace_sched_stop(); gator_trace_power_stop(); gator_trace_gpu_stop(); /* stop all interrupt callback reads before tearing down other interfaces */ gator_notifier_stop(); /* should be called before gator_timer_stop to avoid re-enabling the hrtimer after it has been offlined */ gator_timer_stop(); /* stop all events */ list_for_each_entry(gi, &gator_events, list) if (gi->stop) gi->stop(); gator_migrate_stop(); gator_buffer_wake_run = false; up(&gator_buffer_wake_sem); gator_buffer_wake_thread = NULL; } /****************************************************************************** * Filesystem ******************************************************************************/ /* fopen("buffer") */ static int gator_op_setup(void) { int err = 0; int cpu, i; mutex_lock(&start_mutex); gator_buffer_size[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE; gator_buffer_mask[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE - 1; gator_buffer_size[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE; gator_buffer_mask[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE - 1; gator_buffer_size[NAME_BUF] = NAME_BUFFER_SIZE; gator_buffer_mask[NAME_BUF] = NAME_BUFFER_SIZE - 1; gator_buffer_size[COUNTER_BUF] = COUNTER_BUFFER_SIZE; gator_buffer_mask[COUNTER_BUF] = COUNTER_BUFFER_SIZE - 1; gator_buffer_size[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE; gator_buffer_mask[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE - 1; gator_buffer_size[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE; gator_buffer_mask[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE - 1; gator_buffer_size[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE; gator_buffer_mask[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE - 
1; gator_buffer_size[IDLE_BUF] = IDLE_BUFFER_SIZE; gator_buffer_mask[IDLE_BUF] = IDLE_BUFFER_SIZE - 1; gator_buffer_size[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE; gator_buffer_mask[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE - 1; /* Initialize percpu per buffer variables */ for (i = 0; i < NUM_GATOR_BUFS; i++) { /* Verify buffers are a power of 2 */ if (gator_buffer_size[i] & (gator_buffer_size[i] - 1)) { err = -ENOEXEC; goto setup_error; } for_each_present_cpu(cpu) { per_cpu(gator_buffer_read, cpu)[i] = 0; per_cpu(gator_buffer_write, cpu)[i] = 0; per_cpu(gator_buffer_commit, cpu)[i] = 0; per_cpu(buffer_space_available, cpu)[i] = true; per_cpu(gator_buffer_commit_time, cpu) = gator_live_rate; /* Annotation is a special case that only uses a single buffer */ if (cpu > 0 && i == ANNOTATE_BUF) { per_cpu(gator_buffer, cpu)[i] = NULL; continue; } per_cpu(gator_buffer, cpu)[i] = vmalloc(gator_buffer_size[i]); if (!per_cpu(gator_buffer, cpu)[i]) { err = -ENOMEM; goto setup_error; } } } setup_error: mutex_unlock(&start_mutex); return err; } /* Actually start profiling (echo 1>/dev/gator/enable) */ static int gator_op_start(void) { int err = 0; mutex_lock(&start_mutex); if (gator_started || gator_start()) err = -EINVAL; else gator_started = 1; mutex_unlock(&start_mutex); return err; } /* echo 0>/dev/gator/enable */ static void gator_op_stop(void) { mutex_lock(&start_mutex); if (gator_started) { gator_stop(); mutex_lock(&gator_buffer_mutex); gator_started = 0; gator_monotonic_started = 0; cookies_release(); wake_up(&gator_buffer_wait); mutex_unlock(&gator_buffer_mutex); } mutex_unlock(&start_mutex); } static void gator_shutdown(void) { int cpu, i; mutex_lock(&start_mutex); for_each_present_cpu(cpu) { mutex_lock(&gator_buffer_mutex); for (i = 0; i < NUM_GATOR_BUFS; i++) { vfree(per_cpu(gator_buffer, cpu)[i]); per_cpu(gator_buffer, cpu)[i] = NULL; per_cpu(gator_buffer_read, cpu)[i] = 0; per_cpu(gator_buffer_write, cpu)[i] = 0; per_cpu(gator_buffer_commit, cpu)[i] = 0; 
per_cpu(buffer_space_available, cpu)[i] = true; per_cpu(gator_buffer_commit_time, cpu) = 0; } mutex_unlock(&gator_buffer_mutex); } memset(&sent_core_name, 0, sizeof(sent_core_name)); mutex_unlock(&start_mutex); } static int gator_set_backtrace(unsigned long val) { int err = 0; mutex_lock(&start_mutex); if (gator_started) err = -EBUSY; else gator_backtrace_depth = val; mutex_unlock(&start_mutex); return err; } static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return gatorfs_ulong_to_user(gator_started, buf, count, offset); } static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { unsigned long val; int retval; if (*offset) return -EINVAL; retval = gatorfs_ulong_from_user(&val, buf, count); if (retval) return retval; if (val) retval = gator_op_start(); else gator_op_stop(); if (retval) return retval; return count; } static const struct file_operations enable_fops = { .read = enable_read, .write = enable_write, }; static int userspace_buffer_open(struct inode *inode, struct file *file) { int err = -EPERM; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (test_and_set_bit_lock(0, &gator_buffer_opened)) return -EBUSY; err = gator_op_setup(); if (err) goto fail; /* NB: the actual start happens from userspace * echo 1 >/dev/gator/enable */ return 0; fail: __clear_bit_unlock(0, &gator_buffer_opened); return err; } static int userspace_buffer_release(struct inode *inode, struct file *file) { gator_op_stop(); gator_shutdown(); __clear_bit_unlock(0, &gator_buffer_opened); return 0; } static ssize_t userspace_buffer_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { int commit, length1, length2, read; char *buffer1; char *buffer2; int cpu, buftype; int written = 0; /* ensure there is enough space for a whole frame */ if (count < userspace_buffer_size || *offset) return -EINVAL; /* sleep until the condition is true or a signal is received the * condition is checked 
each time gator_buffer_wait is woken up */ wait_event_interruptible(gator_buffer_wait, buffer_commit_ready(&cpu, &buftype) || !gator_started); if (signal_pending(current)) return -EINTR; if (buftype == -1 || cpu == -1) return 0; mutex_lock(&gator_buffer_mutex); do { read = per_cpu(gator_buffer_read, cpu)[buftype]; commit = per_cpu(gator_buffer_commit, cpu)[buftype]; /* May happen if the buffer is freed during pending reads. */ if (!per_cpu(gator_buffer, cpu)[buftype]) break; /* determine the size of two halves */ length1 = commit - read; length2 = 0; buffer1 = &(per_cpu(gator_buffer, cpu)[buftype][read]); buffer2 = &(per_cpu(gator_buffer, cpu)[buftype][0]); if (length1 < 0) { length1 = gator_buffer_size[buftype] - read; length2 = commit; } if (length1 + length2 > count - written) break; /* start, middle or end */ if (length1 > 0 && copy_to_user(&buf[written], buffer1, length1)) break; /* possible wrap around */ if (length2 > 0 && copy_to_user(&buf[written + length1], buffer2, length2)) break; per_cpu(gator_buffer_read, cpu)[buftype] = commit; written += length1 + length2; /* Wake up annotate_write if more space is available */ if (buftype == ANNOTATE_BUF) wake_up(&gator_annotate_wait); } while (buffer_commit_ready(&cpu, &buftype)); mutex_unlock(&gator_buffer_mutex); /* kick just in case we've lost an SMP event */ wake_up(&gator_buffer_wait); return written > 0 ? 
written : -EFAULT; } static const struct file_operations gator_event_buffer_fops = { .open = userspace_buffer_open, .release = userspace_buffer_release, .read = userspace_buffer_read, }; static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return gatorfs_ulong_to_user(gator_backtrace_depth, buf, count, offset); } static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { unsigned long val; int retval; if (*offset) return -EINVAL; retval = gatorfs_ulong_from_user(&val, buf, count); if (retval) return retval; retval = gator_set_backtrace(val); if (retval) return retval; return count; } static const struct file_operations depth_fops = { .read = depth_read, .write = depth_write }; static void gator_op_create_files(struct super_block *sb, struct dentry *root) { struct dentry *dir; struct gator_interface *gi; int cpu; /* reinitialize default values */ gator_cpu_cores = 0; for_each_present_cpu(cpu) { gator_cpu_cores++; } userspace_buffer_size = BACKTRACE_BUFFER_SIZE; gator_response_type = 1; gator_live_rate = 0; gatorfs_create_file(sb, root, "enable", &enable_fops); gatorfs_create_file(sb, root, "buffer", &gator_event_buffer_fops); gatorfs_create_file(sb, root, "backtrace_depth", &depth_fops); gatorfs_create_ro_ulong(sb, root, "cpu_cores", &gator_cpu_cores); gatorfs_create_ro_ulong(sb, root, "buffer_size", &userspace_buffer_size); gatorfs_create_ulong(sb, root, "tick", &gator_timer_count); gatorfs_create_ulong(sb, root, "response_type", &gator_response_type); gatorfs_create_ro_ulong(sb, root, "version", &gator_protocol_version); gatorfs_create_ro_u64(sb, root, "started", &gator_monotonic_started); gatorfs_create_u64(sb, root, "live_rate", &gator_live_rate); /* Annotate interface */ gator_annotate_create_files(sb, root); /* Linux Events */ dir = gatorfs_mkdir(sb, root, "events"); list_for_each_entry(gi, &gator_events, list) if (gi->create_files) gi->create_files(sb, dir); /* Sched Events 
*/ sched_trace_create_files(sb, dir); /* Power interface */ gator_trace_power_create_files(sb, dir); } /****************************************************************************** * Module ******************************************************************************/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) #define GATOR_TRACEPOINTS \ GATOR_HANDLE_TRACEPOINT(block_rq_complete); \ GATOR_HANDLE_TRACEPOINT(cpu_frequency); \ GATOR_HANDLE_TRACEPOINT(cpu_idle); \ GATOR_HANDLE_TRACEPOINT(cpu_migrate_begin); \ GATOR_HANDLE_TRACEPOINT(cpu_migrate_current); \ GATOR_HANDLE_TRACEPOINT(cpu_migrate_finish); \ GATOR_HANDLE_TRACEPOINT(irq_handler_exit); \ GATOR_HANDLE_TRACEPOINT(mali_hw_counter); \ GATOR_HANDLE_TRACEPOINT(mali_job_slots_event); \ GATOR_HANDLE_TRACEPOINT(mali_mmu_as_in_use); \ GATOR_HANDLE_TRACEPOINT(mali_mmu_as_released); \ GATOR_HANDLE_TRACEPOINT(mali_page_fault_insert_pages); \ GATOR_HANDLE_TRACEPOINT(mali_pm_status); \ GATOR_HANDLE_TRACEPOINT(mali_sw_counter); \ GATOR_HANDLE_TRACEPOINT(mali_sw_counters); \ GATOR_HANDLE_TRACEPOINT(mali_timeline_event); \ GATOR_HANDLE_TRACEPOINT(mali_total_alloc_pages_change); \ GATOR_HANDLE_TRACEPOINT(mm_page_alloc); \ GATOR_HANDLE_TRACEPOINT(mm_page_free); \ GATOR_HANDLE_TRACEPOINT(mm_page_free_batched); \ GATOR_HANDLE_TRACEPOINT(sched_process_exec); \ GATOR_HANDLE_TRACEPOINT(sched_process_fork); \ GATOR_HANDLE_TRACEPOINT(sched_process_free); \ GATOR_HANDLE_TRACEPOINT(sched_switch); \ GATOR_HANDLE_TRACEPOINT(softirq_exit); \ GATOR_HANDLE_TRACEPOINT(task_rename); \ #define GATOR_HANDLE_TRACEPOINT(probe_name) \ struct tracepoint *gator_tracepoint_##probe_name GATOR_TRACEPOINTS; #undef GATOR_HANDLE_TRACEPOINT static void gator_save_tracepoint(struct tracepoint *tp, void *priv) { #define GATOR_HANDLE_TRACEPOINT(probe_name) \ do { \ if (strcmp(tp->name, #probe_name) == 0) { \ gator_tracepoint_##probe_name = tp; \ return; \ } \ } while (0) GATOR_TRACEPOINTS; #undef GATOR_HANDLE_TRACEPOINT } #else #define 
for_each_kernel_tracepoint(fct, priv) #endif static int __init gator_module_init(void) { for_each_kernel_tracepoint(gator_save_tracepoint, NULL); if (gatorfs_register()) return -1; if (gator_init()) { gatorfs_unregister(); return -1; } setup_timer(&gator_buffer_wake_up_timer, gator_buffer_wake_up, 0); /* Initialize the list of cpuids */ memset(gator_cpuids, -1, sizeof(gator_cpuids)); on_each_cpu(gator_read_cpuid, NULL, 1); return 0; } static void __exit gator_module_exit(void) { del_timer_sync(&gator_buffer_wake_up_timer); tracepoint_synchronize_unregister(); gator_exit(); gatorfs_unregister(); } module_init(gator_module_init); module_exit(gator_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("ARM Ltd"); MODULE_DESCRIPTION("Gator system profiler"); #define STRIFY2(ARG) #ARG #define STRIFY(ARG) STRIFY2(ARG) MODULE_VERSION(STRIFY(PROTOCOL_VERSION));
gpl-2.0
gnychis/droid-kernel-coexisyst
drivers/gpio/gpiolib.c
399
37292
#include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/gpio.h> #include <linux/idr.h> /* Optional implementation infrastructure for GPIO interfaces. * * Platforms may want to use this if they tend to use very many GPIOs * that aren't part of a System-On-Chip core; or across I2C/SPI/etc. * * When kernel footprint or instruction count is an issue, simpler * implementations may be preferred. The GPIO programming interface * allows for inlining speed-critical get/set operations for common * cases, so that access to SOC-integrated GPIOs can sometimes cost * only an instruction or two per bit. */ /* When debugging, extend minimal trust to callers and platform code. * Also emit diagnostic messages that may help initial bringup, when * board setup or driver bugs are most common. * * Otherwise, minimize overhead in what may be bitbanging codepaths. */ #ifdef DEBUG #define extra_checks 1 #else #define extra_checks 0 #endif /* gpio_lock prevents conflicts during gpio_desc[] table updates. * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. 
*/ static DEFINE_SPINLOCK(gpio_lock); struct gpio_desc { struct gpio_chip *chip; unsigned long flags; /* flag symbols are bit numbers */ #define FLAG_REQUESTED 0 #define FLAG_IS_OUT 1 #define FLAG_RESERVED 2 #define FLAG_EXPORT 3 /* protected by sysfs_lock */ #define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */ #define FLAG_TRIG_FALL 5 /* trigger on falling edge */ #define FLAG_TRIG_RISE 6 /* trigger on rising edge */ #define PDESC_ID_SHIFT 16 /* add new flags before this one */ #define GPIO_FLAGS_MASK ((1 << PDESC_ID_SHIFT) - 1) #define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) #ifdef CONFIG_DEBUG_FS const char *label; #endif }; static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; #ifdef CONFIG_GPIO_SYSFS struct poll_desc { struct work_struct work; struct sysfs_dirent *value_sd; }; static struct idr pdesc_idr; #endif static inline void desc_set_label(struct gpio_desc *d, const char *label) { #ifdef CONFIG_DEBUG_FS d->label = label; #endif } /* Warn when drivers omit gpio_request() calls -- legal but ill-advised * when setting direction, and otherwise illegal. Until board setup code * and drivers use explicit requests everywhere (which won't happen when * those calls have no teeth) we can't avoid autorequesting. This nag * message should motivate switching to explicit requests... so should * the weaker cleanup after faults, compared to gpio_request(). * * NOTE: the autorequest mechanism is going away; at this point it's * only "legal" in the sense that (old) code using it won't break yet, * but instead only triggers a WARN() stack dump. 
*/ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset) { const struct gpio_chip *chip = desc->chip; const int gpio = chip->base + offset; if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, "autorequest GPIO-%d\n", gpio)) { if (!try_module_get(chip->owner)) { pr_err("GPIO-%d: module can't be gotten \n", gpio); clear_bit(FLAG_REQUESTED, &desc->flags); /* lose */ return -EIO; } desc_set_label(desc, "[auto]"); /* caller must chip->request() w/o spinlock */ if (chip->request) return 1; } return 0; } /* caller holds gpio_lock *OR* gpio is marked as requested */ static inline struct gpio_chip *gpio_to_chip(unsigned gpio) { return gpio_desc[gpio].chip; } /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ static int gpiochip_find_base(int ngpio) { int i; int spare = 0; int base = -ENOSPC; for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) { struct gpio_desc *desc = &gpio_desc[i]; struct gpio_chip *chip = desc->chip; if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) { spare++; if (spare == ngpio) { base = i; break; } } else { spare = 0; if (chip) i -= chip->ngpio - 1; } } if (gpio_is_valid(base)) pr_debug("%s: found new base at %d\n", __func__, base); return base; } /** * gpiochip_reserve() - reserve range of gpios to use with platform code only * @start: starting gpio number * @ngpio: number of gpios to reserve * Context: platform init, potentially before irqs or kmalloc will work * * Returns a negative errno if any gpio within the range is already reserved * or registered, else returns zero as a success code. Use this function * to mark a range of gpios as unavailable for dynamic gpio number allocation, * for example because its driver support is not yet loaded. 
*/ int __init gpiochip_reserve(int start, int ngpio) { int ret = 0; unsigned long flags; int i; if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1)) return -EINVAL; spin_lock_irqsave(&gpio_lock, flags); for (i = start; i < start + ngpio; i++) { struct gpio_desc *desc = &gpio_desc[i]; if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) { ret = -EBUSY; goto err; } set_bit(FLAG_RESERVED, &desc->flags); } pr_debug("%s: reserved gpios from %d to %d\n", __func__, start, start + ngpio - 1); err: spin_unlock_irqrestore(&gpio_lock, flags); return ret; } #ifdef CONFIG_GPIO_SYSFS /* lock protects against unexport_gpio() being called while * sysfs files are active. */ static DEFINE_MUTEX(sysfs_lock); /* * /sys/class/gpio/gpioN... only for GPIOs that are exported * /direction * * MAY BE OMITTED if kernel won't allow direction changes * * is read/write as "in" or "out" * * may also be written as "high" or "low", initializing * output value as specified ("out" implies "low") * /value * * always readable, subject to hardware behavior * * may be writable, as zero/nonzero * /edge * * configures behavior of poll(2) on /value * * available only if pin can generate IRQs on input * * is read/write as "none", "falling", "rising", or "both" */ static ssize_t gpio_direction_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%s\n", test_bit(FLAG_IS_OUT, &desc->flags) ? 
"out" : "in"); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (sysfs_streq(buf, "high")) status = gpio_direction_output(gpio, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpio_direction_output(gpio, 0); else if (sysfs_streq(buf, "in")) status = gpio_direction_input(gpio); else status = -EINVAL; mutex_unlock(&sysfs_lock); return status ? : size; } static const DEVICE_ATTR(direction, 0644, gpio_direction_show, gpio_direction_store); static ssize_t gpio_value_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio)); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (!test_bit(FLAG_IS_OUT, &desc->flags)) status = -EPERM; else { long value; status = strict_strtol(buf, 0, &value); if (status == 0) { gpio_set_value_cansleep(gpio, value != 0); status = size; } } mutex_unlock(&sysfs_lock); return status; } static /*const*/ DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { struct work_struct *work = priv; schedule_work(work); return IRQ_HANDLED; } static void gpio_notify_sysfs(struct work_struct *work) { struct poll_desc 
*pdesc; pdesc = container_of(work, struct poll_desc, work); sysfs_notify_dirent(pdesc->value_sd); } static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, unsigned long gpio_flags) { struct poll_desc *pdesc; unsigned long irq_flags; int ret, irq, id; if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) return 0; irq = gpio_to_irq(desc - gpio_desc); if (irq < 0) return -EIO; id = desc->flags >> PDESC_ID_SHIFT; pdesc = idr_find(&pdesc_idr, id); if (pdesc) { free_irq(irq, &pdesc->work); cancel_work_sync(&pdesc->work); } desc->flags &= ~GPIO_TRIGGER_MASK; if (!gpio_flags) { ret = 0; goto free_sd; } irq_flags = IRQF_SHARED; if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) irq_flags |= IRQF_TRIGGER_FALLING; if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) irq_flags |= IRQF_TRIGGER_RISING; if (!pdesc) { pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL); if (!pdesc) { ret = -ENOMEM; goto err_out; } do { ret = -ENOMEM; if (idr_pre_get(&pdesc_idr, GFP_KERNEL)) ret = idr_get_new_above(&pdesc_idr, pdesc, 1, &id); } while (ret == -EAGAIN); if (ret) goto free_mem; desc->flags &= GPIO_FLAGS_MASK; desc->flags |= (unsigned long)id << PDESC_ID_SHIFT; if (desc->flags >> PDESC_ID_SHIFT != id) { ret = -ERANGE; goto free_id; } pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value"); if (!pdesc->value_sd) { ret = -ENODEV; goto free_id; } INIT_WORK(&pdesc->work, gpio_notify_sysfs); } ret = request_irq(irq, gpio_sysfs_irq, irq_flags, "gpiolib", &pdesc->work); if (ret) goto free_sd; desc->flags |= gpio_flags; return 0; free_sd: sysfs_put(pdesc->value_sd); free_id: idr_remove(&pdesc_idr, id); desc->flags &= GPIO_FLAGS_MASK; free_mem: kfree(pdesc); err_out: return ret; } static const struct { const char *name; unsigned long flags; } trigger_types[] = { { "none", 0 }, { "falling", BIT(FLAG_TRIG_FALL) }, { "rising", BIT(FLAG_TRIG_RISE) }, { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, }; static ssize_t gpio_edge_show(struct device *dev, struct device_attribute *attr, char *buf) { const 
struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { int i; status = 0; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if ((desc->flags & GPIO_TRIGGER_MASK) == trigger_types[i].flags) { status = sprintf(buf, "%s\n", trigger_types[i].name); break; } } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; int i; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if (sysfs_streq(trigger_types[i].name, buf)) goto found; return -EINVAL; found: mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { status = gpio_setup_irq(desc, dev, trigger_types[i].flags); if (!status) status = size; } mutex_unlock(&sysfs_lock); return status; } static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); static const struct attribute *gpio_attrs[] = { &dev_attr_direction.attr, &dev_attr_value.attr, NULL, }; static const struct attribute_group gpio_attr_group = { .attrs = (struct attribute **) gpio_attrs, }; /* * /sys/class/gpio/gpiochipN/ * /base ... matching gpio_chip.base (N) * /label ... matching gpio_chip.label * /ngpio ... matching gpio_chip.ngpio */ static ssize_t chip_base_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", chip->base); } static DEVICE_ATTR(base, 0444, chip_base_show, NULL); static ssize_t chip_label_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%s\n", chip->label ? 
: ""); } static DEVICE_ATTR(label, 0444, chip_label_show, NULL); static ssize_t chip_ngpio_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%u\n", chip->ngpio); } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); static const struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; static const struct attribute_group gpiochip_attr_group = { .attrs = (struct attribute **) gpiochip_attrs, }; /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) * /sys/class/gpio/unexport ... write-only * integer N ... number of GPIO to unexport */ static ssize_t export_store(struct class *class, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ status = gpio_request(gpio, "sysfs"); if (status < 0) goto done; status = gpio_export(gpio, true); if (status < 0) gpio_free(gpio); else set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags); done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; } static ssize_t unexport_store(struct class *class, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; status = -EINVAL; /* reject bogus commands (gpio_unexport ignores them) */ if (!gpio_is_valid(gpio)) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) { status = 0; gpio_free(gpio); } done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? 
: len; } static struct class_attribute gpio_class_attrs[] = { __ATTR(export, 0200, NULL, export_store), __ATTR(unexport, 0200, NULL, unexport_store), __ATTR_NULL, }; static struct class gpio_class = { .name = "gpio", .owner = THIS_MODULE, .class_attrs = gpio_class_attrs, }; /** * gpio_export - export a GPIO through sysfs * @gpio: gpio to make available, already requested * @direction_may_change: true if userspace may change gpio direction * Context: arch_initcall or later * * When drivers want to make a GPIO accessible to userspace after they * have requested it -- perhaps while debugging, or as part of their * public interface -- they may use this routine. If the GPIO can * change direction (some can't) and the caller allows it, userspace * will see "direction" sysfs attribute which may be used to change * the gpio's direction. A "value" attribute will always be provided. * * Returns zero on success, else an error. */ int gpio_export(unsigned gpio, bool direction_may_change) { unsigned long flags; struct gpio_desc *desc; int status = -EINVAL; char *ioname = NULL; /* can't export until sysfs is available ... */ if (!gpio_class.p) { pr_debug("%s: called too early!\n", __func__); return -ENOENT; } if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; if (test_bit(FLAG_REQUESTED, &desc->flags) && !test_bit(FLAG_EXPORT, &desc->flags)) { status = 0; if (!desc->chip->direction_input || !desc->chip->direction_output) direction_may_change = false; } spin_unlock_irqrestore(&gpio_lock, flags); if (desc->chip->names && desc->chip->names[gpio - desc->chip->base]) ioname = desc->chip->names[gpio - desc->chip->base]; if (status == 0) { struct device *dev; dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), desc, ioname ? 
ioname : "gpio%d", gpio); if (!IS_ERR(dev)) { if (direction_may_change) status = sysfs_create_group(&dev->kobj, &gpio_attr_group); else status = device_create_file(dev, &dev_attr_value); if (!status && gpio_to_irq(gpio) >= 0 && (direction_may_change || !test_bit(FLAG_IS_OUT, &desc->flags))) status = device_create_file(dev, &dev_attr_edge); if (status != 0) device_unregister(dev); } else status = PTR_ERR(dev); if (status == 0) set_bit(FLAG_EXPORT, &desc->flags); } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export); static int match_export(struct device *dev, void *data) { return dev_get_drvdata(dev) == data; } /** * gpio_export_link - create a sysfs link to an exported GPIO node * @dev: device under which to create symlink * @name: name of the symlink * @gpio: gpio to create symlink to, already exported * * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * * Returns zero on success, else an error. */ int gpio_export_link(struct device *dev, const char *name, unsigned gpio) { struct gpio_desc *desc; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *tdev; tdev = class_find_device(&gpio_class, NULL, desc, match_export); if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); } else { status = -ENODEV; } } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export_link); /** * gpio_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable * * This is implicit on gpio_free(). 
*/ void gpio_unexport(unsigned gpio) { struct gpio_desc *desc; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *dev = NULL; dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); clear_bit(FLAG_EXPORT, &desc->flags); put_device(dev); device_unregister(dev); status = 0; } else status = -ENODEV; } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); } EXPORT_SYMBOL_GPL(gpio_unexport); static int gpiochip_export(struct gpio_chip *chip) { int status; struct device *dev; /* Many systems register gpio chips for SOC support very early, * before driver model support is available. In those cases we * export this later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. */ if (!gpio_class.p) return 0; /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, "gpiochip%d", chip->base); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpiochip_attr_group); } else status = PTR_ERR(dev); chip->exported = (status == 0); mutex_unlock(&sysfs_lock); if (status) { unsigned long flags; unsigned gpio; spin_lock_irqsave(&gpio_lock, flags); gpio = chip->base; while (gpio_desc[gpio].chip == chip) gpio_desc[gpio++].chip = NULL; spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: chip %s status %d\n", __func__, chip->label, status); } return status; } static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); chip->exported = 0; status = 0; } else status = -ENODEV; mutex_unlock(&sysfs_lock); if (status) pr_debug("%s: chip %s status %d\n", __func__, 
chip->label, status); } static int __init gpiolib_sysfs_init(void) { int status; unsigned long flags; unsigned gpio; idr_init(&pdesc_idr); status = class_register(&gpio_class); if (status < 0) return status; /* Scan and register the gpio_chips which registered very * early (e.g. before the class_register above was called). * * We run before arch_initcall() so chip->dev nodes can have * registered, and so arch_initcall() can always gpio_export(). */ spin_lock_irqsave(&gpio_lock, flags); for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) { struct gpio_chip *chip; chip = gpio_desc[gpio].chip; if (!chip || chip->exported) continue; spin_unlock_irqrestore(&gpio_lock, flags); status = gpiochip_export(chip); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return status; } postcore_initcall(gpiolib_sysfs_init); #else static inline int gpiochip_export(struct gpio_chip *chip) { return 0; } static inline void gpiochip_unexport(struct gpio_chip *chip) { } #endif /* CONFIG_GPIO_SYSFS */ /** * gpiochip_add() - register a gpio_chip * @chip: the chip to register, with chip->base initialized * Context: potentially before irqs or kmalloc will work * * Returns a negative errno if the chip can't be registered, such as * because the chip->base is invalid or already associated with a * different chip. Otherwise it returns zero as a success code. * * When gpiochip_add() is called very early during boot, so that GPIOs * can be freely used, the chip->dev device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. 
*/ int gpiochip_add(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; int base = chip->base; if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1)) && base >= 0) { status = -EINVAL; goto fail; } spin_lock_irqsave(&gpio_lock, flags); if (base < 0) { base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; goto unlock; } chip->base = base; } /* these GPIO numbers must not be managed by another gpio_chip */ for (id = base; id < base + chip->ngpio; id++) { if (gpio_desc[id].chip != NULL) { status = -EBUSY; break; } } if (status == 0) { for (id = base; id < base + chip->ngpio; id++) { gpio_desc[id].chip = chip; /* REVISIT: most hardware initializes GPIOs as * inputs (often with pullups enabled) so power * usage is minimized. Linux code should set the * gpio direction first thing; but until it does, * we may expose the wrong direction in sysfs. */ gpio_desc[id].flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; } } unlock: spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) status = gpiochip_export(chip); fail: /* failures here can mean systems won't boot... */ if (status) pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return status; } EXPORT_SYMBOL_GPL(gpiochip_add); /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. 
*/ int gpiochip_remove(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; spin_lock_irqsave(&gpio_lock, flags); for (id = chip->base; id < chip->base + chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) { status = -EBUSY; break; } } if (status == 0) { for (id = chip->base; id < chip->base + chip->ngpio; id++) gpio_desc[id].chip = NULL; } spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) gpiochip_unexport(chip); return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); /* These "optional" allocation calls help prevent drivers from stomping * on each other, and help provide better diagnostics in debugfs. * They're called even less than the "set direction" calls. */ int gpio_request(unsigned gpio, const char *label) { struct gpio_desc *desc; struct gpio_chip *chip; int status = -EINVAL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto done; desc = &gpio_desc[gpio]; chip = desc->chip; if (chip == NULL) goto done; if (!try_module_get(chip->owner)) goto done; /* NOTE: gpio_request() can be called in early boot, * before IRQs are enabled, for non-sleeping (SOC) GPIOs. */ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { desc_set_label(desc, label ? : "?"); status = 0; } else { status = -EBUSY; module_put(chip->owner); goto done; } if (chip->request) { /* chip->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); status = chip->request(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); if (status < 0) { desc_set_label(desc, NULL); module_put(chip->owner); clear_bit(FLAG_REQUESTED, &desc->flags); } } done: if (status) pr_debug("gpio_request: gpio-%d (%s) status %d\n", gpio, label ? 
: "?", status); spin_unlock_irqrestore(&gpio_lock, flags); return status; } EXPORT_SYMBOL_GPL(gpio_request); void gpio_free(unsigned gpio) { unsigned long flags; struct gpio_desc *desc; struct gpio_chip *chip; might_sleep(); if (!gpio_is_valid(gpio)) { WARN_ON(extra_checks); return; } gpio_unexport(gpio); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; chip = desc->chip; if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { if (chip->free) { spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); chip->free(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); } desc_set_label(desc, NULL); module_put(desc->chip->owner); clear_bit(FLAG_REQUESTED, &desc->flags); } else WARN_ON(extra_checks); spin_unlock_irqrestore(&gpio_lock, flags); } EXPORT_SYMBOL_GPL(gpio_free); /** * gpiochip_is_requested - return string iff signal was requested * @chip: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. * If debugfs support is enabled, the string returned is the label passed * to gpio_request(); otherwise it is a meaningless constant. * * This function is for use by GPIO controller drivers. The label can * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. */ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) { unsigned gpio = chip->base + offset; if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip) return NULL; if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0) return NULL; #ifdef CONFIG_DEBUG_FS return gpio_desc[gpio].label; #else return "?"; #endif } EXPORT_SYMBOL_GPL(gpiochip_is_requested); /* Drivers MUST set GPIO direction before making get/set calls. In * some cases this is done in early boot, before IRQs are enabled. 
* * As a rule these aren't called more than once (except for drivers * using the open-drain emulation idiom) so these are natural places * to accumulate extra debugging checks. Note that we can't (yet) * rely on gpio_request() having been called beforehand. */ int gpio_direction_input(unsigned gpio) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->get || !chip->direction_input) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. 
*/ goto lose; } } status = chip->direction_input(chip, gpio); if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_input); int gpio_direction_output(unsigned gpio, int value) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->set || !chip->direction_output) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. */ goto lose; } } status = chip->direction_output(chip, gpio, value); if (status == 0) set_bit(FLAG_IS_OUT, &desc->flags); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_output); /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * * "Get" operations are often inlinable as reading a pin value register, * and masking the relevant bit in that register. * * When "set" operations are inlinable, they involve writing that mask to * one register to set a low value, or a different register to set it high. * Otherwise locking is needed, so there may be little value to inlining. 
* *------------------------------------------------------------------------ * * IMPORTANT!!! The hot paths -- get/set value -- assume that callers * have requested the GPIO. That can include implicit requesting by * a direction setting call. Marking a gpio as requested locks its chip * in memory, guaranteeing that these table lookups need no more locking * and that gpiochip_remove() will fail. * * REVISIT when debugging, consider adding some instrumentation to ensure * that the GPIO was actually requested. */ /** * __gpio_get_value() - return a gpio's value * @gpio: gpio whose value will be returned * Context: any * * This is used directly or indirectly to implement gpio_get_value(). * It returns the zero or nonzero value provided by the associated * gpio_chip.get() method; or zero if no such method is provided. */ int __gpio_get_value(unsigned gpio) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); WARN_ON(extra_checks && chip->can_sleep); return chip->get ? chip->get(chip, gpio - chip->base) : 0; } EXPORT_SYMBOL_GPL(__gpio_get_value); /** * __gpio_set_value() - assign a gpio's value * @gpio: gpio whose value will be assigned * @value: value to assign * Context: any * * This is used directly or indirectly to implement gpio_set_value(). * It invokes the associated gpio_chip.set() method. */ void __gpio_set_value(unsigned gpio, int value) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); WARN_ON(extra_checks && chip->can_sleep); chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(__gpio_set_value); /** * __gpio_cansleep() - report whether gpio value access will sleep * @gpio: gpio in question * Context: any * * This is used directly or indirectly to implement gpio_cansleep(). It * returns nonzero if access reading or writing the GPIO value can sleep. */ int __gpio_cansleep(unsigned gpio) { struct gpio_chip *chip; /* only call this on GPIOs that are valid! 
*/ chip = gpio_to_chip(gpio); return chip->can_sleep; } EXPORT_SYMBOL_GPL(__gpio_cansleep); /** * __gpio_to_irq() - return the IRQ corresponding to a GPIO * @gpio: gpio whose IRQ will be returned (already requested) * Context: any * * This is used directly or indirectly to implement gpio_to_irq(). * It returns the number of the IRQ signaled by this (input) GPIO, * or a negative errno. */ int __gpio_to_irq(unsigned gpio) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO; } EXPORT_SYMBOL_GPL(__gpio_to_irq); /* There's no value in making it easy to inline GPIO calls that may sleep. * Common examples include ones connected to I2C or SPI chips. */ int gpio_get_value_cansleep(unsigned gpio) { struct gpio_chip *chip; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); return chip->get ? chip->get(chip, gpio - chip->base) : 0; } EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); void gpio_set_value_cansleep(unsigned gpio, int value) { struct gpio_chip *chip; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); #ifdef CONFIG_DEBUG_FS static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) { unsigned i; unsigned gpio = chip->base; struct gpio_desc *gdesc = &gpio_desc[gpio]; int is_out; for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) { if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) continue; is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); seq_printf(s, " gpio-%-3d (%-20.20s) %s %s", gpio, gdesc->label, is_out ? "out" : "in ", chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? "); if (!is_out) { int irq = gpio_to_irq(gpio); struct irq_desc *desc = irq_to_desc(irq); /* This races with request_irq(), set_irq_type(), * and set_irq_wake() ... but those are "rare". * * More significantly, trigger type flags aren't * currently maintained by genirq. 
*/ if (irq >= 0 && desc->action) { char *trigger; switch (desc->status & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_NONE: trigger = "(default)"; break; case IRQ_TYPE_EDGE_FALLING: trigger = "edge-falling"; break; case IRQ_TYPE_EDGE_RISING: trigger = "edge-rising"; break; case IRQ_TYPE_EDGE_BOTH: trigger = "edge-both"; break; case IRQ_TYPE_LEVEL_HIGH: trigger = "level-high"; break; case IRQ_TYPE_LEVEL_LOW: trigger = "level-low"; break; default: trigger = "?trigger?"; break; } seq_printf(s, " irq-%d %s%s", irq, trigger, (desc->status & IRQ_WAKEUP) ? " wakeup" : ""); } } seq_printf(s, "\n"); } } static int gpiolib_show(struct seq_file *s, void *unused) { struct gpio_chip *chip = NULL; unsigned gpio; int started = 0; /* REVISIT this isn't locked against gpio_chip removal ... */ for (gpio = 0; gpio_is_valid(gpio); gpio++) { struct device *dev; if (chip == gpio_desc[gpio].chip) continue; chip = gpio_desc[gpio].chip; if (!chip) continue; seq_printf(s, "%sGPIOs %d-%d", started ? "\n" : "", chip->base, chip->base + chip->ngpio - 1); dev = chip->dev; if (dev) seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus", dev_name(dev)); if (chip->label) seq_printf(s, ", %s", chip->label); if (chip->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); started = 1; if (chip->dbg_show) chip->dbg_show(s, chip); else gpiolib_dbg_show(s, chip); } return 0; } static int gpiolib_open(struct inode *inode, struct file *file) { return single_open(file, gpiolib_show, NULL); } static const struct file_operations gpiolib_operations = { .open = gpiolib_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init gpiolib_debugfs_init(void) { /* /sys/kernel/debug/gpio */ (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO, NULL, NULL, &gpiolib_operations); return 0; } subsys_initcall(gpiolib_debugfs_init); #endif /* DEBUG_FS */
gpl-2.0
rickyzhang82/bbb-linux
drivers/staging/rtl8188eu/core/rtw_ieee80211.c
399
37977
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _IEEE80211_C #include <drv_types.h> #include <osdep_intf.h> #include <ieee80211.h> #include <wifi.h> #include <osdep_service.h> #include <wlan_bssdef.h> u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 }; u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 }; u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 }; u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 }; u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 }; u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 }; u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 }; u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 }; u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 }; u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 }; u16 RSN_VERSION_BSD = 1; u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 }; u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 }; u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 }; u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 }; u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 }; u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 }; u8 RSN_CIPHER_SUITE_CCMP[] = { 
0x00, 0x0f, 0xac, 4 }; u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 }; /* */ /* for adhoc-master to generate ie and provide supported-rate to fw */ /* */ static u8 WIFI_CCKRATES[] = { (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK) }; static u8 WIFI_OFDMRATES[] = { (IEEE80211_OFDM_RATE_6MB), (IEEE80211_OFDM_RATE_9MB), (IEEE80211_OFDM_RATE_12MB), (IEEE80211_OFDM_RATE_18MB), (IEEE80211_OFDM_RATE_24MB), IEEE80211_OFDM_RATE_36MB, IEEE80211_OFDM_RATE_48MB, IEEE80211_OFDM_RATE_54MB }; int rtw_get_bit_value_from_ieee_value(u8 val) { unsigned char dot11_rate_table[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */ int i = 0; while (dot11_rate_table[i] != 0) { if (dot11_rate_table[i] == val) return BIT(i); i++; } return 0; } uint rtw_is_cckrates_included(u8 *rate) { u32 i = 0; while (rate[i] != 0) { if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) || (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22)) return true; i++; } return false; } uint rtw_is_cckratesonly_included(u8 *rate) { u32 i = 0; while (rate[i] != 0) { if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) && (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22)) return false; i++; } return true; } int rtw_check_network_type(unsigned char *rate, int ratelen, int channel) { if (channel > 14) { if ((rtw_is_cckrates_included(rate)) == true) return WIRELESS_INVALID; else return WIRELESS_11A; } else { /* could be pure B, pure G, or B/G */ if ((rtw_is_cckratesonly_included(rate)) == true) return WIRELESS_11B; else if ((rtw_is_cckrates_included(rate)) == true) return WIRELESS_11BG; else return WIRELESS_11G; } } u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source, unsigned int *frlen) { memcpy((void *)pbuf, (void *)source, len); *frlen = *frlen + len; 
return pbuf + len; } /* rtw_set_ie will update frame length */ u8 *rtw_set_ie ( u8 *pbuf, int index, uint len, u8 *source, uint *frlen /* frame length */ ) { *pbuf = (u8)index; *(pbuf + 1) = (u8)len; if (len > 0) memcpy((void *)(pbuf + 2), (void *)source, len); *frlen = *frlen + (len + 2); return pbuf + len + 2; } inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode, u8 new_ch, u8 ch_switch_cnt) { u8 ie_data[3]; ie_data[0] = ch_switch_mode; ie_data[1] = new_ch; ie_data[2] = ch_switch_cnt; return rtw_set_ie(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len); } inline u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset) { if (ch_offset == SCN) return HAL_PRIME_CHNL_OFFSET_DONT_CARE; else if (ch_offset == SCA) return HAL_PRIME_CHNL_OFFSET_UPPER; else if (ch_offset == SCB) return HAL_PRIME_CHNL_OFFSET_LOWER; return HAL_PRIME_CHNL_OFFSET_DONT_CARE; } inline u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset) { if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE) return SCN; else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER) return SCB; else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER) return SCA; return SCN; } inline u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset) { return rtw_set_ie(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET, 1, &secondary_ch_offset, buf_len); } inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl, u8 flags, u16 reason, u16 precedence) { u8 ie_data[6]; ie_data[0] = ttl; ie_data[1] = flags; *(u16 *)(ie_data+2) = cpu_to_le16(reason); *(u16 *)(ie_data+4) = cpu_to_le16(precedence); return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len); } /*---------------------------------------------------------------------------- index: the information element id index, limit is the limit for search -----------------------------------------------------------------------------*/ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit) { int tmp, i; u8 *p; if (limit < 1) return NULL; p = pbuf; i = 0; *len = 
0; while (1) { if (*p == index) { *len = *(p + 1); return p; } else { tmp = *(p + 1); p += (tmp + 2); i += (tmp + 2); } if (i >= limit) break; } return NULL; } /** * rtw_get_ie_ex - Search specific IE from a series of IEs * @in_ie: Address of IEs to search * @in_len: Length limit from in_ie * @eid: Element ID to match * @oui: OUI to match * @oui_len: OUI length * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE * * Returns: The address of the specific IE found, or NULL */ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen) { uint cnt; u8 *target_ie = NULL; if (ielen) *ielen = 0; if (!in_ie || in_len <= 0) return target_ie; cnt = 0; while (cnt < in_len) { if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) { target_ie = &in_ie[cnt]; if (ie) memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2); if (ielen) *ielen = in_ie[cnt+1]+2; break; } else { cnt += in_ie[cnt+1]+2; /* goto next */ } } return target_ie; } /** * rtw_ies_remove_ie - Find matching IEs and remove * @ies: Address of IEs to search * @ies_len: Pointer of length of ies, will update to new length * @offset: The offset to start scarch * @eid: Element ID to match * @oui: OUI to match * @oui_len: OUI length * * Returns: _SUCCESS: ies is updated, _FAIL: not updated */ int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len) { int ret = _FAIL; u8 *target_ie; u32 target_ielen; u8 *start; uint search_len; if (!ies || !ies_len || *ies_len <= offset) goto exit; start = ies + offset; search_len = *ies_len - offset; while (1) { target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen); if (target_ie && target_ielen) { u8 buf[MAX_IE_SZ] = {0}; u8 *remain_ies = target_ie + target_ielen; uint remain_len = search_len - (remain_ies - start); memcpy(buf, remain_ies, 
remain_len); memcpy(target_ie, buf, remain_len); *ies_len = *ies_len - target_ielen; ret = _SUCCESS; start = target_ie; search_len = remain_len; } else { break; } } exit: return ret; } void rtw_set_supported_rate(u8 *SupportedRates, uint mode) { memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX); switch (mode) { case WIRELESS_11B: memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN); break; case WIRELESS_11G: case WIRELESS_11A: case WIRELESS_11_5N: case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */ memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN); break; case WIRELESS_11BG: case WIRELESS_11G_24N: case WIRELESS_11_24N: case WIRELESS_11BG_24N: memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN); memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN); break; } } uint rtw_get_rateset_len(u8 *rateset) { uint i = 0; while (1) { if ((rateset[i]) == 0) break; if (i > 12) break; i++; } return i; } int rtw_generate_ie(struct registry_priv *pregistrypriv) { u8 wireless_mode; int sz = 0, rateLen; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 *ie = pdev_network->IEs; /* timestamp will be inserted by hardware */ sz += 8; ie += sz; /* beacon interval : 2bytes */ *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);/* BCN_INTERVAL; */ sz += 2; ie += 2; /* capability info */ *(u16 *)ie = 0; *(__le16 *)ie |= cpu_to_le16(cap_IBSS); if (pregistrypriv->preamble == PREAMBLE_SHORT) *(__le16 *)ie |= cpu_to_le16(cap_ShortPremble); if (pdev_network->Privacy) *(__le16 *)ie |= cpu_to_le16(cap_Privacy); sz += 2; ie += 2; /* SSID */ ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz); /* supported rates */ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) { if (pdev_network->Configuration.DSConfig > 14) wireless_mode = WIRELESS_11A_5N; else wireless_mode = WIRELESS_11BG_24N; } else { wireless_mode = 
pregistrypriv->wireless_mode; } rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode); rateLen = rtw_get_rateset_len(pdev_network->SupportedRates); if (rateLen > 8) { ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz); /* ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */ } else { ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz); } /* DS parameter set */ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz); /* IBSS Parameter Set */ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz); if (rateLen > 8) ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); return sz; } unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit) { int len; u16 val16; __le16 le_tmp; unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01}; u8 *pbuf = pie; int limit_new = limit; while (1) { pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new); if (pbuf) { /* check if oui matches... */ if (!memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)) == false) goto check_next_ie; /* check version... 
*/ memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16)); val16 = le16_to_cpu(le_tmp); if (val16 != 0x0001) goto check_next_ie; *wpa_ie_len = *(pbuf + 1); return pbuf; } else { *wpa_ie_len = 0; return NULL; } check_next_ie: limit_new = limit - (pbuf - pie) - 2 - len; if (limit_new <= 0) break; pbuf += (2 + len); } *wpa_ie_len = 0; return NULL; } unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit) { return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit); } int rtw_get_wpa_cipher_suite(u8 *s) { if (!memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN)) return WPA_CIPHER_NONE; if (!memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN)) return WPA_CIPHER_WEP40; if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN)) return WPA_CIPHER_TKIP; if (!memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN)) return WPA_CIPHER_CCMP; if (!memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN)) return WPA_CIPHER_WEP104; return 0; } int rtw_get_wpa2_cipher_suite(u8 *s) { if (!memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN)) return WPA_CIPHER_NONE; if (!memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN)) return WPA_CIPHER_WEP40; if (!memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN)) return WPA_CIPHER_TKIP; if (!memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN)) return WPA_CIPHER_CCMP; if (!memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN)) return WPA_CIPHER_WEP104; return 0; } int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x) { int i, ret = _SUCCESS; int left, count; u8 *pos; u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1}; if (wpa_ie_len <= 0) { /* No WPA IE - fail silently */ return _FAIL; } if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) || (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) return _FAIL; pos = wpa_ie; pos += 8; left = wpa_ie_len - 8; /* group_cipher */ if (left >= WPA_SELECTOR_LEN) { *group_cipher = rtw_get_wpa_cipher_suite(pos); pos += WPA_SELECTOR_LEN; left -= 
WPA_SELECTOR_LEN; } else if (left > 0) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left)); return _FAIL; } /* pairwise_cipher */ if (left >= 2) { count = get_unaligned_le16(pos); pos += 2; left -= 2; if (count == 0 || left < count * WPA_SELECTOR_LEN) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), " "count %u left %u", __func__, count, left)); return _FAIL; } for (i = 0; i < count; i++) { *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos); pos += WPA_SELECTOR_LEN; left -= WPA_SELECTOR_LEN; } } else if (left == 1) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__)); return _FAIL; } if (is_8021x) { if (left >= 6) { pos += 2; if (!memcmp(pos, SUITE_1X, 4)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__)); *is_8021x = 1; } } } return ret; } int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x) { int i, ret = _SUCCESS; int left, count; u8 *pos; u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01}; if (rsn_ie_len <= 0) { /* No RSN IE - fail silently */ return _FAIL; } if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) return _FAIL; pos = rsn_ie; pos += 4; left = rsn_ie_len - 4; /* group_cipher */ if (left >= RSN_SELECTOR_LEN) { *group_cipher = rtw_get_wpa2_cipher_suite(pos); pos += RSN_SELECTOR_LEN; left -= RSN_SELECTOR_LEN; } else if (left > 0) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left)); return _FAIL; } /* pairwise_cipher */ if (left >= 2) { count = get_unaligned_le16(pos); pos += 2; left -= 2; if (count == 0 || left < count * RSN_SELECTOR_LEN) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), " "count %u left %u", __func__, count, left)); return _FAIL; } for (i = 0; i < count; i++) { *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos); pos += 
RSN_SELECTOR_LEN; left -= RSN_SELECTOR_LEN; } } else if (left == 1) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__)); return _FAIL; } if (is_8021x) { if (left >= 6) { pos += 2; if (!memcmp(pos, SUITE_1X, 4)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__)); *is_8021x = 1; } } } return ret; } int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len) { u8 authmode, sec_idx, i; u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; uint cnt; /* Search required WPA or WPA2 IE and copy to sec_ie[] */ cnt = _TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_; sec_idx = 0; while (cnt < in_len) { authmode = in_ie[cnt]; if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", sec_idx, in_ie[cnt+1]+2)); if (wpa_ie) { memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2); for (i = 0; i < (in_ie[cnt+1]+2); i += 8) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n", wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4], wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7])); } } *wpa_len = in_ie[cnt+1]+2; cnt += in_ie[cnt+1]+2; /* get next */ } else { if (authmode == _WPA2_IE_ID_) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", sec_idx, in_ie[cnt+1]+2)); if (rsn_ie) { memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2); for (i = 0; i < (in_ie[cnt+1]+2); i += 8) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n", rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4], rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7])); } } *rsn_len = in_ie[cnt+1]+2; cnt += in_ie[cnt+1]+2; /* get next */ } else { cnt += in_ie[cnt+1]+2; /* get next */ } } } return *rsn_len + *wpa_len; } u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen) { u8 match = false; u8 eid, wps_oui[4] = {0x0, 
0x50, 0xf2, 0x04}; if (ie_ptr == NULL) return match; eid = ie_ptr[0]; if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) { *wps_ielen = ie_ptr[1]+2; match = true; } return match; } /** * rtw_get_wps_ie - Search WPS IE from a series of IEs * @in_ie: Address of IEs to search * @in_len: Length limit from in_ie * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie * @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE * * Returns: The address of the WPS IE found, or NULL */ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen) { uint cnt; u8 *wpsie_ptr = NULL; u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; if (wps_ielen) *wps_ielen = 0; if (!in_ie || in_len <= 0) return wpsie_ptr; cnt = 0; while (cnt < in_len) { eid = in_ie[cnt]; if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) { wpsie_ptr = &in_ie[cnt]; if (wps_ie) memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2); if (wps_ielen) *wps_ielen = in_ie[cnt+1]+2; cnt += in_ie[cnt+1]+2; break; } else { cnt += in_ie[cnt+1]+2; /* goto next */ } } return wpsie_ptr; } /** * rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE * @wps_ie: Address of WPS IE to search * @wps_ielen: Length limit from wps_ie * @target_attr_id: The attribute ID of WPS attribute to search * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr * @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute * * Returns: the address of the specific WPS attribute found, or NULL */ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr) { u8 *attr_ptr = NULL; u8 *target_attr_ptr = NULL; u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04}; if (len_attr) *len_attr = 0; if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) || (memcmp(wps_ie + 2, wps_oui, 4))) return attr_ptr; /* 6 = 1(Element ID) + 
1(Length) + 4(WPS OUI) */ attr_ptr = wps_ie + 6; /* goto first attr */ while (attr_ptr - wps_ie < wps_ielen) { /* 4 = 2(Attribute ID) + 2(Length) */ u16 attr_id = get_unaligned_be16(attr_ptr); u16 attr_data_len = get_unaligned_be16(attr_ptr + 2); u16 attr_len = attr_data_len + 4; if (attr_id == target_attr_id) { target_attr_ptr = attr_ptr; if (buf_attr) memcpy(buf_attr, attr_ptr, attr_len); if (len_attr) *len_attr = attr_len; break; } else { attr_ptr += attr_len; /* goto next */ } } return target_attr_ptr; } /** * rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE * @wps_ie: Address of WPS IE to search * @wps_ielen: Length limit from wps_ie * @target_attr_id: The attribute ID of WPS attribute to search * @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content * @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content * * Returns: the address of the specific WPS attribute content found, or NULL */ u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content) { u8 *attr_ptr; u32 attr_len; if (len_content) *len_content = 0; attr_ptr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len); if (attr_ptr && attr_len) { if (buf_content) memcpy(buf_content, attr_ptr+4, attr_len-4); if (len_content) *len_content = attr_len-4; return attr_ptr+4; } return NULL; } static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen, struct rtw_ieee802_11_elems *elems, int show_errors) { unsigned int oui; /* first 3 bytes in vendor specific information element are the IEEE * OUI of the vendor. The following byte is used a vendor specific * sub-type. 
*/ if (elen < 4) { if (show_errors) { DBG_88E("short vendor specific information element ignored (len=%lu)\n", (unsigned long)elen); } return -1; } oui = RTW_GET_BE24(pos); switch (oui) { case OUI_MICROSOFT: /* Microsoft/Wi-Fi information elements are further typed and * subtyped */ switch (pos[3]) { case 1: /* Microsoft OUI (00:50:F2) with OUI Type 1: * real WPA information element */ elems->wpa_ie = pos; elems->wpa_ie_len = elen; break; case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */ if (elen < 5) { DBG_88E("short WME information element ignored (len=%lu)\n", (unsigned long)elen); return -1; } switch (pos[4]) { case WME_OUI_SUBTYPE_INFORMATION_ELEMENT: case WME_OUI_SUBTYPE_PARAMETER_ELEMENT: elems->wme = pos; elems->wme_len = elen; break; case WME_OUI_SUBTYPE_TSPEC_ELEMENT: elems->wme_tspec = pos; elems->wme_tspec_len = elen; break; default: DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n", pos[4], (unsigned long)elen); return -1; } break; case 4: /* Wi-Fi Protected Setup (WPS) IE */ elems->wps_ie = pos; elems->wps_ie_len = elen; break; default: DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n", pos[3], (unsigned long)elen); return -1; } break; case OUI_BROADCOM: switch (pos[3]) { case VENDOR_HT_CAPAB_OUI_TYPE: elems->vendor_ht_cap = pos; elems->vendor_ht_cap_len = elen; break; default: DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n", pos[3], (unsigned long)elen); return -1; } break; default: DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n", pos[0], pos[1], pos[2], (unsigned long)elen); return -1; } return 0; } /** * ieee802_11_parse_elems - Parse information elements in management frames * @start: Pointer to the start of IEs * @len: Length of IE buffer in octets * @elems: Data structure for parsed elements * @show_errors: Whether to show parsing errors in debug log * Returns: Parsing result */ enum parse_res 
rtw_ieee802_11_parse_elems(u8 *start, uint len, struct rtw_ieee802_11_elems *elems, int show_errors) { uint left = len; u8 *pos = start; int unknown = 0; memset(elems, 0, sizeof(*elems)); while (left >= 2) { u8 id, elen; id = *pos++; elen = *pos++; left -= 2; if (elen > left) { if (show_errors) { DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n", id, elen, (unsigned long)left); } return ParseFailed; } switch (id) { case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; break; case WLAN_EID_SUPP_RATES: elems->supp_rates = pos; elems->supp_rates_len = elen; break; case WLAN_EID_FH_PARAMS: elems->fh_params = pos; elems->fh_params_len = elen; break; case WLAN_EID_DS_PARAMS: elems->ds_params = pos; elems->ds_params_len = elen; break; case WLAN_EID_CF_PARAMS: elems->cf_params = pos; elems->cf_params_len = elen; break; case WLAN_EID_TIM: elems->tim = pos; elems->tim_len = elen; break; case WLAN_EID_IBSS_PARAMS: elems->ibss_params = pos; elems->ibss_params_len = elen; break; case WLAN_EID_CHALLENGE: elems->challenge = pos; elems->challenge_len = elen; break; case WLAN_EID_ERP_INFO: elems->erp_info = pos; elems->erp_info_len = elen; break; case WLAN_EID_EXT_SUPP_RATES: elems->ext_supp_rates = pos; elems->ext_supp_rates_len = elen; break; case WLAN_EID_VENDOR_SPECIFIC: if (rtw_ieee802_11_parse_vendor_specific(pos, elen, elems, show_errors)) unknown++; break; case WLAN_EID_RSN: elems->rsn_ie = pos; elems->rsn_ie_len = elen; break; case WLAN_EID_PWR_CAPABILITY: elems->power_cap = pos; elems->power_cap_len = elen; break; case WLAN_EID_SUPPORTED_CHANNELS: elems->supp_channels = pos; elems->supp_channels_len = elen; break; case WLAN_EID_MOBILITY_DOMAIN: elems->mdie = pos; elems->mdie_len = elen; break; case WLAN_EID_FAST_BSS_TRANSITION: elems->ftie = pos; elems->ftie_len = elen; break; case WLAN_EID_TIMEOUT_INTERVAL: elems->timeout_int = pos; elems->timeout_int_len = elen; break; case WLAN_EID_HT_CAP: elems->ht_capabilities = pos; 
elems->ht_capabilities_len = elen; break; case WLAN_EID_HT_OPERATION: elems->ht_operation = pos; elems->ht_operation_len = elen; break; default: unknown++; if (!show_errors) break; DBG_88E("IEEE 802.11 element parse ignored unknown element (id=%d elen=%d)\n", id, elen); break; } left -= elen; pos += elen; } if (left) return ParseFailed; return unknown ? ParseUnknown : ParseOK; } void rtw_macaddr_cfg(u8 *mac_addr) { u8 mac[ETH_ALEN]; if (mac_addr == NULL) return; if (rtw_initmac && mac_pton(rtw_initmac, mac)) { /* Users specify the mac address */ memcpy(mac_addr, mac, ETH_ALEN); } else { /* Use the mac address stored in the Efuse */ memcpy(mac, mac_addr, ETH_ALEN); } if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) && (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) || ((mac[0] == 0x0) && (mac[1] == 0x0) && (mac[2] == 0x0) && (mac[3] == 0x0) && (mac[4] == 0x0) && (mac[5] == 0x0))) { mac[0] = 0x00; mac[1] = 0xe0; mac[2] = 0x4c; mac[3] = 0x87; mac[4] = 0x00; mac[5] = 0x00; /* use default mac address */ memcpy(mac_addr, mac, ETH_ALEN); DBG_88E("MAC Address from efuse error, assign default one !!!\n"); } DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", (mac_addr)); } void dump_ies(u8 *buf, u32 buf_len) { u8 *pos = (u8 *)buf; u8 id, len; while (pos-buf <= buf_len) { id = *pos; len = *(pos+1); DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len); dump_wps_ie(pos, len); pos += (2 + len); } } void dump_wps_ie(u8 *ie, u32 ie_len) { u8 *pos = (u8 *)ie; u16 id; u16 len; u8 *wps_ie; uint wps_ielen; wps_ie = rtw_get_wps_ie(ie, ie_len, NULL, &wps_ielen); if (wps_ie != ie || wps_ielen == 0) return; pos += 6; while (pos-ie < ie_len) { id = get_unaligned_be16(pos); len = get_unaligned_be16(pos + 2); DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len); pos += (4+len); } } /* Baron adds to avoid FreeBSD warning */ int ieee80211_is_empty_essid(const char *essid, int essid_len) { /* Single white space is for Linksys APs */ if (essid_len == 1 && essid[0] == ' ') return 
1; /* Otherwise, if the entire essid is 0, we assume it is hidden */ while (essid_len) { essid_len--; if (essid[essid_len] != '\0') return 0; } return 1; } int ieee80211_get_hdrlen(u16 fc) { int hdrlen = 24; switch (WLAN_FC_GET_TYPE(fc)) { case RTW_IEEE80211_FTYPE_DATA: if (fc & RTW_IEEE80211_STYPE_QOS_DATA) hdrlen += 2; if ((fc & RTW_IEEE80211_FCTL_FROMDS) && (fc & RTW_IEEE80211_FCTL_TODS)) hdrlen += 6; /* Addr4 */ break; case RTW_IEEE80211_FTYPE_CTL: switch (WLAN_FC_GET_STYPE(fc)) { case RTW_IEEE80211_STYPE_CTS: case RTW_IEEE80211_STYPE_ACK: hdrlen = 10; break; default: hdrlen = 16; break; } break; } return hdrlen; } static int rtw_get_cipher_info(struct wlan_network *pnetwork) { u32 wpa_ielen; unsigned char *pbuf; int group_cipher = 0, pairwise_cipher = 0, is8021x = 0; int ret = _FAIL; pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); if (pbuf && (wpa_ielen > 0)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen)); if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) { pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher; pnetwork->BcnInfo.group_cipher = group_cipher; pnetwork->BcnInfo.is_8021x = is8021x; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d, is_8021x is %d", __func__, pnetwork->BcnInfo.pairwise_cipher, pnetwork->BcnInfo.is_8021x)); ret = _SUCCESS; } } else { pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); if (pbuf && (wpa_ielen > 0)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n")); if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n")); pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher; pnetwork->BcnInfo.group_cipher = group_cipher; pnetwork->BcnInfo.is_8021x = is8021x; RT_TRACE(_module_rtl871x_mlme_c_, 
_drv_info_, ("%s: pnetwork->pairwise_cipher: %d," "pnetwork->group_cipher is %d, is_8021x is %d", __func__, pnetwork->BcnInfo.pairwise_cipher, pnetwork->BcnInfo.group_cipher, pnetwork->BcnInfo.is_8021x)); ret = _SUCCESS; } } } return ret; } void rtw_get_bcn_info(struct wlan_network *pnetwork) { unsigned short cap = 0; u8 bencrypt = 0; __le16 le_tmp; u16 wpa_len = 0, rsn_len = 0; struct HT_info_element *pht_info = NULL; struct rtw_ieee80211_ht_cap *pht_cap = NULL; unsigned int len; unsigned char *p; memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2); cap = le16_to_cpu(le_tmp); if (cap & WLAN_CAPABILITY_PRIVACY) { bencrypt = 1; pnetwork->network.Privacy = 1; } else { pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS; } rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len)); if (rsn_len > 0) { pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2; } else if (wpa_len > 0) { pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA; } else { if (bencrypt) pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP; } RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n", pnetwork->BcnInfo.encryp_protocol)); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n", pnetwork->BcnInfo.encryp_protocol)); rtw_get_cipher_info(pnetwork); /* get bwmode and ch_offset */ /* parsing HT_CAP_IE */ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, 
pnetwork->network.IELength - _FIXED_IE_LENGTH_); if (p && len > 0) { pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2); pnetwork->BcnInfo.ht_cap_info = pht_cap->cap_info; } else { pnetwork->BcnInfo.ht_cap_info = 0; } /* parsing HT_INFO_IE */ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_); if (p && len > 0) { pht_info = (struct HT_info_element *)(p + 2); pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0]; } else { pnetwork->BcnInfo.ht_info_infos_0 = 0; } } /* show MCS rate, unit: 100Kbps */ u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate) { u16 max_rate = 0; if (rf_type == RF_1T1R) { if (MCS_rate[0] & BIT(7)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650); else if (MCS_rate[0] & BIT(6)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585); else if (MCS_rate[0] & BIT(5)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520); else if (MCS_rate[0] & BIT(4)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390); else if (MCS_rate[0] & BIT(3)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260); else if (MCS_rate[0] & BIT(2)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195); else if (MCS_rate[0] & BIT(1)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130); else if (MCS_rate[0] & BIT(0)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65); } else { if (MCS_rate[1]) { if (MCS_rate[1] & BIT(7)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 3000 : 2700) : ((short_GI_20) ? 1444 : 1300); else if (MCS_rate[1] & BIT(6)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 2700 : 2430) : ((short_GI_20) ? 1300 : 1170); else if (MCS_rate[1] & BIT(5)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 2400 : 2160) : ((short_GI_20) ? 
1156 : 1040); else if (MCS_rate[1] & BIT(4)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1800 : 1620) : ((short_GI_20) ? 867 : 780); else if (MCS_rate[1] & BIT(3)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520); else if (MCS_rate[1] & BIT(2)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390); else if (MCS_rate[1] & BIT(1)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260); else if (MCS_rate[1] & BIT(0)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130); } else { if (MCS_rate[0] & BIT(7)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650); else if (MCS_rate[0] & BIT(6)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585); else if (MCS_rate[0] & BIT(5)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520); else if (MCS_rate[0] & BIT(4)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390); else if (MCS_rate[0] & BIT(3)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260); else if (MCS_rate[0] & BIT(2)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195); else if (MCS_rate[0] & BIT(1)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130); else if (MCS_rate[0] & BIT(0)) max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 
72 : 65); } } return max_rate; } int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action) { const u8 *frame_body = frame + sizeof(struct rtw_ieee80211_hdr_3addr); u16 fc; u8 c, a = 0; fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl); if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE)) != (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION)) return false; c = frame_body[0]; switch (c) { case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */ break; default: a = frame_body[1]; } if (category) *category = c; if (action) *action = a; return true; } static const char *_action_public_str[] = { "ACT_PUB_BSSCOEXIST", "ACT_PUB_DSE_ENABLE", "ACT_PUB_DSE_DEENABLE", "ACT_PUB_DSE_REG_LOCATION", "ACT_PUB_EXT_CHL_SWITCH", "ACT_PUB_DSE_MSR_REQ", "ACT_PUB_DSE_MSR_RPRT", "ACT_PUB_MP", "ACT_PUB_DSE_PWR_CONSTRAINT", "ACT_PUB_VENDOR", "ACT_PUB_GAS_INITIAL_REQ", "ACT_PUB_GAS_INITIAL_RSP", "ACT_PUB_GAS_COMEBACK_REQ", "ACT_PUB_GAS_COMEBACK_RSP", "ACT_PUB_TDLS_DISCOVERY_RSP", "ACT_PUB_LOCATION_TRACK", "ACT_PUB_RSVD", }; const char *action_public_str(u8 action) { action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action; return _action_public_str[action]; }
gpl-2.0
sbryan12144/BeastMode-Elite
arch/x86/kernel/cpu/perf_event_intel_ds.c
655
18207
#ifdef CONFIG_CPU_SUP_INTEL /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS 4 /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 #define BTS_BUFFER_SIZE (PAGE_SIZE << 4) #define PEBS_BUFFER_SIZE PAGE_SIZE /* * pebs_record_32 for p4 and core not supported struct pebs_record_32 { u32 flags, ip; u32 ax, bc, cx, dx; u32 si, di, bp, sp; }; */ struct pebs_record_core { u64 flags, ip; u64 ax, bx, cx, dx; u64 si, di, bp, sp; u64 r8, r9, r10, r11; u64 r12, r13, r14, r15; }; struct pebs_record_nhm { u64 flags, ip; u64 ax, bx, cx, dx; u64 si, di, bp, sp; u64 r8, r9, r10, r11; u64 r12, r13, r14, r15; u64 status, dla, dse, lat; }; /* * A debug store configuration. * * We only support architectures that use 64bit fields. */ struct debug_store { u64 bts_buffer_base; u64 bts_index; u64 bts_absolute_maximum; u64 bts_interrupt_threshold; u64 pebs_buffer_base; u64 pebs_index; u64 pebs_absolute_maximum; u64 pebs_interrupt_threshold; u64 pebs_event_reset[MAX_PEBS_EVENTS]; }; static void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds) return; wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, (u32)((u64)(unsigned long)ds), (u32)((u64)(unsigned long)ds >> 32)); } static void fini_debug_store_on_cpu(int cpu) { if (!per_cpu(cpu_hw_events, cpu).ds) return; wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); } static int alloc_pebs_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; int node = cpu_to_node(cpu); int max, thresh = 1; /* always use a single PEBS record */ void *buffer; if (!x86_pmu.pebs) return 0; buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!buffer)) return -ENOMEM; max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; ds->pebs_buffer_base = (u64)(unsigned long)buffer; ds->pebs_index = ds->pebs_buffer_base; ds->pebs_absolute_maximum = ds->pebs_buffer_base + max * x86_pmu.pebs_record_size; ds->pebs_interrupt_threshold = ds->pebs_buffer_base + thresh * 
x86_pmu.pebs_record_size; return 0; } static void release_pebs_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds || !x86_pmu.pebs) return; kfree((void *)(unsigned long)ds->pebs_buffer_base); ds->pebs_buffer_base = 0; } static int alloc_bts_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; int node = cpu_to_node(cpu); int max, thresh; void *buffer; if (!x86_pmu.bts) return 0; buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!buffer)) return -ENOMEM; max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; thresh = max / 16; ds->bts_buffer_base = (u64)(unsigned long)buffer; ds->bts_index = ds->bts_buffer_base; ds->bts_absolute_maximum = ds->bts_buffer_base + max * BTS_RECORD_SIZE; ds->bts_interrupt_threshold = ds->bts_absolute_maximum - thresh * BTS_RECORD_SIZE; return 0; } static void release_bts_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds || !x86_pmu.bts) return; kfree((void *)(unsigned long)ds->bts_buffer_base); ds->bts_buffer_base = 0; } static int alloc_ds_buffer(int cpu) { int node = cpu_to_node(cpu); struct debug_store *ds; ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!ds)) return -ENOMEM; per_cpu(cpu_hw_events, cpu).ds = ds; return 0; } static void release_ds_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; if (!ds) return; per_cpu(cpu_hw_events, cpu).ds = NULL; kfree(ds); } static void release_ds_buffers(void) { int cpu; if (!x86_pmu.bts && !x86_pmu.pebs) return; get_online_cpus(); for_each_online_cpu(cpu) fini_debug_store_on_cpu(cpu); for_each_possible_cpu(cpu) { release_pebs_buffer(cpu); release_bts_buffer(cpu); release_ds_buffer(cpu); } put_online_cpus(); } static void reserve_ds_buffers(void) { int bts_err = 0, pebs_err = 0; int cpu; x86_pmu.bts_active = 0; x86_pmu.pebs_active = 0; if (!x86_pmu.bts && !x86_pmu.pebs) return; if (!x86_pmu.bts) bts_err = 1; if (!x86_pmu.pebs) pebs_err = 
1; get_online_cpus(); for_each_possible_cpu(cpu) { if (alloc_ds_buffer(cpu)) { bts_err = 1; pebs_err = 1; } if (!bts_err && alloc_bts_buffer(cpu)) bts_err = 1; if (!pebs_err && alloc_pebs_buffer(cpu)) pebs_err = 1; if (bts_err && pebs_err) break; } if (bts_err) { for_each_possible_cpu(cpu) release_bts_buffer(cpu); } if (pebs_err) { for_each_possible_cpu(cpu) release_pebs_buffer(cpu); } if (bts_err && pebs_err) { for_each_possible_cpu(cpu) release_ds_buffer(cpu); } else { if (x86_pmu.bts && !bts_err) x86_pmu.bts_active = 1; if (x86_pmu.pebs && !pebs_err) x86_pmu.pebs_active = 1; for_each_online_cpu(cpu) init_debug_store_on_cpu(cpu); } put_online_cpus(); } /* * BTS */ static struct event_constraint bts_constraint = EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); static void intel_pmu_enable_bts(u64 config) { unsigned long debugctlmsr; debugctlmsr = get_debugctlmsr(); debugctlmsr |= DEBUGCTLMSR_TR; debugctlmsr |= DEBUGCTLMSR_BTS; debugctlmsr |= DEBUGCTLMSR_BTINT; if (!(config & ARCH_PERFMON_EVENTSEL_OS)) debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS; if (!(config & ARCH_PERFMON_EVENTSEL_USR)) debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR; update_debugctlmsr(debugctlmsr); } static void intel_pmu_disable_bts(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long debugctlmsr; if (!cpuc->ds) return; debugctlmsr = get_debugctlmsr(); debugctlmsr &= ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT | DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR); update_debugctlmsr(debugctlmsr); } static int intel_pmu_drain_bts_buffer(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct debug_store *ds = cpuc->ds; struct bts_record { u64 from; u64 to; u64 flags; }; struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; struct bts_record *at, *top; struct perf_output_handle handle; struct perf_event_header header; struct perf_sample_data data; struct pt_regs regs; if (!event) return 0; if (!x86_pmu.bts_active) return 0; at = (struct 
bts_record *)(unsigned long)ds->bts_buffer_base; top = (struct bts_record *)(unsigned long)ds->bts_index; if (top <= at) return 0; ds->bts_index = ds->bts_buffer_base; perf_sample_data_init(&data, 0); data.period = event->hw.last_period; regs.ip = 0; /* * Prepare a generic sample, i.e. fill in the invariant fields. * We will overwrite the from and to address before we output * the sample. */ perf_prepare_sample(&header, &data, event, &regs); if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) return 1; for (; at < top; at++) { data.ip = at->from; data.addr = at->to; perf_output_sample(&handle, &header, &data, event); } perf_output_end(&handle); /* There's new data available. */ event->hw.interrupts++; event->pending_kill = POLL_IN; return 1; } /* * PEBS */ static struct event_constraint intel_core2_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_atom_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ 
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_snb_pebs_events[] = { INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ INTEL_UEVENT_CONSTRAINT(0x82d0, 
0xf), /* MEM_UOP_RETIRED.ANY_STORES */ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ EVENT_CONSTRAINT_END }; static struct event_constraint * intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; if (!event->attr.precise_ip) return NULL; if (x86_pmu.pebs_constraints) { for_each_event_constraint(c, x86_pmu.pebs_constraints) { if ((event->hw.config & c->cmask) == c->code) return c; } } return &emptyconstraint; } static void intel_pmu_pebs_enable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; cpuc->pebs_enabled |= 1ULL << hwc->idx; WARN_ON_ONCE(cpuc->enabled); if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) intel_pmu_lbr_enable(event); } static void intel_pmu_pebs_disable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; cpuc->pebs_enabled &= ~(1ULL << hwc->idx); if (cpuc->enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); hwc->config |= ARCH_PERFMON_EVENTSEL_INT; if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) intel_pmu_lbr_disable(event); } static void intel_pmu_pebs_enable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->pebs_enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); } static void intel_pmu_pebs_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->pebs_enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } #include <asm/insn.h> static inline bool kernel_ip(unsigned long ip) { #ifdef CONFIG_X86_32 return ip > PAGE_OFFSET; #else return (long)ip < 0; #endif } static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) { struct cpu_hw_events *cpuc = 
&__get_cpu_var(cpu_hw_events); unsigned long from = cpuc->lbr_entries[0].from; unsigned long old_to, to = cpuc->lbr_entries[0].to; unsigned long ip = regs->ip; /* * We don't need to fixup if the PEBS assist is fault like */ if (!x86_pmu.intel_cap.pebs_trap) return 1; /* * No LBR entry, no basic block, no rewinding */ if (!cpuc->lbr_stack.nr || !from || !to) return 0; /* * Basic blocks should never cross user/kernel boundaries */ if (kernel_ip(ip) != kernel_ip(to)) return 0; /* * unsigned math, either ip is before the start (impossible) or * the basic block is larger than 1 page (sanity) */ if ((ip - to) > PAGE_SIZE) return 0; /* * We sampled a branch insn, rewind using the LBR stack */ if (ip == to) { regs->ip = from; return 1; } do { struct insn insn; u8 buf[MAX_INSN_SIZE]; void *kaddr; old_to = to; if (!kernel_ip(ip)) { int bytes, size = MAX_INSN_SIZE; bytes = copy_from_user_nmi(buf, (void __user *)to, size); if (bytes != size) return 0; kaddr = buf; } else kaddr = (void *)to; kernel_insn_init(&insn, kaddr); insn_get_length(&insn); to += insn.length; } while (to < ip); if (to == ip) { regs->ip = old_to; return 1; } /* * Even though we decoded the basic block, the instruction stream * never matched the given IP, either the TO or the IP got corrupted. */ return 0; } static int intel_pmu_save_and_restart(struct perf_event *event); static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *__pebs) { /* * We cast to pebs_record_core since that is a subset of * both formats and we don't use the other fields in this * routine. */ struct pebs_record_core *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; if (!intel_pmu_save_and_restart(event)) return; perf_sample_data_init(&data, 0); data.period = event->hw.last_period; /* * We use the interrupt regs as a base because the PEBS record * does not contain a full regs set, specifically it seems to * lack segment descriptors, which get used by things like * user_mode(). 
* * In the simple case fix up only the IP and BP,SP regs, for * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. * A possible PERF_SAMPLE_REGS will have to transfer all regs. */ regs = *iregs; regs.ip = pebs->ip; regs.bp = pebs->bp; regs.sp = pebs->sp; if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) regs.flags |= PERF_EFLAGS_EXACT; else regs.flags &= ~PERF_EFLAGS_EXACT; if (perf_event_overflow(event, 1, &data, &regs)) x86_pmu_stop(event, 0); } static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct debug_store *ds = cpuc->ds; struct perf_event *event = cpuc->events[0]; /* PMC0 only */ struct pebs_record_core *at, *top; int n; if (!x86_pmu.pebs_active) return; at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; /* * Whatever else happens, drain the thing */ ds->pebs_index = ds->pebs_buffer_base; if (!test_bit(0, cpuc->active_mask)) return; WARN_ON_ONCE(!event); if (!event->attr.precise_ip) return; n = top - at; if (n <= 0) return; /* * Should not happen, we program the threshold at 1 and do not * set a reset value. */ WARN_ON_ONCE(n > 1); at += n - 1; __intel_pmu_pebs_event(event, iregs, at); } static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct debug_store *ds = cpuc->ds; struct pebs_record_nhm *at, *top; struct perf_event *event = NULL; u64 status = 0; int bit, n; if (!x86_pmu.pebs_active) return; at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; ds->pebs_index = ds->pebs_buffer_base; n = top - at; if (n <= 0) return; /* * Should not happen, we program the threshold at 1 and do not * set a reset value. 
*/ WARN_ON_ONCE(n > MAX_PEBS_EVENTS); for ( ; at < top; at++) { for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) { event = cpuc->events[bit]; if (!test_bit(bit, cpuc->active_mask)) continue; WARN_ON_ONCE(!event); if (!event->attr.precise_ip) continue; if (__test_and_set_bit(bit, (unsigned long *)&status)) continue; break; } if (!event || bit >= MAX_PEBS_EVENTS) continue; __intel_pmu_pebs_event(event, iregs, at); } } /* * BTS, PEBS probe and setup */ static void intel_ds_init(void) { /* * No support for 32bit formats */ if (!boot_cpu_has(X86_FEATURE_DTES64)) return; x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; int format = x86_pmu.intel_cap.pebs_format; switch (format) { case 0: printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; break; case 1: printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; break; default: printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; } } } #else /* CONFIG_CPU_SUP_INTEL */ static void reserve_ds_buffers(void) { } static void release_ds_buffers(void) { } #endif /* CONFIG_CPU_SUP_INTEL */
gpl-2.0
burstlam/zte-blade-32
sound/pci/emu10k1/emu10k1_main.c
655
68082
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Routines for control of EMU10K1 chips * * Copyright (c) by James Courtier-Dutton <James@superbug.co.uk> * Added support for Audigy 2 Value. * Added EMU 1010 support. * General bug fixes and enhancements. * * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/sched.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/emu10k1.h> #include <linux/firmware.h> #include "p16v.h" #include "tina2.h" #include "p17v.h" #define HANA_FILENAME "emu/hana.fw" #define DOCK_FILENAME "emu/audio_dock.fw" #define EMU1010B_FILENAME "emu/emu1010b.fw" #define MICRO_DOCK_FILENAME "emu/micro_dock.fw" #define EMU0404_FILENAME "emu/emu0404.fw" #define EMU1010_NOTEBOOK_FILENAME "emu/emu1010_notebook.fw" MODULE_FIRMWARE(HANA_FILENAME); MODULE_FIRMWARE(DOCK_FILENAME); MODULE_FIRMWARE(EMU1010B_FILENAME); MODULE_FIRMWARE(MICRO_DOCK_FILENAME); MODULE_FIRMWARE(EMU0404_FILENAME); MODULE_FIRMWARE(EMU1010_NOTEBOOK_FILENAME); /************************************************************************* * EMU10K1 init / done 
*************************************************************************/ void snd_emu10k1_voice_init(struct snd_emu10k1 *emu, int ch) { snd_emu10k1_ptr_write(emu, DCYSUSV, ch, 0); snd_emu10k1_ptr_write(emu, IP, ch, 0); snd_emu10k1_ptr_write(emu, VTFT, ch, 0xffff); snd_emu10k1_ptr_write(emu, CVCF, ch, 0xffff); snd_emu10k1_ptr_write(emu, PTRX, ch, 0); snd_emu10k1_ptr_write(emu, CPF, ch, 0); snd_emu10k1_ptr_write(emu, CCR, ch, 0); snd_emu10k1_ptr_write(emu, PSST, ch, 0); snd_emu10k1_ptr_write(emu, DSL, ch, 0x10); snd_emu10k1_ptr_write(emu, CCCA, ch, 0); snd_emu10k1_ptr_write(emu, Z1, ch, 0); snd_emu10k1_ptr_write(emu, Z2, ch, 0); snd_emu10k1_ptr_write(emu, FXRT, ch, 0x32100000); snd_emu10k1_ptr_write(emu, ATKHLDM, ch, 0); snd_emu10k1_ptr_write(emu, DCYSUSM, ch, 0); snd_emu10k1_ptr_write(emu, IFATN, ch, 0xffff); snd_emu10k1_ptr_write(emu, PEFE, ch, 0); snd_emu10k1_ptr_write(emu, FMMOD, ch, 0); snd_emu10k1_ptr_write(emu, TREMFRQ, ch, 24); /* 1 Hz */ snd_emu10k1_ptr_write(emu, FM2FRQ2, ch, 24); /* 1 Hz */ snd_emu10k1_ptr_write(emu, TEMPENV, ch, 0); /*** these are last so OFF prevents writing ***/ snd_emu10k1_ptr_write(emu, LFOVAL2, ch, 0); snd_emu10k1_ptr_write(emu, LFOVAL1, ch, 0); snd_emu10k1_ptr_write(emu, ATKHLDV, ch, 0); snd_emu10k1_ptr_write(emu, ENVVOL, ch, 0); snd_emu10k1_ptr_write(emu, ENVVAL, ch, 0); /* Audigy extra stuffs */ if (emu->audigy) { snd_emu10k1_ptr_write(emu, 0x4c, ch, 0); /* ?? */ snd_emu10k1_ptr_write(emu, 0x4d, ch, 0); /* ?? */ snd_emu10k1_ptr_write(emu, 0x4e, ch, 0); /* ?? */ snd_emu10k1_ptr_write(emu, 0x4f, ch, 0); /* ?? 
*/ snd_emu10k1_ptr_write(emu, A_FXRT1, ch, 0x03020100); snd_emu10k1_ptr_write(emu, A_FXRT2, ch, 0x3f3f3f3f); snd_emu10k1_ptr_write(emu, A_SENDAMOUNTS, ch, 0); } } static unsigned int spi_dac_init[] = { 0x00ff, 0x02ff, 0x0400, 0x0520, 0x0600, 0x08ff, 0x0aff, 0x0cff, 0x0eff, 0x10ff, 0x1200, 0x1400, 0x1480, 0x1800, 0x1aff, 0x1cff, 0x1e00, 0x0530, 0x0602, 0x0622, 0x1400, }; static unsigned int i2c_adc_init[][2] = { { 0x17, 0x00 }, /* Reset */ { 0x07, 0x00 }, /* Timeout */ { 0x0b, 0x22 }, /* Interface control */ { 0x0c, 0x22 }, /* Master mode control */ { 0x0d, 0x08 }, /* Powerdown control */ { 0x0e, 0xcf }, /* Attenuation Left 0x01 = -103dB, 0xff = 24dB */ { 0x0f, 0xcf }, /* Attenuation Right 0.5dB steps */ { 0x10, 0x7b }, /* ALC Control 1 */ { 0x11, 0x00 }, /* ALC Control 2 */ { 0x12, 0x32 }, /* ALC Control 3 */ { 0x13, 0x00 }, /* Noise gate control */ { 0x14, 0xa6 }, /* Limiter control */ { 0x15, ADC_MUX_2 }, /* ADC Mixer control. Mic for A2ZS Notebook */ }; static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) { unsigned int silent_page; int ch; u32 tmp; /* disable audio and lock cache */ outl(HCFG_LOCKSOUNDCACHE | HCFG_LOCKTANKCACHE_MASK | HCFG_MUTEBUTTONENABLE, emu->port + HCFG); /* reset recording buffers */ snd_emu10k1_ptr_write(emu, MICBS, 0, ADCBS_BUFSIZE_NONE); snd_emu10k1_ptr_write(emu, MICBA, 0, 0); snd_emu10k1_ptr_write(emu, FXBS, 0, ADCBS_BUFSIZE_NONE); snd_emu10k1_ptr_write(emu, FXBA, 0, 0); snd_emu10k1_ptr_write(emu, ADCBS, 0, ADCBS_BUFSIZE_NONE); snd_emu10k1_ptr_write(emu, ADCBA, 0, 0); /* disable channel interrupt */ outl(0, emu->port + INTE); snd_emu10k1_ptr_write(emu, CLIEL, 0, 0); snd_emu10k1_ptr_write(emu, CLIEH, 0, 0); snd_emu10k1_ptr_write(emu, SOLEL, 0, 0); snd_emu10k1_ptr_write(emu, SOLEH, 0, 0); if (emu->audigy) { /* set SPDIF bypass mode */ snd_emu10k1_ptr_write(emu, SPBYPASS, 0, SPBYPASS_FORMAT); /* enable rear left + rear right AC97 slots */ snd_emu10k1_ptr_write(emu, AC97SLOT, 0, AC97SLOT_REAR_RIGHT | 
AC97SLOT_REAR_LEFT);
	}

	/* init envelope engine */
	for (ch = 0; ch < NUM_G; ch++)
		snd_emu10k1_voice_init(emu, ch);

	snd_emu10k1_ptr_write(emu, SPCS0, 0, emu->spdif_bits[0]);
	snd_emu10k1_ptr_write(emu, SPCS1, 0, emu->spdif_bits[1]);
	snd_emu10k1_ptr_write(emu, SPCS2, 0, emu->spdif_bits[2]);

	if (emu->card_capabilities->ca0151_chip) { /* audigy2 */
		/* Hacks for Alice3 to work independent of haP16V driver */
		/* Setup SRCMulti_I2S SamplingRate */
		tmp = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
		tmp &= 0xfffff1ff;
		tmp |= (0x2<<9);
		snd_emu10k1_ptr_write(emu, A_SPDIF_SAMPLERATE, 0, tmp);

		/* Setup SRCSel (Enable Spdif,I2S SRCMulti) */
		snd_emu10k1_ptr20_write(emu, SRCSel, 0, 0x14);
		/* Setup SRCMulti Input Audio Enable */
		/* Use 0xFFFFFFFF to enable P16V sounds. */
		snd_emu10k1_ptr20_write(emu, SRCMULTI_ENABLE, 0, 0xFFFFFFFF);

		/* Enabled Phased (8-channel) P16V playback */
		outl(0x0201, emu->port + HCFG2);

		/* Set playback routing. */
		snd_emu10k1_ptr20_write(emu, CAPTURE_P16V_SOURCE, 0, 0x78e4);
	}
	if (emu->card_capabilities->ca0108_chip) { /* audigy2 Value */
		/* Hacks for Alice3 to work independent of haP16V driver */
		snd_printk(KERN_INFO "Audigy2 value: Special config.\n");
		/* Setup SRCMulti_I2S SamplingRate */
		tmp = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
		tmp &= 0xfffff1ff;
		tmp |= (0x2<<9);
		snd_emu10k1_ptr_write(emu, A_SPDIF_SAMPLERATE, 0, tmp);

		/* Setup SRCSel (Enable Spdif,I2S SRCMulti) */
		outl(0x600000, emu->port + 0x20);
		outl(0x14, emu->port + 0x24);

		/* Setup SRCMulti Input Audio Enable */
		outl(0x7b0000, emu->port + 0x20);
		outl(0xFF000000, emu->port + 0x24);

		/* Setup SPDIF Out Audio Enable */
		/* The Audigy 2 Value has a separate SPDIF out,
		 * so no need for a mixer switch
		 */
		outl(0x7a0000, emu->port + 0x20);
		outl(0xFF000000, emu->port + 0x24);
		tmp = inl(emu->port + A_IOCFG) & ~0x8; /* Clear bit 3 */
		outl(tmp, emu->port + A_IOCFG);
	}
	if (emu->card_capabilities->spi_dac) {
		/* Audigy 2 ZS Notebook with DAC Wolfson WM8768/WM8568 */
		int size, n;

		size = ARRAY_SIZE(spi_dac_init);
		for (n = 0; n < size; n++)
			snd_emu10k1_spi_write(emu, spi_dac_init[n]);

		snd_emu10k1_ptr20_write(emu, 0x60, 0, 0x10);
		/* Enable GPIOs
		 * GPIO0: Unknown
		 * GPIO1: Speakers-enabled.
		 * GPIO2: Unknown
		 * GPIO3: Unknown
		 * GPIO4: IEC958 Output on.
		 * GPIO5: Unknown
		 * GPIO6: Unknown
		 * GPIO7: Unknown
		 */
		outl(0x76, emu->port + A_IOCFG); /* Windows uses 0x3f76 */
	}
	if (emu->card_capabilities->i2c_adc) {
		/* Audigy 2 ZS Notebook with ADC Wolfson WM8775 */
		int size, n;

		snd_emu10k1_ptr20_write(emu, P17V_I2S_SRC_SEL, 0, 0x2020205f);
		tmp = inl(emu->port + A_IOCFG);
		outl(tmp | 0x4, emu->port + A_IOCFG);  /* Set bit 2 for mic input */
		tmp = inl(emu->port + A_IOCFG);
		size = ARRAY_SIZE(i2c_adc_init);
		for (n = 0; n < size; n++)
			snd_emu10k1_i2c_write(emu, i2c_adc_init[n][0],
					      i2c_adc_init[n][1]);
		/* mirror the hardware default attenuation in the driver state */
		for (n = 0; n < 4; n++) {
			emu->i2c_capture_volume[n][0] = 0xcf;
			emu->i2c_capture_volume[n][1] = 0xcf;
		}
	}

	/* page-table base and tank memory buffer */
	snd_emu10k1_ptr_write(emu, PTB, 0, emu->ptb_pages.addr);
	snd_emu10k1_ptr_write(emu, TCB, 0, 0);	/* taken from original driver */
	snd_emu10k1_ptr_write(emu, TCBS, 0, 4);	/* taken from original driver */

	/* point every channel's mapping registers at the silent page */
	silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
	for (ch = 0; ch < NUM_G; ch++) {
		snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
		snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
	}

	if (emu->card_capabilities->emu_model) {
		outl(HCFG_AUTOMUTE_ASYNC |
			HCFG_EMU32_SLAVE |
			HCFG_AUDIOENABLE, emu->port + HCFG);
	/*
	 * Hokay, setup HCFG
	 *  Mute Disable Audio = 0
	 *  Lock Tank Memory = 1
	 *  Lock Sound Memory = 0
	 *  Auto Mute = 1
	 */
	} else if (emu->audigy) {
		if (emu->revision == 4) /* audigy2 */
			outl(HCFG_AUDIOENABLE |
				HCFG_AC3ENABLE_CDSPDIF |
				HCFG_AC3ENABLE_GPSPDIF |
				HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);
		else
			outl(HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);
	/* FIXME: Remove all these emu->model and replace it with a card recognition parameter,
	 * e.g. card_capabilities->joystick
	 */
	} else if (emu->model == 0x20 ||
	    emu->model == 0xc400 ||
	    (emu->model == 0x21 && emu->revision < 6))
		outl(HCFG_LOCKTANKCACHE_MASK | HCFG_AUTOMUTE, emu->port + HCFG);
	else
		/* With on-chip joystick */
		outl(HCFG_LOCKTANKCACHE_MASK | HCFG_AUTOMUTE | HCFG_JOYENABLE, emu->port + HCFG);

	if (enable_ir) {	/* enable IR for SB Live */
		if (emu->card_capabilities->emu_model) {
			;  /* Disable all access to A_IOCFG for the emu1010 */
		} else if (emu->card_capabilities->i2c_adc) {
			;  /* Disable A_IOCFG for Audigy 2 ZS Notebook */
		} else if (emu->audigy) {
			unsigned int reg = inl(emu->port + A_IOCFG);
			outl(reg | A_IOCFG_GPOUT2, emu->port + A_IOCFG);
			udelay(500);
			outl(reg | A_IOCFG_GPOUT1 | A_IOCFG_GPOUT2, emu->port + A_IOCFG);
			udelay(100);
			outl(reg, emu->port + A_IOCFG);
		} else {
			unsigned int reg = inl(emu->port + HCFG);
			outl(reg | HCFG_GPOUT2, emu->port + HCFG);
			udelay(500);
			outl(reg | HCFG_GPOUT1 | HCFG_GPOUT2, emu->port + HCFG);
			udelay(100);
			outl(reg, emu->port + HCFG);
		}
	}

	if (emu->card_capabilities->emu_model) {
		;  /* Disable all access to A_IOCFG for the emu1010 */
	} else if (emu->card_capabilities->i2c_adc) {
		;  /* Disable A_IOCFG for Audigy 2 ZS Notebook */
	} else if (emu->audigy) {	/* enable analog output */
		unsigned int reg = inl(emu->port + A_IOCFG);
		outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
	}

	return 0;
}

/*
 * Set the HCFG audio-enable bit and un-mute the analog/digital outputs
 * according to the card variant.
 */
static void snd_emu10k1_audio_enable(struct snd_emu10k1 *emu)
{
	/*
	 *  Enable the audio bit
	 */
	outl(inl(emu->port + HCFG) | HCFG_AUDIOENABLE, emu->port + HCFG);

	/* Enable analog/digital outs on audigy */
	if (emu->card_capabilities->emu_model) {
		;  /* Disable all access to A_IOCFG for the emu1010 */
	} else if (emu->card_capabilities->i2c_adc) {
		;  /* Disable A_IOCFG for Audigy 2 ZS Notebook */
	} else if (emu->audigy) {
		outl(inl(emu->port + A_IOCFG) & ~0x44, emu->port + A_IOCFG);

		if (emu->card_capabilities->ca0151_chip) { /* audigy2 */
			/* Unmute Analog now.  Set GPO6 to 1 for Apollo.
			 * This has to be done after init ALice3 I2SOut beyond 48KHz.
* So, sequence is important. */
			outl(inl(emu->port + A_IOCFG) | 0x0040, emu->port + A_IOCFG);
		} else if (emu->card_capabilities->ca0108_chip) { /* audigy2 value */
			/* Unmute Analog now. */
			outl(inl(emu->port + A_IOCFG) | 0x0060, emu->port + A_IOCFG);
		} else {
			/* Disable routing from AC97 line out to Front speakers */
			outl(inl(emu->port + A_IOCFG) | 0x0080, emu->port + A_IOCFG);
		}
	}

#if 0
	{
		unsigned int tmp;
		/* FIXME: the following routine disables LiveDrive-II !! */
		/* TOSLink detection */
		emu->tos_link = 0;
		tmp = inl(emu->port + HCFG);
		if (tmp & (HCFG_GPINPUT0 | HCFG_GPINPUT1)) {
			outl(tmp|0x800, emu->port + HCFG);
			udelay(50);
			if (tmp != (inl(emu->port + HCFG) & ~0x800)) {
				emu->tos_link = 1;
				outl(tmp, emu->port + HCFG);
			}
		}
	}
#endif

	snd_emu10k1_intr_enable(emu, INTE_PCIERRORENABLE);
}

/*
 * Shut the chip down: silence all voices, clear the recording buffers
 * and channel interrupts, then mute and lock the caches again.
 * Rough inverse of snd_emu10k1_init().
 */
int snd_emu10k1_done(struct snd_emu10k1 *emu)
{
	int ch;

	outl(0, emu->port + INTE);

	/*
	 *  Shutdown the chip
	 */
	for (ch = 0; ch < NUM_G; ch++)
		snd_emu10k1_ptr_write(emu, DCYSUSV, ch, 0);
	for (ch = 0; ch < NUM_G; ch++) {
		snd_emu10k1_ptr_write(emu, VTFT, ch, 0);
		snd_emu10k1_ptr_write(emu, CVCF, ch, 0);
		snd_emu10k1_ptr_write(emu, PTRX, ch, 0);
		snd_emu10k1_ptr_write(emu, CPF, ch, 0);
	}

	/* reset recording buffers */
	snd_emu10k1_ptr_write(emu, MICBS, 0, 0);
	snd_emu10k1_ptr_write(emu, MICBA, 0, 0);
	snd_emu10k1_ptr_write(emu, FXBS, 0, 0);
	snd_emu10k1_ptr_write(emu, FXBA, 0, 0);
	snd_emu10k1_ptr_write(emu, FXWC, 0, 0);
	snd_emu10k1_ptr_write(emu, ADCBS, 0, ADCBS_BUFSIZE_NONE);
	snd_emu10k1_ptr_write(emu, ADCBA, 0, 0);
	snd_emu10k1_ptr_write(emu, TCBS, 0, TCBS_BUFFSIZE_16K);
	snd_emu10k1_ptr_write(emu, TCB, 0, 0);
	if (emu->audigy)
		snd_emu10k1_ptr_write(emu, A_DBG, 0, A_DBG_SINGLE_STEP);
	else
		snd_emu10k1_ptr_write(emu, DBG, 0, EMU10K1_DBG_SINGLE_STEP);

	/* disable channel interrupt */
	snd_emu10k1_ptr_write(emu, CLIEL, 0, 0);
	snd_emu10k1_ptr_write(emu, CLIEH, 0, 0);
	snd_emu10k1_ptr_write(emu, SOLEL, 0, 0);
	snd_emu10k1_ptr_write(emu, SOLEH, 0, 0);

	/* disable audio and lock cache */
	outl(HCFG_LOCKSOUNDCACHE | HCFG_LOCKTANKCACHE_MASK |
	     HCFG_MUTEBUTTONENABLE, emu->port + HCFG);
	snd_emu10k1_ptr_write(emu, PTB, 0, 0);

	return 0;
}

/*************************************************************************
 * ECARD functional implementation
 *************************************************************************/

/* In A1 Silicon, these bits are in the HC register */
#define HOOKN_BIT		(1L << 12)
#define HANDN_BIT		(1L << 11)
#define PULSEN_BIT		(1L << 10)

#define EC_GDI1			(1 << 13)
#define EC_GDI0			(1 << 14)

#define EC_NUM_CONTROL_BITS	20

#define EC_AC3_DATA_SELN	0x0001L
#define EC_EE_DATA_SEL		0x0002L
#define EC_EE_CNTRL_SELN	0x0004L
#define EC_EECLK		0x0008L
#define EC_EECS			0x0010L
#define EC_EESDO		0x0020L
#define EC_TRIM_CSN		0x0040L
#define EC_TRIM_SCLK		0x0080L
#define EC_TRIM_SDATA		0x0100L
#define EC_TRIM_MUTEN		0x0200L
#define EC_ADCCAL		0x0400L
#define EC_ADCRSTN		0x0800L
#define EC_DACCAL		0x1000L
#define EC_DACMUTEN		0x2000L
#define EC_LEDN			0x4000L

#define EC_SPDIF0_SEL_SHIFT	15
#define EC_SPDIF1_SEL_SHIFT	17
#define EC_SPDIF0_SEL_MASK	(0x3L << EC_SPDIF0_SEL_SHIFT)
#define EC_SPDIF1_SEL_MASK	(0x7L << EC_SPDIF1_SEL_SHIFT)
#define EC_SPDIF0_SELECT(_x)	(((_x) << EC_SPDIF0_SEL_SHIFT) & EC_SPDIF0_SEL_MASK)
#define EC_SPDIF1_SELECT(_x)	(((_x) << EC_SPDIF1_SEL_SHIFT) & EC_SPDIF1_SEL_MASK)
#define EC_CURRENT_PROM_VERSION 0x01	/* Self-explanatory.  This should
					 * be incremented any time the EEPROM's
					 * format is changed.  */

#define EC_EEPROM_SIZE		0x40	/* ECARD EEPROM has 64 16-bit words */

/* Addresses for special values stored in to EEPROM */
#define EC_PROM_VERSION_ADDR	0x20	/* Address of the current prom version */
#define EC_BOARDREV0_ADDR	0x21	/* LSW of board rev */
#define EC_BOARDREV1_ADDR	0x22	/* MSW of board rev */

#define EC_LAST_PROMFILE_ADDR	0x2f

#define EC_SERIALNUM_ADDR	0x30	/* First word of serial number.  The
					 * can be up to 30 characters in length
					 * and is stored as a NULL-terminated
					 * ASCII string.
Any unused bytes must be
					 * filled with zeros */

#define EC_CHECKSUM_ADDR	0x3f	/* Location at which checksum is stored */

/* Most of this stuff is pretty self-evident.  According to the hardware
 * dudes, we need to leave the ADCCAL bit low in order to avoid a DC
 * offset problem.  Weird.
 */
#define EC_RAW_RUN_MODE		(EC_DACMUTEN | EC_ADCRSTN | EC_TRIM_MUTEN | \
				 EC_TRIM_CSN)

#define EC_DEFAULT_ADC_GAIN	0xC4C4
#define EC_DEFAULT_SPDIF0_SEL	0x0
#define EC_DEFAULT_SPDIF1_SEL	0x4

/**************************************************************************
 * @func Clock bits into the Ecard's control latch.  The Ecard uses a
 *  control latch which is loaded bit-serially by toggling the Modem control
 *  lines from function 2 on the E8010.  This function hides these details
 *  and presents the illusion that we are actually writing to a distinct
 *  register.
 */
static void snd_emu10k1_ecard_write(struct snd_emu10k1 *emu, unsigned int value)
{
	unsigned short count;
	unsigned int data;
	unsigned long hc_port;
	unsigned int hc_value;

	hc_port = emu->port + HCFG;
	hc_value = inl(hc_port) & ~(HOOKN_BIT | HANDN_BIT | PULSEN_BIT);
	outl(hc_value, hc_port);

	for (count = 0; count < EC_NUM_CONTROL_BITS; count++) {

		/* Set up the value */
		data = ((value & 0x1) ? PULSEN_BIT : 0);
		value >>= 1;

		outl(hc_value | data, hc_port);

		/* Clock the shift register */
		outl(hc_value | data | HANDN_BIT, hc_port);
		outl(hc_value | data, hc_port);
	}

	/* Latch the bits */
	outl(hc_value | HOOKN_BIT, hc_port);
	outl(hc_value, hc_port);
}

/**************************************************************************
 * @func Set the gain of the ECARD's CS3310 Trim/gain controller.  The
 * trim value consists of a 16bit value which is composed of two
 * 8 bit gain/trim values, one for the left channel and one for the
 * right channel.  The following table maps from the Gain/Attenuation
 * value in decibels into the corresponding bit pattern for a single
 * channel.
 */
static void snd_emu10k1_ecard_setadcgain(struct snd_emu10k1 *emu,
					 unsigned short gain)
{
	unsigned int bit;

	/* Enable writing to the TRIM registers */
	snd_emu10k1_ecard_write(emu, emu->ecard_ctrl & ~EC_TRIM_CSN);

	/* Do it again to insure that we meet hold time requirements */
	snd_emu10k1_ecard_write(emu, emu->ecard_ctrl & ~EC_TRIM_CSN);

	/* shift the 16 gain bits out MSB-first */
	for (bit = (1 << 15); bit; bit >>= 1) {
		unsigned int value;

		value = emu->ecard_ctrl & ~(EC_TRIM_CSN | EC_TRIM_SDATA);

		if (gain & bit)
			value |= EC_TRIM_SDATA;

		/* Clock the bit */
		snd_emu10k1_ecard_write(emu, value);
		snd_emu10k1_ecard_write(emu, value | EC_TRIM_SCLK);
		snd_emu10k1_ecard_write(emu, value);
	}

	snd_emu10k1_ecard_write(emu, emu->ecard_ctrl);
}

/*
 * One-time ECARD bring-up: select the I2S codec format, calibrate the
 * converters and program the default analog input gain.
 */
static int snd_emu10k1_ecard_init(struct snd_emu10k1 *emu)
{
	unsigned int hc_value;

	/* Set up the initial settings */
	emu->ecard_ctrl = EC_RAW_RUN_MODE |
			  EC_SPDIF0_SELECT(EC_DEFAULT_SPDIF0_SEL) |
			  EC_SPDIF1_SELECT(EC_DEFAULT_SPDIF1_SEL);

	/* Step 0: Set the codec type in the hardware control register
	 * and enable audio output */
	hc_value = inl(emu->port + HCFG);
	outl(hc_value | HCFG_AUDIOENABLE | HCFG_CODECFORMAT_I2S, emu->port + HCFG);
	inl(emu->port + HCFG);

	/* Step 1: Turn off the led and deassert TRIM_CS */
	snd_emu10k1_ecard_write(emu, EC_ADCCAL | EC_LEDN | EC_TRIM_CSN);

	/* Step 2: Calibrate the ADC and DAC */
	snd_emu10k1_ecard_write(emu, EC_DACCAL | EC_LEDN | EC_TRIM_CSN);

	/* Step 3: Wait for awhile;   XXX We can't get away with this
	 * under a real operating system; we'll need to block and wait that
	 * way. */
	snd_emu10k1_wait(emu, 48000);

	/* Step 4: Switch off the DAC and ADC calibration.  Note
	 * That ADC_CAL is actually an inverted signal, so we assert
	 * it here to stop calibration.
*/
	snd_emu10k1_ecard_write(emu, EC_ADCCAL | EC_LEDN | EC_TRIM_CSN);

	/* Step 4: Switch into run mode */
	snd_emu10k1_ecard_write(emu, emu->ecard_ctrl);

	/* Step 5: Set the analog input gain */
	snd_emu10k1_ecard_setadcgain(emu, EC_DEFAULT_ADC_GAIN);

	return 0;
}

/*
 * CardBus-specific magic-port initialisation; must run before the rest of
 * the IO-ports become usable.  The inl() read-backs act as write posts.
 */
static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
{
	unsigned long special_port;
	unsigned int value;

	/* Special initialisation routine
	 * before the rest of the IO-Ports become active.
	 */
	special_port = emu->port + 0x38;
	value = inl(special_port);
	outl(0x00d00000, special_port);
	value = inl(special_port);
	outl(0x00d00001, special_port);
	value = inl(special_port);
	outl(0x00d0005f, special_port);
	value = inl(special_port);
	outl(0x00d0007f, special_port);
	value = inl(special_port);
	outl(0x0090007f, special_port);
	value = inl(special_port);

	snd_emu10k1_ptr20_write(emu, TINA2_VOLUME, 0, 0xfefefefe); /* Defaults to 0x30303030 */
	/* Delay to give time for ADC chip to switch on. It needs 113ms */
	msleep(200);
	return 0;
}

/*
 * Bit-bang an FPGA netlist image (fetched via request_firmware()) into the
 * Hana FPGA over the A_IOCFG GPIO lines.  Returns 0 or the firmware-load
 * error code.
 */
static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, const char *filename)
{
	int err;
	int n, i;
	int reg;
	int value;
	unsigned int write_post;
	unsigned long flags;
	const struct firmware *fw_entry;

	err = request_firmware(&fw_entry, filename, &emu->pci->dev);
	if (err != 0) {
		snd_printk(KERN_ERR "firmware: %s not found. Err = %d\n", filename, err);
		return err;
	}
	snd_printk(KERN_INFO "firmware size = 0x%zx\n", fw_entry->size);

	/* The FPGA is a Xilinx Spartan IIE XC2S50E */
	/* GPIO7 -> FPGA PGMN
	 * GPIO6 -> FPGA CCLK
	 * GPIO5 -> FPGA DIN
	 * FPGA CONFIG OFF -> FPGA PGMN
	 */
	spin_lock_irqsave(&emu->emu_lock, flags);
	outl(0x00, emu->port + A_IOCFG); /* Set PGMN low for 1uS. */
	write_post = inl(emu->port + A_IOCFG);
	udelay(100);
	outl(0x80, emu->port + A_IOCFG); /* Leave bit 7 set during netlist setup. */
	write_post = inl(emu->port + A_IOCFG);
	udelay(100); /* Allow FPGA memory to clean */
	/* shift each firmware byte out LSB-first on GPIO5, clocked by GPIO6 */
	for (n = 0; n < fw_entry->size; n++) {
		value = fw_entry->data[n];
		for (i = 0; i < 8; i++) {
			reg = 0x80;
			if (value & 0x1)
				reg = reg | 0x20;
			value = value >> 1;
			outl(reg, emu->port + A_IOCFG);
			write_post = inl(emu->port + A_IOCFG);
			outl(reg | 0x40, emu->port + A_IOCFG);
			write_post = inl(emu->port + A_IOCFG);
		}
	}
	/* After programming, set GPIO bit 4 high again. */
	outl(0x10, emu->port + A_IOCFG);
	write_post = inl(emu->port + A_IOCFG);
	spin_unlock_irqrestore(&emu->emu_lock, flags);

	release_firmware(fw_entry);
	return 0;
}

/*
 * Kernel thread that polls the Hana FPGA once a second and, when an Audio
 * Dock appears, loads the dock firmware and resyncs clocking.
 */
static int emu1010_firmware_thread(void *data)
{
	struct snd_emu10k1 *emu = data;
	u32 tmp, tmp2, reg;
	int err;

	for (;;) {
		/* Delay to allow Audio Dock to settle */
		msleep_interruptible(1000);
		if (kthread_should_stop())
			break;
		snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
		snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
		if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
			/* Audio Dock attached */
			/* Return to Audio Dock programming mode */
			snd_printk(KERN_INFO "emu1010: Loading Audio Dock Firmware\n");
			snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
					       EMU_HANA_FPGA_CONFIG_AUDIODOCK);
			if (emu->card_capabilities->emu_model ==
			    EMU_MODEL_EMU1010) {
				err = snd_emu1010_load_firmware(emu, DOCK_FILENAME);
				if (err != 0)
					continue;
			} else if (emu->card_capabilities->emu_model ==
				   EMU_MODEL_EMU1010B) {
				err = snd_emu1010_load_firmware(emu, MICRO_DOCK_FILENAME);
				if (err != 0)
					continue;
			} else if (emu->card_capabilities->emu_model ==
				   EMU_MODEL_EMU1616) {
				err = snd_emu1010_load_firmware(emu, MICRO_DOCK_FILENAME);
				if (err != 0)
					continue;
			}

			snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
			snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
			snd_printk(KERN_INFO "emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n", reg);
			/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
			snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
			snd_printk(KERN_INFO "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", reg);
			if ((reg & 0x1f) != 0x15) {
				/* FPGA failed to be programmed */
				snd_printk(KERN_INFO "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n", reg);
				continue;
			}
			snd_printk(KERN_INFO "emu1010: Audio Dock Firmware loaded\n");
			snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
			snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
			snd_printk(KERN_INFO "Audio Dock ver: %u.%u\n", tmp, tmp2);
			/* Sync clocking between 1010 and Dock */
			/* Allow DLL to settle */
			msleep(10);
			/* Unmute all. Default is muted after a firmware load */
			snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
		}
	}
	snd_printk(KERN_INFO "emu1010: firmware thread stopping\n");
	return 0;
}

/*
 * EMU-1010 - details found out from this driver, official MS Win drivers,
 * testing the card:
 *
 * Audigy2 (aka Alice2):
 * ---------------------
 *
 *  * communication over PCI
 *  * conversion of 32-bit data coming over EMU32 links from HANA FPGA
 *    to 2 x 16-bit, using internal DSP instructions
 *  * slave mode, clock supplied by HANA
 *  * linked to HANA using:
 *      32 x 32-bit serial EMU32 output channels
 *      16 x EMU32 input channels
 *      (?) x I2S I/O channels (?)
 *
 * FPGA (aka HANA):
 * ---------------
 *
 *  * provides all (?) physical inputs and outputs of the card
 *    (ADC, DAC, SPDIF I/O, ADAT I/O, etc.)
 *  * provides clock signal for the card and Alice2
 *  * two crystals - for 44.1kHz and 48kHz multiples
 *  * provides internal routing of signal sources to signal destinations
 *  * inputs/outputs to Alice2 - see above
 *
 * Current status of the driver:
 * ----------------------------
 *
 *  * only 44.1/48kHz supported (the MS Win driver supports up to 192 kHz)
 *  * PCM device nb.
 2:
 *      16 x 16-bit playback - snd_emu10k1_fx8010_playback_ops
 *      16 x 32-bit capture - snd_emu10k1_capture_efx_ops
 */
/*
 * EMU-1010 family bring-up: program the Hana FPGA netlist, verify its ID,
 * then configure clocking, pads, SPDIF/ADAT and signal routing defaults.
 */
static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
{
	unsigned int i;
	u32 tmp, tmp2, reg;
	int err;
	const char *filename = NULL;

	snd_printk(KERN_INFO "emu1010: Special config.\n");
	/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
	 * Lock Sound Memory Cache, Lock Tank Memory Cache,
	 * Mute all codecs.
	 */
	outl(0x0005a00c, emu->port + HCFG);
	/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
	 * Lock Tank Memory Cache,
	 * Mute all codecs.
	 */
	outl(0x0005a004, emu->port + HCFG);
	/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
	 * Mute all codecs.
	 */
	outl(0x0005a000, emu->port + HCFG);
	/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
	 * Mute all codecs.
	 */
	outl(0x0005a000, emu->port + HCFG);

	/* Disable 48Volt power to Audio Dock */
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);

	/* ID, should read & 0x7f = 0x55. (Bit 7 is the IRQ bit) */
	snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
	snd_printdd("reg1 = 0x%x\n", reg);
	if ((reg & 0x3f) == 0x15) {
		/* FPGA netlist already present so clear it */
		/* Return to programming mode */
		snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0x02);
	}
	snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
	snd_printdd("reg2 = 0x%x\n", reg);
	if ((reg & 0x3f) == 0x15) {
		/* FPGA failed to return to programming mode */
		snd_printk(KERN_INFO "emu1010: FPGA failed to return to programming mode\n");
		return -ENODEV;
	}
	snd_printk(KERN_INFO "emu1010: EMU_HANA_ID = 0x%x\n", reg);

	/* pick the netlist image matching the detected card model */
	switch (emu->card_capabilities->emu_model) {
	case EMU_MODEL_EMU1010:
		filename = HANA_FILENAME;
		break;
	case EMU_MODEL_EMU1010B:
		filename = EMU1010B_FILENAME;
		break;
	case EMU_MODEL_EMU1616:
		filename = EMU1010_NOTEBOOK_FILENAME;
		break;
	case EMU_MODEL_EMU0404:
		filename = EMU0404_FILENAME;
		break;
	default:
		filename = NULL;
		return -ENODEV;
		break;
	}
	snd_printk(KERN_INFO "emu1010: filename %s testing\n", filename);
	err = snd_emu1010_load_firmware(emu, filename);
	if (err != 0) {
		snd_printk(
			KERN_INFO "emu1010: Loading Firmware file %s failed\n",
			filename);
		return err;
	}

	/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
	snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
	if ((reg & 0x3f) != 0x15) {
		/* FPGA failed to be programmed */
		snd_printk(KERN_INFO "emu1010: Loading Hana Firmware file failed, reg = 0x%x\n", reg);
		return -ENODEV;
	}

	snd_printk(KERN_INFO "emu1010: Hana Firmware loaded\n");
	snd_emu1010_fpga_read(emu, EMU_HANA_MAJOR_REV, &tmp);
	snd_emu1010_fpga_read(emu, EMU_HANA_MINOR_REV, &tmp2);
	snd_printk(KERN_INFO "emu1010: Hana version: %u.%u\n", tmp, tmp2);
	/* Enable 48Volt power to Audio Dock */
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, EMU_HANA_DOCK_PWR_ON);

	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
	snd_printk(KERN_INFO "emu1010: Card options = 0x%x\n", reg);
	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
	snd_printk(KERN_INFO "emu1010: Card options = 0x%x\n", reg);
	snd_emu1010_fpga_read(emu, EMU_HANA_OPTICAL_TYPE, &tmp);
	/* Optical -> ADAT I/O  */
	/* 0 : SPDIF
	 * 1 : ADAT
	 */
	emu->emu1010.optical_in = 1; /* IN_ADAT */
	emu->emu1010.optical_out = 1; /* IN_ADAT */
	tmp = 0;
	tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : 0) |
		(emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : 0);
	snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
	snd_emu1010_fpga_read(emu, EMU_HANA_ADC_PADS, &tmp);
	/* Set no attenuation on Audio Dock pads. */
	snd_emu1010_fpga_write(emu, EMU_HANA_ADC_PADS, 0x00);
	emu->emu1010.adc_pads = 0x00;
	snd_emu1010_fpga_read(emu, EMU_HANA_DOCK_MISC, &tmp);
	/* Unmute Audio dock DACs, Headphone source DAC-4. */
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_MISC, 0x30);
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12);
	snd_emu1010_fpga_read(emu, EMU_HANA_DAC_PADS, &tmp);
	/* DAC PADs. */
	snd_emu1010_fpga_write(emu, EMU_HANA_DAC_PADS, 0x0f);
	emu->emu1010.dac_pads = 0x0f;
	snd_emu1010_fpga_read(emu, EMU_HANA_DOCK_MISC, &tmp);
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_MISC, 0x30);
	snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
	/* SPDIF Format. Set Consumer mode, 24bit, copy enable */
	snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10);
	/* MIDI routing */
	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19);
	/* Unknown. */
	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c);
	/* IRQ Enable: Alll on */
	/* snd_emu1010_fpga_write(emu, 0x09, 0x0f ); */
	/* IRQ Enable: All off */
	snd_emu1010_fpga_write(emu, EMU_HANA_IRQ_ENABLE, 0x00);

	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
	snd_printk(KERN_INFO "emu1010: Card options3 = 0x%x\n", reg);
	/* Default WCLK set to 48kHz. */
	snd_emu1010_fpga_write(emu, EMU_HANA_DEFCLOCK, 0x00);
	/* Word Clock source, Internal 48kHz x1 */
	snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K);
	/* snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K | EMU_HANA_WCLOCK_4X); */
	/* Audio Dock LEDs.
*/
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12);

#if 0
	/* For 96kHz */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_0, EMU_SRC_HAMOA_ADC_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_1, EMU_SRC_HAMOA_ADC_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_4, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_5, EMU_SRC_HAMOA_ADC_RIGHT2);
#endif
#if 0
	/* For 192kHz */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_0, EMU_SRC_HAMOA_ADC_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_1, EMU_SRC_HAMOA_ADC_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_2, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_3, EMU_SRC_HAMOA_ADC_RIGHT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_4, EMU_SRC_HAMOA_ADC_LEFT3);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_5, EMU_SRC_HAMOA_ADC_RIGHT3);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_6, EMU_SRC_HAMOA_ADC_LEFT4);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_7, EMU_SRC_HAMOA_ADC_RIGHT4);
#endif
#if 1
	/* For 48kHz */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_0, EMU_SRC_DOCK_MIC_A1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_1, EMU_SRC_DOCK_MIC_B1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_2, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_3, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_4, EMU_SRC_DOCK_ADC1_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_5, EMU_SRC_DOCK_ADC1_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_6, EMU_SRC_DOCK_ADC2_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_7, EMU_SRC_DOCK_ADC2_RIGHT1);
	/* Pavel Hofman - setting defaults for 8 more capture channels
	 * Defaults only, users will set their own values anyways, let's
	 * just copy/paste.
	 */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_8, EMU_SRC_DOCK_MIC_A1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_9, EMU_SRC_DOCK_MIC_B1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_A, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_B, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_C, EMU_SRC_DOCK_ADC1_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_D, EMU_SRC_DOCK_ADC1_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_E, EMU_SRC_DOCK_ADC2_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_F, EMU_SRC_DOCK_ADC2_RIGHT1);
#endif
#if 0
	/* Original */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_4, EMU_SRC_HANA_ADAT);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_5, EMU_SRC_HANA_ADAT + 1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_6, EMU_SRC_HANA_ADAT + 2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_7, EMU_SRC_HANA_ADAT + 3);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_8, EMU_SRC_HANA_ADAT + 4);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_9, EMU_SRC_HANA_ADAT + 5);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_A, EMU_SRC_HANA_ADAT + 6);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_B, EMU_SRC_HANA_ADAT + 7);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_C, EMU_SRC_DOCK_MIC_A1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_D, EMU_SRC_DOCK_MIC_B1);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_E, EMU_SRC_HAMOA_ADC_LEFT2);
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_ALICE2_EMU32_F, EMU_SRC_HAMOA_ADC_LEFT2);
#endif
	/* silence every physical output until routing is chosen below */
	for (i = 0; i < 0x20; i++) {
		/* AudioDock Elink <- Silence */
		snd_emu1010_fpga_link_dst_src_write(emu, 0x0100 + i, EMU_SRC_SILENCE);
	}
	for (i = 0; i < 4; i++) {
		/* Hana SPDIF Out <- Silence */
		snd_emu1010_fpga_link_dst_src_write(emu, 0x0200 + i, EMU_SRC_SILENCE);
	}
	for (i = 0; i < 7; i++) {
		/* Hamoa DAC <- Silence */
		snd_emu1010_fpga_link_dst_src_write(emu, 0x0300 + i, EMU_SRC_SILENCE);
	}
	for (i = 0; i < 7; i++) {
		/* Hana ADAT Out <- Silence */
		snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_HANA_ADAT + i, EMU_SRC_SILENCE);
	}
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S0_LEFT, EMU_SRC_DOCK_ADC1_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S0_RIGHT, EMU_SRC_DOCK_ADC1_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S1_LEFT, EMU_SRC_DOCK_ADC2_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S1_RIGHT, EMU_SRC_DOCK_ADC2_RIGHT1);
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S2_LEFT, EMU_SRC_DOCK_ADC3_LEFT1);
	snd_emu1010_fpga_link_dst_src_write(emu, EMU_DST_ALICE_I2S2_RIGHT, EMU_SRC_DOCK_ADC3_RIGHT1);
	snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x01); /* Unmute all */

	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &tmp);

	/* AC97 1.03, Any 32Meg of 2Gig address, Auto-Mute, EMU32 Slave,
	 * Lock Sound Memory Cache, Lock Tank Memory Cache,
	 * Mute all codecs.
	 */
	outl(0x0000a000, emu->port + HCFG);
	/* AC97 1.03, Any 32Meg of 2Gig address, Auto-Mute, EMU32 Slave,
	 * Lock Sound Memory Cache, Lock Tank Memory Cache,
	 * Un-Mute all codecs.
	 */
	outl(0x0000a001, emu->port + HCFG);

	/* Initial boot complete.
Now patches */
	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &tmp);

	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19);	/* MIDI Route */
	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c);	/* Unknown */
	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_IN, 0x19);	/* MIDI Route */
	snd_emu1010_fpga_write(emu, EMU_HANA_MIDI_OUT, 0x0c);	/* Unknown */
	snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
	snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10);	/* SPDIF Format spdif (or 0x11 for aes/ebu) */

	/* Start Micro/Audio Dock firmware loader thread */
	if (!emu->emu1010.firmware_thread) {
		emu->emu1010.firmware_thread =
			kthread_create(emu1010_firmware_thread, emu,
				       "emu1010_firmware");
		wake_up_process(emu->emu1010.firmware_thread);
	}

#if 0
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32B + 2); /* ALICE2 bus 0xa2 */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_HAMOA_DAC_RIGHT1, EMU_SRC_ALICE_EMU32B + 3); /* ALICE2 bus 0xa3 */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_HANA_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 2); /* ALICE2 bus 0xb2 */
	snd_emu1010_fpga_link_dst_src_write(emu,
		EMU_DST_HANA_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 3); /* ALICE2 bus 0xb3 */
#endif

	/* Default outputs */
	if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1616) {
		/* 1616(M) cardbus default outputs */
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC1_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[0] = 17;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC1_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[1] = 18;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC2_LEFT1, EMU_SRC_ALICE_EMU32A + 2);
		emu->emu1010.output_source[2] = 19;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC2_RIGHT1, EMU_SRC_ALICE_EMU32A + 3);
		emu->emu1010.output_source[3] = 20;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC3_LEFT1, EMU_SRC_ALICE_EMU32A + 4);
		emu->emu1010.output_source[4] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC3_RIGHT1, EMU_SRC_ALICE_EMU32A + 5);
		emu->emu1010.output_source[5] = 22;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_MANA_DAC_LEFT, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[16] = 17;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_MANA_DAC_RIGHT, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[17] = 18;
	} else {
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC1_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[0] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC1_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[1] = 22;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC2_LEFT1, EMU_SRC_ALICE_EMU32A + 2);
		emu->emu1010.output_source[2] = 23;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC2_RIGHT1, EMU_SRC_ALICE_EMU32A + 3);
		emu->emu1010.output_source[3] = 24;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC3_LEFT1, EMU_SRC_ALICE_EMU32A + 4);
		emu->emu1010.output_source[4] = 25;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC3_RIGHT1, EMU_SRC_ALICE_EMU32A + 5);
		emu->emu1010.output_source[5] = 26;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC4_LEFT1, EMU_SRC_ALICE_EMU32A + 6);
		emu->emu1010.output_source[6] = 27;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_DAC4_RIGHT1, EMU_SRC_ALICE_EMU32A + 7);
		emu->emu1010.output_source[7] = 28;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_PHONES_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[8] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_PHONES_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[9] = 22;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[10] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_DOCK_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[11] = 22;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_SPDIF_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[12] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_SPDIF_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[13] = 22;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[14] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HAMOA_DAC_RIGHT1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[15] = 22;
		/* ALICE2 bus 0xa0 */
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT, EMU_SRC_ALICE_EMU32A + 0);
		emu->emu1010.output_source[16] = 21;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 1, EMU_SRC_ALICE_EMU32A + 1);
		emu->emu1010.output_source[17] = 22;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 2, EMU_SRC_ALICE_EMU32A + 2);
		emu->emu1010.output_source[18] = 23;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 3, EMU_SRC_ALICE_EMU32A + 3);
		emu->emu1010.output_source[19] = 24;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 4, EMU_SRC_ALICE_EMU32A + 4);
		emu->emu1010.output_source[20] = 25;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 5, EMU_SRC_ALICE_EMU32A + 5);
		emu->emu1010.output_source[21] = 26;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 6, EMU_SRC_ALICE_EMU32A + 6);
		emu->emu1010.output_source[22] = 27;
		snd_emu1010_fpga_link_dst_src_write(emu,
			EMU_DST_HANA_ADAT + 7, EMU_SRC_ALICE_EMU32A + 7);
		emu->emu1010.output_source[23] = 28;
	}
	/* TEMP: Select SPDIF in/out */
	/* snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, 0x0); */ /* Output spdif */

	/* TEMP: Select 48kHz SPDIF out */
	snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x0); /* Mute all */
	snd_emu1010_fpga_write(emu, EMU_HANA_DEFCLOCK, 0x0); /* Default fallback clock 48kHz */
	/* Word Clock source, Internal 48kHz x1 */
	snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K);
	/* snd_emu1010_fpga_write(emu, EMU_HANA_WCLOCK, EMU_HANA_WCLOCK_INT_48K | EMU_HANA_WCLOCK_4X); */
	emu->emu1010.internal_clock = 1; /* 48000 */
	snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_LEDS_2, 0x12); /* Set LEDs on Audio Dock */
	snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, 0x1); /* Unmute all */
	/* snd_emu1010_fpga_write(emu, 0x7, 0x0); */ /* Mute all */
	/* snd_emu1010_fpga_write(emu, 0x7, 0x1); */ /* Unmute all */
	/* snd_emu1010_fpga_write(emu, 0xe, 0x12); */ /* Set LEDs on Audio Dock */

	return 0;
}

/*
 *  Create the EMU10K1 instance
 */

#ifdef CONFIG_PM
static int alloc_pm_buffer(struct snd_emu10k1 *emu);
static void free_pm_buffer(struct snd_emu10k1 *emu);
#endif

/*
 * Tear down the chip and release every resource owned by the instance,
 * roughly in reverse order of acquisition; frees the emu structure itself.
 */
static int snd_emu10k1_free(struct snd_emu10k1 *emu)
{
	if (emu->port) {	/* avoid access to already used hardware */
		snd_emu10k1_fx8010_tram_setup(emu, 0);
		snd_emu10k1_done(emu);
		snd_emu10k1_free_efx(emu);
	}
	if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1010) {
		/* Disable 48Volt power to Audio Dock */
		snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
	}
	if (emu->emu1010.firmware_thread)
		kthread_stop(emu->emu1010.firmware_thread);
	if (emu->irq >= 0)
		free_irq(emu->irq, emu);
	/* remove reserved page */
	if (emu->reserved_page) {
		snd_emu10k1_synth_free(emu,
			(struct snd_util_memblk *)emu->reserved_page);
		emu->reserved_page = NULL;
	}
	if (emu->memhdr)
		snd_util_memhdr_free(emu->memhdr);
	if (emu->silent_page.area)
		snd_dma_free_pages(&emu->silent_page);
	if (emu->ptb_pages.area)
		snd_dma_free_pages(&emu->ptb_pages);
	vfree(emu->page_ptr_table);
	vfree(emu->page_addr_table);
#ifdef CONFIG_PM
	free_pm_buffer(emu);
#endif
	if (emu->port)
		pci_release_regions(emu->pci);
	if (emu->card_capabilities->ca0151_chip) /* P16V */
		snd_p16v_free(emu);
	pci_disable_device(emu->pci);
	kfree(emu);
	return 0;
}

static int snd_emu10k1_dev_free(struct snd_device *device)
{
	struct snd_emu10k1 *emu = device->device_data;
return snd_emu10k1_free(emu); } static struct snd_emu_chip_details emu_chip_details[] = { /* Audigy4 (Not PRO) SB0610 */ /* Tested by James@superbug.co.uk 4th April 2006 */ /* A_IOCFG bits * Output * 0: ? * 1: ? * 2: ? * 3: 0 - Digital Out, 1 - Line in * 4: ? * 5: ? * 6: ? * 7: ? * Input * 8: ? * 9: ? * A: Green jack sense (Front) * B: ? * C: Black jack sense (Rear/Side Right) * D: Yellow jack sense (Center/LFE/Side Left) * E: ? * F: ? * * Digital Out/Line in switch using A_IOCFG bit 3 (0x08) * 0 - Digital Out * 1 - Line in */ /* Mic input not tested. * Analog CD input not tested * Digital Out not tested. * Line in working. * Audio output 5.1 working. Side outputs not working. */ /* DSP: CA10300-IAT LF * DAC: Cirrus Logic CS4382-KQZ * ADC: Philips 1361T * AC97: Sigmatel STAC9750 * CA0151: None */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10211102, .driver = "Audigy2", .name = "SB Audigy 4 [SB0610]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, .spk71 = 1, .adc_1361t = 1, /* 24 bit capture instead of 16bit */ .ac97_chip = 1} , /* Audigy 2 Value AC3 out does not work yet. * Need to find out how to turn off interpolators. */ /* Tested by James@superbug.co.uk 3rd July 2005 */ /* DSP: CA0108-IAT * DAC: CS4382-KQ * ADC: Philips 1361T * AC97: STAC9750 * CA0151: None */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10011102, .driver = "Audigy2", .name = "SB Audigy 2 Value [SB0400]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, .spk71 = 1, .ac97_chip = 1} , /* Audigy 2 ZS Notebook Cardbus card.*/ /* Tested by James@superbug.co.uk 6th November 2006 */ /* Audio output 7.1/Headphones working. * Digital output working. (AC3 not checked, only PCM) * Audio Mic/Line inputs working. * Digital input not tested. */ /* DSP: Tina2 * DAC: Wolfson WM8768/WM8568 * ADC: Wolfson WM8775 * AC97: None * CA0151: None */ /* Tested by James@superbug.co.uk 4th April 2006 */ /* A_IOCFG bits * Output * 0: Not Used * 1: 0 = Mute all the 7.1 channel out. 1 = unmute. 
* 2: Analog input 0 = line in, 1 = mic in * 3: Not Used * 4: Digital output 0 = off, 1 = on. * 5: Not Used * 6: Not Used * 7: Not Used * Input * All bits 1 (0x3fxx) means nothing plugged in. * 8-9: 0 = Line in/Mic, 2 = Optical in, 3 = Nothing. * A-B: 0 = Headphones, 2 = Optical out, 3 = Nothing. * C-D: 2 = Front/Rear/etc, 3 = nothing. * E-F: Always 0 * */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, .ca_cardbus_chip = 1, .spi_dac = 1, .i2c_adc = 1, .spk71 = 1} , /* Tested by James@superbug.co.uk 4th Nov 2007. */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x42011102, .driver = "Audigy2", .name = "E-mu 1010 Notebook [MAEM8950]", .id = "EMU1010", .emu10k2_chip = 1, .ca0108_chip = 1, .ca_cardbus_chip = 1, .spk71 = 1 , .emu_model = EMU_MODEL_EMU1616}, /* Tested by James@superbug.co.uk 4th Nov 2007. */ /* This is MAEM8960, 0202 is MAEM 8980 */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40041102, .driver = "Audigy2", .name = "E-mu 1010b PCI [MAEM8960]", .id = "EMU1010", .emu10k2_chip = 1, .ca0108_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */ /* Tested by James@superbug.co.uk 8th July 2005. */ /* This is MAEM8810, 0202 is MAEM8820 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102, .driver = "Audigy2", .name = "E-mu 1010 [MAEM8810]", .id = "EMU1010", .emu10k2_chip = 1, .ca0102_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU1010}, /* EMU 1010 old revision */ /* EMU0404b */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40021102, .driver = "Audigy2", .name = "E-mu 0404b PCI [MAEM8852]", .id = "EMU0404", .emu10k2_chip = 1, .ca0108_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 new revision */ /* Tested by James@superbug.co.uk 20-3-2007. 
*/ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40021102, .driver = "Audigy2", .name = "E-mu 0404 [MAEM8850]", .id = "EMU0404", .emu10k2_chip = 1, .ca0102_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */ /* Note that all E-mu cards require kernel 2.6 or newer. */ {.vendor = 0x1102, .device = 0x0008, .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, .ac97_chip = 1} , /* Tested by James@superbug.co.uk 3rd July 2005 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20071102, .driver = "Audigy2", .name = "SB Audigy 4 PRO [SB0380]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .ac97_chip = 1} , /* Tested by shane-alsa@cm.nu 5th Nov 2005 */ /* The 0x20061102 does have SB0350 written on it * Just like 0x20021102 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20061102, .driver = "Audigy2", .name = "SB Audigy 2 [SB0350b]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .invert_shared_spdif = 1, /* digital/analog switch swapped */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20021102, .driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0350]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .invert_shared_spdif = 1, /* digital/analog switch swapped */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x20011102, .driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0360]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .invert_shared_spdif = 1, /* digital/analog switch swapped */ .ac97_chip = 1} , /* Audigy 2 */ /* Tested by James@superbug.co.uk 3rd July 2005 */ /* DSP: CA0102-IAT * DAC: CS4382-KQ * ADC: Philips 1361T * AC97: STAC9721 * CA0151: Yes */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10071102, .driver = 
"Audigy2", .name = "SB Audigy 2 [SB0240]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .adc_1361t = 1, /* 24 bit capture instead of 16bit */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1} , /* Dell OEM/Creative Labs Audigy 2 ZS */ /* See ALSA bug#1365 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10031102, .driver = "Audigy2", .name = "SB Audigy 2 ZS [SB0353]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .invert_shared_spdif = 1, /* digital/analog switch swapped */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102, .driver = "Audigy2", .name = "SB Audigy 2 Platinum [SB0240P]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1, .invert_shared_spdif = 1, /* digital/analog switch swapped */ .adc_1361t = 1, /* 24 bit capture instead of 16bit. 
Fixes ALSA bug#324 */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .revision = 0x04, .driver = "Audigy2", .name = "SB Audigy 2 [Unknown]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, .ca0151_chip = 1, .spdif_bug = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00531102, .driver = "Audigy", .name = "SB Audigy 1 [SB0092]", .id = "Audigy", .emu10k2_chip = 1, .ca0102_chip = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00521102, .driver = "Audigy", .name = "SB Audigy 1 ES [SB0160]", .id = "Audigy", .emu10k2_chip = 1, .ca0102_chip = 1, .spdif_bug = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x00511102, .driver = "Audigy", .name = "SB Audigy 1 [SB0090]", .id = "Audigy", .emu10k2_chip = 1, .ca0102_chip = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .driver = "Audigy", .name = "Audigy 1 [Unknown]", .id = "Audigy", .emu10k2_chip = 1, .ca0102_chip = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x100a1102, .driver = "EMU10K1", .name = "SB Live! 5.1 [SB0220]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x806b1102, .driver = "EMU10K1", .name = "SB Live! [SB0105]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x806a1102, .driver = "EMU10K1", .name = "SB Live! Value [SB0103]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80691102, .driver = "EMU10K1", .name = "SB Live! Value [SB0101]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , /* Tested by ALSA bug#1680 26th December 2005 */ /* note: It really has SB0220 written on the card, */ /* but it's SB0228 according to kx.inf */ {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80661102, .driver = "EMU10K1", .name = "SB Live! 
5.1 Dell OEM [SB0228]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , /* Tested by Thomas Zehetbauer 27th Aug 2005 */ {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80651102, .driver = "EMU10K1", .name = "SB Live! 5.1 [SB0220]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80641102, .driver = "EMU10K1", .name = "SB Live! 5.1", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , /* Tested by alsa bugtrack user "hus" bug #1297 12th Aug 2005 */ {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102, .driver = "EMU10K1", .name = "SB Live! 5.1 [SB0060]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 2, /* ac97 is optional; both SBLive 5.1 and platinum * share the same IDs! */ .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80511102, .driver = "EMU10K1", .name = "SB Live! Value [CT4850]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80401102, .driver = "EMU10K1", .name = "SB Live! Platinum [CT4760P]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80321102, .driver = "EMU10K1", .name = "SB Live! Value [CT4871]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80311102, .driver = "EMU10K1", .name = "SB Live! Value [CT4831]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80281102, .driver = "EMU10K1", .name = "SB Live! Value [CT4870]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , /* Tested by James@superbug.co.uk 3rd July 2005 */ {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80271102, .driver = "EMU10K1", .name = "SB Live! 
Value [CT4832]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80261102, .driver = "EMU10K1", .name = "SB Live! Value [CT4830]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80231102, .driver = "EMU10K1", .name = "SB PCI512 [CT4790]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80221102, .driver = "EMU10K1", .name = "SB Live! Value [CT4780]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x40011102, .driver = "EMU10K1", .name = "E-mu APS [PC545]", .id = "APS", .emu10k1_chip = 1, .ecard = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x00211102, .driver = "EMU10K1", .name = "SB Live! [CT4620]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x00201102, .driver = "EMU10K1", .name = "SB Live! Value [CT4670]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .driver = "EMU10K1", .name = "SB Live! 
[Unknown]", .id = "Live", .emu10k1_chip = 1, .ac97_chip = 1, .sblive51 = 1} , { } /* terminator */ }; int __devinit snd_emu10k1_create(struct snd_card *card, struct pci_dev *pci, unsigned short extin_mask, unsigned short extout_mask, long max_cache_bytes, int enable_ir, uint subsystem, struct snd_emu10k1 **remu) { struct snd_emu10k1 *emu; int idx, err; int is_audigy; unsigned int silent_page; const struct snd_emu_chip_details *c; static struct snd_device_ops ops = { .dev_free = snd_emu10k1_dev_free, }; *remu = NULL; /* enable PCI device */ err = pci_enable_device(pci); if (err < 0) return err; emu = kzalloc(sizeof(*emu), GFP_KERNEL); if (emu == NULL) { pci_disable_device(pci); return -ENOMEM; } emu->card = card; spin_lock_init(&emu->reg_lock); spin_lock_init(&emu->emu_lock); spin_lock_init(&emu->spi_lock); spin_lock_init(&emu->i2c_lock); spin_lock_init(&emu->voice_lock); spin_lock_init(&emu->synth_lock); spin_lock_init(&emu->memblk_lock); mutex_init(&emu->fx8010.lock); INIT_LIST_HEAD(&emu->mapped_link_head); INIT_LIST_HEAD(&emu->mapped_order_link_head); emu->pci = pci; emu->irq = -1; emu->synth = NULL; emu->get_synth_voice = NULL; /* read revision & serial */ emu->revision = pci->revision; pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial); pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &emu->model); snd_printdd("vendor = 0x%x, device = 0x%x, subsystem_vendor_id = 0x%x, subsystem_id = 0x%x\n", pci->vendor, pci->device, emu->serial, emu->model); for (c = emu_chip_details; c->vendor; c++) { if (c->vendor == pci->vendor && c->device == pci->device) { if (subsystem) { if (c->subsystem && (c->subsystem == subsystem)) break; else continue; } else { if (c->subsystem && (c->subsystem != emu->serial)) continue; if (c->revision && c->revision != emu->revision) continue; } break; } } if (c->vendor == 0) { snd_printk(KERN_ERR "emu10k1: Card not recognised\n"); kfree(emu); pci_disable_device(pci); return -ENOENT; } emu->card_capabilities = c; if (c->subsystem && 
!subsystem) snd_printdd("Sound card name = %s\n", c->name); else if (subsystem) snd_printdd("Sound card name = %s, " "vendor = 0x%x, device = 0x%x, subsystem = 0x%x. " "Forced to subsytem = 0x%x\n", c->name, pci->vendor, pci->device, emu->serial, c->subsystem); else snd_printdd("Sound card name = %s, " "vendor = 0x%x, device = 0x%x, subsystem = 0x%x.\n", c->name, pci->vendor, pci->device, emu->serial); if (!*card->id && c->id) { int i, n = 0; strlcpy(card->id, c->id, sizeof(card->id)); for (;;) { for (i = 0; i < snd_ecards_limit; i++) { if (snd_cards[i] && !strcmp(snd_cards[i]->id, card->id)) break; } if (i >= snd_ecards_limit) break; n++; if (n >= SNDRV_CARDS) break; snprintf(card->id, sizeof(card->id), "%s_%d", c->id, n); } } is_audigy = emu->audigy = c->emu10k2_chip; /* set the DMA transfer mask */ emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK; if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { snd_printk(KERN_ERR "architecture does not support PCI busmaster DMA with mask 0x%lx\n", emu->dma_mask); kfree(emu); pci_disable_device(pci); return -ENXIO; } if (is_audigy) emu->gpr_base = A_FXGPREGBASE; else emu->gpr_base = FXGPREGBASE; err = pci_request_regions(pci, "EMU10K1"); if (err < 0) { kfree(emu); pci_disable_device(pci); return err; } emu->port = pci_resource_start(pci, 0); emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), 32 * 1024, &emu->ptb_pages) < 0) { err = -ENOMEM; goto error; } emu->page_ptr_table = vmalloc(emu->max_cache_pages * sizeof(void *)); emu->page_addr_table = vmalloc(emu->max_cache_pages * sizeof(unsigned long)); if (emu->page_ptr_table == NULL || emu->page_addr_table == NULL) { err = -ENOMEM; goto error; } if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), EMUPAGESIZE, &emu->silent_page) < 0) { err = -ENOMEM; goto error; } emu->memhdr = snd_util_memhdr_new(emu->max_cache_pages * 
PAGE_SIZE); if (emu->memhdr == NULL) { err = -ENOMEM; goto error; } emu->memhdr->block_extra_size = sizeof(struct snd_emu10k1_memblk) - sizeof(struct snd_util_memblk); pci_set_master(pci); emu->fx8010.fxbus_mask = 0x303f; if (extin_mask == 0) extin_mask = 0x3fcf; if (extout_mask == 0) extout_mask = 0x7fff; emu->fx8010.extin_mask = extin_mask; emu->fx8010.extout_mask = extout_mask; emu->enable_ir = enable_ir; if (emu->card_capabilities->ca_cardbus_chip) { err = snd_emu10k1_cardbus_init(emu); if (err < 0) goto error; } if (emu->card_capabilities->ecard) { err = snd_emu10k1_ecard_init(emu); if (err < 0) goto error; } else if (emu->card_capabilities->emu_model) { err = snd_emu10k1_emu1010_init(emu); if (err < 0) { snd_emu10k1_free(emu); return err; } } else { /* 5.1: Enable the additional AC97 Slots. If the emu10k1 version does not support this, it shouldn't do any harm */ snd_emu10k1_ptr_write(emu, AC97SLOT, 0, AC97SLOT_CNTR|AC97SLOT_LFE); } /* initialize TRAM setup */ emu->fx8010.itram_size = (16 * 1024)/2; emu->fx8010.etram_pages.area = NULL; emu->fx8010.etram_pages.bytes = 0; /* irq handler must be registered after I/O ports are activated */ if (request_irq(pci->irq, snd_emu10k1_interrupt, IRQF_SHARED, "EMU10K1", emu)) { err = -EBUSY; goto error; } emu->irq = pci->irq; /* * Init to 0x02109204 : * Clock accuracy = 0 (1000ppm) * Sample Rate = 2 (48kHz) * Audio Channel = 1 (Left of 2) * Source Number = 0 (Unspecified) * Generation Status = 1 (Original for Cat Code 12) * Cat Code = 12 (Digital Signal Mixer) * Mode = 0 (Mode 0) * Emphasis = 0 (None) * CP = 1 (Copyright unasserted) * AN = 0 (Audio data) * P = 0 (Consumer) */ emu->spdif_bits[0] = emu->spdif_bits[1] = emu->spdif_bits[2] = SPCS_CLKACCY_1000PPM | SPCS_SAMPLERATE_48 | SPCS_CHANNELNUM_LEFT | SPCS_SOURCENUM_UNSPEC | SPCS_GENERATIONSTATUS | 0x00001200 | 0x00000000 | SPCS_EMPHASIS_NONE | SPCS_COPYRIGHT; emu->reserved_page = (struct snd_emu10k1_memblk *) snd_emu10k1_synth_alloc(emu, 4096); if (emu->reserved_page) 
emu->reserved_page->map_locked = 1; /* Clear silent pages and set up pointers */ memset(emu->silent_page.area, 0, PAGE_SIZE); silent_page = emu->silent_page.addr << 1; for (idx = 0; idx < MAXPAGES; idx++) ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); /* set up voice indices */ for (idx = 0; idx < NUM_G; idx++) { emu->voices[idx].emu = emu; emu->voices[idx].number = idx; } err = snd_emu10k1_init(emu, enable_ir, 0); if (err < 0) goto error; #ifdef CONFIG_PM err = alloc_pm_buffer(emu); if (err < 0) goto error; #endif /* Initialize the effect engine */ err = snd_emu10k1_init_efx(emu); if (err < 0) goto error; snd_emu10k1_audio_enable(emu); err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, emu, &ops); if (err < 0) goto error; #ifdef CONFIG_PROC_FS snd_emu10k1_proc_init(emu); #endif snd_card_set_dev(card, &pci->dev); *remu = emu; return 0; error: snd_emu10k1_free(emu); return err; } #ifdef CONFIG_PM static unsigned char saved_regs[] = { CPF, PTRX, CVCF, VTFT, Z1, Z2, PSST, DSL, CCCA, CCR, CLP, FXRT, MAPA, MAPB, ENVVOL, ATKHLDV, DCYSUSV, LFOVAL1, ENVVAL, ATKHLDM, DCYSUSM, LFOVAL2, IP, IFATN, PEFE, FMMOD, TREMFRQ, FM2FRQ2, TEMPENV, ADCCR, FXWC, MICBA, ADCBA, FXBA, MICBS, ADCBS, FXBS, CDCS, GPSCS, SPCS0, SPCS1, SPCS2, SPBYPASS, AC97SLOT, CDSRCS, GPSRCS, ZVSRCS, MICIDX, ADCIDX, FXIDX, 0xff /* end */ }; static unsigned char saved_regs_audigy[] = { A_ADCIDX, A_MICIDX, A_FXWC1, A_FXWC2, A_SAMPLE_RATE, A_FXRT2, A_SENDAMOUNTS, A_FXRT1, 0xff /* end */ }; static int __devinit alloc_pm_buffer(struct snd_emu10k1 *emu) { int size; size = ARRAY_SIZE(saved_regs); if (emu->audigy) size += ARRAY_SIZE(saved_regs_audigy); emu->saved_ptr = vmalloc(4 * NUM_G * size); if (!emu->saved_ptr) return -ENOMEM; if (snd_emu10k1_efx_alloc_pm_buffer(emu) < 0) return -ENOMEM; if (emu->card_capabilities->ca0151_chip && snd_p16v_alloc_pm_buffer(emu) < 0) return -ENOMEM; return 0; } static void free_pm_buffer(struct snd_emu10k1 *emu) { vfree(emu->saved_ptr); 
snd_emu10k1_efx_free_pm_buffer(emu); if (emu->card_capabilities->ca0151_chip) snd_p16v_free_pm_buffer(emu); } void snd_emu10k1_suspend_regs(struct snd_emu10k1 *emu) { int i; unsigned char *reg; unsigned int *val; val = emu->saved_ptr; for (reg = saved_regs; *reg != 0xff; reg++) for (i = 0; i < NUM_G; i++, val++) *val = snd_emu10k1_ptr_read(emu, *reg, i); if (emu->audigy) { for (reg = saved_regs_audigy; *reg != 0xff; reg++) for (i = 0; i < NUM_G; i++, val++) *val = snd_emu10k1_ptr_read(emu, *reg, i); } if (emu->audigy) emu->saved_a_iocfg = inl(emu->port + A_IOCFG); emu->saved_hcfg = inl(emu->port + HCFG); } void snd_emu10k1_resume_init(struct snd_emu10k1 *emu) { if (emu->card_capabilities->ca_cardbus_chip) snd_emu10k1_cardbus_init(emu); if (emu->card_capabilities->ecard) snd_emu10k1_ecard_init(emu); else if (emu->card_capabilities->emu_model) snd_emu10k1_emu1010_init(emu); else snd_emu10k1_ptr_write(emu, AC97SLOT, 0, AC97SLOT_CNTR|AC97SLOT_LFE); snd_emu10k1_init(emu, emu->enable_ir, 1); } void snd_emu10k1_resume_regs(struct snd_emu10k1 *emu) { int i; unsigned char *reg; unsigned int *val; snd_emu10k1_audio_enable(emu); /* resore for spdif */ if (emu->audigy) outl(emu->saved_a_iocfg, emu->port + A_IOCFG); outl(emu->saved_hcfg, emu->port + HCFG); val = emu->saved_ptr; for (reg = saved_regs; *reg != 0xff; reg++) for (i = 0; i < NUM_G; i++, val++) snd_emu10k1_ptr_write(emu, *reg, i, *val); if (emu->audigy) { for (reg = saved_regs_audigy; *reg != 0xff; reg++) for (i = 0; i < NUM_G; i++, val++) snd_emu10k1_ptr_write(emu, *reg, i, *val); } } #endif
gpl-2.0
dhacker29/android_kernel_moto_shamu
drivers/cpufreq/dbx500-cpufreq.c
1167
4255
/* * Copyright (C) STMicroelectronics 2009 * Copyright (C) ST-Ericsson SA 2010-2012 * * License Terms: GNU General Public License v2 * Author: Sundar Iyer <sundar.iyer@stericsson.com> * Author: Martin Persson <martin.persson@stericsson.com> * Author: Jonas Aaberg <jonas.aberg@stericsson.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> static struct cpufreq_frequency_table *freq_table; static struct clk *armss_clk; static struct freq_attr *dbx500_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, freq_table); } static int dbx500_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpufreq_freqs freqs; unsigned int idx; int ret; /* Lookup the next frequency */ if (cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &idx)) return -EINVAL; freqs.old = policy->cur; freqs.new = freq_table[idx].frequency; if (freqs.old == freqs.new) return 0; /* pre-change notification */ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); /* update armss clk frequency */ ret = clk_set_rate(armss_clk, freqs.new * 1000); if (ret) { pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n", freqs.new * 1000, ret); return ret; } /* post change notification */ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) { int i = 0; unsigned long freq = clk_get_rate(armss_clk) / 1000; /* The value is rounded to closest frequency in the defined table. 
*/ while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) { if (freq < freq_table[i].frequency + (freq_table[i + 1].frequency - freq_table[i].frequency) / 2) return freq_table[i].frequency; i++; } return freq_table[i].frequency; } static int dbx500_cpufreq_init(struct cpufreq_policy *policy) { int res; /* get policy fields based on the table */ res = cpufreq_frequency_table_cpuinfo(policy, freq_table); if (!res) cpufreq_frequency_table_get_attr(freq_table, policy->cpu); else { pr_err("dbx500-cpufreq: Failed to read policy table\n"); return res; } policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; policy->cur = dbx500_cpufreq_getspeed(policy->cpu); policy->governor = CPUFREQ_DEFAULT_GOVERNOR; /* * FIXME : Need to take time measurement across the target() * function with no/some/all drivers in the notification * list. */ policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ /* policy sharing between dual CPUs */ cpumask_setall(policy->cpus); return 0; } static struct cpufreq_driver dbx500_cpufreq_driver = { .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, .verify = dbx500_cpufreq_verify_speed, .target = dbx500_cpufreq_target, .get = dbx500_cpufreq_getspeed, .init = dbx500_cpufreq_init, .name = "DBX500", .attr = dbx500_cpufreq_attr, }; static int dbx500_cpufreq_probe(struct platform_device *pdev) { int i = 0; freq_table = dev_get_platdata(&pdev->dev); if (!freq_table) { pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n"); return -ENODEV; } armss_clk = clk_get(&pdev->dev, "armss"); if (IS_ERR(armss_clk)) { pr_err("dbx500-cpufreq: Failed to get armss clk\n"); return PTR_ERR(armss_clk); } pr_info("dbx500-cpufreq: Available frequencies:\n"); while (freq_table[i].frequency != CPUFREQ_TABLE_END) { pr_info(" %d Mhz\n", freq_table[i].frequency/1000); i++; } return cpufreq_register_driver(&dbx500_cpufreq_driver); } static struct platform_driver dbx500_cpufreq_plat_driver = { .driver = { .name = "cpufreq-ux500", .owner = THIS_MODULE, }, 
.probe = dbx500_cpufreq_probe, }; static int __init dbx500_cpufreq_register(void) { return platform_driver_register(&dbx500_cpufreq_plat_driver); } device_initcall(dbx500_cpufreq_register); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("cpufreq driver for DBX500");
gpl-2.0
jasonzhong/linux
arch/blackfin/mach-common/scb-init.c
2191
1309
/* * arch/blackfin/mach-common/scb-init.c - reprogram system cross bar priority * * Copyright 2012 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/errno.h> #include <linux/kernel.h> #include <asm/scb.h> __attribute__((l1_text)) inline void scb_mi_write(unsigned long scb_mi_arbw, unsigned int slots, unsigned char *scb_mi_prio) { unsigned int i; for (i = 0; i < slots; ++i) bfin_write32(scb_mi_arbw, (i << SCB_SLOT_OFFSET) | scb_mi_prio[i]); } __attribute__((l1_text)) inline void scb_mi_read(unsigned long scb_mi_arbw, unsigned int slots, unsigned char *scb_mi_prio) { unsigned int i; for (i = 0; i < slots; ++i) { bfin_write32(scb_mi_arbw, (0xFF << SCB_SLOT_OFFSET) | i); scb_mi_prio[i] = bfin_read32(scb_mi_arbw); } } __attribute__((l1_text)) void init_scb(void) { unsigned int i, j; unsigned char scb_tmp_prio[32]; pr_info("Init System Crossbar\n"); for (i = 0; scb_data[i].scb_mi_arbr > 0; ++i) { scb_mi_write(scb_data[i].scb_mi_arbw, scb_data[i].scb_mi_slots, scb_data[i].scb_mi_prio); pr_debug("scb priority at 0x%lx:\n", scb_data[i].scb_mi_arbr); scb_mi_read(scb_data[i].scb_mi_arbw, scb_data[i].scb_mi_slots, scb_tmp_prio); for (j = 0; j < scb_data[i].scb_mi_slots; ++j) pr_debug("slot %d = %d\n", j, scb_tmp_prio[j]); } }
gpl-2.0
ModADroid/android-omap-tuna
drivers/staging/generic_serial/rio/rio_linux.c
2703
33335
/* rio_linux.c -- Linux driver for the Specialix RIO series cards. * * * (C) 1999 R.E.Wolff@BitWizard.nl * * Specialix pays for the development and support of this driver. * Please DO contact support@specialix.co.uk if you require * support. But please read the documentation (rio.txt) first. * * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, * USA. * * */ #include <linux/module.h> #include <linux/kdev_t.h> #include <asm/io.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/mm.h> #include <linux/serial.h> #include <linux/fcntl.h> #include <linux/major.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/generic_serial.h> #include <asm/uaccess.h> #include "linux_compat.h" #include "pkt.h" #include "daemon.h" #include "rio.h" #include "riospace.h" #include "cmdpkt.h" #include "map.h" #include "rup.h" #include "port.h" #include "riodrvr.h" #include "rioinfo.h" #include "func.h" #include "errors.h" #include "pci.h" #include "parmmap.h" #include "unixrup.h" #include "board.h" #include "host.h" #include "phb.h" #include "link.h" #include "cmdblk.h" #include "route.h" #include "cirrus.h" 
#include "rioioctl.h" #include "param.h" #include "protsts.h" #include "rioboard.h" #include "rio_linux.h" /* I don't think that this driver can handle more than 512 ports on one machine. Specialix specifies max 4 boards in one machine. I don't know why. If you want to try anyway you'll have to increase the number of boards in rio.h. You'll have to allocate more majors if you need more than 512 ports.... */ #ifndef RIO_NORMAL_MAJOR0 /* This allows overriding on the compiler commandline, or in a "major.h" include or something like that */ #define RIO_NORMAL_MAJOR0 154 #define RIO_NORMAL_MAJOR1 156 #endif #ifndef PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 #define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000 #endif #ifndef RIO_WINDOW_LEN #define RIO_WINDOW_LEN 0x10000 #endif /* Configurable options: (Don't be too sure that it'll work if you toggle them) */ /* Am I paranoid or not ? ;-) */ #undef RIO_PARANOIA_CHECK /* 20 -> 2000 per second. The card should rate-limit interrupts at 1000 Hz, but it is user configurable. I don't recommend going above 1000 Hz. The interrupt ratelimit might trigger if the interrupt is shared with a very active other device. undef this if you want to disable the check.... */ #define IRQ_RATE_LIMIT 200 /* These constants are derived from SCO Source */ static DEFINE_MUTEX(rio_fw_mutex); static struct Conf RIOConf = { /* locator */ "RIO Config here", /* startuptime */ HZ * 2, /* how long to wait for card to run */ /* slowcook */ 0, /* TRUE -> always use line disc. */ /* intrpolltime */ 1, /* The frequency of OUR polls */ /* breakinterval */ 25, /* x10 mS XXX: units seem to be 1ms not 10! 
-- REW */ /* timer */ 10, /* mS */ /* RtaLoadBase */ 0x7000, /* HostLoadBase */ 0x7C00, /* XpHz */ 5, /* number of Xprint hits per second */ /* XpCps */ 120, /* Xprint characters per second */ /* XpOn */ "\033d#", /* start Xprint for a wyse 60 */ /* XpOff */ "\024", /* end Xprint for a wyse 60 */ /* MaxXpCps */ 2000, /* highest Xprint speed */ /* MinXpCps */ 10, /* slowest Xprint speed */ /* SpinCmds */ 1, /* non-zero for mega fast boots */ /* First Addr */ 0x0A0000, /* First address to look at */ /* Last Addr */ 0xFF0000, /* Last address looked at */ /* BufferSize */ 1024, /* Bytes per port of buffering */ /* LowWater */ 256, /* how much data left before wakeup */ /* LineLength */ 80, /* how wide is the console? */ /* CmdTimeout */ HZ, /* how long a close command may take */ }; /* Function prototypes */ static void rio_disable_tx_interrupts(void *ptr); static void rio_enable_tx_interrupts(void *ptr); static void rio_disable_rx_interrupts(void *ptr); static void rio_enable_rx_interrupts(void *ptr); static int rio_carrier_raised(struct tty_port *port); static void rio_shutdown_port(void *ptr); static int rio_set_real_termios(void *ptr); static void rio_hungup(void *ptr); static void rio_close(void *ptr); static int rio_chars_in_buffer(void *ptr); static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); static int rio_init_drivers(void); static void my_hd(void *addr, int len); static struct tty_driver *rio_driver, *rio_driver2; /* The name "p" is a bit non-descript. But that's what the rio-lynxos sources use all over the place. */ struct rio_info *p; int rio_debug; /* You can have the driver poll your card. - Set rio_poll to 1 to poll every timer tick (10ms on Intel). This is used when the card cannot use an interrupt for some reason. */ static int rio_poll = 1; /* These are the only open spaces in my computer. Yours may have more or less.... 
*/ static int rio_probe_addrs[] = { 0xc0000, 0xd0000, 0xe0000 }; #define NR_RIO_ADDRS ARRAY_SIZE(rio_probe_addrs) /* Set the mask to all-ones. This alas, only supports 32 interrupts. Some architectures may need more. -- Changed to LONG to support up to 64 bits on 64bit architectures. -- REW 20/06/99 */ static long rio_irqmask = -1; MODULE_AUTHOR("Rogier Wolff <R.E.Wolff@bitwizard.nl>, Patrick van de Lageweg <patrick@bitwizard.nl>"); MODULE_DESCRIPTION("RIO driver"); MODULE_LICENSE("GPL"); module_param(rio_poll, int, 0); module_param(rio_debug, int, 0644); module_param(rio_irqmask, long, 0); static struct real_driver rio_real_driver = { rio_disable_tx_interrupts, rio_enable_tx_interrupts, rio_disable_rx_interrupts, rio_enable_rx_interrupts, rio_shutdown_port, rio_set_real_termios, rio_chars_in_buffer, rio_close, rio_hungup, NULL }; /* * Firmware loader driver specific routines * */ static const struct file_operations rio_fw_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rio_fw_ioctl, .llseek = noop_llseek, }; static struct miscdevice rio_fw_device = { RIOCTL_MISC_MINOR, "rioctl", &rio_fw_fops }; #ifdef RIO_PARANOIA_CHECK /* This doesn't work. Who's paranoid around here? Not me! */ static inline int rio_paranoia_check(struct rio_port const *port, char *name, const char *routine) { static const char *badmagic = KERN_ERR "rio: Warning: bad rio port magic number for device %s in %s\n"; static const char *badinfo = KERN_ERR "rio: Warning: null rio port for device %s in %s\n"; if (!port) { printk(badinfo, name, routine); return 1; } if (port->magic != RIO_MAGIC) { printk(badmagic, name, routine); return 1; } return 0; } #else #define rio_paranoia_check(a,b,c) 0 #endif #ifdef DEBUG static void my_hd(void *ad, int len) { int i, j, ch; unsigned char *addr = ad; for (i = 0; i < len; i += 16) { rio_dprintk(RIO_DEBUG_PARAM, "%08lx ", (unsigned long) addr + i); for (j = 0; j < 16; j++) { rio_dprintk(RIO_DEBUG_PARAM, "%02x %s", addr[j + i], (j == 7) ? 
" " : ""); } for (j = 0; j < 16; j++) { ch = addr[j + i]; rio_dprintk(RIO_DEBUG_PARAM, "%c", (ch < 0x20) ? '.' : ((ch > 0x7f) ? '.' : ch)); } rio_dprintk(RIO_DEBUG_PARAM, "\n"); } } #else #define my_hd(ad,len) do{/* nothing*/ } while (0) #endif /* Delay a number of jiffies, allowing a signal to interrupt */ int RIODelay(struct Port *PortP, int njiffies) { func_enter(); rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies\n", njiffies); msleep_interruptible(jiffies_to_msecs(njiffies)); func_exit(); if (signal_pending(current)) return RIO_FAIL; else return !RIO_FAIL; } /* Delay a number of jiffies, disallowing a signal to interrupt */ int RIODelay_ni(struct Port *PortP, int njiffies) { func_enter(); rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies (ni)\n", njiffies); msleep(jiffies_to_msecs(njiffies)); func_exit(); return !RIO_FAIL; } void rio_copy_to_card(void *from, void __iomem *to, int len) { rio_copy_toio(to, from, len); } int rio_minor(struct tty_struct *tty) { return tty->index + ((tty->driver == rio_driver) ? 0 : 256); } static int rio_set_real_termios(void *ptr) { return RIOParam((struct Port *) ptr, RIOC_CONFIG, 1, 1); } static void rio_reset_interrupt(struct Host *HostP) { func_enter(); switch (HostP->Type) { case RIO_AT: case RIO_MCA: case RIO_PCI: writeb(0xFF, &HostP->ResetInt); } func_exit(); } static irqreturn_t rio_interrupt(int irq, void *ptr) { struct Host *HostP; func_enter(); HostP = ptr; /* &p->RIOHosts[(long)ptr]; */ rio_dprintk(RIO_DEBUG_IFLOW, "rio: enter rio_interrupt (%d/%d)\n", irq, HostP->Ivec); /* AAargh! The order in which to do these things is essential and not trivial. - hardware twiddling goes before "recursive". Otherwise when we poll the card, and a recursive interrupt happens, we won't ack the card, so it might keep on interrupting us. (especially level sensitive interrupt systems like PCI). - Rate limit goes before hardware twiddling. Otherwise we won't catch a card that has gone bonkers. 
- The "initialized" test goes after the hardware twiddling. Otherwise the card will stick us in the interrupt routine again. - The initialized test goes before recursive. */ rio_dprintk(RIO_DEBUG_IFLOW, "rio: We've have noticed the interrupt\n"); if (HostP->Ivec == irq) { /* Tell the card we've noticed the interrupt. */ rio_reset_interrupt(HostP); } if ((HostP->Flags & RUN_STATE) != RC_RUNNING) return IRQ_HANDLED; if (test_and_set_bit(RIO_BOARD_INTR_LOCK, &HostP->locks)) { printk(KERN_ERR "Recursive interrupt! (host %p/irq%d)\n", ptr, HostP->Ivec); return IRQ_HANDLED; } RIOServiceHost(p, HostP); rio_dprintk(RIO_DEBUG_IFLOW, "riointr() doing host %p type %d\n", ptr, HostP->Type); clear_bit(RIO_BOARD_INTR_LOCK, &HostP->locks); rio_dprintk(RIO_DEBUG_IFLOW, "rio: exit rio_interrupt (%d/%d)\n", irq, HostP->Ivec); func_exit(); return IRQ_HANDLED; } static void rio_pollfunc(unsigned long data) { func_enter(); rio_interrupt(0, &p->RIOHosts[data]); mod_timer(&p->RIOHosts[data].timer, jiffies + rio_poll); func_exit(); } /* ********************************************************************** * * Here are the routines that actually * * interface with the generic_serial driver * * ********************************************************************** */ /* Ehhm. I don't know how to fiddle with interrupts on the Specialix cards. .... Hmm. Ok I figured it out. You don't. -- REW */ static void rio_disable_tx_interrupts(void *ptr) { func_enter(); /* port->gs.port.flags &= ~GS_TX_INTEN; */ func_exit(); } static void rio_enable_tx_interrupts(void *ptr) { struct Port *PortP = ptr; /* int hn; */ func_enter(); /* hn = PortP->HostP - p->RIOHosts; rio_dprintk (RIO_DEBUG_TTY, "Pushing host %d\n", hn); rio_interrupt (-1,(void *) hn, NULL); */ RIOTxEnable((char *) PortP); /* * In general we cannot count on "tx empty" interrupts, although * the interrupt routine seems to be able to tell the difference. 
*/ PortP->gs.port.flags &= ~GS_TX_INTEN; func_exit(); } static void rio_disable_rx_interrupts(void *ptr) { func_enter(); func_exit(); } static void rio_enable_rx_interrupts(void *ptr) { /* struct rio_port *port = ptr; */ func_enter(); func_exit(); } /* Jeez. Isn't this simple? */ static int rio_carrier_raised(struct tty_port *port) { struct Port *PortP = container_of(port, struct Port, gs.port); int rv; func_enter(); rv = (PortP->ModemState & RIOC_MSVR1_CD) != 0; rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv); func_exit(); return rv; } /* Jeez. Isn't this simple? Actually, we can sync with the actual port by just pushing stuff into the queue going to the port... */ static int rio_chars_in_buffer(void *ptr) { func_enter(); func_exit(); return 0; } /* Nothing special here... */ static void rio_shutdown_port(void *ptr) { struct Port *PortP; func_enter(); PortP = (struct Port *) ptr; PortP->gs.port.tty = NULL; func_exit(); } /* I haven't the foggiest why the decrement use count has to happen here. The whole linux serial drivers stuff needs to be redesigned. My guess is that this is a hack to minimize the impact of a bug elsewhere. Thinking about it some more. (try it sometime) Try running minicom on a serial port that is driven by a modularized driver. Have the modem hangup. Then remove the driver module. Then exit minicom. I expect an "oops". -- REW */ static void rio_hungup(void *ptr) { struct Port *PortP; func_enter(); PortP = (struct Port *) ptr; PortP->gs.port.tty = NULL; func_exit(); } /* The standard serial_close would become shorter if you'd wrap it like this. 
rs_close (...){save_flags;cli;real_close();dec_use_count;restore_flags;} */ static void rio_close(void *ptr) { struct Port *PortP; func_enter(); PortP = (struct Port *) ptr; riotclose(ptr); if (PortP->gs.port.count) { printk(KERN_ERR "WARNING port count:%d\n", PortP->gs.port.count); PortP->gs.port.count = 0; } PortP->gs.port.tty = NULL; func_exit(); } static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rc = 0; func_enter(); /* The "dev" argument isn't used. */ mutex_lock(&rio_fw_mutex); rc = riocontrol(p, 0, cmd, arg, capable(CAP_SYS_ADMIN)); mutex_unlock(&rio_fw_mutex); func_exit(); return rc; } extern int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg); static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int rc; struct Port *PortP; int ival; func_enter(); PortP = (struct Port *) tty->driver_data; rc = 0; switch (cmd) { case TIOCSSOFTCAR: if ((rc = get_user(ival, (unsigned __user *) argp)) == 0) { tty->termios->c_cflag = (tty->termios->c_cflag & ~CLOCAL) | (ival ? CLOCAL : 0); } break; case TIOCGSERIAL: rc = -EFAULT; if (access_ok(VERIFY_WRITE, argp, sizeof(struct serial_struct))) rc = gs_getserial(&PortP->gs, argp); break; case TCSBRK: if (PortP->State & RIO_DELETED) { rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n"); rc = -EIO; } else { if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 250) == RIO_FAIL) { rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); rc = -EIO; } } break; case TCSBRKP: if (PortP->State & RIO_DELETED) { rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n"); rc = -EIO; } else { int l; l = arg ? arg * 100 : 250; if (l > 255) l = 255; if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, arg ? 
arg * 100 : 250) == RIO_FAIL) { rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); rc = -EIO; } } break; case TIOCSSERIAL: rc = -EFAULT; if (access_ok(VERIFY_READ, argp, sizeof(struct serial_struct))) rc = gs_setserial(&PortP->gs, argp); break; default: rc = -ENOIOCTLCMD; break; } func_exit(); return rc; } /* The throttle/unthrottle scheme for the Specialix card is different * from other drivers and deserves some explanation. * The Specialix hardware takes care of XON/XOFF * and CTS/RTS flow control itself. This means that all we have to * do when signalled by the upper tty layer to throttle/unthrottle is * to make a note of it here. When we come to read characters from the * rx buffers on the card (rio_receive_chars()) we look to see if the * upper layer can accept more (as noted here in rio_rx_throt[]). * If it can't we simply don't remove chars from the cards buffer. * When the tty layer can accept chars, we again note that here and when * rio_receive_chars() is called it will remove them from the cards buffer. * The card will notice that a ports buffer has drained below some low * water mark and will unflow control the line itself, using whatever * flow control scheme is in use for that port. -- Simon Allen */ static void rio_throttle(struct tty_struct *tty) { struct Port *port = (struct Port *) tty->driver_data; func_enter(); /* If the port is using any type of input flow * control then throttle the port. */ if ((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty))) { port->State |= RIO_THROTTLE_RX; } func_exit(); } static void rio_unthrottle(struct tty_struct *tty) { struct Port *port = (struct Port *) tty->driver_data; func_enter(); /* Always unthrottle even if flow control is not enabled on * this port in case we disabled flow control while the port * was throttled */ port->State &= ~RIO_THROTTLE_RX; func_exit(); return; } /* ********************************************************************** * * Here are the initialization routines. 
* * ********************************************************************** */ static struct vpd_prom *get_VPD_PROM(struct Host *hp) { static struct vpd_prom vpdp; char *p; int i; func_enter(); rio_dprintk(RIO_DEBUG_PROBE, "Going to verify vpd prom at %p.\n", hp->Caddr + RIO_VPD_ROM); p = (char *) &vpdp; for (i = 0; i < sizeof(struct vpd_prom); i++) *p++ = readb(hp->Caddr + RIO_VPD_ROM + i * 2); /* read_rio_byte (hp, RIO_VPD_ROM + i*2); */ /* Terminate the identifier string. *** requires one extra byte in struct vpd_prom *** */ *p++ = 0; if (rio_debug & RIO_DEBUG_PROBE) my_hd((char *) &vpdp, 0x20); func_exit(); return &vpdp; } static const struct tty_operations rio_ops = { .open = riotopen, .close = gs_close, .write = gs_write, .put_char = gs_put_char, .flush_chars = gs_flush_chars, .write_room = gs_write_room, .chars_in_buffer = gs_chars_in_buffer, .flush_buffer = gs_flush_buffer, .ioctl = rio_ioctl, .throttle = rio_throttle, .unthrottle = rio_unthrottle, .set_termios = gs_set_termios, .stop = gs_stop, .start = gs_start, .hangup = gs_hangup, }; static int rio_init_drivers(void) { int error = -ENOMEM; rio_driver = alloc_tty_driver(256); if (!rio_driver) goto out; rio_driver2 = alloc_tty_driver(256); if (!rio_driver2) goto out1; func_enter(); rio_driver->owner = THIS_MODULE; rio_driver->driver_name = "specialix_rio"; rio_driver->name = "ttySR"; rio_driver->major = RIO_NORMAL_MAJOR0; rio_driver->type = TTY_DRIVER_TYPE_SERIAL; rio_driver->subtype = SERIAL_TYPE_NORMAL; rio_driver->init_termios = tty_std_termios; rio_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; rio_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(rio_driver, &rio_ops); rio_driver2->owner = THIS_MODULE; rio_driver2->driver_name = "specialix_rio"; rio_driver2->name = "ttySR"; rio_driver2->major = RIO_NORMAL_MAJOR1; rio_driver2->type = TTY_DRIVER_TYPE_SERIAL; rio_driver2->subtype = SERIAL_TYPE_NORMAL; rio_driver2->init_termios = tty_std_termios; 
rio_driver2->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; rio_driver2->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(rio_driver2, &rio_ops); rio_dprintk(RIO_DEBUG_INIT, "set_termios = %p\n", gs_set_termios); if ((error = tty_register_driver(rio_driver))) goto out2; if ((error = tty_register_driver(rio_driver2))) goto out3; func_exit(); return 0; out3: tty_unregister_driver(rio_driver); out2: put_tty_driver(rio_driver2); out1: put_tty_driver(rio_driver); out: printk(KERN_ERR "rio: Couldn't register a rio driver, error = %d\n", error); return 1; } static const struct tty_port_operations rio_port_ops = { .carrier_raised = rio_carrier_raised, }; static int rio_init_datastructures(void) { int i; struct Port *port; func_enter(); /* Many drivers statically allocate the maximum number of ports There is no reason not to allocate them dynamically. Is there? -- REW */ /* However, the RIO driver allows users to configure their first RTA as the ports numbered 504-511. We therefore need to allocate the whole range. 
:-( -- REW */ #define RI_SZ sizeof(struct rio_info) #define HOST_SZ sizeof(struct Host) #define PORT_SZ sizeof(struct Port *) #define TMIO_SZ sizeof(struct termios *) rio_dprintk(RIO_DEBUG_INIT, "getting : %Zd %Zd %Zd %Zd %Zd bytes\n", RI_SZ, RIO_HOSTS * HOST_SZ, RIO_PORTS * PORT_SZ, RIO_PORTS * TMIO_SZ, RIO_PORTS * TMIO_SZ); if (!(p = kzalloc(RI_SZ, GFP_KERNEL))) goto free0; if (!(p->RIOHosts = kzalloc(RIO_HOSTS * HOST_SZ, GFP_KERNEL))) goto free1; if (!(p->RIOPortp = kzalloc(RIO_PORTS * PORT_SZ, GFP_KERNEL))) goto free2; p->RIOConf = RIOConf; rio_dprintk(RIO_DEBUG_INIT, "Got : %p %p %p\n", p, p->RIOHosts, p->RIOPortp); #if 1 for (i = 0; i < RIO_PORTS; i++) { port = p->RIOPortp[i] = kzalloc(sizeof(struct Port), GFP_KERNEL); if (!port) { goto free6; } rio_dprintk(RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped); tty_port_init(&port->gs.port); port->gs.port.ops = &rio_port_ops; port->PortNum = i; port->gs.magic = RIO_MAGIC; port->gs.close_delay = HZ / 2; port->gs.closing_wait = 30 * HZ; port->gs.rd = &rio_real_driver; spin_lock_init(&port->portSem); } #else /* We could postpone initializing them to when they are configured. */ #endif if (rio_debug & RIO_DEBUG_INIT) { my_hd(&rio_real_driver, sizeof(rio_real_driver)); } func_exit(); return 0; free6:for (i--; i >= 0; i--) kfree(p->RIOPortp[i]); /*free5: free4: free3:*/ kfree(p->RIOPortp); free2:kfree(p->RIOHosts); free1: rio_dprintk(RIO_DEBUG_INIT, "Not enough memory! %p %p %p\n", p, p->RIOHosts, p->RIOPortp); kfree(p); free0: return -ENOMEM; } static void __exit rio_release_drivers(void) { func_enter(); tty_unregister_driver(rio_driver2); tty_unregister_driver(rio_driver); put_tty_driver(rio_driver2); put_tty_driver(rio_driver); func_exit(); } #ifdef CONFIG_PCI /* This was written for SX, but applies to RIO too... (including bugs....) There is another bit besides Bit 17. Turning that bit off (on boards shipped with the fix in the eeprom) results in a hang on the next access to the card. 
*/ /******************************************************** * Setting bit 17 in the CNTRL register of the PLX 9050 * * chip forces a retry on writes while a read is pending.* * This is to prevent the card locking up on Intel Xeon * * multiprocessor systems with the NX chipset. -- NV * ********************************************************/ /* Newer cards are produced with this bit set from the configuration EEprom. As the bit is read/write for the CPU, we can fix it here, if we detect that it isn't set correctly. -- REW */ static void fix_rio_pci(struct pci_dev *pdev) { unsigned long hwbase; unsigned char __iomem *rebase; unsigned int t; #define CNTRL_REG_OFFSET 0x50 #define CNTRL_REG_GOODVALUE 0x18260000 hwbase = pci_resource_start(pdev, 0); rebase = ioremap(hwbase, 0x80); t = readl(rebase + CNTRL_REG_OFFSET); if (t != CNTRL_REG_GOODVALUE) { printk(KERN_DEBUG "rio: performing cntrl reg fix: %08x -> %08x\n", t, CNTRL_REG_GOODVALUE); writel(CNTRL_REG_GOODVALUE, rebase + CNTRL_REG_OFFSET); } iounmap(rebase); } #endif static int __init rio_init(void) { int found = 0; int i; struct Host *hp; int retval; struct vpd_prom *vpdp; int okboard; #ifdef CONFIG_PCI struct pci_dev *pdev = NULL; unsigned short tshort; #endif func_enter(); rio_dprintk(RIO_DEBUG_INIT, "Initing rio module... (rio_debug=%d)\n", rio_debug); if (abs((long) (&rio_debug) - rio_debug) < 0x10000) { printk(KERN_WARNING "rio: rio_debug is an address, instead of a value. " "Assuming -1. 
Was %x/%p.\n", rio_debug, &rio_debug); rio_debug = -1; } if (misc_register(&rio_fw_device) < 0) { printk(KERN_ERR "RIO: Unable to register firmware loader driver.\n"); return -EIO; } retval = rio_init_datastructures(); if (retval < 0) { misc_deregister(&rio_fw_device); return retval; } #ifdef CONFIG_PCI /* First look for the JET devices: */ while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, pdev))) { u32 tint; if (pci_enable_device(pdev)) continue; /* Specialix has a whole bunch of cards with 0x2000 as the device ID. They say its because the standard requires it. Stupid standard. */ /* It seems that reading a word doesn't work reliably on 2.0. Also, reading a non-aligned dword doesn't work. So we read the whole dword at 0x2c and extract the word at 0x2e (SUBSYSTEM_ID) ourselves */ pci_read_config_dword(pdev, 0x2c, &tint); tshort = (tint >> 16) & 0xffff; rio_dprintk(RIO_DEBUG_PROBE, "Got a specialix card: %x.\n", tint); if (tshort != 0x0100) { rio_dprintk(RIO_DEBUG_PROBE, "But it's not a RIO card (%d)...\n", tshort); continue; } rio_dprintk(RIO_DEBUG_PROBE, "cp1\n"); hp = &p->RIOHosts[p->RIONumHosts]; hp->PaddrP = pci_resource_start(pdev, 2); hp->Ivec = pdev->irq; if (((1 << hp->Ivec) & rio_irqmask) == 0) hp->Ivec = 0; hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN); hp->CardP = (struct DpRam __iomem *) hp->Caddr; hp->Type = RIO_PCI; hp->Copy = rio_copy_to_card; hp->Mode = RIO_PCI_BOOT_FROM_RAM; spin_lock_init(&hp->HostLock); rio_reset_interrupt(hp); rio_start_card_running(hp); rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr); if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == 0) { rio_dprintk(RIO_DEBUG_INIT, "Done RIOBoardTest\n"); writeb(0xFF, &p->RIOHosts[p->RIONumHosts].ResetInt); p->RIOHosts[p->RIONumHosts].UniqueNum = 
((readb(&p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24); rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum); fix_rio_pci(pdev); p->RIOHosts[p->RIONumHosts].pdev = pdev; pci_dev_get(pdev); p->RIOLastPCISearch = 0; p->RIONumHosts++; found++; } else { iounmap(p->RIOHosts[p->RIONumHosts].Caddr); p->RIOHosts[p->RIONumHosts].Caddr = NULL; } } /* Then look for the older PCI card.... : */ /* These older PCI cards have problems (only byte-mode access is supported), which makes them a bit awkward to support. They also have problems sharing interrupts. Be careful. (The driver now refuses to share interrupts for these cards. This should be sufficient). */ /* Then look for the older RIO/PCI devices: */ while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_RIO, pdev))) { if (pci_enable_device(pdev)) continue; #ifdef CONFIG_RIO_OLDPCI hp = &p->RIOHosts[p->RIONumHosts]; hp->PaddrP = pci_resource_start(pdev, 0); hp->Ivec = pdev->irq; if (((1 << hp->Ivec) & rio_irqmask) == 0) hp->Ivec = 0; hp->Ivec |= 0x8000; /* Mark as non-sharable */ hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN); hp->CardP = (struct DpRam __iomem *) hp->Caddr; hp->Type = RIO_PCI; hp->Copy = rio_copy_to_card; hp->Mode = RIO_PCI_BOOT_FROM_RAM; spin_lock_init(&hp->HostLock); rio_dprintk(RIO_DEBUG_PROBE, "Ivec: %x\n", hp->Ivec); rio_dprintk(RIO_DEBUG_PROBE, "Mode: %x\n", hp->Mode); rio_reset_interrupt(hp); rio_start_card_running(hp); rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr); if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == 0) { writeb(0xFF, 
&p->RIOHosts[p->RIONumHosts].ResetInt); p->RIOHosts[p->RIONumHosts].UniqueNum = ((readb(&p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24); rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum); p->RIOHosts[p->RIONumHosts].pdev = pdev; pci_dev_get(pdev); p->RIOLastPCISearch = 0; p->RIONumHosts++; found++; } else { iounmap(p->RIOHosts[p->RIONumHosts].Caddr); p->RIOHosts[p->RIONumHosts].Caddr = NULL; } #else printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n"); #endif } #endif /* PCI */ /* Now probe for ISA cards... */ for (i = 0; i < NR_RIO_ADDRS; i++) { hp = &p->RIOHosts[p->RIONumHosts]; hp->PaddrP = rio_probe_addrs[i]; /* There was something about the IRQs of these cards. 'Forget what.--REW */ hp->Ivec = 0; hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN); hp->CardP = (struct DpRam __iomem *) hp->Caddr; hp->Type = RIO_AT; hp->Copy = rio_copy_to_card; /* AT card PCI???? - PVDL * -- YES! this is now a normal copy. Only the * old PCI card uses the special PCI copy. * Moreover, the ISA card will work with the * special PCI copy anyway. -- REW */ hp->Mode = 0; spin_lock_init(&hp->HostLock); vpdp = get_VPD_PROM(hp); rio_dprintk(RIO_DEBUG_PROBE, "Got VPD ROM\n"); okboard = 0; if ((strncmp(vpdp->identifier, RIO_ISA_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA2_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA3_IDENT, 16) == 0)) { /* Board is present... */ if (RIOBoardTest(hp->PaddrP, hp->Caddr, RIO_AT, 0) == 0) { /* ... and feeling fine!!!! 
*/ rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum); if (RIOAssignAT(p, hp->PaddrP, hp->Caddr, 0)) { rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, host%d uniqid = %x.\n", p->RIONumHosts, p->RIOHosts[p->RIONumHosts - 1].UniqueNum); okboard++; found++; } } if (!okboard) { iounmap(hp->Caddr); hp->Caddr = NULL; } } } for (i = 0; i < p->RIONumHosts; i++) { hp = &p->RIOHosts[i]; if (hp->Ivec) { int mode = IRQF_SHARED; if (hp->Ivec & 0x8000) { mode = 0; hp->Ivec &= 0x7fff; } rio_dprintk(RIO_DEBUG_INIT, "Requesting interrupt hp: %p rio_interrupt: %d Mode: %x\n", hp, hp->Ivec, hp->Mode); retval = request_irq(hp->Ivec, rio_interrupt, mode, "rio", hp); rio_dprintk(RIO_DEBUG_INIT, "Return value from request_irq: %d\n", retval); if (retval) { printk(KERN_ERR "rio: Cannot allocate irq %d.\n", hp->Ivec); hp->Ivec = 0; } rio_dprintk(RIO_DEBUG_INIT, "Got irq %d.\n", hp->Ivec); if (hp->Ivec != 0) { rio_dprintk(RIO_DEBUG_INIT, "Enabling interrupts on rio card.\n"); hp->Mode |= RIO_PCI_INT_ENABLE; } else hp->Mode &= ~RIO_PCI_INT_ENABLE; rio_dprintk(RIO_DEBUG_INIT, "New Mode: %x\n", hp->Mode); rio_start_card_running(hp); } /* Init the timer "always" to make sure that it can safely be deleted when we unload... */ setup_timer(&hp->timer, rio_pollfunc, i); if (!hp->Ivec) { rio_dprintk(RIO_DEBUG_INIT, "Starting polling at %dj intervals.\n", rio_poll); mod_timer(&hp->timer, jiffies + rio_poll); } } if (found) { rio_dprintk(RIO_DEBUG_INIT, "rio: total of %d boards detected.\n", found); rio_init_drivers(); } else { /* deregister the misc device we created earlier */ misc_deregister(&rio_fw_device); } func_exit(); return found ? 
0 : -EIO; } static void __exit rio_exit(void) { int i; struct Host *hp; func_enter(); for (i = 0, hp = p->RIOHosts; i < p->RIONumHosts; i++, hp++) { RIOHostReset(hp->Type, hp->CardP, hp->Slot); if (hp->Ivec) { free_irq(hp->Ivec, hp); rio_dprintk(RIO_DEBUG_INIT, "freed irq %d.\n", hp->Ivec); } /* It is safe/allowed to del_timer a non-active timer */ del_timer_sync(&hp->timer); if (hp->Caddr) iounmap(hp->Caddr); if (hp->Type == RIO_PCI) pci_dev_put(hp->pdev); } if (misc_deregister(&rio_fw_device) < 0) { printk(KERN_INFO "rio: couldn't deregister control-device\n"); } rio_dprintk(RIO_DEBUG_CLEANUP, "Cleaning up drivers\n"); rio_release_drivers(); /* Release dynamically allocated memory */ kfree(p->RIOPortp); kfree(p->RIOHosts); kfree(p); func_exit(); } module_init(rio_init); module_exit(rio_exit);
gpl-2.0
maxwen/android_kernel_oppo_msm8916
sound/pci/ctxfi/ctmixer.c
2959
30729
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctmixer.c * * @Brief * This file contains the implementation of alsa mixer device functions. * * @Author Liu Chun * @Date May 28 2008 * */ #include "ctmixer.h" #include "ctamixer.h" #include <linux/slab.h> #include <sound/core.h> #include <sound/control.h> #include <sound/asoundef.h> #include <sound/pcm.h> #include <sound/tlv.h> enum CT_SUM_CTL { SUM_IN_F, SUM_IN_R, SUM_IN_C, SUM_IN_S, SUM_IN_F_C, NUM_CT_SUMS }; enum CT_AMIXER_CTL { /* volume control mixers */ AMIXER_MASTER_F, AMIXER_MASTER_R, AMIXER_MASTER_C, AMIXER_MASTER_S, AMIXER_PCM_F, AMIXER_PCM_R, AMIXER_PCM_C, AMIXER_PCM_S, AMIXER_SPDIFI, AMIXER_LINEIN, AMIXER_MIC, AMIXER_SPDIFO, AMIXER_WAVE_F, AMIXER_WAVE_R, AMIXER_WAVE_C, AMIXER_WAVE_S, AMIXER_MASTER_F_C, AMIXER_PCM_F_C, AMIXER_SPDIFI_C, AMIXER_LINEIN_C, AMIXER_MIC_C, /* this should always be the last one */ NUM_CT_AMIXERS }; enum CTALSA_MIXER_CTL { /* volume control mixers */ MIXER_MASTER_P, MIXER_PCM_P, MIXER_LINEIN_P, MIXER_MIC_P, MIXER_SPDIFI_P, MIXER_SPDIFO_P, MIXER_WAVEF_P, MIXER_WAVER_P, MIXER_WAVEC_P, MIXER_WAVES_P, MIXER_MASTER_C, MIXER_PCM_C, MIXER_LINEIN_C, MIXER_MIC_C, MIXER_SPDIFI_C, /* switch control mixers */ MIXER_PCM_C_S, MIXER_LINEIN_C_S, MIXER_MIC_C_S, MIXER_SPDIFI_C_S, MIXER_SPDIFO_P_S, MIXER_WAVEF_P_S, MIXER_WAVER_P_S, MIXER_WAVEC_P_S, MIXER_WAVES_P_S, MIXER_DIGITAL_IO_S, MIXER_IEC958_MASK, MIXER_IEC958_DEFAULT, MIXER_IEC958_STREAM, /* this should always be the last one */ NUM_CTALSA_MIXERS }; #define VOL_MIXER_START MIXER_MASTER_P #define VOL_MIXER_END MIXER_SPDIFI_C #define VOL_MIXER_NUM (VOL_MIXER_END - VOL_MIXER_START + 1) #define SWH_MIXER_START MIXER_PCM_C_S #define SWH_MIXER_END MIXER_DIGITAL_IO_S #define SWH_CAPTURE_START MIXER_PCM_C_S #define 
SWH_CAPTURE_END MIXER_SPDIFI_C_S #define CHN_NUM 2 struct ct_kcontrol_init { unsigned char ctl; char *name; }; static struct ct_kcontrol_init ct_kcontrol_init_table[NUM_CTALSA_MIXERS] = { [MIXER_MASTER_P] = { .ctl = 1, .name = "Master Playback Volume", }, [MIXER_MASTER_C] = { .ctl = 1, .name = "Master Capture Volume", }, [MIXER_PCM_P] = { .ctl = 1, .name = "PCM Playback Volume", }, [MIXER_PCM_C] = { .ctl = 1, .name = "PCM Capture Volume", }, [MIXER_LINEIN_P] = { .ctl = 1, .name = "Line Playback Volume", }, [MIXER_LINEIN_C] = { .ctl = 1, .name = "Line Capture Volume", }, [MIXER_MIC_P] = { .ctl = 1, .name = "Mic Playback Volume", }, [MIXER_MIC_C] = { .ctl = 1, .name = "Mic Capture Volume", }, [MIXER_SPDIFI_P] = { .ctl = 1, .name = "IEC958 Playback Volume", }, [MIXER_SPDIFI_C] = { .ctl = 1, .name = "IEC958 Capture Volume", }, [MIXER_SPDIFO_P] = { .ctl = 1, .name = "Digital Playback Volume", }, [MIXER_WAVEF_P] = { .ctl = 1, .name = "Front Playback Volume", }, [MIXER_WAVES_P] = { .ctl = 1, .name = "Side Playback Volume", }, [MIXER_WAVEC_P] = { .ctl = 1, .name = "Center/LFE Playback Volume", }, [MIXER_WAVER_P] = { .ctl = 1, .name = "Surround Playback Volume", }, [MIXER_PCM_C_S] = { .ctl = 1, .name = "PCM Capture Switch", }, [MIXER_LINEIN_C_S] = { .ctl = 1, .name = "Line Capture Switch", }, [MIXER_MIC_C_S] = { .ctl = 1, .name = "Mic Capture Switch", }, [MIXER_SPDIFI_C_S] = { .ctl = 1, .name = "IEC958 Capture Switch", }, [MIXER_SPDIFO_P_S] = { .ctl = 1, .name = "Digital Playback Switch", }, [MIXER_WAVEF_P_S] = { .ctl = 1, .name = "Front Playback Switch", }, [MIXER_WAVES_P_S] = { .ctl = 1, .name = "Side Playback Switch", }, [MIXER_WAVEC_P_S] = { .ctl = 1, .name = "Center/LFE Playback Switch", }, [MIXER_WAVER_P_S] = { .ctl = 1, .name = "Surround Playback Switch", }, [MIXER_DIGITAL_IO_S] = { .ctl = 0, .name = "Digit-IO Playback Switch", }, }; static void ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type); static void ct_mixer_recording_unselect(struct 
ct_mixer *mixer, enum CT_AMIXER_CTL type);

/* FIXME: this static looks like it would fail if more than one card was */
/* installed. */
/*
 * Cached kcontrol pointers for the Line-In ([0]) and Mic ([1]) capture
 * switches; do_line_mic_switch() uses them to send change notifications
 * when one switch implicitly clears the other.
 */
static struct snd_kcontrol *kctls[2] = {NULL};

/*
 * Map an ALSA-visible mixer control index to the internal AMIXER resource
 * that backs it.  Capture volume and capture switch controls share one
 * AMIXER (note the paired case labels).  Returns NUM_CT_AMIXERS for
 * controls with no backing AMIXER (e.g. pure switches like Digital I/O).
 */
static enum CT_AMIXER_CTL get_amixer_index(enum CTALSA_MIXER_CTL alsa_index)
{
	switch (alsa_index) {
	case MIXER_MASTER_P:
		return AMIXER_MASTER_F;
	case MIXER_MASTER_C:
		return AMIXER_MASTER_F_C;
	case MIXER_PCM_P:
		return AMIXER_PCM_F;
	case MIXER_PCM_C:
	case MIXER_PCM_C_S:
		return AMIXER_PCM_F_C;
	case MIXER_LINEIN_P:
		return AMIXER_LINEIN;
	case MIXER_LINEIN_C:
	case MIXER_LINEIN_C_S:
		return AMIXER_LINEIN_C;
	case MIXER_MIC_P:
		return AMIXER_MIC;
	case MIXER_MIC_C:
	case MIXER_MIC_C_S:
		return AMIXER_MIC_C;
	case MIXER_SPDIFI_P:
		return AMIXER_SPDIFI;
	case MIXER_SPDIFI_C:
	case MIXER_SPDIFI_C_S:
		return AMIXER_SPDIFI_C;
	case MIXER_SPDIFO_P:
		return AMIXER_SPDIFO;
	case MIXER_WAVEF_P:
		return AMIXER_WAVE_F;
	case MIXER_WAVES_P:
		return AMIXER_WAVE_S;
	case MIXER_WAVEC_P:
		return AMIXER_WAVE_C;
	case MIXER_WAVER_P:
		return AMIXER_WAVE_R;
	default:
		/* No backing AMIXER for this control. */
		return NUM_CT_AMIXERS;
	}
}

/*
 * For a playback/input AMIXER, return its recording (capture-path)
 * counterpart, or NUM_CT_AMIXERS if it has none.  Used to mirror input
 * routing changes onto the capture path.
 */
static enum CT_AMIXER_CTL get_recording_amixer(enum CT_AMIXER_CTL index)
{
	switch (index) {
	case AMIXER_MASTER_F:
		return AMIXER_MASTER_F_C;
	case AMIXER_PCM_F:
		return AMIXER_PCM_F_C;
	case AMIXER_SPDIFI:
		return AMIXER_SPDIFI_C;
	case AMIXER_LINEIN:
		return AMIXER_LINEIN_C;
	case AMIXER_MIC:
		return AMIXER_MIC_C;
	default:
		return NUM_CT_AMIXERS;
	}
}

/*
 * Read the cached on/off state of a switch control from the mixer's
 * switch_state bitmap.  Bit position is the control's offset from
 * SWH_MIXER_START.  Returns 1 (on) or 0 (off).
 */
static unsigned char
get_switch_state(struct ct_mixer *mixer, enum CTALSA_MIXER_CTL type)
{
	return (mixer->switch_state & (0x1 << (type - SWH_MIXER_START))) ?
1 : 0; } static void set_switch_state(struct ct_mixer *mixer, enum CTALSA_MIXER_CTL type, unsigned char state) { if (state) mixer->switch_state |= (0x1 << (type - SWH_MIXER_START)); else mixer->switch_state &= ~(0x1 << (type - SWH_MIXER_START)); } #if 0 /* not used */ /* Map integer value ranging from 0 to 65535 to 14-bit float value ranging * from 2^-6 to (1+1023/1024) */ static unsigned int uint16_to_float14(unsigned int x) { unsigned int i; if (x < 17) return 0; x *= 2031; x /= 65535; x += 16; /* i <= 6 */ for (i = 0; !(x & 0x400); i++) x <<= 1; x = (((7 - i) & 0x7) << 10) | (x & 0x3ff); return x; } static unsigned int float14_to_uint16(unsigned int x) { unsigned int e; if (!x) return x; e = (x >> 10) & 0x7; x &= 0x3ff; x += 1024; x >>= (7 - e); x -= 16; x *= 65535; x /= 2031; return x; } #endif /* not used */ #define VOL_SCALE 0x1c #define VOL_MAX 0x100 static const DECLARE_TLV_DB_SCALE(ct_vol_db_scale, -6400, 25, 1); static int ct_alsa_mix_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = VOL_MAX; return 0; } static int ct_alsa_mix_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value); struct amixer *amixer; int i, val; for (i = 0; i < 2; i++) { amixer = ((struct ct_mixer *)atc->mixer)-> amixers[type*CHN_NUM+i]; val = amixer->ops->get_scale(amixer) / VOL_SCALE; if (val < 0) val = 0; else if (val > VOL_MAX) val = VOL_MAX; ucontrol->value.integer.value[i] = val; } return 0; } static int ct_alsa_mix_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); struct ct_mixer *mixer = atc->mixer; enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value); struct amixer *amixer; int i, j, val, oval, 
change = 0; for (i = 0; i < 2; i++) { val = ucontrol->value.integer.value[i]; if (val < 0) val = 0; else if (val > VOL_MAX) val = VOL_MAX; val *= VOL_SCALE; amixer = mixer->amixers[type*CHN_NUM+i]; oval = amixer->ops->get_scale(amixer); if (val != oval) { amixer->ops->set_scale(amixer, val); amixer->ops->commit_write(amixer); change = 1; /* Synchronize Master/PCM playback AMIXERs. */ if (AMIXER_MASTER_F == type || AMIXER_PCM_F == type) { for (j = 1; j < 4; j++) { amixer = mixer-> amixers[(type+j)*CHN_NUM+i]; amixer->ops->set_scale(amixer, val); amixer->ops->commit_write(amixer); } } } } return change; } static struct snd_kcontrol_new vol_ctl = { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = ct_alsa_mix_volume_info, .get = ct_alsa_mix_volume_get, .put = ct_alsa_mix_volume_put, .tlv = { .p = ct_vol_db_scale }, }; static int output_switch_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "FP Headphones", "Headphones", "Speakers" }; return snd_ctl_enum_info(info, 1, 3, names); } static int output_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = atc->output_switch_get(atc); return 0; } static int output_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); if (ucontrol->value.enumerated.item[0] > 2) return -EINVAL; return atc->output_switch_put(atc, ucontrol->value.enumerated.item[0]); } static struct snd_kcontrol_new output_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Output Playback Enum", .info = output_switch_info, .get = output_switch_get, .put = output_switch_put, }; static int mic_source_switch_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "Mic", "FP Mic", "Aux" 
}; return snd_ctl_enum_info(info, 1, 3, names); } static int mic_source_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = atc->mic_source_switch_get(atc); return 0; } static int mic_source_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ct_atc *atc = snd_kcontrol_chip(kcontrol); if (ucontrol->value.enumerated.item[0] > 2) return -EINVAL; return atc->mic_source_switch_put(atc, ucontrol->value.enumerated.item[0]); } static struct snd_kcontrol_new mic_source_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Source Capture Enum", .info = mic_source_switch_info, .get = mic_source_switch_get, .put = mic_source_switch_put, }; static void do_line_mic_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type) { if (MIXER_LINEIN_C_S == type) { atc->select_line_in(atc); set_switch_state(atc->mixer, MIXER_MIC_C_S, 0); snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctls[1]->id); } else if (MIXER_MIC_C_S == type) { atc->select_mic_in(atc); set_switch_state(atc->mixer, MIXER_LINEIN_C_S, 0); snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctls[0]->id); } } static void do_digit_io_switch(struct ct_atc *atc, int state) { struct ct_mixer *mixer = atc->mixer; if (state) { atc->select_digit_io(atc); atc->spdif_out_unmute(atc, get_switch_state(mixer, MIXER_SPDIFO_P_S)); atc->spdif_in_unmute(atc, 1); atc->line_in_unmute(atc, 0); return; } if (get_switch_state(mixer, MIXER_LINEIN_C_S)) atc->select_line_in(atc); else if (get_switch_state(mixer, MIXER_MIC_C_S)) atc->select_mic_in(atc); atc->spdif_out_unmute(atc, 0); atc->spdif_in_unmute(atc, 0); atc->line_in_unmute(atc, 1); return; } static void do_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type, int state) { struct ct_mixer *mixer = atc->mixer; struct capabilities cap = atc->capabilities(atc); /* Do changes in mixer. 
 */
	/*
	 * Capture switches also (de)select the backing AMIXER as an input
	 * of the capture sum, so recording actually picks up the source.
	 */
	if ((SWH_CAPTURE_START <= type) && (SWH_CAPTURE_END >= type)) {
		if (state) {
			ct_mixer_recording_select(mixer,
						  get_amixer_index(type));
		} else {
			ct_mixer_recording_unselect(mixer,
						    get_amixer_index(type));
		}
	}
	/* Do changes out of mixer. */
	if (!cap.dedicated_mic &&
	    (MIXER_LINEIN_C_S == type || MIXER_MIC_C_S == type)) {
		/*
		 * Shared line/mic input path: selecting one source
		 * implicitly deselects the other (do_line_mic_switch),
		 * then the shared input is (un)muted.
		 */
		if (state)
			do_line_mic_switch(atc, type);
		atc->line_in_unmute(atc, state);
	} else if (cap.dedicated_mic && (MIXER_LINEIN_C_S == type))
		atc->line_in_unmute(atc, state);
	else if (cap.dedicated_mic && (MIXER_MIC_C_S == type))
		atc->mic_unmute(atc, state);
	else if (MIXER_SPDIFI_C_S == type)
		atc->spdif_in_unmute(atc, state);
	else if (MIXER_WAVEF_P_S == type)
		atc->line_front_unmute(atc, state);
	else if (MIXER_WAVES_P_S == type)
		atc->line_surround_unmute(atc, state);
	else if (MIXER_WAVEC_P_S == type)
		atc->line_clfe_unmute(atc, state);
	else if (MIXER_WAVER_P_S == type)
		atc->line_rear_unmute(atc, state);
	else if (MIXER_SPDIFO_P_S == type)
		atc->spdif_out_unmute(atc, state);
	else if (MIXER_DIGITAL_IO_S == type)
		do_digit_io_switch(atc, state);

	return;
}

/* ALSA info callback: all switch controls are single boolean elements. */
static int ct_alsa_mix_switch_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;
	uinfo->value.integer.step = 1;

	return 0;
}

/* ALSA get callback: report the cached switch state (no hardware read). */
static int ct_alsa_mix_switch_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_mixer *mixer =
		((struct ct_atc *)snd_kcontrol_chip(kcontrol))->mixer;
	enum CTALSA_MIXER_CTL type = kcontrol->private_value;

	ucontrol->value.integer.value[0] = get_switch_state(mixer, type);
	return 0;
}

/*
 * ALSA put callback: update the cached state and push the change to the
 * hardware via do_switch().  Returns 1 on change, 0 if the value was
 * already current (per ALSA put convention).
 */
static int ct_alsa_mix_switch_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	struct ct_mixer *mixer = atc->mixer;
	enum CTALSA_MIXER_CTL type = kcontrol->private_value;
	int state;

	state = ucontrol->value.integer.value[0];
	if (get_switch_state(mixer, type) == state)
		return 0;	/* no change */

	set_switch_state(mixer, type, state);
	do_switch(atc, type, state);

	return 1;
}

/* Template shared by all boolean switch controls; .name and
 * .private_value are filled in per control at creation time. */
static struct snd_kcontrol_new swh_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = ct_alsa_mix_switch_info,
	.get = ct_alsa_mix_switch_get,
	.put = ct_alsa_mix_switch_put
};

/* Info callback for the IEC958 (S/PDIF) status-channel controls. */
static int ct_spdif_info(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* All 32 status bits of the first four IEC958 bytes are writable. */
static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0xff;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

/*
 * Read the current S/PDIF output status word from the chip and unpack it
 * into the four IEC958 status bytes.  A zero word means the hardware was
 * never programmed, so report the ALSA consumer-mode default instead.
 */
static int ct_spdif_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	unsigned int status;

	atc->spdif_out_get_status(atc, &status);

	if (status == 0)
		status = SNDRV_PCM_DEFAULT_CON_SPDIF;

	ucontrol->value.iec958.status[0] = (status >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (status >> 8) & 0xff;
	ucontrol->value.iec958.status[2] = (status >> 16) & 0xff;
	ucontrol->value.iec958.status[3] = (status >> 24) & 0xff;

	return 0;
}

/*
 * Pack the four IEC958 status bytes into one 32-bit word and write it to
 * the chip if it differs from the current value.  Returns 1 on change.
 */
static int ct_spdif_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int status, old_status;

	status = (ucontrol->value.iec958.status[0] << 0) |
		 (ucontrol->value.iec958.status[1] << 8) |
		 (ucontrol->value.iec958.status[2] << 16) |
		 (ucontrol->value.iec958.status[3] << 24);

	atc->spdif_out_get_status(atc, &old_status);
	change = (old_status != status);
	if (change)
		atc->spdif_out_set_status(atc, status);

	return change;
}

/* Read-only mask element telling userspace which status bits it may set. */
static struct snd_kcontrol_new iec958_mask_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
	.count = 1,
	.info = ct_spdif_info,
	.get = ct_spdif_get_mask,
.private_value = MIXER_IEC958_MASK }; static struct snd_kcontrol_new iec958_default_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .count = 1, .info = ct_spdif_info, .get = ct_spdif_get, .put = ct_spdif_put, .private_value = MIXER_IEC958_DEFAULT }; static struct snd_kcontrol_new iec958_ctl = { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM), .count = 1, .info = ct_spdif_info, .get = ct_spdif_get, .put = ct_spdif_put, .private_value = MIXER_IEC958_STREAM }; #define NUM_IEC958_CTL 3 static int ct_mixer_kcontrol_new(struct ct_mixer *mixer, struct snd_kcontrol_new *new) { struct snd_kcontrol *kctl; int err; kctl = snd_ctl_new1(new, mixer->atc); if (!kctl) return -ENOMEM; if (SNDRV_CTL_ELEM_IFACE_PCM == kctl->id.iface) kctl->id.device = IEC958; err = snd_ctl_add(mixer->atc->card, kctl); if (err) return err; switch (new->private_value) { case MIXER_LINEIN_C_S: kctls[0] = kctl; break; case MIXER_MIC_C_S: kctls[1] = kctl; break; default: break; } return 0; } static int ct_mixer_kcontrols_create(struct ct_mixer *mixer) { enum CTALSA_MIXER_CTL type; struct ct_atc *atc = mixer->atc; struct capabilities cap = atc->capabilities(atc); int err; /* Create snd kcontrol instances on demand */ for (type = VOL_MIXER_START; type <= VOL_MIXER_END; type++) { if (ct_kcontrol_init_table[type].ctl) { vol_ctl.name = ct_kcontrol_init_table[type].name; vol_ctl.private_value = (unsigned long)type; err = ct_mixer_kcontrol_new(mixer, &vol_ctl); if (err) return err; } } ct_kcontrol_init_table[MIXER_DIGITAL_IO_S].ctl = cap.digit_io_switch; for (type = SWH_MIXER_START; type <= SWH_MIXER_END; type++) { if (ct_kcontrol_init_table[type].ctl) { swh_ctl.name = ct_kcontrol_init_table[type].name; swh_ctl.private_value = (unsigned long)type; err = ct_mixer_kcontrol_new(mixer, &swh_ctl); if (err) return err; } } err = ct_mixer_kcontrol_new(mixer, &iec958_mask_ctl); if (err) 
return err; err = ct_mixer_kcontrol_new(mixer, &iec958_default_ctl); if (err) return err; err = ct_mixer_kcontrol_new(mixer, &iec958_ctl); if (err) return err; if (cap.output_switch) { err = ct_mixer_kcontrol_new(mixer, &output_ctl); if (err) return err; } if (cap.mic_source_switch) { err = ct_mixer_kcontrol_new(mixer, &mic_source_ctl); if (err) return err; } atc->line_front_unmute(atc, 1); set_switch_state(mixer, MIXER_WAVEF_P_S, 1); atc->line_surround_unmute(atc, 0); set_switch_state(mixer, MIXER_WAVES_P_S, 0); atc->line_clfe_unmute(atc, 0); set_switch_state(mixer, MIXER_WAVEC_P_S, 0); atc->line_rear_unmute(atc, 0); set_switch_state(mixer, MIXER_WAVER_P_S, 0); atc->spdif_out_unmute(atc, 0); set_switch_state(mixer, MIXER_SPDIFO_P_S, 0); atc->line_in_unmute(atc, 0); if (cap.dedicated_mic) atc->mic_unmute(atc, 0); atc->spdif_in_unmute(atc, 0); set_switch_state(mixer, MIXER_PCM_C_S, 0); set_switch_state(mixer, MIXER_LINEIN_C_S, 0); set_switch_state(mixer, MIXER_SPDIFI_C_S, 0); return 0; } static void ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type) { struct amixer *amix_d; struct sum *sum_c; int i; for (i = 0; i < 2; i++) { amix_d = mixer->amixers[type*CHN_NUM+i]; sum_c = mixer->sums[SUM_IN_F_C*CHN_NUM+i]; amix_d->ops->set_sum(amix_d, sum_c); amix_d->ops->commit_write(amix_d); } } static void ct_mixer_recording_unselect(struct ct_mixer *mixer, enum CT_AMIXER_CTL type) { struct amixer *amix_d; int i; for (i = 0; i < 2; i++) { amix_d = mixer->amixers[type*CHN_NUM+i]; amix_d->ops->set_sum(amix_d, NULL); amix_d->ops->commit_write(amix_d); } } static int ct_mixer_get_resources(struct ct_mixer *mixer) { struct sum_mgr *sum_mgr; struct sum *sum; struct sum_desc sum_desc = {0}; struct amixer_mgr *amixer_mgr; struct amixer *amixer; struct amixer_desc am_desc = {0}; int err; int i; /* Allocate sum resources for mixer obj */ sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM]; sum_desc.msr = mixer->atc->msr; for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); 
i++) { err = sum_mgr->get_sum(sum_mgr, &sum_desc, &sum); if (err) { printk(KERN_ERR "ctxfi:Failed to get sum resources for " "front output!\n"); break; } mixer->sums[i] = sum; } if (err) goto error1; /* Allocate amixer resources for mixer obj */ amixer_mgr = (struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER]; am_desc.msr = mixer->atc->msr; for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) { err = amixer_mgr->get_amixer(amixer_mgr, &am_desc, &amixer); if (err) { printk(KERN_ERR "ctxfi:Failed to get amixer resources " "for mixer obj!\n"); break; } mixer->amixers[i] = amixer; } if (err) goto error2; return 0; error2: for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) { if (NULL != mixer->amixers[i]) { amixer = mixer->amixers[i]; amixer_mgr->put_amixer(amixer_mgr, amixer); mixer->amixers[i] = NULL; } } error1: for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) { if (NULL != mixer->sums[i]) { sum_mgr->put_sum(sum_mgr, (struct sum *)mixer->sums[i]); mixer->sums[i] = NULL; } } return err; } static int ct_mixer_get_mem(struct ct_mixer **rmixer) { struct ct_mixer *mixer; int err; *rmixer = NULL; /* Allocate mem for mixer obj */ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); if (!mixer) return -ENOMEM; mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM), GFP_KERNEL); if (!mixer->amixers) { err = -ENOMEM; goto error1; } mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL); if (!mixer->sums) { err = -ENOMEM; goto error2; } *rmixer = mixer; return 0; error2: kfree(mixer->amixers); error1: kfree(mixer); return err; } static int ct_mixer_topology_build(struct ct_mixer *mixer) { struct sum *sum; struct amixer *amix_d, *amix_s; enum CT_AMIXER_CTL i, j; /* Build topology from destination to source */ /* Set up Master mixer */ for (i = AMIXER_MASTER_F, j = SUM_IN_F; i <= AMIXER_MASTER_S; i++, j++) { amix_d = mixer->amixers[i*CHN_NUM]; sum = mixer->sums[j*CHN_NUM]; amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL); amix_d = mixer->amixers[i*CHN_NUM+1]; sum = 
mixer->sums[j*CHN_NUM+1]; amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL); } /* Set up Wave-out mixer */ for (i = AMIXER_WAVE_F, j = AMIXER_MASTER_F; i <= AMIXER_WAVE_S; i++, j++) { amix_d = mixer->amixers[i*CHN_NUM]; amix_s = mixer->amixers[j*CHN_NUM]; amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL); amix_d = mixer->amixers[i*CHN_NUM+1]; amix_s = mixer->amixers[j*CHN_NUM+1]; amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL); } /* Set up S/PDIF-out mixer */ amix_d = mixer->amixers[AMIXER_SPDIFO*CHN_NUM]; amix_s = mixer->amixers[AMIXER_MASTER_F*CHN_NUM]; amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL); amix_d = mixer->amixers[AMIXER_SPDIFO*CHN_NUM+1]; amix_s = mixer->amixers[AMIXER_MASTER_F*CHN_NUM+1]; amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL); /* Set up PCM-in mixer */ for (i = AMIXER_PCM_F, j = SUM_IN_F; i <= AMIXER_PCM_S; i++, j++) { amix_d = mixer->amixers[i*CHN_NUM]; sum = mixer->sums[j*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[i*CHN_NUM+1]; sum = mixer->sums[j*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); } /* Set up Line-in mixer */ amix_d = mixer->amixers[AMIXER_LINEIN*CHN_NUM]; sum = mixer->sums[SUM_IN_F*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_LINEIN*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); /* Set up Mic-in mixer */ amix_d = mixer->amixers[AMIXER_MIC*CHN_NUM]; sum = mixer->sums[SUM_IN_F*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_MIC*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); /* Set up S/PDIF-in mixer */ amix_d = mixer->amixers[AMIXER_SPDIFI*CHN_NUM]; sum = mixer->sums[SUM_IN_F*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_SPDIFI*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F*CHN_NUM+1]; amix_d->ops->setup(amix_d, 
NULL, INIT_VOL, sum); /* Set up Master recording mixer */ amix_d = mixer->amixers[AMIXER_MASTER_F_C*CHN_NUM]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM]; amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL); amix_d = mixer->amixers[AMIXER_MASTER_F_C*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1]; amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL); /* Set up PCM-in recording mixer */ amix_d = mixer->amixers[AMIXER_PCM_F_C*CHN_NUM]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_PCM_F_C*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); /* Set up Line-in recording mixer */ amix_d = mixer->amixers[AMIXER_LINEIN_C*CHN_NUM]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_LINEIN_C*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); /* Set up Mic-in recording mixer */ amix_d = mixer->amixers[AMIXER_MIC_C*CHN_NUM]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_MIC_C*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); /* Set up S/PDIF-in recording mixer */ amix_d = mixer->amixers[AMIXER_SPDIFI_C*CHN_NUM]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); amix_d = mixer->amixers[AMIXER_SPDIFI_C*CHN_NUM+1]; sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1]; amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum); return 0; } static int mixer_set_input_port(struct amixer *amixer, struct rsc *rsc) { amixer->ops->set_input(amixer, rsc); amixer->ops->commit_write(amixer); return 0; } static enum CT_AMIXER_CTL port_to_amixer(enum MIXER_PORT_T type) { switch (type) { case MIX_WAVE_FRONT: return AMIXER_WAVE_F; case MIX_WAVE_SURROUND: return AMIXER_WAVE_S; case MIX_WAVE_CENTLFE: return 
AMIXER_WAVE_C; case MIX_WAVE_REAR: return AMIXER_WAVE_R; case MIX_PCMO_FRONT: return AMIXER_MASTER_F_C; case MIX_SPDIF_OUT: return AMIXER_SPDIFO; case MIX_LINE_IN: return AMIXER_LINEIN; case MIX_MIC_IN: return AMIXER_MIC; case MIX_SPDIF_IN: return AMIXER_SPDIFI; case MIX_PCMI_FRONT: return AMIXER_PCM_F; case MIX_PCMI_SURROUND: return AMIXER_PCM_S; case MIX_PCMI_CENTLFE: return AMIXER_PCM_C; case MIX_PCMI_REAR: return AMIXER_PCM_R; default: return 0; } } static int mixer_get_output_ports(struct ct_mixer *mixer, enum MIXER_PORT_T type, struct rsc **rleft, struct rsc **rright) { enum CT_AMIXER_CTL amix = port_to_amixer(type); if (NULL != rleft) *rleft = &((struct amixer *)mixer->amixers[amix*CHN_NUM])->rsc; if (NULL != rright) *rright = &((struct amixer *)mixer->amixers[amix*CHN_NUM+1])->rsc; return 0; } static int mixer_set_input_left(struct ct_mixer *mixer, enum MIXER_PORT_T type, struct rsc *rsc) { enum CT_AMIXER_CTL amix = port_to_amixer(type); mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc); amix = get_recording_amixer(amix); if (amix < NUM_CT_AMIXERS) mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc); return 0; } static int mixer_set_input_right(struct ct_mixer *mixer, enum MIXER_PORT_T type, struct rsc *rsc) { enum CT_AMIXER_CTL amix = port_to_amixer(type); mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc); amix = get_recording_amixer(amix); if (amix < NUM_CT_AMIXERS) mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc); return 0; } #ifdef CONFIG_PM_SLEEP static int mixer_resume(struct ct_mixer *mixer) { int i, state; struct amixer *amixer; /* resume topology and volume gain. */ for (i = 0; i < NUM_CT_AMIXERS*CHN_NUM; i++) { amixer = mixer->amixers[i]; amixer->ops->commit_write(amixer); } /* resume switch state. 
*/ for (i = SWH_MIXER_START; i <= SWH_MIXER_END; i++) { state = get_switch_state(mixer, i); do_switch(mixer->atc, i, state); } return 0; } #endif int ct_mixer_destroy(struct ct_mixer *mixer) { struct sum_mgr *sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM]; struct amixer_mgr *amixer_mgr = (struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER]; struct amixer *amixer; int i = 0; /* Release amixer resources */ for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) { if (NULL != mixer->amixers[i]) { amixer = mixer->amixers[i]; amixer_mgr->put_amixer(amixer_mgr, amixer); } } /* Release sum resources */ for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) { if (NULL != mixer->sums[i]) sum_mgr->put_sum(sum_mgr, (struct sum *)mixer->sums[i]); } /* Release mem assigned to mixer object */ kfree(mixer->sums); kfree(mixer->amixers); kfree(mixer); return 0; } int ct_mixer_create(struct ct_atc *atc, struct ct_mixer **rmixer) { struct ct_mixer *mixer; int err; *rmixer = NULL; /* Allocate mem for mixer obj */ err = ct_mixer_get_mem(&mixer); if (err) return err; mixer->switch_state = 0; mixer->atc = atc; /* Set operations */ mixer->get_output_ports = mixer_get_output_ports; mixer->set_input_left = mixer_set_input_left; mixer->set_input_right = mixer_set_input_right; #ifdef CONFIG_PM_SLEEP mixer->resume = mixer_resume; #endif /* Allocate chip resources for mixer obj */ err = ct_mixer_get_resources(mixer); if (err) goto error; /* Build internal mixer topology */ ct_mixer_topology_build(mixer); *rmixer = mixer; return 0; error: ct_mixer_destroy(mixer); return err; } int ct_alsa_mix_create(struct ct_atc *atc, enum CTALSADEVS device, const char *device_name) { int err; /* Create snd kcontrol instances on demand */ /* vol_ctl.device = swh_ctl.device = device; */ /* better w/ device 0 */ err = ct_mixer_kcontrols_create((struct ct_mixer *)atc->mixer); if (err) return err; strcpy(atc->card->mixername, device_name); return 0; }
gpl-2.0
luckpizza/n8000-kernel-aufs
arch/ia64/kernel/paravirt.c
3983
26380
/******************************************************************************
 * arch/ia64/kernel/paravirt.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *     Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/init.h>

#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/types.h>

#include <asm/iosapic.h>
#include <asm/paravirt.h>

/***************************************************************************
 * general info
 */

/*
 * Default paravirt identity: the native (non-virtualized) case.
 * Hypervisor backends override these fields at boot.
 */
struct pv_info pv_info = {
	.kernel_rpl = 0,		/* kernel runs at privilege level 0 */
	.paravirt_enabled = 0,		/* no hypervisor active */
	.name = "bare hardware"
};

/***************************************************************************
 * pv_init_ops
 * initialization hooks.
 */

static void __init
ia64_native_patch_branch(unsigned long tag, unsigned long type);

/*
 * Native boot-time patching hooks.  patch_bundle is only available when
 * the toolchain supports the required asm (ASM_SUPPORTED).
 */
struct pv_init_ops pv_init_ops =
{
#ifdef ASM_SUPPORTED
	.patch_bundle = ia64_native_patch_bundle,
#endif
	.patch_branch = ia64_native_patch_branch,
};

/***************************************************************************
 * pv_cpu_ops
 * intrinsics hooks.
*/ #ifndef ASM_SUPPORTED /* ia64_native_xxx are macros so that we have to make them real functions */ #define DEFINE_VOID_FUNC1(name) \ static void \ ia64_native_ ## name ## _func(unsigned long arg) \ { \ ia64_native_ ## name(arg); \ } #define DEFINE_VOID_FUNC1_VOID(name) \ static void \ ia64_native_ ## name ## _func(void *arg) \ { \ ia64_native_ ## name(arg); \ } #define DEFINE_VOID_FUNC2(name) \ static void \ ia64_native_ ## name ## _func(unsigned long arg0, \ unsigned long arg1) \ { \ ia64_native_ ## name(arg0, arg1); \ } #define DEFINE_FUNC0(name) \ static unsigned long \ ia64_native_ ## name ## _func(void) \ { \ return ia64_native_ ## name(); \ } #define DEFINE_FUNC1(name, type) \ static unsigned long \ ia64_native_ ## name ## _func(type arg) \ { \ return ia64_native_ ## name(arg); \ } \ DEFINE_VOID_FUNC1_VOID(fc); DEFINE_VOID_FUNC1(intrin_local_irq_restore); DEFINE_VOID_FUNC2(ptcga); DEFINE_VOID_FUNC2(set_rr); DEFINE_FUNC0(get_psr_i); DEFINE_FUNC1(thash, unsigned long); DEFINE_FUNC1(get_cpuid, int); DEFINE_FUNC1(get_pmd, int); DEFINE_FUNC1(get_rr, unsigned long); static void ia64_native_ssm_i_func(void) { ia64_native_ssm(IA64_PSR_I); } static void ia64_native_rsm_i_func(void) { ia64_native_rsm(IA64_PSR_I); } static void ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, unsigned long val2, unsigned long val3, unsigned long val4) { ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4); } #define CASE_GET_REG(id) \ case _IA64_REG_ ## id: \ res = ia64_native_getreg(_IA64_REG_ ## id); \ break; #define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id) #define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id) unsigned long ia64_native_getreg_func(int regnum) { unsigned long res = -1; switch (regnum) { CASE_GET_REG(GP); /*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */ CASE_GET_REG(PSR); CASE_GET_REG(TP); CASE_GET_REG(SP); CASE_GET_AR(KR0); CASE_GET_AR(KR1); CASE_GET_AR(KR2); CASE_GET_AR(KR3); CASE_GET_AR(KR4); CASE_GET_AR(KR5); 
CASE_GET_AR(KR6); CASE_GET_AR(KR7); CASE_GET_AR(RSC); CASE_GET_AR(BSP); CASE_GET_AR(BSPSTORE); CASE_GET_AR(RNAT); CASE_GET_AR(FCR); CASE_GET_AR(EFLAG); CASE_GET_AR(CSD); CASE_GET_AR(SSD); CASE_GET_AR(CFLAG); CASE_GET_AR(FSR); CASE_GET_AR(FIR); CASE_GET_AR(FDR); CASE_GET_AR(CCV); CASE_GET_AR(UNAT); CASE_GET_AR(FPSR); CASE_GET_AR(ITC); CASE_GET_AR(PFS); CASE_GET_AR(LC); CASE_GET_AR(EC); CASE_GET_CR(DCR); CASE_GET_CR(ITM); CASE_GET_CR(IVA); CASE_GET_CR(PTA); CASE_GET_CR(IPSR); CASE_GET_CR(ISR); CASE_GET_CR(IIP); CASE_GET_CR(IFA); CASE_GET_CR(ITIR); CASE_GET_CR(IIPA); CASE_GET_CR(IFS); CASE_GET_CR(IIM); CASE_GET_CR(IHA); CASE_GET_CR(LID); CASE_GET_CR(IVR); CASE_GET_CR(TPR); CASE_GET_CR(EOI); CASE_GET_CR(IRR0); CASE_GET_CR(IRR1); CASE_GET_CR(IRR2); CASE_GET_CR(IRR3); CASE_GET_CR(ITV); CASE_GET_CR(PMV); CASE_GET_CR(CMCV); CASE_GET_CR(LRR0); CASE_GET_CR(LRR1); default: printk(KERN_CRIT "wrong_getreg %d\n", regnum); break; } return res; } #define CASE_SET_REG(id) \ case _IA64_REG_ ## id: \ ia64_native_setreg(_IA64_REG_ ## id, val); \ break; #define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id) #define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id) void ia64_native_setreg_func(int regnum, unsigned long val) { switch (regnum) { case _IA64_REG_PSR_L: ia64_native_setreg(_IA64_REG_PSR_L, val); ia64_dv_serialize_data(); break; CASE_SET_REG(SP); CASE_SET_REG(GP); CASE_SET_AR(KR0); CASE_SET_AR(KR1); CASE_SET_AR(KR2); CASE_SET_AR(KR3); CASE_SET_AR(KR4); CASE_SET_AR(KR5); CASE_SET_AR(KR6); CASE_SET_AR(KR7); CASE_SET_AR(RSC); CASE_SET_AR(BSP); CASE_SET_AR(BSPSTORE); CASE_SET_AR(RNAT); CASE_SET_AR(FCR); CASE_SET_AR(EFLAG); CASE_SET_AR(CSD); CASE_SET_AR(SSD); CASE_SET_AR(CFLAG); CASE_SET_AR(FSR); CASE_SET_AR(FIR); CASE_SET_AR(FDR); CASE_SET_AR(CCV); CASE_SET_AR(UNAT); CASE_SET_AR(FPSR); CASE_SET_AR(ITC); CASE_SET_AR(PFS); CASE_SET_AR(LC); CASE_SET_AR(EC); CASE_SET_CR(DCR); CASE_SET_CR(ITM); CASE_SET_CR(IVA); CASE_SET_CR(PTA); CASE_SET_CR(IPSR); CASE_SET_CR(ISR); CASE_SET_CR(IIP); CASE_SET_CR(IFA); 
CASE_SET_CR(ITIR); CASE_SET_CR(IIPA); CASE_SET_CR(IFS); CASE_SET_CR(IIM); CASE_SET_CR(IHA); CASE_SET_CR(LID); CASE_SET_CR(IVR); CASE_SET_CR(TPR); CASE_SET_CR(EOI); CASE_SET_CR(IRR0); CASE_SET_CR(IRR1); CASE_SET_CR(IRR2); CASE_SET_CR(IRR3); CASE_SET_CR(ITV); CASE_SET_CR(PMV); CASE_SET_CR(CMCV); CASE_SET_CR(LRR0); CASE_SET_CR(LRR1); default: printk(KERN_CRIT "wrong setreg %d\n", regnum); break; } } #else #define __DEFINE_FUNC(name, code) \ extern const char ia64_native_ ## name ## _direct_start[]; \ extern const char ia64_native_ ## name ## _direct_end[]; \ asm (".align 32\n" \ ".proc ia64_native_" #name "_func\n" \ "ia64_native_" #name "_func:\n" \ "ia64_native_" #name "_direct_start:\n" \ code \ "ia64_native_" #name "_direct_end:\n" \ "br.cond.sptk.many b6\n" \ ".endp ia64_native_" #name "_func\n") #define DEFINE_VOID_FUNC0(name, code) \ extern void \ ia64_native_ ## name ## _func(void); \ __DEFINE_FUNC(name, code) #define DEFINE_VOID_FUNC1(name, code) \ extern void \ ia64_native_ ## name ## _func(unsigned long arg); \ __DEFINE_FUNC(name, code) #define DEFINE_VOID_FUNC1_VOID(name, code) \ extern void \ ia64_native_ ## name ## _func(void *arg); \ __DEFINE_FUNC(name, code) #define DEFINE_VOID_FUNC2(name, code) \ extern void \ ia64_native_ ## name ## _func(unsigned long arg0, \ unsigned long arg1); \ __DEFINE_FUNC(name, code) #define DEFINE_FUNC0(name, code) \ extern unsigned long \ ia64_native_ ## name ## _func(void); \ __DEFINE_FUNC(name, code) #define DEFINE_FUNC1(name, type, code) \ extern unsigned long \ ia64_native_ ## name ## _func(type arg); \ __DEFINE_FUNC(name, code) DEFINE_VOID_FUNC1_VOID(fc, "fc r8\n"); DEFINE_VOID_FUNC1(intrin_local_irq_restore, ";;\n" " cmp.ne p6, p7 = r8, r0\n" ";;\n" "(p6) ssm psr.i\n" "(p7) rsm psr.i\n" ";;\n" "(p6) srlz.d\n"); DEFINE_VOID_FUNC2(ptcga, "ptc.ga r8, r9\n"); DEFINE_VOID_FUNC2(set_rr, "mov rr[r8] = r9\n"); /* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ DEFINE_FUNC0(get_psr_i, "mov r2 = " __stringify(1 << 
IA64_PSR_I_BIT) "\n" "mov r8 = psr\n" ";;\n" "and r8 = r2, r8\n"); DEFINE_FUNC1(thash, unsigned long, "thash r8 = r8\n"); DEFINE_FUNC1(get_cpuid, int, "mov r8 = cpuid[r8]\n"); DEFINE_FUNC1(get_pmd, int, "mov r8 = pmd[r8]\n"); DEFINE_FUNC1(get_rr, unsigned long, "mov r8 = rr[r8]\n"); DEFINE_VOID_FUNC0(ssm_i, "ssm psr.i\n"); DEFINE_VOID_FUNC0(rsm_i, "rsm psr.i\n"); extern void ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, unsigned long val2, unsigned long val3, unsigned long val4); __DEFINE_FUNC(set_rr0_to_rr4, "mov rr[r0] = r8\n" "movl r2 = 0x2000000000000000\n" ";;\n" "mov rr[r2] = r9\n" "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ ";;\n" "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ "mov rr[r3] = r10\n" ";;\n" "mov rr[r2] = r11\n" "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ ";;\n" "mov rr[r3] = r14\n"); extern unsigned long ia64_native_getreg_func(int regnum); asm(".global ia64_native_getreg_func\n"); #define __DEFINE_GET_REG(id, reg) \ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ ";;\n" \ "cmp.eq p6, p0 = r2, r8\n" \ ";;\n" \ "(p6) mov r8 = " #reg "\n" \ "(p6) br.cond.sptk.many b6\n" \ ";;\n" #define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) #define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) __DEFINE_FUNC(getreg, __DEFINE_GET_REG(GP, gp) /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ __DEFINE_GET_REG(PSR, psr) __DEFINE_GET_REG(TP, tp) __DEFINE_GET_REG(SP, sp) __DEFINE_GET_REG(AR_KR0, ar0) __DEFINE_GET_REG(AR_KR1, ar1) __DEFINE_GET_REG(AR_KR2, ar2) __DEFINE_GET_REG(AR_KR3, ar3) __DEFINE_GET_REG(AR_KR4, ar4) __DEFINE_GET_REG(AR_KR5, ar5) __DEFINE_GET_REG(AR_KR6, ar6) __DEFINE_GET_REG(AR_KR7, ar7) __DEFINE_GET_AR(RSC, rsc) __DEFINE_GET_AR(BSP, bsp) __DEFINE_GET_AR(BSPSTORE, bspstore) __DEFINE_GET_AR(RNAT, rnat) __DEFINE_GET_AR(FCR, fcr) __DEFINE_GET_AR(EFLAG, eflag) __DEFINE_GET_AR(CSD, csd) __DEFINE_GET_AR(SSD, ssd) __DEFINE_GET_REG(AR_CFLAG, 
ar27) __DEFINE_GET_AR(FSR, fsr) __DEFINE_GET_AR(FIR, fir) __DEFINE_GET_AR(FDR, fdr) __DEFINE_GET_AR(CCV, ccv) __DEFINE_GET_AR(UNAT, unat) __DEFINE_GET_AR(FPSR, fpsr) __DEFINE_GET_AR(ITC, itc) __DEFINE_GET_AR(PFS, pfs) __DEFINE_GET_AR(LC, lc) __DEFINE_GET_AR(EC, ec) __DEFINE_GET_CR(DCR, dcr) __DEFINE_GET_CR(ITM, itm) __DEFINE_GET_CR(IVA, iva) __DEFINE_GET_CR(PTA, pta) __DEFINE_GET_CR(IPSR, ipsr) __DEFINE_GET_CR(ISR, isr) __DEFINE_GET_CR(IIP, iip) __DEFINE_GET_CR(IFA, ifa) __DEFINE_GET_CR(ITIR, itir) __DEFINE_GET_CR(IIPA, iipa) __DEFINE_GET_CR(IFS, ifs) __DEFINE_GET_CR(IIM, iim) __DEFINE_GET_CR(IHA, iha) __DEFINE_GET_CR(LID, lid) __DEFINE_GET_CR(IVR, ivr) __DEFINE_GET_CR(TPR, tpr) __DEFINE_GET_CR(EOI, eoi) __DEFINE_GET_CR(IRR0, irr0) __DEFINE_GET_CR(IRR1, irr1) __DEFINE_GET_CR(IRR2, irr2) __DEFINE_GET_CR(IRR3, irr3) __DEFINE_GET_CR(ITV, itv) __DEFINE_GET_CR(PMV, pmv) __DEFINE_GET_CR(CMCV, cmcv) __DEFINE_GET_CR(LRR0, lrr0) __DEFINE_GET_CR(LRR1, lrr1) "mov r8 = -1\n" /* unsupported case */ ); extern void ia64_native_setreg_func(int regnum, unsigned long val); asm(".global ia64_native_setreg_func\n"); #define __DEFINE_SET_REG(id, reg) \ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ ";;\n" \ "cmp.eq p6, p0 = r2, r9\n" \ ";;\n" \ "(p6) mov " #reg " = r8\n" \ "(p6) br.cond.sptk.many b6\n" \ ";;\n" #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) #define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) __DEFINE_FUNC(setreg, "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" ";;\n" "cmp.eq p6, p0 = r2, r9\n" ";;\n" "(p6) mov psr.l = r8\n" #ifdef HAVE_SERIALIZE_DIRECTIVE ".serialize.data\n" #endif "(p6) br.cond.sptk.many b6\n" __DEFINE_SET_REG(GP, gp) __DEFINE_SET_REG(SP, sp) __DEFINE_SET_REG(AR_KR0, ar0) __DEFINE_SET_REG(AR_KR1, ar1) __DEFINE_SET_REG(AR_KR2, ar2) __DEFINE_SET_REG(AR_KR3, ar3) __DEFINE_SET_REG(AR_KR4, ar4) __DEFINE_SET_REG(AR_KR5, ar5) __DEFINE_SET_REG(AR_KR6, ar6) __DEFINE_SET_REG(AR_KR7, ar7) __DEFINE_SET_AR(RSC, rsc) 
__DEFINE_SET_AR(BSP, bsp) __DEFINE_SET_AR(BSPSTORE, bspstore) __DEFINE_SET_AR(RNAT, rnat) __DEFINE_SET_AR(FCR, fcr) __DEFINE_SET_AR(EFLAG, eflag) __DEFINE_SET_AR(CSD, csd) __DEFINE_SET_AR(SSD, ssd) __DEFINE_SET_REG(AR_CFLAG, ar27) __DEFINE_SET_AR(FSR, fsr) __DEFINE_SET_AR(FIR, fir) __DEFINE_SET_AR(FDR, fdr) __DEFINE_SET_AR(CCV, ccv) __DEFINE_SET_AR(UNAT, unat) __DEFINE_SET_AR(FPSR, fpsr) __DEFINE_SET_AR(ITC, itc) __DEFINE_SET_AR(PFS, pfs) __DEFINE_SET_AR(LC, lc) __DEFINE_SET_AR(EC, ec) __DEFINE_SET_CR(DCR, dcr) __DEFINE_SET_CR(ITM, itm) __DEFINE_SET_CR(IVA, iva) __DEFINE_SET_CR(PTA, pta) __DEFINE_SET_CR(IPSR, ipsr) __DEFINE_SET_CR(ISR, isr) __DEFINE_SET_CR(IIP, iip) __DEFINE_SET_CR(IFA, ifa) __DEFINE_SET_CR(ITIR, itir) __DEFINE_SET_CR(IIPA, iipa) __DEFINE_SET_CR(IFS, ifs) __DEFINE_SET_CR(IIM, iim) __DEFINE_SET_CR(IHA, iha) __DEFINE_SET_CR(LID, lid) __DEFINE_SET_CR(IVR, ivr) __DEFINE_SET_CR(TPR, tpr) __DEFINE_SET_CR(EOI, eoi) __DEFINE_SET_CR(IRR0, irr0) __DEFINE_SET_CR(IRR1, irr1) __DEFINE_SET_CR(IRR2, irr2) __DEFINE_SET_CR(IRR3, irr3) __DEFINE_SET_CR(ITV, itv) __DEFINE_SET_CR(PMV, pmv) __DEFINE_SET_CR(CMCV, cmcv) __DEFINE_SET_CR(LRR0, lrr0) __DEFINE_SET_CR(LRR1, lrr1) ); #endif struct pv_cpu_ops pv_cpu_ops = { .fc = ia64_native_fc_func, .thash = ia64_native_thash_func, .get_cpuid = ia64_native_get_cpuid_func, .get_pmd = ia64_native_get_pmd_func, .ptcga = ia64_native_ptcga_func, .get_rr = ia64_native_get_rr_func, .set_rr = ia64_native_set_rr_func, .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func, .ssm_i = ia64_native_ssm_i_func, .getreg = ia64_native_getreg_func, .setreg = ia64_native_setreg_func, .rsm_i = ia64_native_rsm_i_func, .get_psr_i = ia64_native_get_psr_i_func, .intrin_local_irq_restore = ia64_native_intrin_local_irq_restore_func, }; EXPORT_SYMBOL(pv_cpu_ops); /****************************************************************************** * replacement of hand written assembly codes. 
*/ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch) { extern unsigned long paravirt_switch_to_targ; extern unsigned long paravirt_leave_syscall_targ; extern unsigned long paravirt_work_processed_syscall_targ; extern unsigned long paravirt_leave_kernel_targ; paravirt_switch_to_targ = cpu_asm_switch->switch_to; paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall; paravirt_work_processed_syscall_targ = cpu_asm_switch->work_processed_syscall; paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel; } /*************************************************************************** * pv_iosapic_ops * iosapic read/write hooks. */ static unsigned int ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg) { return __ia64_native_iosapic_read(iosapic, reg); } static void ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) { __ia64_native_iosapic_write(iosapic, reg, val); } struct pv_iosapic_ops pv_iosapic_ops = { .pcat_compat_init = ia64_native_iosapic_pcat_compat_init, .__get_irq_chip = ia64_native_iosapic_get_irq_chip, .__read = ia64_native_iosapic_read, .__write = ia64_native_iosapic_write, }; /*************************************************************************** * pv_irq_ops * irq operations */ struct pv_irq_ops pv_irq_ops = { .register_ipi = ia64_native_register_ipi, .assign_irq_vector = ia64_native_assign_irq_vector, .free_irq_vector = ia64_native_free_irq_vector, .register_percpu_irq = ia64_native_register_percpu_irq, .resend_irq = ia64_native_resend_irq, }; /*************************************************************************** * pv_time_ops * time operations */ static int ia64_native_do_steal_accounting(unsigned long *new_itm) { return 0; } struct pv_time_ops pv_time_ops = { .do_steal_accounting = ia64_native_do_steal_accounting, .sched_clock = ia64_native_sched_clock, }; /*************************************************************************** * binary pacthing * 
pv_init_ops.patch_bundle */ #ifdef ASM_SUPPORTED #define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ __DEFINE_FUNC(get_ ## name, \ ";;\n" \ "mov r8 = " #reg "\n" \ ";;\n") #define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ __DEFINE_FUNC(set_ ## name, \ ";;\n" \ "mov " #reg " = r8\n" \ ";;\n") #define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ #define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) #define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); /* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ __DEFINE_FUNC(set_psr_l, ";;\n" "mov psr.l = r8\n" #ifdef HAVE_SERIALIZE_DIRECTIVE ".serialize.data\n" #endif ";;\n"); IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs); 
IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] __initdata_or_module = { #define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ { \ (void*)ia64_native_ ## name ## _direct_start, \ (void*)ia64_native_ ## name ## _direct_end, \ PARAVIRT_PATCH_TYPE_ ## type, \ } IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, 
INTRIN_LOCAL_IRQ_RESTORE), #define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ { \ (void*)ia64_native_get_ ## name ## _direct_start, \ (void*)ia64_native_get_ ## name ## _direct_end, \ PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ } #define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ { \ (void*)ia64_native_set_ ## name ## _direct_start, \ (void*)ia64_native_set_ ## name ## _direct_end, \ PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ } #define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ #define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) #define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), 
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), }; unsigned long __init_or_module ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) { const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / sizeof(ia64_native_patch_bundle_elems[0]); return __paravirt_patch_apply_bundle(sbundle, ebundle, type, ia64_native_patch_bundle_elems, nelems, NULL); } #endif /* ASM_SUPPOTED */ extern const char ia64_native_switch_to[]; extern const char ia64_native_leave_syscall[]; extern const char 
ia64_native_work_processed_syscall[]; extern const char ia64_native_leave_kernel[]; const struct paravirt_patch_branch_target ia64_native_branch_target[] __initconst = { #define PARAVIRT_BR_TARGET(name, type) \ { \ ia64_native_ ## name, \ PARAVIRT_PATCH_TYPE_BR_ ## type, \ } PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), }; static void __init ia64_native_patch_branch(unsigned long tag, unsigned long type) { const unsigned long nelem = sizeof(ia64_native_branch_target) / sizeof(ia64_native_branch_target[0]); __paravirt_patch_apply_branch(tag, type, ia64_native_branch_target, nelem); }
gpl-2.0
godmachine81/AsusTF300-Enhanced-Kernel
crypto/async_tx/async_tx.c
3983
7786
/* * core routines for the asynchronous memory transfer/transform api * * Copyright © 2006, Intel Corporation. * * Dan Williams <dan.j.williams@intel.com> * * with architecture considerations by: * Neil Brown <neilb@suse.de> * Jeff Garzik <jeff@garzik.org> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/rculist.h> #include <linux/kernel.h> #include <linux/async_tx.h> #ifdef CONFIG_DMA_ENGINE static int __init async_tx_init(void) { async_dmaengine_get(); printk(KERN_INFO "async_tx: api initialized (async)\n"); return 0; } static void __exit async_tx_exit(void) { async_dmaengine_put(); } module_init(async_tx_init); module_exit(async_tx_exit); /** * __async_tx_find_channel - find a channel to carry out the operation or let * the transaction execute synchronously * @submit: transaction dependency and submission modifiers * @tx_type: transaction type */ struct dma_chan * __async_tx_find_channel(struct async_submit_ctl *submit, enum dma_transaction_type tx_type) { struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; /* see if we can keep the chain on one channel */ if (depend_tx && dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) return depend_tx->chan; return async_dma_find_channel(tx_type); } EXPORT_SYMBOL_GPL(__async_tx_find_channel); #endif /** * async_tx_channel_switch - queue an interrupt descriptor with a dependency * pre-attached. 
* @depend_tx: the operation that must finish before the new operation runs * @tx: the new operation */ static void async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_async_tx_descriptor *tx) { struct dma_chan *chan = depend_tx->chan; struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; /* first check to see if we can still append to depend_tx */ txd_lock(depend_tx); if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { txd_chain(depend_tx, tx); intr_tx = NULL; } txd_unlock(depend_tx); /* attached dependency, flush the parent channel */ if (!intr_tx) { device->device_issue_pending(chan); return; } /* see if we can schedule an interrupt * otherwise poll for completion */ if (dma_has_cap(DMA_INTERRUPT, device->cap_mask)) intr_tx = device->device_prep_dma_interrupt(chan, 0); else intr_tx = NULL; if (intr_tx) { intr_tx->callback = NULL; intr_tx->callback_param = NULL; /* safe to chain outside the lock since we know we are * not submitted yet */ txd_chain(intr_tx, tx); /* check if we need to append */ txd_lock(depend_tx); if (txd_parent(depend_tx)) { txd_chain(depend_tx, intr_tx); async_tx_ack(intr_tx); intr_tx = NULL; } txd_unlock(depend_tx); if (intr_tx) { txd_clear_parent(intr_tx); intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } device->device_issue_pending(chan); } else { if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) panic("%s: DMA_ERROR waiting for depend_tx\n", __func__); tx->tx_submit(tx); } } /** * submit_disposition - flags for routing an incoming operation * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly * * while holding depend_tx->lock we must avoid submitting new operations * to prevent a circular locking dependency with drivers that already * hold a channel lock when calling 
async_tx_run_dependencies. */ enum submit_disposition { ASYNC_TX_SUBMITTED, ASYNC_TX_CHANNEL_SWITCH, ASYNC_TX_DIRECT_SUBMIT, }; void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; tx->callback = submit->cb_fn; tx->callback_param = submit->cb_param; if (depend_tx) { enum submit_disposition s; /* sanity check the dependency chain: * 1/ if ack is already set then we cannot be sure * we are referring to the correct operation * 2/ dependencies are 1:1 i.e. two transactions can * not depend on the same parent */ BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || txd_parent(tx)); /* the lock prevents async_tx_run_dependencies from missing * the setting of ->next when ->parent != NULL */ txd_lock(depend_tx); if (txd_parent(depend_tx)) { /* we have a parent so we can not submit directly * if we are staying on the same channel: append * else: channel switch */ if (depend_tx->chan == chan) { txd_chain(depend_tx, tx); s = ASYNC_TX_SUBMITTED; } else s = ASYNC_TX_CHANNEL_SWITCH; } else { /* we do not have a parent so we may be able to submit * directly if we are staying on the same channel */ if (depend_tx->chan == chan) s = ASYNC_TX_DIRECT_SUBMIT; else s = ASYNC_TX_CHANNEL_SWITCH; } txd_unlock(depend_tx); switch (s) { case ASYNC_TX_SUBMITTED: break; case ASYNC_TX_CHANNEL_SWITCH: async_tx_channel_switch(depend_tx, tx); break; case ASYNC_TX_DIRECT_SUBMIT: txd_clear_parent(tx); tx->tx_submit(tx); break; } } else { txd_clear_parent(tx); tx->tx_submit(tx); } if (submit->flags & ASYNC_TX_ACK) async_tx_ack(tx); if (depend_tx) async_tx_ack(depend_tx); } EXPORT_SYMBOL_GPL(async_tx_submit); /** * async_trigger_callback - schedules the callback function to be run * @submit: submission and completion parameters * * honored flags: ASYNC_TX_ACK * * The callback is run after any dependent operations have completed. 
*/ struct dma_async_tx_descriptor * async_trigger_callback(struct async_submit_ctl *submit) { struct dma_chan *chan; struct dma_device *device; struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; if (depend_tx) { chan = depend_tx->chan; device = chan->device; /* see if we can schedule an interrupt * otherwise poll for completion */ if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask)) device = NULL; tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL; } else tx = NULL; if (tx) { pr_debug("%s: (async)\n", __func__); async_tx_submit(chan, tx, submit); } else { pr_debug("%s: (sync)\n", __func__); /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); async_tx_sync_epilog(submit); } return tx; } EXPORT_SYMBOL_GPL(async_trigger_callback); /** * async_tx_quiesce - ensure tx is complete and freeable upon return * @tx - transaction to quiesce */ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) { if (*tx) { /* if ack is already set then we cannot be sure * we are referring to the correct operation */ BUG_ON(async_tx_test_ack(*tx)); if (dma_wait_for_async_tx(*tx) == DMA_ERROR) panic("DMA_ERROR waiting for transaction\n"); async_tx_ack(*tx); *tx = NULL; } } EXPORT_SYMBOL_GPL(async_tx_quiesce); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); MODULE_LICENSE("GPL");
gpl-2.0
JustAkan/Oxygen_united_kernel-gproj
arch/mips/kernel/smp-cmp.c
4495
5300
/* * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Copyright (C) 2007 MIPS Technologies, Inc. * Chris Dearman (chris@mips.com) */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/atomic.h> #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/processor.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/smp.h> #include <asm/time.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/amon.h> #include <asm/gic.h> static void ipi_call_function(unsigned int cpu) { pr_debug("CPU%d: %s cpu %d status %08x\n", smp_processor_id(), __func__, cpu, read_c0_status()); gic_send_ipi(plat_ipi_call_int_xlate(cpu)); } static void ipi_resched(unsigned int cpu) { pr_debug("CPU%d: %s cpu %d status %08x\n", smp_processor_id(), __func__, cpu, read_c0_status()); gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); } /* * FIXME: This isn't restricted to CMP * The SMVP kernel could use GIC interrupts if available */ void cmp_send_ipi_single(int cpu, unsigned int action) { unsigned long flags; local_irq_save(flags); switch (action) { case SMP_CALL_FUNCTION: ipi_call_function(cpu); break; case SMP_RESCHEDULE_YOURSELF: ipi_resched(cpu); break; } local_irq_restore(flags); } static void cmp_send_ipi_mask(const struct 
cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) cmp_send_ipi_single(i, action); } static void cmp_init_secondary(void) { struct cpuinfo_mips *c = &current_cpu_data; /* Assume GIC is present */ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | STATUSF_IP7); /* Enable per-cpu interrupts: platform specific */ c->core = (read_c0_ebase() >> 1) & 0xff; #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif #ifdef CONFIG_MIPS_MT_SMTC c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; #endif } static void cmp_smp_finish(void) { pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); /* CDFIXME: remove this? */ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) cpu_set(smp_processor_id(), mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ local_irq_enable(); } static void cmp_cpus_done(void) { pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); } /* * Setup the PC, SP, and GP of a secondary processor and start it running * smp_bootstrap is the place to resume from * __KSTK_TOS(idle) is apparently the stack pointer * (unsigned long)idle->thread_info the gp */ static void cmp_boot_secondary(int cpu, struct task_struct *idle) { struct thread_info *gp = task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); unsigned long pc = (unsigned long)&smp_bootstrap; unsigned long a0 = 0; pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), __func__, cpu); #if 0 /* Needed? 
*/ flush_icache_range((unsigned long)gp, (unsigned long)(gp + sizeof(struct thread_info))); #endif amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); } /* * Common setup before any secondaries are started */ void __init cmp_smp_setup(void) { int i; int ncpu = 0; pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) cpu_set(0, mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ for (i = 1; i < NR_CPUS; i++) { if (amon_cpu_avail(i)) { set_cpu_possible(i, true); __cpu_number_map[i] = ++ncpu; __cpu_logical_map[ncpu] = i; } } if (cpu_has_mipsmt) { unsigned int nvpe, mvpconf0 = read_c0_mvpconf0(); nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; smp_num_siblings = nvpe; } pr_info("Detected %i available secondary CPU(s)\n", ncpu); } void __init cmp_prepare_cpus(unsigned int max_cpus) { pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", smp_processor_id(), __func__, max_cpus); /* * FIXME: some of these options are per-system, some per-core and * some per-cpu */ mips_mt_set_cpuoptions(); } struct plat_smp_ops cmp_smp_ops = { .send_ipi_single = cmp_send_ipi_single, .send_ipi_mask = cmp_send_ipi_mask, .init_secondary = cmp_init_secondary, .smp_finish = cmp_smp_finish, .cpus_done = cmp_cpus_done, .boot_secondary = cmp_boot_secondary, .smp_setup = cmp_smp_setup, .prepare_cpus = cmp_prepare_cpus, };
gpl-2.0
Apology11/test
drivers/of/irq.c
4751
14086
/* * Derived from arch/i386/kernel/irq.c * Copyright (C) 1992 Linus Torvalds * Adapted from arch/i386 by Gary Thomas * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Updated and modified by Cort Dougan <cort@fsmlabs.com> * Copyright (C) 1996-2001 Cort Dougan * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This file contains the code used to make IRQ descriptions in the * device tree to actual irq numbers on an interrupt controller * driver. */ #include <linux/errno.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/string.h> #include <linux/slab.h> /** * irq_of_parse_and_map - Parse and map an interrupt into linux virq space * @device: Device node of the device whose interrupt is to be mapped * @index: Index of the interrupt to map * * This function is a wrapper that chains of_irq_map_one() and * irq_create_of_mapping() to make things easier to callers */ unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { struct of_irq oirq; if (of_irq_map_one(dev, index, &oirq)) return 0; return irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); } EXPORT_SYMBOL_GPL(irq_of_parse_and_map); /** * of_irq_find_parent - Given a device node, find its interrupt parent node * @child: pointer to device node * * Returns a pointer to the interrupt parent node, or NULL if the interrupt * parent could not be determined. 
*/ struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; const __be32 *parp; if (!of_node_get(child)) return NULL; do { parp = of_get_property(child, "interrupt-parent", NULL); if (parp == NULL) p = of_get_parent(child); else { if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) p = of_node_get(of_irq_dflt_pic); else p = of_find_node_by_phandle(be32_to_cpup(parp)); } of_node_put(child); child = p; } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); return p; } /** * of_irq_map_raw - Low level interrupt tree parsing * @parent: the device interrupt parent * @intspec: interrupt specifier ("interrupts" property of the device) * @ointsize: size of the passed in interrupt specifier * @addr: address specifier (start of "reg" property of the device) * @out_irq: structure of_irq filled by this function * * Returns 0 on success and a negative number on error * * This function is a low-level interrupt tree walking function. It * can be used to do a partial walk with synthetized reg and interrupts * properties, for example when resolving PCI interrupts when no device * node exist for the parent. */ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec, u32 ointsize, const __be32 *addr, struct of_irq *out_irq) { struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; const __be32 *tmp, *imap, *imask; u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; int imaplen, match, i; pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n", parent->full_name, be32_to_cpup(intspec), be32_to_cpup(intspec + 1), ointsize); ipar = of_node_get(parent); /* First get the #interrupt-cells property of the current cursor * that tells us how to interpret the passed-in intspec. 
If there * is none, we are nice and just walk up the tree */ do { tmp = of_get_property(ipar, "#interrupt-cells", NULL); if (tmp != NULL) { intsize = be32_to_cpu(*tmp); break; } tnode = ipar; ipar = of_irq_find_parent(ipar); of_node_put(tnode); } while (ipar); if (ipar == NULL) { pr_debug(" -> no parent found !\n"); goto fail; } pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize); if (ointsize != intsize) return -EINVAL; /* Look for this #address-cells. We have to implement the old linux * trick of looking for the parent here as some device-trees rely on it */ old = of_node_get(ipar); do { tmp = of_get_property(old, "#address-cells", NULL); tnode = of_get_parent(old); of_node_put(old); old = tnode; } while (old && tmp == NULL); of_node_put(old); old = NULL; addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp); pr_debug(" -> addrsize=%d\n", addrsize); /* Now start the actual "proper" walk of the interrupt tree */ while (ipar != NULL) { /* Now check if cursor is an interrupt-controller and if it is * then we are done */ if (of_get_property(ipar, "interrupt-controller", NULL) != NULL) { pr_debug(" -> got it !\n"); for (i = 0; i < intsize; i++) out_irq->specifier[i] = of_read_number(intspec +i, 1); out_irq->size = intsize; out_irq->controller = ipar; of_node_put(old); return 0; } /* Now look for an interrupt-map */ imap = of_get_property(ipar, "interrupt-map", &imaplen); /* No interrupt map, check for an interrupt parent */ if (imap == NULL) { pr_debug(" -> no map, getting parent\n"); newpar = of_irq_find_parent(ipar); goto skiplevel; } imaplen /= sizeof(u32); /* Look for a mask */ imask = of_get_property(ipar, "interrupt-map-mask", NULL); /* If we were passed no "reg" property and we attempt to parse * an interrupt-map, then #address-cells must be 0. * Fail if it's not. 
*/ if (addr == NULL && addrsize != 0) { pr_debug(" -> no reg passed in when needed !\n"); goto fail; } /* Parse interrupt-map */ match = 0; while (imaplen > (addrsize + intsize + 1) && !match) { /* Compare specifiers */ match = 1; for (i = 0; i < addrsize && match; ++i) { u32 mask = imask ? imask[i] : 0xffffffffu; match = ((addr[i] ^ imap[i]) & mask) == 0; } for (; i < (addrsize + intsize) && match; ++i) { u32 mask = imask ? imask[i] : 0xffffffffu; match = ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; } imap += addrsize + intsize; imaplen -= addrsize + intsize; pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); /* Get the interrupt parent */ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) newpar = of_node_get(of_irq_dflt_pic); else newpar = of_find_node_by_phandle(be32_to_cpup(imap)); imap++; --imaplen; /* Check if not found */ if (newpar == NULL) { pr_debug(" -> imap parent not found !\n"); goto fail; } /* Get #interrupt-cells and #address-cells of new * parent */ tmp = of_get_property(newpar, "#interrupt-cells", NULL); if (tmp == NULL) { pr_debug(" -> parent lacks #interrupt-cells!\n"); goto fail; } newintsize = be32_to_cpu(*tmp); tmp = of_get_property(newpar, "#address-cells", NULL); newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp); pr_debug(" -> newintsize=%d, newaddrsize=%d\n", newintsize, newaddrsize); /* Check for malformed properties */ if (imaplen < (newaddrsize + newintsize)) goto fail; imap += newaddrsize + newintsize; imaplen -= newaddrsize + newintsize; pr_debug(" -> imaplen=%d\n", imaplen); } if (!match) goto fail; of_node_put(old); old = of_node_get(newpar); addrsize = newaddrsize; intsize = newintsize; intspec = imap - intsize; addr = intspec - addrsize; skiplevel: /* Iterate again with new parent */ pr_debug(" -> new parent: %s\n", newpar ? 
newpar->full_name : "<>"); of_node_put(ipar); ipar = newpar; newpar = NULL; } fail: of_node_put(ipar); of_node_put(old); of_node_put(newpar); return -EINVAL; } EXPORT_SYMBOL_GPL(of_irq_map_raw); /** * of_irq_map_one - Resolve an interrupt for a device * @device: the device whose interrupt is to be resolved * @index: index of the interrupt to resolve * @out_irq: structure of_irq filled by this function * * This function resolves an interrupt, walking the tree, for a given * device-tree node. It's the high level pendant to of_irq_map_raw(). */ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq) { struct device_node *p; const __be32 *intspec, *tmp, *addr; u32 intsize, intlen; int res = -EINVAL; pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index); /* OldWorld mac stuff is "special", handle out of line */ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) return of_irq_map_oldworld(device, index, out_irq); /* Get the interrupts property */ intspec = of_get_property(device, "interrupts", &intlen); if (intspec == NULL) return -EINVAL; intlen /= sizeof(*intspec); pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); /* Get the reg property (if any) */ addr = of_get_property(device, "reg", NULL); /* Look for the interrupt parent. 
*/ p = of_irq_find_parent(device); if (p == NULL) return -EINVAL; /* Get size of interrupt specifier */ tmp = of_get_property(p, "#interrupt-cells", NULL); if (tmp == NULL) goto out; intsize = be32_to_cpu(*tmp); pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); /* Check index */ if ((index + 1) * intsize > intlen) goto out; /* Get new specifier and map it */ res = of_irq_map_raw(p, intspec + index * intsize, intsize, addr, out_irq); out: of_node_put(p); return res; } EXPORT_SYMBOL_GPL(of_irq_map_one); /** * of_irq_to_resource - Decode a node's IRQ and return it as a resource * @dev: pointer to device tree node * @index: zero-based index of the irq * @r: pointer to resource structure to return result into. */ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) { int irq = irq_of_parse_and_map(dev, index); /* Only dereference the resource if both the * resource and the irq are valid. */ if (r && irq) { const char *name = NULL; /* * Get optional "interrupts-names" property to add a name * to the resource. */ of_property_read_string_index(dev, "interrupt-names", index, &name); r->start = r->end = irq; r->flags = IORESOURCE_IRQ; r->name = name ? name : dev->full_name; } return irq; } EXPORT_SYMBOL_GPL(of_irq_to_resource); /** * of_irq_count - Count the number of IRQs a node uses * @dev: pointer to device tree node */ int of_irq_count(struct device_node *dev) { int nr = 0; while (of_irq_to_resource(dev, nr, NULL)) nr++; return nr; } /** * of_irq_to_resource_table - Fill in resource table with node's IRQ info * @dev: pointer to device tree node * @res: array of resources to fill in * @nr_irqs: the number of IRQs (and upper bound for num of @res elements) * * Returns the size of the filled in table (up to @nr_irqs). 
*/ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs) { int i; for (i = 0; i < nr_irqs; i++, res++) if (!of_irq_to_resource(dev, i, res)) break; return i; } struct intc_desc { struct list_head list; struct device_node *dev; struct device_node *interrupt_parent; }; /** * of_irq_init - Scan and init matching interrupt controllers in DT * @matches: 0 terminated array of nodes to match and init function to call * * This function scans the device tree for matching interrupt controller nodes, * and calls their initialization functions in order with parents first. */ void __init of_irq_init(const struct of_device_id *matches) { struct device_node *np, *parent = NULL; struct intc_desc *desc, *temp_desc; struct list_head intc_desc_list, intc_parent_list; INIT_LIST_HEAD(&intc_desc_list); INIT_LIST_HEAD(&intc_parent_list); for_each_matching_node(np, matches) { if (!of_find_property(np, "interrupt-controller", NULL)) continue; /* * Here, we allocate and populate an intc_desc with the node * pointer, interrupt-parent device_node etc. */ desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (WARN_ON(!desc)) goto err; desc->dev = np; desc->interrupt_parent = of_irq_find_parent(np); if (desc->interrupt_parent == np) desc->interrupt_parent = NULL; list_add_tail(&desc->list, &intc_desc_list); } /* * The root irq controller is the one without an interrupt-parent. * That one goes first, followed by the controllers that reference it, * followed by the ones that reference the 2nd level controllers, etc. */ while (!list_empty(&intc_desc_list)) { /* * Process all controllers with the current 'parent'. * First pass will be looking for NULL as the parent. * The assumption is that NULL parent means a root controller. 
*/ list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { const struct of_device_id *match; int ret; of_irq_init_cb_t irq_init_cb; if (desc->interrupt_parent != parent) continue; list_del(&desc->list); match = of_match_node(matches, desc->dev); if (WARN(!match->data, "of_irq_init: no init function for %s\n", match->compatible)) { kfree(desc); continue; } pr_debug("of_irq_init: init %s @ %p, parent %p\n", match->compatible, desc->dev, desc->interrupt_parent); irq_init_cb = match->data; ret = irq_init_cb(desc->dev, desc->interrupt_parent); if (ret) { kfree(desc); continue; } /* * This one is now set up; add it to the parent list so * its children can get processed in a subsequent pass. */ list_add_tail(&desc->list, &intc_parent_list); } /* Get the next pending parent that might have children */ desc = list_first_entry(&intc_parent_list, typeof(*desc), list); if (list_empty(&intc_parent_list) || !desc) { pr_err("of_irq_init: children remain, but no parents\n"); break; } list_del(&desc->list); parent = desc->dev; kfree(desc); } list_for_each_entry_safe(desc, temp_desc, &intc_parent_list, list) { list_del(&desc->list); kfree(desc); } err: list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { list_del(&desc->list); kfree(desc); } }
gpl-2.0
ghbhaha/furnace-bacon
drivers/gpu/drm/gma500/oaktrail_hdmi.c
5263
15621
/* * Copyright © 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Li Peng <peng.li@intel.com> */ #include <drm/drmP.h> #include <drm/drm.h> #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_drv.h" #define HDMI_READ(reg) readl(hdmi_dev->regs + (reg)) #define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg)) #define HDMI_HCR 0x1000 #define HCR_ENABLE_HDCP (1 << 5) #define HCR_ENABLE_AUDIO (1 << 2) #define HCR_ENABLE_PIXEL (1 << 1) #define HCR_ENABLE_TMDS (1 << 0) #define HDMI_HICR 0x1004 #define HDMI_HSR 0x1008 #define HDMI_HISR 0x100C #define HDMI_DETECT_HDP (1 << 0) #define HDMI_VIDEO_REG 0x3000 #define HDMI_UNIT_EN (1 << 7) #define HDMI_MODE_OUTPUT (1 << 0) #define HDMI_HBLANK_A 0x3100 #define HDMI_AUDIO_CTRL 0x4000 #define HDMI_ENABLE_AUDIO (1 << 0) #define PCH_HTOTAL_B 0x3100 #define PCH_HBLANK_B 0x3104 #define PCH_HSYNC_B 0x3108 #define PCH_VTOTAL_B 0x310C #define PCH_VBLANK_B 0x3110 #define PCH_VSYNC_B 0x3114 #define PCH_PIPEBSRC 0x311C #define PCH_PIPEB_DSL 0x3800 #define PCH_PIPEB_SLC 0x3804 #define PCH_PIPEBCONF 0x3808 #define PCH_PIPEBSTAT 0x3824 #define CDVO_DFT 0x5000 #define CDVO_SLEWRATE 0x5004 #define CDVO_STRENGTH 0x5008 #define CDVO_RCOMP 0x500C #define DPLL_CTRL 0x6000 #define DPLL_PDIV_SHIFT 16 #define DPLL_PDIV_MASK (0xf << 16) #define DPLL_PWRDN (1 << 4) #define DPLL_RESET (1 << 3) #define DPLL_FASTEN (1 << 2) #define DPLL_ENSTAT (1 << 1) #define DPLL_DITHEN (1 << 0) #define DPLL_DIV_CTRL 0x6004 #define DPLL_CLKF_MASK 0xffffffc0 #define DPLL_CLKR_MASK (0x3f) #define DPLL_CLK_ENABLE 0x6008 #define DPLL_EN_DISP (1 << 31) #define DPLL_SEL_HDMI (1 << 8) #define DPLL_EN_HDMI (1 << 1) #define DPLL_EN_VGA (1 << 0) #define DPLL_ADJUST 0x600C #define DPLL_STATUS 0x6010 #define DPLL_UPDATE 0x6014 #define DPLL_DFT 0x6020 struct intel_range { int min, max; }; struct oaktrail_hdmi_limit { struct intel_range vco, np, nr, nf; }; struct oaktrail_hdmi_clock { int np; int nr; int nf; int dot; }; #define VCO_MIN 320000 #define VCO_MAX 1650000 #define NP_MIN 1 #define NP_MAX 15 #define 
NR_MIN 1 #define NR_MAX 64 #define NF_MIN 2 #define NF_MAX 4095 static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = { .vco = { .min = VCO_MIN, .max = VCO_MAX }, .np = { .min = NP_MIN, .max = NP_MAX }, .nr = { .min = NR_MIN, .max = NR_MAX }, .nf = { .min = NF_MIN, .max = NF_MAX }, }; static void oaktrail_hdmi_audio_enable(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; HDMI_WRITE(HDMI_HCR, 0x67); HDMI_READ(HDMI_HCR); HDMI_WRITE(0x51a8, 0x10); HDMI_READ(0x51a8); HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1); HDMI_READ(HDMI_AUDIO_CTRL); } static void oaktrail_hdmi_audio_disable(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; HDMI_WRITE(0x51a8, 0x0); HDMI_READ(0x51a8); HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0); HDMI_READ(HDMI_AUDIO_CTRL); HDMI_WRITE(HDMI_HCR, 0x47); HDMI_READ(HDMI_HCR); } static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) { static int dpms_mode = -1; struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; u32 temp; if (dpms_mode == mode) return; if (mode != DRM_MODE_DPMS_ON) temp = 0x0; else temp = 0x99; dpms_mode = mode; HDMI_WRITE(HDMI_VIDEO_REG, temp); } static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; if (mode->clock > 165000) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_LOW; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* We assume worst case scenario of 32 bpp here, since we don't know */ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) > dev_priv->vram_stolen_size) return MODE_MEM; return MODE_OK; } static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct 
drm_display_mode *adjusted_mode) { return true; } static enum drm_connector_status oaktrail_hdmi_detect(struct drm_connector *connector, bool force) { enum drm_connector_status status; struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; u32 temp; temp = HDMI_READ(HDMI_HSR); DRM_DEBUG_KMS("HDMI_HSR %x\n", temp); if ((temp & HDMI_DETECT_HDP) != 0) status = connector_status_connected; else status = connector_status_disconnected; return status; } static const unsigned char raw_edid[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0, 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78, 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5, 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35, 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44, 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d }; static int oaktrail_hdmi_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct i2c_adapter *i2c_adap; struct edid *edid; struct drm_display_mode *mode, *t; int i = 0, ret = 0; i2c_adap = i2c_get_adapter(3); if (i2c_adap == NULL) { DRM_ERROR("No ddc adapter available!\n"); edid = (struct edid *)raw_edid; } else { edid = (struct edid *)raw_edid; /* FIXME ? 
edid = drm_get_edid(connector, i2c_adap); */ } if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); connector->display_info.raw_edid = NULL; } /* * prune modes that require frame buffer bigger than stolen mem */ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) { i++; drm_mode_remove(connector, mode); } } return ret - i; } static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; oaktrail_hdmi_audio_enable(dev); return; } static void oaktrail_hdmi_destroy(struct drm_connector *connector) { return; } static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { .dpms = oaktrail_hdmi_dpms, .mode_fixup = oaktrail_hdmi_mode_fixup, .prepare = psb_intel_encoder_prepare, .mode_set = oaktrail_hdmi_mode_set, .commit = psb_intel_encoder_commit, }; static const struct drm_connector_helper_funcs oaktrail_hdmi_connector_helper_funcs = { .get_modes = oaktrail_hdmi_get_modes, .mode_valid = oaktrail_hdmi_mode_valid, .best_encoder = psb_intel_best_encoder, }; static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = oaktrail_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = oaktrail_hdmi_destroy, }; static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = { .destroy = oaktrail_hdmi_enc_destroy, }; void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; struct psb_intel_connector *psb_intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), 
GFP_KERNEL); if (!psb_intel_encoder) return; psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); if (!psb_intel_connector) goto failed_connector; connector = &psb_intel_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &oaktrail_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); drm_encoder_init(dev, encoder, &oaktrail_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs); drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; drm_sysfs_connector_add(connector); return; failed_connector: kfree(psb_intel_encoder); } static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) }, { 0 } }; void oaktrail_hdmi_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct pci_dev *pdev; struct oaktrail_hdmi_dev *hdmi_dev; int ret; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL); if (!pdev) return; hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL); if (!hdmi_dev) { dev_err(dev->dev, "failed to allocate memory\n"); goto out; } ret = pci_enable_device(pdev); if (ret) { dev_err(dev->dev, "failed to enable hdmi controller\n"); goto free; } hdmi_dev->mmio = pci_resource_start(pdev, 0); hdmi_dev->mmio_len = pci_resource_len(pdev, 0); hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len); if (!hdmi_dev->regs) { dev_err(dev->dev, "failed to map hdmi mmio\n"); goto free; } hdmi_dev->dev = pdev; pci_set_drvdata(pdev, hdmi_dev); /* Initialize i2c controller */ ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev); if (ret) dev_err(dev->dev, "HDMI I2C initialization failed\n"); dev_priv->hdmi_priv = hdmi_dev; 
oaktrail_hdmi_audio_disable(dev); return; free: kfree(hdmi_dev); out: return; } void oaktrail_hdmi_teardown(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; struct pci_dev *pdev; if (hdmi_dev) { pdev = hdmi_dev->dev; pci_set_drvdata(pdev, NULL); oaktrail_hdmi_i2c_exit(pdev); iounmap(hdmi_dev->regs); kfree(hdmi_dev); pci_dev_put(pdev); } } /* save HDMI register state */ void oaktrail_hdmi_save(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; struct psb_state *regs = &dev_priv->regs.psb; int i; /* dpll */ hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL); hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL); hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST); hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE); hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE); /* pipe B */ regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF); regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC); regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B); regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B); regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B); regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B); regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B); regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B); hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF); hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC); hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B); hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B); hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B); hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B); hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B); hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B); /* plane */ regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR); regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE); regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE); regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF); regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF); 
regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF); /* cursor B */ regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR); regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE); regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS); /* save palette */ for (i = 0; i < 256; i++) regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2)); } /* restore HDMI register state */ void oaktrail_hdmi_restore(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; struct psb_state *regs = &dev_priv->regs.psb; int i; /* dpll */ PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL); PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL); PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST); PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE); PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE); DRM_UDELAY(150); /* pipe */ PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC); PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B); PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B); PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B); PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B); PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B); PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B); PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC); PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B); PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B); PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B); PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B); PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B); PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B); PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF); PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF); /* plane */ PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF); PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE); PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF); PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR); PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF); /* cursor B */ PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR); PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS); 
PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE); /* restore palette */ for (i = 0; i < 256; i++) PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2)); }
gpl-2.0
nicholaschw/jared-rA
drivers/staging/speakup/varhandlers.c
7567
9472
#include <linux/ctype.h> #include "spk_types.h" #include "spk_priv.h" #include "speakup.h" static struct st_var_header var_headers[] = { { "version", VERSION, VAR_PROC, NULL, NULL }, { "synth_name", SYNTH, VAR_PROC, NULL, NULL }, { "keymap", KEYMAP, VAR_PROC, NULL, NULL }, { "silent", SILENT, VAR_PROC, NULL, NULL }, { "punc_some", PUNC_SOME, VAR_PROC, NULL, NULL }, { "punc_most", PUNC_MOST, VAR_PROC, NULL, NULL }, { "punc_all", PUNC_ALL, VAR_PROC, NULL, NULL }, { "delimiters", DELIM, VAR_PROC, NULL, NULL }, { "repeats", REPEATS, VAR_PROC, NULL, NULL }, { "ex_num", EXNUMBER, VAR_PROC, NULL, NULL }, { "characters", CHARS, VAR_PROC, NULL, NULL }, { "synth_direct", SYNTH_DIRECT, VAR_PROC, NULL, NULL }, { "caps_start", CAPS_START, VAR_STRING, str_caps_start, NULL }, { "caps_stop", CAPS_STOP, VAR_STRING, str_caps_stop, NULL }, { "delay_time", DELAY, VAR_TIME, NULL, NULL }, { "trigger_time", TRIGGER, VAR_TIME, NULL, NULL }, { "jiffy_delta", JIFFY, VAR_TIME, NULL, NULL }, { "full_time", FULL, VAR_TIME, NULL, NULL }, { "spell_delay", SPELL_DELAY, VAR_NUM, &spell_delay, NULL }, { "bleeps", BLEEPS, VAR_NUM, &bleeps, NULL }, { "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &attrib_bleep, NULL }, { "bleep_time", BLEEP_TIME, VAR_TIME, &bleep_time, NULL }, { "cursor_time", CURSOR_TIME, VAR_TIME, NULL, NULL }, { "punc_level", PUNC_LEVEL, VAR_NUM, &punc_level, NULL }, { "reading_punc", READING_PUNC, VAR_NUM, &reading_punc, NULL }, { "say_control", SAY_CONTROL, VAR_NUM, &say_ctrl, NULL }, { "say_word_ctl", SAY_WORD_CTL, VAR_NUM, &say_word_ctl, NULL }, { "no_interrupt", NO_INTERRUPT, VAR_NUM, &no_intr, NULL }, { "key_echo", KEY_ECHO, VAR_NUM, &key_echo, NULL }, { "bell_pos", BELL_POS, VAR_NUM, &bell_pos, NULL }, { "rate", RATE, VAR_NUM, NULL, NULL }, { "pitch", PITCH, VAR_NUM, NULL, NULL }, { "vol", VOL, VAR_NUM, NULL, NULL }, { "tone", TONE, VAR_NUM, NULL, NULL }, { "punct", PUNCT, VAR_NUM, NULL, NULL }, { "voice", VOICE, VAR_NUM, NULL, NULL }, { "freq", FREQUENCY, VAR_NUM, NULL, NULL }, { 
"lang", LANG, VAR_NUM, NULL, NULL }, { "chartab", CHARTAB, VAR_PROC, NULL, NULL }, { "direct", DIRECT, VAR_NUM, NULL, NULL }, }; static struct st_var_header *var_ptrs[MAXVARS] = { 0, 0, 0 }; static struct punc_var_t punc_vars[] = { { PUNC_SOME, 1 }, { PUNC_MOST, 2 }, { PUNC_ALL, 3 }, { DELIM, 4 }, { REPEATS, 5 }, { EXNUMBER, 6 }, { -1, -1 }, }; int chartab_get_value(char *keyword) { int value = 0; if (!strcmp(keyword, "ALPHA")) value = ALPHA; else if (!strcmp(keyword, "B_CTL")) value = B_CTL; else if (!strcmp(keyword, "WDLM")) value = WDLM; else if (!strcmp(keyword, "A_PUNC")) value = A_PUNC; else if (!strcmp(keyword, "PUNC")) value = PUNC; else if (!strcmp(keyword, "NUM")) value = NUM; else if (!strcmp(keyword, "A_CAP")) value = A_CAP; else if (!strcmp(keyword, "B_CAPSYM")) value = B_CAPSYM; else if (!strcmp(keyword, "B_SYM")) value = B_SYM; return value; } void speakup_register_var(struct var_t *var) { static char nothing[2] = "\0"; int i; struct st_var_header *p_header; BUG_ON(!var || var->var_id < 0 || var->var_id >= MAXVARS); if (var_ptrs[0] == NULL) { for (i = 0; i < MAXVARS; i++) { p_header = &var_headers[i]; var_ptrs[p_header->var_id] = p_header; p_header->data = NULL; } } p_header = var_ptrs[var->var_id]; if (p_header->data != NULL) return; p_header->data = var; switch (p_header->var_type) { case VAR_STRING: set_string_var(nothing, p_header, 0); break; case VAR_NUM: case VAR_TIME: set_num_var(0, p_header, E_DEFAULT); break; default: break; } return; } void speakup_unregister_var(enum var_id_t var_id) { struct st_var_header *p_header; BUG_ON(var_id < 0 || var_id >= MAXVARS); p_header = var_ptrs[var_id]; p_header->data = NULL; } struct st_var_header *get_var_header(enum var_id_t var_id) { struct st_var_header *p_header; if (var_id < 0 || var_id >= MAXVARS) return NULL; p_header = var_ptrs[var_id]; if (p_header->data == NULL) return NULL; return p_header; } struct st_var_header *var_header_by_name(const char *name) { int i; struct st_var_header *where = NULL; 
if (name != NULL) { i = 0; while ((i < MAXVARS) && (where == NULL)) { if (strcmp(name, var_ptrs[i]->name) == 0) where = var_ptrs[i]; else i++; } } return where; } struct var_t *get_var(enum var_id_t var_id) { BUG_ON(var_id < 0 || var_id >= MAXVARS); BUG_ON(!var_ptrs[var_id]); return var_ptrs[var_id]->data; } EXPORT_SYMBOL_GPL(get_var); struct punc_var_t *get_punc_var(enum var_id_t var_id) { struct punc_var_t *rv = NULL; struct punc_var_t *where; where = punc_vars; while ((where->var_id != -1) && (rv == NULL)) { if (where->var_id == var_id) rv = where; else where++; } return rv; } /* handlers for setting vars */ int set_num_var(int input, struct st_var_header *var, int how) { int val; short ret = 0; int *p_val = var->p_val; int l; char buf[32]; char *cp; struct var_t *var_data = var->data; if (var_data == NULL) return E_UNDEF; if (how == E_NEW_DEFAULT) { if (input < var_data->u.n.low || input > var_data->u.n.high) ret = E_RANGE; else var_data->u.n.default_val = input; return ret; } if (how == E_DEFAULT) { val = var_data->u.n.default_val; ret = SET_DEFAULT; } else { if (how == E_SET) val = input; else val = var_data->u.n.value; if (how == E_INC) val += input; else if (how == E_DEC) val -= input; if (val < var_data->u.n.low || val > var_data->u.n.high) return E_RANGE; } var_data->u.n.value = val; if (var->var_type == VAR_TIME && p_val != NULL) { *p_val = msecs_to_jiffies(val); return ret; } if (p_val != NULL) *p_val = val; if (var->var_id == PUNC_LEVEL) { punc_mask = punc_masks[val]; return ret; } if (var_data->u.n.multiplier != 0) val *= var_data->u.n.multiplier; val += var_data->u.n.offset; if (var->var_id < FIRST_SYNTH_VAR || synth == NULL) return ret; if (synth->synth_adjust != NULL) { int status = synth->synth_adjust(var); return (status != 0) ? 
status : ret; } if (!var_data->u.n.synth_fmt) return ret; if (var->var_id == PITCH) cp = pitch_buff; else cp = buf; if (!var_data->u.n.out_str) l = sprintf(cp, var_data->u.n.synth_fmt, (int)val); else l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]); synth_printf("%s", cp); return ret; } int set_string_var(const char *page, struct st_var_header *var, int len) { int ret = 0; struct var_t *var_data = var->data; if (var_data == NULL) return E_UNDEF; if (len > MAXVARLEN) return -E_TOOLONG; if (!len) { if (!var_data->u.s.default_val) return 0; ret = SET_DEFAULT; if (!var->p_val) var->p_val = var_data->u.s.default_val; if (var->p_val != var_data->u.s.default_val) strcpy((char *)var->p_val, var_data->u.s.default_val); } else if (var->p_val) strcpy((char *)var->p_val, page); else return -E_TOOLONG; return ret; } /* set_mask_bits sets or clears the punc/delim/repeat bits, * if input is null uses the defaults. * values for how: 0 clears bits of chars supplied, * 1 clears allk, 2 sets bits for chars */ int set_mask_bits(const char *input, const int which, const int how) { u_char *cp; short mask = punc_info[which].mask; if (how&1) { for (cp = (u_char *)punc_info[3].value; *cp; cp++) spk_chartab[*cp] &= ~mask; } cp = (u_char *)input; if (cp == 0) cp = punc_info[which].value; else { for ( ; *cp; cp++) { if (*cp < SPACE) break; if (mask < PUNC) { if (!(spk_chartab[*cp]&PUNC)) break; } else if (spk_chartab[*cp]&B_NUM) break; } if (*cp) return -EINVAL; cp = (u_char *)input; } if (how&2) { for ( ; *cp; cp++) if (*cp > SPACE) spk_chartab[*cp] |= mask; } else { for ( ; *cp; cp++) if (*cp > SPACE) spk_chartab[*cp] &= ~mask; } return 0; } char *strlwr(char *s) { char *p; if (s == NULL) return NULL; for (p = s; *p; p++) *p = tolower(*p); return s; } char *speakup_s2i(char *start, int *dest) { int val; char ch = *start; if (ch == '-' || ch == '+') start++; if (*start < '0' || *start > '9') return start; val = (*start) - '0'; start++; while (*start >= '0' && *start <= 
'9') { val *= 10; val += (*start) - '0'; start++; } if (ch == '-') *dest = -val; else *dest = val; return start; } char *s2uchar(char *start, char *dest) { int val = 0; while (*start && *start <= SPACE) start++; while (*start >= '0' && *start <= '9') { val *= 10; val += (*start) - '0'; start++; } if (*start == ',') start++; *dest = (u_char)val; return start; } char *xlate(char *s) { static const char finds[] = "nrtvafe"; static const char subs[] = "\n\r\t\013\001\014\033"; static const char hx[] = "0123456789abcdefABCDEF"; char *p = s, *p1, *p2, c; int num; while ((p = strchr(p, '\\'))) { p1 = p+1; p2 = strchr(finds, *p1); if (p2) { *p++ = subs[p2-finds]; p1++; } else if (*p1 >= '0' && *p1 <= '7') { num = (*p1++)&7; while (num < 256 && *p1 >= '0' && *p1 <= '7') { num <<= 3; num = (*p1++)&7; } *p++ = num; } else if (*p1 == 'x' && strchr(hx, p1[1]) && strchr(hx, p1[2])) { p1++; c = *p1++; if (c > '9') c = (c - '7') & 0x0f; else c -= '0'; num = c << 4; c = *p1++; if (c > '9') c = (c-'7')&0x0f; else c -= '0'; num += c; *p++ = num; } else *p++ = *p1++; p2 = p; while (*p1) *p2++ = *p1++; *p2 = '\0'; } return s; }
gpl-2.0
TheKang/kernel_lge_hammerhead
net/phonet/sysctl.c
8591
2857
/* * File: sysctl.c * * Phonet /proc/sys/net/phonet interface implementation * * Copyright (C) 2008 Nokia Corporation. * * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/seqlock.h> #include <linux/sysctl.h> #include <linux/errno.h> #include <linux/init.h> #define DYNAMIC_PORT_MIN 0x40 #define DYNAMIC_PORT_MAX 0x7f static DEFINE_SEQLOCK(local_port_range_lock); static int local_port_range_min[2] = {0, 0}; static int local_port_range_max[2] = {1023, 1023}; static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX}; static struct ctl_table_header *phonet_table_hrd; static void set_local_port_range(int range[2]) { write_seqlock(&local_port_range_lock); local_port_range[0] = range[0]; local_port_range[1] = range[1]; write_sequnlock(&local_port_range_lock); } void phonet_get_local_port_range(int *min, int *max) { unsigned seq; do { seq = read_seqbegin(&local_port_range_lock); if (min) *min = local_port_range[0]; if (max) *max = local_port_range[1]; } while (read_seqretry(&local_port_range_lock, seq)); } static int proc_local_port_range(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; int range[2] = {local_port_range[0], local_port_range[1]}; ctl_table tmp = { .data = &range, .maxlen = sizeof(range), .mode = table->mode, .extra1 = &local_port_range_min, .extra2 = 
&local_port_range_max, }; ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) { if (range[1] < range[0]) ret = -EINVAL; else set_local_port_range(range); } return ret; } static struct ctl_table phonet_table[] = { { .procname = "local_port_range", .data = &local_port_range, .maxlen = sizeof(local_port_range), .mode = 0644, .proc_handler = proc_local_port_range, }, { } }; static struct ctl_path phonet_ctl_path[] = { { .procname = "net", }, { .procname = "phonet", }, { }, }; int __init phonet_sysctl_init(void) { phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); return phonet_table_hrd == NULL ? -ENOMEM : 0; } void phonet_sysctl_exit(void) { unregister_sysctl_table(phonet_table_hrd); }
gpl-2.0
int0x19/android_kernel_xiaomi_msm8992
drivers/gpio/gpio-it8761e.c
8591
4921
/* * GPIO interface for IT8761E Super I/O chip * * Author: Denis Turischev <denis@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/gpio.h> #define SIO_CHIP_ID 0x8761 #define CHIP_ID_HIGH_BYTE 0x20 #define CHIP_ID_LOW_BYTE 0x21 static u8 ports[2] = { 0x2e, 0x4e }; static u8 port; static DEFINE_SPINLOCK(sio_lock); #define GPIO_NAME "it8761-gpio" #define GPIO_BA_HIGH_BYTE 0x60 #define GPIO_BA_LOW_BYTE 0x61 #define GPIO_IOSIZE 4 #define GPIO1X_IO 0xf0 #define GPIO2X_IO 0xf1 static u16 gpio_ba; static u8 read_reg(u8 addr, u8 port) { outb(addr, port); return inb(port + 1); } static void write_reg(u8 data, u8 addr, u8 port) { outb(addr, port); outb(data, port + 1); } static void enter_conf_mode(u8 port) { outb(0x87, port); outb(0x61, port); outb(0x55, port); outb((port == 0x2e) ? 0x55 : 0xaa, port); } static void exit_conf_mode(u8 port) { outb(0x2, port); outb(0x2, port + 1); } static void enter_gpio_mode(u8 port) { write_reg(0x2, 0x7, port); } static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num) { u16 reg; u8 bit; bit = gpio_num % 8; reg = (gpio_num >= 8) ? 
gpio_ba + 1 : gpio_ba; return !!(inb(reg) & (1 << bit)); } static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num) { u8 curr_dirs; u8 io_reg, bit; bit = gpio_num % 8; io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; spin_lock(&sio_lock); enter_conf_mode(port); enter_gpio_mode(port); curr_dirs = read_reg(io_reg, port); if (curr_dirs & (1 << bit)) write_reg(curr_dirs & ~(1 << bit), io_reg, port); exit_conf_mode(port); spin_unlock(&sio_lock); return 0; } static void it8761e_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_vals, bit; u16 reg; bit = gpio_num % 8; reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba; spin_lock(&sio_lock); curr_vals = inb(reg); if (val) outb(curr_vals | (1 << bit) , reg); else outb(curr_vals & ~(1 << bit), reg); spin_unlock(&sio_lock); } static int it8761e_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_dirs, io_reg, bit; bit = gpio_num % 8; io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; it8761e_gpio_set(gc, gpio_num, val); spin_lock(&sio_lock); enter_conf_mode(port); enter_gpio_mode(port); curr_dirs = read_reg(io_reg, port); if (!(curr_dirs & (1 << bit))) write_reg(curr_dirs | (1 << bit), io_reg, port); exit_conf_mode(port); spin_unlock(&sio_lock); return 0; } static struct gpio_chip it8761e_gpio_chip = { .label = GPIO_NAME, .owner = THIS_MODULE, .get = it8761e_gpio_get, .direction_input = it8761e_gpio_direction_in, .set = it8761e_gpio_set, .direction_output = it8761e_gpio_direction_out, }; static int __init it8761e_gpio_init(void) { int i, id, err; /* chip and port detection */ for (i = 0; i < ARRAY_SIZE(ports); i++) { spin_lock(&sio_lock); enter_conf_mode(ports[i]); id = (read_reg(CHIP_ID_HIGH_BYTE, ports[i]) << 8) + read_reg(CHIP_ID_LOW_BYTE, ports[i]); exit_conf_mode(ports[i]); spin_unlock(&sio_lock); if (id == SIO_CHIP_ID) { port = ports[i]; break; } } if (!port) return -ENODEV; /* fetch GPIO base address */ enter_conf_mode(port); enter_gpio_mode(port); gpio_ba = 
(read_reg(GPIO_BA_HIGH_BYTE, port) << 8) + read_reg(GPIO_BA_LOW_BYTE, port); exit_conf_mode(port); if (!request_region(gpio_ba, GPIO_IOSIZE, GPIO_NAME)) return -EBUSY; it8761e_gpio_chip.base = -1; it8761e_gpio_chip.ngpio = 16; err = gpiochip_add(&it8761e_gpio_chip); if (err < 0) goto gpiochip_add_err; return 0; gpiochip_add_err: release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; return err; } static void __exit it8761e_gpio_exit(void) { if (gpio_ba) { int ret = gpiochip_remove(&it8761e_gpio_chip); WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n", __func__, ret); release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; } } module_init(it8761e_gpio_init); module_exit(it8761e_gpio_exit); MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>"); MODULE_DESCRIPTION("GPIO interface for IT8761E Super I/O chip"); MODULE_LICENSE("GPL");
gpl-2.0
nighthawk149/xpenology-4.2-kernel
drivers/gpio/gpio-it8761e.c
8591
4921
/* * GPIO interface for IT8761E Super I/O chip * * Author: Denis Turischev <denis@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/gpio.h> #define SIO_CHIP_ID 0x8761 #define CHIP_ID_HIGH_BYTE 0x20 #define CHIP_ID_LOW_BYTE 0x21 static u8 ports[2] = { 0x2e, 0x4e }; static u8 port; static DEFINE_SPINLOCK(sio_lock); #define GPIO_NAME "it8761-gpio" #define GPIO_BA_HIGH_BYTE 0x60 #define GPIO_BA_LOW_BYTE 0x61 #define GPIO_IOSIZE 4 #define GPIO1X_IO 0xf0 #define GPIO2X_IO 0xf1 static u16 gpio_ba; static u8 read_reg(u8 addr, u8 port) { outb(addr, port); return inb(port + 1); } static void write_reg(u8 data, u8 addr, u8 port) { outb(addr, port); outb(data, port + 1); } static void enter_conf_mode(u8 port) { outb(0x87, port); outb(0x61, port); outb(0x55, port); outb((port == 0x2e) ? 0x55 : 0xaa, port); } static void exit_conf_mode(u8 port) { outb(0x2, port); outb(0x2, port + 1); } static void enter_gpio_mode(u8 port) { write_reg(0x2, 0x7, port); } static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num) { u16 reg; u8 bit; bit = gpio_num % 8; reg = (gpio_num >= 8) ? 
gpio_ba + 1 : gpio_ba; return !!(inb(reg) & (1 << bit)); } static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num) { u8 curr_dirs; u8 io_reg, bit; bit = gpio_num % 8; io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; spin_lock(&sio_lock); enter_conf_mode(port); enter_gpio_mode(port); curr_dirs = read_reg(io_reg, port); if (curr_dirs & (1 << bit)) write_reg(curr_dirs & ~(1 << bit), io_reg, port); exit_conf_mode(port); spin_unlock(&sio_lock); return 0; } static void it8761e_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_vals, bit; u16 reg; bit = gpio_num % 8; reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba; spin_lock(&sio_lock); curr_vals = inb(reg); if (val) outb(curr_vals | (1 << bit) , reg); else outb(curr_vals & ~(1 << bit), reg); spin_unlock(&sio_lock); } static int it8761e_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_dirs, io_reg, bit; bit = gpio_num % 8; io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; it8761e_gpio_set(gc, gpio_num, val); spin_lock(&sio_lock); enter_conf_mode(port); enter_gpio_mode(port); curr_dirs = read_reg(io_reg, port); if (!(curr_dirs & (1 << bit))) write_reg(curr_dirs | (1 << bit), io_reg, port); exit_conf_mode(port); spin_unlock(&sio_lock); return 0; } static struct gpio_chip it8761e_gpio_chip = { .label = GPIO_NAME, .owner = THIS_MODULE, .get = it8761e_gpio_get, .direction_input = it8761e_gpio_direction_in, .set = it8761e_gpio_set, .direction_output = it8761e_gpio_direction_out, }; static int __init it8761e_gpio_init(void) { int i, id, err; /* chip and port detection */ for (i = 0; i < ARRAY_SIZE(ports); i++) { spin_lock(&sio_lock); enter_conf_mode(ports[i]); id = (read_reg(CHIP_ID_HIGH_BYTE, ports[i]) << 8) + read_reg(CHIP_ID_LOW_BYTE, ports[i]); exit_conf_mode(ports[i]); spin_unlock(&sio_lock); if (id == SIO_CHIP_ID) { port = ports[i]; break; } } if (!port) return -ENODEV; /* fetch GPIO base address */ enter_conf_mode(port); enter_gpio_mode(port); gpio_ba = 
(read_reg(GPIO_BA_HIGH_BYTE, port) << 8) + read_reg(GPIO_BA_LOW_BYTE, port); exit_conf_mode(port); if (!request_region(gpio_ba, GPIO_IOSIZE, GPIO_NAME)) return -EBUSY; it8761e_gpio_chip.base = -1; it8761e_gpio_chip.ngpio = 16; err = gpiochip_add(&it8761e_gpio_chip); if (err < 0) goto gpiochip_add_err; return 0; gpiochip_add_err: release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; return err; } static void __exit it8761e_gpio_exit(void) { if (gpio_ba) { int ret = gpiochip_remove(&it8761e_gpio_chip); WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n", __func__, ret); release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; } } module_init(it8761e_gpio_init); module_exit(it8761e_gpio_exit); MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>"); MODULE_DESCRIPTION("GPIO interface for IT8761E Super I/O chip"); MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/android_kernel_samsung_hlte
lib/mpi/generic_mpih-mul3.c
9871
1977
/* mpihelp-mul_3.c - MPI helper functions * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include "mpi-internal.h" #include "longlong.h" mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb) { mpi_limb_t cy_limb; mpi_size_t j; mpi_limb_t prod_high, prod_low; mpi_limb_t x; /* The loop counter and index J goes from -SIZE to -1. This way * the loop becomes faster. */ j = -s1_size; res_ptr -= j; s1_ptr -= j; cy_limb = 0; do { umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); prod_low += cy_limb; cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high; x = res_ptr[j]; prod_low = x - prod_low; cy_limb += prod_low > x ? 1 : 0; res_ptr[j] = prod_low; } while (++j); return cy_limb; }
gpl-2.0
someone755/android_kernel_sony_msm8974
lib/mpi/generic_mpih-mul1.c
9871
1958
/* mpihelp-mul_1.c - MPI helper functions * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include "mpi-internal.h" #include "longlong.h" mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb) { mpi_limb_t cy_limb; mpi_size_t j; mpi_limb_t prod_high, prod_low; /* The loop counter and index J goes from -S1_SIZE to -1. This way * the loop becomes faster. */ j = -s1_size; /* Offset the base pointers to compensate for the negative indices. */ s1_ptr -= j; res_ptr -= j; cy_limb = 0; do { umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); prod_low += cy_limb; cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high; res_ptr[j] = prod_low; } while (++j); return cy_limb; }
gpl-2.0
nismoryco/kernel-msm
lib/mpi/mpih-cmp.c
9871
2021
/* mpihelp-sub.c - MPI helper functions * Copyright (C) 1994, 1996 Free Software Foundation, Inc. * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include "mpi-internal.h" /**************** * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE. * There are no restrictions on the relative sizes of * the two arguments. * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2. */ int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size) { mpi_size_t i; mpi_limb_t op1_word, op2_word; for (i = size - 1; i >= 0; i--) { op1_word = op1_ptr[i]; op2_word = op2_ptr[i]; if (op1_word != op2_word) goto diff; } return 0; diff: /* This can *not* be simplified to * op2_word - op2_word * since that expression might give signed overflow. */ return (op1_word > op2_word) ? 1 : -1; }
gpl-2.0
vitek999/Lenovo-a328
drivers/net/fddi/skfp/smt.c
10639
52521
/******************************************************************************
 *
 * (C)Copyright 1998,1999 SysKonnect,
 * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
 *
 * See the file "skfddi.c" for further information.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * The information in this file is provided "AS IS" without warranty.
 *
 ******************************************************************************/

/*
 * FDDI Station Management (SMT) agent: builds, sends and dispatches SMT
 * frames (NIF/SIF/ECF/RDF/PMF/...) and maintains the related MIB state.
 */

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smt_p.h"
#include <linux/bitrev.h>
#include <linux/kernel.h>

#define KERNEL
#include "h/smtstate.h"

#ifndef	lint
static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
#endif

/*
 * FC in SMbuf
 * (frame-control byte is the first data byte of an SMT mbuf)
 */
#define m_fc(mb)	((mb)->sm_data[0])

#define SMT_TID_MAGIC	0x1f0a7b3c

#ifdef	DEBUG
/* printable names indexed by FC low nibble / SMT class, for DB_SMT traces */
static const char *const smt_type_name[] = {
	"SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??",
	"SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??",
	"SMT_08??", "SMT_09??", "SMT_0A??", "SMT_0B??",
	"SMT_0C??", "SMT_0D??", "SMT_0E??", "SMT_NSA"
} ;

static const char *const smt_class_name[] = {
	"UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF",
	"SRF","PMF_GET","PMF_SET","ESF"
} ;
#endif
#define LAST_CLASS	(SMT_PMF_SET)

/* sentinel neighbor address meaning "no known neighbor" */
static const struct fddi_addr SMT_Unknown = {
	{ 0,0,0x1f,0,0,0 }
} ;

/*
 * function prototypes
 */
#ifdef	LITTLE_ENDIAN
static int smt_swap_short(u_short s);
#endif
static int mac_index(struct s_smc *smc, int mac);
static int phy_index(struct s_smc *smc, int phy);
static int mac_con_resource_index(struct s_smc *smc, int mac);
static int phy_con_resource_index(struct s_smc *smc, int phy);
static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
			 int local);
static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest,
			 int fc, u_long tid, int type, int local);
static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
			 u_long tid, int type, int len);
static void smt_echo_test(struct s_smc *smc, int dna);
static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
				u_long tid, int local);
static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
				   u_long tid, int local);
#ifdef	LITTLE_ENDIAN
static void smt_string_swap(char *data, const char *format, int len);
#endif
static void smt_add_frame_len(SMbuf *mb, int len);
static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una);
static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde);
static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state);
static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts);
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy);
static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency);
static void smt_fill_neighbor(struct s_smc *smc,
			      struct smt_p_neighbor *neighbor);
static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path);
static void smt_fill_mac_status(struct s_smc *smc,
				struct smt_p_mac_status *st);
static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy);
static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers);
static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc);
static void smt_fill_mac_counter(struct s_smc *smc,
				 struct smt_p_mac_counter *mc);
static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc);
static void smt_fill_manufacturer(struct s_smc *smc,
				  struct smp_p_manufacturer *man);
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user);
static void smt_fill_setcount(struct s_smc *smc,
			      struct smt_p_setcount *setcount);
static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo,
			  u_long seed, int len);

static void smt_clear_una_dna(struct s_smc *smc);
static void smt_clear_old_una_dna(struct s_smc *smc);
#ifdef	CONCENTRATOR
static int entity_to_index(void);
#endif
static void update_dac(struct s_smc *smc, int report);
static int div_ratio(u_long upper, u_long lower);
#ifdef	USE_CAN_ADDR
static void hwm_conv_can(struct s_smc *smc, char *data, int len);
#else
#define hwm_conv_can(smc,data,len)	/* no canonical/MSB conversion needed */
#endif

/*
 * Address-compare helpers. FDDI addresses are 6 bytes; they are compared
 * as three 16-bit words. NOTE(review): this assumes 2-byte alignment of
 * struct fddi_addr — presumably guaranteed by the surrounding structs.
 */

/* true if addr is this station's own SMT MAC address */
static inline int is_my_addr(const struct s_smc *smc, 
	const struct fddi_addr *addr)
{
	return(*(short *)(&addr->a[0]) ==
		*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[0]) &&
		*(short *)(&addr->a[2]) ==
		*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[2]) &&
		*(short *)(&addr->a[4]) ==
		*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[4])) ;
}

/* true if addr is the all-ones broadcast address */
static inline int is_broadcast(const struct fddi_addr *addr)
{
	return *(u_short *)(&addr->a[0]) == 0xffff &&
	       *(u_short *)(&addr->a[2]) == 0xffff &&
	       *(u_short *)(&addr->a[4]) == 0xffff;
}

/* true if addr is an individual (non-group) address */
static inline int is_individual(const struct fddi_addr *addr)
{
	return !(addr->a[0] & GROUP_ADDR);
}

/* true if both 6-byte addresses are identical */
static inline int is_equal(const struct fddi_addr *addr1, 
	const struct fddi_addr *addr2)
{
	return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
	       *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
	       *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]);
}

/*
 * list of mandatory paras in frames
 * (zero-terminated; checked against received NIFs by smt_check_para)
 */
static const u_short plist_nif[] = { SMT_P_UNA,SMT_P_SDE,SMT_P_STATE,0 } ;

/*
 * init SMT agent
 * Seeds the MIB with the hardware MAC address and station ID (BIA bytes
 * are bit-reversed via bitrev8), and resets all pending-TID/timer state.
 */
void smt_agent_init(struct s_smc *smc)
{
	int		i ;

	/*
	 * get MAC address
	 */
	smc->mib.m[MAC0].fddiMACSMTAddress = smc->hw.fddi_home_addr ;

	/*
	 * get OUI address from driver (bia == built-in-address)
	 */
	smc->mib.fddiSMTStationId.sid_oem[0] = 0 ;
	smc->mib.fddiSMTStationId.sid_oem[1] = 0 ;
	driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
	for (i = 0 ; i < 6 ; i ++) {
		/* bit-reverse each BIA byte (canonical <-> MSB order) */
		smc->mib.fddiSMTStationId.sid_node.a[i] =
			bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
	}
	smc->mib.fddiSMTManufacturerData[0] =
		smc->mib.fddiSMTStationId.sid_node.a[0] ;
	smc->mib.fddiSMTManufacturerData[1] =
		smc->mib.fddiSMTStationId.sid_node.a[1] ;
	smc->mib.fddiSMTManufacturerData[2] =
		smc->mib.fddiSMTStationId.sid_node.a[2] ;

	smc->sm.smt_tid = 0 ;
	smc->mib.m[MAC0].fddiMACDupAddressTest = DA_NONE ;
	smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
#ifndef	SLIM_SMT
	smt_clear_una_dna(smc) ;
	smt_clear_old_una_dna(smc) ;
#endif
	for (i = 0 ; i < SMT_MAX_TEST ; i++)
		smc->sm.pend[i] = 0 ;
	smc->sm.please_reconnect = 0 ;
	smc->sm.uniq_ticks = 0 ;
}

/*
 * SMT task
 * forever
 *	delay 30 seconds
 *	send NIF
 *	check tvu & tvd
 * end
 * (arms the periodic SMT timer; smt_event() is the timer's handler)
 */
void smt_agent_task(struct s_smc *smc)
{
	smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
		EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
	DB_SMT("SMT agent task\n",0,0) ;
}

#ifndef SMT_REAL_TOKEN_CT
/*
 * Estimate fddiMACToken_Ct in software when the hardware has no token
 * counter: scale elapsed ticks by 100 (tokens per tick — TODO confirm
 * against the ring's actual rotation rate).
 */
void smt_emulate_token_ct(struct s_smc *smc, int mac_index)
{
	u_long	count;
	u_long	time;

	time = smt_get_time();
	count = ((time - smc->sm.last_tok_time[mac_index]) *
					100)/TICKS_PER_SECOND;

	/*
	 * Only when ring is up we will have a token count. The
	 * flag is unfortunately a single instance value. This
	 * doesn't matter now, because we currently have only
	 * one MAC instance.
	 */
	if (smc->hw.mac_ring_is_up){
		smc->mib.m[mac_index].fddiMACToken_Ct += count;
	}

	/* Remember current time */
	smc->sm.last_tok_time[mac_index] = time;
}
#endif

/*
 * Periodic SMT housekeeping, driven by the timer armed in smt_agent_task:
 * reconnect countdown, watchdog/timer polling, LEM + MIB condition checks
 * (every 8 s), periodic NIF announcements, UNA/DNA expiry (228 s), and
 * token-count emulation. Re-arms its own timer at the end.
 */
/*ARGSUSED1*/
void smt_event(struct s_smc *smc, int event)
{
	u_long		time ;
#ifndef SMT_REAL_TOKEN_CT
	int		i ;
#endif

	if (smc->sm.please_reconnect) {
		smc->sm.please_reconnect -- ;
		if (smc->sm.please_reconnect == 0) {
			/* Counted down */
			queue_event(smc,EVENT_ECM,EC_CONNECT) ;
		}
	}

	if (event == SM_FAST)
		return ;

	/*
	 * timer for periodic cleanup in driver
	 * reset and start the watchdog (FM2)
	 * ESS timer
	 * SBA timer
	 */
	smt_timer_poll(smc) ;
	smt_start_watchdog(smc) ;
#ifndef	SLIM_SMT
#ifndef BOOT
#ifdef	ESS
	ess_timer_poll(smc) ;
#endif
#endif
#ifdef	SBA
	sba_timer_poll(smc) ;
#endif

	smt_srf_event(smc,0,0,0) ;

#endif	/* no SLIM_SMT */

	time = smt_get_time() ;

	if (time - smc->sm.smt_last_lem >= TICKS_PER_SECOND*8) {
		/*
		 * Use 8 sec. for the time intervall, it simplifies the
		 * LER estimation.
		 */
		struct fddi_mib_m	*mib ;
		u_long			upper ;
		u_long			lower ;
		int			cond ;
		int			port;
		struct s_phy		*phy ;

		/*
		 * calculate LEM bit error rate
		 */
		sm_lem_evaluate(smc) ;
		smc->sm.smt_last_lem = time ;

		/*
		 * check conditions
		 */
#ifndef	SLIM_SMT
		mac_update_counter(smc) ;
		mib = smc->mib.m ;

		/* frame error ratio = (lost+error deltas)/(frame+lost deltas) */
		upper =
		(mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) +
		(mib->fddiMACError_Ct - mib->fddiMACOld_Error_Ct) ;
		lower =
		(mib->fddiMACFrame_Ct - mib->fddiMACOld_Frame_Ct) +
		(mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) ;
		mib->fddiMACFrameErrorRatio = div_ratio(upper,lower) ;

		/* threshold 0 means "any new error asserts the condition" */
		cond =
			((!mib->fddiMACFrameErrorThreshold &&
			mib->fddiMACError_Ct != mib->fddiMACOld_Error_Ct) ||
			(mib->fddiMACFrameErrorRatio >
			mib->fddiMACFrameErrorThreshold)) ;
		if (cond != mib->fddiMACFrameErrorFlag)
			smt_srf_event(smc,SMT_COND_MAC_FRAME_ERROR,
				INDEX_MAC,cond) ;

		/* not-copied ratio = not-copied delta / (not-copied+copied) */
		upper =
		(mib->fddiMACNotCopied_Ct - mib->fddiMACOld_NotCopied_Ct) ;
		lower =
		upper +
		(mib->fddiMACCopied_Ct - mib->fddiMACOld_Copied_Ct) ;
		mib->fddiMACNotCopiedRatio = div_ratio(upper,lower) ;

		cond =
			((!mib->fddiMACNotCopiedThreshold &&
			mib->fddiMACNotCopied_Ct !=
				mib->fddiMACOld_NotCopied_Ct)||
			(mib->fddiMACNotCopiedRatio >
			mib->fddiMACNotCopiedThreshold)) ;
		if (cond != mib->fddiMACNotCopiedFlag)
			smt_srf_event(smc,SMT_COND_MAC_NOT_COPIED,
				INDEX_MAC,cond) ;

		/*
		 * set old values
		 */
		mib->fddiMACOld_Frame_Ct = mib->fddiMACFrame_Ct ;
		mib->fddiMACOld_Copied_Ct = mib->fddiMACCopied_Ct ;
		mib->fddiMACOld_Error_Ct = mib->fddiMACError_Ct ;
		mib->fddiMACOld_Lost_Ct = mib->fddiMACLost_Ct ;
		mib->fddiMACOld_NotCopied_Ct = mib->fddiMACNotCopied_Ct ;

		/*
		 * Check port EBError Condition
		 */
		for (port = 0; port < NUMPHYS; port ++) {
			phy = &smc->y[port] ;

			if (!phy->mib->fddiPORTHardwarePresent) {
				continue;
			}

			cond = (phy->mib->fddiPORTEBError_Ct -
				phy->mib->fddiPORTOldEBError_Ct > 5) ;

			/* If ratio is more than 5 in 8 seconds
			 * Set the condition.
			 */
			smt_srf_event(smc,SMT_COND_PORT_EB_ERROR,
				(int) (INDEX_PORT+ phy->np) ,cond) ;

			/*
			 * set old values
			 */
			phy->mib->fddiPORTOldEBError_Ct =
				phy->mib->fddiPORTEBError_Ct ;
		}

#endif	/* no SLIM_SMT */
	}

#ifndef	SLIM_SMT

	if (time - smc->sm.smt_last_notify >= (u_long)
		(smc->mib.fddiSMTTT_Notify * TICKS_PER_SECOND) ) {
		/*
		 * we can either send an announcement or a request
		 * a request will trigger a reply so that we can update
		 * our dna
		 * note: same tid must be used until reply is received
		 */
		if (!smc->sm.pend[SMT_TID_NIF])
			smc->sm.pend[SMT_TID_NIF] = smt_get_tid(smc) ;
		smt_send_nif(smc,&fddi_broadcast, FC_SMT_NSA,
			smc->sm.pend[SMT_TID_NIF], SMT_REQUEST,0) ;
		smc->sm.smt_last_notify = time ;
	}

	/*
	 * check timer
	 * (228 s without a NIF from the neighbor => neighbor unknown)
	 */
	if (smc->sm.smt_tvu &&
	    time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) {
		DB_SMT("SMT : UNA expired\n",0,0) ;
		smc->sm.smt_tvu = 0 ;

		if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr,
			&SMT_Unknown)){
			/* Do not update unknown address */
			smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
				smc->mib.m[MAC0].fddiMACUpstreamNbr ;
		}
		smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ;
		smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
		/*
		 * Make sure the fddiMACUNDA_Flag = FALSE is
		 * included in the SRF so we don't generate
		 * a separate SRF for the deassertion of this
		 * condition
		 */
		update_dac(smc,0) ;
		smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
			INDEX_MAC,0) ;
	}
	if (smc->sm.smt_tvd &&
	    time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) {
		DB_SMT("SMT : DNA expired\n",0,0) ;
		smc->sm.smt_tvd = 0 ;
		if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr,
			&SMT_Unknown)){
			/* Do not update unknown address */
			smc->mib.m[MAC0].fddiMACOldDownstreamNbr=
				smc->mib.m[MAC0].fddiMACDownstreamNbr ;
		}
		smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ;
		smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
			INDEX_MAC,0) ;
	}

#endif	/* no SLIM_SMT */

#ifndef SMT_REAL_TOKEN_CT
	/*
	 * Token counter emulation section. If hardware supports the token
	 * count, the token counter will be updated in mac_update_counter.
	 */
	for (i = MAC0; i < NUMMACS; i++ ){
		if (time - smc->sm.last_tok_time[i] > 2*TICKS_PER_SECOND ){
			smt_emulate_token_ct( smc, i );
		}
	}
#endif

	smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
		EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
}

/*
 * Fixed-point ratio: returns (upper<<16)/lower, i.e. a 16.16-scaled
 * fraction truncated to int. Saturates upper at 0xffff0000 when the
 * shift would overflow; returns 0 for a zero denominator.
 */
static int div_ratio(u_long upper, u_long lower)
{
	if ((upper<<16L) < upper)
		upper = 0xffff0000L ;
	else
		upper <<= 16L ;
	if (!lower)
		return 0;
	return (int)(upper/lower) ;
}

#ifndef	SLIM_SMT

/*
 * receive packet handler
 * Validates the incoming SMT frame (FC value, destination, length,
 * version) and dispatches on smt_class. Frees the mbuf on every path
 * except the ECF-request reply, which re-uses and re-sends it.
 */
void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
/* int fs;  frame status */
{
	struct smt_header	*sm ;
	int			local ;

	int			illegal = 0 ;

	switch (m_fc(mb)) {
	case FC_SMT_INFO :
	case FC_SMT_LAN_LOC :
	case FC_SMT_LOC :
	case FC_SMT_NSA :
		break ;
	default :
		/* not an SMT frame class we handle */
		smt_free_mbuf(smc,mb) ;
		return ;
	}

	smc->mib.m[MAC0].fddiMACSMTCopied_Ct++ ;
	sm = smtod(mb,struct smt_header *) ;
	local = ((fs & L_INDICATOR) != 0) ;
	hwm_conv_can(smc,(char *)sm,12) ;

	/* check destination address */
	if (is_individual(&sm->smt_dest) && !is_my_addr(smc,&sm->smt_dest)) {
		smt_free_mbuf(smc,mb) ;
		return ;
	}
#if	0		/* for DUP recognition, do NOT filter them */
	/* ignore loop back packets */
	if (is_my_addr(smc,&sm->smt_source) && !local) {
		smt_free_mbuf(smc,mb) ;
		return ;
	}
#endif

	/* byte-swap header and parameters into host order */
	smt_swap_para(sm,(int) mb->sm_len,1) ;
	DB_SMT("SMT : received packet [%s] at 0x%x\n",
		smt_type_name[m_fc(mb) & 0xf],sm) ;
	DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
		smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;

#ifdef	SBA
	/*
	 * check if NSA frame
	 */
	if (m_fc(mb) == FC_SMT_NSA && sm->smt_class == SMT_NIF &&
		(sm->smt_type == SMT_ANNOUNCE || sm->smt_type == SMT_REQUEST)) {
			smc->sba.sm = sm ;
			sba(smc,NIF) ;
	}
#endif

	/*
	 * ignore any packet with NSA and A-indicator set
	 */
	if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
		DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n",
			addr_to_string(&sm->smt_source),0) ;
		smt_free_mbuf(smc,mb) ;
		return ;
	}

	/*
	 * ignore frames with illegal length
	 */
	if (((sm->smt_class == SMT_ECF) && (sm->smt_len > SMT_MAX_ECHO_LEN)) ||
	    ((sm->smt_class != SMT_ECF) && (sm->smt_len > SMT_MAX_INFO_LEN))) {
		smt_free_mbuf(smc,mb) ;
		return ;
	}

	/*
	 * check SMT version
	 */
	switch (sm->smt_class) {
	case SMT_NIF :
	case SMT_SIF_CONFIG :
	case SMT_SIF_OPER :
	case SMT_ECF :
		if (sm->smt_version != SMT_VID)
			illegal = 1;
		break ;
	default :
		if (sm->smt_version != SMT_VID_2)
			illegal = 1;
		break ;
	}
	if (illegal) {
		DB_SMT("SMT : version = %d, dest = %s\n",
			sm->smt_version,addr_to_string(&sm->smt_source)) ;
		smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
		smt_free_mbuf(smc,mb) ;
		return ;
	}
	if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) ||
	    ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {
		DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ;
		smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ;
		smt_free_mbuf(smc,mb) ;
		return ;
	}
	switch (sm->smt_class) {
	case SMT_NIF :
		if (smt_check_para(smc,sm,plist_nif)) {
			DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
			break ;
		}
		switch (sm->smt_type) {
		case SMT_ANNOUNCE :
		case SMT_REQUEST :
			/* broadcast NSA NIF not yet copied upstream:
			 * this is our upstream neighbor announcing itself */
			if (!(fs & C_INDICATOR) && m_fc(mb) == FC_SMT_NSA
				&& is_broadcast(&sm->smt_dest)) {
				struct smt_p_state	*st ;

				/* set my UNA */
				if (!is_equal(
					&smc->mib.m[MAC0].fddiMACUpstreamNbr,
					&sm->smt_source)) {
					DB_SMT("SMT : updated my UNA = %s\n",
					addr_to_string(&sm->smt_source),0) ;
					if (!is_equal(&smc->mib.m[MAC0].
					    fddiMACUpstreamNbr,&SMT_Unknown)){
					 /* Do not update unknown address */
					 smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
					 smc->mib.m[MAC0].fddiMACUpstreamNbr ;
					}

					smc->mib.m[MAC0].fddiMACUpstreamNbr =
						sm->smt_source ;
					smt_srf_event(smc,
						SMT_EVENT_MAC_NEIGHBOR_CHANGE,
						INDEX_MAC,0) ;
					smt_echo_test(smc,0) ;
				}
				smc->sm.smt_tvu = smt_get_time() ;
				st = (struct smt_p_state *)
					sm_to_para(smc,sm,SMT_P_STATE) ;
				if (st) {
					smc->mib.m[MAC0].fddiMACUNDA_Flag =
					(st->st_dupl_addr & SMT_ST_MY_DUPA) ?
					TRUE : FALSE ;
					update_dac(smc,1) ;
				}
			}
			if ((sm->smt_type == SMT_REQUEST) &&
			    is_individual(&sm->smt_source) &&
			    ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
			     (m_fc(mb) != FC_SMT_NSA))) {
				DB_SMT("SMT : replying to NIF request %s\n",
					addr_to_string(&sm->smt_source),0) ;
				smt_send_nif(smc,&sm->smt_source,
					FC_SMT_INFO,
					sm->smt_tid,
					SMT_REPLY,local) ;
			}
			break ;
		case SMT_REPLY :
			DB_SMT("SMT : received NIF response from %s\n",
				addr_to_string(&sm->smt_source),0) ;
			if (fs & A_INDICATOR) {
				/* someone else copied our own NIF request:
				 * another station uses our address */
				smc->sm.pend[SMT_TID_NIF] = 0 ;
				DB_SMT("SMT : duplicate address\n",0,0) ;
				smc->mib.m[MAC0].fddiMACDupAddressTest =
					DA_FAILED ;
				smc->r.dup_addr_test = DA_FAILED ;
				queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
				smc->mib.m[MAC0].fddiMACDA_Flag = TRUE ;
				update_dac(smc,1) ;
				break ;
			}
			if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) {
				smc->sm.pend[SMT_TID_NIF] = 0 ;
				/* set my DNA */
				if (!is_equal(
					&smc->mib.m[MAC0].fddiMACDownstreamNbr,
					&sm->smt_source)) {
					DB_SMT("SMT : updated my DNA\n",0,0) ;
					if (!is_equal(&smc->mib.m[MAC0].
					 fddiMACDownstreamNbr, &SMT_Unknown)){
					 /* Do not update unknown address */
					smc->mib.m[MAC0].fddiMACOldDownstreamNbr =
					 smc->mib.m[MAC0].fddiMACDownstreamNbr ;
					}

					smc->mib.m[MAC0].fddiMACDownstreamNbr =
						sm->smt_source ;
					smt_srf_event(smc,
						SMT_EVENT_MAC_NEIGHBOR_CHANGE,
						INDEX_MAC,0) ;
					smt_echo_test(smc,1) ;
				}
				smc->mib.m[MAC0].fddiMACDA_Flag = FALSE ;
				update_dac(smc,1) ;
				smc->sm.smt_tvd = smt_get_time() ;
				smc->mib.m[MAC0].fddiMACDupAddressTest =
					DA_PASSED ;
				if (smc->r.dup_addr_test != DA_PASSED) {
					smc->r.dup_addr_test = DA_PASSED ;
					queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
				}
			}
			else if (sm->smt_tid ==
				smc->sm.pend[SMT_TID_NIF_TEST]) {
				DB_SMT("SMT : NIF test TID ok\n",0,0) ;
			}
			else {
				DB_SMT("SMT : expected TID %lx, got %lx\n",
				smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ;
			}
			break ;
		default :
			illegal = 2 ;
			break ;
		}
		break ;
	case SMT_SIF_CONFIG :	/* station information */
		if (sm->smt_type != SMT_REQUEST)
			break ;
		DB_SMT("SMT : replying to SIF Config request from %s\n",
			addr_to_string(&sm->smt_source),0) ;
		smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
		break ;
	case SMT_SIF_OPER :	/* station information */
		if (sm->smt_type != SMT_REQUEST)
			break ;
		DB_SMT("SMT : replying to SIF Operation request from %s\n",
			addr_to_string(&sm->smt_source),0) ;
		smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
		break ;
	case SMT_ECF :		/* echo frame */
		switch (sm->smt_type) {
		case SMT_REPLY :
			smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
			DB_SMT("SMT: received ECF reply from %s\n",
				addr_to_string(&sm->smt_source),0) ;
			if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) {
				DB_SMT("SMT: ECHODATA missing\n",0,0) ;
				break ;
			}
			if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {
				DB_SMT("SMT : ECF test TID ok\n",0,0) ;
			}
			else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {
				DB_SMT("SMT : ECF test UNA ok\n",0,0) ;
			}
			else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {
				DB_SMT("SMT : ECF test DNA ok\n",0,0) ;
			}
			else {
				DB_SMT("SMT : expected TID %lx, got %lx\n",
					smc->sm.pend[SMT_TID_ECF],
					sm->smt_tid) ;
			}
			break ;
		case SMT_REQUEST :
			smc->mib.priv.fddiPRIVECF_Req_Rx++ ;
			{
			if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {
				DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ;
				smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,
					local) ;
				break ;
			}
			DB_SMT("SMT - sending ECF reply to %s\n",
				addr_to_string(&sm->smt_source),0) ;

			/* set destination addr. & reply */
			sm->smt_dest = sm->smt_source ;
			sm->smt_type = SMT_REPLY ;
			dump_smt(smc,sm,"ECF REPLY") ;
			smc->mib.priv.fddiPRIVECF_Reply_Tx++ ;
			smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
			return ;		/* DON'T free mbuf */
			}
		default :
			illegal = 1 ;
			break ;
		}
		break ;
#ifndef	BOOT
	case SMT_RAF :		/* resource allocation */
#ifdef	ESS
		DB_ESSN(2,"ESS: RAF frame received\n",0,0) ;
		fs = ess_raf_received_pack(smc,mb,sm,fs) ;
#endif

#ifdef	SBA
		DB_SBAN(2,"SBA: RAF frame received\n",0,0) ;
		sba_raf_received_pack(smc,sm,fs) ;
#endif
		break ;
	case SMT_RDF :		/* request denied */
		smc->mib.priv.fddiPRIVRDF_Rx++ ;
		break ;
	case SMT_ESF :		/* extended service - not supported */
		if (sm->smt_type == SMT_REQUEST) {
			DB_SMT("SMT - received ESF, sending RDF\n",0,0) ;
			smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
		}
		break ;
	case SMT_PMF_GET :
	case SMT_PMF_SET :
		if (sm->smt_type != SMT_REQUEST)
			break ;
		/* update statistics */
		if (sm->smt_class == SMT_PMF_GET)
			smc->mib.priv.fddiPRIVPMF_Get_Rx++ ;
		else
			smc->mib.priv.fddiPRIVPMF_Set_Rx++ ;
		/*
		 * ignore PMF SET with I/G set
		 */
		if ((sm->smt_class == SMT_PMF_SET) &&
			!is_individual(&sm->smt_dest)) {
			DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ;
			break ;
		}
		smt_pmf_received_pack(smc,mb, local) ;
		break ;
	case SMT_SRF :
		dump_smt(smc,sm,"SRF received") ;
		break ;
	default :
		if (sm->smt_type != SMT_REQUEST)
			break ;
		/*
		 * For frames with unknown class:
		 * we need to send a RDF frame according to 8.1.3.1.1,
		 * only if it is a REQUEST.
		 */
		DB_SMT("SMT : class = %d, send RDF to %s\n",
			sm->smt_class, addr_to_string(&sm->smt_source)) ;

		smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
		break ;
#endif
	}
	if (illegal) {
		DB_SMT("SMT: discarding invalid frame, reason = %d\n",
			illegal,0) ;
	}
	smt_free_mbuf(smc,mb) ;
}

/*
 * Recompute the duplicate-address condition (UNA_Flag | DA_Flag) and
 * either raise an SRF event on change (report != 0) or just record it.
 */
static void update_dac(struct s_smc *smc, int report)
{
	int	cond ;

	cond = ( smc->mib.m[MAC0].fddiMACUNDA_Flag |
		smc->mib.m[MAC0].fddiMACDA_Flag) != 0 ;
	if (report && (cond != smc->mib.m[MAC0].fddiMACDuplicateAddressCond))
		smt_srf_event(smc, SMT_COND_MAC_DUP_ADDR,INDEX_MAC,cond) ;
	else
		smc->mib.m[MAC0].fddiMACDuplicateAddressCond = cond ;
}

/*
 * send SMT frame
 *	set source address
 *	set station ID
 *	send frame
 * Consumes the mbuf in all cases (either transmits or frees it).
 */
void smt_send_frame(struct s_smc *smc, SMbuf *mb, int fc, int local)
/* SMbuf *mb;	buffer to send */
/* int fc;	FC value */
{
	struct smt_header	*sm ;

	if (!smc->r.sm_ma_avail && !local) {
		/* ring not available and frame is not local only: drop */
		smt_free_mbuf(smc,mb) ;
		return ;
	}
	sm = smtod(mb,struct smt_header *) ;
	sm->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ;
	sm->smt_sid = smc->mib.fddiSMTStationId ;

	smt_swap_para(sm,(int) mb->sm_len,0) ;	/* swap para & header */
	hwm_conv_can(smc,(char *)sm,12) ;	/* convert SA and DA */
	smc->mib.m[MAC0].fddiMACSMTTransmit_Ct++ ;
	smt_send_mbuf(smc,mb,local ? FC_SMT_LOC : fc) ;
}

/*
 * generate and send RDF
 * Builds a Request-Denied frame echoing (a truncated copy of) the
 * offending frame. Does NOT free 'rej' — the caller still owns it.
 */
static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
			 int local)
/* SMbuf *rej;	mbuf of offending frame */
/* int fc;	FC of denied frame */
/* int reason;	reason code */
{
	SMbuf	*mb ;
	struct smt_header	*sm ;	/* header of offending frame */
	struct smt_rdf	*rdf ;
	int		len ;
	int		frame_len ;

	sm = smtod(rej,struct smt_header *) ;
	if (sm->smt_type != SMT_REQUEST)
		return ;

	DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n",
		addr_to_string(&sm->smt_source),reason) ;

	/*
	 * note: get framelength from MAC length, NOT from SMT header
	 * smt header length is included in sm_len
	 */
	frame_len = rej->sm_len ;

	if (!(mb=smt_build_frame(smc,SMT_RDF,SMT_REPLY,sizeof(struct smt_rdf))))
		return ;
	rdf = smtod(mb,struct smt_rdf *) ;
	rdf->smt.smt_tid = sm->smt_tid ;	/* use TID from sm */
	rdf->smt.smt_dest = sm->smt_source ;	/* set dest = source */

	/* set P12 */
	rdf->reason.para.p_type = SMT_P_REASON ;
	rdf->reason.para.p_len = sizeof(struct smt_p_reason) - PARA_LEN ;
	rdf->reason.rdf_reason = reason ;

	/* set P14 */
	rdf->version.para.p_type = SMT_P_VERSION ;
	rdf->version.para.p_len = sizeof(struct smt_p_version) - PARA_LEN ;
	rdf->version.v_pad = 0 ;
	rdf->version.v_n = 1 ;
	rdf->version.v_index = 1 ;
	rdf->version.v_version[0] = SMT_VID_2 ;
	rdf->version.v_pad2 = 0 ;

	/* set P13 */
	if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
		2*sizeof(struct smt_header))
		len = frame_len ;
	else
		len = SMT_MAX_INFO_LEN - sizeof(*rdf) +
			2*sizeof(struct smt_header) ;

	/* make length multiple of 4 */
	len &= ~3 ;
	rdf->refused.para.p_type = SMT_P_REFUSED ;
	/* length of para is smt_frame + ref_fc */
	rdf->refused.para.p_len = len + 4 ;
	rdf->refused.ref_fc = fc ;

	/* swap it back (rej was swapped to host order on receive) */
	smt_swap_para(sm,frame_len,0) ;

	memcpy((char *) &rdf->refused.ref_header,(char *) sm,len) ;

	len -= sizeof(struct smt_header) ;
	mb->sm_len += len ;
	rdf->smt.smt_len += len ;

	dump_smt(smc,(struct smt_header *)rdf,"RDF") ;
	smc->mib.priv.fddiPRIVRDF_Tx++ ;
	smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}

/*
 * generate and send NIF
 */
static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest, 
			 int fc, u_long tid, int type, int local)
/* struct fddi_addr *dest;	dest address */
/* int fc;			frame control */
/* u_long tid;			transaction id */
/* int type;			frame type */
{
	struct smt_nif	*nif ;
	SMbuf		*mb ;

	if (!(mb = smt_build_frame(smc,SMT_NIF,type,sizeof(struct smt_nif))))
		return ;
	nif = smtod(mb, struct smt_nif *) ;
	smt_fill_una(smc,&nif->una) ;	/* set UNA */
	smt_fill_sde(smc,&nif->sde) ;	/* set station descriptor */
	smt_fill_state(smc,&nif->state) ;	/* set state information */
#ifdef	SMT6_10
	smt_fill_fsc(smc,&nif->fsc) ;	/* set frame status cap. */
#endif
	nif->smt.smt_dest = *dest ;	/* destination address */
	nif->smt.smt_tid = tid ;	/* transaction ID */
	dump_smt(smc,(struct smt_header *)nif,"NIF") ;
	smt_send_frame(smc,mb,fc,local) ;
}

#ifdef	DEBUG
/*
 * send NIF request (test purpose)
 */
static void smt_send_nif_request(struct s_smc *smc, struct fddi_addr *dest)
{
	smc->sm.pend[SMT_TID_NIF_TEST] = smt_get_tid(smc) ;
	smt_send_nif(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_NIF_TEST],
		SMT_REQUEST,0) ;
}

/*
 * send ECF request (test purpose)
 */
static void smt_send_ecf_request(struct s_smc *smc, struct fddi_addr *dest,
				 int len)
{
	smc->sm.pend[SMT_TID_ECF] = smt_get_tid(smc) ;
	smt_send_ecf(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_ECF],
		SMT_REQUEST,len) ;
}
#endif

/*
 * echo test
 * Sends an ECF request to the upstream (dna == 0) or downstream
 * (dna != 0) neighbor and records the pending TID for reply matching.
 */
static void smt_echo_test(struct s_smc *smc, int dna)
{
	u_long	tid ;

	smc->sm.pend[dna ? SMT_TID_ECF_DNA : SMT_TID_ECF_UNA] =
		tid = smt_get_tid(smc) ;
	smt_send_ecf(smc, dna ?
		&smc->mib.m[MAC0].fddiMACDownstreamNbr :
		&smc->mib.m[MAC0].fddiMACUpstreamNbr,
		FC_SMT_INFO,tid, SMT_REQUEST, (SMT_TEST_ECHO_LEN & ~3)-8) ;
}

/*
 * generate and send ECF
 */
static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
			 u_long tid, int type, int len)
/* struct fddi_addr *dest;	dest address */
/* int fc;			frame control */
/* u_long tid;			transaction id */
/* int type;			frame type */
/* int len;			frame length */
{
	struct smt_ecf	*ecf ;
	SMbuf		*mb ;

	if (!(mb = smt_build_frame(smc,SMT_ECF,type,SMT_ECF_LEN + len)))
		return ;
	ecf = smtod(mb, struct smt_ecf *) ;

	smt_fill_echo(smc,&ecf->ec_echo,tid,len) ;	/* set ECHO */
	ecf->smt.smt_dest = *dest ;	/* destination address */
	ecf->smt.smt_tid = tid ;	/* transaction ID */
	smc->mib.priv.fddiPRIVECF_Req_Tx++ ;
	smt_send_frame(smc,mb,fc,0) ;
}

/*
 * generate and send SIF config response
 */
static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
				u_long tid, int local)
/* struct fddi_addr *dest;	dest address */
/* u_long tid;			transaction id */
{
	struct smt_sif_config	*sif ;
	SMbuf			*mb ;
	int			len ;

	if (!(mb = smt_build_frame(smc,SMT_SIF_CONFIG,SMT_REPLY,
		SIZEOF_SMT_SIF_CONFIG)))
		return ;
	sif = smtod(mb, struct smt_sif_config *) ;
	smt_fill_timestamp(smc,&sif->ts) ;	/* set time stamp */
	smt_fill_sde(smc,&sif->sde) ;		/* set station descriptor */
	smt_fill_version(smc,&sif->version) ;	/* set version information */
	smt_fill_state(smc,&sif->state) ;	/* set state information */
	smt_fill_policy(smc,&sif->policy) ;	/* set station policy */
	smt_fill_latency(smc,&sif->latency);	/* set station latency */
	smt_fill_neighbor(smc,&sif->neighbor);	/* set station neighbor */
	smt_fill_setcount(smc,&sif->setcount) ;	/* set count */
	len = smt_fill_path(smc,&sif->path);	/* set station path descriptor*/
	sif->smt.smt_dest = *dest ;		/* destination address */
	sif->smt.smt_tid = tid ;		/* transaction ID */
	smt_add_frame_len(mb,len) ;		/* adjust length fields */
	dump_smt(smc,(struct smt_header *)sif,"SIF Configuration Reply") ;
	smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}

/*
 * generate and send SIF operation response
 */
static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
				   u_long tid, int local)
/* struct fddi_addr *dest;	dest address */
/* u_long tid;			transaction id */
{
	struct smt_sif_operation *sif ;
	SMbuf			*mb ;
	int			ports ;
	int			i ;

	ports = NUMPHYS ;
#ifndef	CONCENTRATOR
	if (smc->s.sas == SMT_SAS)
		ports = 1 ;
#endif

	if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY,
		SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem))))
		return ;
	sif = smtod(mb, struct smt_sif_operation *) ;
	smt_fill_timestamp(smc,&sif->ts) ;	/* set time stamp */
	smt_fill_mac_status(smc,&sif->status) ; /* set mac status */
	smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */
	smt_fill_mac_fnc(smc,&sif->fnc) ; /* set frame not copied counter */
	smt_fill_manufacturer(smc,&sif->man) ; /* set manufacturer field */
	smt_fill_user(smc,&sif->user) ;	/* set user field */
	smt_fill_setcount(smc,&sif->setcount) ;	/* set count */
	/*
	 * set link error mon information
	 */
	if (ports == 1) {
		smt_fill_lem(smc,sif->lem,PS) ;
	}
	else {
		for (i = 0 ; i < ports ; i++) {
			smt_fill_lem(smc,&sif->lem[i],i) ;
		}
	}

	sif->smt.smt_dest = *dest ;	/* destination address */
	sif->smt.smt_tid = tid ;	/* transaction ID */
	dump_smt(smc,(struct smt_header *)sif,"SIF Operation Reply") ;
	smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}

/*
 * get and initialize SMT frame
 * Returns an mbuf with a filled-in SMT header, or NULL if no mbuf is
 * available. 'length' is the total frame length incl. the SMT header.
 */
SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
		       int length)
{
	SMbuf			*mb ;
	struct smt_header	*smt ;

#if	0
	if (!smc->r.sm_ma_avail) {
		return 0;
	}
#endif
	if (!(mb = smt_get_mbuf(smc)))
		return mb;

	mb->sm_len = length ;
	smt = smtod(mb, struct smt_header *) ;
	smt->smt_dest = fddi_broadcast ; /* set dest = broadcast */

	smt->smt_class = class ;
	smt->smt_type = type ;
	switch (class) {
	case SMT_NIF :
	case SMT_SIF_CONFIG :
	case SMT_SIF_OPER :
	case SMT_ECF :
		smt->smt_version = SMT_VID ;
		break ;
	default :
		smt->smt_version = SMT_VID_2 ;
		break ;
	}
	smt->smt_tid = smt_get_tid(smc) ;	/* set transaction ID */
	smt->smt_pad = 0 ;
	smt->smt_len = length - sizeof(struct smt_header) ;
	return mb;
}

/* grow both the mbuf length and the SMT header's info length by 'len' */
static void smt_add_frame_len(SMbuf *mb, int len)
{
	struct smt_header	*smt ;

	smt = smtod(mb, struct smt_header *) ;
	smt->smt_len += len ;
	mb->sm_len += len ;
}

/*
 * fill values in UNA parameter
 */
static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una)
{
	SMTSETPARA(una,SMT_P_UNA) ;
	una->una_pad = 0 ;
	una->una_node = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
}

/*
 * fill values in SDE parameter
 */
static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde)
{
	SMTSETPARA(sde,SMT_P_SDE) ;
	sde->sde_non_master = smc->mib.fddiSMTNonMaster_Ct ;
	sde->sde_master = smc->mib.fddiSMTMaster_Ct ;
	sde->sde_mac_count = NUMMACS ;		/* only 1 MAC */
#ifdef	CONCENTRATOR
	sde->sde_type = SMT_SDE_CONCENTRATOR ;
#else
	sde->sde_type = SMT_SDE_STATION ;
#endif
}

/*
 * fill in values in station state parameter
 */
static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state)
{
	int	top ;
	int	twist ;

	SMTSETPARA(state,SMT_P_STATE) ;
	state->st_pad = 0 ;

	/* determine topology */
	top = 0 ;
	if (smc->mib.fddiSMTPeerWrapFlag) {
		top |= SMT_ST_WRAPPED ;		/* state wrapped */
	}
#ifdef	CONCENTRATOR
	if (cfm_status_unattached(smc)) {
		top |= SMT_ST_UNATTACHED ;	/* unattached concentrator */
	}
#endif
	if ((twist = pcm_status_twisted(smc)) & 1) {
		top |= SMT_ST_TWISTED_A ;	/* twisted cable */
	}
	if (twist & 2) {
		top |= SMT_ST_TWISTED_B ;	/* twisted cable */
	}
#ifdef	OPT_SRF
	top |= SMT_ST_SRF ;
#endif
	if (pcm_rooted_station(smc))
		top |= SMT_ST_ROOTED_S ;
	if (smc->mib.a[0].fddiPATHSbaPayload != 0)
		top |= SMT_ST_SYNC_SERVICE ;
	state->st_topology = top ;
	state->st_dupl_addr =
		((smc->mib.m[MAC0].fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0 ) |
		 (smc->mib.m[MAC0].fddiMACUNDA_Flag ? SMT_ST_UNA_DUPA : 0)) ;
}

/*
 * fill values in timestamp parameter
 */
static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts)
{
	SMTSETPARA(ts,SMT_P_TIMESTAMP) ;
	smt_set_timestamp(smc,ts->ts_time) ;
}

/* write the current time into an 8-byte SMT timestamp buffer */
void smt_set_timestamp(struct s_smc *smc, u_char *p)
{
	u_long	time ;
	u_long	utime ;

	/*
	 * timestamp is 64 bits long ; resolution is 80 nS
	 * our clock resolution is 10mS
	 * 10mS/80ns = 125000 ~ 2^17 = 131072
	 */
	utime = smt_get_time() ;
	time = utime * 100 ;
	time /= TICKS_PER_SECOND ;
	p[0] = 0 ;
	p[1] = (u_char)((time>>(8+8+8+8-1)) & 1) ;
	p[2] = (u_char)(time>>(8+8+8-1)) ;
	p[3] = (u_char)(time>>(8+8-1)) ;
	p[4] = (u_char)(time>>(8-1)) ;
	p[5] = (u_char)(time<<1) ;
	p[6] = (u_char)(smc->sm.uniq_ticks>>8) ;
	p[7] = (u_char)smc->sm.uniq_ticks ;
	/*
	 * make sure we don't wrap: restart whenever the upper digits change
	 */
	if (utime != smc->sm.uniq_time) {
		smc->sm.uniq_ticks = 0 ;
	}
	smc->sm.uniq_ticks++ ;
	smc->sm.uniq_time = utime ;
}

/*
 * fill values in station policy parameter
 */
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
{
	int	i ;
	const u_char *map ;
	u_short	in ;
	u_short	out ;

	/*
	 * MIB para 101b (fddiSMTConnectionPolicy) coding
	 * is different from 0005 coding
	 */
	static const u_char ansi_weirdness[16] = {
		0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
	} ;
	SMTSETPARA(policy,SMT_P_POLICY) ;

	/* remap each set bit through the ANSI bit-position table */
	out = 0 ;
	in = smc->mib.fddiSMTConnectionPolicy ;
	for (i = 0, map = ansi_weirdness ; i < 16 ; i++) {
		if (in & 1)
			out |= (1<<*map) ;
		in >>= 1 ;
		map++ ;
	}
	policy->pl_config = smc->mib.fddiSMTConfigPolicy ;
	policy->pl_connect = out ;
}

/*
 * fill values in latency equivalent parameter
 */
static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency)
{
	SMTSETPARA(latency,SMT_P_LATENCY) ;

	latency->lt_phyout_idx1 = phy_index(smc,0) ;
	latency->lt_latency1 = 10 ;	/* in octets (byte clock) */
	/*
	 * note: latency has two phy entries by definition
	 * for a SAS, the 2nd one is null
	 */
	if (smc->s.sas == SMT_DAS) {
		latency->lt_phyout_idx2 = phy_index(smc,1) ;
		latency->lt_latency2 = 10 ;	/* in octets (byte clock) */
	}
	else {
		latency->lt_phyout_idx2 = 0 ;
		latency->lt_latency2 = 0 ;
	}
}

/*
 * fill values in MAC neighbors parameter
 */
static void smt_fill_neighbor(struct s_smc *smc,
			      struct smt_p_neighbor *neighbor)
{
	SMTSETPARA(neighbor,SMT_P_NEIGHBORS) ;

	neighbor->nb_mib_index = INDEX_MAC ;
	neighbor->nb_mac_index = mac_index(smc,1) ;
	neighbor->nb_una = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
	neighbor->nb_dna = smc->mib.m[MAC0].fddiMACDownstreamNbr ;
}

/*
 * fill values in path descriptor
 * Returns the total para length written (PHY records + one MAC record).
 */
#ifdef	CONCENTRATOR
#define ALLPHYS	NUMPHYS
#else
#define ALLPHYS	((smc->s.sas == SMT_SAS) ? 1 : 2)
#endif

static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
{
	SK_LOC_DECL(int,type) ;
	SK_LOC_DECL(int,state) ;
	SK_LOC_DECL(int,remote) ;
	SK_LOC_DECL(int,mac) ;
	int	len ;
	int	p ;
	int	physp ;
	struct smt_phy_rec	*phy ;
	struct smt_mac_rec	*pd_mac ;

	len = 	PARA_LEN +
		sizeof(struct smt_mac_rec) * NUMMACS +
		sizeof(struct smt_phy_rec) * ALLPHYS ;
	path->para.p_type = SMT_P_PATH ;
	path->para.p_len = len - PARA_LEN ;

	/* PHYs */
	for (p = 0,phy = path->pd_phy ; p < ALLPHYS ; p++, phy++) {
		physp = p ;
#ifndef	CONCENTRATOR
		if (smc->s.sas == SMT_SAS)
			physp = PS ;
#endif
		pcm_status_state(smc,physp,&type,&state,&remote,&mac) ;
#ifdef	LITTLE_ENDIAN
		phy->phy_mib_index = smt_swap_short((u_short)p+INDEX_PORT) ;
#else
		phy->phy_mib_index = p+INDEX_PORT ;
#endif
		phy->phy_type = type ;
		phy->phy_connect_state = state ;
		phy->phy_remote_type = remote ;
		phy->phy_remote_mac = mac ;
		phy->phy_resource_idx = phy_con_resource_index(smc,p) ;
	}

	/* MAC record follows directly after the PHY records */
	pd_mac = (struct smt_mac_rec *) phy ;
	pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
	pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
	return len;
}

/*
 * fill values in mac status
 */
static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st)
{
	SMTSETPARA(st,SMT_P_MAC_STATUS) ;

	st->st_mib_index = INDEX_MAC ;
	st->st_mac_index = mac_index(smc,1) ;

	mac_update_counter(smc) ;
	/*
	 * timer values are represented in SMT as 2's complement numbers
	 * units :	internal : 2's complement BCLK
	 */
	st->st_t_req = smc->mib.m[MAC0].fddiMACT_Req ;
	st->st_t_neg = smc->mib.m[MAC0].fddiMACT_Neg ;
	st->st_t_max = smc->mib.m[MAC0].fddiMACT_Max ;
	st->st_tvx_value = smc->mib.m[MAC0].fddiMACTvxValue ;
	st->st_t_min = smc->mib.m[MAC0].fddiMACT_Min ;

	st->st_sba = smc->mib.a[PATH0].fddiPATHSbaPayload ;
	st->st_frame_ct = smc->mib.m[MAC0].fddiMACFrame_Ct ;
	st->st_error_ct = smc->mib.m[MAC0].fddiMACError_Ct ;
	st->st_lost_ct = smc->mib.m[MAC0].fddiMACLost_Ct ;
}

/*
 * fill values in LEM status
 */
static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy)
{
	struct fddi_mib_p	*mib ;

	mib = smc->y[phy].mib ;

	SMTSETPARA(lem,SMT_P_LEM) ;
	lem->lem_mib_index = phy+INDEX_PORT ;
	lem->lem_phy_index = phy_index(smc,phy) ;
	lem->lem_pad2 = 0 ;
	lem->lem_cutoff = mib->fddiPORTLer_Cutoff ;
	lem->lem_alarm = mib->fddiPORTLer_Alarm ;
	/* long term bit error rate */
	lem->lem_estimate = mib->fddiPORTLer_Estimate ;
	/* # of rejected connections */
	lem->lem_reject_ct = mib->fddiPORTLem_Reject_Ct ;
	lem->lem_ct = mib->fddiPORTLem_Ct ;	/* total number of errors */
}

/*
 * fill version parameter
 */
static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers)
{
	SK_UNUSED(smc) ;
	SMTSETPARA(vers,SMT_P_VERSION) ;
	vers->v_pad = 0 ;
	vers->v_n = 1 ;			/* one version is enough .. */
	vers->v_index = 1 ;
	vers->v_version[0] = SMT_VID_2 ;
	vers->v_pad2 = 0 ;
}

#ifdef	SMT6_10
/*
 * fill frame status capabilities
 */
/*
 * note: this para 200B is NOT in swap table, because it's also set in
 * PMF add_para
 */
static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc)
{
	SK_UNUSED(smc) ;
	SMTSETPARA(fsc,SMT_P_FSC) ;
	fsc->fsc_pad0 = 0 ;
	fsc->fsc_mac_index = INDEX_MAC ;	/* this is MIB ; MIB is NOT
						 * mac_index ()i !
						 */
	fsc->fsc_pad1 = 0 ;
	fsc->fsc_value = FSC_TYPE0 ;		/* "normal" node */
#ifdef	LITTLE_ENDIAN
	/* swapped by hand since this para is not in the swap table */
	fsc->fsc_mac_index = smt_swap_short(INDEX_MAC) ;
	fsc->fsc_value = smt_swap_short(FSC_TYPE0) ;
#endif
}
#endif

/*
 * fill mac counter field
 */
static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc)
{
	SMTSETPARA(mc,SMT_P_MAC_COUNTER) ;
	mc->mc_mib_index = INDEX_MAC ;
	mc->mc_index = mac_index(smc,1) ;
	mc->mc_receive_ct = smc->mib.m[MAC0].fddiMACCopied_Ct ;
	mc->mc_transmit_ct =  smc->mib.m[MAC0].fddiMACTransmit_Ct ;
}

/*
 * fill mac frame not copied counter
 */
static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc)
{
	SMTSETPARA(fnc,SMT_P_MAC_FNC) ;
	fnc->nc_mib_index = INDEX_MAC ;
	fnc->nc_index = mac_index(smc,1) ;
	fnc->nc_counter = smc->mib.m[MAC0].fddiMACNotCopied_Ct ;
}

/*
 * fill manufacturer field
 */
static void smt_fill_manufacturer(struct s_smc *smc, 
				  struct smp_p_manufacturer *man)
{
	SMTSETPARA(man,SMT_P_MANUFACTURER) ;
	memcpy((char *) man->mf_data,
		(char *) smc->mib.fddiSMTManufacturerData,
		sizeof(man->mf_data)) ;
}

/*
 * fill user field
 */
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user)
{
	SMTSETPARA(user,SMT_P_USER) ;
	memcpy((char *) user->us_data,
		(char *) smc->mib.fddiSMTUserData,
		sizeof(user->us_data)) ;
}

/*
 * fill set count
 */
static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount)
{
	SK_UNUSED(smc) ;
	SMTSETPARA(setcount,SMT_P_SETCOUNT) ;
	setcount->count = smc->mib.fddiSMTSetCount.count ;
	memcpy((char *)setcount->timestamp,
		(char *)smc->mib.fddiSMTSetCount.timestamp,8) ;
}

/*
 * fill echo data
 * (pseudo-random payload generated from 'seed' with a +13 stride)
 */
static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed,
			  int len)
{
	u_char	*p ;

	SK_UNUSED(smc) ;
	SMTSETPARA(echo,SMT_P_ECHODATA) ;
	echo->para.p_len = len ;
	for (p = echo->ec_data ; len ; len--) {
		*p++ = (u_char) seed ;
		seed += 13 ;
	}
}

/*
 * clear DNA and UNA
 * called from CFM if configuration changes
 */
static void smt_clear_una_dna(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ; smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ; } static void smt_clear_old_una_dna(struct s_smc *smc) { smc->mib.m[MAC0].fddiMACOldUpstreamNbr = SMT_Unknown ; smc->mib.m[MAC0].fddiMACOldDownstreamNbr = SMT_Unknown ; } u_long smt_get_tid(struct s_smc *smc) { u_long tid ; while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0) ; return tid & 0x3fffffffL; } /* * table of parameter lengths */ static const struct smt_pdef { int ptype ; int plen ; const char *pswap ; } smt_pdef[] = { { SMT_P_UNA, sizeof(struct smt_p_una) , SWAP_SMT_P_UNA } , { SMT_P_SDE, sizeof(struct smt_p_sde) , SWAP_SMT_P_SDE } , { SMT_P_STATE, sizeof(struct smt_p_state) , SWAP_SMT_P_STATE } , { SMT_P_TIMESTAMP,sizeof(struct smt_p_timestamp) , SWAP_SMT_P_TIMESTAMP } , { SMT_P_POLICY, sizeof(struct smt_p_policy) , SWAP_SMT_P_POLICY } , { SMT_P_LATENCY, sizeof(struct smt_p_latency) , SWAP_SMT_P_LATENCY } , { SMT_P_NEIGHBORS,sizeof(struct smt_p_neighbor) , SWAP_SMT_P_NEIGHBORS } , { SMT_P_PATH, sizeof(struct smt_p_path) , SWAP_SMT_P_PATH } , { SMT_P_MAC_STATUS,sizeof(struct smt_p_mac_status) , SWAP_SMT_P_MAC_STATUS } , { SMT_P_LEM, sizeof(struct smt_p_lem) , SWAP_SMT_P_LEM } , { SMT_P_MAC_COUNTER,sizeof(struct smt_p_mac_counter) , SWAP_SMT_P_MAC_COUNTER } , { SMT_P_MAC_FNC,sizeof(struct smt_p_mac_fnc) , SWAP_SMT_P_MAC_FNC } , { SMT_P_PRIORITY,sizeof(struct smt_p_priority) , SWAP_SMT_P_PRIORITY } , { SMT_P_EB,sizeof(struct smt_p_eb) , SWAP_SMT_P_EB } , { SMT_P_MANUFACTURER,sizeof(struct smp_p_manufacturer) , SWAP_SMT_P_MANUFACTURER } , { SMT_P_REASON, sizeof(struct smt_p_reason) , SWAP_SMT_P_REASON } , { SMT_P_REFUSED, sizeof(struct smt_p_refused) , SWAP_SMT_P_REFUSED } , { SMT_P_VERSION, sizeof(struct smt_p_version) , SWAP_SMT_P_VERSION } , #ifdef ESS { SMT_P0015, sizeof(struct smt_p_0015) , SWAP_SMT_P0015 } , { SMT_P0016, sizeof(struct smt_p_0016) , SWAP_SMT_P0016 } , { SMT_P0017, sizeof(struct smt_p_0017) , SWAP_SMT_P0017 } , { SMT_P0018, 
sizeof(struct smt_p_0018) , SWAP_SMT_P0018 } , { SMT_P0019, sizeof(struct smt_p_0019) , SWAP_SMT_P0019 } , { SMT_P001A, sizeof(struct smt_p_001a) , SWAP_SMT_P001A } , { SMT_P001B, sizeof(struct smt_p_001b) , SWAP_SMT_P001B } , { SMT_P001C, sizeof(struct smt_p_001c) , SWAP_SMT_P001C } , { SMT_P001D, sizeof(struct smt_p_001d) , SWAP_SMT_P001D } , #endif #if 0 { SMT_P_FSC, sizeof(struct smt_p_fsc) , SWAP_SMT_P_FSC } , #endif { SMT_P_SETCOUNT,0, SWAP_SMT_P_SETCOUNT } , { SMT_P1048, 0, SWAP_SMT_P1048 } , { SMT_P208C, 0, SWAP_SMT_P208C } , { SMT_P208D, 0, SWAP_SMT_P208D } , { SMT_P208E, 0, SWAP_SMT_P208E } , { SMT_P208F, 0, SWAP_SMT_P208F } , { SMT_P2090, 0, SWAP_SMT_P2090 } , #ifdef ESS { SMT_P320B, sizeof(struct smt_p_320b) , SWAP_SMT_P320B } , { SMT_P320F, sizeof(struct smt_p_320f) , SWAP_SMT_P320F } , { SMT_P3210, sizeof(struct smt_p_3210) , SWAP_SMT_P3210 } , #endif { SMT_P4050, 0, SWAP_SMT_P4050 } , { SMT_P4051, 0, SWAP_SMT_P4051 } , { SMT_P4052, 0, SWAP_SMT_P4052 } , { SMT_P4053, 0, SWAP_SMT_P4053 } , } ; #define N_SMT_PLEN ARRAY_SIZE(smt_pdef) int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short list[]) { const u_short *p = list ; while (*p) { if (!sm_to_para(smc,sm,(int) *p)) { DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); return -1; } p++ ; } return 0; } void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) { char *p ; int len ; int plen ; void *found = NULL; SK_UNUSED(smc) ; len = sm->smt_len ; p = (char *)(sm+1) ; /* pointer to info */ while (len > 0 ) { if (((struct smt_para *)p)->p_type == para) found = (void *) p ; plen = ((struct smt_para *)p)->p_len + PARA_LEN ; p += plen ; len -= plen ; if (len < 0) { DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ; return NULL; } if ((plen & 3) && (para != SMT_P_ECHODATA)) { DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ; return NULL; } if (found) return found; } return NULL; } #if 0 /* * send ANTC data test frame */ void fddi_send_antc(struct s_smc *smc, 
struct fddi_addr *dest) { SK_UNUSED(smc) ; SK_UNUSED(dest) ; #if 0 SMbuf *mb ; struct smt_header *smt ; int i ; char *p ; mb = smt_get_mbuf() ; mb->sm_len = 3000+12 ; p = smtod(mb, char *) + 12 ; for (i = 0 ; i < 3000 ; i++) *p++ = 1 << (i&7) ; smt = smtod(mb, struct smt_header *) ; smt->smt_dest = *dest ; smt->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ; smt_send_mbuf(smc,mb,FC_ASYNC_LLC) ; #endif } #endif #ifdef DEBUG char *addr_to_string(struct fddi_addr *addr) { int i ; static char string[6*3] = "****" ; for (i = 0 ; i < 6 ; i++) { string[i * 3] = hex_asc_hi(addr->a[i]); string[i * 3 + 1] = hex_asc_lo(addr->a[i]); string[i * 3 + 2] = ':'; } string[5 * 3 + 2] = 0; return string; } #endif #ifdef AM29K int smt_ifconfig(int argc, char *argv[]) { if (argc >= 2 && !strcmp(argv[0],"opt_bypass") && !strcmp(argv[1],"yes")) { smc->mib.fddiSMTBypassPresent = 1 ; return 0; } return amdfddi_config(0, argc, argv); } #endif /* * return static mac index */ static int mac_index(struct s_smc *smc, int mac) { SK_UNUSED(mac) ; #ifdef CONCENTRATOR SK_UNUSED(smc) ; return NUMPHYS + 1; #else return (smc->s.sas == SMT_SAS) ? 2 : 3; #endif } /* * return static phy index */ static int phy_index(struct s_smc *smc, int phy) { SK_UNUSED(smc) ; return phy + 1; } /* * return dynamic mac connection resource index */ static int mac_con_resource_index(struct s_smc *smc, int mac) { #ifdef CONCENTRATOR SK_UNUSED(smc) ; SK_UNUSED(mac) ; return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC)); #else SK_UNUSED(mac) ; switch (smc->mib.fddiSMTCF_State) { case SC9_C_WRAP_A : case SC5_THRU_B : case SC11_C_WRAP_S : return 1; case SC10_C_WRAP_B : case SC4_THRU_A : return 2; } return smc->s.sas == SMT_SAS ? 
2 : 3; #endif } /* * return dynamic phy connection resource index */ static int phy_con_resource_index(struct s_smc *smc, int phy) { #ifdef CONCENTRATOR return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ; #else switch (smc->mib.fddiSMTCF_State) { case SC9_C_WRAP_A : return phy == PA ? 3 : 2; case SC10_C_WRAP_B : return phy == PA ? 1 : 3; case SC4_THRU_A : return phy == PA ? 3 : 1; case SC5_THRU_B : return phy == PA ? 2 : 3; case SC11_C_WRAP_S : return 2; } return phy; #endif } #ifdef CONCENTRATOR static int entity_to_index(struct s_smc *smc, int e) { if (e == ENTITY_MAC) return mac_index(smc, 1); else return phy_index(smc, e - ENTITY_PHY(0)); } #endif #ifdef LITTLE_ENDIAN static int smt_swap_short(u_short s) { return ((s>>8)&0xff) | ((s&0xff)<<8); } void smt_swap_para(struct smt_header *sm, int len, int direction) /* int direction; 0 encode 1 decode */ { struct smt_para *pa ; const struct smt_pdef *pd ; char *p ; int plen ; int type ; int i ; /* printf("smt_swap_para sm %x len %d dir %d\n", sm,len,direction) ; */ smt_string_swap((char *)sm,SWAP_SMTHEADER,len) ; /* swap args */ len -= sizeof(struct smt_header) ; p = (char *) (sm + 1) ; while (len > 0) { pa = (struct smt_para *) p ; plen = pa->p_len ; type = pa->p_type ; pa->p_type = smt_swap_short(pa->p_type) ; pa->p_len = smt_swap_short(pa->p_len) ; if (direction) { plen = pa->p_len ; type = pa->p_type ; } /* * note: paras can have 0 length ! 
*/ if (plen < 0) break ; plen += PARA_LEN ; for (i = N_SMT_PLEN, pd = smt_pdef; i ; i--,pd++) { if (pd->ptype == type) break ; } if (i && pd->pswap) { smt_string_swap(p+PARA_LEN,pd->pswap,len) ; } len -= plen ; p += plen ; } } static void smt_string_swap(char *data, const char *format, int len) { const char *open_paren = NULL ; int x ; while (len > 0 && *format) { switch (*format) { case '[' : open_paren = format ; break ; case ']' : format = open_paren ; break ; case '1' : case '2' : case '3' : case '4' : case '5' : case '6' : case '7' : case '8' : case '9' : data += *format - '0' ; len -= *format - '0' ; break ; case 'c': data++ ; len-- ; break ; case 's' : x = data[0] ; data[0] = data[1] ; data[1] = x ; data += 2 ; len -= 2 ; break ; case 'l' : x = data[0] ; data[0] = data[3] ; data[3] = x ; x = data[1] ; data[1] = data[2] ; data[2] = x ; data += 4 ; len -= 4 ; break ; } format++ ; } } #else void smt_swap_para(struct smt_header *sm, int len, int direction) /* int direction; 0 encode 1 decode */ { SK_UNUSED(sm) ; SK_UNUSED(len) ; SK_UNUSED(direction) ; } #endif /* * PMF actions */ int smt_action(struct s_smc *smc, int class, int code, int index) { int event ; int port ; DB_SMT("SMT: action %d code %d\n",class,code) ; switch(class) { case SMT_STATION_ACTION : switch(code) { case SMT_STATION_ACTION_CONNECT : smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ; queue_event(smc,EVENT_ECM,EC_CONNECT) ; break ; case SMT_STATION_ACTION_DISCONNECT : queue_event(smc,EVENT_ECM,EC_DISCONNECT) ; smc->mib.fddiSMTRemoteDisconnectFlag = TRUE ; RS_SET(smc,RS_DISCONNECT) ; AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_PATHTEST : AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_PATH_TEST, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_SELFTEST : AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) 
FDDI_REMOTE_SELF_TEST, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_DISABLE_A : if (smc->y[PA].pc_mode == PM_PEER) { RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+PA,PC_DISABLE) ; } break ; case SMT_STATION_ACTION_DISABLE_B : if (smc->y[PB].pc_mode == PM_PEER) { RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+PB,PC_DISABLE) ; } break ; case SMT_STATION_ACTION_DISABLE_M : for (port = 0 ; port < NUMPHYS ; port++) { if (smc->mib.p[port].fddiPORTMy_Type != TM) continue ; RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+port,PC_DISABLE) ; } break ; default : return 1; } break ; case SMT_PORT_ACTION : switch(code) { case SMT_PORT_ACTION_ENABLE : event = PC_ENABLE ; break ; case SMT_PORT_ACTION_DISABLE : event = PC_DISABLE ; break ; case SMT_PORT_ACTION_MAINT : event = PC_MAINT ; break ; case SMT_PORT_ACTION_START : event = PC_START ; break ; case SMT_PORT_ACTION_STOP : event = PC_STOP ; break ; default : return 1; } queue_event(smc,EVENT_PCM+index,event) ; break ; default : return 1; } return 0; } /* * canonical conversion of <len> bytes beginning form *data */ #ifdef USE_CAN_ADDR static void hwm_conv_can(struct s_smc *smc, char *data, int len) { int i ; SK_UNUSED(smc) ; for (i = len; i ; i--, data++) *data = bitrev8(*data); } #endif #endif /* no SLIM_SMT */
gpl-2.0
clessg/linux
arch/sh/drivers/pci/pci-dreamcast.c
13199
2479
/* * PCI support for the Sega Dreamcast * * Copyright (C) 2001, 2002 M. R. Brown * Copyright (C) 2002, 2003 Paul Mundt * * This file originally bore the message (with enclosed-$): * Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp * Dreamcast PCI: Supports SEGA Broadband Adaptor only. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> #include <asm/io.h> #include <asm/irq.h> #include <mach/pci.h> static struct resource gapspci_resources[] = { { .name = "GAPSPCI IO", .start = GAPSPCI_BBA_CONFIG, .end = GAPSPCI_BBA_CONFIG + GAPSPCI_BBA_CONFIG_SIZE - 1, .flags = IORESOURCE_IO, }, { .name = "GAPSPCI mem", .start = GAPSPCI_DMA_BASE, .end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct pci_channel dreamcast_pci_controller = { .pci_ops = &gapspci_pci_ops, .resources = gapspci_resources, .nr_resources = ARRAY_SIZE(gapspci_resources), .io_offset = 0x00000000, .mem_offset = 0x00000000, }; /* * gapspci init */ static int __init gapspci_init(void) { char idbuf[16]; int i; /* * FIXME: All of this wants documenting to some degree, * even some basic register definitions would be nice. * * I haven't seen anything this ugly since.. maple. 
*/ for (i=0; i<16; i++) idbuf[i] = inb(GAPSPCI_REGS+i); if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16)) return -ENODEV; outl(0x5a14a501, GAPSPCI_REGS+0x18); for (i=0; i<1000000; i++) cpu_relax(); if (inl(GAPSPCI_REGS+0x18) != 1) return -EINVAL; outl(0x01000000, GAPSPCI_REGS+0x20); outl(0x01000000, GAPSPCI_REGS+0x24); outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28); outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c); outl(1, GAPSPCI_REGS+0x14); outl(1, GAPSPCI_REGS+0x34); /* Setting Broadband Adapter */ outw(0xf900, GAPSPCI_BBA_CONFIG+0x06); outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30); outb(0x00, GAPSPCI_BBA_CONFIG+0x3c); outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d); outw(0x0006, GAPSPCI_BBA_CONFIG+0x04); outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10); outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14); return register_pci_controller(&dreamcast_pci_controller); } arch_initcall(gapspci_init);
gpl-2.0
sgs3/GT-I9300_Kernel
arch/mn10300/unit-asb2305/leds.c
13199
2960
/* ASB2305 Peripheral 7-segment LEDs x4 support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/intctl-regs.h> #include <asm/rtc-regs.h> #include <unit/leds.h> static const u8 asb2305_led_hex_tbl[16] = { 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0, 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c }; static const u32 asb2305_led_chase_tbl[6] = { ~0x02020202, /* top - segA */ ~0x04040404, /* right top - segB */ ~0x08080808, /* right bottom - segC */ ~0x10101010, /* bottom - segD */ ~0x20202020, /* left bottom - segE */ ~0x40404040, /* left top - segF */ }; static unsigned asb2305_led_chase; void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(val/1000) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/100) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/10) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[val % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(val/1000) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/100) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/10) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[val % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds_display_exception(enum exception_code code) { u32 leds; leds = asb2305_led_hex_tbl[(code/0x100) % 0x10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(code/0x10) % 0x10]; leds <<= 8; leds |= asb2305_led_hex_tbl[code % 0x10]; leds |= 
0x6d010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_minssecs(unsigned int time, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(time/600) % 6]; leds <<= 8; leds |= asb2305_led_hex_tbl[(time/60) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(time/10) % 6]; leds <<= 8; leds |= asb2305_led_hex_tbl[time % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_rtc(void) { unsigned int clock; u8 mins, secs; mins = RTMCR; secs = RTSCR; clock = ((mins & 0xf0) >> 4); clock *= 10; clock += (mins & 0x0f); clock *= 6; clock += ((secs & 0xf0) >> 4); clock *= 10; clock += (secs & 0x0f); peripheral_leds7x4_display_minssecs(clock, 0); } void peripheral_leds_led_chase(void) { ASB2305_7SEGLEDS = asb2305_led_chase_tbl[asb2305_led_chase]; asb2305_led_chase++; if (asb2305_led_chase >= 6) asb2305_led_chase = 0; }
gpl-2.0
AOSP-ZEUS/android_kernel_samsung_n1
arch/arm/mach-tegra/board-dt.c
400
3665
/* * nVidia Tegra device tree board support * * Copyright (C) 2010 Secret Lab Technologies, Ltd. * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pda_power.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-tegra.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/setup.h> #include <mach/iomap.h> #include <mach/irqs.h> #include "board.h" #include "board-harmony.h" #include "clock.h" #include "devices.h" void harmony_pinmux_init(void); void seaboard_pinmux_init(void); struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC1_BASE, "sdhci-tegra.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC2_BASE, "sdhci-tegra.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC3_BASE, "sdhci-tegra.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC4_BASE, "sdhci-tegra.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C_BASE, "tegra-i2c.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C2_BASE, "tegra-i2c.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C3_BASE, "tegra-i2c.2", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_DVC_BASE, 
"tegra-i2c.3", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.0", NULL), OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.1", NULL), OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL), {} }; static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = { /* name parent rate enabled */ { "uartd", "pll_p", 216000000, true }, { NULL, NULL, 0, 0}, }; static struct of_device_id tegra_dt_match_table[] __initdata = { { .compatible = "simple-bus", }, {} }; static struct of_device_id tegra_dt_gic_match[] __initdata = { { .compatible = "nvidia,tegra20-gic", }, {} }; static void __init tegra_dt_init(void) { struct device_node *node; node = of_find_matching_node_by_address(NULL, tegra_dt_gic_match, TEGRA_ARM_INT_DIST_BASE); if (node) irq_domain_add_simple(node, INT_GIC_BASE); tegra_clk_init_from_table(tegra_dt_clk_init_table); if (of_machine_is_compatible("nvidia,harmony")) harmony_pinmux_init(); else if (of_machine_is_compatible("nvidia,seaboard")) seaboard_pinmux_init(); /* * Finished with the static registrations now; fill in the missing * devices */ of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL); } static const char * tegra_dt_board_compat[] = { "nvidia,harmony", "nvidia,seaboard", NULL }; DT_MACHINE_START(TEGRA_DT, "nVidia Tegra (Flattened Device Tree)") .map_io = tegra_map_common_io, .init_early = tegra_init_early, .init_irq = tegra_init_irq, .timer = &tegra_timer, .init_machine = tegra_dt_init, .dt_compat = tegra_dt_board_compat, MACHINE_END
gpl-2.0
blitztech/master434
dep/mysqllite/strings/ctype-tis620.c
400
44166
/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Copyright (C) 2003 by Sathit Jittanupat <jsat66@hotmail.com,jsat66@yahoo.com> * solving bug crash with long text field string * sorting with different number of space or sign char. within string Copyright (C) 2001 by Korakot Chaovavanich <korakot@iname.com> and Apisilp Trunganont <apisilp@pantip.inet.co.th> Copyright (C) 1998, 1999 by Pruet Boonma <pruet@eng.cmu.ac.th> Copyright (C) 1998 by Theppitak Karoonboonyanan <thep@links.nectec.or.th> Copyright (C) 1989, 1991 by Samphan Raruenrom <samphan@thai.com> Permission to use, copy, modify, distribute and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies. Samphan Raruenrom , Theppitak Karoonboonyanan , Pruet Boonma , Korakot Chaovavanich and Apisilp Trunganont makes no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. */ /* This file is basicly tis620 character sets with some extra functions for tis-620 handling */ /* * This comment is parsed by configure to create ctype.c, * so don't change it unless you know what you are doing. * * .configure. 
strxfrm_multiply_tis620=4 */ #include <my_global.h> #include <my_sys.h> #include "m_string.h" #include "m_ctype.h" #include "t_ctype.h" #ifdef HAVE_CHARSET_tis620 #define BUFFER_MULTIPLY 4 #define M L_MIDDLE #define U L_UPPER #define L L_LOWER #define UU L_UPRUPR #define X L_MIDDLE static int t_ctype[][TOT_LEVELS] = { /*0x00*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x01*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x02*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x03*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x04*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x05*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x06*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x07*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x08*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x09*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0A*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0B*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0C*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0D*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0E*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x0F*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x10*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x11*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x12*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x13*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x14*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x15*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x16*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x17*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x18*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x19*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1A*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1B*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1C*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1D*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1E*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x1F*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x20*/ { IGNORE, IGNORE, L3_SPACE, IGNORE, M}, /*0x21*/ { IGNORE, IGNORE, L3_EXCLAMATION, IGNORE, M }, /*0x22*/ { IGNORE, IGNORE, 
L3_QUOTATION, IGNORE, M }, /*0x23*/ { IGNORE, IGNORE, L3_NUMBER, IGNORE, M }, /*0x24*/ { IGNORE, IGNORE, L3_DOLLAR, IGNORE, M }, /*0x25*/ { IGNORE, IGNORE, L3_PERCENT, IGNORE, M }, /*0x26*/ { IGNORE, IGNORE, L3_AMPERSAND, IGNORE, M }, /*0x27*/ { IGNORE, IGNORE, L3_APOSTROPHE, IGNORE, M }, /*0x28*/ { IGNORE, IGNORE, L3_L_PARANTHESIS, IGNORE, M }, /*0x29*/ { IGNORE, IGNORE, L3_R_PARENTHESIS, IGNORE, M }, /*0x2A*/ { IGNORE, IGNORE, L3_ASTERISK, IGNORE, M }, /*0x2B*/ { IGNORE, IGNORE, L3_PLUS, IGNORE, M }, /*0x2C*/ { IGNORE, IGNORE, L3_COMMA, IGNORE, M }, /*0x2D*/ { IGNORE, IGNORE, L3_HYPHEN, IGNORE, M }, /*0x2E*/ { IGNORE, IGNORE, L3_FULL_STOP, IGNORE, M }, /*0x2F*/ { IGNORE, IGNORE, L3_SOLIDUS, IGNORE, M }, /*0x30*/ { L1_08, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x31*/ { L1_18, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x32*/ { L1_28, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x33*/ { L1_38, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x34*/ { L1_48, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x35*/ { L1_58, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x36*/ { L1_68, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x37*/ { L1_78, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x38*/ { L1_88, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x39*/ { L1_98, L2_BLANK, L3_BLANK, L4_BLANK, M }, /*0x3A*/ { IGNORE, IGNORE, L3_COLON, IGNORE, M }, /*0x3B*/ { IGNORE, IGNORE, L3_SEMICOLON, IGNORE, M }, /*0x3C*/ { IGNORE, IGNORE, L3_LESS_THAN, IGNORE, M }, /*0x3D*/ { IGNORE, IGNORE, L3_EQUAL, IGNORE, M }, /*0x3E*/ { IGNORE, IGNORE, L3_GREATER_THAN, IGNORE, M }, /*0x3F*/ { IGNORE, IGNORE, L3_QUESTION, IGNORE, M }, /*0x40*/ { IGNORE, IGNORE, L3_AT, IGNORE, M }, /*0x41*/ { L1_A8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x42*/ { L1_B8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x43*/ { L1_C8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x44*/ { L1_D8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x45*/ { L1_E8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x46*/ { L1_F8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x47*/ { L1_G8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x48*/ { L1_H8, L2_BLANK, 
L3_BLANK, L4_CAP, M }, /*0x49*/ { L1_I8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4A*/ { L1_J8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4B*/ { L1_K8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4C*/ { L1_L8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4D*/ { L1_M8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4E*/ { L1_N8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x4F*/ { L1_O8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x50*/ { L1_P8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x51*/ { L1_Q8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x52*/ { L1_R8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x53*/ { L1_S8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x54*/ { L1_T8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x55*/ { L1_U8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x56*/ { L1_V8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x57*/ { L1_W8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x58*/ { L1_X8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x59*/ { L1_Y8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x5A*/ { L1_Z8, L2_BLANK, L3_BLANK, L4_CAP, M }, /*0x5B*/ { IGNORE, IGNORE, L3_L_BRACKET, IGNORE, M }, /*0x5C*/ { IGNORE, IGNORE, L3_BK_SOLIDUS, IGNORE, M }, /*0x5D*/ { IGNORE, IGNORE, L3_R_BRACKET, IGNORE, M }, /*0x5E*/ { IGNORE, IGNORE, L3_CIRCUMFLEX, IGNORE, M }, /*0x5F*/ { IGNORE, IGNORE, L3_LOW_LINE, IGNORE, M }, /*0x60*/ { IGNORE, IGNORE, L3_GRAVE, IGNORE, M }, /*0x61*/ { L1_A8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x62*/ { L1_B8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x63*/ { L1_C8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x64*/ { L1_D8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x65*/ { L1_E8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x66*/ { L1_F8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x67*/ { L1_G8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x68*/ { L1_H8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x69*/ { L1_I8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6A*/ { L1_J8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6B*/ { L1_K8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6C*/ { L1_L8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6D*/ { L1_M8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6E*/ { L1_N8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x6F*/ { L1_O8, 
L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x70*/ { L1_P8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x71*/ { L1_Q8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x72*/ { L1_R8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x73*/ { L1_S8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x74*/ { L1_T8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x75*/ { L1_U8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x76*/ { L1_V8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x77*/ { L1_W8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x78*/ { L1_X8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x79*/ { L1_Y8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x7A*/ { L1_Z8, L2_BLANK, L3_BLANK, L4_MIN, M }, /*0x7B*/ { IGNORE, IGNORE, L3_L_BRACE, IGNORE, M }, /*0x7C*/ { IGNORE, IGNORE, L3_V_LINE, IGNORE, M }, /*0x7D*/ { IGNORE, IGNORE, L3_R_BRACE, IGNORE, M }, /*0x7E*/ { IGNORE, IGNORE, L3_TILDE, IGNORE, M }, /*0x7F*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x80*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x81*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x82*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x83*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x84*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x85*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x86*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x87*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x88*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x89*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8A*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8B*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8C*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8D*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8E*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x8F*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x90*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x91*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x92*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x93*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x94*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x95*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x96*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x97*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, 
/*0x98*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x99*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9A*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9B*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9C*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9D*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9E*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0x9F*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xA0*/ { IGNORE, IGNORE, L3_NB_SACE, IGNORE, X }, /*0xA1*/ { L1_KO_KAI, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA2*/ { L1_KHO_KHAI, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA3*/ { L1_KHO_KHUAT, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA4*/ { L1_KHO_KHWAI, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA5*/ { L1_KHO_KHON, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA6*/ { L1_KHO_RAKHANG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA7*/ { L1_NGO_NGU, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA8*/ { L1_CHO_CHAN, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xA9*/ { L1_CHO_CHING, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAA*/ { L1_CHO_CHANG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAB*/ { L1_SO_SO, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAC*/ { L1_CHO_CHOE, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAD*/ { L1_YO_YING, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAE*/ { L1_DO_CHADA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xAF*/ { L1_TO_PATAK, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB0*/ { L1_THO_THAN, L2_BLANK,L3_BLANK, L4_BLANK, M | _consnt}, /*0xB1*/ { L1_THO_NANGMONTHO, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB2*/ { L1_THO_PHUTHAO, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB3*/ { L1_NO_NEN, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB4*/ { L1_DO_DEK, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB5*/ { L1_TO_TAO, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB6*/ { L1_THO_THUNG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB7*/ { L1_THO_THAHAN, L2_BLANK, L3_BLANK, L4_BLANK, M | 
_consnt}, /*0xB8*/ { L1_THO_THONG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xB9*/ { L1_NO_NU, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBA*/ { L1_BO_BAIMAI, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBB*/ { L1_PO_PLA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBC*/ { L1_PHO_PHUNG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBD*/ { L1_FO_FA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBE*/ { L1_PHO_PHAN, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xBF*/ { L1_FO_FAN, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC0*/ { L1_PHO_SAMPHAO, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC1*/ { L1_MO_MA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC2*/ { L1_YO_YAK, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC3*/ { L1_RO_RUA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC4*/ { L1_RU, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC5*/ { L1_LO_LING, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC6*/ { L1_LU, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC7*/ { L1_WO_WAEN, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC8*/ { L1_SO_SALA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xC9*/ { L1_SO_RUSI, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCA*/ { L1_SO_SUA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCB*/ { L1_HO_HIP, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCC*/ { L1_LO_CHULA, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCD*/ { L1_O_ANG, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCE*/ { L1_HO_NOKHUK, L2_BLANK, L3_BLANK, L4_BLANK, M | _consnt}, /*0xCF*/ { IGNORE, IGNORE, L3_PAIYAN_NOI, IGNORE, M}, /*0xD0*/ { L1_SARA_A, L2_BLANK, L3_BLANK, L4_BLANK, M | _fllwvowel}, /*0xD1*/ { L1_MAI_HAN_AKAT, L2_BLANK, L3_BLANK, L4_BLANK, U | _uprvowel}, /*0xD2*/ { L1_SARA_AA, L2_BLANK, L3_BLANK, L4_BLANK, M | _fllwvowel}, /*0xD3*/ { L1_SARA_AM, L2_BLANK, L3_BLANK, L4_BLANK, M | _fllwvowel}, /*0xD4*/ { L1_SARA_I, L2_BLANK, L3_BLANK, L4_BLANK, U | _uprvowel}, /*0xD5*/ { L1_SARA_II, L2_BLANK, L3_BLANK, L4_BLANK, 
U | _uprvowel}, /*0xD6*/ { L1_SARA_UE, L2_BLANK, L3_BLANK, L4_BLANK, U | _uprvowel}, /*0xD7*/ { L1_SARA_UEE, L2_BLANK, L3_BLANK, L4_BLANK, U | _uprvowel}, /*0xD8*/ { L1_SARA_U, L2_BLANK, L3_BLANK, L4_BLANK, L | _lwrvowel}, /*0xD9*/ { L1_SARA_UU, L2_BLANK, L3_BLANK, L4_BLANK, L | _lwrvowel}, /*0xDA*/ { IGNORE, L2_PINTHU, L3_BLANK, L4_BLANK, L }, /*0xDB*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xDC*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xDD*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xDE*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xDF*/ { IGNORE, IGNORE, L3_BAHT, IGNORE, M}, /*0xE0*/ { L1_SARA_E, L2_BLANK, L3_BLANK, L4_BLANK, M | _ldvowel }, /*0xE1*/ { L1_SARA_AE, L2_BLANK, L3_BLANK, L4_BLANK, M | _ldvowel }, /*0xE2*/ { L1_SARA_O, L2_BLANK, L3_BLANK, L4_BLANK, M | _ldvowel }, /*0xE3*/ { L1_SARA_AI_MAIMUAN, L2_BLANK, L3_BLANK, L4_BLANK, M | _ldvowel }, /*0xE4*/ { L1_SARA_AI_MAIMALAI, L2_BLANK, L3_BLANK, L4_BLANK, M | _ldvowel }, /*0xE5*/ { L1_SARA_AA, L2_BLANK, L3_BLANK, L4_EXT, M | _fllwvowel }, /*0xE6*/ { IGNORE, IGNORE, L3_MAI_YAMOK, IGNORE, M | _stone }, /*0xE7*/ { IGNORE, L2_TYKHU, L3_BLANK, L4_BLANK, U | _diacrt1 | _stone }, /*0xE8*/ { IGNORE, L2_TONE1, L3_BLANK, L4_BLANK, UU | _tone | _combine | _stone }, /*0xE9*/ { IGNORE, L2_TONE2, L3_BLANK, L4_BLANK, UU | _tone | _combine | _stone }, /*0xEA*/ { IGNORE, L2_TONE3, L3_BLANK, L4_BLANK, UU | _tone | _combine | _stone }, /*0xEB*/ { IGNORE, L2_TONE4, L3_BLANK, L4_BLANK, UU | _tone | _combine | _stone }, /*0xEC*/ { IGNORE, L2_GARAN, L3_BLANK, L4_BLANK, UU | _diacrt2 | _combine | _stone }, /*0xED*/ { L1_NKHIT, L2_BLANK, L3_BLANK, L4_BLANK, U | _diacrt1 }, /*0xEE*/ { IGNORE, L2_YAMAK, L3_BLANK, L4_BLANK, U | _diacrt1 }, /*0xEF*/ { IGNORE, IGNORE, L3_FONGMAN, IGNORE, M }, /*0xF0*/ { L1_08, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF1*/ { L1_18, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF2*/ { L1_28, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF3*/ { L1_38, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, 
/*0xF4*/ { L1_48, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF5*/ { L1_58, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF6*/ { L1_68, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF7*/ { L1_78, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF8*/ { L1_88, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xF9*/ { L1_98, L2_THAII, L3_BLANK, L4_BLANK, M | _tdig }, /*0xFA*/ { IGNORE, IGNORE, L3_ANGKHANKHU, IGNORE, X }, /*0xFB*/ { IGNORE, IGNORE, L3_KHOMUT, IGNORE, X }, /*0xFC*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xFD*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /*0xFE*/ { IGNORE, IGNORE, IGNORE, IGNORE, X }, /* Utilize 0xFF for max_sort_chr in my_like_range_tis620 */ /*0xFF*/ { 255 /*IGNORE*/, IGNORE, IGNORE, IGNORE, X }, }; static uchar ctype_tis620[257] = { 0, /* For standard library */ 32,32,32,32,32,32,32,32,32,40,40,40,40,40,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 72,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16, 132,132,132,132,132,132,132,132,132,132,16,16,16,16,16,16, 16,129,129,129,129,129,129,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,16,16,16,16,16, 16,130,130,130,130,130,130,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,16,16,16,16,32, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, }; static uchar to_lower_tis620[]= { '\000','\001','\002','\003','\004','\005','\006','\007', '\010','\011','\012','\013','\014','\015','\016','\017', '\020','\021','\022','\023','\024','\025','\026','\027', '\030','\031','\032','\033','\034','\035','\036','\037', ' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 
'[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '\177', (uchar) '\200',(uchar) '\201',(uchar) '\202',(uchar) '\203',(uchar) '\204',(uchar) '\205',(uchar) '\206',(uchar) '\207', (uchar) '\210',(uchar) '\211',(uchar) '\212',(uchar) '\213',(uchar) '\214',(uchar) '\215',(uchar) '\216',(uchar) '\217', (uchar) '\220',(uchar) '\221',(uchar) '\222',(uchar) '\223',(uchar) '\224',(uchar) '\225',(uchar) '\226',(uchar) '\227', (uchar) '\230',(uchar) '\231',(uchar) '\232',(uchar) '\233',(uchar) '\234',(uchar) '\235',(uchar) '\236',(uchar) '\237', (uchar) '\240',(uchar) '\241',(uchar) '\242',(uchar) '\243',(uchar) '\244',(uchar) '\245',(uchar) '\246',(uchar) '\247', (uchar) '\250',(uchar) '\251',(uchar) '\252',(uchar) '\253',(uchar) '\254',(uchar) '\255',(uchar) '\256',(uchar) '\257', (uchar) '\260',(uchar) '\261',(uchar) '\262',(uchar) '\263',(uchar) '\264',(uchar) '\265',(uchar) '\266',(uchar) '\267', (uchar) '\270',(uchar) '\271',(uchar) '\272',(uchar) '\273',(uchar) '\274',(uchar) '\275',(uchar) '\276',(uchar) '\277', (uchar) '\300',(uchar) '\301',(uchar) '\302',(uchar) '\303',(uchar) '\304',(uchar) '\305',(uchar) '\306',(uchar) '\307', (uchar) '\310',(uchar) '\311',(uchar) '\312',(uchar) '\313',(uchar) '\314',(uchar) '\315',(uchar) '\316',(uchar) '\317', (uchar) '\320',(uchar) '\321',(uchar) '\322',(uchar) '\323',(uchar) '\324',(uchar) '\325',(uchar) '\326',(uchar) '\327', (uchar) '\330',(uchar) '\331',(uchar) '\332',(uchar) '\333',(uchar) '\334',(uchar) '\335',(uchar) '\336',(uchar) '\337', (uchar) '\340',(uchar) '\341',(uchar) '\342',(uchar) '\343',(uchar) '\344',(uchar) '\345',(uchar) '\346',(uchar) '\347', (uchar) '\350',(uchar) '\351',(uchar) '\352',(uchar) '\353',(uchar) '\354',(uchar) '\355',(uchar) '\356',(uchar) '\357', (uchar) '\360',(uchar) '\361',(uchar) '\362',(uchar) '\363',(uchar) '\364',(uchar) '\365',(uchar) '\366',(uchar) 
'\367', (uchar) '\370',(uchar) '\371',(uchar) '\372',(uchar) '\373',(uchar) '\374',(uchar) '\375',(uchar) '\376',(uchar) '\377', }; static uchar to_upper_tis620[]= { '\000','\001','\002','\003','\004','\005','\006','\007', '\010','\011','\012','\013','\014','\015','\016','\017', '\020','\021','\022','\023','\024','\025','\026','\027', '\030','\031','\032','\033','\034','\035','\036','\037', ' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '|', '}', '~', '\177', (uchar) '\200',(uchar) '\201',(uchar) '\202',(uchar) '\203',(uchar) '\204',(uchar) '\205',(uchar) '\206',(uchar) '\207', (uchar) '\210',(uchar) '\211',(uchar) '\212',(uchar) '\213',(uchar) '\214',(uchar) '\215',(uchar) '\216',(uchar) '\217', (uchar) '\220',(uchar) '\221',(uchar) '\222',(uchar) '\223',(uchar) '\224',(uchar) '\225',(uchar) '\226',(uchar) '\227', (uchar) '\230',(uchar) '\231',(uchar) '\232',(uchar) '\233',(uchar) '\234',(uchar) '\235',(uchar) '\236',(uchar) '\237', (uchar) '\240',(uchar) '\241',(uchar) '\242',(uchar) '\243',(uchar) '\244',(uchar) '\245',(uchar) '\246',(uchar) '\247', (uchar) '\250',(uchar) '\251',(uchar) '\252',(uchar) '\253',(uchar) '\254',(uchar) '\255',(uchar) '\256',(uchar) '\257', (uchar) '\260',(uchar) '\261',(uchar) '\262',(uchar) '\263',(uchar) '\264',(uchar) '\265',(uchar) '\266',(uchar) '\267', (uchar) '\270',(uchar) '\271',(uchar) '\272',(uchar) '\273',(uchar) '\274',(uchar) '\275',(uchar) '\276',(uchar) '\277', (uchar) '\300',(uchar) '\301',(uchar) '\302',(uchar) '\303',(uchar) '\304',(uchar) '\305',(uchar) '\306',(uchar) '\307', (uchar) '\310',(uchar) 
'\311',(uchar) '\312',(uchar) '\313',(uchar) '\314',(uchar) '\315',(uchar) '\316',(uchar) '\317', (uchar) '\320',(uchar) '\321',(uchar) '\322',(uchar) '\323',(uchar) '\324',(uchar) '\325',(uchar) '\326',(uchar) '\327', (uchar) '\330',(uchar) '\331',(uchar) '\332',(uchar) '\333',(uchar) '\334',(uchar) '\335',(uchar) '\336',(uchar) '\337', (uchar) '\340',(uchar) '\341',(uchar) '\342',(uchar) '\343',(uchar) '\344',(uchar) '\345',(uchar) '\346',(uchar) '\347', (uchar) '\350',(uchar) '\351',(uchar) '\352',(uchar) '\353',(uchar) '\354',(uchar) '\355',(uchar) '\356',(uchar) '\357', (uchar) '\360',(uchar) '\361',(uchar) '\362',(uchar) '\363',(uchar) '\364',(uchar) '\365',(uchar) '\366',(uchar) '\367', (uchar) '\370',(uchar) '\371',(uchar) '\372',(uchar) '\373',(uchar) '\374',(uchar) '\375',(uchar) '\376',(uchar) '\377', }; static uchar sort_order_tis620[]= { '\000','\001','\002','\003','\004','\005','\006','\007', '\010','\011','\012','\013','\014','\015','\016','\017', '\020','\021','\022','\023','\024','\025','\026','\027', '\030','\031','\032','\033','\034','\035','\036','\037', ' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '\\', ']', '[', '^', '_', 'E', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '|', '}', 'Y', '\177', (uchar) '\200',(uchar) '\201',(uchar) '\202',(uchar) '\203',(uchar) '\204',(uchar) '\205',(uchar) '\206',(uchar) '\207', (uchar) '\210',(uchar) '\211',(uchar) '\212',(uchar) '\213',(uchar) '\214',(uchar) '\215',(uchar) '\216',(uchar) '\217', (uchar) '\220',(uchar) '\221',(uchar) '\222',(uchar) '\223',(uchar) '\224',(uchar) '\225',(uchar) '\226',(uchar) '\227', (uchar) '\230',(uchar) '\231',(uchar) '\232',(uchar) 
'\233',(uchar) '\234',(uchar) '\235',(uchar) '\236',(uchar) '\237', (uchar) '\240',(uchar) '\241',(uchar) '\242',(uchar) '\243',(uchar) '\244',(uchar) '\245',(uchar) '\246',(uchar) '\247', (uchar) '\250',(uchar) '\251',(uchar) '\252',(uchar) '\253',(uchar) '\254',(uchar) '\255',(uchar) '\256',(uchar) '\257', (uchar) '\260',(uchar) '\261',(uchar) '\262',(uchar) '\263',(uchar) '\264',(uchar) '\265',(uchar) '\266',(uchar) '\267', (uchar) '\270',(uchar) '\271',(uchar) '\272',(uchar) '\273',(uchar) '\274',(uchar) '\275',(uchar) '\276',(uchar) '\277', (uchar) '\300',(uchar) '\301',(uchar) '\302',(uchar) '\303',(uchar) '\304',(uchar) '\305',(uchar) '\306',(uchar) '\307', (uchar) '\310',(uchar) '\311',(uchar) '\312',(uchar) '\313',(uchar) '\314',(uchar) '\315',(uchar) '\316',(uchar) '\317', (uchar) '\320',(uchar) '\321',(uchar) '\322',(uchar) '\323',(uchar) '\324',(uchar) '\325',(uchar) '\326',(uchar) '\327', (uchar) '\330',(uchar) '\331',(uchar) '\332',(uchar) '\333',(uchar) '\334',(uchar) '\335',(uchar) '\336',(uchar) '\337', (uchar) '\340',(uchar) '\341',(uchar) '\342',(uchar) '\343',(uchar) '\344',(uchar) '\345',(uchar) '\346',(uchar) '\347', (uchar) '\350',(uchar) '\351',(uchar) '\352',(uchar) '\353',(uchar) '\354',(uchar) '\355',(uchar) '\356',(uchar) '\357', (uchar) '\360',(uchar) '\361',(uchar) '\362',(uchar) '\363',(uchar) '\364',(uchar) '\365',(uchar) '\366',(uchar) '\367', (uchar) '\370',(uchar) '\371',(uchar) '\372',(uchar) '\373',(uchar) '\374',(uchar) '\375',(uchar) '\376',(uchar) '\377', }; /* Convert thai string to "Standard C String Function" sortable string SYNOPSIS thai2sortable() tstr String to convert. 
Does not have to end with \0
    len	Length of tstr
*/

/*
  Convert a TIS-620 Thai string in place into a form comparable with the
  standard C string functions (strcmp et al.).

  Visible transformations performed on the buffer:
   - a leading vowel followed by a consonant is swapped with that consonant;
   - a level-2 character (anything with t_ctype[c][1] >= L2_GARAN, i.e.
     tone marks, GARAN, TYKHU) is removed from its position via memmove and
     re-inserted at tstr[len-1] with a position-dependent weight (l2bias);
   - non-Thai bytes are mapped through to_lower_tis620[].

  NOTE(review): the function always returns the original 'len' unchanged,
  even though callers assign the result back as a length.
*/
static size_t thai2sortable(uchar *tstr, size_t len)
{
  uchar *p;
  int tlen;
  uchar l2bias;

  tlen= len;
  l2bias= 256 - 8;
  for (p= tstr; tlen > 0; p++, tlen--)
  {
    uchar c= *p;

    if (isthai(c))
    {
      int *t_ctype0= t_ctype[c];

      if (isconsnt(c))
        l2bias -= 8;

      if (isldvowel(c) && tlen != 1 && isconsnt(p[1]))
      {
        /* simply swap between leading-vowel and consonant */
        *p= p[1];
        p[1]= c;
        tlen--;
        p++;
        continue;
      }

      /* if found level 2 char (L2_GARAN,L2_TONE*,L2_TYKHU) move to last */
      if (t_ctype0[1] >= L2_GARAN)
      {
        /*
          l2bias is used to control the position weight of the l2 char:
          e.g. (*=l2char) XX*X must come before X*XX.
        */
        memmove((char*) p, (char*) (p+1), tlen-1);
        tstr[len-1]= l2bias + t_ctype0[1]- L2_GARAN +1;
        p--;
        continue;
      }
    }
    else
    {
      l2bias-= 8;
      *p= to_lower_tis620[c];
    }
  }
  return len;
}


/*
  strncoll() replacement: compare 2 strings, both converted to sortable
  form via thai2sortable() first.

  NOTE:
    We can't cut strings at the terminating \0 as this would break
    comparison with LIKE characters, where the min range is stored as \0.

  Arg: 2 strings and their compare lengths; s2_is_prefix limits len1 to len2
  Ret: strcmp() result on the converted copies
*/
static int my_strnncoll_tis620(CHARSET_INFO *cs __attribute__((unused)),
                               const uchar *s1, size_t len1,
                               const uchar *s2, size_t len2,
                               my_bool s2_is_prefix)
{
  uchar buf[80] ;
  uchar *tc1, *tc2;
  int i;

  if (s2_is_prefix && len1 > len2)
    len1= len2;

  /* Both copies share one allocation: tc1[len1+1] then tc2[len2+1]. */
  tc1= buf;
  if ((len1 + len2 +2) > (int) sizeof(buf))
    tc1= (uchar*) my_str_malloc(len1+len2+2);
  tc2= tc1 + len1+1;
  memcpy((char*) tc1, (char*) s1, len1);
  tc1[len1]= 0;        /* if length(s1)> len1, need to put 'end of string' */
  memcpy((char *)tc2, (char *)s2, len2);
  tc2[len2]= 0;        /* put end of string */
  thai2sortable(tc1, len1);
  thai2sortable(tc2, len2);
  i= strcmp((char*)tc1, (char*)tc2);
  if (tc1 != buf)
    my_str_free(tc1);
  return i;
}


/*
  Space-padded comparison: like my_strnncoll_tis620 but trailing spaces are
  ignored unless diff_if_only_endspace_difference is honoured (see #ifndef
  below, which forces it off when the build macro is not defined).
*/
static int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)),
                                 const uchar *a0, size_t a_length,
                                 const uchar *b0, size_t b_length,
                                 my_bool diff_if_only_endspace_difference)
{
  uchar buf[80], *end, *a, *b, *alloced= NULL;
  size_t length;
  int res= 0;

#ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
  diff_if_only_endspace_difference= 0;
#endif

  /* Single allocation holding both NUL-terminated working copies. */
  a= buf;
  if ((a_length + b_length +2) > (int) sizeof(buf))
    alloced= a= (uchar*) my_str_malloc(a_length+b_length+2);

  b= a + a_length+1;
  memcpy((char*) a, (char*) a0, a_length);
  a[a_length]= 0;      /* if length(a0)> len1, need to put 'end of string' */
  memcpy((char *)b, (char *)b0, b_length);
  b[b_length]= 0;      /* put end of string */
  a_length= thai2sortable(a, a_length);
  b_length= thai2sortable(b, b_length);

  /* Compare the common prefix byte by byte. */
  end= a + (length= min(a_length, b_length));
  while (a < end)
  {
    if (*a++ != *b++)
    {
      res= ((int) a[-1] - (int) b[-1]);
      goto ret;
    }
  }
  if (a_length != b_length)
  {
    int swap= 1;
    if (diff_if_only_endspace_difference)
      res= 1;                                   /* Assume 'a' is bigger */
    /*
      Check the next not-space character of the longer key. If it's < ' ',
      then it's smaller than the other key.
    */
    if (a_length < b_length)
    {
      /* put shorter key in a */
      a_length= b_length;
      a= b;
      swap= -1;                                 /* swap sign of result */
      res= -res;
    }
    for (end= a + a_length-length; a < end ; a++)
    {
      if (*a != ' ')
      {
        res= (*a < ' ') ? -swap : swap;
        goto ret;
      }
    }
  }

ret:
  if (alloced)
    my_str_free(alloced);
  return res;
}


/*
  strnxfrm replacement: convert a Thai string to its sortable form in dest,
  space-padding the remainder of the destination buffer.

  Arg: destination buffer, dest length, source string, source length
  Ret: dstlen (the full destination length, including padding)
*/
static size_t my_strnxfrm_tis620(CHARSET_INFO *cs __attribute__((unused)),
                                 uchar *dest, size_t len,
                                 const uchar *src, size_t srclen)
{
  size_t dstlen= len;
  len= (size_t) (strmake((char*) dest, (char*) src, min(len, srclen)) -
                 (char*) dest);
  len= thai2sortable(dest, len);
  if (dstlen > len)
    bfill(dest + len, dstlen - len, ' ');
  return dstlen;
}


/*
  TIS-620 byte -> Unicode code point. 0x00-0x7F is ASCII-identity,
  0x80-0x9F passes through (C1 range), 0xA0 maps to U+FFFD, and
  0xA1-0xFB map into the Thai block U+0E01-U+0E5B.
*/
static unsigned short cs_to_uni[256]={
0x0000,0x0001,0x0002,0x0003,0x0004,0x0005,0x0006,0x0007,
0x0008,0x0009,0x000A,0x000B,0x000C,0x000D,0x000E,0x000F,
0x0010,0x0011,0x0012,0x0013,0x0014,0x0015,0x0016,0x0017,
0x0018,0x0019,0x001A,0x001B,0x001C,0x001D,0x001E,0x001F,
0x0020,0x0021,0x0022,0x0023,0x0024,0x0025,0x0026,0x0027,
0x0028,0x0029,0x002A,0x002B,0x002C,0x002D,0x002E,0x002F,
0x0030,0x0031,0x0032,0x0033,0x0034,0x0035,0x0036,0x0037,
0x0038,0x0039,0x003A,0x003B,0x003C,0x003D,0x003E,0x003F,
0x0040,0x0041,0x0042,0x0043,0x0044,0x0045,0x0046,0x0047,
0x0048,0x0049,0x004A,0x004B,0x004C,0x004D,0x004E,0x004F,
0x0050,0x0051,0x0052,0x0053,0x0054,0x0055,0x0056,0x0057,
0x0058,0x0059,0x005A,0x005B,0x005C,0x005D,0x005E,0x005F,
0x0060,0x0061,0x0062,0x0063,0x0064,0x0065,0x0066,0x0067,
0x0068,0x0069,0x006A,0x006B,0x006C,0x006D,0x006E,0x006F,
0x0070,0x0071,0x0072,0x0073,0x0074,0x0075,0x0076,0x0077,
0x0078,0x0079,0x007A,0x007B,0x007C,0x007D,0x007E,0x007F,
0x0080,0x0081,0x0082,0x0083,0x0084,0x0085,0x0086,0x0087,
0x0088,0x0089,0x008A,0x008B,0x008C,0x008D,0x008E,0x008F,
0x0090,0x0091,0x0092,0x0093,0x0094,0x0095,0x0096,0x0097,
0x0098,0x0099,0x009A,0x009B,0x009C,0x009D,0x009E,0x009F,
0xFFFD,0x0E01,0x0E02,0x0E03,0x0E04,0x0E05,0x0E06,0x0E07,
0x0E08,0x0E09,0x0E0A,0x0E0B,0x0E0C,0x0E0D,0x0E0E,0x0E0F,
0x0E10,0x0E11,0x0E12,0x0E13,0x0E14,0x0E15,0x0E16,0x0E17,
0x0E18,0x0E19,0x0E1A,0x0E1B,0x0E1C,0x0E1D,0x0E1E,0x0E1F,
0x0E20,0x0E21,0x0E22,0x0E23,0x0E24,0x0E25,0x0E26,0x0E27, 0x0E28,0x0E29,0x0E2A,0x0E2B,0x0E2C,0x0E2D,0x0E2E,0x0E2F, 0x0E30,0x0E31,0x0E32,0x0E33,0x0E34,0x0E35,0x0E36,0x0E37, 0x0E38,0x0E39,0x0E3A,0xFFFD,0xFFFD,0xFFFD,0xFFFD,0x0E3F, 0x0E40,0x0E41,0x0E42,0x0E43,0x0E44,0x0E45,0x0E46,0x0E47, 0x0E48,0x0E49,0x0E4A,0x0E4B,0x0E4C,0x0E4D,0x0E4E,0x0E4F, 0x0E50,0x0E51,0x0E52,0x0E53,0x0E54,0x0E55,0x0E56,0x0E57, 0x0E58,0x0E59,0x0E5A,0x0E5B,0xFFFD,0xFFFD,0xFFFD,0xFFFD }; static uchar pl00[256]={ 0x0000,0x0001,0x0002,0x0003,0x0004,0x0005,0x0006,0x0007, 0x0008,0x0009,0x000A,0x000B,0x000C,0x000D,0x000E,0x000F, 0x0010,0x0011,0x0012,0x0013,0x0014,0x0015,0x0016,0x0017, 0x0018,0x0019,0x001A,0x001B,0x001C,0x001D,0x001E,0x001F, 0x0020,0x0021,0x0022,0x0023,0x0024,0x0025,0x0026,0x0027, 0x0028,0x0029,0x002A,0x002B,0x002C,0x002D,0x002E,0x002F, 0x0030,0x0031,0x0032,0x0033,0x0034,0x0035,0x0036,0x0037, 0x0038,0x0039,0x003A,0x003B,0x003C,0x003D,0x003E,0x003F, 0x0040,0x0041,0x0042,0x0043,0x0044,0x0045,0x0046,0x0047, 0x0048,0x0049,0x004A,0x004B,0x004C,0x004D,0x004E,0x004F, 0x0050,0x0051,0x0052,0x0053,0x0054,0x0055,0x0056,0x0057, 0x0058,0x0059,0x005A,0x005B,0x005C,0x005D,0x005E,0x005F, 0x0060,0x0061,0x0062,0x0063,0x0064,0x0065,0x0066,0x0067, 0x0068,0x0069,0x006A,0x006B,0x006C,0x006D,0x006E,0x006F, 0x0070,0x0071,0x0072,0x0073,0x0074,0x0075,0x0076,0x0077, 0x0078,0x0079,0x007A,0x007B,0x007C,0x007D,0x007E,0x007F, 0x0080,0x0081,0x0082,0x0083,0x0084,0x0085,0x0086,0x0087, 0x0088,0x0089,0x008A,0x008B,0x008C,0x008D,0x008E,0x008F, 0x0090,0x0091,0x0092,0x0093,0x0094,0x0095,0x0096,0x0097, 0x0098,0x0099,0x009A,0x009B,0x009C,0x009D,0x009E,0x009F, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 
0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000 }; static uchar pl0E[256]={ 0x0000,0x00A1,0x00A2,0x00A3,0x00A4,0x00A5,0x00A6,0x00A7, 0x00A8,0x00A9,0x00AA,0x00AB,0x00AC,0x00AD,0x00AE,0x00AF, 0x00B0,0x00B1,0x00B2,0x00B3,0x00B4,0x00B5,0x00B6,0x00B7, 0x00B8,0x00B9,0x00BA,0x00BB,0x00BC,0x00BD,0x00BE,0x00BF, 0x00C0,0x00C1,0x00C2,0x00C3,0x00C4,0x00C5,0x00C6,0x00C7, 0x00C8,0x00C9,0x00CA,0x00CB,0x00CC,0x00CD,0x00CE,0x00CF, 0x00D0,0x00D1,0x00D2,0x00D3,0x00D4,0x00D5,0x00D6,0x00D7, 0x00D8,0x00D9,0x00DA,0x0000,0x0000,0x0000,0x0000,0x00DF, 0x00E0,0x00E1,0x00E2,0x00E3,0x00E4,0x00E5,0x00E6,0x00E7, 0x00E8,0x00E9,0x00EA,0x00EB,0x00EC,0x00ED,0x00EE,0x00EF, 0x00F0,0x00F1,0x00F2,0x00F3,0x00F4,0x00F5,0x00F6,0x00F7, 0x00F8,0x00F9,0x00FA,0x00FB,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 
0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000 }; static uchar plFF[256]={ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000, 
0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
0x0000,0x0000,0x0000,0x0000,0x0000,0x00FF,0x0000,0x0000
};

/*
  Unicode -> TIS-620 conversion: one plane table per high byte of the code
  point. Only pages 0x00 (ASCII/Latin-1 controls), 0x0E (Thai block) and
  0xFF are populated; every other page is NULL (unmappable).
*/
static uchar *uni_to_cs[256]={
pl00,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,pl0E,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,plFF
};


/*
  mb_wc: decode one TIS-620 byte from str into the Unicode code point *wc
  via cs_to_uni[]. Returns MY_CS_TOOSMALL if the buffer is empty, -1 for a
  non-zero byte that maps to no code point, else 1 (one byte consumed).
*/
static int
my_mb_wc_tis620(CHARSET_INFO *cs __attribute__((unused)),
                my_wc_t *wc,
                const uchar *str,
                const uchar *end __attribute__((unused)))
{
  if (str >= end)
    return MY_CS_TOOSMALL;

  *wc=cs_to_uni[*str];
  return (!wc[0] && str[0]) ? -1 : 1;
}


/*
  wc_mb: encode Unicode code point wc as one TIS-620 byte in str, using the
  per-page uni_to_cs[] tables. Returns MY_CS_TOOSMALL if no room,
  MY_CS_ILUNI if the code point has no TIS-620 mapping, else 1.
*/
static int
my_wc_mb_tis620(CHARSET_INFO *cs __attribute__((unused)),
                my_wc_t wc,
                uchar *str,
                uchar *end __attribute__((unused)))
{
  uchar *pl;

  if (str >= end)
    return MY_CS_TOOSMALL;

  pl= uni_to_cs[(wc>>8) & 0xFF];
  str[0]= pl ? pl[wc & 0xFF] : '\0';
  return (!str[0] && wc) ? MY_CS_ILUNI : 1;
}


/*
  Collation vtable for tis620_thai_ci: Thai-aware comparison/transform
  functions defined above, generic 8-bit helpers for the rest.
  The field order is the MY_COLLATION_HANDLER contract — do not reorder.
*/
static MY_COLLATION_HANDLER my_collation_ci_handler =
{
  NULL,                 /* init */
  my_strnncoll_tis620,
  my_strnncollsp_tis620,
  my_strnxfrm_tis620,
  my_strnxfrmlen_simple,
  my_like_range_simple,
  my_wildcmp_8bit,      /* wildcmp */
  my_strcasecmp_8bit,
  my_instr_simple,      /* QQ: To be fixed */
  my_hash_sort_simple,
  my_propagate_simple
};


/*
  Charset vtable shared by both tis620 collations: single-byte (8-bit)
  helpers everywhere except the Unicode conversion pair defined above.
*/
static MY_CHARSET_HANDLER my_charset_handler=
{
  NULL,                 /* init */
  NULL,                 /* ismbchar */
  my_mbcharlen_8bit,    /* mbcharlen */
  my_numchars_8bit,
  my_charpos_8bit,
  my_well_formed_len_8bit,
  my_lengthsp_8bit,
  my_numcells_8bit,
  my_mb_wc_tis620,      /* mb_wc */
  my_wc_mb_tis620,      /* wc_mb */
  my_mb_ctype_8bit,
  my_caseup_str_8bit,
  my_casedn_str_8bit,
  my_caseup_8bit,
  my_casedn_8bit,
  my_snprintf_8bit,
  my_long10_to_str_8bit,
  my_longlong10_to_str_8bit,
  my_fill_8bit,
  my_strntol_8bit,
  my_strntoul_8bit,
  my_strntoll_8bit,
  my_strntoull_8bit,
  my_strntod_8bit,
  my_strtoll10_8bit,
  my_strntoull10rnd_8bit,
  my_scan_8bit
};


/* Default (case-insensitive, Thai-sorted) tis620 collation, id 18. */
CHARSET_INFO my_charset_tis620_thai_ci=
{
  18,0,0,               /* number */
  MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM,  /* state */
  "tis620",             /* cs name */
  "tis620_thai_ci",     /* name */
  "",                   /* comment */
  NULL,                 /* tailoring */
  ctype_tis620,
  to_lower_tis620,
  to_upper_tis620,
  sort_order_tis620,
  NULL,                 /* contractions */
  NULL,                 /* sort_order_big*/
  NULL,                 /* tab_to_uni */
  NULL,                 /* tab_from_uni */
  my_unicase_default,   /* caseinfo */
  NULL,                 /* state_map */
  NULL,                 /* ident_map */
  4,                    /* strxfrm_multiply */
  1,                    /* caseup_multiply */
  1,                    /* casedn_multiply */
  1,                    /* mbminlen */
  1,                    /* mbmaxlen */
  0,                    /* min_sort_char */
  255,                  /* max_sort_char */
  ' ',                  /* pad char */
  0,                    /* escape_with_backslash_is_dangerous */
  &my_charset_handler,
  &my_collation_ci_handler
};


/* Binary tis620 collation, id 89: byte-wise sort (no sort_order table). */
CHARSET_INFO my_charset_tis620_bin=
{
  89,0,0,               /* number */
  MY_CS_COMPILED|MY_CS_BINSORT,  /* state */
  "tis620",             /* cs name */
  "tis620_bin",         /* name */
  "",                   /* comment */
  NULL,                 /* tailoring */
  ctype_tis620,
  to_lower_tis620,
  to_upper_tis620,
  NULL,                 /* sort_order */
  NULL,                 /* contractions */
  NULL,                 /* sort_order_big*/
  NULL,                 /* tab_to_uni */
  NULL,                 /* tab_from_uni */
  my_unicase_default,   /* caseinfo */
  NULL,                 /* state_map */
  NULL,                 /* ident_map */
  1,                    /* strxfrm_multiply */
  1,                    /* caseup_multiply */
  1,                    /* casedn_multiply */
  1,                    /* mbminlen */
  1,                    /* mbmaxlen */
  0,                    /* min_sort_char */
  255,                  /* max_sort_char */
  ' ',                  /* pad char */
  0,                    /* escape_with_backslash_is_dangerous */
  &my_charset_handler,
  &my_collation_8bit_bin_handler
};

#endif
gpl-2.0
cminyard/linux-fumount
drivers/i2c/busses/i2c-sh_mobile.c
400
28781
/* * SuperH Mobile I2C Controller * * Copyright (C) 2014 Wolfram Sang <wsa@sang-engineering.com> * * Copyright (C) 2008 Magnus Damm * * Portions of the code based on out-of-tree driver i2c-sh7343.c * Copyright (c) 2006 Carlos Munoz <carlos@kenati.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/i2c/i2c-sh_mobile.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> /* Transmit operation: */ /* */ /* 0 byte transmit */ /* BUS: S A8 ACK P(*) */ /* IRQ: DTE WAIT */ /* ICIC: */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 */ /* */ /* 1 byte transmit */ /* BUS: S A8 ACK D8(1) ACK P(*) */ /* IRQ: DTE WAIT WAIT */ /* ICIC: -DTE */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 D8(1) */ /* */ /* 2 byte transmit */ /* BUS: S A8 ACK D8(1) ACK D8(2) ACK P(*) */ /* IRQ: DTE WAIT WAIT WAIT */ /* ICIC: -DTE */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 D8(1) D8(2) */ /* */ /* 3 bytes or more, +---------+ gets repeated */ /* */ /* */ /* Receive operation: */ /* */ /* 0 byte receive - not supported since slave may hold SDA low */ /* */ /* 1 byte receive [TX] | [RX] */ /* BUS: S A8 ACK | D8(1) ACK P(*) */ /* IRQ: DTE WAIT | WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) */ /* */ /* 2 byte receive [TX]| [RX] */ /* BUS: 
S A8 ACK | D8(1) ACK D8(2) ACK P(*) */ /* IRQ: DTE WAIT | WAIT WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) D8(2) */ /* */ /* 3 byte receive [TX] | [RX] (*) */ /* BUS: S A8 ACK | D8(1) ACK D8(2) ACK D8(3) ACK P */ /* IRQ: DTE WAIT | WAIT WAIT WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) D8(2) D8(3) */ /* */ /* 4 bytes or more, this part is repeated +---------+ */ /* */ /* */ /* Interrupt order and BUSY flag */ /* ___ _ */ /* SDA ___\___XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXAAAAAAAAA___/ */ /* SCL \_/1\_/2\_/3\_/4\_/5\_/6\_/7\_/8\___/9\_____/ */ /* */ /* S D7 D6 D5 D4 D3 D2 D1 D0 P(*) */ /* ___ */ /* WAIT IRQ ________________________________/ \___________ */ /* TACK IRQ ____________________________________/ \_______ */ /* DTE IRQ __________________________________________/ \_ */ /* AL IRQ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /* _______________________________________________ */ /* BUSY __/ \_ */ /* */ /* (*) The STOP condition is only sent by the master at the end of the last */ /* I2C message or if the I2C_M_STOP flag is set. Similarly, the BUSY bit is */ /* only cleared after the STOP condition, so, between messages we have to */ /* poll for the DTE bit. 
*/ /* */ enum sh_mobile_i2c_op { OP_START = 0, OP_TX_FIRST, OP_TX, OP_TX_STOP, OP_TX_STOP_DATA, OP_TX_TO_RX, OP_RX, OP_RX_STOP, OP_RX_STOP_DATA, }; struct sh_mobile_i2c_data { struct device *dev; void __iomem *reg; struct i2c_adapter adap; unsigned long bus_speed; unsigned int clks_per_count; struct clk *clk; u_int8_t icic; u_int8_t flags; u_int16_t iccl; u_int16_t icch; spinlock_t lock; wait_queue_head_t wait; struct i2c_msg *msg; int pos; int sr; bool send_stop; bool stop_after_dma; struct resource *res; struct dma_chan *dma_tx; struct dma_chan *dma_rx; struct scatterlist sg; enum dma_data_direction dma_direction; }; struct sh_mobile_dt_config { int clks_per_count; }; #define IIC_FLAG_HAS_ICIC67 (1 << 0) #define STANDARD_MODE 100000 #define FAST_MODE 400000 /* Register offsets */ #define ICDR 0x00 #define ICCR 0x04 #define ICSR 0x08 #define ICIC 0x0c #define ICCL 0x10 #define ICCH 0x14 /* Register bits */ #define ICCR_ICE 0x80 #define ICCR_RACK 0x40 #define ICCR_TRS 0x10 #define ICCR_BBSY 0x04 #define ICCR_SCP 0x01 #define ICSR_SCLM 0x80 #define ICSR_SDAM 0x40 #define SW_DONE 0x20 #define ICSR_BUSY 0x10 #define ICSR_AL 0x08 #define ICSR_TACK 0x04 #define ICSR_WAIT 0x02 #define ICSR_DTE 0x01 #define ICIC_ICCLB8 0x80 #define ICIC_ICCHB8 0x40 #define ICIC_TDMAE 0x20 #define ICIC_RDMAE 0x10 #define ICIC_ALE 0x08 #define ICIC_TACKE 0x04 #define ICIC_WAITE 0x02 #define ICIC_DTEE 0x01 static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data) { if (offs == ICIC) data |= pd->icic; iowrite8(data, pd->reg + offs); } static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs) { return ioread8(pd->reg + offs); } static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs, unsigned char set, unsigned char clr) { iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr); } static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf) { /* * Conditional expression: * ICCL >= COUNT_CLK * (tLOW + tf) * * SH-Mobile IIC hardware starts counting 
the LOW period of * the SCL signal (tLOW) as soon as it pulls the SCL line. * In order to meet the tLOW timing spec, we need to take into * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. */ return (((count_khz * (tLOW + tf)) + 5000) / 10000); } static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf) { /* * Conditional expression: * ICCH >= COUNT_CLK * (tHIGH + tf) * * SH-Mobile IIC hardware is aware of SCL transition period 'tr', * and can ignore it. SH-Mobile IIC controller starts counting * the HIGH period of the SCL signal (tHIGH) after the SCL input * voltage increases at VIH. * * Afterward it turned out calculating ICCH using only tHIGH spec * will result in violation of the tHD;STA timing spec. We need * to take into account the fall time of SDA signal (tf) at START * condition, in order to meet both tHIGH and tHD;STA specs. */ return (((count_khz * (tHIGH + tf)) + 5000) / 10000); } static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) { unsigned long i2c_clk_khz; u32 tHIGH, tLOW, tf; uint16_t max_val; /* Get clock rate after clock is enabled */ clk_prepare_enable(pd->clk); i2c_clk_khz = clk_get_rate(pd->clk) / 1000; clk_disable_unprepare(pd->clk); i2c_clk_khz /= pd->clks_per_count; if (pd->bus_speed == STANDARD_MODE) { tLOW = 47; /* tLOW = 4.7 us */ tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */ tf = 3; /* tf = 0.3 us */ } else if (pd->bus_speed == FAST_MODE) { tLOW = 13; /* tLOW = 1.3 us */ tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */ tf = 3; /* tf = 0.3 us */ } else { dev_err(pd->dev, "unrecognized bus speed %lu Hz\n", pd->bus_speed); return -EINVAL; } pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf); pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf); max_val = pd->flags & IIC_FLAG_HAS_ICIC67 ? 
0x1ff : 0xff; if (pd->iccl > max_val || pd->icch > max_val) { dev_err(pd->dev, "timing values out of range: L/H=0x%x/0x%x\n", pd->iccl, pd->icch); return -EINVAL; } /* one more bit of ICCL in ICIC */ if (pd->iccl & 0x100) pd->icic |= ICIC_ICCLB8; else pd->icic &= ~ICIC_ICCLB8; /* one more bit of ICCH in ICIC */ if (pd->icch & 0x100) pd->icic |= ICIC_ICCHB8; else pd->icic &= ~ICIC_ICCHB8; dev_dbg(pd->dev, "timing values: L/H=0x%x/0x%x\n", pd->iccl, pd->icch); return 0; } static void activate_ch(struct sh_mobile_i2c_data *pd) { /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); clk_prepare_enable(pd->clk); /* Enable channel and configure rx ack */ iic_set_clr(pd, ICCR, ICCR_ICE, 0); /* Mask all interrupts */ iic_wr(pd, ICIC, 0); /* Set the clock */ iic_wr(pd, ICCL, pd->iccl & 0xff); iic_wr(pd, ICCH, pd->icch & 0xff); } static void deactivate_ch(struct sh_mobile_i2c_data *pd) { /* Clear/disable interrupts */ iic_wr(pd, ICSR, 0); iic_wr(pd, ICIC, 0); /* Disable channel */ iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Disable clock and mark device as idle */ clk_disable_unprepare(pd->clk); pm_runtime_put_sync(pd->dev); } static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op op, unsigned char data) { unsigned char ret = 0; unsigned long flags; dev_dbg(pd->dev, "op %d, data in 0x%02x\n", op, data); spin_lock_irqsave(&pd->lock, flags); switch (op) { case OP_START: /* issue start and trigger DTE interrupt */ iic_wr(pd, ICCR, ICCR_ICE | ICCR_TRS | ICCR_BBSY); break; case OP_TX_FIRST: /* disable DTE interrupt and write data */ iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICDR, data); break; case OP_TX: /* write data */ iic_wr(pd, ICDR, data); break; case OP_TX_STOP_DATA: /* write data and issue a stop afterwards */ iic_wr(pd, ICDR, data); /* fallthrough */ case OP_TX_STOP: /* issue a stop */ iic_wr(pd, ICCR, pd->send_stop ? 
ICCR_ICE | ICCR_TRS : ICCR_ICE | ICCR_TRS | ICCR_BBSY); break; case OP_TX_TO_RX: /* select read mode */ iic_wr(pd, ICCR, ICCR_ICE | ICCR_SCP); break; case OP_RX: /* just read data */ ret = iic_rd(pd, ICDR); break; case OP_RX_STOP: /* enable DTE interrupt, issue stop */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); ret = iic_rd(pd, ICDR); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; } spin_unlock_irqrestore(&pd->lock, flags); dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret); return ret; } static bool sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd) { return pd->pos == -1; } static bool sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd) { return pd->pos == pd->msg->len - 1; } static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd, unsigned char *buf) { switch (pd->pos) { case -1: *buf = (pd->msg->addr & 0x7f) << 1; *buf |= (pd->msg->flags & I2C_M_RD) ? 
1 : 0; break; default: *buf = pd->msg->buf[pd->pos]; } } static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) { unsigned char data; if (pd->pos == pd->msg->len) { /* Send stop if we haven't yet (DMA case) */ if (pd->send_stop && pd->stop_after_dma) i2c_op(pd, OP_TX_STOP, 0); return 1; } sh_mobile_i2c_get_data(pd, &data); if (sh_mobile_i2c_is_last_byte(pd)) i2c_op(pd, OP_TX_STOP_DATA, data); else if (sh_mobile_i2c_is_first_byte(pd)) i2c_op(pd, OP_TX_FIRST, data); else i2c_op(pd, OP_TX, data); pd->pos++; return 0; } static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) { unsigned char data; int real_pos; do { if (pd->pos <= -1) { sh_mobile_i2c_get_data(pd, &data); if (sh_mobile_i2c_is_first_byte(pd)) i2c_op(pd, OP_TX_FIRST, data); else i2c_op(pd, OP_TX, data); break; } if (pd->pos == 0) { i2c_op(pd, OP_TX_TO_RX, 0); break; } real_pos = pd->pos - 2; if (pd->pos == pd->msg->len) { if (pd->stop_after_dma) { /* Simulate PIO end condition after DMA transfer */ i2c_op(pd, OP_RX_STOP, 0); pd->pos++; break; } if (real_pos < 0) { i2c_op(pd, OP_RX_STOP, 0); break; } data = i2c_op(pd, OP_RX_STOP_DATA, 0); } else data = i2c_op(pd, OP_RX, 0); if (real_pos >= 0) pd->msg->buf[real_pos] = data; } while (0); pd->pos++; return pd->pos == (pd->msg->len + 2); } static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) { struct sh_mobile_i2c_data *pd = dev_id; unsigned char sr; int wakeup = 0; sr = iic_rd(pd, ICSR); pd->sr |= sr; /* remember state */ dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, (pd->msg->flags & I2C_M_RD) ? 
"read" : "write", pd->pos, pd->msg->len); /* Kick off TxDMA after preface was done */ if (pd->dma_direction == DMA_TO_DEVICE && pd->pos == 0) iic_set_clr(pd, ICIC, ICIC_TDMAE, 0); else if (sr & (ICSR_AL | ICSR_TACK)) /* don't interrupt transaction - continue to issue stop */ iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK)); else if (pd->msg->flags & I2C_M_RD) wakeup = sh_mobile_i2c_isr_rx(pd); else wakeup = sh_mobile_i2c_isr_tx(pd); /* Kick off RxDMA after preface was done */ if (pd->dma_direction == DMA_FROM_DEVICE && pd->pos == 1) iic_set_clr(pd, ICIC, ICIC_RDMAE, 0); if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */ iic_wr(pd, ICSR, sr & ~ICSR_WAIT); if (wakeup) { pd->sr |= SW_DONE; wake_up(&pd->wait); } /* defeat write posting to avoid spurious WAIT interrupts */ iic_rd(pd, ICSR); return IRQ_HANDLED; } static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd) { struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE ? pd->dma_rx : pd->dma_tx; dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg), pd->msg->len, pd->dma_direction); pd->dma_direction = DMA_NONE; } static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd) { if (pd->dma_direction == DMA_NONE) return; else if (pd->dma_direction == DMA_FROM_DEVICE) dmaengine_terminate_all(pd->dma_rx); else if (pd->dma_direction == DMA_TO_DEVICE) dmaengine_terminate_all(pd->dma_tx); sh_mobile_i2c_dma_unmap(pd); } static void sh_mobile_i2c_dma_callback(void *data) { struct sh_mobile_i2c_data *pd = data; sh_mobile_i2c_dma_unmap(pd); pd->pos = pd->msg->len; pd->stop_after_dma = true; iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, dma_addr_t port_addr) { struct dma_chan *chan; struct dma_slave_config cfg; char *chan_name = dir == DMA_MEM_TO_DEV ? 
"tx" : "rx"; int ret; chan = dma_request_slave_channel_reason(dev, chan_name); if (IS_ERR(chan)) { ret = PTR_ERR(chan); dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret); return chan; } memset(&cfg, 0, sizeof(cfg)); cfg.direction = dir; if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = port_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } else { cfg.src_addr = port_addr; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } ret = dmaengine_slave_config(chan, &cfg); if (ret) { dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret); dma_release_channel(chan); return ERR_PTR(ret); } dev_dbg(dev, "got DMA channel for %s\n", chan_name); return chan; } static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) { bool read = pd->msg->flags & I2C_M_RD; enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; struct dma_chan *chan = read ? pd->dma_rx : pd->dma_tx; struct dma_async_tx_descriptor *txdesc; dma_addr_t dma_addr; dma_cookie_t cookie; if (PTR_ERR(chan) == -EPROBE_DEFER) { if (read) chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM, pd->res->start + ICDR); else chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV, pd->res->start + ICDR); } if (IS_ERR(chan)) return; dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); if (dma_mapping_error(pd->dev, dma_addr)) { dev_dbg(pd->dev, "dma map failed, using PIO\n"); return; } sg_dma_len(&pd->sg) = pd->msg->len; sg_dma_address(&pd->sg) = dma_addr; pd->dma_direction = dir; txdesc = dmaengine_prep_slave_sg(chan, &pd->sg, 1, read ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n"); sh_mobile_i2c_cleanup_dma(pd); return; } txdesc->callback = sh_mobile_i2c_dma_callback; txdesc->callback_param = pd; cookie = dmaengine_submit(txdesc); if (dma_submit_error(cookie)) { dev_dbg(pd->dev, "submitting dma failed, using PIO\n"); sh_mobile_i2c_cleanup_dma(pd); return; } dma_async_issue_pending(chan); } static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, bool do_init) { if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) { dev_err(pd->dev, "Unsupported zero length i2c read\n"); return -EOPNOTSUPP; } if (do_init) { /* Initialize channel registers */ iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Enable channel and configure rx ack */ iic_set_clr(pd, ICCR, ICCR_ICE, 0); /* Set the clock */ iic_wr(pd, ICCL, pd->iccl & 0xff); iic_wr(pd, ICCH, pd->icch & 0xff); } pd->msg = usr_msg; pd->pos = -1; pd->sr = 0; if (pd->msg->len > 8) sh_mobile_i2c_xfer_dma(pd); /* Enable all interrupts to begin with */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); return 0; } static int poll_dte(struct sh_mobile_i2c_data *pd) { int i; for (i = 1000; i; i--) { u_int8_t val = iic_rd(pd, ICSR); if (val & ICSR_DTE) break; if (val & ICSR_TACK) return -ENXIO; udelay(10); } return i ? 0 : -ETIMEDOUT; } static int poll_busy(struct sh_mobile_i2c_data *pd) { int i; for (i = 1000; i; i--) { u_int8_t val = iic_rd(pd, ICSR); dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); /* the interrupt handler may wake us up before the * transfer is finished, so poll the hardware * until we're done. */ if (!(val & ICSR_BUSY)) { /* handle missing acknowledge and arbitration lost */ val |= pd->sr; if (val & ICSR_TACK) return -ENXIO; if (val & ICSR_AL) return -EAGAIN; break; } udelay(10); } return i ? 
0 : -ETIMEDOUT; } static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); struct i2c_msg *msg; int err = 0; int i, k; activate_ch(pd); /* Process all messages */ for (i = 0; i < num; i++) { bool do_start = pd->send_stop || !i; msg = &msgs[i]; pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; pd->stop_after_dma = false; err = start_ch(pd, msg, do_start); if (err) break; if (do_start) i2c_op(pd, OP_START, 0); /* The interrupt handler takes care of the rest... */ k = wait_event_timeout(pd->wait, pd->sr & (ICSR_TACK | SW_DONE), 5 * HZ); if (!k) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) sh_mobile_i2c_cleanup_dma(pd); err = -ETIMEDOUT; break; } if (pd->send_stop) err = poll_busy(pd); else err = poll_dte(pd); if (err < 0) break; } deactivate_ch(pd); if (!err) err = num; return err; } static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static struct i2c_algorithm sh_mobile_i2c_algorithm = { .functionality = sh_mobile_i2c_func, .master_xfer = sh_mobile_i2c_xfer, }; static const struct sh_mobile_dt_config default_dt_config = { .clks_per_count = 1, }; static const struct sh_mobile_dt_config fast_clock_dt_config = { .clks_per_count = 2, }; static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,rmobile-iic", .data = &default_dt_config }, { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-sh73a0", .data = 
&fast_clock_dt_config }, {}, }; MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids); static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) { if (!IS_ERR(pd->dma_tx)) { dma_release_channel(pd->dma_tx); pd->dma_tx = ERR_PTR(-EPROBE_DEFER); } if (!IS_ERR(pd->dma_rx)) { dma_release_channel(pd->dma_rx); pd->dma_rx = ERR_PTR(-EPROBE_DEFER); } } static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd) { struct resource *res; resource_size_t n; int k = 0, ret; while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) { for (n = res->start; n <= res->end; n++) { ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr, 0, dev_name(&dev->dev), pd); if (ret) { dev_err(&dev->dev, "cannot request IRQ %pa\n", &n); return ret; } } k++; } return k > 0 ? 0 : -ENOENT; } static int sh_mobile_i2c_probe(struct platform_device *dev) { struct i2c_sh_mobile_platform_data *pdata = dev_get_platdata(&dev->dev); struct sh_mobile_i2c_data *pd; struct i2c_adapter *adap; struct resource *res; int ret; u32 bus_speed; pd = devm_kzalloc(&dev->dev, sizeof(struct sh_mobile_i2c_data), GFP_KERNEL); if (!pd) return -ENOMEM; pd->clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(pd->clk)) { dev_err(&dev->dev, "cannot get clock\n"); return PTR_ERR(pd->clk); } ret = sh_mobile_i2c_hook_irqs(dev, pd); if (ret) return ret; pd->dev = &dev->dev; platform_set_drvdata(dev, pd); res = platform_get_resource(dev, IORESOURCE_MEM, 0); pd->res = res; pd->reg = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(pd->reg)) return PTR_ERR(pd->reg); /* Use platform data bus speed or STANDARD_MODE */ ret = of_property_read_u32(dev->dev.of_node, "clock-frequency", &bus_speed); pd->bus_speed = ret ? 
STANDARD_MODE : bus_speed; pd->clks_per_count = 1; if (dev->dev.of_node) { const struct of_device_id *match; match = of_match_device(sh_mobile_i2c_dt_ids, &dev->dev); if (match) { const struct sh_mobile_dt_config *config; config = match->data; pd->clks_per_count = config->clks_per_count; } } else { if (pdata && pdata->bus_speed) pd->bus_speed = pdata->bus_speed; if (pdata && pdata->clks_per_count) pd->clks_per_count = pdata->clks_per_count; } /* The IIC blocks on SH-Mobile ARM processors * come with two new bits in ICIC. */ if (resource_size(res) > 0x17) pd->flags |= IIC_FLAG_HAS_ICIC67; ret = sh_mobile_i2c_init(pd); if (ret) return ret; /* Init DMA */ sg_init_table(&pd->sg, 1); pd->dma_direction = DMA_NONE; pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER); /* Enable Runtime PM for this device. * * Also tell the Runtime PM core to ignore children * for this device since it is valid for us to suspend * this I2C master driver even though the slave devices * on the I2C bus may not be suspended. * * The state of the I2C hardware bus is unaffected by * the Runtime PM state. 
*/ pm_suspend_ignore_children(&dev->dev, true); pm_runtime_enable(&dev->dev); /* setup the private data */ adap = &pd->adap; i2c_set_adapdata(adap, pd); adap->owner = THIS_MODULE; adap->algo = &sh_mobile_i2c_algorithm; adap->dev.parent = &dev->dev; adap->retries = 5; adap->nr = dev->id; adap->dev.of_node = dev->dev.of_node; strlcpy(adap->name, dev->name, sizeof(adap->name)); spin_lock_init(&pd->lock); init_waitqueue_head(&pd->wait); ret = i2c_add_numbered_adapter(adap); if (ret < 0) { sh_mobile_i2c_release_dma(pd); dev_err(&dev->dev, "cannot add numbered adapter\n"); return ret; } dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed); return 0; } static int sh_mobile_i2c_remove(struct platform_device *dev) { struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); sh_mobile_i2c_release_dma(pd); pm_runtime_disable(&dev->dev); return 0; } static int sh_mobile_i2c_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. 
*/ return 0; } static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { .runtime_suspend = sh_mobile_i2c_runtime_nop, .runtime_resume = sh_mobile_i2c_runtime_nop, }; static struct platform_driver sh_mobile_i2c_driver = { .driver = { .name = "i2c-sh_mobile", .pm = &sh_mobile_i2c_dev_pm_ops, .of_match_table = sh_mobile_i2c_dt_ids, }, .probe = sh_mobile_i2c_probe, .remove = sh_mobile_i2c_remove, }; static int __init sh_mobile_i2c_adap_init(void) { return platform_driver_register(&sh_mobile_i2c_driver); } subsys_initcall(sh_mobile_i2c_adap_init); static void __exit sh_mobile_i2c_adap_exit(void) { platform_driver_unregister(&sh_mobile_i2c_driver); } module_exit(sh_mobile_i2c_adap_exit); MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); MODULE_AUTHOR("Magnus Damm and Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-sh_mobile");
gpl-2.0
netico-solutions/linux-urtu-bb
drivers/net/wireless/ath/wil6210/cfg80211.c
400
30185
/* * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include "wil6210.h" #include "wmi.h" #define CHAN60G(_channel, _flags) { \ .band = IEEE80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 40, \ } static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(1, 0), CHAN60G(2, 0), CHAN60G(3, 0), /* channel 4 not supported yet */ }; static struct ieee80211_supported_band wil_band_60ghz = { .channels = wil_60ghz_channels, .n_channels = ARRAY_SIZE(wil_60ghz_channels), .ht_cap = { .ht_supported = true, .cap = 0, /* TODO */ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */ .mcs = { /* MCS 1..12 - SC PHY */ .rx_mask = {0xfe, 0x1f}, /* 1..12 */ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */ }, }, }; static const struct ieee80211_txrx_stypes wil_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_AP] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 
4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_P2P_GO] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_RESP >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, }; static const u32 wil_cipher_suites[] = { WLAN_CIPHER_SUITE_GCMP, }; int wil_iftype_nl2wmi(enum nl80211_iftype type) { static const struct { enum nl80211_iftype nl; enum wmi_network_type wmi; } __nl2wmi[] = { {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC}, {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA}, {NL80211_IFTYPE_AP, WMI_NETTYPE_AP}, {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P}, {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P}, {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */ }; uint i; for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) { if (__nl2wmi[i].nl == type) return __nl2wmi[i].wmi; } return -EOPNOTSUPP; } int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct station_info *sinfo) { struct wmi_notify_req_cmd cmd = { .cid = cid, .interval_usec = 0, }; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_notify_req_done_event evt; } __packed reply; struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" " BF status 0x%08x SNR 0x%08x SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, le32_to_cpu(reply.evt.snr_val), reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), le32_to_cpu(reply.evt.rx_goodput), le16_to_cpu(reply.evt.my_rx_sector), 
le16_to_cpu(reply.evt.my_tx_sector), le16_to_cpu(reply.evt.other_rx_sector), le16_to_cpu(reply.evt.other_tx_sector)); sinfo->generation = wil->sinfo_gen; sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) | BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) | BIT(NL80211_STA_INFO_RX_BITRATE) | BIT(NL80211_STA_INFO_TX_BITRATE) | BIT(NL80211_STA_INFO_RX_DROP_MISC) | BIT(NL80211_STA_INFO_TX_FAILED); sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; sinfo->rxrate.mcs = stats->last_mcs_rx; sinfo->rx_bytes = stats->rx_bytes; sinfo->rx_packets = stats->rx_packets; sinfo->rx_dropped_misc = stats->rx_dropped; sinfo->tx_bytes = stats->tx_bytes; sinfo->tx_packets = stats->tx_packets; sinfo->tx_failed = stats->tx_errors; if (test_bit(wil_status_fwconnected, wil->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); sinfo->signal = reply.evt.sqi; } return rc; } static int wil_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_info *sinfo) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; int cid = wil_find_cid(wil, mac); wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); if (cid < 0) return cid; rc = wil_cid_fill_sinfo(wil, cid, sinfo); return rc; } /* * Find @idx-th active STA for station dump. 
*/ static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx) { int i; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { if (wil->sta[i].status == wil_sta_unused) continue; if (idx == 0) return i; idx--; } return -ENOENT; } static int wil_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; int cid = wil_find_cid_by_idx(wil, idx); if (cid < 0) return -ENOENT; ether_addr_copy(mac, wil->sta[cid].addr); wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); rc = wil_cid_fill_sinfo(wil, cid, sinfo); return rc; } static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: break; case NL80211_IFTYPE_MONITOR: if (flags) wil->monitor_flags = *flags; else wil->monitor_flags = 0; break; default: return -EOPNOTSUPP; } wdev->iftype = type; return 0; } static int wil_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; struct { struct wmi_start_scan_cmd cmd; u16 chnl[4]; } __packed cmd; uint i, n; int rc; if (wil->scan_request) { wil_err(wil, "Already scanning\n"); return -EAGAIN; } /* check we are client side */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: break; default: return -EOPNOTSUPP; } /* FW don't support scan after connection attempt */ if (test_bit(wil_status_dontscan, wil->status)) { wil_err(wil, "Can't scan now\n"); return -EBUSY; } wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); for (i = 0; i < request->n_ssids; i++) { 
wil_dbg_misc(wil, "SSID[%d]", i); print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, request->ssids[i].ssid, request->ssids[i].ssid_len); } if (request->n_ssids) rc = wmi_set_ssid(wil, request->ssids[0].ssid_len, request->ssids[0].ssid); else rc = wmi_set_ssid(wil, 0, NULL); if (rc) { wil_err(wil, "set SSID for scan request failed: %d\n", rc); return rc; } wil->scan_request = request; mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); memset(&cmd, 0, sizeof(cmd)); cmd.cmd.num_channels = 0; n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { int ch = request->channels[i]->hw_value; if (ch == 0) { wil_err(wil, "Scan requested for unknown frequency %dMhz\n", request->channels[i]->center_freq); continue; } /* 0-based channel indexes */ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1; wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch, request->channels[i]->center_freq); } if (request->ie_len) print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, request->ie, request->ie_len); else wil_dbg_misc(wil, "Scan has no IE's\n"); rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) goto out; rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); out: if (rc) { del_timer_sync(&wil->scan_timer); wil->scan_request = NULL; } return rc; } static void wil_print_crypto(struct wil6210_priv *wil, struct cfg80211_crypto_settings *c) { int i, n; wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n", c->wpa_versions, c->cipher_group); wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise); n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise)); for (i = 0; i < n; i++) wil_dbg_misc(wil, " [%d] = 0x%08x\n", i, c->ciphers_pairwise[i]); wil_dbg_misc(wil, "}\n"); wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites); n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites)); for (i = 0; i < n; i++) wil_dbg_misc(wil, " [%d] = 
0x%08x\n", i, c->akm_suites[i]); wil_dbg_misc(wil, "}\n"); wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n", c->control_port, be16_to_cpu(c->control_port_ethertype), c->control_port_no_encrypt); } static void wil_print_connect_params(struct wil6210_priv *wil, struct cfg80211_connect_params *sme) { wil_info(wil, "Connecting to:\n"); if (sme->channel) { wil_info(wil, " Channel: %d freq %d\n", sme->channel->hw_value, sme->channel->center_freq); } if (sme->bssid) wil_info(wil, " BSSID: %pM\n", sme->bssid); if (sme->ssid) print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 16, 1, sme->ssid, sme->ssid_len, true); wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open"); wil_print_crypto(wil, &sme->crypto); } static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct cfg80211_bss *bss; struct wmi_connect_cmd conn; const u8 *ssid_eid; const u8 *rsn_eid; int ch; int rc = 0; wil_print_connect_params(wil, sme); if (test_bit(wil_status_fwconnecting, wil->status) || test_bit(wil_status_fwconnected, wil->status)) return -EALREADY; if (sme->ie_len > WMI_MAX_IE_LEN) { wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len); return -ERANGE; } rsn_eid = sme->ie ? 
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; if (sme->privacy && !rsn_eid) wil_info(wil, "WSC connection\n"); bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!bss) { wil_err(wil, "Unable to find BSS\n"); return -ENOENT; } ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); if (!ssid_eid) { wil_err(wil, "No SSID\n"); rc = -ENOENT; goto out; } wil->privacy = sme->privacy; if (wil->privacy) { /* For secure assoc, remove old keys */ rc = wmi_del_cipher_key(wil, 0, bss->bssid, WMI_KEY_USE_PAIRWISE); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n"); goto out; } rc = wmi_del_cipher_key(wil, 0, bss->bssid, WMI_KEY_USE_RX_GROUP); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n"); goto out; } } /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info * elements. Send it also in case it's empty, to erase previously set * ies in FW. */ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); if (rc) goto out; /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) { case WLAN_CAPABILITY_DMG_TYPE_AP: conn.network_type = WMI_NETTYPE_INFRA; break; case WLAN_CAPABILITY_DMG_TYPE_PBSS: conn.network_type = WMI_NETTYPE_P2P; break; default: wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n", bss->capability); goto out; } if (wil->privacy) { if (rsn_eid) { /* regular secure connection */ conn.dot11_auth_mode = WMI_AUTH11_SHARED; conn.auth_mode = WMI_AUTH_WPA2_PSK; conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; conn.pairwise_crypto_len = 16; conn.group_crypto_type = WMI_CRYPT_AES_GCMP; conn.group_crypto_len = 16; } else { /* WSC */ conn.dot11_auth_mode = WMI_AUTH11_WSC; conn.auth_mode = WMI_AUTH_NONE; } } else { /* insecure connection */ conn.dot11_auth_mode = WMI_AUTH11_OPEN; conn.auth_mode = WMI_AUTH_NONE; } conn.ssid_len = min_t(u8, ssid_eid[1], 32); memcpy(conn.ssid, 
ssid_eid+2, conn.ssid_len); ch = bss->channel->hw_value; if (ch == 0) { wil_err(wil, "BSS at unknown frequency %dMhz\n", bss->channel->center_freq); rc = -EOPNOTSUPP; goto out; } conn.channel = ch - 1; ether_addr_copy(conn.bssid, bss->bssid); ether_addr_copy(conn.dst_mac, bss->bssid); set_bit(wil_status_fwconnecting, wil->status); rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { netif_carrier_on(ndev); /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); } else { clear_bit(wil_status_fwconnecting, wil->status); } out: cfg80211_put_bss(wiphy, bss); return rc; } static int wil_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, u16 reason_code) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); return rc; } int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { const u8 *buf = params->buf; size_t len = params->len; struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; bool tx_status = false; struct ieee80211_mgmt *mgmt_frame = (void *)buf; struct wmi_sw_tx_req_cmd *cmd; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_sw_tx_complete_event evt; } __packed evt; cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; } memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); cmd->len = cpu_to_le16(len); memcpy(cmd->payload, buf, len); rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (rc == 0) tx_status = !evt.evt.status; kfree(cmd); out: cfg80211_mgmt_tx_status(wdev, cookie ? 
*cookie : 0, buf, len, tx_status, GFP_KERNEL); return rc; } static int wil_cfg80211_set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = wil->wdev; wdev->preset_chandef = *chandef; return 0; } static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, bool pairwise) { struct wireless_dev *wdev = wil->wdev; enum wmi_key_usage rc; static const char * const key_usage_str[] = { [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE", [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP", [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP", }; if (pairwise) { rc = WMI_KEY_USE_PAIRWISE; } else { switch (wdev->iftype) { case NL80211_IFTYPE_STATION: rc = WMI_KEY_USE_RX_GROUP; break; case NL80211_IFTYPE_AP: rc = WMI_KEY_USE_TX_GROUP; break; default: /* TODO: Rx GTK or Tx GTK? */ wil_err(wil, "Can't determine GTK type\n"); rc = WMI_KEY_USE_RX_GROUP; break; } } wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); return rc; } static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, pairwise ? "PTK" : "GTK"); return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, params->key, key_usage); } static int wil_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, pairwise ? 
"PTK" : "GTK"); return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); } /* Need to be present or wiphy_new() will WARN */ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool unicast, bool multicast) { return 0; } static int wil_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; /* TODO: handle duration */ wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration); rc = wmi_set_channel(wil, chan->hw_value); if (rc) return rc; rc = wmi_rxon(wil, true); return rc; } static int wil_cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; wil_info(wil, "%s()\n", __func__); rc = wmi_rxon(wil, false); return rc; } static void wil_print_bcon_data(struct cfg80211_beacon_data *b) { print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, b->head, b->head_len); print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET, b->tail, b->tail_len); print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET, b->beacon_ies, b->beacon_ies_len); print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET, b->probe_resp, b->probe_resp_len); print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET, b->proberesp_ies, b->proberesp_ies_len); print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET, b->assocresp_ies, b->assocresp_ies_len); } static int wil_fix_bcon(struct wil6210_priv *wil, struct cfg80211_beacon_data *bcon) { struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); if (bcon->probe_resp_len <= hlen) return 0; /* always use IE's from full probe frame, they has more info * notable RSN */ bcon->proberesp_ies = f->u.probe_resp.variable; bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; if (!bcon->assocresp_ies) { bcon->assocresp_ies = 
bcon->proberesp_ies; bcon->assocresp_ies_len = bcon->proberesp_ies_len; } return 1; } /* internal functions for device reset and starting AP */ static int _wil_cfg80211_set_ies(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, bcon->proberesp_ies); if (rc) return rc; rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); #if 0 /* to use beacon IE's, remove this #if 0 */ if (rc) return rc; rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); #endif return rc; } static int _wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, const u8 *ssid, size_t ssid_len, u32 privacy, int bi, u8 chan, struct cfg80211_beacon_data *bcon, u8 hidden_ssid) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); __wil_down(wil); rc = __wil_up(wil); if (rc) goto out; rc = wmi_set_ssid(wil, ssid_len, ssid); if (rc) goto out; rc = _wil_cfg80211_set_ies(wiphy, bcon); if (rc) goto out; wil->privacy = privacy; wil->channel = chan; wil->hidden_ssid = hidden_ssid; netif_carrier_on(ndev); rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); if (rc) goto err_pcp_start; rc = wil_bcast_init(wil); if (rc) goto err_bcast; goto out; /* success */ err_bcast: wmi_pcp_stop(wil); err_pcp_start: netif_carrier_off(ndev); out: mutex_unlock(&wil->mutex); return rc; } static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; u32 privacy = 0; wil_dbg_misc(wil, "%s()\n", __func__); wil_print_bcon_data(bcon); if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } if (bcon->proberesp_ies 
&& cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, bcon->proberesp_ies_len)) privacy = 1; /* in case privacy has changed, need to restart the AP */ if (wil->privacy != privacy) { struct wireless_dev *wdev = ndev->ieee80211_ptr; wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n", wil->privacy, privacy); rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, wdev->ssid_len, privacy, wdev->beacon_interval, wil->channel, bcon, wil->hidden_ssid); } else { rc = _wil_cfg80211_set_ies(wiphy, bcon); } return rc; } static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; u8 hidden_ssid; wil_dbg_misc(wil, "%s()\n", __func__); if (!channel) { wil_err(wil, "AP: No channel???\n"); return -EINVAL; } switch (info->hidden_ssid) { case NL80211_HIDDEN_SSID_NOT_IN_USE: hidden_ssid = WMI_HIDDEN_SSID_DISABLED; break; case NL80211_HIDDEN_SSID_ZERO_LEN: hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY; break; case NL80211_HIDDEN_SSID_ZERO_CONTENTS: hidden_ssid = WMI_HIDDEN_SSID_CLEAR; break; default: wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid); return -EOPNOTSUPP; } wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, channel->center_freq, info->privacy ? 
"secure" : "open"); wil_dbg_misc(wil, "Privacy: %d auth_type %d\n", info->privacy, info->auth_type); wil_dbg_misc(wil, "Hidden SSID mode: %d\n", info->hidden_ssid); wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, info->ssid, info->ssid_len); wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, bcon, hidden_ssid); return rc; } static int wil_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s()\n", __func__); netif_carrier_off(ndev); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); wmi_pcp_stop(wil); __wil_down(wil); mutex_unlock(&wil->mutex); return 0; } static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, params->reason_code); mutex_lock(&wil->mutex); wil6210_disconnect(wil, params->mac, params->reason_code, false); mutex_unlock(&wil->mutex); return 0; } /* probe_client handling */ static void wil_probe_client_handle(struct wil6210_priv *wil, struct wil_probe_client_req *req) { struct net_device *ndev = wil_to_ndev(wil); struct wil_sta_info *sta = &wil->sta[req->cid]; /* assume STA is alive if it is still connected, * else FW will disconnect it */ bool alive = (sta->status == wil_sta_connected); cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL); } static struct list_head *next_probe_client(struct wil6210_priv *wil) { struct list_head *ret = NULL; mutex_lock(&wil->probe_client_mutex); if (!list_empty(&wil->probe_client_pending)) { ret = 
wil->probe_client_pending.next; list_del(ret); } mutex_unlock(&wil->probe_client_mutex); return ret; } void wil_probe_client_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, probe_client_worker); struct wil_probe_client_req *req; struct list_head *lh; while ((lh = next_probe_client(wil)) != NULL) { req = list_entry(lh, struct wil_probe_client_req, list); wil_probe_client_handle(wil, req); kfree(req); } } void wil_probe_client_flush(struct wil6210_priv *wil) { struct wil_probe_client_req *req, *t; wil_dbg_misc(wil, "%s()\n", __func__); mutex_lock(&wil->probe_client_mutex); list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) { list_del(&req->list); kfree(req); } mutex_unlock(&wil->probe_client_mutex); } static int wil_cfg80211_probe_client(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u64 *cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wil_probe_client_req *req; int cid = wil_find_cid(wil, peer); wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid); if (cid < 0) return -ENOLINK; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->cid = cid; req->cookie = cid; mutex_lock(&wil->probe_client_mutex); list_add_tail(&req->list, &wil->probe_client_pending); mutex_unlock(&wil->probe_client_mutex); *cookie = req->cookie; queue_work(wil->wq_service, &wil->probe_client_worker); return 0; } static int wil_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); if (params->ap_isolate >= 0) { wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__, wil->ap_isolate, params->ap_isolate); wil->ap_isolate = params->ap_isolate; } return 0; } static struct cfg80211_ops wil_cfg80211_ops = { .scan = wil_cfg80211_scan, .connect = wil_cfg80211_connect, .disconnect = wil_cfg80211_disconnect, .change_virtual_intf = wil_cfg80211_change_iface, .get_station = 
wil_cfg80211_get_station, .dump_station = wil_cfg80211_dump_station, .remain_on_channel = wil_remain_on_channel, .cancel_remain_on_channel = wil_cancel_remain_on_channel, .mgmt_tx = wil_cfg80211_mgmt_tx, .set_monitor_channel = wil_cfg80211_set_channel, .add_key = wil_cfg80211_add_key, .del_key = wil_cfg80211_del_key, .set_default_key = wil_cfg80211_set_default_key, /* AP mode */ .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, .del_station = wil_cfg80211_del_station, .probe_client = wil_cfg80211_probe_client, .change_bss = wil_cfg80211_change_bss, }; static void wil_wiphy_init(struct wiphy *wiphy) { wiphy->max_scan_ssids = 1; wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; wiphy->max_num_pmkids = 0 /* TODO: */; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR); /* TODO: enable P2P when integrated with supplicant: * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) */ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; /* TODO: figure this out */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites); wiphy->mgmt_stypes = wil_mgmt_stypes; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; } struct wireless_dev *wil_cfg80211_init(struct device *dev) { int rc = 0; struct wireless_dev *wdev; dev_dbg(dev, "%s()\n", __func__); wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (!wdev) return ERR_PTR(-ENOMEM); wdev->wiphy = wiphy_new(&wil_cfg80211_ops, sizeof(struct wil6210_priv)); if (!wdev->wiphy) { rc = -ENOMEM; goto out; } set_wiphy_dev(wdev->wiphy, dev); 
wil_wiphy_init(wdev->wiphy); rc = wiphy_register(wdev->wiphy); if (rc < 0) goto out_failed_reg; return wdev; out_failed_reg: wiphy_free(wdev->wiphy); out: kfree(wdev); return ERR_PTR(rc); } void wil_wdev_free(struct wil6210_priv *wil) { struct wireless_dev *wdev = wil_to_wdev(wil); dev_dbg(wil_to_dev(wil), "%s()\n", __func__); if (!wdev) return; wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); }
gpl-2.0
bromacia/wotlk_globalwow
dep/mysqllite/mysys/mf_keycache.c
400
163277
/* Copyright (C) 2000 MySQL AB, 2008-2009 Sun Microsystems, Inc This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** @file These functions handle keyblock cacheing for ISAM and MyISAM tables. One cache can handle many files. It must contain buffers of the same blocksize. init_key_cache() should be used to init cache handler. The free list (free_block_list) is a stack like structure. When a block is freed by free_block(), it is pushed onto the stack. When a new block is required it is first tried to pop one from the stack. If the stack is empty, it is tried to get a never-used block from the pool. If this is empty too, then a block is taken from the LRU ring, flushing it to disk, if neccessary. This is handled in find_key_block(). With the new free list, the blocks can have three temperatures: hot, warm and cold (which is free). This is remembered in the block header by the enum BLOCK_TEMPERATURE temperature variable. Remembering the temperature is neccessary to correctly count the number of warm blocks, which is required to decide when blocks are allowed to become hot. Whenever a block is inserted to another (sub-)chain, we take the old and new temperature into account to decide if we got one more or less warm block. blocks_unused is the sum of never used blocks in the pool and of currently free blocks. 
blocks_used is the number of blocks fetched from the pool and as such gives the maximum number of in-use blocks at any time. */ /* Key Cache Locking ================= All key cache locking is done with a single mutex per key cache: keycache->cache_lock. This mutex is locked almost all the time when executing code in this file (mf_keycache.c). However it is released for I/O and some copy operations. The cache_lock is also released when waiting for some event. Waiting and signalling is done via condition variables. In most cases the thread waits on its thread->suspend condition variable. Every thread has a my_thread_var structure, which contains this variable and a '*next' and '**prev' pointer. These pointers are used to insert the thread into a wait queue. A thread can wait for one block and thus be in one wait queue at a time only. Before starting to wait on its condition variable with mysql_cond_wait(), the thread enters itself to a specific wait queue with link_into_queue() (double linked with '*next' + '**prev') or wait_on_queue() (single linked with '*next'). Another thread, when releasing a resource, looks up the waiting thread in the related wait queue. It sends a signal with mysql_cond_signal() to the waiting thread. NOTE: Depending on the particular wait situation, either the sending thread removes the waiting thread from the wait queue with unlink_from_queue() or release_whole_queue() respectively, or the waiting thread removes itself. There is one exception from this locking scheme when one thread wants to reuse a block for some other address. This works by first marking the block reserved (status= BLOCK_IN_SWITCH) and then waiting for all threads that are reading the block to finish. Each block has a reference to a condition variable (condvar). It holds a reference to the thread->suspend condition variable for the waiting thread (if such a thread exists). When that thread is signaled, the reference is cleared. 
The number of readers of a block is registered in block->hash_link->requests. See wait_for_readers() / remove_reader() for details. This is similar to the above, but it clearly means that only one thread can wait for a particular block. There is no queue in this case. Strangely enough block->convar is used for waiting for the assigned hash_link only. More precisely it is used to wait for all requests to be unregistered from the assigned hash_link. The resize_queue serves two purposes: 1. Threads that want to do a resize wait there if in_resize is set. This is not used in the server. The server refuses a second resize request if one is already active. keycache->in_init is used for the synchronization. See set_var.cc. 2. Threads that want to access blocks during resize wait here during the re-initialization phase. When the resize is done, all threads on the queue are signalled. Hypothetical resizers can compete for resizing, and read/write requests will restart to request blocks from the freshly resized cache. If the cache has been resized too small, it is disabled and 'can_be_used' is false. In this case read/write requests bypass the cache. Since they increment and decrement 'cnt_for_resize_op', the next resizer can wait on the queue 'waiting_for_resize_cnt' until all I/O finished. 
*/ #include "mysys_priv.h" #include "mysys_err.h" #include <keycache.h> #include "my_static.h" #include <m_string.h> #include <my_bit.h> #include <errno.h> #include <stdarg.h> #include "probes_mysql.h" /* Some compilation flags have been added specifically for this module to control the following: - not to let a thread to yield the control when reading directly from key cache, which might improve performance in many cases; to enable this add: #define SERIALIZED_READ_FROM_CACHE - to set an upper bound for number of threads simultaneously using the key cache; this setting helps to determine an optimal size for hash table and improve performance when the number of blocks in the key cache much less than the number of threads accessing it; to set this number equal to <N> add #define MAX_THREADS <N> - to substitute calls of mysql_cond_wait for calls of mysql_cond_timedwait (wait with timeout set up); this setting should be used only when you want to trap a deadlock situation, which theoretically should not happen; to set timeout equal to <T> seconds add #define KEYCACHE_TIMEOUT <T> - to enable the module traps and to send debug information from key cache module to a special debug log add: #define KEYCACHE_DEBUG the name of this debug log file <LOG NAME> can be set through: #define KEYCACHE_DEBUG_LOG <LOG NAME> if the name is not defined, it's set by default; if the KEYCACHE_DEBUG flag is not set up and we are in a debug mode, i.e. when ! defined(DBUG_OFF), the debug information from the module is sent to the regular debug log. 
Example of the settings: #define SERIALIZED_READ_FROM_CACHE #define MAX_THREADS 100 #define KEYCACHE_TIMEOUT 1 #define KEYCACHE_DEBUG #define KEYCACHE_DEBUG_LOG "my_key_cache_debug.log" */ #define STRUCT_PTR(TYPE, MEMBER, a) \ (TYPE *) ((char *) (a) - offsetof(TYPE, MEMBER)) /* types of condition variables */ #define COND_FOR_REQUESTED 0 #define COND_FOR_SAVED 1 #define COND_FOR_READERS 2 typedef mysql_cond_t KEYCACHE_CONDVAR; /* descriptor of the page in the key cache block buffer */ struct st_keycache_page { int file; /* file to which the page belongs to */ my_off_t filepos; /* position of the page in the file */ }; /* element in the chain of a hash table bucket */ struct st_hash_link { struct st_hash_link *next, **prev; /* to connect links in the same bucket */ struct st_block_link *block; /* reference to the block for the page: */ File file; /* from such a file */ my_off_t diskpos; /* with such an offset */ uint requests; /* number of requests for the page */ }; /* simple states of a block */ #define BLOCK_ERROR 1 /* an error occured when performing file i/o */ #define BLOCK_READ 2 /* file block is in the block buffer */ #define BLOCK_IN_SWITCH 4 /* block is preparing to read new page */ #define BLOCK_REASSIGNED 8 /* blk does not accept requests for old page */ #define BLOCK_IN_FLUSH 16 /* block is selected for flush */ #define BLOCK_CHANGED 32 /* block buffer contains a dirty page */ #define BLOCK_IN_USE 64 /* block is not free */ #define BLOCK_IN_EVICTION 128 /* block is selected for eviction */ #define BLOCK_IN_FLUSHWRITE 256 /* block is in write to file */ #define BLOCK_FOR_UPDATE 512 /* block is selected for buffer modification */ /* page status, returned by find_key_block */ #define PAGE_READ 0 #define PAGE_TO_BE_READ 1 #define PAGE_WAIT_TO_BE_READ 2 /* block temperature determines in which (sub-)chain the block currently is */ enum BLOCK_TEMPERATURE { BLOCK_COLD /*free*/ , BLOCK_WARM , BLOCK_HOT }; /* key cache block */ struct st_block_link { struct 
st_block_link *next_used, **prev_used; /* to connect links in the LRU chain (ring) */ struct st_block_link *next_changed, **prev_changed; /* for lists of file dirty/clean blocks */ struct st_hash_link *hash_link; /* backward ptr to referring hash_link */ KEYCACHE_WQUEUE wqueue[2]; /* queues on waiting requests for new/old pages */ uint requests; /* number of requests for the block */ uchar *buffer; /* buffer for the block page */ uint offset; /* beginning of modified data in the buffer */ uint length; /* end of data in the buffer */ uint status; /* state of the block */ enum BLOCK_TEMPERATURE temperature; /* block temperature: cold, warm, hot */ uint hits_left; /* number of hits left until promotion */ ulonglong last_hit_time; /* timestamp of the last hit */ KEYCACHE_CONDVAR *condvar; /* condition variable for 'no readers' event */ }; KEY_CACHE dflt_key_cache_var; KEY_CACHE *dflt_key_cache= &dflt_key_cache_var; #define FLUSH_CACHE 2000 /* sort this many blocks at once */ static int flush_all_key_blocks(KEY_CACHE *keycache); static void wait_on_queue(KEYCACHE_WQUEUE *wqueue, mysql_mutex_t *mutex); static void release_whole_queue(KEYCACHE_WQUEUE *wqueue); static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block); #if !defined(DBUG_OFF) static void test_key_cache(KEY_CACHE *keycache, const char *where, my_bool lock); #endif #define KEYCACHE_HASH(f, pos) \ (((ulong) ((pos) / keycache->key_cache_block_size) + \ (ulong) (f)) & (keycache->hash_entries-1)) #define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1)) #define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log" #if defined(KEYCACHE_DEBUG) && ! 
defined(KEYCACHE_DEBUG_LOG) #define KEYCACHE_DEBUG_LOG DEFAULT_KEYCACHE_DEBUG_LOG #endif #if defined(KEYCACHE_DEBUG_LOG) static FILE *keycache_debug_log=NULL; static void keycache_debug_print(const char *fmt,...); #define KEYCACHE_DEBUG_OPEN \ if (!keycache_debug_log) \ { \ keycache_debug_log= fopen(KEYCACHE_DEBUG_LOG, "w"); \ (void) setvbuf(keycache_debug_log, NULL, _IOLBF, BUFSIZ); \ } #define KEYCACHE_DEBUG_CLOSE \ if (keycache_debug_log) \ { \ fclose(keycache_debug_log); \ keycache_debug_log= 0; \ } #else #define KEYCACHE_DEBUG_OPEN #define KEYCACHE_DEBUG_CLOSE #endif /* defined(KEYCACHE_DEBUG_LOG) */ #if defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) #define KEYCACHE_DBUG_PRINT(l, m) \ { if (keycache_debug_log) fprintf(keycache_debug_log, "%s: ", l); \ keycache_debug_print m; } #define KEYCACHE_DBUG_ASSERT(a) \ { if (! (a) && keycache_debug_log) fclose(keycache_debug_log); \ assert(a); } #else #define KEYCACHE_DBUG_PRINT(l, m) DBUG_PRINT(l, m) #define KEYCACHE_DBUG_ASSERT(a) DBUG_ASSERT(a) #endif /* defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) */ #if defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) static long keycache_thread_id; #define KEYCACHE_THREAD_TRACE(l) \ KEYCACHE_DBUG_PRINT(l,("|thread %ld",keycache_thread_id)) #define KEYCACHE_THREAD_TRACE_BEGIN(l) \ { struct st_my_thread_var *thread_var= my_thread_var; \ keycache_thread_id= thread_var->id; \ KEYCACHE_DBUG_PRINT(l,("[thread %ld",keycache_thread_id)) } #define KEYCACHE_THREAD_TRACE_END(l) \ KEYCACHE_DBUG_PRINT(l,("]thread %ld",keycache_thread_id)) #else #define KEYCACHE_THREAD_TRACE_BEGIN(l) #define KEYCACHE_THREAD_TRACE_END(l) #define KEYCACHE_THREAD_TRACE(l) #endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */ #define BLOCK_NUMBER(b) \ ((uint) (((char*)(b)-(char *) keycache->block_root)/sizeof(BLOCK_LINK))) #define HASH_LINK_NUMBER(h) \ ((uint) (((char*)(h)-(char *) keycache->hash_link_root)/sizeof(HASH_LINK))) #if (defined(KEYCACHE_TIMEOUT) && !defined(__WIN__)) || 
defined(KEYCACHE_DEBUG) static int keycache_pthread_cond_wait(mysql_cond_t *cond, mysql_mutex_t *mutex); #else #define keycache_pthread_cond_wait(C, M) mysql_cond_wait(C, M) #endif #if defined(KEYCACHE_DEBUG) static int keycache_pthread_mutex_lock(mysql_mutex_t *mutex); static void keycache_pthread_mutex_unlock(mysql_mutex_t *mutex); static int keycache_pthread_cond_signal(mysql_cond_t *cond); #else #define keycache_pthread_mutex_lock(M) mysql_mutex_lock(M) #define keycache_pthread_mutex_unlock(M) mysql_mutex_unlock(M) #define keycache_pthread_cond_signal(C) mysql_cond_signal(C) #endif /* defined(KEYCACHE_DEBUG) */ #if !defined(DBUG_OFF) #if defined(inline) #undef inline #endif #define inline /* disabled inline for easier debugging */ static int fail_block(BLOCK_LINK *block); static int fail_hlink(HASH_LINK *hlink); static int cache_empty(KEY_CACHE *keycache); #endif static inline uint next_power(uint value) { return (uint) my_round_up_to_next_power((uint32) value) << 1; } /* Initialize a key cache SYNOPSIS init_key_cache() keycache pointer to a key cache data structure key_cache_block_size size of blocks to keep cached data use_mem total memory to use for the key cache division_limit division limit (may be zero) age_threshold age threshold (may be zero) RETURN VALUE number of blocks in the key cache, if successful, 0 - otherwise. NOTES. if keycache->key_cache_inited != 0 we assume that the key cache is already initialized. This is for now used by myisamchk, but shouldn't be something that a program should rely on! It's assumed that no two threads call this function simultaneously referring to the same key cache handle. 
*/

int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
                   size_t use_mem, uint division_limit,
                   uint age_threshold)
{
  ulong blocks, hash_links;
  size_t length;
  int error;
  DBUG_ENTER("init_key_cache");
  DBUG_ASSERT(key_cache_block_size >= 512);

  KEYCACHE_DEBUG_OPEN;
  if (keycache->key_cache_inited && keycache->disk_blocks > 0)
  {
    DBUG_PRINT("warning",("key cache already in use"));
    DBUG_RETURN(0);
  }

  /* Reset global statistics; they do not survive (re)initialization. */
  keycache->global_cache_w_requests= keycache->global_cache_r_requests= 0;
  keycache->global_cache_read= keycache->global_cache_write= 0;
  /* -1 means "not yet sized"; set to the real count (or 0) below. */
  keycache->disk_blocks= -1;
  if (! keycache->key_cache_inited)
  {
    keycache->key_cache_inited= 1;
    /*
      Initialize these variables once only.
      Their value must survive re-initialization during resizing.
    */
    keycache->in_resize= 0;
    keycache->resize_in_flush= 0;
    keycache->cnt_for_resize_op= 0;
    keycache->waiting_for_resize_cnt.last_thread= NULL;
    keycache->in_init= 0;
    mysql_mutex_init(key_KEY_CACHE_cache_lock,
                     &keycache->cache_lock, MY_MUTEX_INIT_FAST);
    keycache->resize_queue.last_thread= NULL;
  }

  keycache->key_cache_mem_size= use_mem;
  keycache->key_cache_block_size= key_cache_block_size;
  DBUG_PRINT("info", ("key_cache_block_size: %u", key_cache_block_size));

  /*
    First estimate of the block count: divide the budget by the per-block
    footprint (one BLOCK_LINK, two HASH_LINKs, ~1.25 hash-bucket pointers,
    plus the page buffer itself).
  */
  blocks= (ulong) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) +
                              sizeof(HASH_LINK*) * 5/4 + key_cache_block_size));
  /* It doesn't make sense to have too few blocks (less than 8) */
  if (blocks >= 8)
  {
    for ( ; ; )
    {
      /* Set my_hash_entries to the next bigger 2 power */
      if ((keycache->hash_entries= next_power(blocks)) < blocks * 5/4)
        keycache->hash_entries<<= 1;
      hash_links= 2 * blocks;
#if defined(MAX_THREADS)
      /*
        Ensure every thread can hold a hash link while all blocks are
        in use; otherwise get_hash_link() could deadlock waiting forever.
      */
      if (hash_links < MAX_THREADS + blocks - 1)
        hash_links= MAX_THREADS + blocks - 1;
#endif
      /* Shave off blocks until metadata + page buffers fit in use_mem. */
      while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) +
                       ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) +
                       ALIGN_SIZE(sizeof(HASH_LINK*) *
                                  keycache->hash_entries))) +
             ((size_t) blocks * keycache->key_cache_block_size) > use_mem)
        blocks--;
      /* Allocate memory for cache page buffers */
      if ((keycache->block_mem=
           my_large_malloc((size_t) blocks * keycache->key_cache_block_size,
                           MYF(0))))
      {
        /*
          Allocate memory for blocks, hash_links and hash entries;
          For each block 2 hash links are allocated
        */
        if ((keycache->block_root= (BLOCK_LINK*) my_malloc(length, MYF(0))))
          break;
        my_large_free(keycache->block_mem);
        keycache->block_mem= 0;
      }
      if (blocks < 8)
      {
        my_errno= ENOMEM;
        my_error(EE_OUTOFMEMORY, MYF(0),
                 blocks * keycache->key_cache_block_size);
        goto err;
      }
      /* Allocation failed: retry with ~25% fewer blocks. */
      blocks= blocks / 4*3;
    }
    keycache->blocks_unused= blocks;
    keycache->disk_blocks= (int) blocks;
    keycache->hash_links= hash_links;
    /*
      block_root is a single arena: BLOCK_LINKs first, then the hash
      bucket array (hash_root), then the HASH_LINK pool, each aligned.
    */
    keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root +
                                        ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
    keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root +
                                            ALIGN_SIZE((sizeof(HASH_LINK*) *
                                                        keycache->hash_entries)));
    bzero((uchar*) keycache->block_root,
          keycache->disk_blocks * sizeof(BLOCK_LINK));
    bzero((uchar*) keycache->hash_root,
          keycache->hash_entries * sizeof(HASH_LINK*));
    bzero((uchar*) keycache->hash_link_root,
          keycache->hash_links * sizeof(HASH_LINK));
    keycache->hash_links_used= 0;
    keycache->free_hash_list= NULL;
    keycache->blocks_used= keycache->blocks_changed= 0;
    keycache->global_blocks_changed= 0;
    keycache->blocks_available=0;		/* For debugging */
    /* The LRU chain is empty after initialization */
    keycache->used_last= NULL;
    keycache->used_ins= NULL;
    keycache->free_block_list= NULL;
    keycache->keycache_time= 0;
    keycache->warm_blocks= 0;
    keycache->min_warm_blocks= (division_limit ?
                                blocks * division_limit / 100 + 1 :
                                blocks);
    keycache->age_threshold= (age_threshold ?
                              blocks * age_threshold / 100 :
                              blocks);
    keycache->can_be_used= 1;
    keycache->waiting_for_hash_link.last_thread= NULL;
    keycache->waiting_for_block.last_thread= NULL;
    DBUG_PRINT("exit",
               ("disk_blocks: %d  block_root: 0x%lx  hash_entries: %d\
 hash_root: 0x%lx  hash_links: %d  hash_link_root: 0x%lx",
                keycache->disk_blocks, (long) keycache->block_root,
                keycache->hash_entries, (long) keycache->hash_root,
                keycache->hash_links, (long) keycache->hash_link_root));
    bzero((uchar*) keycache->changed_blocks,
          sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH);
    bzero((uchar*) keycache->file_blocks,
          sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH);
  }
  else
  {
    /* key_buffer_size is specified too small. Disable the cache. */
    keycache->can_be_used= 0;
  }

  keycache->blocks= keycache->disk_blocks > 0 ? keycache->disk_blocks : 0;
  DBUG_RETURN((int) keycache->disk_blocks);

err:
  /* Preserve my_errno across the cleanup calls below. */
  error= my_errno;
  keycache->disk_blocks= 0;
  keycache->blocks= 0;
  if (keycache->block_mem)
  {
    my_large_free((uchar*) keycache->block_mem);
    keycache->block_mem= NULL;
  }
  if (keycache->block_root)
  {
    my_free(keycache->block_root);
    keycache->block_root= NULL;
  }
  my_errno= error;
  keycache->can_be_used= 0;
  DBUG_RETURN(0);
}


/*
  Resize a key cache

  SYNOPSIS
    resize_key_cache()
    keycache     	        pointer to a key cache data structure
    key_cache_block_size        size of blocks to keep cached data
    use_mem			total memory to use for the new key cache
    division_limit		new division limit (if not zero)
    age_threshold		new age threshold (if not zero)

  RETURN VALUE
    number of blocks in the key cache, if successful,
    0 - otherwise.

  NOTES.
    The function first compares the memory size and the block size
    parameters with the key cache values.

    If they differ the function frees the memory allocated for the
    old key cache blocks by calling the end_key_cache function and
    then rebuilds the key cache with new blocks by calling
    init_key_cache.
    The function starts the operation only when all other threads
    performing operations with the key cache let her to proceed
    (when cnt_for_resize=0).
*/

int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
                     size_t use_mem, uint division_limit,
                     uint age_threshold)
{
  int blocks;
  DBUG_ENTER("resize_key_cache");

  if (!keycache->key_cache_inited)
    DBUG_RETURN(keycache->disk_blocks);

  /*
    Same geometry requested: only the tuning parameters can change,
    no need to rebuild the cache (and no need for the resize protocol).
  */
  if(key_cache_block_size == keycache->key_cache_block_size &&
     use_mem == keycache->key_cache_mem_size)
  {
    change_key_cache_param(keycache, division_limit, age_threshold);
    DBUG_RETURN(keycache->disk_blocks);
  }

  keycache_pthread_mutex_lock(&keycache->cache_lock);

  /*
    We may need to wait for another thread which is doing a resize
    already. This cannot happen in the MySQL server though. It allows
    one resizer only. In set_var.cc keycache->in_init is used to block
    multiple attempts.
  */
  while (keycache->in_resize)
  {
    /* purecov: begin inspected */
    wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
    /* purecov: end */
  }

  /*
    Mark the operation in progress. This blocks other threads from doing
    a resize in parallel. It prohibits new blocks to enter the cache.
    Read/write requests can bypass the cache during the flush phase.
  */
  keycache->in_resize= 1;

  /* Need to flush only if keycache is enabled. */
  if (keycache->can_be_used)
  {
    /* Start the flush phase. */
    keycache->resize_in_flush= 1;

    if (flush_all_key_blocks(keycache))
    {
      /* TODO: if this happens, we should write a warning in the log file ! */
      keycache->resize_in_flush= 0;
      blocks= 0;
      /* Flush failed: leave the cache disabled rather than inconsistent. */
      keycache->can_be_used= 0;
      goto finish;
    }
    DBUG_ASSERT(cache_empty(keycache));

    /* End the flush phase. */
    keycache->resize_in_flush= 0;
  }

  /*
    Some direct read/write operations (bypassing the cache) may still be
    unfinished. Wait until they are done. If the key cache can be used,
    direct I/O is done in increments of key_cache_block_size. That is,
    every block is checked if it is in the cache. We need to wait for
    pending I/O before re-initializing the cache, because we may change
    the block size. Otherwise they could check for blocks at file
    positions where the new block division has none. We do also want to
    wait for I/O done when (if) the cache was disabled. It must not
    run in parallel with normal cache operation.
  */
  while (keycache->cnt_for_resize_op)
    wait_on_queue(&keycache->waiting_for_resize_cnt, &keycache->cache_lock);

  /*
    Free old cache structures, allocate new structures, and initialize
    them. Note that the cache_lock mutex and the resize_queue are left
    untouched. We do not lose the cache_lock and will release it only at
    the end of this function.
  */
  end_key_cache(keycache, 0);			/* Don't free mutex */
  /* The following will work even if use_mem is 0 */
  blocks= init_key_cache(keycache, key_cache_block_size, use_mem,
			 division_limit, age_threshold);

finish:
  /*
    Mark the resize finished. This allows other threads to start a
    resize or to request new cache blocks.
  */
  keycache->in_resize= 0;

  /* Signal waiting threads. */
  release_whole_queue(&keycache->resize_queue);

  keycache_pthread_mutex_unlock(&keycache->cache_lock);
  DBUG_RETURN(blocks);
}


/*
  Increment counter blocking resize key cache operation
*/
static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
{
  /* Caller must hold keycache->cache_lock. */
  keycache->cnt_for_resize_op++;
}


/*
  Decrement counter blocking resize key cache operation;
  Signal the operation to proceed when counter becomes equal zero
*/
static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
{
  /* Caller must hold keycache->cache_lock. */
  if (!--keycache->cnt_for_resize_op)
    release_whole_queue(&keycache->waiting_for_resize_cnt);
}


/*
  Change the key cache parameters

  SYNOPSIS
    change_key_cache_param()
    keycache			pointer to a key cache data structure
    division_limit		new division limit (if not zero)
    age_threshold		new age threshold (if not zero)

  RETURN VALUE
    none

  NOTES.
    Presently the function resets the key cache parameters
    concerning midpoint insertion strategy - division_limit and
    age_threshold.
*/

void change_key_cache_param(KEY_CACHE *keycache, uint division_limit,
			    uint age_threshold)
{
  DBUG_ENTER("change_key_cache_param");

  keycache_pthread_mutex_lock(&keycache->cache_lock);
  /* A zero argument means "keep the current value". */
  if (division_limit)
    keycache->min_warm_blocks= (keycache->disk_blocks *
				division_limit / 100 + 1);
  if (age_threshold)
    keycache->age_threshold= (keycache->disk_blocks *
			      age_threshold / 100);
  keycache_pthread_mutex_unlock(&keycache->cache_lock);
  DBUG_VOID_RETURN;
}


/*
  Remove key_cache from memory

  SYNOPSIS
    end_key_cache()
    keycache		key cache handle
    cleanup		Complete free (Free also mutex for key cache)

  RETURN VALUE
    none
*/

void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
{
  DBUG_ENTER("end_key_cache");
  DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache));

  if (!keycache->key_cache_inited)
    DBUG_VOID_RETURN;

  if (keycache->disk_blocks > 0)
  {
    /*
      block_mem and block_root are allocated together in init_key_cache,
      so block_mem != NULL implies block_root is valid too.
    */
    if (keycache->block_mem)
    {
      my_large_free((uchar*) keycache->block_mem);
      keycache->block_mem= NULL;
      my_free(keycache->block_root);
      keycache->block_root= NULL;
    }
    keycache->disk_blocks= -1;
    /* Reset blocks_changed to be safe if flush_all_key_blocks is called */
    keycache->blocks_changed= 0;
  }

  DBUG_PRINT("status", ("used: %lu  changed: %lu  w_requests: %lu  "
                        "writes: %lu  r_requests: %lu  reads: %lu",
                        keycache->blocks_used,
                        keycache->global_blocks_changed,
                        (ulong) keycache->global_cache_w_requests,
                        (ulong) keycache->global_cache_write,
                        (ulong) keycache->global_cache_r_requests,
                        (ulong) keycache->global_cache_read));

  /*
    Reset these values to be able to detect a disabled key cache.
    See Bug#44068 (RESTORE can disable the MyISAM Key Cache).
  */
  keycache->blocks_used= 0;
  keycache->blocks_unused= 0;

  /*
    Only a full cleanup destroys the mutex; a resize (cleanup == 0)
    keeps it because the caller still holds it (see resize_key_cache).
  */
  if (cleanup)
  {
    mysql_mutex_destroy(&keycache->cache_lock);
    keycache->key_cache_inited= keycache->can_be_used= 0;
    KEYCACHE_DEBUG_CLOSE;
  }
  DBUG_VOID_RETURN;
} /* end_key_cache */


/*
  Link a thread into double-linked queue of waiting threads.
  SYNOPSIS
    link_into_queue()
      wqueue		pointer to the queue structure
      thread		pointer to the thread to be added to the queue

  RETURN VALUE
    none

  NOTES.
    Queue is represented by a circular list of the thread structures
    The list is double-linked of the type (**prev,*next), accessed by
    a pointer to the last element.
*/

static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
                            struct st_my_thread_var *thread)
{
  struct st_my_thread_var *last;

  /* The thread must not already be linked in any queue. */
  DBUG_ASSERT(!thread->next && !thread->prev);
  if (! (last= wqueue->last_thread))
  {
    /* Queue is empty */
    thread->next= thread;
    thread->prev= &thread->next;
  }
  else
  {
    /* Insert after 'last', i.e. at the logical head of the ring. */
    thread->prev= last->next->prev;
    last->next->prev= &thread->next;
    thread->next= last->next;
    last->next= thread;
  }
  /* The newly linked thread becomes the new tail. */
  wqueue->last_thread= thread;
}


/*
  Unlink a thread from double-linked queue of waiting threads

  SYNOPSIS
    unlink_from_queue()
      wqueue		pointer to the queue structure
      thread		pointer to the thread to be removed from the queue

  RETURN VALUE
    none

  NOTES.
    See NOTES for link_into_queue
*/

static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
                              struct st_my_thread_var *thread)
{
  KEYCACHE_DBUG_PRINT("unlink_from_queue", ("thread %ld", thread->id));
  /* The thread must currently be linked. */
  DBUG_ASSERT(thread->next && thread->prev);
  if (thread->next == thread)
    /* The queue contains only one member */
    wqueue->last_thread= NULL;
  else
  {
    thread->next->prev= thread->prev;
    *thread->prev=thread->next;
    if (wqueue->last_thread == thread)
      wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
                                      thread->prev);
  }
  thread->next= NULL;
#if !defined(DBUG_OFF)
  /*
    This makes it easier to see it's not in a chain during debugging.
    And some DBUG_ASSERT() rely on it.
  */
  thread->prev= NULL;
#endif
}


/*
  Add a thread to single-linked queue of waiting threads

  SYNOPSIS
    wait_on_queue()
      wqueue		Pointer to the queue structure.
      mutex		Cache_lock to acquire after awake.

  RETURN VALUE
    none

  NOTES.
    Queue is represented by a circular list of the thread structures
    The list is single-linked of the type (*next), accessed by a pointer
    to the last element.
    The function protects against stray signals by verifying that the
    current thread is unlinked from the queue when awaking. However,
    since several threads can wait for the same event, it might be
    necessary for the caller of the function to check again if the
    condition for awake is indeed matched.
*/

static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
                          mysql_mutex_t *mutex)
{
  struct st_my_thread_var *last;
  struct st_my_thread_var *thread= my_thread_var;

  /* Add to queue. */
  DBUG_ASSERT(!thread->next);
  DBUG_ASSERT(!thread->prev); /* Not required, but must be true anyway. */
  if (! (last= wqueue->last_thread))
    thread->next= thread;
  else
  {
    thread->next= last->next;
    last->next= thread;
  }
  wqueue->last_thread= thread;

  /*
    Wait until thread is removed from queue by the signalling thread.
    The loop protects against stray signals. The signaller clears
    thread->next (see release_whole_queue), which ends the loop.
  */
  do
  {
    KEYCACHE_DBUG_PRINT("wait", ("suspend thread %ld", thread->id));
    keycache_pthread_cond_wait(&thread->suspend, mutex);
  }
  while (thread->next);
}


/*
  Remove all threads from queue signaling them to proceed

  SYNOPSIS
    release_whole_queue()
      wqueue		pointer to the queue structure

  RETURN VALUE
    none

  NOTES.
    See notes for wait_on_queue().
    When removed from the queue each thread is signaled via condition
    variable thread->suspend.
*/

static void release_whole_queue(KEYCACHE_WQUEUE *wqueue)
{
  struct st_my_thread_var *last;
  struct st_my_thread_var *next;
  struct st_my_thread_var *thread;

  /* Queue may be empty. */
  if (!(last= wqueue->last_thread))
    return;

  next= last->next;
  do
  {
    thread=next;
    KEYCACHE_DBUG_PRINT("release_whole_queue: signal",
                        ("thread %ld", thread->id));
    /* Signal the thread. */
    keycache_pthread_cond_signal(&thread->suspend);
    /*
      Take thread from queue. Read thread->next before clearing it:
      clearing is what lets the waiter leave its wait loop.
    */
    next=thread->next;
    thread->next= NULL;
  }
  while (thread != last);

  /* Now queue is definitely empty. */
  wqueue->last_thread= NULL;
}


/*
  Unlink a block from the chain of dirty/clean blocks
*/

static inline void unlink_changed(BLOCK_LINK *block)
{
  /* The block must currently be linked in a changed/clean chain. */
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  if (block->next_changed)
    block->next_changed->prev_changed= block->prev_changed;
  *block->prev_changed= block->next_changed;

#if !defined(DBUG_OFF)
  /*
    This makes it easier to see it's not in a chain during debugging.
    And some DBUG_ASSERT() rely on it.
  */
  block->next_changed= NULL;
  block->prev_changed= NULL;
#endif
}


/*
  Link a block into the chain of dirty/clean blocks
*/

static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
{
  DBUG_ASSERT(!block->next_changed);
  DBUG_ASSERT(!block->prev_changed);
  /* Push at the head of the chain anchored at *phead. */
  block->prev_changed= phead;
  if ((block->next_changed= *phead))
    (*phead)->prev_changed= &block->next_changed;
  *phead= block;
}


/*
  Link a block in a chain of clean blocks of a file.

  SYNOPSIS
    link_to_file_list()
      keycache		Key cache handle
      block		Block to relink
      file		File to be linked to
      unlink		If to unlink first

  DESCRIPTION
    Unlink a block from whichever chain it is linked in, if it's
    asked for, and link it to the chain of clean blocks of the
    specified file.

  NOTE
    Please do never set/clear BLOCK_CHANGED outside of
    link_to_file_list() or link_to_changed_list().
    You would risk to damage correct counting of changed blocks
    and to find blocks in the wrong hash.

  RETURN
    void
*/

static void link_to_file_list(KEY_CACHE *keycache,
                              BLOCK_LINK *block, int file,
                              my_bool unlink_block)
{
  DBUG_ASSERT(block->status & BLOCK_IN_USE);
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
  DBUG_ASSERT(block->hash_link->file == file);
  if (unlink_block)
    unlink_changed(block);
  link_changed(block, &keycache->file_blocks[FILE_HASH(file)]);
  if (block->status & BLOCK_CHANGED)
  {
    /* The block becomes clean: maintain the dirty-block counters. */
    block->status&= ~BLOCK_CHANGED;
    keycache->blocks_changed--;
    keycache->global_blocks_changed--;
  }
}


/*
  Re-link a block from the clean chain to the dirty chain of a file.
  SYNOPSIS
    link_to_changed_list()
      keycache		key cache handle
      block		block to relink

  DESCRIPTION
    Unlink a block from the chain of clean blocks of a file
    and link it to the chain of dirty blocks of the same file.

  NOTE
    Please do never set/clear BLOCK_CHANGED outside of
    link_to_file_list() or link_to_changed_list().
    You would risk to damage correct counting of changed blocks
    and to find blocks in the wrong hash.

  RETURN
    void
*/

static void link_to_changed_list(KEY_CACHE *keycache,
                                 BLOCK_LINK *block)
{
  DBUG_ASSERT(block->status & BLOCK_IN_USE);
  DBUG_ASSERT(!(block->status & BLOCK_CHANGED));
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);

  unlink_changed(block);
  link_changed(block,
               &keycache->changed_blocks[FILE_HASH(block->hash_link->file)]);
  /* The block becomes dirty: maintain the dirty-block counters. */
  block->status|=BLOCK_CHANGED;
  keycache->blocks_changed++;
  keycache->global_blocks_changed++;
}


/*
  Link a block to the LRU chain at the beginning or at the end of
  one of two parts.

  SYNOPSIS
    link_block()
      keycache		pointer to a key cache data structure
      block		pointer to the block to link to the LRU chain
      hot		<-> to link the block into the hot subchain
      at_end		<-> to link the block at the end of the subchain

  RETURN VALUE
    none

  NOTES.
    The LRU ring is represented by a circular list of block structures.
    The list is double-linked of the type (**prev,*next) type.
    The LRU ring is divided into two parts - hot and warm.
    There are two pointers to access the last blocks of these two
    parts. The beginning of the warm part follows right after the
    end of the hot part.
    Only blocks of the warm part can be used for eviction.
    The first block from the beginning of this subchain is always
    taken for eviction (keycache->last_used->next)

    LRU chain:       +------+   H O T    +------+
         +----| end  |----...<----| beg  |----+
         |    +------+last        +------+    |
         v<-link in latest hot (new end)      |
         |     link in latest warm (new end)->^
         |    +------+  W A R M   +------+    |
         +----| beg  |---->...----| end  |----+
              +------+            +------+ins
                                  first for eviction

    It is also possible that the block is selected for eviction and thus
    not linked in the LRU ring.
*/

static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
                       my_bool at_end)
{
  BLOCK_LINK *ins;
  BLOCK_LINK **pins;

  DBUG_ASSERT((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link); /*backptr to block NULL from free_block()*/
  DBUG_ASSERT(!block->requests);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  if (!hot && keycache->waiting_for_block.last_thread)
  {
    /* Signal that in the LRU warm sub-chain an available block has appeared */
    struct st_my_thread_var *last_thread=
                               keycache->waiting_for_block.last_thread;
    struct st_my_thread_var *first_thread= last_thread->next;
    struct st_my_thread_var *next_thread= first_thread;
    HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
    struct st_my_thread_var *thread;
    do
    {
      thread= next_thread;
      next_thread= thread->next;
      /*
         We notify about the event all threads that ask
         for the same page as the first thread in the queue
      */
      if ((HASH_LINK *) thread->opt_info == hash_link)
      {
        KEYCACHE_DBUG_PRINT("link_block: signal", ("thread %ld", thread->id));
        keycache_pthread_cond_signal(&thread->suspend);
        unlink_from_queue(&keycache->waiting_for_block, thread);
        /* Each woken thread has an implicit request on the block. */
        block->requests++;
      }
    }
    while (thread != last_thread);
    hash_link->block= block;
    /*
      NOTE: We assigned the block to the hash_link and signalled the
      requesting thread(s). But it is possible that other threads runs
      first. These threads see the hash_link assigned to a block which
      is assigned to another hash_link and not marked BLOCK_IN_SWITCH.
      This can be a problem for functions that do not select the block
      via its hash_link: flush and free. They do only see a block which
      is in a "normal" state and don't know that it will be evicted soon.

      We cannot set BLOCK_IN_SWITCH here because only one of the
      requesting threads must handle the eviction. All others must wait
      for it to complete. If we set the flag here, the threads would not
      know who is in charge of the eviction. Without the flag, the first
      thread takes the stick and sets the flag.

      But we need to note in the block that is has been selected for
      eviction. It must not be freed. The evicting thread will not
      expect the block in the free list. Before freeing we could also
      check if block->requests > 1. But I think including another flag
      in the check of block->status is slightly more efficient and
      probably easier to read.
    */
    block->status|= BLOCK_IN_EVICTION;
    KEYCACHE_THREAD_TRACE("link_block: after signaling");
#if defined(KEYCACHE_DEBUG)
    KEYCACHE_DBUG_PRINT("link_block",
        ("linked,unlinked block %u  status=%x  #requests=%u  #available=%u",
         BLOCK_NUMBER(block), block->status,
         block->requests, keycache->blocks_available));
#endif
    return;
  }
  /* used_ins anchors the hot part, used_last the warm part. */
  pins= hot ? &keycache->used_ins : &keycache->used_last;
  ins= *pins;
  if (ins)
  {
    ins->next_used->prev_used= &block->next_used;
    block->next_used= ins->next_used;
    block->prev_used= &ins->next_used;
    ins->next_used= block;
    if (at_end)
      *pins= block;
  }
  else
  {
    /* The LRU ring is empty. Let the block point to itself. */
    keycache->used_last= keycache->used_ins= block->next_used= block;
    block->prev_used= &block->next_used;
  }
  KEYCACHE_THREAD_TRACE("link_block");
#if defined(KEYCACHE_DEBUG)
  keycache->blocks_available++;
  KEYCACHE_DBUG_PRINT("link_block",
      ("linked block %u:%1u  status=%x  #requests=%u  #available=%u",
       BLOCK_NUMBER(block), at_end, block->status,
       block->requests, keycache->blocks_available));
  KEYCACHE_DBUG_ASSERT((ulong) keycache->blocks_available <=
                       keycache->blocks_used);
#endif
}


/*
  Unlink a block from the LRU chain

  SYNOPSIS
    unlink_block()
      keycache		pointer to a key cache data structure
      block		pointer to the block to unlink from the LRU chain

  RETURN VALUE
    none

  NOTES.
    See NOTES for link_block
*/

static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
  DBUG_ASSERT((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link); /*backptr to block NULL from free_block()*/
  DBUG_ASSERT(!block->requests);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(block->next_used && block->prev_used &&
              (block->next_used->prev_used == &block->next_used) &&
              (*block->prev_used == block));
  if (block->next_used == block)
    /* The list contains only one member */
    keycache->used_last= keycache->used_ins= NULL;
  else
  {
    block->next_used->prev_used= block->prev_used;
    *block->prev_used= block->next_used;
    /* Move the part anchors back if they pointed at the removed block. */
    if (keycache->used_last == block)
      keycache->used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
    if (keycache->used_ins == block)
      keycache->used_ins=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
  }
  block->next_used= NULL;
#if !defined(DBUG_OFF)
  /*
    This makes it easier to see it's not in a chain during debugging.
    And some DBUG_ASSERT() rely on it.
  */
  block->prev_used= NULL;
#endif

  KEYCACHE_THREAD_TRACE("unlink_block");
#if defined(KEYCACHE_DEBUG)
  KEYCACHE_DBUG_ASSERT(keycache->blocks_available != 0);
  keycache->blocks_available--;
  KEYCACHE_DBUG_PRINT("unlink_block",
    ("unlinked block %u  status=%x  #requests=%u  #available=%u",
     BLOCK_NUMBER(block), block->status,
     block->requests, keycache->blocks_available));
#endif
}


/*
  Register requests for a block.

  SYNOPSIS
    reg_requests()
      keycache		Pointer to a key cache data structure.
      block		Pointer to the block to register a request on.
      count		Number of requests. Always 1.

  NOTE
    The first request unlinks the block from the LRU ring. This means
    that it is protected against eviction.

  RETURN
    void
*/
static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count)
{
  DBUG_ASSERT(block->status & BLOCK_IN_USE);
  DBUG_ASSERT(block->hash_link);

  if (!block->requests)
    unlink_block(keycache, block);
  block->requests+=count;
}


/*
  Unregister request for a block
  linking it to the LRU chain if it's the last request

  SYNOPSIS
    unreg_request()
      keycache		pointer to a key cache data structure
      block		pointer to the block to link to the LRU chain
      at_end		<-> to link the block at the end of the LRU chain

  RETURN VALUE
    none

  NOTES.
    Every linking to the LRU ring decrements by one a special block
    counter (if it's positive). If the at_end parameter is TRUE the block is
    added either at the end of warm sub-chain or at the end of hot sub-chain.
    It is added to the hot subchain if its counter is zero and number of
    blocks in warm sub-chain is not less than some low limit (determined by
    the division_limit parameter). Otherwise the block is added to the warm
    sub-chain. If the at_end parameter is FALSE the block is always added
    at beginning of the warm sub-chain.
    Thus a warm block can be promoted to the hot sub-chain when its counter
    becomes zero for the first time.
    At the same time the block at the very beginning of the hot subchain
    might be moved to the beginning of the warm subchain if it stays
    untouched for a too long time (this time is determined by parameter
    age_threshold).

    It is also possible that the block is selected for eviction and thus
    not linked in the LRU ring.
*/

static void unreg_request(KEY_CACHE *keycache,
                          BLOCK_LINK *block, int at_end)
{
  DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link); /*backptr to block NULL from free_block()*/
  DBUG_ASSERT(block->requests);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  /*
    Unregister the request, but do not link erroneous blocks into the
    LRU ring.
  */
  if (!--block->requests && !(block->status & BLOCK_ERROR))
  {
    my_bool hot;
    if (block->hits_left)
      block->hits_left--;
    /*
      Promote to the hot sub-chain only when the hit budget is spent,
      linking at the end is requested, and the warm part is large enough.
    */
    hot= !block->hits_left && at_end &&
      keycache->warm_blocks > keycache->min_warm_blocks;
    if (hot)
    {
      if (block->temperature == BLOCK_WARM)
        keycache->warm_blocks--;
      block->temperature= BLOCK_HOT;
      KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks: %lu",
                           keycache->warm_blocks));
    }
    link_block(keycache, block, hot, (my_bool)at_end);
    block->last_hit_time= keycache->keycache_time;
    keycache->keycache_time++;
    /*
      At this place, the block might be in the LRU ring or not. If an
      evicter was waiting for a block, it was selected for eviction and
      not linked in the LRU ring.
    */

    /*
      Check if we should link a hot block to the warm block sub-chain.
      It is possible that we select the same block as above. But it can
      also be another block. In any case a block from the LRU ring is
      selected. In other words it works even if the above block was
      selected for eviction and not linked in the LRU ring. Since this
      happens only if the LRU ring is empty, the block selected below
      would be NULL and the rest of the function skipped.
    */
    block= keycache->used_ins;
    if (block && keycache->keycache_time - block->last_hit_time >
	keycache->age_threshold)
    {
      /* Demote the oldest hot block to the beginning of the warm part. */
      unlink_block(keycache, block);
      link_block(keycache, block, 0, 0);
      if (block->temperature != BLOCK_WARM)
      {
        keycache->warm_blocks++;
        block->temperature= BLOCK_WARM;
      }
      KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks: %lu",
                           keycache->warm_blocks));
    }
  }
}


/*
  Remove a reader of the page in block
*/

static void remove_reader(BLOCK_LINK *block)
{
  DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  DBUG_ASSERT(block->hash_link->requests);
  /* Wake a thread in wait_for_readers() when the last reader leaves. */
  if (! --block->hash_link->requests && block->condvar)
    keycache_pthread_cond_signal(block->condvar);
}


/*
  Wait until the last reader of the page in block
  signals on its termination
*/

static void wait_for_readers(KEY_CACHE *keycache,
                             BLOCK_LINK *block)
{
  struct st_my_thread_var *thread= my_thread_var;
  DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(!(block->status & (BLOCK_IN_FLUSH | BLOCK_CHANGED)));
  DBUG_ASSERT(block->hash_link);
  DBUG_ASSERT(block->hash_link->block == block);
  /* Linked in file_blocks or changed_blocks hash. */
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  /* Not linked in LRU ring. */
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  while (block->hash_link->requests)
  {
    KEYCACHE_DBUG_PRINT("wait_for_readers: wait",
                        ("suspend thread %ld  block %u",
                         thread->id, BLOCK_NUMBER(block)));
    /* There must be no other waiter. We have no queue here. */
    DBUG_ASSERT(!block->condvar);
    block->condvar= &thread->suspend;
    keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
    block->condvar= NULL;
  }
}


/*
  Add a hash link to a bucket in the hash_table
*/

static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
{
  /* Push at the head of the bucket chain. */
  if (*start)
    (*start)->prev= &hash_link->next;
  hash_link->next= *start;
  hash_link->prev= start;
  *start= hash_link;
}


/*
  Remove a hash link from the hash table
*/

static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
{
  KEYCACHE_DBUG_PRINT("unlink_hash", ("fd: %u  pos_ %lu  #requests=%u",
      (uint) hash_link->file,(ulong) hash_link->diskpos,
      hash_link->requests));
  KEYCACHE_DBUG_ASSERT(hash_link->requests == 0);
  if ((*hash_link->prev= hash_link->next))
    hash_link->next->prev= hash_link->prev;
  hash_link->block= NULL;

  if (keycache->waiting_for_hash_link.last_thread)
  {
    /* Signal that a free hash link has appeared */
    struct st_my_thread_var *last_thread=
                               keycache->waiting_for_hash_link.last_thread;
    struct st_my_thread_var *first_thread= last_thread->next;
    struct st_my_thread_var *next_thread= first_thread;
    KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
    struct st_my_thread_var *thread;

    /* Re-purpose the freed link for the first waiter's page. */
    hash_link->file= first_page->file;
    hash_link->diskpos= first_page->filepos;
    do
    {
      KEYCACHE_PAGE *page;
      thread= next_thread;
      page= (KEYCACHE_PAGE *) thread->opt_info;
      next_thread= thread->next;
      /*
         We notify about the event all threads that ask
         for the same page as the first thread in the queue
      */
      if (page->file == hash_link->file &&
          page->filepos == hash_link->diskpos)
      {
        KEYCACHE_DBUG_PRINT("unlink_hash: signal", ("thread %ld", thread->id));
        keycache_pthread_cond_signal(&thread->suspend);
        unlink_from_queue(&keycache->waiting_for_hash_link, thread);
      }
    }
    while (thread != last_thread);
    /* Link the re-purposed hash link into its new bucket directly. */
    link_hash(&keycache->hash_root[KEYCACHE_HASH(hash_link->file,
                                                 hash_link->diskpos)],
              hash_link);
    return;
  }
  /* No waiter: return the link to the free list. */
  hash_link->next= keycache->free_hash_list;
  keycache->free_hash_list= hash_link;
}


/*
  Get the hash
  link for a page
*/

static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
                                int file, my_off_t filepos)
{
  reg1 HASH_LINK *hash_link, **start;
#if defined(KEYCACHE_DEBUG)
  int cnt;
#endif

  KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u  pos: %lu",
                      (uint) file,(ulong) filepos));

restart:
  /*
     Find the bucket in the hash table for the pair (file, filepos);
     start contains the head of the bucket list,
     hash_link points to the first member of the list
  */
  hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]);
#if defined(KEYCACHE_DEBUG)
  cnt= 0;
#endif
  /* Look for an element for the pair (file, filepos) in the bucket chain */
  while (hash_link &&
         (hash_link->diskpos != filepos || hash_link->file != file))
  {
    hash_link= hash_link->next;
#if defined(KEYCACHE_DEBUG)
    cnt++;
    /* A chain longer than hash_links_used indicates a corrupted list. */
    if (! (cnt <= keycache->hash_links_used))
    {
      int i;
      for (i=0, hash_link= *start ;
           i < cnt ; i++, hash_link= hash_link->next)
      {
        KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u  pos: %lu",
            (uint) hash_link->file,(ulong) hash_link->diskpos));
      }
    }
    KEYCACHE_DBUG_ASSERT(cnt <= keycache->hash_links_used);
#endif
  }
  if (! hash_link)
  {
    /* There is no hash link in the hash table for the pair (file, filepos) */
    if (keycache->free_hash_list)
    {
      hash_link= keycache->free_hash_list;
      keycache->free_hash_list= hash_link->next;
    }
    else if (keycache->hash_links_used < keycache->hash_links)
    {
      hash_link= &keycache->hash_link_root[keycache->hash_links_used++];
    }
    else
    {
      /* Wait for a free hash link */
      struct st_my_thread_var *thread= my_thread_var;
      KEYCACHE_PAGE page;
      KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
      /*
        'page' lives on this thread's stack; unlink_hash() reads it via
        opt_info while this thread is still suspended, so it stays valid.
      */
      page.file= file;
      page.filepos= filepos;
      thread->opt_info= (void *) &page;
      link_into_queue(&keycache->waiting_for_hash_link, thread);
      KEYCACHE_DBUG_PRINT("get_hash_link: wait",
                        ("suspend thread %ld", thread->id));
      keycache_pthread_cond_wait(&thread->suspend,
                                 &keycache->cache_lock);
      thread->opt_info= NULL;
      /* Another thread may have claimed the link: search again. */
      goto restart;
    }
    hash_link->file= file;
    hash_link->diskpos= filepos;
    link_hash(start, hash_link);
  }
  /* Register the request for the page */
  hash_link->requests++;

  return hash_link;
}


/*
  Get a block for the file page requested by a keycache read/write operation;
  If the page is not in the cache return a free block, if there is none
  return the lru block after saving its buffer if the page is dirty.

  SYNOPSIS

    find_key_block()
      keycache		pointer to a key cache data structure
      file		handler for the file to read page from
      filepos		position of the page in the file
      init_hits_left	how initialize the block counter for the page
      wrmode		<-> get for writing
      page_st    out	{PAGE_READ,PAGE_TO_BE_READ,PAGE_WAIT_TO_BE_READ}

  RETURN VALUE
    Pointer to the found block if successful, 0 - otherwise

  NOTES.
    For the page from file positioned at filepos the function checks whether
    the page is in the key cache specified by the first parameter.
    If this is the case it immediately returns the block.
    If not, the function first chooses a block for this page. If there is
    no not used blocks in the key cache yet, the function takes the block
    at the very beginning of the warm sub-chain.
    It saves the page in that block if it's dirty before returning the
    pointer to it.

    The function returns in the page_st parameter the following values:
      PAGE_READ         - if page already in the block,
      PAGE_TO_BE_READ   - if it is to be read yet by the current thread
      WAIT_TO_BE_READ   - if it is to be read by another thread

    If an error occurs THE BLOCK_ERROR bit is set in the block status.
    It might happen that there are no blocks in LRU chain (in warm part) -
    all blocks are unlinked for some read/write operations. Then the function
    waits until first of this operations links any block back.

    NOTE(review): called and returns with cache_lock held, but the lock is
    temporarily released inside (cond-waits, my_pwrite) - hence the many
    re-validation asserts after each wait.
*/

static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
                                  File file, my_off_t filepos,
                                  int init_hits_left,
                                  int wrmode, int *page_st)
{
  HASH_LINK *hash_link;
  BLOCK_LINK *block;
  int error= 0;
  int page_status;

  DBUG_ENTER("find_key_block");
  KEYCACHE_THREAD_TRACE("find_key_block:begin");
  DBUG_PRINT("enter", ("fd: %d pos: %lu wrmode: %d",
                       file, (ulong) filepos, wrmode));
  KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %d pos: %lu wrmode: %d",
                                         file, (ulong) filepos,
                                         wrmode));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache2",
               test_key_cache(keycache, "start of find_key_block", 0););
#endif

restart:
  /*
    If the flush phase of a resize operation fails, the cache is left
    unusable. This will be detected only after "goto restart".
  */
  if (!keycache->can_be_used)
    DBUG_RETURN(0);

  /*
    Find the hash_link for the requested file block (file, filepos). We do
    always get a hash_link here. It has registered our request so that no
    other thread can use it for another file block until we release the
    request (which is done by remove_reader() usually). The hash_link can
    have a block assigned to it or not. If there is a block, it may be
    assigned to this hash_link or not. In cases where a block is evicted
    from the cache, it is taken from the LRU ring and referenced by the new
    hash_link. But the block can still be assigned to its old hash_link for
    some time if it needs to be flushed first, or if there are other threads
    still reading it.

    Summary:
      hash_link is always returned.
      hash_link->block can be:
      - NULL or
      - not assigned to this hash_link or
      - assigned to this hash_link. If assigned, the block can have
        - invalid data (when freshly assigned) or
        - valid data. Valid data can be
          - changed over the file contents (dirty) or
          - not changed (clean).
  */
  hash_link= get_hash_link(keycache, file, filepos);
  DBUG_ASSERT((hash_link->file == file) && (hash_link->diskpos == filepos));

  page_status= -1;
  if ((block= hash_link->block) &&
      block->hash_link == hash_link && (block->status & BLOCK_READ))
  {
    /* Assigned block with valid (changed or unchanged) contents. */
    page_status= PAGE_READ;
  }
  /*
    else (page_status == -1)
      - block == NULL or
      - block not assigned to this hash_link or
      - block assigned but not yet read from file (invalid data).
  */

  if (keycache->in_resize)
  {
    /* This is a request during a resize operation */

    if (!block)
    {
      struct st_my_thread_var *thread;

      /*
        The file block is not in the cache. We don't need it in the
        cache: we are going to read or write directly to file. Cancel
        the request. We can simply decrement hash_link->requests because
        we did not release cache_lock since increasing it. So no other
        thread can wait for our request to become released.
      */
      if (hash_link->requests == 1)
      {
        /*
          We are the only one to request this hash_link (this file/pos).
          Free the hash_link.
        */
        hash_link->requests--;
        unlink_hash(keycache, hash_link);
        DBUG_RETURN(0);
      }

      /*
        More requests on the hash_link. Someone tries to evict a block
        for this hash_link (could have started before resizing started).
        This means that the LRU ring is empty. Otherwise a block could
        be assigned immediately. Behave like a thread that wants to
        evict a block for this file/pos. Add to the queue of threads
        waiting for a block. Wait until there is one assigned.

        Refresh the request on the hash-link so that it cannot be reused
        for another file/pos.
      */
      thread= my_thread_var;
      thread->opt_info= (void *) hash_link;
      link_into_queue(&keycache->waiting_for_block, thread);
      do
      {
        KEYCACHE_DBUG_PRINT("find_key_block: wait",
                            ("suspend thread %ld", thread->id));
        keycache_pthread_cond_wait(&thread->suspend,
                                   &keycache->cache_lock);
      } while (thread->next);
      thread->opt_info= NULL;
      /*
        A block should now be assigned to the hash_link. But it may
        still need to be evicted. Anyway, we should re-check the
        situation. page_status must be set correctly.
      */
      hash_link->requests--;
      goto restart;
    } /* end of if (!block) */

    /*
      There is a block for this file/pos in the cache. Register a
      request on it. This unlinks it from the LRU ring (if it is there)
      and hence protects it against eviction (if not already in
      eviction). We need this for returning the block to the caller, for
      calling remove_reader() (for debugging purposes), and for calling
      free_block(). The only case where we don't need the request is if
      the block is in eviction. In that case we have to unregister the
      request later.
    */
    reg_requests(keycache, block, 1);

    if (page_status != PAGE_READ)
    {
      /*
        - block not assigned to this hash_link or
        - block assigned but not yet read from file (invalid data).

        This must be a block in eviction. It will be read soon. We need
        to wait here until this happened. Otherwise the caller could
        access a wrong block or a block which is in read. While waiting
        we cannot lose hash_link nor block. We have registered a request
        on the hash_link. Everything can happen to the block but changes
        in the hash_link -> block relationship. In other words:
        everything can happen to the block but free or another completed
        eviction.

        Note that we behave like a secondary requestor here. We just
        cannot return with PAGE_WAIT_TO_BE_READ. This would work for
        read requests and writes on dirty blocks that are not in flush
        only. Waiting here on COND_FOR_REQUESTED works in all
        situations.
      */
      DBUG_ASSERT(((block->hash_link != hash_link) &&
                   (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
                  ((block->hash_link == hash_link) &&
                   !(block->status & BLOCK_READ)));
      wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
      /*
        Here we can trust that the block has been assigned to this
        hash_link (block->hash_link == hash_link) and read into the
        buffer (BLOCK_READ). The worst things possible here are that the
        block is in free (BLOCK_REASSIGNED). But the block is still
        assigned to the hash_link. The freeing thread waits until we
        release our request on the hash_link. The block must not be
        again in eviction because we registered an request on it before
        starting to wait.
      */
      DBUG_ASSERT(block->hash_link == hash_link);
      DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
      DBUG_ASSERT(!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH)));
    }

    /*
      The block is in the cache. Assigned to the hash_link. Valid data.
      Note that in case of page_st == PAGE_READ, the block can be marked
      for eviction. In any case it can be marked for freeing.
    */

    if (!wrmode)
    {
      /* A reader can just read the block. */
      *page_st= PAGE_READ;
      DBUG_ASSERT((hash_link->file == file) &&
                  (hash_link->diskpos == filepos) &&
                  (block->hash_link == hash_link));
      DBUG_RETURN(block);
    }

    /*
      This is a writer. No two writers for the same block can exist.
      This must be assured by locks outside of the key cache.
    */
    DBUG_ASSERT(!(block->status & BLOCK_FOR_UPDATE) || fail_block(block));

    while (block->status & BLOCK_IN_FLUSH)
    {
      /*
        Wait until the block is flushed to file. Do not release the
        request on the hash_link yet to prevent that the block is freed
        or reassigned while we wait. While we wait, several things can
        happen to the block, including another flush. But the block
        cannot be reassigned to another hash_link until we release our
        request on it. But it can be marked BLOCK_REASSIGNED from free
        or eviction, while they wait for us to release the hash_link.
      */
      wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
      /*
        If the flush phase failed, the resize could have finished while
        we waited here.
      */
      if (!keycache->in_resize)
      {
        remove_reader(block);
        unreg_request(keycache, block, 1);
        goto restart;
      }
      DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
      DBUG_ASSERT(!(block->status & BLOCK_FOR_UPDATE) || fail_block(block));
      DBUG_ASSERT(block->hash_link == hash_link);
    }

    if (block->status & BLOCK_CHANGED)
    {
      /*
        We want to write a block with changed contents. If the cache
        block size is bigger than the callers block size (e.g. MyISAM),
        the caller may replace part of the block only. Changes of the
        other part of the block must be preserved. Since the block has
        not yet been selected for flush, we can still add our changes.
      */
      *page_st= PAGE_READ;
      DBUG_ASSERT((hash_link->file == file) &&
                  (hash_link->diskpos == filepos) &&
                  (block->hash_link == hash_link));
      DBUG_RETURN(block);
    }

    /*
      This is a write request for a clean block. We do not want to have
      new dirty blocks in the cache while resizing. We will free the
      block and write directly to file. If the block is in eviction or
      in free, we just let it go.

      Unregister from the hash_link. This must be done before freeing
      the block. And it must be done if not freeing the block. Because
      we could have waited above, we need to call remove_reader(). Other
      threads could wait for us to release our request on the hash_link.
    */
    remove_reader(block);

    /* If the block is not in eviction and not in free, we can free it. */
    if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                           BLOCK_REASSIGNED)))
    {
      /*
        Free block as we are going to write directly to file.
        Although we have an exclusive lock for the updated key part,
        the control can be yielded by the current thread as we might
        have unfinished readers of other key parts in the block
        buffer. Still we are guaranteed not to have any readers of the
        key part we are writing into until the block is removed from
        the cache as we set the BLOCK_REASSIGNED flag (see the code
        below that handles reading requests).
      */
      free_block(keycache, block);
    }
    else
    {
      /*
        The block will be evicted/freed soon. Don't touch it in any way.
        Unregister the request that we registered above.
      */
      unreg_request(keycache, block, 1);

      /*
        The block is still assigned to the hash_link (the file/pos that
        we are going to write to). Wait until the eviction/free is
        complete. Otherwise the direct write could complete before all
        readers are done with the block. So they could read outdated
        data.

        Since we released our request on the hash_link, it can be reused
        for another file/pos. Hence we cannot just check for
        block->hash_link == hash_link. As long as the resize is
        proceeding the block cannot be reassigned to the same file/pos
        again. So we can terminate the loop when the block is no longer
        assigned to this file/pos.
      */
      do
      {
        wait_on_queue(&block->wqueue[COND_FOR_SAVED],
                      &keycache->cache_lock);
        /*
          If the flush phase failed, the resize could have finished
          while we waited here.
        */
        if (!keycache->in_resize)
          goto restart;
      } while (block->hash_link &&
               (block->hash_link->file == file) &&
               (block->hash_link->diskpos == filepos));
    }
    DBUG_RETURN(0);
  }

  if (page_status == PAGE_READ &&
      (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                        BLOCK_REASSIGNED)))
  {
    /*
      This is a request for a block to be removed from cache. The block
      is assigned to this hash_link and contains valid data, but is
      marked for eviction or to be freed. Possible reasons why it has
      not yet been evicted/freed can be a flush before reassignment
      (BLOCK_IN_SWITCH), readers of the block have not finished yet
      (BLOCK_REASSIGNED), or the evicting thread did not yet awake after
      the block has been selected for it (BLOCK_IN_EVICTION).
    */

    KEYCACHE_DBUG_PRINT("find_key_block",
                        ("request for old page in block %u "
                         "wrmode: %d block->status: %d",
                         BLOCK_NUMBER(block), wrmode, block->status));
    /*
      Only reading requests can proceed until the old dirty page is
      flushed, all others are to be suspended, then resubmitted
    */
    if (!wrmode && !(block->status & BLOCK_REASSIGNED))
    {
      /*
        This is a read request and the block not yet reassigned. We can
        register our request and proceed. This unlinks the block from
        the LRU ring and protects it against eviction.
      */
      reg_requests(keycache, block, 1);
    }
    else
    {
      /*
        Either this is a write request for a block that is in eviction
        or in free. We must not use it any more. Instead we must evict
        another block. But we cannot do this before the eviction/free is
        done. Otherwise we would find the same hash_link + block again
        and again.

        Or this is a read request for a block in eviction/free that does
        not require a flush, but waits for readers to finish with the
        block. We do not read this block to let the eviction/free happen
        as soon as possible. Again we must wait so that we don't find
        the same hash_link + block again and again.
      */
      DBUG_ASSERT(hash_link->requests);
      hash_link->requests--;
      KEYCACHE_DBUG_PRINT("find_key_block",
                          ("request waiting for old page to be saved"));
      wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
      KEYCACHE_DBUG_PRINT("find_key_block",
                          ("request for old page resubmitted"));
      /*
        The block is no longer assigned to this hash_link. Get another
        one.
      */
      goto restart;
    }
  }
  else
  {
    /*
      This is a request for a new block or for a block not to be removed.
      Either
      - block == NULL or
      - block not assigned to this hash_link or
      - block assigned but not yet read from file, or
      - block assigned with valid (changed or unchanged) data and
      - it will not be reassigned/freed.
    */
    if (! block)
    {
      /* No block is assigned to the hash_link yet. */
      if (keycache->blocks_unused)
      {
        if (keycache->free_block_list)
        {
          /* There is a block in the free list. */
          block= keycache->free_block_list;
          keycache->free_block_list= block->next_used;
          block->next_used= NULL;
        }
        else
        {
          size_t block_mem_offset;
          /* There are some never used blocks, take first of them */
          DBUG_ASSERT(keycache->blocks_used <
                      (ulong) keycache->disk_blocks);
          block= &keycache->block_root[keycache->blocks_used];
          block_mem_offset= ((size_t) keycache->blocks_used) *
                            keycache->key_cache_block_size;
          block->buffer= ADD_TO_PTR(keycache->block_mem,
                                    block_mem_offset, uchar*);
          keycache->blocks_used++;
          DBUG_ASSERT(!block->next_used);
        }
        DBUG_ASSERT(!block->prev_used);
        DBUG_ASSERT(!block->next_changed);
        DBUG_ASSERT(!block->prev_changed);
        DBUG_ASSERT(!block->hash_link);
        DBUG_ASSERT(!block->status);
        DBUG_ASSERT(!block->requests);
        keycache->blocks_unused--;
        block->status= BLOCK_IN_USE;
        block->length= 0;
        /* offset == block_size means "no valid bytes in buffer yet". */
        block->offset= keycache->key_cache_block_size;
        block->requests= 1;
        block->temperature= BLOCK_COLD;
        block->hits_left= init_hits_left;
        block->last_hit_time= 0;
        block->hash_link= hash_link;
        hash_link->block= block;
        link_to_file_list(keycache, block, file, 0);
        page_status= PAGE_TO_BE_READ;
        KEYCACHE_DBUG_PRINT("find_key_block",
                            ("got free or never used block %u",
                             BLOCK_NUMBER(block)));
      }
      else
      {
        /*
          There are no free blocks and no never used blocks, use a
          block from the LRU ring.
        */
        if (! keycache->used_last)
        {
          /*
            The LRU ring is empty. Wait until a new block is added to
            it. Several threads might wait here for the same hash_link,
            all of them must get the same block. While waiting for a
            block, after a block is selected for this hash_link, other
            threads can run first before this one awakes. During this
            time interval other threads find this hash_link pointing to
            the block, which is still assigned to another hash_link. In
            this case the block is not marked BLOCK_IN_SWITCH yet, but
            it is marked BLOCK_IN_EVICTION.
          */
          struct st_my_thread_var *thread= my_thread_var;
          thread->opt_info= (void *) hash_link;
          link_into_queue(&keycache->waiting_for_block, thread);
          do
          {
            KEYCACHE_DBUG_PRINT("find_key_block: wait",
                                ("suspend thread %ld", thread->id));
            keycache_pthread_cond_wait(&thread->suspend,
                                       &keycache->cache_lock);
          }
          while (thread->next);
          thread->opt_info= NULL;
          /* Assert that block has a request registered. */
          DBUG_ASSERT(hash_link->block->requests);
          /* Assert that block is not in LRU ring. */
          DBUG_ASSERT(!hash_link->block->next_used);
          DBUG_ASSERT(!hash_link->block->prev_used);
        }
        /*
          If we waited above, hash_link->block has been assigned by
          link_block(). Otherwise it is still NULL. In the latter case
          we need to grab a block from the LRU ring ourselves.
        */
        block= hash_link->block;
        if (! block)
        {
          /* Select the last block from the LRU ring. */
          block= keycache->used_last->next_used;
          block->hits_left= init_hits_left;
          block->last_hit_time= 0;
          hash_link->block= block;
          /*
            Register a request on the block. This unlinks it from the
            LRU ring and protects it against eviction.
          */
          DBUG_ASSERT(!block->requests);
          reg_requests(keycache, block,1);
          /*
            We do not need to set block->status|= BLOCK_IN_EVICTION here
            because we will set block->status|= BLOCK_IN_SWITCH
            immediately without releasing the lock in between. This does
            also support debugging. When looking at the block, one can
            see if the block has been selected by link_block() after the
            LRU ring was empty, or if it was grabbed directly from the
            LRU ring in this branch.
          */
        }

        /*
          If we had to wait above, there is a small chance that another
          thread grabbed this block for the same file block already. But
          in most cases the first condition is true.
        */
        if (block->hash_link != hash_link &&
            ! (block->status & BLOCK_IN_SWITCH) )
        {
          /* this is a primary request for a new page */
          block->status|= BLOCK_IN_SWITCH;

          KEYCACHE_DBUG_PRINT("find_key_block",
                              ("got block %u for new page",
                               BLOCK_NUMBER(block)));

          if (block->status & BLOCK_CHANGED)
          {
            /* The block contains a dirty page - push it out of the cache */

            KEYCACHE_DBUG_PRINT("find_key_block", ("block is dirty"));
            if (block->status & BLOCK_IN_FLUSH)
            {
              /*
                The block is marked for flush. If we do not wait here,
                it could happen that we write the block, reassign it to
                another file block, then, before the new owner can read
                the new file block, the flusher writes the cache block
                (which still has the old contents) to the new file
                block!
              */
              wait_on_queue(&block->wqueue[COND_FOR_SAVED],
                            &keycache->cache_lock);
              /*
                The block is marked BLOCK_IN_SWITCH. It should be left
                alone except for reading. No free, no write.
              */
              DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
              DBUG_ASSERT(!(block->status & (BLOCK_REASSIGNED |
                                             BLOCK_CHANGED |
                                             BLOCK_FOR_UPDATE)));
            }
            else
            {
              block->status|= BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE;
              /*
                BLOCK_IN_EVICTION may be true or not. Other flags must
                have a fixed value.
              */
              DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                          (BLOCK_READ | BLOCK_IN_SWITCH |
                           BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
                           BLOCK_CHANGED | BLOCK_IN_USE));
              DBUG_ASSERT(block->hash_link);

              /* cache_lock is dropped for the duration of the write. */
              keycache_pthread_mutex_unlock(&keycache->cache_lock);
              /*
                The call is thread safe because only the current
                thread might change the block->hash_link value
              */
              error= my_pwrite(block->hash_link->file,
                               block->buffer + block->offset,
                               block->length - block->offset,
                               block->hash_link->diskpos + block->offset,
                               MYF(MY_NABP | MY_WAIT_IF_FULL));
              keycache_pthread_mutex_lock(&keycache->cache_lock);

              /* Block status must not have changed. */
              DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                          (BLOCK_READ | BLOCK_IN_SWITCH |
                           BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
                           BLOCK_CHANGED | BLOCK_IN_USE) ||
                          fail_block(block));
              keycache->global_cache_write++;
            }
          }

          block->status|= BLOCK_REASSIGNED;
          /*
            The block comes from the LRU ring. It must have a hash_link
            assigned.
          */
          DBUG_ASSERT(block->hash_link);
          if (block->hash_link)
          {
            /*
              All pending requests for this page must be resubmitted.
              This must be done before waiting for readers. They could
              wait for the flush to complete. And we must also do it
              after the wait. Flushers might try to free the block while
              we wait. They would wait until the reassignment is
              complete. Also the block status must reflect the correct
              situation: The block is not changed nor in flush any more.
              Note that we must not change the BLOCK_CHANGED flag
              outside of link_to_file_list() so that it is always in the
              correct queue and the *blocks_changed counters are
              correct.
            */
            block->status&= ~(BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE);
            link_to_file_list(keycache, block, block->hash_link->file, 1);
            release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
            /*
              The block is still assigned to its old hash_link.
              Wait until all pending read requests
              for this page are executed
              (we could have avoided this waiting, if we had read
              a page in the cache in a sweep, without yielding control)
            */
            wait_for_readers(keycache, block);
            DBUG_ASSERT(block->hash_link &&
                        block->hash_link->block == block &&
                        block->prev_changed);
            /* The reader must not have been a writer. */
            DBUG_ASSERT(!(block->status & BLOCK_CHANGED));

            /* Wake flushers that might have found the block in between. */
            release_whole_queue(&block->wqueue[COND_FOR_SAVED]);

            /* Remove the hash link for the old file block from the hash. */
            unlink_hash(keycache, block->hash_link);

            /*
              For sanity checks link_to_file_list() asserts that block
              and hash_link refer to each other. Hence we need to assign
              the hash_link first, but then we would not know if it was
              linked before. Hence we would not know if to unlink it. So
              unlink it here and call link_to_file_list(..., FALSE).
            */
            unlink_changed(block);
          }
          block->status= error ? BLOCK_ERROR : BLOCK_IN_USE ;
          block->length= 0;
          block->offset= keycache->key_cache_block_size;
          block->hash_link= hash_link;
          link_to_file_list(keycache, block, file, 0);
          page_status= PAGE_TO_BE_READ;

          KEYCACHE_DBUG_ASSERT(block->hash_link->block == block);
          KEYCACHE_DBUG_ASSERT(hash_link->block->hash_link == hash_link);
        }
        else
        {
          /*
            Either (block->hash_link == hash_link),
            or (block->status & BLOCK_IN_SWITCH).

            This is for secondary requests for a new file block only.
            Either it is already assigned to the new hash_link meanwhile
            (if we had to wait due to empty LRU), or it is already in
            eviction by another thread. Since this block has been
            grabbed from the LRU ring and attached to this hash_link,
            another thread cannot grab the same block from the LRU ring
            anymore. If the block is in eviction already, it must become
            attached to the same hash_link and as such destined for the
            same file block.
          */
          KEYCACHE_DBUG_PRINT("find_key_block",
                              ("block->hash_link: %p hash_link: %p "
                               "block->status: %u", block->hash_link,
                               hash_link, block->status ));
          page_status= (((block->hash_link == hash_link) &&
                         (block->status & BLOCK_READ)) ?
                        PAGE_READ : PAGE_WAIT_TO_BE_READ);
        }
      }
    }
    else
    {
      /*
        Block is not NULL. This hash_link points to a block.
        Either
        - block not assigned to this hash_link (yet) or
        - block assigned but not yet read from file, or
        - block assigned with valid (changed or unchanged) data and
        - it will not be reassigned/freed.

        The first condition means hash_link points to a block in
        eviction. This is not necessarily marked by BLOCK_IN_SWITCH yet.
        But then it is marked BLOCK_IN_EVICTION. See the NOTE in
        link_block(). In both cases it is destined for this hash_link
        and its file block address. When this hash_link got its block
        address, the block was removed from the LRU ring and cannot be
        selected for eviction (for another hash_link) again.

        Register a request on the block. This is another protection
        against eviction.
      */
      DBUG_ASSERT(((block->hash_link != hash_link) &&
                   (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
                  ((block->hash_link == hash_link) &&
                   !(block->status & BLOCK_READ)) ||
                  ((block->status & BLOCK_READ) &&
                   !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))));
      reg_requests(keycache, block, 1);
      KEYCACHE_DBUG_PRINT("find_key_block",
                          ("block->hash_link: %p hash_link: %p "
                           "block->status: %u", block->hash_link,
                           hash_link, block->status ));
      page_status= (((block->hash_link == hash_link) &&
                     (block->status & BLOCK_READ)) ?
                    PAGE_READ : PAGE_WAIT_TO_BE_READ);
    }
  }

  KEYCACHE_DBUG_ASSERT(page_status != -1);
  /* Same assert basically, but be very sure. */
  KEYCACHE_DBUG_ASSERT(block);
  /* Assert that block has a request and is not in LRU ring. */
  DBUG_ASSERT(block->requests);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  /* Assert that we return the correct block. */
  DBUG_ASSERT((page_status == PAGE_WAIT_TO_BE_READ) ||
              ((block->hash_link->file == file) &&
               (block->hash_link->diskpos == filepos)));
  *page_st=page_status;
  KEYCACHE_DBUG_PRINT("find_key_block",
                      ("fd: %d pos: %lu block->status: %u page_status: %d",
                       file, (ulong) filepos, block->status,
                       page_status));

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache2",
               test_key_cache(keycache, "end of find_key_block",0););
#endif
  KEYCACHE_THREAD_TRACE("find_key_block:end");
  DBUG_RETURN(block);
}


/*
  Read into a key cache block buffer from disk.

  SYNOPSIS

    read_block()
      keycache            pointer to a key cache data structure
      block               block to which buffer the data is to be read
      read_length         size of data to be read
      min_length          at least so much data must be read
      primary             <-> the current thread will read the data

  RETURN VALUE
    None

  NOTES.
    The function either reads a page data from file to the block buffer,
    or waits until another thread reads it.
    What page to read is determined by a block parameter - reference to a
    hash link for this page.
    If an error occurs THE BLOCK_ERROR bit is set in the block status.
    We do not report error when the size of successfully read
    portion is less than read_length, but not less than min_length.
*/

static void read_block(KEY_CACHE *keycache,
                       BLOCK_LINK *block, uint read_length,
                       uint min_length, my_bool primary)
{
  size_t got_length;

  /* On entry cache_lock is locked */

  KEYCACHE_THREAD_TRACE("read_block");
  if (primary)
  {
    /*
      This code is executed only by threads that submitted primary
      requests. Until block->status contains BLOCK_READ, all other
      request for the block become secondary requests. For a primary
      request the block must be properly initialized.
    */
    DBUG_ASSERT(((block->status & ~BLOCK_FOR_UPDATE) == BLOCK_IN_USE) ||
                fail_block(block));
    DBUG_ASSERT((block->length == 0) || fail_block(block));
    DBUG_ASSERT((block->offset == keycache->key_cache_block_size) ||
                fail_block(block));
    DBUG_ASSERT((block->requests > 0) || fail_block(block));

    KEYCACHE_DBUG_PRINT("read_block",
                        ("page to be read by primary request"));

    keycache->global_cache_read++;
    /* Page is not in buffer yet, is to be read from disk */
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
    /*
      Here other threads may step in and register as secondary readers.
      They will register in block->wqueue[COND_FOR_REQUESTED].
    */
    got_length= my_pread(block->hash_link->file, block->buffer,
                         read_length, block->hash_link->diskpos, MYF(0));
    keycache_pthread_mutex_lock(&keycache->cache_lock);
    /*
      The block can now have been marked for free (in case of
      FLUSH_RELEASE). Otherwise the state must be unchanged.
    */
    DBUG_ASSERT(((block->status & ~(BLOCK_REASSIGNED |
                                    BLOCK_FOR_UPDATE)) == BLOCK_IN_USE) ||
                fail_block(block));
    DBUG_ASSERT((block->length == 0) || fail_block(block));
    DBUG_ASSERT((block->offset == keycache->key_cache_block_size) ||
                fail_block(block));
    DBUG_ASSERT((block->requests > 0) || fail_block(block));

    if (got_length < min_length)
      block->status|= BLOCK_ERROR;
    else
    {
      block->status|= BLOCK_READ;
      block->length= got_length;
      /*
        Do not set block->offset here. If this block is marked
        BLOCK_CHANGED later, we want to flush only the modified part. So
        only a writer may set block->offset down from
        keycache->key_cache_block_size.
      */
    }
    KEYCACHE_DBUG_PRINT("read_block",
                        ("primary request: new page in cache"));
    /* Signal that all pending requests for this page now can be processed */
    release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
  }
  else
  {
    /*
      This code is executed only by threads that submitted secondary
      requests. At this point it could happen that the cache block is
      not yet assigned to the hash_link for the requested file block.
      But at awake from the wait this should be the case. Unfortunately
      we cannot assert this here because we do not know the hash_link
      for the requested file block nor the file and position. So we have
      to assert this in the caller.
    */
    KEYCACHE_DBUG_PRINT("read_block",
                        ("secondary request waiting for new page to be read"));
    wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
    KEYCACHE_DBUG_PRINT("read_block",
                        ("secondary request: new page in cache"));
  }
}


/*
  Read a block of data from a cached file into a buffer;

  SYNOPSIS

    key_cache_read()
      keycache            pointer to a key cache data structure
      file                handler for the file for the block of data to be read
      filepos             position of the block of data in the file
      level               determines the weight of the data
      buff                buffer to where the data must be placed
      length              length of the buffer
      block_length        length of the block in the key cache buffer
      return_buffer       return pointer to the key cache buffer with the data

  RETURN VALUE
    Returns address from where the data is placed if successful, 0 - otherwise.

  NOTES.
    The function ensures that a block of data of size length from file
    positioned at filepos is in the buffers for some key cache blocks.
    Then the function either copies the data into the buffer buff, or,
    if return_buffer is TRUE, it just returns the pointer to the key cache
    buffer with the data.
    Filepos must be a multiple of 'block_length', but it doesn't
    have to be a multiple of key_cache_block_size;
*/

uchar *key_cache_read(KEY_CACHE *keycache,
                      File file, my_off_t filepos, int level,
                      uchar *buff, uint length,
                      uint block_length __attribute__((unused)),
                      int return_buffer __attribute__((unused)))
{
  my_bool locked_and_incremented= FALSE;
  int error=0;
  uchar *start= buff;
  DBUG_ENTER("key_cache_read");
  DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
                       (uint) file, (ulong) filepos, length));

  if (keycache->key_cache_inited)
  {
    /* Key cache is used */
    reg1 BLOCK_LINK *block;
    uint read_length;
    uint offset;
    int page_st;

    if (MYSQL_KEYCACHE_READ_START_ENABLED())
    {
      MYSQL_KEYCACHE_READ_START(my_filename(file), length,
                                (ulong) (keycache->blocks_used *
                                         keycache->key_cache_block_size),
                                (ulong) (keycache->blocks_unused *
                                         keycache->key_cache_block_size));
    }
    /*
      When the key cache is once initialized, we use the cache_lock to
      reliably distinguish the cases of normal operation, resizing, and
      disabled cache. We always increment and decrement
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
    */
    keycache_pthread_mutex_lock(&keycache->cache_lock);
    /*
      Cache resizing has two phases: Flushing and re-initializing. In
      the flush phase read requests are allowed to bypass the cache for
      blocks not in the cache. find_key_block() returns NULL in this
      case.

      After the flush phase new I/O requests must wait until the
      re-initialization is done. The re-initialization can be done only
      if no I/O request is in progress. The reason is that
      key_cache_block_size can change. With enabled cache, I/O is done
      in chunks of key_cache_block_size. Every chunk tries to use a
      cache block first. If the block size changes in the middle, a
      block could be missed and old data could be read.
    */
    while (keycache->in_resize && !keycache->resize_in_flush)
      wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
    /* Register the I/O for the next resize. */
    inc_counter_for_resize_op(keycache);
    locked_and_incremented= TRUE;
    /* Requested data may not always be aligned to cache blocks. */
    offset= (uint) (filepos % keycache->key_cache_block_size);
    /* Read data in key_cache_block_size increments */
    do
    {
      /* Cache could be disabled in a later iteration. */
      if (!keycache->can_be_used)
      {
        KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache cannot be used"));
        goto no_key_cache;
      }
      /* Start reading at the beginning of the cache block. */
      filepos-= offset;
      /* Do not read beyond the end of the cache block. */
      read_length= length;
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
      KEYCACHE_DBUG_ASSERT(read_length > 0);

      /* return_buffer is only valid for a whole, aligned cache block. */
      if (block_length > keycache->key_cache_block_size || offset)
        return_buffer=0;

      /* Request the cache block that matches file/pos. */
      keycache->global_cache_r_requests++;

      MYSQL_KEYCACHE_READ_BLOCK(keycache->key_cache_block_size);

      block=find_key_block(keycache, file, filepos, level, 0, &page_st);
      if (!block)
      {
        /*
          This happens only for requests submitted during key cache
          resize. The block is not in the cache and shall not go in.
          Read directly from file.
        */
        keycache->global_cache_read++;
        keycache_pthread_mutex_unlock(&keycache->cache_lock);
        error= (my_pread(file, (uchar*) buff, read_length,
                         filepos + offset, MYF(MY_NABP)) != 0);
        keycache_pthread_mutex_lock(&keycache->cache_lock);
        goto next_block;
      }
      if (!(block->status & BLOCK_ERROR))
      {
        if (page_st != PAGE_READ)
        {
          MYSQL_KEYCACHE_READ_MISS();
          /* The requested page is to be read into the block buffer */
          read_block(keycache, block,
                     keycache->key_cache_block_size, read_length+offset,
                     (my_bool)(page_st == PAGE_TO_BE_READ));
          /*
            A secondary request must now have the block assigned to the
            requested file block. It does not hurt to check it for
            primary requests too.
          */
          DBUG_ASSERT(keycache->can_be_used);
          DBUG_ASSERT(block->hash_link->file == file);
          DBUG_ASSERT(block->hash_link->diskpos == filepos);
          DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
        }
        else if (block->length < read_length + offset)
        {
          /*
            Impossible if nothing goes wrong:
            this could only happen if we are using a file with
            small key blocks and are trying to read outside the file
          */
          my_errno= -1;
          block->status|= BLOCK_ERROR;
        }
        else
        {
          MYSQL_KEYCACHE_READ_HIT();
        }
      }

      /* block status may have added BLOCK_ERROR in the above 'if'. */
      if (!(block->status & BLOCK_ERROR))
      {
        {
          DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
#if !defined(SERIALIZED_READ_FROM_CACHE)
          /* Safe to copy without the lock: remove_reader() has not run
             yet, so the block cannot be reassigned under us. */
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
#endif

          /* Copy data from the cache buffer */
          memcpy(buff, block->buffer+offset, (size_t) read_length);

#if !defined(SERIALIZED_READ_FROM_CACHE)
          keycache_pthread_mutex_lock(&keycache->cache_lock);
          DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
#endif
        }
      }

      remove_reader(block);

      /* Error injection for coverage testing. */
      DBUG_EXECUTE_IF("key_cache_read_block_error",
                      block->status|= BLOCK_ERROR;);

      /* Do not link erroneous blocks into the LRU ring, but free them. */
      if (!(block->status & BLOCK_ERROR))
      {
        /*
          Link the block into the LRU ring if it's the last submitted
          request for the block. This enables eviction for the block.
        */
        unreg_request(keycache, block, 1);
      }
      else
      {
        free_block(keycache, block);
        error= 1;
        break;
      }

    next_block:
      buff+= read_length;
      filepos+= read_length+offset;
      offset= 0;

    } while ((length-= read_length));
    if (MYSQL_KEYCACHE_READ_DONE_ENABLED())
    {
      MYSQL_KEYCACHE_READ_DONE((ulong) (keycache->blocks_used *
                                        keycache->key_cache_block_size),
                               (ulong) (keycache->blocks_unused *
                                        keycache->key_cache_block_size));
    }
    goto end;
  }
  KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache not initialized"));

no_key_cache:
  /* Key cache is not used */

  keycache->global_cache_r_requests++;
  keycache->global_cache_read++;

  if (locked_and_incremented)
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
  if (my_pread(file, (uchar*) buff, length, filepos, MYF(MY_NABP)))
    error= 1;
  if (locked_and_incremented)
    keycache_pthread_mutex_lock(&keycache->cache_lock);

end:
  if (locked_and_incremented)
  {
    dec_counter_for_resize_op(keycache);
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
  }
  DBUG_PRINT("exit", ("error: %d", error ));
  DBUG_RETURN(error ? (uchar*) 0 : start);
}


/*
  Insert a block of file data from a buffer into key cache

  SYNOPSIS
    key_cache_insert()
    keycache            pointer to a key cache data structure
    file                handler for the file to insert data from
    filepos             position of the block of data in the file to insert
    level               determines the weight of the data
    buff                buffer to read data from
    length              length of the data in the buffer

  NOTES
    This is used by MyISAM to move all blocks from a index file to the key
    cache

  RETURN VALUE
    0 if a success, 1 - otherwise.
*/

int key_cache_insert(KEY_CACHE *keycache,
                     File file, my_off_t filepos, int level,
                     uchar *buff, uint length)
{
  /* Set to 1 when a block turns out erroneous; returned to the caller. */
  int error= 0;
  DBUG_ENTER("key_cache_insert");
  DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
                       (uint) file,(ulong) filepos, length));

  if (keycache->key_cache_inited)
  {
    /* Key cache is used */
    reg1 BLOCK_LINK *block;
    uint read_length;   /* bytes of 'buff' consumed for the current block */
    uint offset;        /* offset of filepos within its cache block */
    int page_st;
    my_bool locked_and_incremented= FALSE;

    /*
      When the keycache is once initialized, we use the cache_lock to
      reliably distinguish the cases of normal operation, resizing, and
      disabled cache. We always increment and decrement
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
    */
    keycache_pthread_mutex_lock(&keycache->cache_lock);
    /*
      We do not load index data into a disabled cache nor into an
      ongoing resize.
    */
    if (!keycache->can_be_used || keycache->in_resize)
      goto no_key_cache;
    /* Register the pseudo I/O for the next resize. */
    inc_counter_for_resize_op(keycache);
    locked_and_incremented= TRUE;
    /* Loaded data may not always be aligned to cache blocks. */
    offset= (uint) (filepos % keycache->key_cache_block_size);
    /* Load data in key_cache_block_size increments. */
    do
    {
      /* Cache could be disabled or resizing in a later iteration. */
      if (!keycache->can_be_used || keycache->in_resize)
        goto no_key_cache;
      /* Start loading at the beginning of the cache block. */
      filepos-= offset;
      /* Do not load beyond the end of the cache block. */
      read_length= length;
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
      KEYCACHE_DBUG_ASSERT(read_length > 0);

      /* The block has been read by the caller already. */
      keycache->global_cache_read++;
      /* Request the cache block that matches file/pos. */
      keycache->global_cache_r_requests++;
      block= find_key_block(keycache, file, filepos, level, 0, &page_st);
      if (!block)
      {
        /*
          This happens only for requests submitted during key cache
          resize. The block is not in the cache and shall not go in.
          Stop loading index data.
        */
        goto no_key_cache;
      }
      if (!(block->status & BLOCK_ERROR))
      {
        if ((page_st == PAGE_WAIT_TO_BE_READ) ||
            ((page_st == PAGE_TO_BE_READ) &&
             (offset || (read_length < keycache->key_cache_block_size))))
        {
          /*
            Either this is a secondary request for a block to be read
            into the cache. The block is in eviction. It is not yet
            assigned to the requested file block (It does not point to
            the right hash_link). So we cannot call remove_reader() on
            the block. And we cannot access the hash_link directly
            here. We need to wait until the assignment is complete.
            read_block() executes the correct wait when called with
            primary == FALSE.

            Or this is a primary request for a block to be read into
            the cache and the supplied data does not fill the whole
            block.

            This function is called on behalf of a LOAD INDEX INTO
            CACHE statement, which is a read-only task and allows
            other readers. It is possible that a parallel running
            reader tries to access this block. If it needs more data
            than has been supplied here, it would report an error. To
            be sure that we have all data in the block that is
            available in the file, we read the block ourselves.

            Though reading again what the caller did read already is
            an expensive operation, we need to do this for
            correctness.
          */
          read_block(keycache, block, keycache->key_cache_block_size,
                     read_length + offset, (page_st == PAGE_TO_BE_READ));
          /*
            A secondary request must now have the block assigned to
            the requested file block. It does not hurt to check it for
            primary requests too.
          */
          DBUG_ASSERT(keycache->can_be_used);
          DBUG_ASSERT(block->hash_link->file == file);
          DBUG_ASSERT(block->hash_link->diskpos == filepos);
          DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
        }
        else if (page_st == PAGE_TO_BE_READ)
        {
          /*
            This is a new block in the cache. If we come here, we have
            data for the whole block.
          */
          DBUG_ASSERT(block->hash_link->requests);
          DBUG_ASSERT(block->status & BLOCK_IN_USE);
          DBUG_ASSERT((page_st == PAGE_TO_BE_READ) ||
                      (block->status & BLOCK_READ));

#if !defined(SERIALIZED_READ_FROM_CACHE)
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
          /*
            Here other threads may step in and register as secondary
            readers. They will register in
            block->wqueue[COND_FOR_REQUESTED].
          */
#endif

          /* Copy data from buff */
          memcpy(block->buffer+offset, buff, (size_t) read_length);

#if !defined(SERIALIZED_READ_FROM_CACHE)
          keycache_pthread_mutex_lock(&keycache->cache_lock);
          DBUG_ASSERT(block->status & BLOCK_IN_USE);
          DBUG_ASSERT((page_st == PAGE_TO_BE_READ) ||
                      (block->status & BLOCK_READ));
#endif
          /*
            After the data is in the buffer, we can declare the block
            valid. Now other threads do not need to register as
            secondary readers any more. They can immediately access
            the block.
          */
          block->status|= BLOCK_READ;
          block->length= read_length+offset;
          /*
            Do not set block->offset here. If this block is marked
            BLOCK_CHANGED later, we want to flush only the modified
            part. So only a writer may set block->offset down from
            keycache->key_cache_block_size.
          */
          KEYCACHE_DBUG_PRINT("key_cache_insert",
                              ("primary request: new page in cache"));
          /* Signal all pending requests. */
          release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
        }
        else
        {
          /*
            page_st == PAGE_READ. The block is in the buffer. All data
            must already be present. Blocks are always read with all
            data available on file. Assert that the block does not
            have less contents than the preloader supplies. If the
            caller has data beyond block->length, it means that a
            file write has been done while this block was in cache and
            not extended with the new data. If the condition is met,
            we can simply ignore the block.
          */
          DBUG_ASSERT((page_st == PAGE_READ) &&
                      (read_length + offset <= block->length));
        }

        /*
          A secondary request must now have the block assigned to the
          requested file block. It does not hurt to check it for
          primary requests too.
        */
        DBUG_ASSERT(block->hash_link->file == file);
        DBUG_ASSERT(block->hash_link->diskpos == filepos);
        DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
      } /* end of if (!(block->status & BLOCK_ERROR)) */

      remove_reader(block);

      /* Error injection for coverage testing. */
      DBUG_EXECUTE_IF("key_cache_insert_block_error",
                      block->status|= BLOCK_ERROR; errno=EIO;);

      /* Do not link erroneous blocks into the LRU ring, but free them. */
      if (!(block->status & BLOCK_ERROR))
      {
        /*
          Link the block into the LRU ring if it's the last submitted
          request for the block. This enables eviction for the block.
        */
        unreg_request(keycache, block, 1);
      }
      else
      {
        free_block(keycache, block);
        error= 1;
        break;
      }

      /* Advance to the next cache-block-sized chunk of the buffer. */
      buff+= read_length;
      filepos+= read_length+offset;
      offset= 0;

    } while ((length-= read_length));

  no_key_cache:
    if (locked_and_incremented)
      dec_counter_for_resize_op(keycache);
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
  }
  DBUG_RETURN(error);
}


/*
  Write a buffer into a cached file.

  SYNOPSIS
    key_cache_write()
    keycache            pointer to a key cache data structure
    file                handler for the file to write data to
    filepos             position in the file to write data to
    level               determines the weight of the data
    buff                buffer with the data
    length              length of the buffer
    dont_write          if is 0 then all dirty pages involved in writing
                        should have been flushed from key cache

  RETURN VALUE
    0 if a success, 1 - otherwise.

  NOTES.
    The function copies the data of size length from buff into buffers
    for key cache blocks that are assigned to contain the portion of
    the file starting with position filepos.
    It ensures that this data is flushed to the file if dont_write is FALSE.
    Filepos must be a multiple of 'block_length', but it doesn't
    have to be a multiple of key_cache_block_size;

    dont_write is always TRUE in the server (info->lock_type is never
    F_UNLCK).
*/

int key_cache_write(KEY_CACHE *keycache,
                    File file, my_off_t filepos, int level,
                    uchar *buff, uint length,
                    uint block_length __attribute__((unused)),
                    int dont_write)
{
  my_bool locked_and_incremented= FALSE;
  int error=0;
  DBUG_ENTER("key_cache_write");
  DBUG_PRINT("enter",
             ("fd: %u pos: %lu length: %u block_length: %u"
              " key_block_length: %u",
              (uint) file, (ulong) filepos, length, block_length,
              keycache ? keycache->key_cache_block_size : 0));

  if (!dont_write)
  {
    /* purecov: begin inspected */
    /* Not used in the server. */
    /* Force writing from buff into disk. */
    keycache->global_cache_w_requests++;
    keycache->global_cache_write++;
    if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))
      DBUG_RETURN(1);
    /* purecov: end */
  }

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache",
               test_key_cache(keycache, "start of key_cache_write", 1););
#endif

  if (keycache->key_cache_inited)
  {
    /* Key cache is used */
    reg1 BLOCK_LINK *block;
    uint read_length;   /* bytes of 'buff' written into the current block */
    uint offset;        /* offset of filepos within its cache block */
    int page_st;

    if (MYSQL_KEYCACHE_WRITE_START_ENABLED())
    {
      MYSQL_KEYCACHE_WRITE_START(my_filename(file), length,
                                 (ulong) (keycache->blocks_used *
                                          keycache->key_cache_block_size),
                                 (ulong) (keycache->blocks_unused *
                                          keycache->key_cache_block_size));
    }

    /*
      When the key cache is once initialized, we use the cache_lock to
      reliably distinguish the cases of normal operation, resizing, and
      disabled cache. We always increment and decrement
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
    */
    keycache_pthread_mutex_lock(&keycache->cache_lock);
    /*
      Cache resizing has two phases: Flushing and re-initializing. In
      the flush phase write requests can modify dirty blocks that are
      not yet in flush. Otherwise they are allowed to bypass the cache.
      find_key_block() returns NULL in both cases (clean blocks and
      non-cached blocks).

      After the flush phase new I/O requests must wait until the
      re-initialization is done. The re-initialization can be done only
      if no I/O request is in progress. The reason is that
      key_cache_block_size can change. With enabled cache I/O is done
      in chunks of key_cache_block_size. Every chunk tries to use a
      cache block first. If the block size changes in the middle, a
      block could be missed and data could be written below a cached
      block.
    */
    while (keycache->in_resize && !keycache->resize_in_flush)
      wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
    /* Register the I/O for the next resize. */
    inc_counter_for_resize_op(keycache);
    locked_and_incremented= TRUE;
    /* Requested data may not always be aligned to cache blocks. */
    offset= (uint) (filepos % keycache->key_cache_block_size);
    /* Write data in key_cache_block_size increments. */
    do
    {
      /* Cache could be disabled in a later iteration. */
      if (!keycache->can_be_used)
        goto no_key_cache;

      MYSQL_KEYCACHE_WRITE_BLOCK(keycache->key_cache_block_size);
      /* Start writing at the beginning of the cache block. */
      filepos-= offset;
      /* Do not write beyond the end of the cache block. */
      read_length= length;
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
      KEYCACHE_DBUG_ASSERT(read_length > 0);

      /* Request the cache block that matches file/pos. */
      keycache->global_cache_w_requests++;
      block= find_key_block(keycache, file, filepos, level, 1, &page_st);
      if (!block)
      {
        /*
          This happens only for requests submitted during key cache
          resize. The block is not in the cache and shall not go in.
          Write directly to file.
        */
        if (dont_write)
        {
          /* Used in the server. */
          keycache->global_cache_write++;
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
          if (my_pwrite(file, (uchar*) buff, read_length, filepos + offset,
                        MYF(MY_NABP | MY_WAIT_IF_FULL)))
            error=1;
          keycache_pthread_mutex_lock(&keycache->cache_lock);
        }
        goto next_block;
      }
      /*
        Prevent block from flushing and from being selected for to be
        freed. This must be set when we release the cache_lock.
        However, we must not set the status of the block before it is
        assigned to this file/pos.
      */
      if (page_st != PAGE_WAIT_TO_BE_READ)
        block->status|= BLOCK_FOR_UPDATE;
      /*
        We must read the file block first if it is not yet in the cache
        and we do not replace all of its contents.

        In cases where the cache block is big enough to contain (parts
        of) index blocks of different indexes, our request can be
        secondary (PAGE_WAIT_TO_BE_READ). In this case another thread is
        reading the file block. If the read completes after us, it
        overwrites our new contents with the old contents. So we have to
        wait for the other thread to complete the read of this block.
        read_block() takes care for the wait.
      */
      if (!(block->status & BLOCK_ERROR) &&
          ((page_st == PAGE_TO_BE_READ &&
            (offset || read_length < keycache->key_cache_block_size)) ||
           (page_st == PAGE_WAIT_TO_BE_READ)))
      {
        read_block(keycache, block,
                   offset + read_length >= keycache->key_cache_block_size?
                   offset : keycache->key_cache_block_size,
                   offset, (page_st == PAGE_TO_BE_READ));
        DBUG_ASSERT(keycache->can_be_used);
        DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
        /*
          Prevent block from flushing and from being selected for to be
          freed. This must be set when we release the cache_lock.
          Here we set it in case we could not set it above.
        */
        block->status|= BLOCK_FOR_UPDATE;
      }
      /*
        The block should always be assigned to the requested file block
        here. It need not be BLOCK_READ when overwriting the whole block.
      */
      DBUG_ASSERT(block->hash_link->file == file);
      DBUG_ASSERT(block->hash_link->diskpos == filepos);
      DBUG_ASSERT(block->status & BLOCK_IN_USE);
      DBUG_ASSERT((page_st == PAGE_TO_BE_READ) ||
                  (block->status & BLOCK_READ));
      /*
        The block to be written must not be marked BLOCK_REASSIGNED.
        Otherwise it could be freed in dirty state or reused without
        another flush during eviction. It must also not be in flush.
        Otherwise the old contens may have been flushed already and
        the flusher could clear BLOCK_CHANGED without flushing the
        new changes again.
      */
      DBUG_ASSERT(!(block->status & BLOCK_REASSIGNED));

      while (block->status & BLOCK_IN_FLUSHWRITE)
      {
        /*
          Another thread is flushing the block. It was dirty already.
          Wait until the block is flushed to file. Otherwise we could
          modify the buffer contents just while it is written to file.
          An unpredictable file block contents would be the result.
          While we wait, several things can happen to the block,
          including another flush. But the block cannot be reassigned to
          another hash_link until we release our request on it.
        */
        wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
        DBUG_ASSERT(keycache->can_be_used);
        DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
        /* Still must not be marked for free. */
        DBUG_ASSERT(!(block->status & BLOCK_REASSIGNED));
        DBUG_ASSERT(block->hash_link && (block->hash_link->block == block));
      }

      /*
        We could perhaps release the cache_lock during access of the
        data like in the other functions. Locks outside of the key cache
        assure that readers and a writer do not access the same range of
        data. Parallel accesses should happen only if the cache block
        contains multiple index block(fragment)s. So different parts of
        the buffer would be read/written. An attempt to flush during
        memcpy() is prevented with BLOCK_FOR_UPDATE.
      */
      if (!(block->status & BLOCK_ERROR))
      {
#if !defined(SERIALIZED_READ_FROM_CACHE)
        keycache_pthread_mutex_unlock(&keycache->cache_lock);
#endif
        memcpy(block->buffer+offset, buff, (size_t) read_length);

#if !defined(SERIALIZED_READ_FROM_CACHE)
        keycache_pthread_mutex_lock(&keycache->cache_lock);
#endif
      }

      if (!dont_write)
      {
        /* Not used in the server. buff has been written to disk at start. */
        if ((block->status & BLOCK_CHANGED) &&
            (!offset && read_length >= keycache->key_cache_block_size))
          link_to_file_list(keycache, block, block->hash_link->file, 1);
      }
      else if (! (block->status & BLOCK_CHANGED))
        link_to_changed_list(keycache, block);
      block->status|=BLOCK_READ;
      /*
        Allow block to be selected for to be freed. Since it is marked
        BLOCK_CHANGED too, it won't be selected for to be freed without
        a flush.
      */
      block->status&= ~BLOCK_FOR_UPDATE;
      set_if_smaller(block->offset, offset);
      set_if_bigger(block->length, read_length+offset);

      /* Threads may be waiting for the changes to be complete. */
      release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);

      /*
        If only a part of the cache block is to be replaced, and the
        rest has been read from file, then the cache lock has been
        released for I/O and it could be possible that another thread
        wants to evict or free the block and waits for it to be
        released. So we must not just decrement hash_link->requests, but
        also wake a waiting thread.
      */
      remove_reader(block);

      /* Error injection for coverage testing. */
      DBUG_EXECUTE_IF("key_cache_write_block_error",
                      block->status|= BLOCK_ERROR;);

      /* Do not link erroneous blocks into the LRU ring, but free them. */
      if (!(block->status & BLOCK_ERROR))
      {
        /*
          Link the block into the LRU ring if it's the last submitted
          request for the block. This enables eviction for the block.
        */
        unreg_request(keycache, block, 1);
      }
      else
      {
        /* Pretend a "clean" block to avoid complications. */
        block->status&= ~(BLOCK_CHANGED);
        free_block(keycache, block);
        error= 1;
        break;
      }

    next_block:
      /* Advance to the next cache-block-sized chunk of the buffer. */
      buff+= read_length;
      filepos+= read_length+offset;
      offset= 0;

    } while ((length-= read_length));
    goto end;
  }

no_key_cache:
  /* Key cache is not used */
  if (dont_write)
  {
    /* Used in the server. */
    keycache->global_cache_w_requests++;
    keycache->global_cache_write++;
    if (locked_and_incremented)
      keycache_pthread_mutex_unlock(&keycache->cache_lock);
    if (my_pwrite(file, (uchar*) buff, length, filepos,
                  MYF(MY_NABP | MY_WAIT_IF_FULL)))
      error=1;
    if (locked_and_incremented)
      keycache_pthread_mutex_lock(&keycache->cache_lock);
  }

end:
  /* Release resize registration and the lock before returning. */
  if (locked_and_incremented)
  {
    dec_counter_for_resize_op(keycache);
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
  }

  if (MYSQL_KEYCACHE_WRITE_DONE_ENABLED())
  {
    MYSQL_KEYCACHE_WRITE_DONE((ulong) (keycache->blocks_used *
                                       keycache->key_cache_block_size),
                              (ulong) (keycache->blocks_unused *
                                       keycache->key_cache_block_size));
  }

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("exec",
               test_key_cache(keycache, "end of key_cache_write", 1););
#endif

  DBUG_RETURN(error);
}


/*
  Free block.

  SYNOPSIS
    free_block()
      keycache          Pointer to a key cache data structure
      block             Pointer to the block to free

  DESCRIPTION
    Remove reference to block from hash table.
    Remove block from the chain of clean blocks.
    Add block to the free list.

  NOTE
    Block must not be free (status == 0).
    Block must not be in free_block_list.
    Block must not be in the LRU ring.
    Block must not be in eviction (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH).
    Block must not be in free (BLOCK_REASSIGNED).
    Block must not be in flush (BLOCK_IN_FLUSH).
    Block must not be dirty (BLOCK_CHANGED).
    Block must not be in changed_blocks (dirty) hash.
    Block must be in file_blocks (clean) hash.
    Block must refer to a hash_link.
    Block must have a request registered on it.
*/

static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
  KEYCACHE_THREAD_TRACE("free block");
  KEYCACHE_DBUG_PRINT("free_block",
                      ("block %u to be freed, hash_link %p status: %u",
                       BLOCK_NUMBER(block), block->hash_link,
                       block->status));
  /*
    Assert that the block is not free already. And that it is in a clean
    state. Note that the block might just be assigned to a hash_link and
    not yet read (BLOCK_READ may not be set here).
    In this case a reader is registered in the hash_link and free_block()
    will wait for it below.
  */
  DBUG_ASSERT((block->status & BLOCK_IN_USE) &&
              !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                 BLOCK_REASSIGNED | BLOCK_IN_FLUSH |
                                 BLOCK_CHANGED | BLOCK_FOR_UPDATE)));
  /* Assert that the block is in a file_blocks chain. */
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  /* Assert that the block is not in the LRU ring. */
  DBUG_ASSERT(!block->next_used && !block->prev_used);
  /*
    IMHO the below condition (if()) makes no sense. I can't see how it
    could be possible that free_block() is entered with a NULL hash_link
    pointer. The only place where it can become NULL is in free_block()
    (or before its first use ever, but for those blocks free_block() is
    not called). I don't remove the conditional as it cannot harm, but
    place an DBUG_ASSERT to confirm my hypothesis. Eventually the
    condition (if()) can be removed.
  */
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
  if (block->hash_link)
  {
    /*
      While waiting for readers to finish, new readers might request the
      block. But since we set block->status|= BLOCK_REASSIGNED, they
      will wait on block->wqueue[COND_FOR_SAVED]. They must be signalled
      later.
    */
    block->status|= BLOCK_REASSIGNED;
    wait_for_readers(keycache, block);
    /*
      The block must not have been freed by another thread. Repeat some
      checks. An additional requirement is that it must be read now
      (BLOCK_READ).
    */
    DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
    DBUG_ASSERT((block->status & (BLOCK_READ | BLOCK_IN_USE |
                                  BLOCK_REASSIGNED)) &&
                !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                   BLOCK_IN_FLUSH | BLOCK_CHANGED |
                                   BLOCK_FOR_UPDATE)));
    DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
    DBUG_ASSERT(!block->prev_used);
    /*
      Unset BLOCK_REASSIGNED again. If we hand the block to an evicting
      thread (through unreg_request() below), other threads must not see
      this flag. They could become confused.
    */
    block->status&= ~BLOCK_REASSIGNED;
    /*
      Do not release the hash_link until the block is off all lists.
      At least not if we hand it over for eviction in unreg_request().
    */
  }

  /*
    Unregister the block request and link the block into the LRU ring.
    This enables eviction for the block. If the LRU ring was empty and
    threads are waiting for a block, then the block will be handed over
    for eviction immediately. Otherwise we will unlink it from the LRU
    ring again, without releasing the lock in between. So decrementing
    the request counter and updating statistics are the only relevant
    operation in this case. Assert that there are no other requests
    registered.
  */
  DBUG_ASSERT(block->requests == 1);
  unreg_request(keycache, block, 0);
  /*
    Note that even without releasing the cache lock it is possible that
    the block is immediately selected for eviction by link_block() and
    thus not added to the LRU ring. In this case we must not touch the
    block any more.
  */
  if (block->status & BLOCK_IN_EVICTION)
    return;

  /* Error blocks are not put into the LRU ring. */
  if (!(block->status & BLOCK_ERROR))
  {
    /* Here the block must be in the LRU ring. Unlink it again. */
    DBUG_ASSERT(block->next_used && block->prev_used &&
                *block->prev_used == block);
    unlink_block(keycache, block);
  }
  if (block->temperature == BLOCK_WARM)
    keycache->warm_blocks--;
  block->temperature= BLOCK_COLD;

  /* Remove from file_blocks hash. */
  unlink_changed(block);

  /* Remove reference to block from hash table. */
  unlink_hash(keycache, block->hash_link);
  block->hash_link= NULL;

  block->status= 0;
  block->length= 0;
  block->offset= keycache->key_cache_block_size;
  KEYCACHE_THREAD_TRACE("free block");
  KEYCACHE_DBUG_PRINT("free_block", ("block is freed"));

  /* Enforced by unlink_changed(), but just to be sure. */
  DBUG_ASSERT(!block->next_changed && !block->prev_changed);
  /* Enforced by unlink_block(): not in LRU ring nor in free_block_list. */
  DBUG_ASSERT(!block->next_used && !block->prev_used);
  /* Insert the free block in the free list. */
  block->next_used= keycache->free_block_list;
  keycache->free_block_list= block;
  /* Keep track of the number of currently unused blocks. */
  keycache->blocks_unused++;

  /* All pending requests for this page must be resubmitted. */
  release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
}


/*
  Order blocks by their on-disk position so the write burst below is
  performed with minimum seek movement.
*/
static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
{
  return (((*a)->hash_link->diskpos < (*b)->hash_link->diskpos) ? -1 :
      ((*a)->hash_link->diskpos > (*b)->hash_link->diskpos) ? 1 : 0);
}


/*
  Flush a portion of changed blocks to disk,
  free used blocks if requested
*/

static int flush_cached_blocks(KEY_CACHE *keycache,
                               File file, BLOCK_LINK **cache,
                               BLOCK_LINK **end,
                               enum flush_type type)
{
  int error;
  int last_errno= 0;
  uint count= (uint) (end-cache);

  /* Don't lock the cache during the flush */
  keycache_pthread_mutex_unlock(&keycache->cache_lock);
  /*
     As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH
     we are guaranteed no thread will change them
  */
  my_qsort((uchar*) cache, count, sizeof(*cache), (qsort_cmp) cmp_sec_link);

  keycache_pthread_mutex_lock(&keycache->cache_lock);
  /*
    Note: Do not break the loop. We have registered a request on every
    block in 'cache'. These must be unregistered by free_block() or
    unreg_request().
  */
  for ( ; cache != end ; cache++)
  {
    BLOCK_LINK *block= *cache;

    KEYCACHE_DBUG_PRINT("flush_cached_blocks",
                        ("block %u to be flushed", BLOCK_NUMBER(block)));
    /*
      If the block contents is going to be changed, we abandon the flush
      for this block. flush_key_blocks_int() will restart its search and
      handle the block properly.
    */
    if (!(block->status & BLOCK_FOR_UPDATE))
    {
      /* Blocks coming here must have a certain status.
      */
      DBUG_ASSERT(block->hash_link);
      DBUG_ASSERT(block->hash_link->block == block);
      DBUG_ASSERT(block->hash_link->file == file);
      DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                  (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED |
                   BLOCK_IN_USE));
      block->status|= BLOCK_IN_FLUSHWRITE;
      /* Lock is dropped for the disk write; BLOCK_IN_FLUSHWRITE
         signals writers to wait until the flush is complete. */
      keycache_pthread_mutex_unlock(&keycache->cache_lock);
      error= my_pwrite(file, block->buffer+block->offset,
                       block->length - block->offset,
                       block->hash_link->diskpos+ block->offset,
                       MYF(MY_NABP | MY_WAIT_IF_FULL));
      keycache_pthread_mutex_lock(&keycache->cache_lock);
      keycache->global_cache_write++;
      if (error)
      {
        block->status|= BLOCK_ERROR;
        if (!last_errno)
          last_errno= errno ? errno : -1;
      }
      block->status&= ~BLOCK_IN_FLUSHWRITE;
      /* Block must not have changed status except BLOCK_FOR_UPDATE. */
      DBUG_ASSERT(block->hash_link);
      DBUG_ASSERT(block->hash_link->block == block);
      DBUG_ASSERT(block->hash_link->file == file);
      DBUG_ASSERT((block->status & ~(BLOCK_FOR_UPDATE | BLOCK_IN_EVICTION)) ==
                  (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED |
                   BLOCK_IN_USE));
      /*
        Set correct status and link in right queue for free or later
        use. free_block() must not see BLOCK_CHANGED and it may need to
        wait for readers of the block. These should not see the block in
        the wrong hash. If not freeing the block, we need to have it in
        the right queue anyway.
      */
      link_to_file_list(keycache, block, file, 1);
    }
    block->status&= ~BLOCK_IN_FLUSH;
    /*
      Let possible waiting requests to write to the block page proceed.
      It might happen only during an operation to resize the key cache.
    */
    release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
    /* type will never be FLUSH_IGNORE_CHANGED here */
    if (!(type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE) &&
        !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                           BLOCK_FOR_UPDATE)))
    {
      /*
        Note that a request has been registered against the block in
        flush_key_blocks_int().
      */
      free_block(keycache, block);
    }
    else
    {
      /*
        Link the block into the LRU ring if it's the last submitted
        request for the block. This enables eviction for the block.
        Note that a request has been registered against the block in
        flush_key_blocks_int().
      */
      unreg_request(keycache, block, 1);
    }

  } /* end of for ( ; cache != end ; cache++) */
  return last_errno;
}


/*
  Flush all key blocks for a file to disk, but don't do any mutex locks.

  SYNOPSIS
    flush_key_blocks_int()
      keycache            pointer to a key cache data structure
      file                handler for the file to flush to
      flush_type          type of the flush

  NOTES
    This function doesn't do any mutex locks because it needs to be called
    both from flush_key_blocks and flush_all_key_blocks (the latter one does
    the mutex lock in the resize_key_cache() function).

    We do only care about changed blocks that exist when the function is
    entered. We do not guarantee that all changed blocks of the file are
    flushed if more blocks change while this function is running.

  RETURN
    0   ok
    1  error
*/

static int flush_key_blocks_int(KEY_CACHE *keycache,
				File file, enum flush_type type)
{
  BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
  int last_errno= 0;
  int last_errcnt= 0;
  DBUG_ENTER("flush_key_blocks_int");
  DBUG_PRINT("enter",("file: %d blocks_used: %lu blocks_changed: %lu",
              file, keycache->blocks_used, keycache->blocks_changed));

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache",
               test_key_cache(keycache, "start of flush_key_blocks", 0););
#endif

  cache= cache_buff;
  if (keycache->disk_blocks > 0 &&
      (!my_disable_flush_key_blocks || type != FLUSH_KEEP))
  {
    /* Key cache exists and flush is not disabled */
    int error= 0;
    uint count= FLUSH_CACHE;
    BLOCK_LINK **pos,**end;
    BLOCK_LINK *first_in_switch= NULL;
    BLOCK_LINK *last_in_flush;
    BLOCK_LINK *last_for_update;
    BLOCK_LINK *block, *next;
#if defined(KEYCACHE_DEBUG)
    uint cnt=0;
#endif

    if (type != FLUSH_IGNORE_CHANGED)
    {
      /*
         Count how many key blocks we have to cache to be able
         to flush all dirty pages with minimum seek moves
      */
      count= 0;
      for (block= keycache->changed_blocks[FILE_HASH(file)] ;
           block ;
           block= block->next_changed)
      {
        if
((block->hash_link->file == file) && !(block->status & BLOCK_IN_FLUSH)) { count++; KEYCACHE_DBUG_ASSERT(count<= keycache->blocks_used); } } /* Allocate a new buffer only if its bigger than the one we have. Assure that we always have some entries for the case that new changed blocks appear while we need to wait for something. */ if ((count > FLUSH_CACHE) && !(cache= (BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count, MYF(0)))) cache= cache_buff; /* After a restart there could be more changed blocks than now. So we should not let count become smaller than the fixed buffer. */ if (cache == cache_buff) count= FLUSH_CACHE; } /* Retrieve the blocks and write them to a buffer to be flushed */ restart: last_in_flush= NULL; last_for_update= NULL; end= (pos= cache)+count; for (block= keycache->changed_blocks[FILE_HASH(file)] ; block ; block= next) { #if defined(KEYCACHE_DEBUG) cnt++; KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used); #endif next= block->next_changed; if (block->hash_link->file == file) { if (!(block->status & (BLOCK_IN_FLUSH | BLOCK_FOR_UPDATE))) { /* Note: The special handling of BLOCK_IN_SWITCH is obsolete since we set BLOCK_IN_FLUSH if the eviction includes a flush. It can be removed in a later version. */ if (!(block->status & BLOCK_IN_SWITCH)) { /* We care only for the blocks for which flushing was not initiated by another thread and which are not in eviction. Registering a request on the block unlinks it from the LRU ring and protects against eviction. */ reg_requests(keycache, block, 1); if (type != FLUSH_IGNORE_CHANGED) { /* It's not a temporary file */ if (pos == end) { /* This should happen relatively seldom. Remove the request because we won't do anything with the block but restart and pick it again in the next iteration. 
*/ unreg_request(keycache, block, 0); /* This happens only if there is not enough memory for the big block */ if ((error= flush_cached_blocks(keycache, file, cache, end,type))) { /* Do not loop infinitely trying to flush in vain. */ if ((last_errno == error) && (++last_errcnt > 5)) goto err; last_errno= error; } /* Restart the scan as some other thread might have changed the changed blocks chain: the blocks that were in switch state before the flush started have to be excluded */ goto restart; } /* Mark the block with BLOCK_IN_FLUSH in order not to let other threads to use it for new pages and interfere with our sequence of flushing dirty file pages. We must not set this flag before actually putting the block on the write burst array called 'cache'. */ block->status|= BLOCK_IN_FLUSH; /* Add block to the array for a write burst. */ *pos++= block; } else { /* It's a temporary file */ DBUG_ASSERT(!(block->status & BLOCK_REASSIGNED)); /* free_block() must not be called with BLOCK_CHANGED. Note that we must not change the BLOCK_CHANGED flag outside of link_to_file_list() so that it is always in the correct queue and the *blocks_changed counters are correct. */ link_to_file_list(keycache, block, file, 1); if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) { /* A request has been registered against the block above. */ free_block(keycache, block); } else { /* Link the block into the LRU ring if it's the last submitted request for the block. This enables eviction for the block. A request has been registered against the block above. */ unreg_request(keycache, block, 1); } } } else { /* Link the block into a list of blocks 'in switch'. WARNING: Here we introduce a place where a changed block is not in the changed_blocks hash! This is acceptable for a BLOCK_IN_SWITCH. Never try this for another situation. Other parts of the key cache code rely on changed blocks being in the changed_blocks hash. 
*/ unlink_changed(block); link_changed(block, &first_in_switch); } } else if (type != FLUSH_KEEP) { /* During the normal flush at end of statement (FLUSH_KEEP) we do not need to ensure that blocks in flush or update by other threads are flushed. They will be flushed by them later. In all other cases we must assure that we do not have any changed block of this file in the cache when this function returns. */ if (block->status & BLOCK_IN_FLUSH) { /* Remember the last block found to be in flush. */ last_in_flush= block; } else { /* Remember the last block found to be selected for update. */ last_for_update= block; } } } } if (pos != cache) { if ((error= flush_cached_blocks(keycache, file, cache, pos, type))) { /* Do not loop inifnitely trying to flush in vain. */ if ((last_errno == error) && (++last_errcnt > 5)) goto err; last_errno= error; } /* Do not restart here during the normal flush at end of statement (FLUSH_KEEP). We have now flushed at least all blocks that were changed when entering this function. In all other cases we must assure that we do not have any changed block of this file in the cache when this function returns. */ if (type != FLUSH_KEEP) goto restart; } if (last_in_flush) { /* There are no blocks to be flushed by this thread, but blocks in flush by other threads. Wait until one of the blocks is flushed. Re-check the condition for last_in_flush. We may have unlocked the cache_lock in flush_cached_blocks(). The state of the block could have changed. */ if (last_in_flush->status & BLOCK_IN_FLUSH) wait_on_queue(&last_in_flush->wqueue[COND_FOR_SAVED], &keycache->cache_lock); /* Be sure not to lose a block. They may be flushed in random order. */ goto restart; } if (last_for_update) { /* There are no blocks to be flushed by this thread, but blocks for update by other threads. Wait until one of the blocks is updated. Re-check the condition for last_for_update. We may have unlocked the cache_lock in flush_cached_blocks(). 
The state of the block could have changed. */ if (last_for_update->status & BLOCK_FOR_UPDATE) wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock); /* The block is now changed. Flush it. */ goto restart; } /* Wait until the list of blocks in switch is empty. The threads that are switching these blocks will relink them to clean file chains while we wait and thus empty the 'first_in_switch' chain. */ while (first_in_switch) { #if defined(KEYCACHE_DEBUG) cnt= 0; #endif wait_on_queue(&first_in_switch->wqueue[COND_FOR_SAVED], &keycache->cache_lock); #if defined(KEYCACHE_DEBUG) cnt++; KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used); #endif /* Do not restart here. We have flushed all blocks that were changed when entering this function and were not marked for eviction. Other threads have now flushed all remaining blocks in the course of their eviction. */ } if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE)) { BLOCK_LINK *last_for_update= NULL; BLOCK_LINK *last_in_switch= NULL; uint total_found= 0; uint found; /* Finally free all clean blocks for this file. During resize this may be run by two threads in parallel. */ do { found= 0; for (block= keycache->file_blocks[FILE_HASH(file)] ; block ; block= next) { /* Remember the next block. After freeing we cannot get at it. */ next= block->next_changed; /* Changed blocks cannot appear in the file_blocks hash. */ DBUG_ASSERT(!(block->status & BLOCK_CHANGED)); if (block->hash_link->file == file) { /* We must skip blocks that will be changed. */ if (block->status & BLOCK_FOR_UPDATE) { last_for_update= block; continue; } /* We must not free blocks in eviction (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH) or blocks intended to be freed (BLOCK_REASSIGNED). 
*/ if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH | BLOCK_REASSIGNED))) { struct st_hash_link *UNINIT_VAR(next_hash_link); my_off_t UNINIT_VAR(next_diskpos); File UNINIT_VAR(next_file); uint UNINIT_VAR(next_status); uint UNINIT_VAR(hash_requests); total_found++; found++; KEYCACHE_DBUG_ASSERT(found <= keycache->blocks_used); /* Register a request. This unlinks the block from the LRU ring and protects it against eviction. This is required by free_block(). */ reg_requests(keycache, block, 1); /* free_block() may need to wait for readers of the block. This is the moment where the other thread can move the 'next' block from the chain. free_block() needs to wait if there are requests for the block pending. */ if (next && (hash_requests= block->hash_link->requests)) { /* Copy values from the 'next' block and its hash_link. */ next_status= next->status; next_hash_link= next->hash_link; next_diskpos= next_hash_link->diskpos; next_file= next_hash_link->file; DBUG_ASSERT(next == next_hash_link->block); } free_block(keycache, block); /* If we had to wait and the state of the 'next' block changed, break the inner loop. 'next' may no longer be part of the current chain. We do not want to break the loop after every free_block(), not even only after waits. The chain might be quite long and contain blocks for many files. Traversing it again and again to find more blocks for this file could become quite inefficient. */ if (next && hash_requests && ((next_status != next->status) || (next_hash_link != next->hash_link) || (next_file != next_hash_link->file) || (next_diskpos != next_hash_link->diskpos) || (next != next_hash_link->block))) break; } else { last_in_switch= block; } } } /* end for block in file_blocks */ } while (found); /* If any clean block has been found, we may have waited for it to become free. In this case it could be possible that another clean block became dirty. This is possible if the write request existed before the flush started (BLOCK_FOR_UPDATE). 
Re-check the hashes. */ if (total_found) goto restart; /* To avoid an infinite loop, wait until one of the blocks marked for update is updated. */ if (last_for_update) { /* We did not wait. Block must not have changed status. */ DBUG_ASSERT(last_for_update->status & BLOCK_FOR_UPDATE); wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock); goto restart; } /* To avoid an infinite loop wait until one of the blocks marked for eviction is switched. */ if (last_in_switch) { /* We did not wait. Block must not have changed status. */ DBUG_ASSERT(last_in_switch->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH | BLOCK_REASSIGNED)); wait_on_queue(&last_in_switch->wqueue[COND_FOR_SAVED], &keycache->cache_lock); goto restart; } } /* if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE)) */ } /* if (keycache->disk_blocks > 0 */ #ifndef DBUG_OFF DBUG_EXECUTE("check_keycache", test_key_cache(keycache, "end of flush_key_blocks", 0);); #endif err: if (cache != cache_buff) my_free(cache); if (last_errno) errno=last_errno; /* Return first error */ DBUG_RETURN(last_errno != 0); } /* Flush all blocks for a file to disk SYNOPSIS flush_key_blocks() keycache pointer to a key cache data structure file handler for the file to flush to flush_type type of the flush RETURN 0 ok 1 error */ int flush_key_blocks(KEY_CACHE *keycache, File file, enum flush_type type) { int res= 0; DBUG_ENTER("flush_key_blocks"); DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache)); if (!keycache->key_cache_inited) DBUG_RETURN(0); keycache_pthread_mutex_lock(&keycache->cache_lock); /* While waiting for lock, keycache could have been ended. */ if (keycache->disk_blocks > 0) { inc_counter_for_resize_op(keycache); res= flush_key_blocks_int(keycache, file, type); dec_counter_for_resize_op(keycache); } keycache_pthread_mutex_unlock(&keycache->cache_lock); DBUG_RETURN(res); } /* Flush all blocks in the key cache to disk. 
SYNOPSIS flush_all_key_blocks() keycache pointer to key cache root structure DESCRIPTION Flushing of the whole key cache is done in two phases. 1. Flush all changed blocks, waiting for them if necessary. Loop until there is no changed block left in the cache. 2. Free all clean blocks. Normally this means free all blocks. The changed blocks were flushed in phase 1 and became clean. However we may need to wait for blocks that are read by other threads. While we wait, a clean block could become changed if that operation started before the resize operation started. To be safe we must restart at phase 1. When we can run through the changed_blocks and file_blocks hashes without finding a block any more, then we are done. Note that we hold keycache->cache_lock all the time unless we need to wait for something. RETURN 0 OK != 0 Error */ static int flush_all_key_blocks(KEY_CACHE *keycache) { BLOCK_LINK *block; uint total_found; uint found; uint idx; DBUG_ENTER("flush_all_key_blocks"); do { mysql_mutex_assert_owner(&keycache->cache_lock); total_found= 0; /* Phase1: Flush all changed blocks, waiting for them if necessary. Loop until there is no changed block left in the cache. */ do { found= 0; /* Step over the whole changed_blocks hash array. */ for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) { /* If an array element is non-empty, use the first block from its chain to find a file for flush. All changed blocks for this file are flushed. So the same block will not appear at this place again with the next iteration. New writes for blocks are not accepted during the flush. If multiple files share the same hash bucket, one of them will be flushed per iteration of the outer loop of phase 1. */ if ((block= keycache->changed_blocks[idx])) { found++; /* Flush dirty blocks but do not free them yet. They can be used for reading until all other blocks are flushed too. 
*/ if (flush_key_blocks_int(keycache, block->hash_link->file, FLUSH_FORCE_WRITE)) DBUG_RETURN(1); } } } while (found); /* Phase 2: Free all clean blocks. Normally this means free all blocks. The changed blocks were flushed in phase 1 and became clean. However we may need to wait for blocks that are read by other threads. While we wait, a clean block could become changed if that operation started before the resize operation started. To be safe we must restart at phase 1. */ do { found= 0; /* Step over the whole file_blocks hash array. */ for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) { /* If an array element is non-empty, use the first block from its chain to find a file for flush. All blocks for this file are freed. So the same block will not appear at this place again with the next iteration. If multiple files share the same hash bucket, one of them will be flushed per iteration of the outer loop of phase 2. */ if ((block= keycache->file_blocks[idx])) { total_found++; found++; if (flush_key_blocks_int(keycache, block->hash_link->file, FLUSH_RELEASE)) DBUG_RETURN(1); } } } while (found); /* If any clean block has been found, we may have waited for it to become free. In this case it could be possible that another clean block became dirty. This is possible if the write request existed before the resize started (BLOCK_FOR_UPDATE). Re-check the hashes. */ } while (total_found); #ifndef DBUG_OFF /* Now there should not exist any block any more. */ for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) { DBUG_ASSERT(!keycache->changed_blocks[idx]); DBUG_ASSERT(!keycache->file_blocks[idx]); } #endif DBUG_RETURN(0); } /* Reset the counters of a key cache. SYNOPSIS reset_key_cache_counters() name the name of a key cache key_cache pointer to the key kache to be reset DESCRIPTION This procedure is used by process_key_caches() to reset the counters of all currently used key caches, both the default one and the named ones. 
RETURN 0 on success (always because it can't fail) */ int reset_key_cache_counters(const char *name __attribute__((unused)), KEY_CACHE *key_cache) { DBUG_ENTER("reset_key_cache_counters"); if (!key_cache->key_cache_inited) { DBUG_PRINT("info", ("Key cache %s not initialized.", name)); DBUG_RETURN(0); } DBUG_PRINT("info", ("Resetting counters for key cache %s.", name)); key_cache->global_blocks_changed= 0; /* Key_blocks_not_flushed */ key_cache->global_cache_r_requests= 0; /* Key_read_requests */ key_cache->global_cache_read= 0; /* Key_reads */ key_cache->global_cache_w_requests= 0; /* Key_write_requests */ key_cache->global_cache_write= 0; /* Key_writes */ DBUG_RETURN(0); } #ifndef DBUG_OFF /* Test if disk-cache is ok */ static void test_key_cache(KEY_CACHE *keycache __attribute__((unused)), const char *where __attribute__((unused)), my_bool lock __attribute__((unused))) { /* TODO */ } #endif #if defined(KEYCACHE_TIMEOUT) #define KEYCACHE_DUMP_FILE "keycache_dump.txt" #define MAX_QUEUE_LEN 100 static void keycache_dump(KEY_CACHE *keycache) { FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w"); struct st_my_thread_var *last; struct st_my_thread_var *thread; BLOCK_LINK *block; HASH_LINK *hash_link; KEYCACHE_PAGE *page; uint i; fprintf(keycache_dump_file, "thread:%u\n", thread->id); i=0; thread=last=waiting_for_hash_link.last_thread; fprintf(keycache_dump_file, "queue of threads waiting for hash link\n"); if (thread) do { thread=thread->next; page= (KEYCACHE_PAGE *) thread->opt_info; fprintf(keycache_dump_file, "thread:%u, (file,filepos)=(%u,%lu)\n", thread->id,(uint) page->file,(ulong) page->filepos); if (++i == MAX_QUEUE_LEN) break; } while (thread != last); i=0; thread=last=waiting_for_block.last_thread; fprintf(keycache_dump_file, "queue of threads waiting for block\n"); if (thread) do { thread=thread->next; hash_link= (HASH_LINK *) thread->opt_info; fprintf(keycache_dump_file, "thread:%u hash_link:%u (file,filepos)=(%u,%lu)\n", thread->id, (uint) 
HASH_LINK_NUMBER(hash_link), (uint) hash_link->file,(ulong) hash_link->diskpos); if (++i == MAX_QUEUE_LEN) break; } while (thread != last); for (i=0 ; i< keycache->blocks_used ; i++) { int j; block= &keycache->block_root[i]; hash_link= block->hash_link; fprintf(keycache_dump_file, "block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n", i, (int) (hash_link ? HASH_LINK_NUMBER(hash_link) : -1), block->status, block->requests, block->condvar ? 1 : 0); for (j=0 ; j < 2; j++) { KEYCACHE_WQUEUE *wqueue=&block->wqueue[j]; thread= last= wqueue->last_thread; fprintf(keycache_dump_file, "queue #%d\n", j); if (thread) { do { thread=thread->next; fprintf(keycache_dump_file, "thread:%u\n", thread->id); if (++i == MAX_QUEUE_LEN) break; } while (thread != last); } } } fprintf(keycache_dump_file, "LRU chain:"); block= keycache= used_last; if (block) { do { block= block->next_used; fprintf(keycache_dump_file, "block:%u, ", BLOCK_NUMBER(block)); } while (block != keycache->used_last); } fprintf(keycache_dump_file, "\n"); fclose(keycache_dump_file); } #endif /* defined(KEYCACHE_TIMEOUT) */ #if defined(KEYCACHE_TIMEOUT) && !defined(__WIN__) static int keycache_pthread_cond_wait(mysql_cond_t *cond, mysql_mutex_t *mutex) { int rc; struct timeval now; /* time when we started waiting */ struct timespec timeout; /* timeout value for the wait function */ struct timezone tz; #if defined(KEYCACHE_DEBUG) int cnt=0; #endif /* Get current time */ gettimeofday(&now, &tz); /* Prepare timeout value */ timeout.tv_sec= now.tv_sec + KEYCACHE_TIMEOUT; /* timeval uses microseconds. timespec uses nanoseconds. 
1 nanosecond = 1000 micro seconds */ timeout.tv_nsec= now.tv_usec * 1000; KEYCACHE_THREAD_TRACE_END("started waiting"); #if defined(KEYCACHE_DEBUG) cnt++; if (cnt % 100 == 0) fprintf(keycache_debug_log, "waiting...\n"); fflush(keycache_debug_log); #endif rc= mysql_cond_timedwait(cond, mutex, &timeout); KEYCACHE_THREAD_TRACE_BEGIN("finished waiting"); if (rc == ETIMEDOUT || rc == ETIME) { #if defined(KEYCACHE_DEBUG) fprintf(keycache_debug_log,"aborted by keycache timeout\n"); fclose(keycache_debug_log); abort(); #endif keycache_dump(); } #if defined(KEYCACHE_DEBUG) KEYCACHE_DBUG_ASSERT(rc != ETIMEDOUT); #else assert(rc != ETIMEDOUT); #endif return rc; } #else #if defined(KEYCACHE_DEBUG) static int keycache_pthread_cond_wait(mysql_cond_t *cond, mysql_mutex_t *mutex) { int rc; KEYCACHE_THREAD_TRACE_END("started waiting"); rc= mysql_cond_wait(cond, mutex); KEYCACHE_THREAD_TRACE_BEGIN("finished waiting"); return rc; } #endif #endif /* defined(KEYCACHE_TIMEOUT) && !defined(__WIN__) */ #if defined(KEYCACHE_DEBUG) static int keycache_pthread_mutex_lock(mysql_mutex_t *mutex) { int rc; rc= mysql_mutex_lock(mutex); KEYCACHE_THREAD_TRACE_BEGIN(""); return rc; } static void keycache_pthread_mutex_unlock(mysql_mutex_t *mutex) { KEYCACHE_THREAD_TRACE_END(""); mysql_mutex_unlock(mutex); } static int keycache_pthread_cond_signal(mysql_cond_t *cond) { int rc; KEYCACHE_THREAD_TRACE("signal"); rc= mysql_cond_signal(cond); return rc; } #if defined(KEYCACHE_DEBUG_LOG) static void keycache_debug_print(const char * fmt,...) 
{ va_list args; va_start(args,fmt); if (keycache_debug_log) { (void) vfprintf(keycache_debug_log, fmt, args); (void) fputc('\n',keycache_debug_log); } va_end(args); } #endif /* defined(KEYCACHE_DEBUG_LOG) */ #if defined(KEYCACHE_DEBUG_LOG) void keycache_debug_log_close(void) { if (keycache_debug_log) fclose(keycache_debug_log); } #endif /* defined(KEYCACHE_DEBUG_LOG) */ #endif /* defined(KEYCACHE_DEBUG) */ #if !defined(DBUG_OFF) #define F_B_PRT(_f_, _v_) DBUG_PRINT("assert_fail", (_f_, _v_)) static int fail_block(BLOCK_LINK *block) { F_B_PRT("block->next_used: %lx\n", (ulong) block->next_used); F_B_PRT("block->prev_used: %lx\n", (ulong) block->prev_used); F_B_PRT("block->next_changed: %lx\n", (ulong) block->next_changed); F_B_PRT("block->prev_changed: %lx\n", (ulong) block->prev_changed); F_B_PRT("block->hash_link: %lx\n", (ulong) block->hash_link); F_B_PRT("block->status: %u\n", block->status); F_B_PRT("block->length: %u\n", block->length); F_B_PRT("block->offset: %u\n", block->offset); F_B_PRT("block->requests: %u\n", block->requests); F_B_PRT("block->temperature: %u\n", block->temperature); return 0; /* Let the assert fail. */ } static int fail_hlink(HASH_LINK *hlink) { F_B_PRT("hlink->next: %lx\n", (ulong) hlink->next); F_B_PRT("hlink->prev: %lx\n", (ulong) hlink->prev); F_B_PRT("hlink->block: %lx\n", (ulong) hlink->block); F_B_PRT("hlink->diskpos: %lu\n", (ulong) hlink->diskpos); F_B_PRT("hlink->file: %d\n", hlink->file); return 0; /* Let the assert fail. 
*/ } static int cache_empty(KEY_CACHE *keycache) { int errcnt= 0; int idx; if (keycache->disk_blocks <= 0) return 1; for (idx= 0; idx < keycache->disk_blocks; idx++) { BLOCK_LINK *block= keycache->block_root + idx; if (block->status || block->requests || block->hash_link) { fprintf(stderr, "block index: %u\n", idx); fail_block(block); errcnt++; } } for (idx= 0; idx < keycache->hash_links; idx++) { HASH_LINK *hash_link= keycache->hash_link_root + idx; if (hash_link->requests || hash_link->block) { fprintf(stderr, "hash_link index: %u\n", idx); fail_hlink(hash_link); errcnt++; } } if (errcnt) { fprintf(stderr, "blocks: %d used: %lu\n", keycache->disk_blocks, keycache->blocks_used); fprintf(stderr, "hash_links: %d used: %d\n", keycache->hash_links, keycache->hash_links_used); fprintf(stderr, "\n"); } return !errcnt; } #endif
gpl-2.0
francegabb/mxu1130driver
drivers/net/ethernet/realtek/8139cp.c
656
56660
/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */ /* Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com> Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c] Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c] Copyright 2001 Manfred Spraul [natsemi.c] Copyright 1999-2001 by Donald Becker. [natsemi.c] Written 1997-2001 by Donald Becker. [8139too.c] Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c] This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. See the file COPYING in this distribution for more information. Contributors: Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br> PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br> LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br> TODO: * Test Tx checksumming thoroughly Low priority TODO: * Complete reset on PciErr * Consider Rx interrupt mitigation using TimerIntr * Investigate using skb->priority with h/w VLAN priority * Investigate using High Priority Tx Queue with skb->priority * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error * Implement Tx software interrupt mitigation via Tx descriptor bit * The real minimum of CP_MIN_MTU is 4 bytes. However, for this to be supported, one must(?) turn on packet padding. * Support external MII transceivers (patch available) NOTES: * TX checksumming is considered experimental. It is off by default, use ethtool to turn it on. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "8139cp" #define DRV_VERSION "1.3" #define DRV_RELDATE "Mar 22, 2004" #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/gfp.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/cache.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> /* These identify the driver base version and may not be removed. */ static char version[] = DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ static int multicast_filter_limit = 32; module_param(multicast_filter_limit, int, 0); MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */ #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */ #define CP_REGS_SIZE (0xff + 1) #define CP_REGS_VER 1 /* version 1 */ #define CP_RX_RING_SIZE 64 #define CP_TX_RING_SIZE 64 #define CP_RING_BYTES \ ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \ (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \ CP_STATS_SIZE) #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1)) #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1)) #define TX_BUFFS_AVAIL(CP) \ (((CP)->tx_tail <= (CP)->tx_head) ? \ (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \ (CP)->tx_tail - (CP)->tx_head - 1) #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ #define CP_INTERNAL_PHY 32 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */ #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (6*HZ) /* hardware minimum and maximum for a single frame's data payload */ #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */ #define CP_MAX_MTU 4096 enum { /* NIC register offsets */ MAC0 = 0x00, /* Ethernet hardware address. */ MAR0 = 0x08, /* Multicast filter. 
*/ StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */ TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */ HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */ Cmd = 0x37, /* Command register */ IntrMask = 0x3C, /* Interrupt mask */ IntrStatus = 0x3E, /* Interrupt status */ TxConfig = 0x40, /* Tx configuration */ ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */ RxConfig = 0x44, /* Rx configuration */ RxMissed = 0x4C, /* 24 bits valid, write clears */ Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */ Config1 = 0x52, /* Config1 */ Config3 = 0x59, /* Config3 */ Config4 = 0x5A, /* Config4 */ MultiIntr = 0x5C, /* Multiple interrupt select */ BasicModeCtrl = 0x62, /* MII BMCR */ BasicModeStatus = 0x64, /* MII BMSR */ NWayAdvert = 0x66, /* MII ADVERTISE */ NWayLPAR = 0x68, /* MII LPA */ NWayExpansion = 0x6A, /* MII Expansion */ Config5 = 0xD8, /* Config5 */ TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ CpCmd = 0xE0, /* C+ Command register (C+ mode only) */ IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */ RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */ TxThresh = 0xEC, /* Early Tx threshold */ OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */ OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */ /* Tx and Rx status descriptors */ DescOwn = (1 << 31), /* Descriptor is owned by NIC */ RingEnd = (1 << 30), /* End of descriptor ring */ FirstFrag = (1 << 29), /* First segment of a packet */ LastFrag = (1 << 28), /* Final segment of a packet */ LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ MSSShift = 16, /* MSS value position */ MSSMask = 0xfff, /* MSS value: 11 bits */ TxError = (1 << 23), /* Tx error summary */ RxError = (1 << 20), /* Rx error summary */ IPCS = (1 << 18), /* Calculate IP checksum */ UDPCS = (1 << 17), /* Calculate UDP/IP checksum */ TCPCS = (1 << 16), /* Calculate TCP/IP 
checksum */ TxVlanTag = (1 << 17), /* Add VLAN tag */ RxVlanTagged = (1 << 16), /* Rx VLAN tag available */ IPFail = (1 << 15), /* IP checksum failed */ UDPFail = (1 << 14), /* UDP/IP checksum failed */ TCPFail = (1 << 13), /* TCP/IP checksum failed */ NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */ PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */ PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */ RxProtoTCP = 1, RxProtoUDP = 2, RxProtoIP = 3, TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */ TxOWC = (1 << 22), /* Tx Out-of-window collision */ TxLinkFail = (1 << 21), /* Link failed during Tx of packet */ TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */ TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */ TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */ RxErrFrame = (1 << 27), /* Rx frame alignment error */ RxMcast = (1 << 26), /* Rx multicast packet rcv'd */ RxErrCRC = (1 << 18), /* Rx CRC error */ RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */ RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */ RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */ /* StatsAddr register */ DumpStats = (1 << 3), /* Begin stats dump */ /* RxConfig register */ RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */ RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */ AcceptErr = 0x20, /* Accept packets with CRC errors */ AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */ AcceptBroadcast = 0x08, /* Accept broadcast packets */ AcceptMulticast = 0x04, /* Accept multicast packets */ AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */ AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */ /* IntrMask / IntrStatus registers */ PciErr = (1 << 15), /* System error on the PCI bus */ TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */ LenChg = (1 << 13), /* Cable length change */ SWInt = (1 << 8), /* Software-requested interrupt */ TxEmpty = (1 << 
7), /* No Tx descriptors available */ RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */ LinkChg = (1 << 5), /* Packet underrun, or link change */ RxEmpty = (1 << 4), /* No Rx descriptors available */ TxErr = (1 << 3), /* Tx error */ TxOK = (1 << 2), /* Tx packet sent */ RxErr = (1 << 1), /* Rx error */ RxOK = (1 << 0), /* Rx packet received */ IntrResvd = (1 << 10), /* reserved, according to RealTek engineers, but hardware likes to raise it */ IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty | RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK | RxErr | RxOK | IntrResvd, /* C mode command register */ CmdReset = (1 << 4), /* Enable to reset; self-clearing */ RxOn = (1 << 3), /* Rx mode enable */ TxOn = (1 << 2), /* Tx mode enable */ /* C+ mode command register */ RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */ RxChkSum = (1 << 5), /* Rx checksum offload enable */ PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */ PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */ CpRxOn = (1 << 1), /* Rx mode enable */ CpTxOn = (1 << 0), /* Tx mode enable */ /* Cfg9436 EEPROM control register */ Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */ Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */ /* TxConfig register */ IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ /* Early Tx Threshold register */ TxThreshMask = 0x3f, /* Mask bits 5-0 */ TxThreshMax = 2048, /* Max early Tx threshold */ /* Config1 register */ DriverLoaded = (1 << 5), /* Software marker, driver is loaded */ LWACT = (1 << 4), /* LWAKE active mode */ PMEnable = (1 << 0), /* Enable various PM features of chip */ /* Config3 register */ PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ /* Config4 register */ LWPTN = (1 << 1), /* LWAKE Pattern */ 
LWPME		= (1 << 4), /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6), /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5), /* Accept Multicast wakeup frame */
	UWF		= (1 << 4), /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1), /* Enable LANWake signal */
	PMEStatus	= (1 << 0), /* PME status can be reset by PCI RST# */

	/* interrupt groups: Rx work is handled by NAPI, everything else
	 * directly in the hard irq handler */
	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

/* Base RxConfig value; __cp_set_rx_mode() ORs in the accept bits */
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

/* Hardware descriptor layout (shared by Rx and Tx rings), as seen by
 * the NIC: little-endian fields, 64-bit buffer address. */
struct cp_desc {
	__le32		opts1;	/* DescOwn, flags, buffer/frame length */
	__le32		opts2;	/* VLAN tag */
	__le64		addr;	/* DMA address of data buffer */
};

/* Layout of the statistics block the chip DMAs out on DumpStats */
struct cp_dma_stats {
	__le64			tx_ok;
	__le64			rx_ok;
	__le64			tx_err;
	__le32			rx_err;
	__le16			rx_fifo;
	__le16			frame_align;
	__le32			tx_ok_1col;
	__le32			tx_ok_mcol;
	__le64			rx_ok_phys;
	__le64			rx_ok_bcast;
	__le32			rx_ok_mcast;
	__le16			tx_abort;
	__le16			tx_underrun;
} __packed;

/* Software-maintained counters not covered by the hardware dump */
struct cp_extra_stats {
	unsigned long		rx_frags;	/* fragmented Rx frames dropped */
};

/* Per-device private state */
struct cp_private {
	void			__iomem *regs;	/* mapped MMIO register window */
	struct net_device	*dev;
	spinlock_t		lock;		/* guards registers and ring state */
	u32			msg_enable;

	struct napi_struct	napi;

	struct pci_dev		*pdev;
	u32			rx_config;	/* cached RxConfig value */
	u16			cpcmd;		/* cached CpCmd (C+ command) value */

	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;	/* Rx buffer size derived from MTU */
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	dma_addr_t		ring_dma;	/* DMA base of rx_ring (tx follows) */

	struct mii_if_info	mii_if;
};

/* Register accessors; "cp" must be in scope in the caller */
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
/* _f variants read the register back to flush posted PCI writes */
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)

static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

/* Names of the ethtool statistics, in cp_get_ethtool_stats() fill order */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};

/* Derive the Rx buffer size from the current device MTU */
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}

/* Hand a completed Rx skb (with any hardware-stripped VLAN tag) to the
 * network stack via GRO and update the Rx byte/packet counters. */
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->dev->stats.rx_packets++;
	cp->dev->stats.rx_bytes += skb->len;

	if (opts2 & RxVlanTagged)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       swab16(opts2 & 0xffff));

	napi_gro_receive(&cp->napi, skb);
}

/* Account one bad Rx descriptor in the interface error counters */
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);
	cp->dev->stats.rx_errors++;
	if (status & RxErrFrame)
		cp->dev->stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->dev->stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->dev->stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->dev->stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->dev->stats.rx_fifo_errors++;
}

/* Return 1 if the chip validated this frame's TCP/UDP checksum, else 0 */
static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
		return 1;
	else
		return 0;
}

/* NAPI poll: drain up to @budget frames from the Rx ring, replacing each
 * consumed buffer with a freshly mapped skb before returning the
 * descriptor to the NIC (DescOwn). */
static int cp_rx_poll(struct napi_struct *napi, int budget)
{
	struct cp_private *cp = container_of(napi, struct cp_private, napi);
	struct net_device *dev = cp->dev;
	unsigned int rx_tail = cp->rx_tail;
	int rx;

rx_status_loop:
	rx = 0;
	/* ack the Rx interrupt causes we are about to service */
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (rx < budget) {
		u32 status, len;
		dma_addr_t mapping, new_mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		const unsigned buflen = cp->rx_buf_sz;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)	/* NIC still owns this slot */
			break;

		len = (status & 0x1fff) - 4;	/* strip the 4-byte FCS */
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			dev->stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
			  rx_tail, status, len);

		/* allocate+map the replacement before touching the old skb,
		 * so on failure the old buffer is simply recycled */
		new_skb = napi_alloc_skb(napi, buflen);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto rx_next;
		}

		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
			dev->stats.rx_dropped++;
			kfree_skb(new_skb);
			goto rx_next;
		}

		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb_put(skb, len);

		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;
		mapping = new_mapping;

rx_next:
		/* give the slot (old or replacement buffer) back to the NIC */
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx < budget) {
		unsigned long flags;

		/* re-check for late-arriving Rx work before completing */
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		napi_gro_flush(napi, false);
		spin_lock_irqsave(&cp->lock, flags);
		__napi_complete(napi);
		cpw16_f(IntrMask, cp_intr_mask);	/* re-enable Rx interrupts */
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return rx;
}

/* Shared hard-irq handler: acks non-Rx causes directly, defers Rx work
 * to NAPI, reaps Tx, tracks link changes and logs PCI bus errors. */
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	int handled = 0;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	spin_lock(&cp->lock);

	status = cpr16(IntrStatus);
	if
(!status || (status == 0xFFFF))	/* 0xFFFF: device gone/unpowered */
		goto out_unlock;

	handled = 1;

	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
		  status, cpr8(Cmd), cpr16(CpCmd));

	/* ack everything except Rx causes; those are acked by the poller */
	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		goto out_unlock;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (napi_schedule_prep(&cp->napi)) {
			/* mask Rx interrupts until the poll round completes */
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__napi_schedule(&cp->napi);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);


	if (status & PciErr) {
		u16 pci_status;

		/* read-then-write-back clears the PCI error status bits */
		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
			   status, pci_status);

		/* TODO: reset hardware */
	}

out_unlock:
	spin_unlock(&cp->lock);

	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	const int irq = cp->pdev->irq;

	disable_irq(irq);
	cp_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

/* Reap completed Tx descriptors: unmap buffers, free skbs on the frame's
 * last fragment, update Tx statistics and wake the queue if room opened. */
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;
	unsigned bytes_compl = 0, pkts_compl = 0;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();	/* read DescOwn before the rest of the descriptor */
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)	/* NIC not done with this slot yet */
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				netif_dbg(cp, tx_err, cp->dev,
					  "tx err, status 0x%x\n", status);
				cp->dev->stats.tx_errors++;
				if (status & TxOWC)
					cp->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->dev->stats.tx_fifo_errors++;
			} else {
				cp->dev->stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->dev->stats.tx_packets++;
				cp->dev->stats.tx_bytes += skb->len;
				netif_dbg(cp, tx_done, cp->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			bytes_compl += skb->len;
			pkts_compl++;
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}

/* Build the opts2 VLAN field for an outgoing skb (0 if untagged) */
static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) ?
TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}

/* Undo the fragment DMA mappings for ring slots [first, entry_last)
 * after a mapping failure mid-frame (head mapping handled by caller). */
static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
				   int first, int entry_last)
{
	int frag, index;
	struct cp_desc *txd;
	skb_frag_t *this_frag;

	for (frag = 0; frag+first < entry_last; frag++) {
		index = first+frag;
		cp->tx_skb[index] = NULL;
		txd = &cp->tx_ring[index];
		this_frag = &skb_shinfo(skb)->frags[frag];
		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
	}
}

/* ndo_start_xmit: map and queue one skb (linear or fragmented) on the Tx
 * ring, setting checksum-offload / TSO bits, then kick the transmitter. */
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	unsigned long intr_flags;
	__le32 opts2;
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	mss = skb_shinfo(skb)->gso_size;

	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* single-descriptor (linear) frame */
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&cp->pdev->dev, mapping))
			goto out_dma_error;

		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(mapping);
		wmb();	/* descriptor fields visible before DescOwn */

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			const struct iphdr *ip = ip_hdr(skb);
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();	/* DescOwn written before the doorbell below */

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);
	} else {
		/* multi-descriptor (scatter/gather) frame */
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = ip_hdr(skb);

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
			goto out_dma_error;

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = skb_frag_size(this_frag);
			mapping = dma_map_single(&cp->pdev->dev,
						 skb_frag_address(this_frag),
						 len, PCI_DMA_TODEVICE);
			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
				goto out_dma_error;
			}

			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			txd->opts2 = opts2;
			txd->addr = cpu_to_le64(mapping);
			wmb();	/* fields before DescOwn */

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry] = skb;
			entry = NEXT_TX(entry);
		}

		/* now hand the head descriptor to the NIC, last */
		txd = &cp->tx_ring[first_entry];
		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;

	netdev_sent_queue(dev, skb->len);
	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&cp->lock, intr_flags);

	cpw8(TxPoll, NormalTxPoll);	/* doorbell: tell NIC to fetch */

	return NETDEV_TX_OK;
out_dma_error:
	dev_kfree_skb_any(skb);
	cp->dev->stats.tx_dropped++;
	goto out_unlock;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps.
*/
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			  AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* build the 64-bit CRC hash filter from the mc list */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	cp->rx_config = cp_rx_config | rx_mode;
	cpw32_f(RxConfig, cp->rx_config);

	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

/* ndo_set_rx_mode: serialized wrapper around __cp_set_rx_mode() */
static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

/* Fold the hardware Rx-missed counter into dev->stats; caller holds lock */
static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

/* ndo_get_stats: refresh hardware-held counters, return dev->stats */
static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only need report frame silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &dev->stats;
}

/* Mask and ack all interrupts, stop Rx/Tx, reset ring indices */
static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	netdev_reset_queue(cp->dev);
}

/* Soft-reset the chip; CmdReset self-clears when the reset completes */
static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	netdev_err(cp->dev, "hardware reset timeout\n");
}

/* Program CpCmd and the descriptor ring base addresses, then enable
 * Rx/Tx.  Ordering here is deliberate -- see the comments below. */
static inline void cp_start_hw (struct cp_private *cp)
{
	dma_addr_t ring_dma;

	cpw16(CpCmd, cp->cpcmd);

	/*
	 * These (at least TxRingAddr) need to be configured after the
	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
	 * (C+ Command Register) recommends that these and more be configured
	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
	 * it's been observed that the TxRingAddr is actually reset to garbage
	 * when C+ mode Tx is enabled in CpCmd.
	 */
	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	/* Tx ring immediately follows the Rx ring in the coherent block */
	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	/*
	 * Strictly speaking, the datasheet says this should be enabled
	 * *before* setting the descriptor addresses. But what, then, would
	 * prevent it from doing DMA to random unconfigured addresses?
	 * This variant appears to work fine.
	 */
	cpw8(Cmd, RxOn | TxOn);

	netdev_reset_queue(cp->dev);
}

static void cp_enable_irq(struct cp_private *cp)
{
	cpw16_f(IntrMask, cp_intr_mask);
}

/* Full hardware (re)initialisation: reset, MAC address, rings, Rx mode,
 * Tx config; Wake-on-LAN is disabled (re-enable via ethtool). */
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw16(MultiIntr, 0);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}

/* Allocate and DMA-map one skb per Rx slot and hand the descriptors to
 * the NIC; on any failure, everything allocated so far is torn down. */
static int cp_refill_rx(struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
		if (!skb)
			goto err_out;

		mapping = dma_map_single(&cp->pdev->dev, skb->data,
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
			kfree_skb(skb);
			goto err_out;
		}
		cp->rx_skb[i] = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

/* Zero the Tx ring (with RingEnd on the last slot), reset the software
 * indices, and refill the Rx ring. */
static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

/* Allocate the coherent Rx+Tx descriptor area and initialise the rings */
static int cp_alloc_rings (struct
cp_private *cp)
{
	struct device *d = &cp->pdev->dev;
	void *mem;
	int rc;

	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* one contiguous block: Rx descriptors first, Tx right after */
	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	rc = cp_init_rings(cp);
	if (rc < 0)
		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);

	return rc;
}

/* Unmap and free every buffered skb on both rings, then clear the
 * descriptor memory and the skb pointer tables. */
static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			desc = cp->tx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			/* the skb is shared between fragments; free it only
			 * once, on its last fragment */
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->dev->stats.tx_dropped++;
		}
	}
	netdev_reset_queue(cp->dev);

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

/* Release all ring buffers and the coherent descriptor area */
static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
			  cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}

/* ndo_open: allocate rings, init hardware, request the (shared) IRQ and
 * start the transmit queue. */
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	const int irq = cp->pdev->irq;
	int rc;

	netif_dbg(cp, ifup, dev, "enabling interface\n");

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	napi_enable(&cp->napi);

	cp_init_hw(cp);

	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	cp_enable_irq(cp);

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	napi_disable(&cp->napi);
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

/* ndo_stop: quiesce NAPI and the queue, stop hardware, release IRQ/rings */
static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	napi_disable(&cp->napi);

	netif_dbg(cp, ifdown, dev, "disabling interface\n");

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	free_irq(cp->pdev->irq, dev);

	cp_free_rings(cp);
	return 0;
}

/* ndo_tx_timeout: log chip state, then fully restart hardware and rings */
static void cp_tx_timeout(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
		    cpr8(Cmd), cpr16(CpCmd),
		    cpr16(IntrStatus), cpr16(IntrMask));

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);
	cp_clean_rings(cp);
	rc = cp_init_rings(cp);
	cp_start_hw(cp);
	cp_enable_irq(cp);

	netif_wake_queue(dev);

	spin_unlock_irqrestore(&cp->lock, flags);
}

/* ndo_change_mtu: resize Rx buffers; bounce the interface if it is up */
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	/* network IS up, close it, reset MTU, and come up again. */
	cp_close(dev);
	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);
	return cp_open(dev);
}

/* MII register number -> 8139C+ register offset (0 = unsupported) */
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

/* mii_if_info read hook: the PHY registers are mapped into chip space */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
readw(cp->regs + mii_2_8139_map[location]) : 0;
}

/* mii_if_info write hook: writes through to the mapped PHY registers;
 * BMCR (location 0) additionally requires the Cfg9346 unlock. */
static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	/* Config3 is write-protected behind the Cfg9346 lock */
	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC
		         | WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options        = cpr8 (Config3);
	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

	options        = 0; /* Paranoia setting */
	options        = cpr8 (Config5);
	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

/* ethtool: report driver name, version and PCI bus location */
static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}

/* ethtool: ring sizes are fixed at compile time */
static void cp_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = CP_RX_RING_SIZE;
	ring->tx_max_pending = CP_TX_RING_SIZE;
	ring->rx_pending = CP_RX_RING_SIZE;
	ring->tx_pending = CP_TX_RING_SIZE;
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CP_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool: fetch link settings from the MII library, under the lock */
static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

/* ethtool: apply link settings through the MII library, under the lock */
static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

/* ndo_set_features: toggle Rx checksum offload (and Rx VLAN stripping)
 * bits in the cached CpCmd and push the new value to the chip. */
static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&cp->lock, flags);

	if (features & NETIF_F_RXCSUM)
		cp->cpcmd |= RxChkSum;
	else
		cp->cpcmd &= ~RxChkSum;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		cp->cpcmd |= RxVlanOn;
	else
		cp->cpcmd &= ~RxVlanOn;

	cpw16_f(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}

/* ethtool: snapshot the whole register window into the user buffer */
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
		        void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}

/* ethtool: copy out the statistics key names */
static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys,
		       sizeof(ethtool_stats_keys));
break;
	default:
		BUG();
		break;
	}
}

/* ethtool: trigger the hardware statistics DMA dump, poll for it to
 * finish, then copy the counters out in ethtool_stats_keys order. */
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
				       &dma, GFP_KERNEL);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
	cpr32(StatsAddr);

	/* DumpStats self-clears when the DMA completes; poll up to 10ms */
	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}

static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_sset_count		= cp_get_sset_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
	.get_ringparam		= cp_get_ringparam,
};

/* ndo_do_ioctl: generic MII ioctls (SIOCGMIIPHY etc.), under the lock */
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}

/* ndo_set_mac_address: validate and program the new MAC into MAC0 */
static int cp_set_mac_address(struct net_device *dev, void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&cp->lock);

	cpw8_f(Cfg9346, Cfg9346_Unlock);
	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
	cpw8_f(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&cp->lock);

	return 0;
}

/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0		0x00
#define EE_WRITE_1		0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB			(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
 */

#define eeprom_delay()	readb(ee_addr)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD		(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139

/* Assert chip select to begin an EEPROM command sequence */
static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

/* Bit-bang @cmd_len bits of @cmd onto the EEPROM interface, MSB first */
static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out.
*/
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

/* Deassert chip select, ending the command sequence */
static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb(0, ee_addr);
	eeprom_delay ();
}

/* Issue one extended command (EWEN/EWDS/ERAL/WRAL, selected by the
 * pseudo-address bits) as a complete start/shift/end sequence. */
static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}

/* Read one 16-bit word from the EEPROM at word address @location */
static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	/* clock in 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

/* Write one 16-bit word to the EEPROM: enable writes, shift the word,
 * poll DO for write completion, then disable writes again. */
static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	/* DO goes high when the internal write cycle finishes */
	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}

/* ethtool: EEPROM size in bytes; word 0 == 0x8129 marks the larger part */
static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

/* ethtool: read @eeprom->len bytes starting at byte @eeprom->offset,
 * handling the odd leading/trailing byte of the word-oriented EEPROM. */
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;	/* byte -> word address */
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	/* odd start offset: take only the high byte of the first word */
	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	/* trailing odd byte: low byte of the final word */
	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

/* ethtool: write @eeprom->len bytes, read-modify-writing the partial
 * words at odd start/end offsets. */
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;	/* byte -> word address */
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}

static const struct net_device_ops cp_netdev_ops = {
	.ndo_open		= cp_open,
	.ndo_stop		= cp_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= cp_set_mac_address,
	.ndo_set_rx_mode	= cp_set_rx_mode,
	.ndo_get_stats		= cp_get_stats,
	.ndo_do_ioctl		= cp_ioctl,
	.ndo_start_xmit		= cp_start_xmit,
	.ndo_tx_timeout		= cp_tx_timeout,
	.ndo_set_features	= cp_set_features,
	.ndo_change_mtu		= cp_change_mtu,

#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cp_poll_controller,
#endif
};

/* PCI probe: reject pre-C+ 8139 revisions (handled by 8139too), then
 * allocate the netdev and begin initialising the private state. */
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;

	pr_info_once("%s", version);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
		dev_info(&pdev->dev,
			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
			 pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ?
CP_DEF_MSG_ENABLE : debug); spin_lock_init (&cp->lock); cp->mii_if.dev = dev; cp->mii_if.mdio_read = mdio_read; cp->mii_if.mdio_write = mdio_write; cp->mii_if.phy_id = CP_INTERNAL_PHY; cp->mii_if.phy_id_mask = 0x1f; cp->mii_if.reg_num_mask = 0x1f; cp_set_rxbufsize(cp); rc = pci_enable_device(pdev); if (rc) goto err_out_free; rc = pci_set_mwi(pdev); if (rc) goto err_out_disable; rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_mwi; pciaddr = pci_resource_start(pdev, 1); if (!pciaddr) { rc = -EIO; dev_err(&pdev->dev, "no MMIO resource\n"); goto err_out_res; } if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) { rc = -EIO; dev_err(&pdev->dev, "MMIO resource (%llx) too small\n", (unsigned long long)pci_resource_len(pdev, 1)); goto err_out_res; } /* Configure DMA attributes. */ if ((sizeof(dma_addr_t) > 4) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { pci_using_dac = 0; rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_res; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "No usable consistent DMA configuration, aborting\n"); goto err_out_res; } } cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | PCIMulRW | RxChkSum | CpRxOn | CpTxOn; dev->features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_RXCSUM; regs = ioremap(pciaddr, CP_REGS_SIZE); if (!regs) { rc = -EIO; dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n", (unsigned long long)pci_resource_len(pdev, 1), (unsigned long long)pciaddr); goto err_out_res; } cp->regs = regs; cp_stop_hw(cp); /* read MAC address from EEPROM */ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 
8 : 6; for (i = 0; i < 3; i++) ((__le16 *) (dev->dev_addr))[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); dev->netdev_ops = &cp_netdev_ops; netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); dev->ethtool_ops = &cp_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* disabled by default until verified */ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; rc = register_netdev(dev); if (rc) goto err_out_iomap; netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n", regs, dev->dev_addr, pdev->irq); pci_set_drvdata(pdev, dev); /* enable busmastering and memory-write-invalidate */ pci_set_master(pdev); if (cp->wol_enabled) cp_set_d3_state (cp); return 0; err_out_iomap: iounmap(regs); err_out_res: pci_release_regions(pdev); err_out_mwi: pci_clear_mwi(pdev); err_out_disable: pci_disable_device(pdev); err_out_free: free_netdev(dev); return rc; } static void cp_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct cp_private *cp = netdev_priv(dev); unregister_netdev(dev); iounmap(cp->regs); if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0); pci_release_regions(pdev); pci_clear_mwi(pdev); pci_disable_device(pdev); free_netdev(dev); } #ifdef CONFIG_PM static int cp_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct cp_private *cp = netdev_priv(dev); unsigned long flags; if (!netif_running(dev)) return 0; netif_device_detach (dev); netif_stop_queue (dev); spin_lock_irqsave (&cp->lock, flags); /* Disable Rx and Tx */ cpw16 (IntrMask, 0); cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn)); spin_unlock_irqrestore (&cp->lock, flags); pci_save_state(pdev); pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); 
pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int cp_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct cp_private *cp = netdev_priv(dev); unsigned long flags; if (!netif_running(dev)) return 0; netif_device_attach (dev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_enable_wake(pdev, PCI_D0, 0); /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ cp_init_rings_index (cp); cp_init_hw (cp); cp_enable_irq(cp); netif_start_queue (dev); spin_lock_irqsave (&cp->lock, flags); mii_check_media(&cp->mii_if, netif_msg_link(cp), false); spin_unlock_irqrestore (&cp->lock, flags); return 0; } #endif /* CONFIG_PM */ static const struct pci_device_id cp_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, { }, }; MODULE_DEVICE_TABLE(pci, cp_pci_tbl); static struct pci_driver cp_driver = { .name = DRV_NAME, .id_table = cp_pci_tbl, .probe = cp_init_one, .remove = cp_remove_one, #ifdef CONFIG_PM .resume = cp_resume, .suspend = cp_suspend, #endif }; module_pci_driver(cp_driver);
gpl-2.0
DESHONOR/BroodROM_kernel_huawei_honor
drivers/net/wireless/rtlwifi/rtl8192se/dm.c
2448
21424
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../base.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "fw.h" struct dig_t digtable; static const u32 edca_setting_dl[PEER_MAX] = { 0xa44f, /* 0 UNKNOWN */ 0x5ea44f, /* 1 REALTEK_90 */ 0x5ea44f, /* 2 REALTEK_92SE */ 0xa630, /* 3 BROAD */ 0xa44f, /* 4 RAL */ 0xa630, /* 5 ATH */ 0xa630, /* 6 CISCO */ 0xa42b, /* 7 MARV */ }; static const u32 edca_setting_dl_gmode[PEER_MAX] = { 0x4322, /* 0 UNKNOWN */ 0xa44f, /* 1 REALTEK_90 */ 0x5ea44f, /* 2 REALTEK_92SE */ 0xa42b, /* 3 BROAD */ 0x5e4322, /* 4 RAL */ 0x4322, /* 5 ATH */ 0xa430, /* 6 CISCO */ 0x5ea44f, /* 7 MARV */ }; static const u32 edca_setting_ul[PEER_MAX] = { 0x5e4322, /* 0 UNKNOWN */ 0xa44f, /* 1 REALTEK_90 */ 0x5ea44f, /* 2 REALTEK_92SE */ 0x5ea322, /* 3 BROAD */ 0x5ea422, /* 4 RAL */ 0x5ea322, /* 5 ATH */ 0x3ea44f, /* 6 CISCO */ 0x5ea44f, /* 7 MARV */ }; static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = edca_setting_ul[mac->vendor]; u32 edca_be_dl = edca_setting_dl[mac->vendor]; u32 edca_gmode = edca_setting_dl_gmode[mac->vendor]; if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; goto dm_checkedcaturbo_exit; } if ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting)) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (rtlpriv->phy.rf_type == RF_1T2R) { if (cur_txok_cnt > 4 * cur_rxok_cnt) { /* Uplink TP is present. 
*/ if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } else {/* Balance TP is present. */ if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { if (mac->mode == WIRELESS_MODE_G || mac->mode == WIRELESS_MODE_B) rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_gmode); else rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } rtlpriv->dm.current_turbo_edca = true; } else { if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { if (mac->mode == WIRELESS_MODE_G || mac->mode == WIRELESS_MODE_B) rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_gmode); else rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } rtlpriv->dm.current_turbo_edca = true; } } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)(&tmp)); rtlpriv->dm.current_turbo_edca = false; } } dm_checkedcaturbo_exit: rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } static void _rtl92s_dm_txpowertracking_callback_thermalmeter( struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 thermalvalue = 0; rtlpriv->dm.txpower_trackinginit = true; thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x " "eeprom_thermalmeter 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter)); if (thermalvalue) { rtlpriv->dm.thermalvalue = thermalvalue; 
rtl92s_phy_set_fw_cmd(hw, FW_CMD_TXPWR_TRACK_THERMAL); } rtlpriv->dm.txpowercount = 0; } static void _rtl92s_dm_check_txpowertracking_thermalmeter( struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); static u8 tm_trigger; u8 tx_power_checkcnt = 5; /* 2T2R TP issue */ if (rtlphy->rf_type == RF_2T2R) return; if (!rtlpriv->dm.txpower_tracking) return; if (rtlpriv->dm.txpowercount <= tx_power_checkcnt) { rtlpriv->dm.txpowercount++; return; } if (!tm_trigger) { rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK, 0x60); tm_trigger = 1; } else { _rtl92s_dm_txpowertracking_callback_thermalmeter(hw); tm_trigger = 0; } } static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rate_adaptive *ra = &(rtlpriv->ra); u32 low_rssi_thresh = 0; u32 middle_rssi_thresh = 0; u32 high_rssi_thresh = 0; u8 rssi_level; struct ieee80211_sta *sta = NULL; if (is_hal_stop(rtlhal)) return; if (!rtlpriv->dm.useramask) return; if (!rtlpriv->dm.inform_fw_driverctrldm) { rtl92s_phy_set_fw_cmd(hw, FW_CMD_CTRL_DM_BY_DRIVER); rtlpriv->dm.inform_fw_driverctrldm = true; } rcu_read_lock(); if (mac->opmode == NL80211_IFTYPE_STATION) sta = get_sta(hw, mac->vif, mac->bssid); if ((mac->link_state == MAC80211_LINKED) && (mac->opmode == NL80211_IFTYPE_STATION)) { switch (ra->pre_ratr_state) { case DM_RATR_STA_HIGH: high_rssi_thresh = 40; middle_rssi_thresh = 30; low_rssi_thresh = 20; break; case DM_RATR_STA_MIDDLE: high_rssi_thresh = 44; middle_rssi_thresh = 30; low_rssi_thresh = 20; break; case DM_RATR_STA_LOW: high_rssi_thresh = 44; middle_rssi_thresh = 34; low_rssi_thresh = 20; break; case DM_RATR_STA_ULTRALOW: high_rssi_thresh = 44; middle_rssi_thresh = 34; low_rssi_thresh = 24; break; default: high_rssi_thresh = 44; middle_rssi_thresh = 34; low_rssi_thresh = 24; break; } if 
(rtlpriv->dm.undecorated_smoothed_pwdb > (long)high_rssi_thresh) { ra->ratr_state = DM_RATR_STA_HIGH; rssi_level = 1; } else if (rtlpriv->dm.undecorated_smoothed_pwdb > (long)middle_rssi_thresh) { ra->ratr_state = DM_RATR_STA_LOW; rssi_level = 3; } else if (rtlpriv->dm.undecorated_smoothed_pwdb > (long)low_rssi_thresh) { ra->ratr_state = DM_RATR_STA_LOW; rssi_level = 5; } else { ra->ratr_state = DM_RATR_STA_ULTRALOW; rssi_level = 6; } if (ra->pre_ratr_state != ra->ratr_state) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, ("RSSI = %ld " "RSSI_LEVEL = %d PreState = %d, CurState = %d\n", rtlpriv->dm.undecorated_smoothed_pwdb, ra->ratr_state, ra->pre_ratr_state, ra->ratr_state)); rtlpriv->cfg->ops->update_rate_tbl(hw, sta, ra->ratr_state); ra->pre_ratr_state = ra->ratr_state; } } rcu_read_unlock(); } static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); bool current_mrc; bool enable_mrc = true; long tmpentry_maxpwdb = 0; u8 rssi_a = 0; u8 rssi_b = 0; if (is_hal_stop(rtlhal)) return; if ((rtlphy->rf_type == RF_1T1R) || (rtlphy->rf_type == RF_2T2R)) return; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc)); if (mac->link_state >= MAC80211_LINKED) { if (rtlpriv->dm.undecorated_smoothed_pwdb > tmpentry_maxpwdb) { rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A]; rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B]; } } /* MRC settings would NOT affect TP on Wireless B mode. */ if (mac->mode != WIRELESS_MODE_B) { if ((rssi_a == 0) && (rssi_b == 0)) { enable_mrc = true; } else if (rssi_b > 30) { /* Turn on B-Path */ enable_mrc = true; } else if (rssi_b < 5) { /* Turn off B-path */ enable_mrc = false; /* Take care of RSSI differentiation. 
*/ } else if (rssi_a > 15 && (rssi_a >= rssi_b)) { if ((rssi_a - rssi_b) > 15) /* Turn off B-path */ enable_mrc = false; else if ((rssi_a - rssi_b) < 10) /* Turn on B-Path */ enable_mrc = true; else enable_mrc = current_mrc; } else { /* Turn on B-Path */ enable_mrc = true; } } /* Update MRC settings if needed. */ if (enable_mrc != current_mrc) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MRC, (u8 *)&enable_mrc); } void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } static void _rtl92s_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rate_adaptive *ra = &(rtlpriv->ra); ra->ratr_state = DM_RATR_STA_MAX; ra->pre_ratr_state = DM_RATR_STA_MAX; if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; rtlpriv->dm.useramask = false; rtlpriv->dm.inform_fw_driverctrldm = false; } static void _rtl92s_dm_init_txpowertracking_thermalmeter( struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.txpower_tracking = true; rtlpriv->dm.txpowercount = 0; rtlpriv->dm.txpower_trackinginit = false; } static void _rtl92s_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); u32 ret_value; ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + 
falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; /* read CCK false alarm */ ret_value = rtl_get_bbreg(hw, 0xc64, MASKDWORD); falsealm_cnt->cnt_cck_fail = (ret_value & 0xffff); falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail; } static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); if (falsealm_cnt->cnt_all > digtable.fa_highthresh) { if ((digtable.backoff_val - 6) < digtable.backoffval_range_min) digtable.backoff_val = digtable.backoffval_range_min; else digtable.backoff_val -= 6; } else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) { if ((digtable.backoff_val + 6) > digtable.backoffval_range_max) digtable.backoff_val = digtable.backoffval_range_max; else digtable.backoff_val += 6; } } static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); static u8 initialized, force_write; u8 initial_gain = 0; if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) || (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { if (rtlpriv->psc.rfpwr_state != ERFON) return; if (digtable.backoff_enable_flag == true) rtl92s_backoff_enable_flag(hw); else digtable.backoff_val = DM_DIG_BACKOFF; if ((digtable.rssi_val + 10 - digtable.backoff_val) > digtable.rx_gain_range_max) digtable.cur_igvalue = digtable.rx_gain_range_max; else if ((digtable.rssi_val + 10 - digtable.backoff_val) < digtable.rx_gain_range_min) digtable.cur_igvalue = digtable.rx_gain_range_min; else digtable.cur_igvalue = digtable.rssi_val + 10 - digtable.backoff_val; if (falsealm_cnt->cnt_all > 10000) digtable.cur_igvalue = (digtable.cur_igvalue > 0x33) ? 
digtable.cur_igvalue : 0x33; if (falsealm_cnt->cnt_all > 16000) digtable.cur_igvalue = digtable.rx_gain_range_max; /* connected -> connected or disconnected -> disconnected */ } else { /* Firmware control DIG, do nothing in driver dm */ return; } /* disconnected -> connected or connected -> * disconnected or beforeconnect->(dis)connected */ } else { /* Enable FW DIG */ digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); digtable.backoff_val = DM_DIG_BACKOFF; digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0]; digtable.pre_igvalue = 0; return; } /* Forced writing to prevent from fw-dig overwriting. */ if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0)) force_write = 1; if ((digtable.pre_igvalue != digtable.cur_igvalue) || !initialized || force_write) { /* Disable FW DIG */ rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE); initial_gain = (u8)digtable.cur_igvalue; /* Set initial gain. */ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain); rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain); digtable.pre_igvalue = digtable.cur_igvalue; initialized = 1; force_write = 0; } } static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->mac80211.act_scanning) return; /* Decide the current status and if modify initial gain or not */ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED || rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) digtable.cur_sta_connectstate = DIG_STA_CONNECT; else digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; /* Change dig mode to rssi */ if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) { if (digtable.dig_twoport_algorithm == DIG_TWO_PORT_ALGO_FALSE_ALARM) { digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS); } } _rtl92s_dm_false_alarm_counter_statistics(hw); 
_rtl92s_dm_initial_gain_sta_beforeconnect(hw); digtable.pre_sta_connectstate = digtable.cur_sta_connectstate; } static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); /* 2T2R TP issue */ if (rtlphy->rf_type == RF_2T2R) return; if (!rtlpriv->dm.dm_initialgain_enable) return; if (digtable.dig_enable_flag == false) return; _rtl92s_dm_ctrl_initgain_bytwoport(hw); } static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long undecorated_smoothed_pwdb; long txpwr_threshold_lv1, txpwr_threshold_lv2; /* 2T2R TP issue */ if (rtlphy->rf_type == RF_2T2R) return; if (!rtlpriv->dm.dynamic_txpower_enable || rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; return; } if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, ("Not connected to any\n")); rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; rtlpriv->dm.last_dtp_lvl = TX_HIGHPWR_LEVEL_NORMAL; return; } if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_ADHOC) { undecorated_smoothed_pwdb = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("AP Client PWDB = 0x%lx\n", undecorated_smoothed_pwdb)); } else { undecorated_smoothed_pwdb = rtlpriv->dm.undecorated_smoothed_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("STA Default Port PWDB = 0x%lx\n", undecorated_smoothed_pwdb)); } } else { undecorated_smoothed_pwdb = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("AP Ext Port PWDB = 0x%lx\n", undecorated_smoothed_pwdb)); } txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2; txpwr_threshold_lv1 = TX_POWER_NEAR_FIELD_THRESH_LVL1; if 
(rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1) rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; else if (undecorated_smoothed_pwdb >= txpwr_threshold_lv2) rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2; else if ((undecorated_smoothed_pwdb < (txpwr_threshold_lv2 - 3)) && (undecorated_smoothed_pwdb >= txpwr_threshold_lv1)) rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1; else if (undecorated_smoothed_pwdb < (txpwr_threshold_lv1 - 3)) rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) rtl92s_phy_set_txpower(hw, rtlphy->current_channel); rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); /* Disable DIG scheme now.*/ digtable.dig_enable_flag = true; digtable.backoff_enable_flag = true; if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) && (hal_get_firmwareversion(rtlpriv) >= 0x3c)) digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT; else digtable.dig_algorithm = DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM; digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; /* off=by real rssi value, on=by digtable.rssi_val for new dig */ digtable.dig_dbgmode = DM_DBG_OFF; digtable.dig_slgorithm_switch = 0; /* 2007/10/04 MH Define init gain threshol. 
*/
	/* DIG state machine starts out idle and fully disconnected. */
	digtable.dig_state = DM_STA_DIG_MAX;
	digtable.dig_highpwrstate = DM_STA_DIG_MAX;
	digtable.cur_sta_connectstate = DIG_STA_DISCONNECT;
	digtable.pre_sta_connectstate = DIG_STA_DISCONNECT;
	digtable.cur_ap_connectstate = DIG_AP_DISCONNECT;
	digtable.pre_ap_connectstate = DIG_AP_DISCONNECT;

	/* RSSI and false-alarm thresholds that drive the DIG decisions. */
	digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
	digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
	digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
	digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;

	/* for dig debug rssi value */
	digtable.rssi_val = 50;
	digtable.backoff_val = DM_DIG_BACKOFF;
	/* Legal ranges for the initial gain and the back-off value. */
	digtable.rx_gain_range_max = DM_DIG_MAX;
	digtable.rx_gain_range_min = DM_DIG_MIN;
	digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX;
	digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN;
}

/*
 * Enable driver-side dynamic TX power control only when the firmware
 * version is at least 60 and DM is handled by the driver; otherwise
 * leave it disabled.  Power level always starts at NORMAL.
 */
static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if ((hal_get_firmwareversion(rtlpriv) >= 60) &&
	    (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER))
		rtlpriv->dm.dynamic_txpower_enable = true;
	else
		rtlpriv->dm.dynamic_txpower_enable = false;

	rtlpriv->dm.last_dtp_lvl = TX_HIGHPWR_LEVEL_NORMAL;
	rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
}

/*
 * One-time initialization of every dynamic-management (DM) subsystem:
 * TX power, EDCA turbo, rate-adaptive mask, thermal-meter tracking and
 * DIG.  -1 marks the smoothed PWDB as "no sample yet".
 */
void rtl92s_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtlpriv->dm.undecorated_smoothed_pwdb = -1;

	_rtl92s_dm_init_dynamic_txpower(hw);
	rtl92s_dm_init_edca_turbo(hw);
	_rtl92s_dm_init_rate_adaptive_mask(hw);
	_rtl92s_dm_init_txpowertracking_thermalmeter(hw);
	_rtl92s_dm_init_dig(hw);

	/* write FW_CCA_CHK_ENABLE to WFM5 — enables the firmware CCA check */
	rtl_write_dword(rtlpriv, WFM5, FW_CCA_CHK_ENABLE);
}

/* Periodic DM worker: run each dynamic-tuning pass in turn. */
void rtl92s_dm_watchdog(struct ieee80211_hw *hw)
{
	_rtl92s_dm_check_edca_turbo(hw);
	_rtl92s_dm_check_txpowertracking_thermalmeter(hw);
	_rtl92s_dm_ctrl_initgain_byrssi(hw);
	_rtl92s_dm_dynamic_txpower(hw);
	_rtl92s_dm_refresh_rateadaptive_mask(hw);
	_rtl92s_dm_switch_baseband_mrc(hw);
}
gpl-2.0
teamacid/niltmt_kernel
arch/arm/plat-samsung/dev-onenand.c
2704
1063
/* * linux/arch/arm/plat-samsung/dev-onenand.c * * Copyright (c) 2008-2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * * S3C64XX/S5PC100 series device definition for OneNAND devices * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <mach/irqs.h> #include <mach/map.h> static struct resource s3c_onenand_resources[] = { [0] = { .start = S3C_PA_ONENAND, .end = S3C_PA_ONENAND + 0x400 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = S3C_PA_ONENAND_BUF, .end = S3C_PA_ONENAND_BUF + S3C_SZ_ONENAND_BUF - 1, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_ONENAND, .end = IRQ_ONENAND, .flags = IORESOURCE_IRQ, }, }; struct platform_device s3c_device_onenand = { .name = "samsung-onenand", .id = 0, .num_resources = ARRAY_SIZE(s3c_onenand_resources), .resource = s3c_onenand_resources, };
gpl-2.0
cuteprince/jb_kernel_3.0.16_htc_golfu
drivers/mmc/host/at91_mci.c
2704
31224
/* * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver * * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved * * Copyright (C) 2006 Malcolm Noyes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* This is the AT91 MCI driver that has been tested with both MMC cards and SD-cards. Boards that support write protect are now supported. The CCAT91SBC001 board does not support SD cards. The three entry points are at91_mci_request, at91_mci_set_ios and at91_mci_get_ro. SET IOS This configures the device to put it into the correct mode and clock speed required. MCI REQUEST MCI request processes the commands sent in the mmc_request structure. This can consist of a processing command and a stop command in the case of multiple block transfers. There are three main types of request, commands, reads and writes. Commands are straight forward. The command is submitted to the controller and the request function returns. When the controller generates an interrupt to indicate the command is finished, the response to the command are read and the mmc_request_done function called to end the request. Reads and writes work in a similar manner to normal commands but involve the PDC (DMA) controller to manage the transfers. A read is done from the controller directly to the scatterlist passed in from the request. Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug. The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY A write is slightly different in that the bytes to write are read from the scatterlist into a dma memory buffer (this is in case the source buffer should be read only). The entire write buffer is then done from this single dma memory buffer. 
The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY GET RO Gets the status of the write protect pin, if available. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/atmel_pdc.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/gpio.h> #include <mach/board.h> #include <mach/cpu.h> #include <mach/at91_mci.h> #define DRIVER_NAME "at91_mci" static inline int at91mci_is_mci1rev2xx(void) { return ( cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91cap9() || cpu_is_at91sam9rl() || cpu_is_at91sam9g10() || cpu_is_at91sam9g20() ); } #define FL_SENT_COMMAND (1 << 0) #define FL_SENT_STOP (1 << 1) #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \ | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \ | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE) #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg)) #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg)) #define MCI_BLKSIZE 512 #define MCI_MAXBLKSIZE 4095 #define MCI_BLKATONCE 256 #define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE) /* * Low level type for this driver */ struct at91mci_host { struct mmc_host *mmc; struct mmc_command *cmd; struct mmc_request *request; void __iomem *baseaddr; int irq; struct at91_mmc_data *board; int present; struct clk *mci_clk; /* * Flag indicating when the command has been sent. 
This is used to * work out whether or not to send the stop */ unsigned int flags; /* flag for current bus settings */ u32 bus_mode; /* DMA buffer used for transmitting */ unsigned int* buffer; dma_addr_t physical_address; unsigned int total_length; /* Latest in the scatterlist that has been enabled for transfer, but not freed */ int in_use_index; /* Latest in the scatterlist that has been enabled for transfer */ int transfer_index; /* Timer for timeouts */ struct timer_list timer; }; /* * Reset the controller and restore most of the state */ static void at91_reset_host(struct at91mci_host *host) { unsigned long flags; u32 mr; u32 sdcr; u32 dtor; u32 imr; local_irq_save(flags); imr = at91_mci_read(host, AT91_MCI_IMR); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); /* save current state */ mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; sdcr = at91_mci_read(host, AT91_MCI_SDCR); dtor = at91_mci_read(host, AT91_MCI_DTOR); /* reset the controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); /* restore state */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); at91_mci_write(host, AT91_MCI_MR, mr); at91_mci_write(host, AT91_MCI_SDCR, sdcr); at91_mci_write(host, AT91_MCI_DTOR, dtor); at91_mci_write(host, AT91_MCI_IER, imr); /* make sure sdio interrupts will fire */ at91_mci_read(host, AT91_MCI_SR); local_irq_restore(flags); } static void at91_timeout_timer(unsigned long data) { struct at91mci_host *host; host = (struct at91mci_host *)data; if (host->request) { dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); if (host->cmd && host->cmd->data) { host->cmd->data->error = -ETIMEDOUT; } else { if (host->cmd) host->cmd->error = -ETIMEDOUT; else host->request->cmd->error = -ETIMEDOUT; } at91_reset_host(host); mmc_request_done(host->mmc, host->request); } } /* * Copy from sg to a dma block - used for transfers */ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) { unsigned int len, i, size; unsigned 
*dmabuf = host->buffer; size = data->blksz * data->blocks; len = data->sg_len; /* MCI1 rev2xx Data Write Operation and number of bytes erratum */ if (at91mci_is_mci1rev2xx()) if (host->total_length == 12) memset(dmabuf, 0, 12); /* * Just loop through all entries. Size might not * be the entire list though so make sure that * we do not transfer too much. */ for (i = 0; i < len; i++) { struct scatterlist *sg; int amount; unsigned int *sgbuffer; sg = &data->sg[i]; sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; amount = min(size, sg->length); size -= amount; if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ int index; for (index = 0; index < (amount / 4); index++) *dmabuf++ = swab32(sgbuffer[index]); } else { char *tmpv = (char *)dmabuf; memcpy(tmpv, sgbuffer, amount); tmpv += amount; dmabuf = (unsigned *)tmpv; } kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); if (size == 0) break; } /* * Check that we didn't get a request to transfer * more data than can fit into the SG list. */ BUG_ON(size != 0); } /* * Handle after a dma read */ static void at91_mci_post_dma_read(struct at91mci_host *host) { struct mmc_command *cmd; struct mmc_data *data; unsigned int len, i, size; unsigned *dmabuf = host->buffer; pr_debug("post dma read\n"); cmd = host->cmd; if (!cmd) { pr_debug("no command\n"); return; } data = cmd->data; if (!data) { pr_debug("no data\n"); return; } size = data->blksz * data->blocks; len = data->sg_len; at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX); at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF); for (i = 0; i < len; i++) { struct scatterlist *sg; int amount; unsigned int *sgbuffer; sg = &data->sg[i]; sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; amount = min(size, sg->length); size -= amount; if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ int index; for (index = 0; index < (amount / 4); index++) sgbuffer[index] = swab32(*dmabuf++); } else { char *tmpv = (char *)dmabuf; memcpy(sgbuffer, tmpv, amount); tmpv += amount; 
dmabuf = (unsigned *)tmpv; } flush_kernel_dcache_page(sg_page(sg)); kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); data->bytes_xfered += amount; if (size == 0) break; } pr_debug("post dma read done\n"); } /* * Handle transmitted data */ static void at91_mci_handle_transmitted(struct at91mci_host *host) { struct mmc_command *cmd; struct mmc_data *data; pr_debug("Handling the transmit\n"); /* Disable the transfer */ at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); /* Now wait for cmd ready */ at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE); cmd = host->cmd; if (!cmd) return; data = cmd->data; if (!data) return; if (cmd->data->blocks > 1) { pr_debug("multiple write : wait for BLKE...\n"); at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); } else at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } /* * Update bytes tranfered count during a write operation */ static void at91_mci_update_bytes_xfered(struct at91mci_host *host) { struct mmc_data *data; /* always deal with the effective request (and not the current cmd) */ if (host->request->cmd && host->request->cmd->error != 0) return; if (host->request->data) { data = host->request->data; if (data->flags & MMC_DATA_WRITE) { /* card is in IDLE mode now */ pr_debug("-> bytes_xfered %d, total_length = %d\n", data->bytes_xfered, host->total_length); data->bytes_xfered = data->blksz * data->blocks; } } } /*Handle after command sent ready*/ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) { if (!host->cmd) return 1; else if (!host->cmd->data) { if (host->flags & FL_SENT_STOP) { /*After multi block write, we must wait for NOTBUSY*/ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } else return 1; } else if (host->cmd->data->flags & MMC_DATA_WRITE) { /*After sendding multi-block-write command, start DMA transfer*/ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); } /* command not completed, have to wait */ 
return 0; } /* * Enable the controller */ static void at91_mci_enable(struct at91mci_host *host) { unsigned int mr; at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); mr = AT91_MCI_PDCMODE | 0x34a; if (at91mci_is_mci1rev2xx()) mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF; at91_mci_write(host, AT91_MCI_MR, mr); /* use Slot A or B (only one at same time) */ at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b); } /* * Disable the controller */ static void at91_mci_disable(struct at91mci_host *host) { at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); } /* * Send a command */ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd) { unsigned int cmdr, mr; unsigned int block_length; struct mmc_data *data = cmd->data; unsigned int blocks; unsigned int ier = 0; host->cmd = cmd; /* Needed for leaving busy state before CMD1 */ if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { pr_debug("Clearing timeout\n"); at91_mci_write(host, AT91_MCI_ARGR, 0); at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD); while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) { /* spin */ pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR)); } } cmdr = cmd->opcode; if (mmc_resp_type(cmd) == MMC_RSP_NONE) cmdr |= AT91_MCI_RSPTYP_NONE; else { /* if a response is expected then allow maximum response latancy */ cmdr |= AT91_MCI_MAXLAT; /* set 136 bit response for R2, 48 bit response otherwise */ if (mmc_resp_type(cmd) == MMC_RSP_R2) cmdr |= AT91_MCI_RSPTYP_136; else cmdr |= AT91_MCI_RSPTYP_48; } if (data) { if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { if (data->blksz & 0x3) { pr_debug("Unsupported block size\n"); cmd->error = -EINVAL; mmc_request_done(host->mmc, host->request); return; } if (data->flags & MMC_DATA_STREAM) { pr_debug("Stream commands not supported\n"); 
cmd->error = -EINVAL; mmc_request_done(host->mmc, host->request); return; } } block_length = data->blksz; blocks = data->blocks; /* always set data start - also set direction flag for read */ if (data->flags & MMC_DATA_READ) cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START); else if (data->flags & MMC_DATA_WRITE) cmdr |= AT91_MCI_TRCMD_START; if (cmd->opcode == SD_IO_RW_EXTENDED) { cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK; } else { if (data->flags & MMC_DATA_STREAM) cmdr |= AT91_MCI_TRTYP_STREAM; if (data->blocks > 1) cmdr |= AT91_MCI_TRTYP_MULTIPLE; } } else { block_length = 0; blocks = 0; } if (host->flags & FL_SENT_STOP) cmdr |= AT91_MCI_TRCMD_STOP; if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) cmdr |= AT91_MCI_OPDCMD; /* * Set the arguments and send the command */ pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n", cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR)); if (!data) { at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS); at91_mci_write(host, ATMEL_PDC_RPR, 0); at91_mci_write(host, ATMEL_PDC_RCR, 0); at91_mci_write(host, ATMEL_PDC_RNPR, 0); at91_mci_write(host, ATMEL_PDC_RNCR, 0); at91_mci_write(host, ATMEL_PDC_TPR, 0); at91_mci_write(host, ATMEL_PDC_TCR, 0); at91_mci_write(host, ATMEL_PDC_TNPR, 0); at91_mci_write(host, ATMEL_PDC_TNCR, 0); ier = AT91_MCI_CMDRDY; } else { /* zero block length and PDC mode */ mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; mr |= (data->blksz & 0x3) ? 
AT91_MCI_PDCFBYTE : 0; mr |= (block_length << 16); mr |= AT91_MCI_PDCMODE; at91_mci_write(host, AT91_MCI_MR, mr); if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) at91_mci_write(host, AT91_MCI_BLKR, AT91_MCI_BLKR_BCNT(blocks) | AT91_MCI_BLKR_BLKLEN(block_length)); /* * Disable the PDC controller */ at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); if (cmdr & AT91_MCI_TRCMD_START) { data->bytes_xfered = 0; host->transfer_index = 0; host->in_use_index = 0; if (cmdr & AT91_MCI_TRDIR) { /* * Handle a read */ host->total_length = 0; at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address); at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? (blocks * block_length) : (blocks * block_length) / 4); at91_mci_write(host, ATMEL_PDC_RNPR, 0); at91_mci_write(host, ATMEL_PDC_RNCR, 0); ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */; } else { /* * Handle a write */ host->total_length = block_length * blocks; /* * MCI1 rev2xx Data Write Operation and * number of bytes erratum */ if (at91mci_is_mci1rev2xx()) if (host->total_length < 12) host->total_length = 12; at91_mci_sg_to_dma(host, data); pr_debug("Transmitting %d bytes\n", host->total_length); at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? 
host->total_length : host->total_length / 4); ier = AT91_MCI_CMDRDY; } } } /* * Send the command and then enable the PDC - not the other way round as * the data sheet says */ at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); at91_mci_write(host, AT91_MCI_CMDR, cmdr); if (cmdr & AT91_MCI_TRCMD_START) { if (cmdr & AT91_MCI_TRDIR) at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); } /* Enable selected interrupts */ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier); } /* * Process the next step in the request */ static void at91_mci_process_next(struct at91mci_host *host) { if (!(host->flags & FL_SENT_COMMAND)) { host->flags |= FL_SENT_COMMAND; at91_mci_send_command(host, host->request->cmd); } else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { host->flags |= FL_SENT_STOP; at91_mci_send_command(host, host->request->stop); } else { del_timer(&host->timer); /* the at91rm9200 mci controller hangs after some transfers, * and the workaround is to reset it after each transfer. 
*/ if (cpu_is_at91rm9200()) at91_reset_host(host); mmc_request_done(host->mmc, host->request); } } /* * Handle a command that has been completed */ static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) { struct mmc_command *cmd = host->cmd; struct mmc_data *data = cmd->data; at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2)); cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", status, at91_mci_read(host, AT91_MCI_SR), cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (status & AT91_MCI_ERRORS) { if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { cmd->error = 0; } else { if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { if (data) { if (status & AT91_MCI_DTOE) data->error = -ETIMEDOUT; else if (status & AT91_MCI_DCRCE) data->error = -EILSEQ; } } else { if (status & AT91_MCI_RTOE) cmd->error = -ETIMEDOUT; else if (status & AT91_MCI_RCRCE) cmd->error = -EILSEQ; else cmd->error = -EIO; } pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", cmd->error, data ? 
data->error : 0, cmd->opcode, cmd->retries); } } else cmd->error = 0; at91_mci_process_next(host); } /* * Handle an MMC request */ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct at91mci_host *host = mmc_priv(mmc); host->request = mrq; host->flags = 0; /* more than 1s timeout needed with slow SD cards */ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); at91_mci_process_next(host); } /* * Set the IOS */ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { int clkdiv; struct at91mci_host *host = mmc_priv(mmc); unsigned long at91_master_clock = clk_get_rate(host->mci_clk); host->bus_mode = ios->bus_mode; if (ios->clock == 0) { /* Disable the MCI controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS); clkdiv = 0; } else { /* Enable the MCI controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); if ((at91_master_clock % (ios->clock * 2)) == 0) clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; else clkdiv = (at91_master_clock / ios->clock) / 2; pr_debug("clkdiv = %d. 
mcck = %ld\n", clkdiv, at91_master_clock / (2 * (clkdiv + 1))); } if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { pr_debug("MMC: Setting controller bus width to 4\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS); } else { pr_debug("MMC: Setting controller bus width to 1\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); } /* Set the clock divider */ at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); /* maybe switch power to the card */ if (host->board->vcc_pin) { switch (ios->power_mode) { case MMC_POWER_OFF: gpio_set_value(host->board->vcc_pin, 0); break; case MMC_POWER_UP: gpio_set_value(host->board->vcc_pin, 1); break; case MMC_POWER_ON: break; default: WARN_ON(1); } } } /* * Handle an interrupt */ static irqreturn_t at91_mci_irq(int irq, void *devid) { struct at91mci_host *host = devid; int completed = 0; unsigned int int_status, int_mask; int_status = at91_mci_read(host, AT91_MCI_SR); int_mask = at91_mci_read(host, AT91_MCI_IMR); pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask, int_status & int_mask); int_status = int_status & int_mask; if (int_status & AT91_MCI_ERRORS) { completed = 1; if (int_status & AT91_MCI_UNRE) pr_debug("MMC: Underrun error\n"); if (int_status & AT91_MCI_OVRE) pr_debug("MMC: Overrun error\n"); if (int_status & AT91_MCI_DTOE) pr_debug("MMC: Data timeout\n"); if (int_status & AT91_MCI_DCRCE) pr_debug("MMC: CRC error in data\n"); if (int_status & AT91_MCI_RTOE) pr_debug("MMC: Response timeout\n"); if (int_status & AT91_MCI_RENDE) pr_debug("MMC: Response end bit error\n"); if (int_status & AT91_MCI_RCRCE) pr_debug("MMC: Response CRC error\n"); if (int_status & AT91_MCI_RDIRE) pr_debug("MMC: Response direction error\n"); if (int_status & AT91_MCI_RINDE) pr_debug("MMC: Response index error\n"); } else { /* Only continue processing if no errors */ if (int_status & 
AT91_MCI_TXBUFE) { pr_debug("TX buffer empty\n"); at91_mci_handle_transmitted(host); } if (int_status & AT91_MCI_ENDRX) { pr_debug("ENDRX\n"); at91_mci_post_dma_read(host); } if (int_status & AT91_MCI_RXBUFF) { pr_debug("RX buffer full\n"); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX); completed = 1; } if (int_status & AT91_MCI_ENDTX) pr_debug("Transmit has ended\n"); if (int_status & AT91_MCI_NOTBUSY) { pr_debug("Card is ready\n"); at91_mci_update_bytes_xfered(host); completed = 1; } if (int_status & AT91_MCI_DTIP) pr_debug("Data transfer in progress\n"); if (int_status & AT91_MCI_BLKE) { pr_debug("Block transfer has ended\n"); if (host->request->data && host->request->data->blocks > 1) { /* multi block write : complete multi write * command and send stop */ completed = 1; } else { at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } } if (int_status & AT91_MCI_SDIOIRQA) mmc_signal_sdio_irq(host->mmc); if (int_status & AT91_MCI_SDIOIRQB) mmc_signal_sdio_irq(host->mmc); if (int_status & AT91_MCI_TXRDY) pr_debug("Ready to transmit\n"); if (int_status & AT91_MCI_RXRDY) pr_debug("Ready to receive\n"); if (int_status & AT91_MCI_CMDRDY) { pr_debug("Command ready\n"); completed = at91_mci_handle_cmdrdy(host); } } if (completed) { pr_debug("Completed command\n"); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); at91_mci_completed_command(host, int_status); } else at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); return IRQ_HANDLED; } static irqreturn_t at91_mmc_det_irq(int irq, void *_host) { struct at91mci_host *host = _host; int present = !gpio_get_value(irq_to_gpio(irq)); /* * we expect this irq on both insert and remove, * and use a short delay to debounce. */ if (present != host->present) { host->present = present; pr_debug("%s: card %s\n", mmc_hostname(host->mmc), present ? 
"insert" : "remove"); if (!present) { pr_debug("****** Resetting SD-card bus width ******\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); } /* 0.5s needed because of early card detect switch firing */ mmc_detect_change(host->mmc, msecs_to_jiffies(500)); } return IRQ_HANDLED; } static int at91_mci_get_ro(struct mmc_host *mmc) { struct at91mci_host *host = mmc_priv(mmc); if (host->board->wp_pin) return !!gpio_get_value(host->board->wp_pin); /* * Board doesn't support read only detection; let the mmc core * decide what to do. */ return -ENOSYS; } static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct at91mci_host *host = mmc_priv(mmc); pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); } static const struct mmc_host_ops at91_mci_ops = { .request = at91_mci_request, .set_ios = at91_mci_set_ios, .get_ro = at91_mci_get_ro, .enable_sdio_irq = at91_mci_enable_sdio_irq, }; /* * Probe for the device */ static int __init at91_mci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct at91mci_host *host; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); goto fail6; } mmc->ops = &at91_mci_ops; mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = 0; mmc->max_blk_size = MCI_MAXBLKSIZE; mmc->max_blk_count = MCI_BLKATONCE; mmc->max_req_size = MCI_BUFSIZE; mmc->max_segs = MCI_BLKATONCE; mmc->max_seg_size = MCI_BUFSIZE; host = mmc_priv(mmc); host->mmc = mmc; host->bus_mode = 0; 
host->board = pdev->dev.platform_data; if (host->board->wire4) { if (at91mci_is_mci1rev2xx()) mmc->caps |= MMC_CAP_4_BIT_DATA; else dev_warn(&pdev->dev, "4 wire bus mode not supported" " - using 1 wire\n"); } host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE, &host->physical_address, GFP_KERNEL); if (!host->buffer) { ret = -ENOMEM; dev_err(&pdev->dev, "Can't allocate transmit buffer\n"); goto fail5; } /* Add SDIO capability when available */ if (at91mci_is_mci1rev2xx()) { /* at91mci MCI1 rev2xx sdio interrupt erratum */ if (host->board->wire4 || !host->board->slot_b) mmc->caps |= MMC_CAP_SDIO_IRQ; } /* * Reserve GPIOs ... board init code makes sure these pins are set * up as GPIOs with the right direction (input, except for vcc) */ if (host->board->det_pin) { ret = gpio_request(host->board->det_pin, "mmc_detect"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim card detect pin\n"); goto fail4b; } } if (host->board->wp_pin) { ret = gpio_request(host->board->wp_pin, "mmc_wp"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n"); goto fail4; } } if (host->board->vcc_pin) { ret = gpio_request(host->board->vcc_pin, "mmc_vcc"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n"); goto fail3; } } /* * Get Clock */ host->mci_clk = clk_get(&pdev->dev, "mci_clk"); if (IS_ERR(host->mci_clk)) { ret = -ENODEV; dev_dbg(&pdev->dev, "no mci_clk?\n"); goto fail2; } /* * Map I/O region */ host->baseaddr = ioremap(res->start, resource_size(res)); if (!host->baseaddr) { ret = -ENOMEM; goto fail1; } /* * Reset hardware */ clk_enable(host->mci_clk); /* Enable the peripheral clock */ at91_mci_disable(host); at91_mci_enable(host); /* * Allocate the MCI interrupt */ host->irq = platform_get_irq(pdev, 0); ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) { dev_dbg(&pdev->dev, "request MCI interrupt failed\n"); goto fail0; } setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); 
platform_set_drvdata(pdev, mmc); /* * Add host to MMC layer */ if (host->board->det_pin) { host->present = !gpio_get_value(host->board->det_pin); } else host->present = -1; mmc_add_host(mmc); /* * monitor card insertion/removal if we can */ if (host->board->det_pin) { ret = request_irq(gpio_to_irq(host->board->det_pin), at91_mmc_det_irq, 0, mmc_hostname(mmc), host); if (ret) dev_warn(&pdev->dev, "request MMC detect irq failed\n"); else device_init_wakeup(&pdev->dev, 1); } pr_debug("Added MCI driver\n"); return 0; fail0: clk_disable(host->mci_clk); iounmap(host->baseaddr); fail1: clk_put(host->mci_clk); fail2: if (host->board->vcc_pin) gpio_free(host->board->vcc_pin); fail3: if (host->board->wp_pin) gpio_free(host->board->wp_pin); fail4: if (host->board->det_pin) gpio_free(host->board->det_pin); fail4b: if (host->buffer) dma_free_coherent(&pdev->dev, MCI_BUFSIZE, host->buffer, host->physical_address); fail5: mmc_free_host(mmc); fail6: release_mem_region(res->start, resource_size(res)); dev_err(&pdev->dev, "probe failed, err %d\n", ret); return ret; } /* * Remove a device */ static int __exit at91_mci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host; struct resource *res; if (!mmc) return -1; host = mmc_priv(mmc); if (host->buffer) dma_free_coherent(&pdev->dev, MCI_BUFSIZE, host->buffer, host->physical_address); if (host->board->det_pin) { if (device_can_wakeup(&pdev->dev)) free_irq(gpio_to_irq(host->board->det_pin), host); device_init_wakeup(&pdev->dev, 0); gpio_free(host->board->det_pin); } at91_mci_disable(host); del_timer_sync(&host->timer); mmc_remove_host(mmc); free_irq(host->irq, host); clk_disable(host->mci_clk); /* Disable the peripheral clock */ clk_put(host->mci_clk); if (host->board->vcc_pin) gpio_free(host->board->vcc_pin); if (host->board->wp_pin) gpio_free(host->board->wp_pin); iounmap(host->baseaddr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, 
resource_size(res)); mmc_free_host(mmc); platform_set_drvdata(pdev, NULL); pr_debug("MCI Removed\n"); return 0; } #ifdef CONFIG_PM static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host = mmc_priv(mmc); int ret = 0; if (host->board->det_pin && device_may_wakeup(&pdev->dev)) enable_irq_wake(host->board->det_pin); if (mmc) ret = mmc_suspend_host(mmc); return ret; } static int at91_mci_resume(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host = mmc_priv(mmc); int ret = 0; if (host->board->det_pin && device_may_wakeup(&pdev->dev)) disable_irq_wake(host->board->det_pin); if (mmc) ret = mmc_resume_host(mmc); return ret; } #else #define at91_mci_suspend NULL #define at91_mci_resume NULL #endif static struct platform_driver at91_mci_driver = { .remove = __exit_p(at91_mci_remove), .suspend = at91_mci_suspend, .resume = at91_mci_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init at91_mci_init(void) { return platform_driver_probe(&at91_mci_driver, at91_mci_probe); } static void __exit at91_mci_exit(void) { platform_driver_unregister(&at91_mci_driver); } module_init(at91_mci_init); module_exit(at91_mci_exit); MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver"); MODULE_AUTHOR("Nick Randell"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at91_mci");
gpl-2.0
ibrahima/kernel_i9300
net/atm/svc.c
3984
16807
/* net/atm/svc.c - ATM SVC sockets */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/string.h> #include <linux/net.h> /* struct socket, struct proto_ops */ #include <linux/errno.h> /* error codes */ #include <linux/kernel.h> /* printk */ #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/sched.h> /* jiffies and HZ */ #include <linux/fcntl.h> /* O_NONBLOCK */ #include <linux/init.h> #include <linux/atm.h> /* ATM stuff */ #include <linux/atmsap.h> #include <linux/atmsvc.h> #include <linux/atmdev.h> #include <linux/bitops.h> #include <net/sock.h> /* for sock_no_* */ #include <linux/uaccess.h> #include "resources.h" #include "common.h" /* common for PVCs and SVCs */ #include "signaling.h" #include "addr.h" static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); /* * Note: since all this is still nicely synchronized with the signaling demon, * there's no need to protect sleep loops with clis. If signaling is * moved into the kernel, that would change. */ static int svc_shutdown(struct socket *sock, int how) { return 0; } static void svc_disconnect(struct atm_vcc *vcc) { DEFINE_WAIT(wait); struct sk_buff *skb; struct sock *sk = sk_atm(vcc); pr_debug("%p\n", vcc); if (test_bit(ATM_VF_REGIS, &vcc->flags)) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_close, NULL, NULL, NULL); while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); } /* beware - socket is still in use by atmsigd until the last as_indicate has been answered */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc, skb->truesize); pr_debug("LISTEN REL\n"); sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0); dev_kfree_skb(skb); } clear_bit(ATM_VF_REGIS, &vcc->flags); /* ... 
may retry later */ } static int svc_release(struct socket *sock) { struct sock *sk = sock->sk; struct atm_vcc *vcc; if (sk) { vcc = ATM_SD(sock); pr_debug("%p\n", vcc); clear_bit(ATM_VF_READY, &vcc->flags); /* * VCC pointer is used as a reference, * so we must not free it (thereby subjecting it to re-use) * before all pending connections are closed */ svc_disconnect(vcc); vcc_release(sock); } return 0; } static int svc_bind(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc; int error; if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) return -EINVAL; lock_sock(sk); if (sock->state == SS_CONNECTED) { error = -EISCONN; goto out; } if (sock->state != SS_UNCONNECTED) { error = -EINVAL; goto out; } vcc = ATM_SD(sock); addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } clear_bit(ATM_VF_BOUND, &vcc->flags); /* failing rebind will kill old binding */ /* @@@ check memory (de)allocation on rebind */ if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } vcc->local = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ if (!sigd) { error = -EUNATCH; goto out; } if (!sk->sk_err) set_bit(ATM_VF_BOUND, &vcc->flags); error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); if (sockaddr_len != 
sizeof(struct sockaddr_atmsvc)) { error = -EINVAL; goto out; } switch (sock->state) { default: error = -EINVAL; goto out; case SS_CONNECTED: error = -EISCONN; goto out; case SS_CONNECTING: if (test_bit(ATM_VF_WAITING, &vcc->flags)) { error = -EALREADY; goto out; } sock->state = SS_UNCONNECTED; if (sk->sk_err) { error = -sk->sk_err; goto out; } break; case SS_UNCONNECTED: addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) { error = -EINVAL; goto out; } if (!vcc->qos.txtp.traffic_class && !vcc->qos.rxtp.traffic_class) { error = -EINVAL; goto out; } vcc->remote = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); if (flags & O_NONBLOCK) { finish_wait(sk_sleep(sk), &wait); sock->state = SS_CONNECTING; error = -EINPROGRESS; goto out; } error = 0; while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); if (!signal_pending(current)) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); continue; } pr_debug("*ABORT*\n"); /* * This is tricky: * Kernel ---close--> Demon * Kernel <--close--- Demon * or * Kernel ---close--> Demon * Kernel <--error--- Demon * or * Kernel ---close--> Demon * Kernel <--okay---- Demon * Kernel <--close--- Demon */ sigd_enq(vcc, as_close, NULL, NULL, NULL); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } if (!sk->sk_err) while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } clear_bit(ATM_VF_REGIS, &vcc->flags); clear_bit(ATM_VF_RELEASED, &vcc->flags); clear_bit(ATM_VF_CLOSE, &vcc->flags); /* we're gone now but may connect later */ error = 
-EINTR; break; } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!sigd) { error = -EUNATCH; goto out; } if (sk->sk_err) { error = -sk->sk_err; goto out; } } /* * Not supported yet * * #ifndef CONFIG_SINGLE_SIGITF */ vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp); vcc->qos.txtp.pcr = 0; vcc->qos.txtp.min_pcr = 0; /* * #endif */ error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci); if (!error) sock->state = SS_CONNECTED; else (void)svc_disconnect(vcc); out: release_sock(sk); return error; } static int svc_listen(struct socket *sock, int backlog) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); /* let server handle listen on unbound sockets */ if (test_bit(ATM_VF_SESSION, &vcc->flags)) { error = -EINVAL; goto out; } if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { error = -EADDRINUSE; goto out; } set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } set_bit(ATM_VF_LISTEN, &vcc->flags); vcc_insert_socket(sk); sk->sk_max_ack_backlog = backlog > 0 ? 
backlog : ATM_BACKLOG_DEFAULT; error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; struct atmsvc_msg *msg; struct atm_vcc *old_vcc = ATM_SD(sock); struct atm_vcc *new_vcc; int error; lock_sock(sk); error = svc_create(sock_net(sk), newsock, 0, 0); if (error) goto out; new_vcc = ATM_SD(newsock); pr_debug("%p -> %p\n", old_vcc, new_vcc); while (1) { DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && sigd) { if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) break; if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) { error = -sk->sk_err; break; } if (flags & O_NONBLOCK) { error = -EAGAIN; break; } release_sock(sk); schedule(); lock_sock(sk); if (signal_pending(current)) { error = -ERESTARTSYS; break; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!skb) { error = -EUNATCH; goto out; } msg = (struct atmsvc_msg *)skb->data; new_vcc->qos = msg->qos; set_bit(ATM_VF_HASQOS, &new_vcc->flags); new_vcc->remote = msg->svc; new_vcc->local = msg->local; new_vcc->sap = msg->sap; error = vcc_connect(newsock, msg->pvc.sap_addr.itf, msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); dev_kfree_skb(skb); sk->sk_ack_backlog--; if (error) { sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL, &old_vcc->qos, error); error = error == -EAGAIN ? 
-EBUSY : error; goto out; } /* wait should be short, so we ignore the non-blocking flag */ set_bit(ATM_VF_WAITING, &new_vcc->flags); prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { release_sock(sk); schedule(); lock_sock(sk); prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); if (!sigd) { error = -EUNATCH; goto out; } if (!sk_atm(new_vcc)->sk_err) break; if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) { error = -sk_atm(new_vcc)->sk_err; goto out; } } newsock->state = SS_CONNECTED; out: release_sock(sk); return error; } static int svc_getname(struct socket *sock, struct sockaddr *sockaddr, int *sockaddr_len, int peer) { struct sockaddr_atmsvc *addr; *sockaddr_len = sizeof(struct sockaddr_atmsvc); addr = (struct sockaddr_atmsvc *) sockaddr; memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, sizeof(struct sockaddr_atmsvc)); return 0; } int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) { struct sock *sk = sk_atm(vcc); DEFINE_WAIT(wait); set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); while (test_bit(ATM_VF_WAITING, &vcc->flags) && !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) return -EUNATCH; return -sk->sk_err; } static int svc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int value, error = 0; lock_sock(sk); switch (optname) { case SO_ATMSAP: if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_from_user(&vcc->sap, optval, optlen)) { error = -EFAULT; 
goto out; } set_bit(ATM_VF_HASSAP, &vcc->flags); break; case SO_MULTIPOINT: if (level != SOL_ATM || optlen != sizeof(int)) { error = -EINVAL; goto out; } if (get_user(value, (int __user *)optval)) { error = -EFAULT; goto out; } if (value == 1) set_bit(ATM_VF_SESSION, &vcc->flags); else if (value == 0) clear_bit(ATM_VF_SESSION, &vcc->flags); else error = -EINVAL; break; default: error = vcc_setsockopt(sock, level, optname, optval, optlen); } out: release_sock(sk); return error; } static int svc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int error = 0, len; lock_sock(sk); if (!__SO_LEVEL_MATCH(optname, level) || optname != SO_ATMSAP) { error = vcc_getsockopt(sock, level, optname, optval, optlen); goto out; } if (get_user(len, optlen)) { error = -EFAULT; goto out; } if (len != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_to_user(optval, &ATM_SD(sock)->sap, sizeof(struct atm_sap))) { error = -EFAULT; goto out; } out: release_sock(sk); return error; } static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq(vcc, as_addparty, NULL, NULL, (struct sockaddr_atmsvc *) sockaddr); if (flags & O_NONBLOCK) { finish_wait(sk_sleep(sk), &wait); error = -EINPROGRESS; goto out; } pr_debug("added wait queue\n"); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_dropparty(struct socket *sock, int ep_ref) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); 
set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int error, ep_ref; struct sockaddr_atmsvc sa; struct atm_vcc *vcc = ATM_SD(sock); switch (cmd) { case ATM_ADDPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) return -EFAULT; error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa), 0); break; case ATM_DROPPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) return -EFAULT; error = svc_dropparty(sock, ep_ref); break; default: error = vcc_ioctl(sock, cmd, arg); } return error; } #ifdef CONFIG_COMPAT static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. But actually it takes a struct sockaddr_atmsvc, which doesn't need compat handling. So all we have to do is fix up cmd... 
*/ if (cmd == COMPAT_ATM_ADDPARTY) cmd = ATM_ADDPARTY; if (cmd == ATM_ADDPARTY || cmd == ATM_DROPPARTY) return svc_ioctl(sock, cmd, arg); else return vcc_compat_ioctl(sock, cmd, arg); } #endif /* CONFIG_COMPAT */ static const struct proto_ops svc_proto_ops = { .family = PF_ATMSVC, .owner = THIS_MODULE, .release = svc_release, .bind = svc_bind, .connect = svc_connect, .socketpair = sock_no_socketpair, .accept = svc_accept, .getname = svc_getname, .poll = vcc_poll, .ioctl = svc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = svc_compat_ioctl, #endif .listen = svc_listen, .shutdown = svc_shutdown, .setsockopt = svc_setsockopt, .getsockopt = svc_getsockopt, .sendmsg = vcc_sendmsg, .recvmsg = vcc_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static int svc_create(struct net *net, struct socket *sock, int protocol, int kern) { int error; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; sock->ops = &svc_proto_ops; error = vcc_create(net, sock, protocol, AF_ATMSVC); if (error) return error; ATM_SD(sock)->local.sas_family = AF_ATMSVC; ATM_SD(sock)->remote.sas_family = AF_ATMSVC; return 0; } static const struct net_proto_family svc_family_ops = { .family = PF_ATMSVC, .create = svc_create, .owner = THIS_MODULE, }; /* * Initialize the ATM SVC protocol family */ int __init atmsvc_init(void) { return sock_register(&svc_family_ops); } void atmsvc_exit(void) { sock_unregister(PF_ATMSVC); }
gpl-2.0
boa19861105/Test-1-dlxp_ul
drivers/dma/mv_xor.c
4496
35674
/* * offload engine driver for the Marvell XOR engine * Copyright (C) 2007, 2008, Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <plat/mv_xor.h> #include "dmaengine.h" #include "mv_xor.h" static void mv_xor_issue_pending(struct dma_chan *chan); #define to_mv_xor_chan(chan) \ container_of(chan, struct mv_xor_chan, common) #define to_mv_xor_device(dev) \ container_of(dev, struct mv_xor_device, common) #define to_mv_xor_slot(tx) \ container_of(tx, struct mv_xor_desc_slot, async_tx) static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->status = (1 << 31); hw_desc->phy_next_desc = 0; hw_desc->desc_command = (1 << 31); } static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_dest_addr; } static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, int src_idx) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_src_addr[src_idx]; } static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { struct 
mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->byte_count = byte_count; } static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, u32 next_desc_addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; BUG_ON(hw_desc->phy_next_desc); hw_desc->phy_next_desc = next_desc_addr; } static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_next_desc = 0; } static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) { desc->value = val; } static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_dest_addr = addr; } static int mv_chan_memset_slot_count(size_t len) { return 1; } #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, int index, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_src_addr[index] = addr; if (desc->type == DMA_XOR) hw_desc->desc_command |= (1 << index); } static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) { return __raw_readl(XOR_CURR_DESC(chan)); } static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, u32 next_desc_addr) { __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); } static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) { __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); } static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) { __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); } static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) { __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); } static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { u32 val = __raw_readl(XOR_INTR_MASK(chan)); val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); __raw_writel(val, XOR_INTR_MASK(chan)); } static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) { 
u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; return intr_cause; } static int mv_is_err_intr(u32 intr_cause) { if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) return 1; return 0; } static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static int mv_can_chain(struct mv_xor_desc_slot *desc) { struct mv_xor_desc_slot *chain_old_tail = list_entry( desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); if (chain_old_tail->type != desc->type) return 0; if (desc->type == DMA_MEMSET) return 0; return 1; } static void mv_set_mode(struct mv_xor_chan *chan, enum dma_transaction_type type) { u32 op_mode; u32 config = __raw_readl(XOR_CONFIG(chan)); switch (type) { case DMA_XOR: op_mode = XOR_OPERATION_MODE_XOR; break; case DMA_MEMCPY: op_mode = XOR_OPERATION_MODE_MEMCPY; break; case DMA_MEMSET: op_mode = XOR_OPERATION_MODE_MEMSET; break; default: dev_printk(KERN_ERR, chan->device->common.dev, "error: unsupported operation %d.\n", type); BUG(); return; } config &= ~0x7; config |= op_mode; __raw_writel(config, XOR_CONFIG(chan)); chan->current_type = type; } static void mv_chan_activate(struct mv_xor_chan *chan) { u32 activation; dev_dbg(chan->device->common.dev, " activate chan.\n"); activation = __raw_readl(XOR_ACTIVATION(chan)); activation |= 0x1; __raw_writel(activation, XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) { u32 state = __raw_readl(XOR_ACTIVATION(chan)); state = (state >> 4) & 0x3; return (state == 1) ? 
1 : 0; } static int mv_chan_xor_slot_count(size_t len, int src_cnt) { return 1; } /** * mv_xor_free_slots - flags descriptor slots for reuse * @slot: Slot to free * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *slot) { dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", __func__, __LINE__, slot); slot->slots_per_op = 0; } /* * mv_xor_start_new_chain - program the engine to operate on new chain headed by * sw_desc * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *sw_desc) { dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", __func__, __LINE__, sw_desc); if (sw_desc->type != mv_chan->current_type) mv_set_mode(mv_chan, sw_desc->type); if (sw_desc->type == DMA_MEMSET) { /* for memset requests we need to program the engine, no * descriptors used. */ struct mv_xor_desc *hw_desc = sw_desc->hw_desc; mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); mv_chan_set_value(mv_chan, sw_desc->value); } else { /* set the hardware chain */ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); } mv_chan->pending += sw_desc->slot_cnt; mv_xor_issue_pending(&mv_chan->common); } static dma_cookie_t mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan, dma_cookie_t cookie) { BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; /* call the callback (must not sleep or submit new * operations to this channel) */ if (desc->async_tx.callback) desc->async_tx.callback( desc->async_tx.callback_param); /* unmap dma addresses * (unmap_single vs unmap_page?) 
*/ if (desc->group_head && desc->unmap_len) { struct mv_xor_desc_slot *unmap = desc->group_head; struct device *dev = &mv_chan->device->pdev->dev; u32 len = unmap->unmap_len; enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; dma_addr_t dest; src_cnt = unmap->unmap_src_cnt; dest = mv_desc_get_dest_addr(unmap); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { enum dma_data_direction dir; if (src_cnt > 1) /* is xor ? */ dir = DMA_BIDIRECTIONAL; else dir = DMA_FROM_DEVICE; dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { while (src_cnt--) { addr = mv_desc_get_src_addr(unmap, src_cnt); if (addr == dest) continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } } desc->group_head = NULL; } } /* run dependent operations */ dma_run_dependencies(&desc->async_tx); return cookie; } static int mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { if (async_tx_test_ack(&iter->async_tx)) { list_del(&iter->completed_node); mv_xor_free_slots(mv_chan, iter); } } return 0; } static int mv_xor_clean_slot(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan) { dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", __func__, __LINE__, desc, desc->async_tx.flags); list_del(&desc->chain_node); /* the client is allowed to attach dependent operations * until 'ack' is set */ if (!async_tx_test_ack(&desc->async_tx)) { /* move this slot to the completed_slots */ list_add_tail(&desc->completed_node, &mv_chan->completed_slots); return 0; } mv_xor_free_slots(mv_chan, desc); return 0; } static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; int busy = mv_chan_is_busy(mv_chan); u32 current_desc = mv_chan_get_current_desc(mv_chan); int seen_current = 0; 
dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); mv_xor_clean_completed_slots(mv_chan); /* free completed slots from the chain starting with * the oldest descriptor */ list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { prefetch(_iter); prefetch(&_iter->async_tx); /* do not advance past the current descriptor loaded into the * hardware channel, subsequent descriptors are either in * process or have not been submitted */ if (seen_current) break; /* stop the search if we reach the current descriptor and the * channel is busy */ if (iter->async_tx.phys == current_desc) { seen_current = 1; if (busy) break; } cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); if (mv_xor_clean_slot(iter, mv_chan)) break; } if ((busy == 0) && !list_empty(&mv_chan->chain)) { struct mv_xor_desc_slot *chain_head; chain_head = list_entry(mv_chan->chain.next, struct mv_xor_desc_slot, chain_node); mv_xor_start_new_chain(mv_chan, chain_head); } if (cookie > 0) mv_chan->common.completed_cookie = cookie; } static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { spin_lock_bh(&mv_chan->lock); __mv_xor_slot_cleanup(mv_chan); spin_unlock_bh(&mv_chan->lock); } static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; mv_xor_slot_cleanup(chan); } static struct mv_xor_desc_slot * mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, int slots_per_op) { struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; LIST_HEAD(chain); int slots_found, retry = 0; /* start search from the last allocated descrtiptor * if a contiguous allocation can not be found start searching * from the beginning of the list */ retry: slots_found = 0; if (retry == 0) iter = mv_chan->last_used; else iter = list_entry(&mv_chan->all_slots, struct mv_xor_desc_slot, slot_node); list_for_each_entry_safe_continue( iter, _iter, &mv_chan->all_slots, 
slot_node) { prefetch(_iter); prefetch(&_iter->async_tx); if (iter->slots_per_op) { /* give up after finding the first busy slot * on the second pass through the list */ if (retry) break; slots_found = 0; continue; } /* start the allocation if the slot is correctly aligned */ if (!slots_found++) alloc_start = iter; if (slots_found == num_slots) { struct mv_xor_desc_slot *alloc_tail = NULL; struct mv_xor_desc_slot *last_used = NULL; iter = alloc_start; while (num_slots) { int i; /* pre-ack all but the last descriptor */ async_tx_ack(&iter->async_tx); list_add_tail(&iter->chain_node, &chain); alloc_tail = iter; iter->async_tx.cookie = 0; iter->slot_cnt = num_slots; iter->xor_check_result = NULL; for (i = 0; i < slots_per_op; i++) { iter->slots_per_op = slots_per_op - i; last_used = iter; iter = list_entry(iter->slot_node.next, struct mv_xor_desc_slot, slot_node); } num_slots -= slots_per_op; } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; list_splice(&chain, &alloc_tail->tx_list); mv_chan->last_used = last_used; mv_desc_clear_next_desc(alloc_start); mv_desc_clear_next_desc(alloc_tail); return alloc_tail; } } if (!retry++) goto retry; /* try to free some slots if the allocation fails */ tasklet_schedule(&mv_chan->irq_tasklet); return NULL; } /************************ DMA engine API functions ****************************/ static dma_cookie_t mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) { struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); struct mv_xor_desc_slot *grp_start, *old_chain_tail; dma_cookie_t cookie; int new_hw_chain = 1; dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p: async_tx %p\n", __func__, sw_desc, &sw_desc->async_tx); grp_start = sw_desc->group_head; spin_lock_bh(&mv_chan->lock); cookie = dma_cookie_assign(tx); if (list_empty(&mv_chan->chain)) list_splice_init(&sw_desc->tx_list, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = 
list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, chain_node); list_splice_init(&grp_start->tx_list, &old_chain_tail->chain_node); if (!mv_can_chain(grp_start)) goto submit_done; dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); /* if the channel is not busy */ if (!mv_chan_is_busy(mv_chan)) { u32 current_desc = mv_chan_get_current_desc(mv_chan); /* * and the curren desc is the end of the chain before * the append, then we need to start the channel */ if (current_desc == old_chain_tail->async_tx.phys) new_hw_chain = 1; } } if (new_hw_chain) mv_xor_start_new_chain(mv_chan, grp_start); submit_done: spin_unlock_bh(&mv_chan->lock); return cookie; } /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { char *hw_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; struct mv_xor_platform_data *plat_data = mv_chan->device->pdev->dev.platform_data; int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; /* Allocate descriptor slots */ idx = mv_chan->slots_allocated; while (idx < num_descs_in_pool) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { printk(KERN_INFO "MV XOR Channel only initialized" " %d descriptor slots", idx); break; } hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); mv_chan->slots_allocated = idx; list_add_tail(&slot->slot_node, &mv_chan->all_slots); 
spin_unlock_bh(&mv_chan->lock); } if (mv_chan->slots_allocated && !mv_chan->last_used) mv_chan->last_used = list_entry(mv_chan->all_slots.next, struct mv_xor_desc_slot, slot_node); dev_dbg(mv_chan->device->common.dev, "allocated %d descriptor slots last_used: %p\n", mv_chan->slots_allocated, mv_chan->last_used); return mv_chan->slots_allocated ? : -ENOMEM; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x src %x len: %u flags: %ld\n", __func__, dest, src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memcpy_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMCPY; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_src_addr(grp_start, 0, src); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p\n", __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x len: %u flags: %ld\n", __func__, dest, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memset_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMSET; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_block_fill_val(grp_start, value); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan->device->common.dev, "%s src_cnt: %d len: dest %x %u flags: %ld\n", __func__, src_cnt, len, dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_XOR; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); /* the byte count field is the same as in memcpy desc*/ mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); sw_desc->unmap_src_cnt = src_cnt; sw_desc->unmap_len = len; while (src_cnt--) mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static void mv_xor_free_chan_resources(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *iter, *_iter; int in_use_descs = 0; mv_xor_slot_cleanup(mv_chan); spin_lock_bh(&mv_chan->lock); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { in_use_descs++; list_del(&iter->chain_node); } list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { in_use_descs++; list_del(&iter->completed_node); } list_for_each_entry_safe_reverse( iter, _iter, &mv_chan->all_slots, slot_node) { list_del(&iter->slot_node); kfree(iter); mv_chan->slots_allocated--; } mv_chan->last_used = NULL; dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", __func__, mv_chan->slots_allocated); spin_unlock_bh(&mv_chan->lock); if (in_use_descs) dev_err(mv_chan->device->common.dev, "freeing %d in use descriptors!\n", in_use_descs); } /** * mv_xor_status - poll the status of an XOR transaction * @chan: XOR channel handle * @cookie: XOR transaction identifier * @txstate: XOR transactions state holder (or NULL) */ static enum dma_status mv_xor_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) { mv_xor_clean_completed_slots(mv_chan); return ret; } mv_xor_slot_cleanup(mv_chan); return dma_cookie_status(chan, cookie, txstate); } static void mv_dump_xor_regs(struct mv_xor_chan *chan) { u32 val; val = __raw_readl(XOR_CONFIG(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "config 0x%08x.\n", val); val = __raw_readl(XOR_ACTIVATION(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "activation 0x%08x.\n", val); val = __raw_readl(XOR_INTR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr cause 0x%08x.\n", val); val = __raw_readl(XOR_INTR_MASK(chan)); dev_printk(KERN_ERR, 
chan->device->common.dev, "intr mask 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error cause 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_ADDR(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error addr 0x%08x.\n", val); } static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, u32 intr_cause) { if (intr_cause & (1 << 4)) { dev_dbg(chan->device->common.dev, "ignore this error\n"); return; } dev_printk(KERN_ERR, chan->device->common.dev, "error on chan %d. intr cause 0x%08x.\n", chan->idx, intr_cause); mv_dump_xor_regs(chan); BUG(); } static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) { struct mv_xor_chan *chan = data; u32 intr_cause = mv_chan_get_intr_cause(chan); dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); if (mv_is_err_intr(intr_cause)) mv_xor_err_interrupt_handler(chan, intr_cause); tasklet_schedule(&chan->irq_tasklet); mv_xor_device_clear_eoc_cause(chan); return IRQ_HANDLED; } static void mv_xor_issue_pending(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); if (mv_chan->pending >= MV_XOR_THRESHOLD) { mv_chan->pending = 0; mv_chan_activate(mv_chan); } } /* * Perform a transaction to verify the HW works. 
*/ #define MV_XOR_TEST_SIZE 2000 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) { int i; void *src, *dest; dma_addr_t src_dma, dest_dma; struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; int err = 0; struct mv_xor_chan *mv_chan; src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ for (i = 0; i < MV_XOR_TEST_SIZE; i++) ((u8 *) src)[i] = (u8)i; /* Start copy, using first DMA channel */ dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } dest_dma = dma_map_single(dma_chan->device->dev, dest, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); src_dma = dma_map_single(dma_chan->device->dev, src, MV_XOR_TEST_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, MV_XOR_TEST_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: mv_xor_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); return err; } #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ static int __devinit mv_xor_xor_self_test(struct mv_xor_device *device) { int i, src_idx; struct page *dest; struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct 
dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; struct mv_xor_chan *mv_chan; for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } } dest = alloc_page(GFP_KERNEL); if (!dest) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } /* Fill in src buffers */ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | (cmp_byte << 8) | cmp_byte; memset(page_address(dest), 0, PAGE_SIZE); dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } /* test xor */ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor failed compare, disabling." 
" index %d, data %x, expected %x\n", i, ptr[i], cmp_word); err = -ENODEV; goto free_resources; } } free_resources: mv_xor_free_chan_resources(dma_chan); out: src_idx = MV_XOR_NUM_SRC_TEST; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); return err; } static int __devexit mv_xor_remove(struct platform_device *dev) { struct mv_xor_device *device = platform_get_drvdata(dev); struct dma_chan *chan, *_chan; struct mv_xor_chan *mv_chan; struct mv_xor_platform_data *plat_data = dev->dev.platform_data; dma_async_device_unregister(&device->common); dma_free_coherent(&dev->dev, plat_data->pool_size, device->dma_desc_pool_virt, device->dma_desc_pool); list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { mv_chan = to_mv_xor_chan(chan); list_del(&chan->device_node); } return 0; } static int __devinit mv_xor_probe(struct platform_device *pdev) { int ret = 0; int irq; struct mv_xor_device *adev; struct mv_xor_chan *mv_chan; struct dma_device *dma_dev; struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; dma_dev = &adev->common; /* allocate coherent memory for hardware descriptors * note: writecombine gives slightly better performance, but * requires that we explicitly flush the writes */ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, plat_data->pool_size, &adev->dma_desc_pool, GFP_KERNEL); if (!adev->dma_desc_pool_virt) return -ENOMEM; adev->id = plat_data->hw_id; /* discover transaction capabilites from the platform data */ dma_dev->cap_mask = plat_data->cap_mask; adev->pdev = pdev; platform_set_drvdata(pdev, adev); adev->shared = platform_get_drvdata(plat_data->shared); INIT_LIST_HEAD(&dma_dev->channels); /* set base routines */ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; dma_dev->device_tx_status = mv_xor_status; 
dma_dev->device_issue_pending = mv_xor_issue_pending; dma_dev->dev = &pdev->dev; /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); if (!mv_chan) { ret = -ENOMEM; goto err_free_dma; } mv_chan->device = adev; mv_chan->idx = plat_data->hw_id; mv_chan->mmr_base = adev->shared->xor_base; if (!mv_chan->mmr_base) { ret = -ENOMEM; goto err_free_dma; } tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); /* clear errors before enabling interrupts */ mv_xor_device_clear_err_status(mv_chan); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_free_dma; } ret = devm_request_irq(&pdev->dev, irq, mv_xor_interrupt_handler, 0, dev_name(&pdev->dev), mv_chan); if (ret) goto err_free_dma; mv_chan_unmask_interrupts(mv_chan); mv_set_mode(mv_chan, DMA_MEMCPY); spin_lock_init(&mv_chan->lock); INIT_LIST_HEAD(&mv_chan->chain); INIT_LIST_HEAD(&mv_chan->completed_slots); INIT_LIST_HEAD(&mv_chan->all_slots); mv_chan->common.device = dma_dev; dma_cookie_init(&mv_chan->common); list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { ret = mv_xor_memcpy_self_test(adev); dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); if (ret) goto err_free_dma; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { ret = mv_xor_xor_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_dma; } dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " "( %s%s%s%s)\n", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? 
"fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); goto out; err_free_dma: dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, adev->dma_desc_pool_virt, adev->dma_desc_pool); out: return ret; } static void mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, const struct mbus_dram_target_info *dram) { void __iomem *base = msp->xor_base; u32 win_enable = 0; int i; for (i = 0; i < 8; i++) { writel(0, base + WINDOW_BASE(i)); writel(0, base + WINDOW_SIZE(i)); if (i < 4) writel(0, base + WINDOW_REMAP_HIGH(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); win_enable |= (1 << i); win_enable |= 3 << (16 + (2 * i)); } writel(win_enable, base + WINDOW_BAR_ENABLE(0)); writel(win_enable, base + WINDOW_BAR_ENABLE(1)); } static struct platform_driver mv_xor_driver = { .probe = mv_xor_probe, .remove = __devexit_p(mv_xor_remove), .driver = { .owner = THIS_MODULE, .name = MV_XOR_NAME, }, }; static int mv_xor_shared_probe(struct platform_device *pdev) { const struct mbus_dram_target_info *dram; struct mv_xor_shared_private *msp; struct resource *res; dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (!msp) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!msp->xor_base) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -ENODEV; msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!msp->xor_high_base) return -EBUSY; platform_set_drvdata(pdev, msp); /* * (Re-)program MBUS remapping 
windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv_xor_conf_mbus_windows(msp, dram); return 0; } static int mv_xor_shared_remove(struct platform_device *pdev) { return 0; } static struct platform_driver mv_xor_shared_driver = { .probe = mv_xor_shared_probe, .remove = mv_xor_shared_remove, .driver = { .owner = THIS_MODULE, .name = MV_XOR_SHARED_NAME, }, }; static int __init mv_xor_init(void) { int rc; rc = platform_driver_register(&mv_xor_shared_driver); if (!rc) { rc = platform_driver_register(&mv_xor_driver); if (rc) platform_driver_unregister(&mv_xor_shared_driver); } return rc; } module_init(mv_xor_init); /* it's currently unsafe to unload this module */ #if 0 static void __exit mv_xor_exit(void) { platform_driver_unregister(&mv_xor_driver); platform_driver_unregister(&mv_xor_shared_driver); return; } module_exit(mv_xor_exit); #endif MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); MODULE_LICENSE("GPL");
gpl-2.0
percy-g2/bbbandroid-kernel
drivers/hid/hid-saitek.c
5008
1785
/* * HID driver for Saitek devices, currently only the PS1000 (USB gamepad). * Fixes the HID report descriptor by removing a non-existent axis and * clearing the constant bit on the input reports for buttons and d-pad. * (This module is based on "hid-ortek".) * * Copyright (c) 2012 Andreas Hübner */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/kernel.h> #include "hid-ids.h" static __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 137 && rdesc[20] == 0x09 && rdesc[21] == 0x33 && rdesc[94] == 0x81 && rdesc[95] == 0x03 && rdesc[110] == 0x81 && rdesc[111] == 0x03) { hid_info(hdev, "Fixing up Saitek PS1000 report descriptor\n"); /* convert spurious axis to a "noop" Logical Minimum (0) */ rdesc[20] = 0x15; rdesc[21] = 0x00; /* clear constant bit on buttons and d-pad */ rdesc[95] = 0x02; rdesc[111] = 0x02; } return rdesc; } static const struct hid_device_id saitek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000)}, { } }; MODULE_DEVICE_TABLE(hid, saitek_devices); static struct hid_driver saitek_driver = { .name = "saitek", .id_table = saitek_devices, .report_fixup = saitek_report_fixup }; static int __init saitek_init(void) { return hid_register_driver(&saitek_driver); } static void __exit saitek_exit(void) { hid_unregister_driver(&saitek_driver); } module_init(saitek_init); module_exit(saitek_exit); MODULE_LICENSE("GPL");
gpl-2.0
jlopex/kernel_lge_mako
drivers/char/bsr.c
5520
9022
/* IBM POWER Barrier Synchronization Register Driver * * Copyright IBM Corporation 2008 * * Author: Sonny Rao <sonnyrao@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/cdev.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/pgtable.h> #include <asm/io.h> /* This driver exposes a special register which can be used for fast synchronization across a large SMP machine. The hardware is exposed as an array of bytes where each process will write to one of the bytes to indicate it has finished the current stage and this update is broadcast to all processors without having to bounce a cacheline between them. In POWER5 and POWER6 there is one of these registers per SMP, but it is presented in two forms; first, it is given as a whole and then as a number of smaller registers which alias to parts of the single whole register. This can potentially allow multiple groups of processes to each have their own private synchronization device. Note that this hardware *must* be written to using *only* single byte writes. 
It may be read using 1, 2, 4, or 8 byte loads which must be aligned since this region is treated as cache-inhibited processes should also use a full sync before and after writing to the BSR to ensure all stores and the BSR update have made it to all chips in the system */ /* This is arbitrary number, up to Power6 it's been 17 or fewer */ #define BSR_MAX_DEVS (32) struct bsr_dev { u64 bsr_addr; /* Real address */ u64 bsr_len; /* length of mem region we can map */ unsigned bsr_bytes; /* size of the BSR reg itself */ unsigned bsr_stride; /* interval at which BSR repeats in the page */ unsigned bsr_type; /* maps to enum below */ unsigned bsr_num; /* bsr id number for its type */ int bsr_minor; struct list_head bsr_list; dev_t bsr_dev; struct cdev bsr_cdev; struct device *bsr_device; char bsr_name[32]; }; static unsigned total_bsr_devs; static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs); static struct class *bsr_class; static int bsr_major; enum { BSR_8 = 0, BSR_16 = 1, BSR_64 = 2, BSR_128 = 3, BSR_4096 = 4, BSR_UNKNOWN = 5, BSR_MAX = 6, }; static unsigned bsr_types[BSR_MAX]; static ssize_t bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", bsr_dev->bsr_bytes); } static ssize_t bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", bsr_dev->bsr_stride); } static ssize_t bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%llu\n", bsr_dev->bsr_len); } static struct device_attribute bsr_dev_attrs[] = { __ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL), __ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL), __ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL), __ATTR_NULL }; static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long size = vma->vm_end - 
vma->vm_start; struct bsr_dev *dev = filp->private_data; int ret; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* check for the case of a small BSR device and map one 4k page for it*/ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE) ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12, vma->vm_page_prot); else if (size <= dev->bsr_len) ret = io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, size, vma->vm_page_prot); else return -EINVAL; if (ret) return -EAGAIN; return 0; } static int bsr_open(struct inode * inode, struct file * filp) { struct cdev *cdev = inode->i_cdev; struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev); filp->private_data = dev; return 0; } static const struct file_operations bsr_fops = { .owner = THIS_MODULE, .mmap = bsr_mmap, .open = bsr_open, .llseek = noop_llseek, }; static void bsr_cleanup_devs(void) { struct bsr_dev *cur, *n; list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) { if (cur->bsr_device) { cdev_del(&cur->bsr_cdev); device_del(cur->bsr_device); } list_del(&cur->bsr_list); kfree(cur); } } static int bsr_add_node(struct device_node *bn) { int bsr_stride_len, bsr_bytes_len, num_bsr_devs; const u32 *bsr_stride; const u32 *bsr_bytes; unsigned i; int ret = -ENODEV; bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len); bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len); if (!bsr_stride || !bsr_bytes || (bsr_stride_len != bsr_bytes_len)) { printk(KERN_ERR "bsr of-node has missing/incorrect property\n"); return ret; } num_bsr_devs = bsr_bytes_len / sizeof(u32); for (i = 0 ; i < num_bsr_devs; i++) { struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev), GFP_KERNEL); struct resource res; int result; if (!cur) { printk(KERN_ERR "Unable to alloc bsr dev\n"); ret = -ENOMEM; goto out_err; } result = of_address_to_resource(bn, i, &res); if (result < 0) { printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n"); kfree(cur); continue; } cur->bsr_minor 
= i + total_bsr_devs; cur->bsr_addr = res.start; cur->bsr_len = resource_size(&res); cur->bsr_bytes = bsr_bytes[i]; cur->bsr_stride = bsr_stride[i]; cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); /* if we have a bsr_len of > 4k and less then PAGE_SIZE (64k pages) */ /* we can only map 4k of it, so only advertise the 4k in sysfs */ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) cur->bsr_len = 4096; switch(cur->bsr_bytes) { case 8: cur->bsr_type = BSR_8; break; case 16: cur->bsr_type = BSR_16; break; case 64: cur->bsr_type = BSR_64; break; case 128: cur->bsr_type = BSR_128; break; case 4096: cur->bsr_type = BSR_4096; break; default: cur->bsr_type = BSR_UNKNOWN; } cur->bsr_num = bsr_types[cur->bsr_type]; snprintf(cur->bsr_name, 32, "bsr%d_%d", cur->bsr_bytes, cur->bsr_num); cdev_init(&cur->bsr_cdev, &bsr_fops); result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1); if (result) { kfree(cur); goto out_err; } cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev, cur, cur->bsr_name); if (IS_ERR(cur->bsr_device)) { printk(KERN_ERR "device_create failed for %s\n", cur->bsr_name); cdev_del(&cur->bsr_cdev); kfree(cur); goto out_err; } bsr_types[cur->bsr_type] = cur->bsr_num + 1; list_add_tail(&cur->bsr_list, &bsr_devs); } total_bsr_devs += num_bsr_devs; return 0; out_err: bsr_cleanup_devs(); return ret; } static int bsr_create_devs(struct device_node *bn) { int ret; while (bn) { ret = bsr_add_node(bn); if (ret) { of_node_put(bn); return ret; } bn = of_find_compatible_node(bn, NULL, "ibm,bsr"); } return 0; } static int __init bsr_init(void) { struct device_node *np; dev_t bsr_dev; int ret = -ENODEV; int result; np = of_find_compatible_node(NULL, NULL, "ibm,bsr"); if (!np) goto out_err; bsr_class = class_create(THIS_MODULE, "bsr"); if (IS_ERR(bsr_class)) { printk(KERN_ERR "class_create() failed for bsr_class\n"); goto out_err_1; } bsr_class->dev_attrs = bsr_dev_attrs; result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr"); bsr_major = MAJOR(bsr_dev); 
if (result < 0) { printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); goto out_err_2; } if ((ret = bsr_create_devs(np)) < 0) { np = NULL; goto out_err_3; } return 0; out_err_3: unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS); out_err_2: class_destroy(bsr_class); out_err_1: of_node_put(np); out_err: return ret; } static void __exit bsr_exit(void) { bsr_cleanup_devs(); if (bsr_class) class_destroy(bsr_class); if (bsr_major) unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS); } module_init(bsr_init); module_exit(bsr_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
gpl-2.0
Alucard24/Dorimanx-SG2-I9100-Kernel
drivers/staging/tidspbridge/pmgr/io.c
7824
2376
/* * io.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * IO manager interface: Manages IO between CHNL and msg_ctrl. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> /* ----------------------------------- This */ #include <ioobj.h> #include <dspbridge/io.h> /* * ======== io_create ======== * Purpose: * Create an IO manager object, responsible for managing IO between * CHNL and msg_ctrl */ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj, const struct io_attrs *mgr_attrts) { struct bridge_drv_interface *intf_fxns; struct io_mgr *hio_mgr = NULL; struct io_mgr_ *pio_mgr = NULL; int status = 0; *io_man = NULL; /* A memory base of 0 implies no memory base: */ if ((mgr_attrts->shm_base != 0) && (mgr_attrts->sm_length == 0)) status = -EINVAL; if (mgr_attrts->word_size == 0) status = -EINVAL; if (!status) { dev_get_intf_fxns(hdev_obj, &intf_fxns); /* Let Bridge channel module finish the create: */ status = (*intf_fxns->io_create) (&hio_mgr, hdev_obj, mgr_attrts); if (!status) { pio_mgr = (struct io_mgr_ *)hio_mgr; pio_mgr->intf_fxns = intf_fxns; pio_mgr->dev_obj = hdev_obj; /* Return the new channel manager handle: */ *io_man = hio_mgr; } } return status; } /* * ======== io_destroy ======== * Purpose: * Delete IO manager. 
*/ int io_destroy(struct io_mgr *hio_mgr) { struct bridge_drv_interface *intf_fxns; struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr; int status; intf_fxns = pio_mgr->intf_fxns; /* Let Bridge channel module destroy the io_mgr: */ status = (*intf_fxns->io_destroy) (hio_mgr); return status; }
gpl-2.0
jamieg71/kernel
arch/arm/plat-spear/shirq.c
7824
3326
/* * arch/arm/plat-spear/shirq.c * * SPEAr platform shared irq layer source file * * Copyright (C) 2009 ST Microelectronics * Viresh Kumar<viresh.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <plat/shirq.h> struct spear_shirq *shirq; static DEFINE_SPINLOCK(lock); static void shirq_irq_mask(struct irq_data *d) { struct spear_shirq *shirq = irq_data_get_irq_chip_data(d); u32 val, id = d->irq - shirq->dev_config[0].virq; unsigned long flags; if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1) return; spin_lock_irqsave(&lock, flags); val = readl(shirq->regs.base + shirq->regs.enb_reg); if (shirq->regs.reset_to_enb) val |= shirq->dev_config[id].enb_mask; else val &= ~(shirq->dev_config[id].enb_mask); writel(val, shirq->regs.base + shirq->regs.enb_reg); spin_unlock_irqrestore(&lock, flags); } static void shirq_irq_unmask(struct irq_data *d) { struct spear_shirq *shirq = irq_data_get_irq_chip_data(d); u32 val, id = d->irq - shirq->dev_config[0].virq; unsigned long flags; if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1) return; spin_lock_irqsave(&lock, flags); val = readl(shirq->regs.base + shirq->regs.enb_reg); if (shirq->regs.reset_to_enb) val &= ~(shirq->dev_config[id].enb_mask); else val |= shirq->dev_config[id].enb_mask; writel(val, shirq->regs.base + shirq->regs.enb_reg); spin_unlock_irqrestore(&lock, flags); } static struct irq_chip shirq_chip = { .name = "spear_shirq", .irq_ack = shirq_irq_mask, .irq_mask = shirq_irq_mask, .irq_unmask = shirq_irq_unmask, }; static void shirq_handler(unsigned irq, struct irq_desc *desc) { u32 i, val, mask; struct spear_shirq *shirq = irq_get_handler_data(irq); desc->irq_data.chip->irq_ack(&desc->irq_data); while ((val = 
readl(shirq->regs.base + shirq->regs.status_reg) & shirq->regs.status_reg_mask)) { for (i = 0; (i < shirq->dev_count) && val; i++) { if (!(shirq->dev_config[i].status_mask & val)) continue; generic_handle_irq(shirq->dev_config[i].virq); /* clear interrupt */ val &= ~shirq->dev_config[i].status_mask; if ((shirq->regs.clear_reg == -1) || shirq->dev_config[i].clear_mask == -1) continue; mask = readl(shirq->regs.base + shirq->regs.clear_reg); if (shirq->regs.reset_to_clear) mask &= ~shirq->dev_config[i].clear_mask; else mask |= shirq->dev_config[i].clear_mask; writel(mask, shirq->regs.base + shirq->regs.clear_reg); } } desc->irq_data.chip->irq_unmask(&desc->irq_data); } int spear_shirq_register(struct spear_shirq *shirq) { int i; if (!shirq || !shirq->dev_config || !shirq->regs.base) return -EFAULT; if (!shirq->dev_count) return -EINVAL; irq_set_chained_handler(shirq->irq, shirq_handler); for (i = 0; i < shirq->dev_count; i++) { irq_set_chip_and_handler(shirq->dev_config[i].virq, &shirq_chip, handle_simple_irq); set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID); irq_set_chip_data(shirq->dev_config[i].virq, shirq); } irq_set_handler_data(shirq->irq, shirq); return 0; }
gpl-2.0
allenway/PIS-kernel
net/ieee802154/netlink.c
9104
3030
/* * Netlink inteface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Maxim Osipov <maxim.osipov@siemens.com> */ #include <linux/kernel.h> #include <linux/gfp.h> #include <net/genetlink.h> #include <linux/nl802154.h> #include "ieee802154.h" static unsigned int ieee802154_seq_num; static DEFINE_SPINLOCK(ieee802154_seq_lock); struct genl_family nl802154_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = IEEE802154_NL_NAME, .version = 1, .maxattr = IEEE802154_ATTR_MAX, }; /* Requests to userspace */ struct sk_buff *ieee802154_nl_create(int flags, u8 req) { void *hdr; struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); unsigned long f; if (!msg) return NULL; spin_lock_irqsave(&ieee802154_seq_lock, f); hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, &nl802154_family, flags, req); spin_unlock_irqrestore(&ieee802154_seq_lock, f); if (!hdr) { nlmsg_free(msg); return NULL; } return msg; } int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) { /* XXX: nlh is right at the start of msg */ void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); if (genlmsg_end(msg, hdr) < 0) goto out; return genlmsg_multicast(msg, 0, group, GFP_ATOMIC); out: nlmsg_free(msg); return -ENOBUFS; } struct sk_buff 
*ieee802154_nl_new_reply(struct genl_info *info, int flags, u8 req) { void *hdr; struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!msg) return NULL; hdr = genlmsg_put_reply(msg, info, &nl802154_family, flags, req); if (!hdr) { nlmsg_free(msg); return NULL; } return msg; } int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) { /* XXX: nlh is right at the start of msg */ void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); if (genlmsg_end(msg, hdr) < 0) goto out; return genlmsg_reply(msg, info); out: nlmsg_free(msg); return -ENOBUFS; } int __init ieee802154_nl_init(void) { int rc; rc = genl_register_family(&nl802154_family); if (rc) goto fail; rc = nl802154_mac_register(); if (rc) goto fail; rc = nl802154_phy_register(); if (rc) goto fail; return 0; fail: genl_unregister_family(&nl802154_family); return rc; } void __exit ieee802154_nl_exit(void) { genl_unregister_family(&nl802154_family); }
gpl-2.0
Bdaman80/BDA-Iconia
tools/perf/util/event.c
145
21002
#include <linux/types.h> #include "event.h" #include "debug.h" #include "session.h" #include "sort.h" #include "string.h" #include "strlist.h" #include "thread.h" const char *event__name[] = { [0] = "TOTAL", [PERF_RECORD_MMAP] = "MMAP", [PERF_RECORD_LOST] = "LOST", [PERF_RECORD_COMM] = "COMM", [PERF_RECORD_EXIT] = "EXIT", [PERF_RECORD_THROTTLE] = "THROTTLE", [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", [PERF_RECORD_FORK] = "FORK", [PERF_RECORD_READ] = "READ", [PERF_RECORD_SAMPLE] = "SAMPLE", [PERF_RECORD_HEADER_ATTR] = "ATTR", [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", }; static pid_t event__synthesize_comm(pid_t pid, int full, event__handler_t process, struct perf_session *session) { event_t ev; char filename[PATH_MAX]; char bf[BUFSIZ]; FILE *fp; size_t size = 0; DIR *tasks; struct dirent dirent, *next; pid_t tgid = 0; snprintf(filename, sizeof(filename), "/proc/%d/status", pid); fp = fopen(filename, "r"); if (fp == NULL) { out_race: /* * We raced with a task exiting - just return: */ pr_debug("couldn't open %s\n", filename); return 0; } memset(&ev.comm, 0, sizeof(ev.comm)); while (!ev.comm.comm[0] || !ev.comm.pid) { if (fgets(bf, sizeof(bf), fp) == NULL) goto out_failure; if (memcmp(bf, "Name:", 5) == 0) { char *name = bf + 5; while (*name && isspace(*name)) ++name; size = strlen(name) - 1; memcpy(ev.comm.comm, name, size++); } else if (memcmp(bf, "Tgid:", 5) == 0) { char *tgids = bf + 5; while (*tgids && isspace(*tgids)) ++tgids; tgid = ev.comm.pid = atoi(tgids); } } ev.comm.header.type = PERF_RECORD_COMM; size = ALIGN(size, sizeof(u64)); ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size); if (!full) { ev.comm.tid = pid; process(&ev, session); goto out_fclose; } snprintf(filename, sizeof(filename), "/proc/%d/task", pid); tasks = opendir(filename); if (tasks == NULL) goto out_race; while (!readdir_r(tasks, &dirent, &next) && next) { char *end; pid = 
strtol(dirent.d_name, &end, 10); if (*end) continue; ev.comm.tid = pid; process(&ev, session); } closedir(tasks); out_fclose: fclose(fp); return tgid; out_failure: pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); return -1; } static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, event__handler_t process, struct perf_session *session) { char filename[PATH_MAX]; FILE *fp; snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); fp = fopen(filename, "r"); if (fp == NULL) { /* * We raced with a task exiting - just return: */ pr_debug("couldn't open %s\n", filename); return -1; } while (1) { char bf[BUFSIZ], *pbf = bf; event_t ev = { .header = { .type = PERF_RECORD_MMAP, /* * Just like the kernel, see __perf_event_mmap * in kernel/perf_event.c */ .misc = PERF_RECORD_MISC_USER, }, }; int n; size_t size; if (fgets(bf, sizeof(bf), fp) == NULL) break; /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ n = hex2u64(pbf, &ev.mmap.start); if (n < 0) continue; pbf += n + 1; n = hex2u64(pbf, &ev.mmap.len); if (n < 0) continue; pbf += n + 3; if (*pbf == 'x') { /* vm_exec */ char *execname = strchr(bf, '/'); /* Catch VDSO */ if (execname == NULL) execname = strstr(bf, "[vdso]"); if (execname == NULL) continue; pbf += 3; n = hex2u64(pbf, &ev.mmap.pgoff); size = strlen(execname); execname[size - 1] = '\0'; /* Remove \n */ memcpy(ev.mmap.filename, execname, size); size = ALIGN(size, sizeof(u64)); ev.mmap.len -= ev.mmap.start; ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); ev.mmap.pid = tgid; ev.mmap.tid = pid; process(&ev, session); } } fclose(fp); return 0; } int event__synthesize_modules(event__handler_t process, struct perf_session *session, struct machine *machine) { struct rb_node *nd; struct map_groups *kmaps = &machine->kmaps; u16 misc; /* * kernel uses 0 for user space maps, see kernel/perf_event.c * __perf_event_mmap */ if (machine__is_host(machine)) misc = PERF_RECORD_MISC_KERNEL; else misc = 
PERF_RECORD_MISC_GUEST_KERNEL; for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { event_t ev; size_t size; struct map *pos = rb_entry(nd, struct map, rb_node); if (pos->dso->kernel) continue; size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); memset(&ev, 0, sizeof(ev)); ev.mmap.header.misc = misc; ev.mmap.header.type = PERF_RECORD_MMAP; ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); ev.mmap.start = pos->start; ev.mmap.len = pos->end - pos->start; ev.mmap.pid = machine->pid; memcpy(ev.mmap.filename, pos->dso->long_name, pos->dso->long_name_len + 1); process(&ev, session); } return 0; } int event__synthesize_thread(pid_t pid, event__handler_t process, struct perf_session *session) { pid_t tgid = event__synthesize_comm(pid, 1, process, session); if (tgid == -1) return -1; return event__synthesize_mmap_events(pid, tgid, process, session); } void event__synthesize_threads(event__handler_t process, struct perf_session *session) { DIR *proc; struct dirent dirent, *next; proc = opendir("/proc"); while (!readdir_r(proc, &dirent, &next) && next) { char *end; pid_t pid = strtol(dirent.d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; event__synthesize_thread(pid, process, session); } closedir(proc); } struct process_symbol_args { const char *name; u64 start; }; static int find_symbol_cb(void *arg, const char *name, char type, u64 start) { struct process_symbol_args *args = arg; /* * Must be a function or at least an alias, as in PARISC64, where "_text" is * an 'A' to the same address as "_stext". 
*/ if (!(symbol_type__is_a(type, MAP__FUNCTION) || type == 'A') || strcmp(name, args->name)) return 0; args->start = start; return 1; } int event__synthesize_kernel_mmap(event__handler_t process, struct perf_session *session, struct machine *machine, const char *symbol_name) { size_t size; const char *filename, *mmap_name; char path[PATH_MAX]; char name_buff[PATH_MAX]; struct map *map; event_t ev = { .header = { .type = PERF_RECORD_MMAP, }, }; /* * We should get this from /sys/kernel/sections/.text, but till that is * available use this, and after it is use this as a fallback for older * kernels. */ struct process_symbol_args args = { .name = symbol_name, }; mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); if (machine__is_host(machine)) { /* * kernel uses PERF_RECORD_MISC_USER for user space maps, * see kernel/perf_event.c __perf_event_mmap */ ev.header.misc = PERF_RECORD_MISC_KERNEL; filename = "/proc/kallsyms"; } else { ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL; if (machine__is_default_guest(machine)) filename = (char *) symbol_conf.default_guest_kallsyms; else { sprintf(path, "%s/proc/kallsyms", machine->root_dir); filename = path; } } if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) return -ENOENT; map = machine->vmlinux_maps[MAP__FUNCTION]; size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), "%s%s", mmap_name, symbol_name) + 1; size = ALIGN(size, sizeof(u64)); ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); ev.mmap.pgoff = args.start; ev.mmap.start = map->start; ev.mmap.len = map->end - ev.mmap.start; ev.mmap.pid = machine->pid; return process(&ev, session); } static void thread__comm_adjust(struct thread *self, struct hists *hists) { char *comm = self->comm; if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && (!symbol_conf.comm_list || strlist__has_entry(symbol_conf.comm_list, comm))) { u16 slen = strlen(comm); if (hists__new_col_len(hists, HISTC_COMM, slen)) 
hists__set_col_len(hists, HISTC_THREAD, slen + 6); } } static int thread__set_comm_adjust(struct thread *self, const char *comm, struct hists *hists) { int ret = thread__set_comm(self, comm); if (ret) return ret; thread__comm_adjust(self, hists); return 0; } int event__process_comm(event_t *self, struct perf_session *session) { struct thread *thread = perf_session__findnew(session, self->comm.tid); dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid); if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm, &session->hists)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } return 0; } int event__process_lost(event_t *self, struct perf_session *session) { dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); session->hists.stats.total_lost += self->lost.lost; return 0; } static void event_set_kernel_mmap_len(struct map **maps, event_t *self) { maps[MAP__FUNCTION]->start = self->mmap.start; maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; /* * Be a bit paranoid here, some perf.data file came with * a zero sized synthesized MMAP event for the kernel. 
*/ if (maps[MAP__FUNCTION]->end == 0) maps[MAP__FUNCTION]->end = ~0UL; } static int event__process_kernel_mmap(event_t *self, struct perf_session *session) { struct map *map; char kmmap_prefix[PATH_MAX]; struct machine *machine; enum dso_kernel_type kernel_type; bool is_kernel_mmap; machine = perf_session__findnew_machine(session, self->mmap.pid); if (!machine) { pr_err("Can't find id %d's machine\n", self->mmap.pid); goto out_problem; } machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); if (machine__is_host(machine)) kernel_type = DSO_TYPE_KERNEL; else kernel_type = DSO_TYPE_GUEST_KERNEL; is_kernel_mmap = memcmp(self->mmap.filename, kmmap_prefix, strlen(kmmap_prefix)) == 0; if (self->mmap.filename[0] == '/' || (!is_kernel_mmap && self->mmap.filename[0] == '[')) { char short_module_name[1024]; char *name, *dot; if (self->mmap.filename[0] == '/') { name = strrchr(self->mmap.filename, '/'); if (name == NULL) goto out_problem; ++name; /* skip / */ dot = strrchr(name, '.'); if (dot == NULL) goto out_problem; snprintf(short_module_name, sizeof(short_module_name), "[%.*s]", (int)(dot - name), name); strxfrchar(short_module_name, '-', '_'); } else strcpy(short_module_name, self->mmap.filename); map = machine__new_module(machine, self->mmap.start, self->mmap.filename); if (map == NULL) goto out_problem; name = strdup(short_module_name); if (name == NULL) goto out_problem; map->dso->short_name = name; map->dso->sname_alloc = 1; map->end = map->start + self->mmap.len; } else if (is_kernel_mmap) { const char *symbol_name = (self->mmap.filename + strlen(kmmap_prefix)); /* * Should be there already, from the build-id table in * the header. 
*/ struct dso *kernel = __dsos__findnew(&machine->kernel_dsos, kmmap_prefix); if (kernel == NULL) goto out_problem; kernel->kernel = kernel_type; if (__machine__create_kernel_maps(machine, kernel) < 0) goto out_problem; event_set_kernel_mmap_len(machine->vmlinux_maps, self); perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, symbol_name, self->mmap.pgoff); if (machine__is_default_guest(machine)) { /* * preload dso of guest kernel and modules */ dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], NULL); } } return 0; out_problem: return -1; } int event__process_mmap(event_t *self, struct perf_session *session) { struct machine *machine; struct thread *thread; struct map *map; u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; int ret = 0; dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", self->mmap.pid, self->mmap.tid, self->mmap.start, self->mmap.len, self->mmap.pgoff, self->mmap.filename); if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || cpumode == PERF_RECORD_MISC_KERNEL) { ret = event__process_kernel_mmap(self, session); if (ret < 0) goto out_problem; return 0; } machine = perf_session__find_host_machine(session); if (machine == NULL) goto out_problem; thread = perf_session__findnew(session, self->mmap.pid); if (thread == NULL) goto out_problem; map = map__new(&machine->user_dsos, self->mmap.start, self->mmap.len, self->mmap.pgoff, self->mmap.pid, self->mmap.filename, MAP__FUNCTION); if (map == NULL) goto out_problem; thread__insert_map(thread, map); return 0; out_problem: dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); return 0; } int event__process_task(event_t *self, struct perf_session *session) { struct thread *thread = perf_session__findnew(session, self->fork.tid); struct thread *parent = perf_session__findnew(session, self->fork.ptid); dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, self->fork.ppid, self->fork.ptid); if (self->header.type == PERF_RECORD_EXIT) { 
perf_session__remove_thread(session, thread); return 0; } if (thread == NULL || parent == NULL || thread__fork(thread, parent) < 0) { dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); return -1; } return 0; } int event__process(event_t *event, struct perf_session *session) { switch (event->header.type) { case PERF_RECORD_COMM: event__process_comm(event, session); break; case PERF_RECORD_MMAP: event__process_mmap(event, session); break; case PERF_RECORD_FORK: case PERF_RECORD_EXIT: event__process_task(event, session); break; default: break; } return 0; } void thread__find_addr_map(struct thread *self, struct perf_session *session, u8 cpumode, enum map_type type, pid_t pid, u64 addr, struct addr_location *al) { struct map_groups *mg = &self->mg; struct machine *machine = NULL; al->thread = self; al->addr = addr; al->cpumode = cpumode; al->filtered = false; if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { al->level = 'k'; machine = perf_session__find_host_machine(session); if (machine == NULL) { al->map = NULL; return; } mg = &machine->kmaps; } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { al->level = '.'; machine = perf_session__find_host_machine(session); } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { al->level = 'g'; machine = perf_session__find_machine(session, pid); if (machine == NULL) { al->map = NULL; return; } mg = &machine->kmaps; } else { /* * 'u' means guest os user space. * TODO: We don't support guest user space. Might support late. 
*/ if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) al->level = 'u'; else al->level = 'H'; al->map = NULL; if ((cpumode == PERF_RECORD_MISC_GUEST_USER || cpumode == PERF_RECORD_MISC_GUEST_KERNEL) && !perf_guest) al->filtered = true; if ((cpumode == PERF_RECORD_MISC_USER || cpumode == PERF_RECORD_MISC_KERNEL) && !perf_host) al->filtered = true; return; } try_again: al->map = map_groups__find(mg, type, al->addr); if (al->map == NULL) { /* * If this is outside of all known maps, and is a negative * address, try to look it up in the kernel dso, as it might be * a vsyscall or vdso (which executes in user-mode). * * XXX This is nasty, we should have a symbol list in the * "[vdso]" dso, but for now lets use the old trick of looking * in the whole kernel symbol list. */ if ((long long)al->addr < 0 && cpumode == PERF_RECORD_MISC_KERNEL && machine && mg != &machine->kmaps) { mg = &machine->kmaps; goto try_again; } } else al->addr = al->map->map_ip(al->map, al->addr); } void thread__find_addr_location(struct thread *self, struct perf_session *session, u8 cpumode, enum map_type type, pid_t pid, u64 addr, struct addr_location *al, symbol_filter_t filter) { thread__find_addr_map(self, session, cpumode, type, pid, addr, al); if (al->map != NULL) al->sym = map__find_symbol(al->map, al->addr, filter); else al->sym = NULL; } static void dso__calc_col_width(struct dso *self, struct hists *hists) { if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && (!symbol_conf.dso_list || strlist__has_entry(symbol_conf.dso_list, self->name))) { u16 slen = dso__name_len(self); hists__new_col_len(hists, HISTC_DSO, slen); } self->slen_calculated = 1; } int event__preprocess_sample(const event_t *self, struct perf_session *session, struct addr_location *al, struct sample_data *data, symbol_filter_t filter) { u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; struct thread *thread; event__parse_sample(self, session->sample_type, data); dump_printf("(IP, %d): %d/%d: 
%#Lx period: %Ld cpu:%d\n", self->header.misc, data->pid, data->tid, data->ip, data->period, data->cpu); if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { unsigned int i; dump_printf("... chain: nr:%Lu\n", data->callchain->nr); if (!ip_callchain__valid(data->callchain, self)) { pr_debug("call-chain problem with event, " "skipping it.\n"); goto out_filtered; } if (dump_trace) { for (i = 0; i < data->callchain->nr; i++) dump_printf("..... %2d: %016Lx\n", i, data->callchain->ips[i]); } } thread = perf_session__findnew(session, self->ip.pid); if (thread == NULL) return -1; if (symbol_conf.comm_list && !strlist__has_entry(symbol_conf.comm_list, thread->comm)) goto out_filtered; dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); /* * Have we already created the kernel maps for the host machine? * * This should have happened earlier, when we processed the kernel MMAP * events, but for older perf.data files there was no such thing, so do * it now. */ if (cpumode == PERF_RECORD_MISC_KERNEL && session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL) machine__create_kernel_maps(&session->host_machine); thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, self->ip.pid, self->ip.ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : al->level == 'H' ? "[hypervisor]" : "<not found>"); al->sym = NULL; al->cpu = data->cpu; if (al->map) { if (symbol_conf.dso_list && (!al->map || !al->map->dso || !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) || (al->map->dso->short_name != al->map->dso->long_name && strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name))))) goto out_filtered; /* * We have to do this here as we may have a dso with no symbol * hit that has a name longer than the ones with symbols * sampled. 
*/ if (!sort_dso.elide && !al->map->dso->slen_calculated) dso__calc_col_width(al->map->dso, &session->hists); al->sym = map__find_symbol(al->map, al->addr, filter); } else { const unsigned int unresolved_col_width = BITS_PER_LONG / 4; if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width && !symbol_conf.col_width_list_str && !symbol_conf.field_sep && !symbol_conf.dso_list) hists__set_col_len(&session->hists, HISTC_DSO, unresolved_col_width); } if (symbol_conf.sym_list && al->sym && !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) goto out_filtered; return 0; out_filtered: al->filtered = true; return 0; } int event__parse_sample(const event_t *event, u64 type, struct sample_data *data) { const u64 *array = event->sample.array; if (type & PERF_SAMPLE_IP) { data->ip = event->ip.ip; array++; } if (type & PERF_SAMPLE_TID) { u32 *p = (u32 *)array; data->pid = p[0]; data->tid = p[1]; array++; } if (type & PERF_SAMPLE_TIME) { data->time = *array; array++; } if (type & PERF_SAMPLE_ADDR) { data->addr = *array; array++; } data->id = -1ULL; if (type & PERF_SAMPLE_ID) { data->id = *array; array++; } if (type & PERF_SAMPLE_STREAM_ID) { data->stream_id = *array; array++; } if (type & PERF_SAMPLE_CPU) { u32 *p = (u32 *)array; data->cpu = *p; array++; } else data->cpu = -1; if (type & PERF_SAMPLE_PERIOD) { data->period = *array; array++; } if (type & PERF_SAMPLE_READ) { pr_debug("PERF_SAMPLE_READ is unsuported for now\n"); return -1; } if (type & PERF_SAMPLE_CALLCHAIN) { data->callchain = (struct ip_callchain *)array; array += 1 + data->callchain->nr; } if (type & PERF_SAMPLE_RAW) { u32 *p = (u32 *)array; data->raw_size = *p; p++; data->raw_data = p; } return 0; }
gpl-2.0
omegamoon/rockchip-rk3188-mk908
drivers/net/wireless/mt5931/mgmt/rsn.c
145
91375
/* ** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/mgmt/rsn.c#2 $ */ /*! \file "rsn.c" \brief This file including the 802.11i, wpa and wpa2(rsn) related function. This file provided the macros and functions library support the wpa/rsn ie parsing, cipher and AKM check to help the AP seleced deciding, tkip mic error handler and rsn PMKID support. */ /* ** $Log: rsn.c $ * * 07 17 2012 yuche.tsai * NULL * Compile no error before trial run. * * 03 09 2012 chinglan.wang * NULL * Fix the condition error. * * 03 02 2012 terry.wu * NULL * Snc CFG80211 modification for ICS migration from branch 2.2. * * 03 02 2012 terry.wu * NULL * Sync CFG80211 modification from branch 2,2. * * 11 11 2011 wh.su * [WCXRP00001078] [MT6620 Wi-Fi][Driver] Adding the mediatek log improment support : XLOG * modify the xlog related code. * * 11 10 2011 wh.su * [WCXRP00001078] [MT6620 Wi-Fi][Driver] Adding the mediatek log improment support : XLOG * change the debug module level. * * 10 12 2011 wh.su * [WCXRP00001036] [MT6620 Wi-Fi][Driver][FW] Adding the 802.11w code for MFP * adding the 802.11w related function and define . * * 03 17 2011 chinglan.wang * [WCXRP00000570] [MT6620 Wi-Fi][Driver] Add Wi-Fi Protected Setup v2.0 feature * . * * 02 09 2011 wh.su * [WCXRP00000432] [MT6620 Wi-Fi][Driver] Add STA privacy check at hotspot mode * adding the code for check STA privacy bit at AP mode, . * * 12 24 2010 chinglan.wang * NULL * [MT6620][Wi-Fi] Modify the key management in the driver for WPS function. * * 12 13 2010 cp.wu * [WCXRP00000260] [MT6620 Wi-Fi][Driver][Firmware] Create V1.1 branch for both firmware and driver * create branch for Wi-Fi driver v1.1 * * 11 05 2010 wh.su * [WCXRP00000165] [MT6620 Wi-Fi] [Pre-authentication] Assoc req rsn ie use wrong pmkid value * fixed the.pmkid value mismatch issue * * 11 03 2010 wh.su * [WCXRP00000124] [MT6620 Wi-Fi] [Driver] Support the dissolve P2P Group * Refine the HT rate disallow TKIP pairwise cipher . 
* * 10 04 2010 cp.wu * [WCXRP00000077] [MT6620 Wi-Fi][Driver][FW] Eliminate use of ENUM_NETWORK_TYPE_T and replaced by ENUM_NETWORK_TYPE_INDEX_T only * remove ENUM_NETWORK_TYPE_T definitions * * 09 29 2010 yuche.tsai * NULL * Fix compile error, remove unused pointer in rsnGenerateRSNIE(). * * 09 28 2010 wh.su * NULL * [WCXRP00000069][MT6620 Wi-Fi][Driver] Fix some code for phase 1 P2P Demo. * * 09 24 2010 wh.su * NULL * [WCXRP00005002][MT6620 Wi-Fi][Driver] Eliminate Linux Compile Warning. * * 09 06 2010 wh.su * NULL * let the p2p can set the privacy bit at beacon and rsn ie at assoc req at key handshake state. * * 08 30 2010 wh.su * NULL * remove non-used code. * * 08 19 2010 wh.su * NULL * adding the tx pkt call back handle for countermeasure. * * 07 24 2010 wh.su * * .support the Wi-Fi RSN * * 07 08 2010 cp.wu * * [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository. * * 06 21 2010 wh.su * [WPD00003840][MT6620 5931] Security migration * modify some code for concurrent network. * * 06 21 2010 cp.wu * [WPD00003833][MT6620 and MT5931] Driver migration * [WPD00003833][MT6620 and MT5931] Driver migration * enable RX management frame handling. * * 06 19 2010 wh.su * [WPD00003840][MT6620 5931] Security migration * consdier the concurrent network setting. * * 06 18 2010 wh.su * [WPD00003840][MT6620 5931] Security migration * [WPD00003840] [MT6620 5931] Security migration * migration from firmware. * * 05 27 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * not indiate pmkid candidate while no new one scaned. * * 04 29 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * adjsut the pre-authentication code. * * 03 03 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * move the AIS specific variable for security to AIS specific structure. 
* * 03 03 2010 wh.su * [BORA00000637][MT6620 Wi-Fi] [Bug] WPA2 pre-authentication timer not correctly initialize * Fixed the pre-authentication timer not correctly init issue, and modify the security related callback function prototype. * * 01 27 2010 wh.su * [BORA00000476][Wi-Fi][firmware] Add the security module initialize code * add and fixed some security function. * * 12 18 2009 cm.chang * [BORA00000018]Integrate WIFI part into BORA for the 1st time * . * * Dec 8 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * change the name * * Dec 7 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * using the Rx0 port to indicate event * * Dec 4 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * refine the code for generate the WPA/RSN IE for assoc req * * Dec 3 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adjust code for pmkid event * * Dec 1 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adding the code for event (mic error and pmkid indicate) and do some function rename * * Nov 23 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adding some security function * * Nov 19 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * adding some security feature, including pmkid * * Nov 18 2009 mtk01088 * [BORA00000476] [Wi-Fi][firmware] Add the security module initialize code * ** */ /******************************************************************************* * C O M P I L E R F L A G S ******************************************************************************** */ /******************************************************************************* * E X T E R N A L R E F E R E N C E S ******************************************************************************** */ #include "precomp.h" 
#if CFG_RSN_MIGRATION //extern PHY_ATTRIBUTE_T rPhyAttributes[]; /******************************************************************************* * C O N S T A N T S ******************************************************************************** */ /******************************************************************************* * D A T A T Y P E S ******************************************************************************** */ /******************************************************************************* * P U B L I C D A T A ******************************************************************************** */ /******************************************************************************* * P R I V A T E D A T A ******************************************************************************** */ /******************************************************************************* * M A C R O S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N D E C L A R A T I O N S ******************************************************************************** */ /******************************************************************************* * F U N C T I O N S ******************************************************************************** */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to parse RSN IE. 
 *
 * \param[in] prInfoElem Pointer to the RSN IE
 * \param[out] prRsnInfo Pointer to the BSSDescription structure to store the
 **                      RSN information from the given RSN IE
 *
 * \retval TRUE - Succeeded
 * \retval FALSE - Failed
 */
/*----------------------------------------------------------------------------*/
BOOLEAN
rsnParseRsnIE (
    IN P_ADAPTER_T prAdapter,
    IN P_RSN_INFO_ELEM_T prInfoElem,
    OUT P_RSN_INFO_T prRsnInfo
    )
{
    UINT_32 i;
    INT_32 u4RemainRsnIeLen;    /* Octets of the IE body not yet consumed */
    UINT_16 u2Version;
    UINT_16 u2Cap = 0;
    /* Defaults used when the corresponding optional field is absent. */
    UINT_32 u4GroupSuite = RSN_CIPHER_SUITE_CCMP;
    UINT_16 u2PairSuiteCount = 0;
    UINT_16 u2AuthSuiteCount = 0;
    PUINT_8 pucPairSuite = NULL;
    PUINT_8 pucAuthSuite = NULL;
    PUINT_8 cp;

    DEBUGFUNC("rsnParseRsnIE");

    ASSERT(prInfoElem);
    ASSERT(prRsnInfo);

    /* Verify the length of the RSN IE: must at least hold the Version field. */
    if (prInfoElem->ucLength < 2) {
        DBGLOG(RSN, TRACE, ("RSN IE length too short (length=%d)\n",
            prInfoElem->ucLength));
        return FALSE;
    }

    /* Check RSN version: currently, we only support version 1. */
    WLAN_GET_FIELD_16(&prInfoElem->u2Version, &u2Version);
    if (u2Version != 1) {
        DBGLOG(RSN, TRACE, ("Unsupported RSN IE version: %d\n", u2Version));
        return FALSE;
    }

    cp = (PUCHAR) &prInfoElem->u4GroupKeyCipherSuite;
    u4RemainRsnIeLen = (INT_32) prInfoElem->ucLength - 2;

    /* All fields after Version are optional; stop parsing (not an error) as
       soon as the IE runs out, but reject a field that is only partially
       present. */
    do {
        if (u4RemainRsnIeLen == 0) {
            break;
        }

        /* Parse the Group Key Cipher Suite field. */
        if (u4RemainRsnIeLen < 4) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in group cipher suite (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_32(cp, &u4GroupSuite);
        cp += 4;
        u4RemainRsnIeLen -= 4;

        if (u4RemainRsnIeLen == 0) {
            break;
        }

        /* Parse the Pairwise Key Cipher Suite Count field. */
        if (u4RemainRsnIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in pairwise cipher suite count (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_16(cp, &u2PairSuiteCount);
        cp += 2;
        u4RemainRsnIeLen -= 2;

        /* Parse the Pairwise Key Cipher Suite List field (4 octets/suite). */
        i = (UINT_32) u2PairSuiteCount * 4;
        if (u4RemainRsnIeLen < (INT_32) i) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in pairwise cipher suite list (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        /* Remember the list position; the suites are copied out below only
           after the whole IE has been validated. */
        pucPairSuite = cp;

        cp += i;
        u4RemainRsnIeLen -= (INT_32) i;

        if (u4RemainRsnIeLen == 0) {
            break;
        }

        /* Parse the Authentication and Key Management Cipher Suite Count field. */
        if (u4RemainRsnIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in auth & key mgt suite count (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_16(cp, &u2AuthSuiteCount);
        cp += 2;
        u4RemainRsnIeLen -= 2;

        /* Parse the Authentication and Key Management Cipher Suite List field. */
        i = (UINT_32) u2AuthSuiteCount * 4;
        if (u4RemainRsnIeLen < (INT_32) i) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in auth & key mgt suite list (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        pucAuthSuite = cp;

        cp += i;
        u4RemainRsnIeLen -= (INT_32) i;

        if (u4RemainRsnIeLen == 0) {
            break;
        }

        /* Parse the RSN u2Capabilities field. */
        if (u4RemainRsnIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse RSN IE in RSN capabilities (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_16(cp, &u2Cap);
    } while (FALSE);

    /* Save the RSN information for the BSS. */
    prRsnInfo->ucElemId = ELEM_ID_RSN;
    prRsnInfo->u2Version = u2Version;
    prRsnInfo->u4GroupKeyCipherSuite = u4GroupSuite;

    DBGLOG(RSN, LOUD, ("RSN: version %d, group key cipher suite %02x-%02x-%02x-%02x\n",
        u2Version,
        (UCHAR) (u4GroupSuite & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 8) & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 16) & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 24) & 0x000000FF)));

    if (pucPairSuite) {
        /* The information about the pairwise key cipher suites is present.
           Clamp the advertised count to the local table size. */
        if (u2PairSuiteCount > MAX_NUM_SUPPORTED_CIPHER_SUITES) {
            u2PairSuiteCount = MAX_NUM_SUPPORTED_CIPHER_SUITES;
        }

        prRsnInfo->u4PairwiseKeyCipherSuiteCount = (UINT_32) u2PairSuiteCount;

        for (i = 0; i < (UINT_32) u2PairSuiteCount; i++) {
            WLAN_GET_FIELD_32(pucPairSuite, &prRsnInfo->au4PairwiseKeyCipherSuite[i]);
            pucPairSuite += 4;

            DBGLOG(RSN, LOUD, ("RSN: pairwise key cipher suite [%d]: %02x-%02x-%02x-%02x\n",
                (UINT_8)i,
                (UCHAR) (prRsnInfo->au4PairwiseKeyCipherSuite[i] & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[i] >> 8) & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[i] >> 16) & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[i] >> 24) & 0x000000FF)));
        }
    }
    else {
        /* The information about the pairwise key cipher suites is not present.
           Use the default cipher suite for RSN: CCMP. */
        prRsnInfo->u4PairwiseKeyCipherSuiteCount = 1;
        prRsnInfo->au4PairwiseKeyCipherSuite[0] = RSN_CIPHER_SUITE_CCMP;

        DBGLOG(RSN, LOUD, ("RSN: pairwise key cipher suite: %02x-%02x-%02x-%02x (default)\n",
            (UCHAR) (prRsnInfo->au4PairwiseKeyCipherSuite[0] & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[0] >> 8) & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[0] >> 16) & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4PairwiseKeyCipherSuite[0] >> 24) & 0x000000FF)));
    }

    if (pucAuthSuite) {
        /* The information about the authentication and key management suites
           is present; clamp the count to the local table size. */
        if (u2AuthSuiteCount > MAX_NUM_SUPPORTED_AKM_SUITES) {
            u2AuthSuiteCount = MAX_NUM_SUPPORTED_AKM_SUITES;
        }

        prRsnInfo->u4AuthKeyMgtSuiteCount = (UINT_32) u2AuthSuiteCount;

        for (i = 0; i < (UINT_32) u2AuthSuiteCount; i++) {
            WLAN_GET_FIELD_32(pucAuthSuite, &prRsnInfo->au4AuthKeyMgtSuite[i]);
            pucAuthSuite += 4;

            DBGLOG(RSN, LOUD, ("RSN: AKM suite [%d]: %02x-%02x-%02x-%02x\n",
                (UINT_8)i,
                (UCHAR) (prRsnInfo->au4AuthKeyMgtSuite[i] & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[i] >> 8) & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[i] >> 16) & 0x000000FF),
                (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[i] >> 24) & 0x000000FF)));
        }
    }
    else {
        /* The information about the authentication and key management suites
           is not present. Use the default AKM suite for RSN. */
        prRsnInfo->u4AuthKeyMgtSuiteCount = 1;
        prRsnInfo->au4AuthKeyMgtSuite[0] = RSN_AKM_SUITE_802_1X;

        DBGLOG(RSN, LOUD, ("RSN: AKM suite: %02x-%02x-%02x-%02x (default)\n",
            (UCHAR) (prRsnInfo->au4AuthKeyMgtSuite[0] & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[0] >> 8) & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[0] >> 16) & 0x000000FF),
            (UCHAR) ((prRsnInfo->au4AuthKeyMgtSuite[0] >> 24) & 0x000000FF)));
    }

    prRsnInfo->u2RsnCap = u2Cap;
#if CFG_SUPPORT_802_11W
    /* NOTE(review): fgRsnCapPresent is set unconditionally here even when the
       optional RSN Capabilities field was absent from the IE (u2Cap stays 0)
       -- confirm this is intended for the 802.11w path. */
    prRsnInfo->fgRsnCapPresent = TRUE;
#endif
    DBGLOG(RSN, LOUD, ("RSN cap: 0x%04x\n", prRsnInfo->u2RsnCap));

    return TRUE;
}   /* rsnParseRsnIE */


/*----------------------------------------------------------------------------*/
/*!
 * \brief This routine is called to parse WPA IE.
 *
 * \param[in] prInfoElem Pointer to the WPA IE.
 * \param[out] prWpaInfo Pointer to the BSSDescription structure to store the
 *                       WPA information from the given WPA IE.
 *
 * \retval TRUE Succeeded.
 * \retval FALSE Failed.
 */
/*----------------------------------------------------------------------------*/
BOOLEAN
rsnParseWpaIE (
    IN P_ADAPTER_T prAdapter,
    IN P_WPA_INFO_ELEM_T prInfoElem,
    OUT P_RSN_INFO_T prWpaInfo
    )
{
    UINT_32 i;
    INT_32 u4RemainWpaIeLen;    /* Octets of the IE body not yet consumed */
    UINT_16 u2Version;
    UINT_16 u2Cap = 0;
    /* Defaults used when the corresponding optional field is absent. */
    UINT_32 u4GroupSuite = WPA_CIPHER_SUITE_TKIP;
    UINT_16 u2PairSuiteCount = 0;
    UINT_16 u2AuthSuiteCount = 0;
    PUCHAR pucPairSuite = NULL;
    PUCHAR pucAuthSuite = NULL;
    PUCHAR cp;
    BOOLEAN fgCapPresent = FALSE;

    DEBUGFUNC("rsnParseWpaIE");

    ASSERT(prInfoElem);
    ASSERT(prWpaInfo);

    /* Verify the length of the WPA IE: must at least hold the OUI/Type
       (4 octets) and Version (2 octets) fields. */
    if (prInfoElem->ucLength < 6) {
        DBGLOG(RSN, TRACE, ("WPA IE length too short (length=%d)\n",
            prInfoElem->ucLength));
        return FALSE;
    }

    /* Check WPA version: currently, we only support version 1. */
    WLAN_GET_FIELD_16(&prInfoElem->u2Version, &u2Version);
    if (u2Version != 1) {
        DBGLOG(RSN, TRACE, ("Unsupported WPA IE version: %d\n", u2Version));
        return FALSE;
    }

    cp = (PUCHAR) &prInfoElem->u4GroupKeyCipherSuite;
    u4RemainWpaIeLen = (INT_32) prInfoElem->ucLength - 6;

    /* All fields after Version are optional; stop parsing (not an error) as
       soon as the IE runs out, but reject a field that is only partially
       present. */
    do {
        if (u4RemainWpaIeLen == 0) {
            break;
        }

        /* IE body layout (octets):
              WPA_OUI      : 4
              Version      : 2
              GroupSuite   : 4
              PairwiseCount: 2
              PairwiseSuite: 4 * pairSuiteCount
              AuthCount    : 2
              AuthSuite    : 4 * authSuiteCount
              Cap          : 2
        */

        /* Parse the Group Key Cipher Suite field. */
        if (u4RemainWpaIeLen < 4) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in group cipher suite (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_32(cp, &u4GroupSuite);
        cp += 4;
        u4RemainWpaIeLen -= 4;

        if (u4RemainWpaIeLen == 0) {
            break;
        }

        /* Parse the Pairwise Key Cipher Suite Count field. */
        if (u4RemainWpaIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in pairwise cipher suite count (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_16(cp, &u2PairSuiteCount);
        cp += 2;
        u4RemainWpaIeLen -= 2;

        /* Parse the Pairwise Key Cipher Suite List field (4 octets/suite). */
        i = (UINT_32) u2PairSuiteCount * 4;
        if (u4RemainWpaIeLen < (INT_32) i) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in pairwise cipher suite list (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        /* Remember the list position; suites are copied out after the whole
           IE has been validated. */
        pucPairSuite = cp;

        cp += i;
        u4RemainWpaIeLen -= (INT_32) i;

        if (u4RemainWpaIeLen == 0) {
            break;
        }

        /* Parse the Authentication and Key Management Cipher Suite Count field. */
        if (u4RemainWpaIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in auth & key mgt suite count (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        WLAN_GET_FIELD_16(cp, &u2AuthSuiteCount);
        cp += 2;
        u4RemainWpaIeLen -= 2;

        /* Parse the Authentication and Key Management Cipher Suite List field. */
        i = (UINT_32) u2AuthSuiteCount * 4;
        if (u4RemainWpaIeLen < (INT_32) i) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in auth & key mgt suite list (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        pucAuthSuite = cp;

        cp += i;
        u4RemainWpaIeLen -= (INT_32) i;

        if (u4RemainWpaIeLen == 0) {
            break;
        }

        /* Parse the WPA u2Capabilities field. */
        if (u4RemainWpaIeLen < 2) {
            DBGLOG(RSN, TRACE, ("Fail to parse WPA IE in WPA capabilities (IE len: %d)\n",
                prInfoElem->ucLength));
            return FALSE;
        }

        /* Unlike the RSN parser, presence of the capabilities field is
           tracked explicitly here. */
        fgCapPresent = TRUE;
        WLAN_GET_FIELD_16(cp, &u2Cap);
        u4RemainWpaIeLen -= 2;
    } while (FALSE);

    /* Save the WPA information for the BSS. */
    prWpaInfo->ucElemId = ELEM_ID_WPA;
    prWpaInfo->u2Version = u2Version;
    prWpaInfo->u4GroupKeyCipherSuite = u4GroupSuite;

    DBGLOG(RSN, LOUD, ("WPA: version %d, group key cipher suite %02x-%02x-%02x-%02x\n",
        u2Version,
        (UCHAR) (u4GroupSuite & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 8) & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 16) & 0x000000FF),
        (UCHAR) ((u4GroupSuite >> 24) & 0x000000FF)));

    if (pucPairSuite) {
        /* The information about the pairwise key cipher suites is present.
           Clamp the advertised count to the local table size. */
        if (u2PairSuiteCount > MAX_NUM_SUPPORTED_CIPHER_SUITES) {
            u2PairSuiteCount = MAX_NUM_SUPPORTED_CIPHER_SUITES;
        }

        prWpaInfo->u4PairwiseKeyCipherSuiteCount = (UINT_32) u2PairSuiteCount;

        for (i = 0; i < (UINT_32) u2PairSuiteCount; i++) {
            WLAN_GET_FIELD_32(pucPairSuite, &prWpaInfo->au4PairwiseKeyCipherSuite[i]);
            pucPairSuite += 4;

            DBGLOG(RSN, LOUD, ("WPA: pairwise key cipher suite [%d]: %02x-%02x-%02x-%02x\n",
                (UINT_8)i,
                (UCHAR) (prWpaInfo->au4PairwiseKeyCipherSuite[i] & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[i] >> 8) & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[i] >> 16) & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[i] >> 24) & 0x000000FF)));
        }
    }
    else {
        /* The information about the pairwise key cipher suites is not present.
           Use the default cipher suite for WPA: TKIP. */
        prWpaInfo->u4PairwiseKeyCipherSuiteCount = 1;
        prWpaInfo->au4PairwiseKeyCipherSuite[0] = WPA_CIPHER_SUITE_TKIP;

        DBGLOG(RSN, LOUD, ("WPA: pairwise key cipher suite: %02x-%02x-%02x-%02x (default)\n",
            (UCHAR) (prWpaInfo->au4PairwiseKeyCipherSuite[0] & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[0] >> 8) & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[0] >> 16) & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4PairwiseKeyCipherSuite[0] >> 24) & 0x000000FF)));
    }

    if (pucAuthSuite) {
        /* The information about the authentication and key management suites
           is present; clamp the count to the local table size. */
        if (u2AuthSuiteCount > MAX_NUM_SUPPORTED_AKM_SUITES) {
            u2AuthSuiteCount = MAX_NUM_SUPPORTED_AKM_SUITES;
        }

        prWpaInfo->u4AuthKeyMgtSuiteCount = (UINT_32) u2AuthSuiteCount;

        for (i = 0; i < (UINT_32) u2AuthSuiteCount; i++) {
            WLAN_GET_FIELD_32(pucAuthSuite, &prWpaInfo->au4AuthKeyMgtSuite[i]);
            pucAuthSuite += 4;

            DBGLOG(RSN, LOUD, ("WPA: AKM suite [%d]: %02x-%02x-%02x-%02x\n",
                (UINT_8)i,
                (UCHAR) (prWpaInfo->au4AuthKeyMgtSuite[i] & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[i] >> 8) & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[i] >> 16) & 0x000000FF),
                (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[i] >> 24) & 0x000000FF)));
        }
    }
    else {
        /* The information about the authentication and key management suites
           is not present. Use the default AKM suite for WPA. */
        prWpaInfo->u4AuthKeyMgtSuiteCount = 1;
        prWpaInfo->au4AuthKeyMgtSuite[0] = WPA_AKM_SUITE_802_1X;

        DBGLOG(RSN, LOUD, ("WPA: AKM suite: %02x-%02x-%02x-%02x (default)\n",
            (UCHAR) (prWpaInfo->au4AuthKeyMgtSuite[0] & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[0] >> 8) & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[0] >> 16) & 0x000000FF),
            (UCHAR) ((prWpaInfo->au4AuthKeyMgtSuite[0] >> 24) & 0x000000FF)));
    }

    if (fgCapPresent) {
        prWpaInfo->fgRsnCapPresent = TRUE;
        prWpaInfo->u2RsnCap = u2Cap;
        DBGLOG(RSN, LOUD, ("WPA: RSN cap: 0x%04x\n", prWpaInfo->u2RsnCap));
    }
    else {
        prWpaInfo->fgRsnCapPresent = FALSE;
        prWpaInfo->u2RsnCap = 0;
    }

    return TRUE;
}   /* rsnParseWpaIE */


/*----------------------------------------------------------------------------*/
/*!
 * \brief This routine is called to search the desired pairwise
 *        cipher suite from the MIB Pairwise Cipher Suite
 *        configuration table.
 *
 * \param[in] u4Cipher The desired pairwise cipher suite to be searched
 * \param[out] pu4Index Pointer to the index of the desired pairwise cipher in
 *                      the table
 *
 * \retval TRUE - The desired pairwise cipher suite is found in the table.
 * \retval FALSE - The desired pairwise cipher suite is not found in the
 *                 table.
*/ /*----------------------------------------------------------------------------*/ BOOLEAN rsnSearchSupportedCipher ( IN P_ADAPTER_T prAdapter, IN UINT_32 u4Cipher, OUT PUINT_32 pu4Index ) { UINT_8 i; P_DOT11_RSNA_CONFIG_PAIRWISE_CIPHERS_ENTRY prEntry; DEBUGFUNC("rsnSearchSupportedCipher"); ASSERT(pu4Index); for (i = 0; i < MAX_NUM_SUPPORTED_CIPHER_SUITES; i++) { prEntry = &prAdapter->rMib.dot11RSNAConfigPairwiseCiphersTable[i]; if (prEntry->dot11RSNAConfigPairwiseCipher == u4Cipher && prEntry->dot11RSNAConfigPairwiseCipherEnabled) { *pu4Index = i; return TRUE; } } return FALSE; } /* rsnSearchSupportedCipher */ /*----------------------------------------------------------------------------*/ /*! * * \brief This routine is called to search the desired * authentication and key management (AKM) suite from the * MIB Authentication and Key Management Suites table. * * \param[in] u4AkmSuite The desired AKM suite to be searched * \param[out] pu4Index Pointer to the index of the desired AKM suite in the * table * * \retval TRUE The desired AKM suite is found in the table. * \retval FALSE The desired AKM suite is not found in the table. * * \note */ /*----------------------------------------------------------------------------*/ BOOLEAN rsnSearchAKMSuite ( IN P_ADAPTER_T prAdapter, IN UINT_32 u4AkmSuite, OUT PUINT_32 pu4Index ) { UINT_8 i; P_DOT11_RSNA_CONFIG_AUTHENTICATION_SUITES_ENTRY prEntry; DEBUGFUNC("rsnSearchAKMSuite"); ASSERT(pu4Index); for (i = 0; i < MAX_NUM_SUPPORTED_AKM_SUITES; i++) { prEntry = &prAdapter->rMib.dot11RSNAConfigAuthenticationSuitesTable[i]; if (prEntry->dot11RSNAConfigAuthenticationSuite == u4AkmSuite && prEntry->dot11RSNAConfigAuthenticationSuiteEnabled) { *pu4Index = i; return TRUE; } } return FALSE; } /* rsnSearchAKMSuite */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to perform RSNA or TSN policy * selection for a given BSS. 
* * \param[in] prBss Pointer to the BSS description * * \retval TRUE - The RSNA/TSN policy selection for the given BSS is * successful. The selected pairwise and group cipher suites * are returned in the BSS description. * \retval FALSE - The RSNA/TSN policy selection for the given BSS is failed. * The driver shall not attempt to join the given BSS. * * \note The Encrypt status matched score will save to bss for final ap select. */ /*----------------------------------------------------------------------------*/ BOOLEAN rsnPerformPolicySelection ( IN P_ADAPTER_T prAdapter, IN P_BSS_DESC_T prBss ) { #if CFG_SUPPORT_802_11W INT_32 i; UINT_32 j; #else UINT_32 i, j; #endif BOOLEAN fgSuiteSupported; UINT_32 u4PairwiseCipher = 0; UINT_32 u4GroupCipher = 0; UINT_32 u4AkmSuite = 0; P_RSN_INFO_T prBssRsnInfo; ENUM_NETWORK_TYPE_INDEX_T eNetwotkType; BOOLEAN fgIsWpsActive = (BOOLEAN)FALSE; DEBUGFUNC("rsnPerformPolicySelection"); ASSERT(prBss); DBGLOG(RSN, TRACE, ("rsnPerformPolicySelection\n")); //Todo:: eNetwotkType = NETWORK_TYPE_AIS_INDEX; prBss->u4RsnSelectedPairwiseCipher = 0; prBss->u4RsnSelectedGroupCipher = 0; prBss->u4RsnSelectedAKMSuite = 0; prBss->ucEncLevel = 0; #if CFG_SUPPORT_WPS fgIsWpsActive = kalWSCGetActiveState(prAdapter->prGlueInfo); /* CR1640, disable the AP select privacy check */ if ( fgIsWpsActive && (prAdapter->rWifiVar.rConnSettings.eAuthMode < AUTH_MODE_WPA) && (prAdapter->rWifiVar.rConnSettings.eOPMode == NET_TYPE_INFRA)) { DBGLOG(RSN, TRACE,("-- Skip the Protected BSS check\n")); return TRUE; } #endif /* Protection is not required in this BSS. */ if ((prBss->u2CapInfo & CAP_INFO_PRIVACY) == 0 ) { if (secEnabledInAis(prAdapter) == FALSE) { DBGLOG(RSN, TRACE,("-- No Protected BSS\n")); return TRUE; } else { DBGLOG(RSN, TRACE,("-- Protected BSS\n")); return FALSE; } } /* Protection is required in this BSS. 
*/ if ((prBss->u2CapInfo & CAP_INFO_PRIVACY) != 0) { if (secEnabledInAis(prAdapter) == FALSE) { DBGLOG(RSN, TRACE,("-- Protected BSS\n")); return FALSE; } } if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA || prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA_PSK || prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA_NONE) { if (prBss->fgIEWPA) { prBssRsnInfo = &prBss->rWPAInfo; } else { DBGLOG(RSN, TRACE, ("WPA Information Element does not exist.\n")); return FALSE; } } else if (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2 || prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2_PSK) { if (prBss->fgIERSN) { prBssRsnInfo = &prBss->rRSNInfo; } else { DBGLOG(RSN, TRACE, ("RSN Information Element does not exist.\n")); return FALSE; } } else if (prAdapter->rWifiVar.rConnSettings.eEncStatus != ENUM_ENCRYPTION1_ENABLED) { /* If the driver is configured to use WEP only, ignore this BSS. */ DBGLOG(RSN, TRACE, ("-- Not WEP-only legacy BSS\n")); return FALSE; } else if (prAdapter->rWifiVar.rConnSettings.eEncStatus == ENUM_ENCRYPTION1_ENABLED) { /* If the driver is configured to use WEP only, use this BSS. */ DBGLOG(RSN, TRACE, ("-- WEP-only legacy BSS\n")); return TRUE; } if (prBssRsnInfo->u4PairwiseKeyCipherSuiteCount == 1 && GET_SELECTOR_TYPE(prBssRsnInfo->au4PairwiseKeyCipherSuite[0]) == CIPHER_SUITE_NONE) { /* Since the pairwise cipher use the same cipher suite as the group cipher in the BSS, we check the group cipher suite against the current encryption status. 
*/ fgSuiteSupported = FALSE; switch (prBssRsnInfo->u4GroupKeyCipherSuite) { case WPA_CIPHER_SUITE_CCMP: case RSN_CIPHER_SUITE_CCMP: if (prAdapter->rWifiVar.rConnSettings.eEncStatus == ENUM_ENCRYPTION3_ENABLED) { fgSuiteSupported = TRUE; } break; case WPA_CIPHER_SUITE_TKIP: case RSN_CIPHER_SUITE_TKIP: if (prAdapter->rWifiVar.rConnSettings.eEncStatus == ENUM_ENCRYPTION2_ENABLED) { fgSuiteSupported = TRUE; } break; case WPA_CIPHER_SUITE_WEP40: case WPA_CIPHER_SUITE_WEP104: if (prAdapter->rWifiVar.rConnSettings.eEncStatus == ENUM_ENCRYPTION1_ENABLED) { fgSuiteSupported = TRUE; } break; } if (fgSuiteSupported) { u4PairwiseCipher = WPA_CIPHER_SUITE_NONE; u4GroupCipher = prBssRsnInfo->u4GroupKeyCipherSuite; } #if DBG else { DBGLOG(RSN, TRACE, ("Inproper encryption status %d for group-key-only BSS\n", prAdapter->rWifiVar.rConnSettings.eEncStatus)); } #endif } else { fgSuiteSupported = FALSE; DBGLOG(RSN, TRACE, ("eEncStatus %d %d 0x%x\n", prAdapter->rWifiVar.rConnSettings.eEncStatus, prBssRsnInfo->u4PairwiseKeyCipherSuiteCount, prBssRsnInfo->au4PairwiseKeyCipherSuite[0])); /* Select pairwise/group ciphers */ switch (prAdapter->rWifiVar.rConnSettings.eEncStatus) { case ENUM_ENCRYPTION3_ENABLED: for (i = 0; i < prBssRsnInfo->u4PairwiseKeyCipherSuiteCount; i++) { if (GET_SELECTOR_TYPE(prBssRsnInfo->au4PairwiseKeyCipherSuite[i]) == CIPHER_SUITE_CCMP) { u4PairwiseCipher = prBssRsnInfo->au4PairwiseKeyCipherSuite[i]; } } u4GroupCipher = prBssRsnInfo->u4GroupKeyCipherSuite; break; case ENUM_ENCRYPTION2_ENABLED: for (i = 0; i < prBssRsnInfo->u4PairwiseKeyCipherSuiteCount; i++) { if (GET_SELECTOR_TYPE(prBssRsnInfo->au4PairwiseKeyCipherSuite[i]) == CIPHER_SUITE_TKIP) { u4PairwiseCipher = prBssRsnInfo->au4PairwiseKeyCipherSuite[i]; } } if (GET_SELECTOR_TYPE(prBssRsnInfo->u4GroupKeyCipherSuite) == CIPHER_SUITE_CCMP) { DBGLOG(RSN, TRACE, ("Cannot join CCMP BSS\n")); } else { u4GroupCipher = prBssRsnInfo->u4GroupKeyCipherSuite; } break; case ENUM_ENCRYPTION1_ENABLED: for (i = 0; i < 
prBssRsnInfo->u4PairwiseKeyCipherSuiteCount; i++) { if (GET_SELECTOR_TYPE(prBssRsnInfo->au4PairwiseKeyCipherSuite[i]) == CIPHER_SUITE_WEP40 || GET_SELECTOR_TYPE(prBssRsnInfo->au4PairwiseKeyCipherSuite[i]) == CIPHER_SUITE_WEP104) { u4PairwiseCipher = prBssRsnInfo->au4PairwiseKeyCipherSuite[i]; } } if (GET_SELECTOR_TYPE(prBssRsnInfo->u4GroupKeyCipherSuite) == CIPHER_SUITE_CCMP || GET_SELECTOR_TYPE(prBssRsnInfo->u4GroupKeyCipherSuite) == CIPHER_SUITE_TKIP) { DBGLOG(RSN, TRACE, ("Cannot join CCMP/TKIP BSS\n")); } else { u4GroupCipher = prBssRsnInfo->u4GroupKeyCipherSuite; } break; default: break; } } /* Exception handler */ /* If we cannot find proper pairwise and group cipher suites to join the BSS, do not check the supported AKM suites. */ if (u4PairwiseCipher == 0 || u4GroupCipher == 0) { DBGLOG(RSN, TRACE, ("Failed to select pairwise/group cipher (0x%08lx/0x%08lx)\n", u4PairwiseCipher, u4GroupCipher)); return FALSE; } #if CFG_ENABLE_WIFI_DIRECT if ((prAdapter->fgIsP2PRegistered) && (eNetwotkType == NETWORK_TYPE_P2P_INDEX)) { if (u4PairwiseCipher != RSN_CIPHER_SUITE_CCMP || u4GroupCipher != RSN_CIPHER_SUITE_CCMP || u4AkmSuite != RSN_AKM_SUITE_PSK) { DBGLOG(RSN, TRACE, ("Failed to select pairwise/group cipher for P2P network (0x%08lx/0x%08lx)\n", u4PairwiseCipher, u4GroupCipher)); return FALSE; } } #endif #if CFG_ENABLE_BT_OVER_WIFI if (eNetwotkType == NETWORK_TYPE_BOW_INDEX) { if (u4PairwiseCipher != RSN_CIPHER_SUITE_CCMP || u4GroupCipher != RSN_CIPHER_SUITE_CCMP || u4AkmSuite != RSN_AKM_SUITE_PSK) { } DBGLOG(RSN, TRACE, ("Failed to select pairwise/group cipher for BT over Wi-Fi network (0x%08lx/0x%08lx)\n", u4PairwiseCipher, u4GroupCipher)); return FALSE; } #endif /* Verify if selected pairwisse cipher is supported */ fgSuiteSupported = rsnSearchSupportedCipher(prAdapter, u4PairwiseCipher, &i); /* Verify if selected group cipher is supported */ if (fgSuiteSupported) { fgSuiteSupported = rsnSearchSupportedCipher(prAdapter, u4GroupCipher, &i); } if 
(!fgSuiteSupported) { DBGLOG(RSN, TRACE, ("Failed to support selected pairwise/group cipher (0x%08lx/0x%08lx)\n", u4PairwiseCipher, u4GroupCipher)); return FALSE; } /* Select AKM */ /* If the driver cannot support any authentication suites advertised in the given BSS, we fail to perform RSNA policy selection. */ /* Attempt to find any overlapping supported AKM suite. */ #if CFG_SUPPORT_802_11W if (i != 0) for (i = (prBssRsnInfo->u4AuthKeyMgtSuiteCount - 1); i >= 0; i--) #else for (i = 0; i < prBssRsnInfo->u4AuthKeyMgtSuiteCount; i++) #endif { if (rsnSearchAKMSuite(prAdapter, prBssRsnInfo->au4AuthKeyMgtSuite[i], &j)) { u4AkmSuite = prBssRsnInfo->au4AuthKeyMgtSuite[i]; break; } } if (u4AkmSuite == 0) { DBGLOG(RSN, TRACE, ("Cannot support any AKM suites\n")); return FALSE; } DBGLOG(RSN, TRACE, ("Selected pairwise/group cipher: %02x-%02x-%02x-%02x/%02x-%02x-%02x-%02x\n", (UINT_8) (u4PairwiseCipher & 0x000000FF), (UINT_8) ((u4PairwiseCipher >> 8) & 0x000000FF), (UINT_8) ((u4PairwiseCipher >> 16) & 0x000000FF), (UINT_8) ((u4PairwiseCipher >> 24) & 0x000000FF), (UINT_8) (u4GroupCipher & 0x000000FF), (UINT_8) ((u4GroupCipher >> 8) & 0x000000FF), (UINT_8) ((u4GroupCipher >> 16) & 0x000000FF), (UINT_8) ((u4GroupCipher >> 24) & 0x000000FF))); DBGLOG(RSN, TRACE, ("Selected AKM suite: %02x-%02x-%02x-%02x\n", (UINT_8) (u4AkmSuite & 0x000000FF), (UINT_8) ((u4AkmSuite >> 8) & 0x000000FF), (UINT_8) ((u4AkmSuite >> 16) & 0x000000FF), (UINT_8) ((u4AkmSuite >> 24) & 0x000000FF))); #if CFG_SUPPORT_802_11W DBGLOG(RSN, TRACE, ("MFP setting = %d\n ", kalGetMfpSetting(prAdapter->prGlueInfo))); if (kalGetMfpSetting(prAdapter->prGlueInfo) == RSN_AUTH_MFP_REQUIRED) { if (!prBssRsnInfo->fgRsnCapPresent) { DBGLOG(RSN, TRACE, ("Skip RSN IE, No MFP Required Capability.\n")); return FALSE; } else if (!(prBssRsnInfo->u2RsnCap & ELEM_WPA_CAP_MFPC)) { DBGLOG(RSN, TRACE, ("Skip RSN IE, No MFP Required\n")); return FALSE; } prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection = TRUE; } else if 
(kalGetMfpSetting(prAdapter->prGlueInfo) == RSN_AUTH_MFP_OPTIONAL) { if (prBssRsnInfo->u2RsnCap && ((prBssRsnInfo->u2RsnCap & ELEM_WPA_CAP_MFPR) || (prBssRsnInfo->u2RsnCap & ELEM_WPA_CAP_MFPC))) { prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection = TRUE; } else { prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection = FALSE; } } else { if (prBssRsnInfo->fgRsnCapPresent && (prBssRsnInfo->u2RsnCap & ELEM_WPA_CAP_MFPR)) { DBGLOG(RSN, TRACE, ("Skip RSN IE, No MFP Required Capability\n")); return FALSE; } prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection = FALSE; } DBGLOG(RSN, TRACE, ("fgMgmtProtection = %d\n ", prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection)); #endif if (GET_SELECTOR_TYPE(u4GroupCipher) == CIPHER_SUITE_CCMP){ prBss->ucEncLevel = 3; } else if (GET_SELECTOR_TYPE(u4GroupCipher) == CIPHER_SUITE_TKIP){ prBss->ucEncLevel = 2; } else if (GET_SELECTOR_TYPE(u4GroupCipher) == CIPHER_SUITE_WEP40 || GET_SELECTOR_TYPE(u4GroupCipher) == CIPHER_SUITE_WEP104) { prBss->ucEncLevel = 1; } else { ASSERT(FALSE); } prBss->u4RsnSelectedPairwiseCipher = u4PairwiseCipher; prBss->u4RsnSelectedGroupCipher = u4GroupCipher; prBss->u4RsnSelectedAKMSuite = u4AkmSuite; return TRUE; } /* rsnPerformPolicySelection */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to generate WPA IE for beacon frame. * * \param[in] pucIeStartAddr Pointer to put the generated WPA IE. 
 *
 * \return The append WPA-None IE length
 * \note
 *      Called by: JOIN module, compose beacon IE
 */
/*----------------------------------------------------------------------------*/
VOID
rsnGenerateWpaNoneIE (
    IN P_ADAPTER_T prAdapter,
    IN P_MSDU_INFO_T prMsduInfo
    )
{
    UINT_32 i;
    P_WPA_INFO_ELEM_T prWpaIE;
    UINT_32 u4Suite;
    UINT_16 u2SuiteCount;
    PUINT_8 cp, cp2;
    UINT_8 ucExpendedLen = 0;   /* Running length of the IE body being built */
    PUINT_8 pucBuffer;
    ENUM_NETWORK_TYPE_INDEX_T eNetworkId;

    DEBUGFUNC("rsnGenerateWpaNoneIE");

    ASSERT(prMsduInfo);

    /* Only emitted for WPA-None (ad-hoc) authentication mode. */
    if (prAdapter->rWifiVar.rConnSettings.eAuthMode != AUTH_MODE_WPA_NONE) {
        return;
    }

    eNetworkId = (ENUM_NETWORK_TYPE_INDEX_T)prMsduInfo->ucNetworkType;

    if (eNetworkId != NETWORK_TYPE_AIS_INDEX)
        return;

    /* Append the IE at the current end of the frame under construction. */
    pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength);

    ASSERT(pucBuffer);

    prWpaIE = (P_WPA_INFO_ELEM_T)(pucBuffer);

    /* Start to construct a WPA IE. */
    /* Fill the Element ID field. */
    prWpaIE->ucElemId = ELEM_ID_WPA;

    /* Fill the OUI and OUI Type fields (Microsoft OUI 00:50:F2, type WPA). */
    prWpaIE->aucOui[0] = 0x00;
    prWpaIE->aucOui[1] = 0x50;
    prWpaIE->aucOui[2] = 0xF2;
    prWpaIE->ucOuiType = VENDOR_OUI_TYPE_WPA;

    /* Fill the Version field. */
    WLAN_SET_FIELD_16(&prWpaIE->u2Version, 1); /* version 1 */
    ucExpendedLen = 6;  /* OUI (3) + OUI type (1) + Version (2) */

    /* Fill the Pairwise Key Cipher Suite List field. */
    u2SuiteCount = 0;
    cp = (PUINT_8)&prWpaIE->aucPairwiseKeyCipherSuite1[0];

    /* Pick the strongest cipher enabled in the MIB, strongest first;
       fall back to TKIP when nothing is enabled. */
    if (rsnSearchSupportedCipher(prAdapter, WPA_CIPHER_SUITE_CCMP, &i)) {
        u4Suite = WPA_CIPHER_SUITE_CCMP;
    }
    else if (rsnSearchSupportedCipher(prAdapter, WPA_CIPHER_SUITE_TKIP, &i)) {
        u4Suite = WPA_CIPHER_SUITE_TKIP;
    }
    else if (rsnSearchSupportedCipher(prAdapter, WPA_CIPHER_SUITE_WEP104, &i)) {
        u4Suite = WPA_CIPHER_SUITE_WEP104;
    }
    else if (rsnSearchSupportedCipher(prAdapter, WPA_CIPHER_SUITE_WEP40, &i)) {
        u4Suite = WPA_CIPHER_SUITE_WEP40;
    }
    else {
        u4Suite = WPA_CIPHER_SUITE_TKIP;
    }

    WLAN_SET_FIELD_32(cp, u4Suite);
    u2SuiteCount++;
    ucExpendedLen += 4;
    cp += 4;

    /* Fill the Group Key Cipher Suite field as the same in pair-wise key. */
    WLAN_SET_FIELD_32(&prWpaIE->u4GroupKeyCipherSuite, u4Suite);
    ucExpendedLen += 4;

    /* Fill the Pairwise Key Cipher Suite Count field. */
    WLAN_SET_FIELD_16(&prWpaIE->u2PairwiseKeyCipherSuiteCount, u2SuiteCount);
    ucExpendedLen += 2;

    /* Remember where the AKM suite count goes; it is written after the list. */
    cp2 = cp;

    /* Fill the Authentication and Key Management Suite List field. */
    u2SuiteCount = 0;
    cp += 2;

    if (rsnSearchAKMSuite(prAdapter, WPA_AKM_SUITE_802_1X, &i)) {
        u4Suite = WPA_AKM_SUITE_802_1X;
    }
    else if (rsnSearchAKMSuite(prAdapter, WPA_AKM_SUITE_PSK, &i)) {
        u4Suite = WPA_AKM_SUITE_PSK;
    }
    else {
        u4Suite = WPA_AKM_SUITE_NONE;
    }

    /* This shall be the only available value for current implementation.
       NOTE(review): the ASSERT fires if the MIB has 802.1X or PSK enabled
       while in WPA-None mode -- confirm the MIB is constrained accordingly. */
    ASSERT(u4Suite == WPA_AKM_SUITE_NONE);

    WLAN_SET_FIELD_32(cp, u4Suite);
    u2SuiteCount++;
    ucExpendedLen += 4;
    cp += 4;

    /* Fill the Authentication and Key Management Suite Count field. */
    WLAN_SET_FIELD_16(cp2, u2SuiteCount);
    ucExpendedLen += 2;

    /* Fill the Length field. */
    prWpaIE->ucLength = (UINT_8)ucExpendedLen;

    /* Increment the total IE length for the Element ID and Length fields. */
    prMsduInfo->u2FrameLength += IE_SIZE(pucBuffer);

}   /* rsnGenerateWpaNoneIE */


/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to generate WPA IE for
 *        associate request frame.
 *
 * \param[in] prCurrentBss The Selected BSS description
 *
 * \retval The append WPA IE length
 *
 * \note
 *      Called by: AIS module, Associate request
 */
/*----------------------------------------------------------------------------*/
VOID
rsnGenerateWPAIE (
    IN P_ADAPTER_T prAdapter,
    IN P_MSDU_INFO_T prMsduInfo
    )
{
    PUCHAR cp;
    PUINT_8 pucBuffer;
    ENUM_NETWORK_TYPE_INDEX_T eNetworkId;

    DEBUGFUNC("rsnGenerateWPAIE");

    ASSERT(prMsduInfo);

    /* Append the IE at the current end of the frame under construction. */
    pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength);

    ASSERT(pucBuffer);

    eNetworkId = (ENUM_NETWORK_TYPE_INDEX_T)prMsduInfo->ucNetworkType;

    //if (eNetworkId != NETWORK_TYPE_AIS_INDEX)
    //    return;

    /* Emit the IE either for a P2P network negotiating TKIP, or for an AIS
       connection configured for WPA/WPA-PSK authentication. */
#if CFG_ENABLE_WIFI_DIRECT
    if ((1 /* prCurrentBss->fgIEWPA */ &&
         ((prAdapter->fgIsP2PRegistered) &&
          (eNetworkId == NETWORK_TYPE_P2P_INDEX) &&
          (kalP2PGetTkipCipher(prAdapter->prGlueInfo)))) ||
        ((prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA) ||
         (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA_PSK)))
#else
    if ((1 /* prCurrentBss->fgIEWPA */ &&
         ((prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA) ||
          (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA_PSK))))
#endif
    {
        /* Construct a WPA IE for association request frame. */
        WPA_IE(pucBuffer)->ucElemId = ELEM_ID_WPA;
        WPA_IE(pucBuffer)->ucLength = ELEM_ID_WPA_LEN_FIXED;
        /* Microsoft OUI 00:50:F2, type WPA. */
        WPA_IE(pucBuffer)->aucOui[0] = 0x00;
        WPA_IE(pucBuffer)->aucOui[1] = 0x50;
        WPA_IE(pucBuffer)->aucOui[2] = 0xF2;
        WPA_IE(pucBuffer)->ucOuiType = VENDOR_OUI_TYPE_WPA;
        WLAN_SET_FIELD_16(&WPA_IE(pucBuffer)->u2Version, 1);

        /* Group key cipher suite: fixed TKIP for P2P, otherwise the suite
           selected by policy selection for the AIS network. */
#if CFG_ENABLE_WIFI_DIRECT
        if (prAdapter->fgIsP2PRegistered && eNetworkId == NETWORK_TYPE_P2P_INDEX) {
            WLAN_SET_FIELD_32(&WPA_IE(pucBuffer)->u4GroupKeyCipherSuite, WPA_CIPHER_SUITE_TKIP);
        }
        else
#endif
        WLAN_SET_FIELD_32(&WPA_IE(pucBuffer)->u4GroupKeyCipherSuite,
            prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX].u4RsnSelectedGroupCipher);

        cp = (PUCHAR) &WPA_IE(pucBuffer)->aucPairwiseKeyCipherSuite1[0];

        /* Exactly one pairwise key cipher suite is advertised. */
        WLAN_SET_FIELD_16(&WPA_IE(pucBuffer)->u2PairwiseKeyCipherSuiteCount, 1);
#if CFG_ENABLE_WIFI_DIRECT
        if (prAdapter->fgIsP2PRegistered && eNetworkId == NETWORK_TYPE_P2P_INDEX) {
            WLAN_SET_FIELD_32(cp, WPA_CIPHER_SUITE_TKIP);
        }
        else
#endif
        WLAN_SET_FIELD_32(cp,
            prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX].u4RsnSelectedPairwiseCipher);
        cp += 4;

        /* Exactly one AKM suite is advertised. */
        WLAN_SET_FIELD_16(cp, 1);
        cp += 2;
#if CFG_ENABLE_WIFI_DIRECT
        if (prAdapter->fgIsP2PRegistered && eNetworkId == NETWORK_TYPE_P2P_INDEX) {
            WLAN_SET_FIELD_32(cp, WPA_AKM_SUITE_PSK);
        }
        else
#endif
        WLAN_SET_FIELD_32(cp,
            prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX].u4RsnSelectedAKMSuite);
        cp += 4;

        /* ucLength was already set above; rewritten here (same fixed value). */
        WPA_IE(pucBuffer)->ucLength = ELEM_ID_WPA_LEN_FIXED;

        prMsduInfo->u2FrameLength += IE_SIZE(pucBuffer);
    }

}   /* rsnGenerateWPAIE */


/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to generate RSN IE for
 *        associate request frame.
* * \param[in] prMsduInfo The Selected BSS description * * \retval The append RSN IE length * * \note * Called by: AIS module, P2P module, BOW module Associate request */ /*----------------------------------------------------------------------------*/ VOID rsnGenerateRSNIE ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { UINT_32 u4Entry; PUCHAR cp; //UINT_8 ucExpendedLen = 0; PUINT_8 pucBuffer; ENUM_NETWORK_TYPE_INDEX_T eNetworkId; P_STA_RECORD_T prStaRec; DEBUGFUNC("rsnGenerateRSNIE"); ASSERT(prMsduInfo); pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength); ASSERT(pucBuffer); /* Todo:: network id */ eNetworkId = (ENUM_NETWORK_TYPE_INDEX_T)prMsduInfo->ucNetworkType; if ( #if CFG_ENABLE_WIFI_DIRECT ((prAdapter->fgIsP2PRegistered) && (eNetworkId == NETWORK_TYPE_P2P_INDEX) && (kalP2PGetCcmpCipher(prAdapter->prGlueInfo))) || #endif #if CFG_ENABLE_BT_OVER_WIFI (eNetworkId == NETWORK_TYPE_BOW_INDEX) || #endif (eNetworkId == NETWORK_TYPE_AIS_INDEX /* prCurrentBss->fgIERSN */ && ((prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) || (prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2_PSK)))) { /* Construct a RSN IE for association request frame. 
*/ RSN_IE(pucBuffer)->ucElemId = ELEM_ID_RSN; RSN_IE(pucBuffer)->ucLength = ELEM_ID_RSN_LEN_FIXED; WLAN_SET_FIELD_16(&RSN_IE(pucBuffer)->u2Version, 1); // Version WLAN_SET_FIELD_32(&RSN_IE(pucBuffer)->u4GroupKeyCipherSuite, prAdapter->rWifiVar.arBssInfo[eNetworkId].u4RsnSelectedGroupCipher); // Group key suite cp = (PUCHAR) &RSN_IE(pucBuffer)->aucPairwiseKeyCipherSuite1[0]; WLAN_SET_FIELD_16(&RSN_IE(pucBuffer)->u2PairwiseKeyCipherSuiteCount, 1); WLAN_SET_FIELD_32(cp, prAdapter->rWifiVar.arBssInfo[eNetworkId].u4RsnSelectedPairwiseCipher); cp += 4; WLAN_SET_FIELD_16(cp, 1); // AKM suite count cp += 2; WLAN_SET_FIELD_32(cp, prAdapter->rWifiVar.arBssInfo[eNetworkId].u4RsnSelectedAKMSuite); // AKM suite cp += 4; WLAN_SET_FIELD_16(cp, prAdapter->rWifiVar.arBssInfo[eNetworkId].u2RsnSelectedCapInfo); // Capabilities #if CFG_SUPPORT_802_11W if (eNetworkId == NETWORK_TYPE_AIS_INDEX && prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection) { if (kalGetMfpSetting(prAdapter->prGlueInfo) == RSN_AUTH_MFP_REQUIRED) { WLAN_SET_FIELD_16(cp, ELEM_WPA_CAP_MFPC | ELEM_WPA_CAP_MFPR); // Capabilities } else if (kalGetMfpSetting(prAdapter->prGlueInfo) == RSN_AUTH_MFP_OPTIONAL) { WLAN_SET_FIELD_16(cp, ELEM_WPA_CAP_MFPC); // Capabilities } } #endif cp += 2; if (eNetworkId == NETWORK_TYPE_AIS_INDEX) prStaRec = cnmGetStaRecByIndex(prAdapter, prMsduInfo->ucStaRecIndex); if (eNetworkId == NETWORK_TYPE_AIS_INDEX && rsnSearchPmkidEntry(prAdapter, prStaRec->aucMacAddr, &u4Entry)) { //DBGLOG(RSN, TRACE, ("Add Pmk at assoc req\n")); //DBGLOG(RSN, TRACE, ("addr " MACSTR" PMKID "MACSTR"\n", // MAC2STR(prAdapter->rWifiVar.rAisSpecificBssInfo.arPmkidCache[u4Entry].rBssidInfo.arBSSID), MAC2STR(prAdapter->rWifiVar.rAisSpecificBssInfo.arPmkidCache[u4Entry].rBssidInfo.arPMKID))); if (prAdapter->rWifiVar.rAisSpecificBssInfo.arPmkidCache[u4Entry].fgPmkidExist) { RSN_IE(pucBuffer)->ucLength = 38; WLAN_SET_FIELD_16(cp, 1); // PMKID count cp += 2; DBGLOG(RSN, TRACE, ("BSSID "MACSTR" ind=%d\n", 
MAC2STR(prStaRec->aucMacAddr), u4Entry)); DBGLOG(RSN, TRACE, ("use PMKID "MACSTR"\n", MAC2STR(prAdapter->rWifiVar.rAisSpecificBssInfo.arPmkidCache[u4Entry].rBssidInfo.arPMKID))); kalMemCopy(cp, (PVOID)prAdapter->rWifiVar.rAisSpecificBssInfo.arPmkidCache[u4Entry].rBssidInfo.arPMKID, sizeof(PARAM_PMKID_VALUE)); //ucExpendedLen = 40; } else { WLAN_SET_FIELD_16(cp, 0); // PMKID count //ucExpendedLen = ELEM_ID_RSN_LEN_FIXED + 2; #if CFG_SUPPORT_802_11W cp += 2; RSN_IE(pucBuffer)->ucLength += 2; #endif } } else { WLAN_SET_FIELD_16(cp, 0); // PMKID count //ucExpendedLen = ELEM_ID_RSN_LEN_FIXED + 2; #if CFG_SUPPORT_802_11W cp += 2; RSN_IE(pucBuffer)->ucLength += 2; #endif } #if CFG_SUPPORT_802_11W if ((eNetworkId == NETWORK_TYPE_AIS_INDEX) && (kalGetMfpSetting(prAdapter->prGlueInfo) != RSN_AUTH_MFP_DISABLED) /* (mgmt_group_cipher == WPA_CIPHER_AES_128_CMAC) */ ) { WLAN_SET_FIELD_32(cp, RSN_CIPHER_SUITE_AES_128_CMAC); cp += 4; RSN_IE(pucBuffer)->ucLength += 4; } #endif prMsduInfo->u2FrameLength += IE_SIZE(pucBuffer); } } /* rsnGenerateRSNIE */ /*----------------------------------------------------------------------------*/ /*! * \brief Parse the given IE buffer and check if it is WFA IE and return Type and * SubType for further process. * * \param[in] pucBuf Pointer to the buffer of WFA Information Element. * \param[out] pucOuiType Pointer to the storage of OUI Type. * \param[out] pu2SubTypeVersion Pointer to the storage of OUI SubType and Version. 
* \retval TRUE Parse IE ok * \retval FALSE Parse IE fail */ /*----------------------------------------------------------------------------*/ BOOLEAN rsnParseCheckForWFAInfoElem ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucBuf, OUT PUINT_8 pucOuiType, OUT PUINT_16 pu2SubTypeVersion ) { UINT_8 aucWfaOui[] = VENDOR_OUI_WFA; P_IE_WFA_T prWfaIE; ASSERT(pucBuf); ASSERT(pucOuiType); ASSERT(pu2SubTypeVersion); prWfaIE = (P_IE_WFA_T)pucBuf; do { if (IE_LEN(pucBuf) <= ELEM_MIN_LEN_WFA_OUI_TYPE_SUBTYPE) { break; } else if (prWfaIE->aucOui[0] != aucWfaOui[0] || prWfaIE->aucOui[1] != aucWfaOui[1] || prWfaIE->aucOui[2] != aucWfaOui[2]) { break; } *pucOuiType = prWfaIE->ucOuiType; WLAN_GET_FIELD_16(&prWfaIE->aucOuiSubTypeVersion[0], pu2SubTypeVersion); return TRUE; } while (FALSE); return FALSE; } /* end of rsnParseCheckForWFAInfoElem() */ #if CFG_SUPPORT_AAA /*----------------------------------------------------------------------------*/ /*! * \brief Parse the given IE buffer and check if it is RSN IE with CCMP PSK * * \param[in] prAdapter Pointer to Adapter * \param[in] prSwRfb Pointer to the rx buffer * \param[in] pIE Pointer rthe buffer of Information Element. * \param[out] prStatusCode Pointer to the return status code. 
* \retval none */ /*----------------------------------------------------------------------------*/ void rsnParserCheckForRSNCCMPPSK( P_ADAPTER_T prAdapter, P_RSN_INFO_ELEM_T prIe, PUINT_16 pu2StatusCode ) { RSN_INFO_T rRsnIe; ASSERT(prAdapter); ASSERT(prIe); ASSERT(pu2StatusCode); *pu2StatusCode = STATUS_CODE_INVALID_INFO_ELEMENT; if (rsnParseRsnIE(prAdapter, prIe, &rRsnIe)) { if ((rRsnIe.u4PairwiseKeyCipherSuiteCount != 1) || (rRsnIe.au4PairwiseKeyCipherSuite[0] != RSN_CIPHER_SUITE_CCMP)) { *pu2StatusCode = STATUS_CODE_INVALID_PAIRWISE_CIPHER; return; } if ((rRsnIe.u4GroupKeyCipherSuite != RSN_CIPHER_SUITE_CCMP)) { *pu2StatusCode = STATUS_CODE_INVALID_GROUP_CIPHER; return; } if ((rRsnIe.u4AuthKeyMgtSuiteCount != 1) || (rRsnIe.au4AuthKeyMgtSuite[0] != RSN_AKM_SUITE_PSK)) { *pu2StatusCode = STATUS_CODE_INVALID_AKMP; return; } DBGLOG(RSN, TRACE, ("RSN with CCMP-PSK\n" )); *pu2StatusCode = WLAN_STATUS_SUCCESS; } } #endif /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to generate an authentication event to NDIS. 
* * \param[in] u4Flags Authentication event: \n * PARAM_AUTH_REQUEST_REAUTH 0x01 \n * PARAM_AUTH_REQUEST_KEYUPDATE 0x02 \n * PARAM_AUTH_REQUEST_PAIRWISE_ERROR 0x06 \n * PARAM_AUTH_REQUEST_GROUP_ERROR 0x0E \n * * \return (none) */ /*----------------------------------------------------------------------------*/ VOID rsnGenMicErrorEvent ( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgFlags ) { P_PARAM_AUTH_EVENT_T prAuthEvent; DEBUGFUNC("rsnGenMicErrorEvent"); prAuthEvent = (P_PARAM_AUTH_EVENT_T)prAdapter->aucIndicationEventBuffer; /* Status type: Authentication Event */ prAuthEvent->rStatus.eStatusType = ENUM_STATUS_TYPE_AUTHENTICATION; /* Authentication request */ prAuthEvent->arRequest[0].u4Length = sizeof(PARAM_AUTH_REQUEST_T); kalMemCopy((PVOID)prAuthEvent->arRequest[0].arBssid, (PVOID)prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX].aucBSSID, MAC_ADDR_LEN); if (fgFlags == TRUE) prAuthEvent->arRequest[0].u4Flags = PARAM_AUTH_REQUEST_GROUP_ERROR; else prAuthEvent->arRequest[0].u4Flags = PARAM_AUTH_REQUEST_PAIRWISE_ERROR; kalIndicateStatusAndComplete(prAdapter->prGlueInfo, WLAN_STATUS_MEDIA_SPECIFIC_INDICATION, (PVOID)prAuthEvent, sizeof(PARAM_STATUS_INDICATION_T) + sizeof(PARAM_AUTH_REQUEST_T)); } /* rsnGenMicErrorEvent */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to handle TKIP MIC failures. * * \param[in] adapter_p Pointer to the adapter object data area. 
 * \param[in] prSta Pointer to the STA which occur MIC Error
 * \param[in] fgErrorKeyType type of error key (TRUE: group key, FALSE: pairwise)
 *
 * \retval none
 */
/*----------------------------------------------------------------------------*/
VOID
rsnTkipHandleMICFailure (
    IN P_ADAPTER_T prAdapter,
    IN P_STA_RECORD_T prSta,
    IN BOOLEAN fgErrorKeyType
    )
{
    //UINT_32 u4RsnaCurrentMICFailTime;
    //P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo;

    DEBUGFUNC("rsnTkipHandleMICFailure");

    ASSERT(prAdapter);

#if 1
    /* Current behavior: simply forward the MIC error to the host as an
       authentication event; the 60-second countermeasure bookkeeping in
       the #else branch below is compiled out. */
    rsnGenMicErrorEvent(prAdapter,/* prSta,*/ fgErrorKeyType);

    /* Generate authentication request event. */
    DBGLOG(RSN, INFO, ("Generate TKIP MIC error event (type: 0%d)\n",
        fgErrorKeyType));
#else
    ASSERT(prSta);

    prAisSpecBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;

    /* Record the MIC error occur time. */
    GET_CURRENT_SYSTIME(&u4RsnaCurrentMICFailTime);

    /* Generate authentication request event. */
    DBGLOG(RSN, INFO, ("Generate TKIP MIC error event (type: 0%d)\n",
        fgErrorKeyType));

    /* If less than 60 seconds have passed since a previous TKIP MIC failure,
       disassociate from the AP and wait for 60 seconds before
       (re)associating with the same AP. */
    if (prAisSpecBssInfo->u4RsnaLastMICFailTime != 0 &&
        !CHECK_FOR_TIMEOUT(u4RsnaCurrentMICFailTime,
            prAisSpecBssInfo->u4RsnaLastMICFailTime,
            SEC_TO_SYSTIME(TKIP_COUNTERMEASURE_SEC))) {
        /* If less than 60 seconds expired since last MIC error, we have to
           block traffic. */
        DBGLOG(RSN, INFO, ("Start blocking traffic!\n"));
        rsnGenMicErrorEvent( prAdapter,/* prSta,*/ fgErrorKeyType);
        secFsmEventStartCounterMeasure(prAdapter, prSta);
    }
    else {
        rsnGenMicErrorEvent( prAdapter,/* prSta,*/ fgErrorKeyType);
        DBGLOG(RSN, INFO, ("First TKIP MIC error!\n"));
    }

    COPY_SYSTIME(prAisSpecBssInfo->u4RsnaLastMICFailTime,
        u4RsnaCurrentMICFailTime);
#endif
} /* rsnTkipHandleMICFailure */

/*----------------------------------------------------------------------------*/
/*!
 * \brief This function is called to select a list of BSSID from
 *        the scan results for PMKID candidate list.
 *
 * \param[in] prBssDesc the BSS Desc at scan result list
 * \param[out] pu4CandidateCount Pointer to the number of selected candidates.
 *             It is set to zero if no BSSID matches our requirement.
 *
 * \retval none
 */
/*----------------------------------------------------------------------------*/
VOID
rsnSelectPmkidCandidateList (
    IN P_ADAPTER_T prAdapter,
    IN P_BSS_DESC_T prBssDesc
    )
{
    P_CONNECTION_SETTINGS_T prConnSettings;
    P_AIS_BSS_INFO_T prAisBssInfo;

    DEBUGFUNC("rsnSelectPmkidCandidateList");

    ASSERT(prBssDesc);

    prConnSettings = &prAdapter->rWifiVar.rConnSettings;
    prAisBssInfo = &prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX];

    /* Search a BSS with the same SSID from the given BSS description set. */
    //DBGLOG(RSN, TRACE, ("Check scan result ["MACSTR"]\n",
    //    MAC2STR(prBssDesc->aucBSSID)));

    /* Only BSSes belonging to the configured SSID may become candidates. */
    if (UNEQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen,
            prConnSettings->aucSSID, prConnSettings->ucSSIDLen)) {
        DBGLOG(RSN, TRACE, ("-- SSID not matched\n"));
        return;
    }

#if 0
    /* Additional rate-set / cipher filtering, currently compiled out. */
    if ((prBssDesc->u2BSSBasicRateSet &
            ~(rPhyAttributes[prAisBssInfo->ePhyType].u2SupportedRateSet)) ||
        prBssDesc->fgIsUnknownBssBasicRate) {
        DBGLOG(RSN, TRACE, ("-- Rate set not matched\n"));
        return;
    }

    if (/* prBssDesc->u4RsnSelectedPairwiseCipher != prAisBssInfo->u4RsnSelectedPairwiseCipher ||*/
        prBssDesc->u4RsnSelectedGroupCipher != prAisBssInfo->u4RsnSelectedGroupCipher /*||
        prBssDesc->u4RsnSelectedAKMSuite != prAisBssInfo->u4RsnSelectedAKMSuite */) {
        DBGLOG(RSN, TRACE, ("-- Encrypt status not matched for PMKID \n"));
        return;
    }
#endif

    rsnUpdatePmkidCandidateList(prAdapter, prBssDesc);
} /* rsnSelectPmkidCandidateList */

/*----------------------------------------------------------------------------*/
/*!
 * \brief This function is called to add the given BSS to
 *        the PMKID candidate list.
 *
 * \param[in] prBssDesc the BSS DESC at scan result list
 *
 * \retval none
 */
/*----------------------------------------------------------------------------*/
VOID
rsnUpdatePmkidCandidateList (
    IN P_ADAPTER_T prAdapter,
    IN P_BSS_DESC_T prBssDesc
    )
{
    UINT_32 i;
    P_CONNECTION_SETTINGS_T prConnSettings;
    P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo;

    DEBUGFUNC("rsnUpdatePmkidCandidateList");

    ASSERT(prBssDesc);

    prConnSettings = &prAdapter->rWifiVar.rConnSettings;
    prAisSpecBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;

    /* Candidates must belong to the currently configured SSID. */
    if (UNEQUAL_SSID(prBssDesc->aucSSID, prBssDesc->ucSSIDLen,
            prConnSettings->aucSSID, prConnSettings->ucSSIDLen)) {
        DBGLOG(RSN, TRACE, ("-- SSID not matched\n"));
        return;
    }

    /* Ignore BSSIDs that are already on the candidate list. */
    for (i = 0; i < CFG_MAX_PMKID_CACHE; i++) {
        if (EQUAL_MAC_ADDR(prBssDesc->aucBSSID,
                prAisSpecBssInfo->arPmkidCandicate[i].aucBssid))
            return;
    }

    /* If the number of selected BSSID exceed MAX_NUM_PMKID_CACHE(16),
       then we only store MAX_NUM_PMKID_CACHE(16) in PMKID cache.
       NOTE(review): when the list is full this decrements the count so the
       new entry below overwrites the LAST slot (net count unchanged). */
    if ((prAisSpecBssInfo->u4PmkidCandicateCount + 1) > CFG_MAX_PMKID_CACHE) {
        prAisSpecBssInfo->u4PmkidCandicateCount --;
    }

    i = prAisSpecBssInfo->u4PmkidCandicateCount;

    COPY_MAC_ADDR((PVOID)prAisSpecBssInfo->arPmkidCandicate[i].aucBssid,
        (PVOID)prBssDesc->aucBSSID);

    /* Remember whether the AP advertises pre-authentication support. */
    if (prBssDesc->u2RsnCap & MASK_RSNIE_CAP_PREAUTH) {
        prAisSpecBssInfo->arPmkidCandicate[i].u4PreAuthFlags = 1;
        DBGLOG(RSN, TRACE, ("Add " MACSTR " with pre-auth to candidate list\n",
            MAC2STR(prAisSpecBssInfo->arPmkidCandicate[i].aucBssid)));
    }
    else {
        prAisSpecBssInfo->arPmkidCandicate[i].u4PreAuthFlags = 0;
        DBGLOG(RSN, TRACE, ("Add " MACSTR " without pre-auth to candidate list\n",
            MAC2STR(prAisSpecBssInfo->arPmkidCandicate[i].aucBssid)));
    }

    prAisSpecBssInfo->u4PmkidCandicateCount ++;
} /* rsnUpdatePmkidCandidateList */

/*----------------------------------------------------------------------------*/
/*!
* \brief This routine is called to search the desired entry in * PMKID cache according to the BSSID * * \param[in] pucBssid Pointer to the BSSID * \param[out] pu4EntryIndex Pointer to place the found entry index * * \retval TRUE, if found one entry for specified BSSID * \retval FALSE, if not found */ /*----------------------------------------------------------------------------*/ BOOLEAN rsnSearchPmkidEntry ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucBssid, OUT PUINT_32 pu4EntryIndex ) { UINT_32 i; P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo; DEBUGFUNC("rsnSearchPmkidEntry"); ASSERT(pucBssid); ASSERT(pu4EntryIndex); prAisSpecBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo; if (prAisSpecBssInfo->u4PmkidCacheCount > CFG_MAX_PMKID_CACHE) { return FALSE; } ASSERT(prAisSpecBssInfo->u4PmkidCacheCount <= CFG_MAX_PMKID_CACHE); /* Search for desired BSSID */ for (i = 0; i < prAisSpecBssInfo->u4PmkidCacheCount; i++) { if (!kalMemCmp(prAisSpecBssInfo->arPmkidCache[i].rBssidInfo.arBSSID, pucBssid, MAC_ADDR_LEN)) { break; } } /* If desired BSSID is found, then set the PMKID */ if (i < prAisSpecBssInfo->u4PmkidCacheCount) { *pu4EntryIndex = i; return TRUE; } return FALSE; } /* rsnSearchPmkidEntry */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to check if there is difference * between PMKID candicate list and PMKID cache. If there * is new candicate that no cache entry is available, then * add a new entry for the new candicate in the PMKID cache * and set the PMKID indication flag to TRUE. 
 *
 * \retval TRUE, if new member in the PMKID candicate list
 * \retval FALSE, if no new member in the PMKID candicate list
 */
/*----------------------------------------------------------------------------*/
BOOLEAN
rsnCheckPmkidCandicate (
    IN P_ADAPTER_T prAdapter
    )
{
    P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo;
    UINT_32 i;      // Index for PMKID candicate
    UINT_32 j;      // Index for PMKID cache
    BOOLEAN status = FALSE;

    DEBUGFUNC("rsnCheckPmkidCandicate");

    prAisSpecBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;

    /* Check for each candicate */
    for (i = 0; i < prAisSpecBssInfo->u4PmkidCandicateCount; i++) {
        /* Look the candidate up in the PMKID cache. */
        for (j = 0; j < prAisSpecBssInfo->u4PmkidCacheCount; j++) {
            if (!kalMemCmp(prAisSpecBssInfo->arPmkidCache[j].rBssidInfo.arBSSID,
                    prAisSpecBssInfo->arPmkidCandicate[i].aucBssid,
                    MAC_ADDR_LEN)) {
                //DBGLOG(RSN, TRACE, (MACSTR" at PMKID cache!!\n", MAC2STR(prAisSpecBssInfo->arPmkidCandicate[i].aucBssid)));
                break;
            }
        }

        /* No entry found in PMKID cache for the candicate, add new one */
        if (j == prAisSpecBssInfo->u4PmkidCacheCount &&
            prAisSpecBssInfo->u4PmkidCacheCount < CFG_MAX_PMKID_CACHE) {
            DBGLOG(RSN, TRACE, ("Add "MACSTR" to PMKID cache!!\n",
                MAC2STR(prAisSpecBssInfo->arPmkidCandicate[i].aucBssid)));
            kalMemCopy((PVOID)prAisSpecBssInfo->arPmkidCache[prAisSpecBssInfo->u4PmkidCacheCount].rBssidInfo.arBSSID,
                (PVOID)prAisSpecBssInfo->arPmkidCandicate[i].aucBssid,
                MAC_ADDR_LEN);
            /* The PMKID value itself is not known yet at this point. */
            prAisSpecBssInfo->arPmkidCache[prAisSpecBssInfo->u4PmkidCacheCount].fgPmkidExist = FALSE;
            prAisSpecBssInfo->u4PmkidCacheCount++;
            status = TRUE;
        }
    }

    return status;
} /* rsnCheckPmkidCandicate */

/*----------------------------------------------------------------------------*/
/*!
* \brief This function is called to wait a duration to indicate the pre-auth AP candicate * * \return (none) */ /*----------------------------------------------------------------------------*/ VOID rsnIndicatePmkidCand ( IN P_ADAPTER_T prAdapter, IN UINT_32 u4Parm ) { DBGLOG(RSN, EVENT, ("Security - Time to indicate the PMKID cand.\n")); /* If the authentication mode is WPA2 and indication PMKID flag is available, then we indicate the PMKID candidate list to NDIS and clear the flag, indicatePMKID */ if (prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX].eConnectionState == PARAM_MEDIA_STATE_CONNECTED && prAdapter->rWifiVar.rConnSettings.eAuthMode == AUTH_MODE_WPA2) { rsnGeneratePmkidIndication(prAdapter); } return; } /* end of rsnIndicatePmkidCand() */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to check the BSS Desc at scan result * with pre-auth cap at wpa2 mode. If there * is candicate that no cache entry is available, then * add a new entry for the new candicate in the PMKID cache * and set the PMKID indication flag to TRUE. 
* * \param[in] prBss The BSS Desc at scan result * * \return none */ /*----------------------------------------------------------------------------*/ VOID rsnCheckPmkidCache ( IN P_ADAPTER_T prAdapter, IN P_BSS_DESC_T prBss ) { P_AIS_BSS_INFO_T prAisBssInfo; P_AIS_SPECIFIC_BSS_INFO_T prAisSpecBssInfo; P_CONNECTION_SETTINGS_T prConnSettings; DEBUGFUNC("rsnCheckPmkidCandicate"); ASSERT(prBss); prConnSettings = &prAdapter->rWifiVar.rConnSettings; prAisBssInfo = &prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX]; prAisSpecBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo; if ((prAisBssInfo->eConnectionState == PARAM_MEDIA_STATE_CONNECTED) && (prConnSettings->eAuthMode == AUTH_MODE_WPA2)) { rsnSelectPmkidCandidateList(prAdapter, prBss); /* Set indication flag of PMKID to TRUE, and then connHandleNetworkConnection() will indicate this later */ if (rsnCheckPmkidCandicate(prAdapter)) { DBGLOG(RSN, TRACE, ("Prepare a timer to indicate candidate PMKID Candidate\n")); cnmTimerStopTimer(prAdapter, &prAisSpecBssInfo->rPreauthenticationTimer); cnmTimerStartTimer(prAdapter, &prAisSpecBssInfo->rPreauthenticationTimer, SEC_TO_MSEC(WAIT_TIME_IND_PMKID_CANDICATE_SEC)); } } } /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to generate an PMKID candidate list * indication to NDIS. * * \param[in] prAdapter Pointer to the adapter object data area. 
 * \param[in] u4Flags PMKID candidate list event:
 *            PARAM_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
 *
 * \retval none
 */
/*----------------------------------------------------------------------------*/
VOID
rsnGeneratePmkidIndication (
    IN P_ADAPTER_T prAdapter
    )
{
    P_PARAM_STATUS_INDICATION_T prStatusEvent;
    P_PARAM_PMKID_CANDIDATE_LIST_T prPmkidEvent;
    P_AIS_SPECIFIC_BSS_INFO_T prAisSpecificBssInfo;
    UINT_8 i, j = 0, count = 0;
    UINT_32 u4LenOfUsedBuffer;

    DEBUGFUNC("rsnGeneratePmkidIndication");

    ASSERT(prAdapter);

    prStatusEvent = (P_PARAM_STATUS_INDICATION_T)prAdapter->aucIndicationEventBuffer;

    /* Status type: PMKID Candidatelist Event */
    prStatusEvent->eStatusType = ENUM_STATUS_TYPE_CANDIDATE_LIST;
    ASSERT(prStatusEvent);

    /* The candidate-list payload immediately follows the status type field. */
    prPmkidEvent = (P_PARAM_PMKID_CANDIDATE_LIST_T)(&prStatusEvent->eStatusType + 1);
    ASSERT(prPmkidEvent);

    prAisSpecificBssInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;
    ASSERT(prAisSpecificBssInfo);

    /* Report only candidates that do not already have a valid PMKID cached. */
    for (i = 0; i < prAisSpecificBssInfo->u4PmkidCandicateCount; i++) {
        for (j = 0; j < prAisSpecificBssInfo->u4PmkidCacheCount; j++) {
            if (EQUAL_MAC_ADDR(
                    prAisSpecificBssInfo->arPmkidCache[j].rBssidInfo.arBSSID,
                    prAisSpecificBssInfo->arPmkidCandicate[i].aucBssid) &&
                (prAisSpecificBssInfo->arPmkidCache[j].fgPmkidExist == TRUE)){
                break;
            }
        }
        /* Never emit more entries than the indication buffer can hold. */
        if (count >= CFG_MAX_PMKID_CACHE) {
            break;
        }
        if (j == prAisSpecificBssInfo->u4PmkidCacheCount) {
            kalMemCopy((PVOID)prPmkidEvent->arCandidateList[count].arBSSID,
                (PVOID)prAisSpecificBssInfo->arPmkidCandicate[i].aucBssid,
                PARAM_MAC_ADDR_LEN);
            prPmkidEvent->arCandidateList[count].u4Flags =
                prAisSpecificBssInfo->arPmkidCandicate[i].u4PreAuthFlags;
            DBGLOG(RSN, TRACE, (MACSTR" %d\n",
                MAC2STR(prPmkidEvent->arCandidateList[count].arBSSID),
                prPmkidEvent->arCandidateList[count].u4Flags));
            count++;
        }
    }

    /* PMKID Candidate List */
    prPmkidEvent->u4Version = 1;
    prPmkidEvent->u4NumCandidates = count;
    DBGLOG(RSN, TRACE, ("rsnGeneratePmkidIndication #%d\n",
        prPmkidEvent->u4NumCandidates));
    /* Status type + version/count header + the candidate entries in use. */
    u4LenOfUsedBuffer = sizeof(ENUM_STATUS_TYPE_T) + (2 *
        sizeof(UINT_32)) + (count * sizeof(PARAM_PMKID_CANDIDATE_T));
    //dumpMemory8((PUINT_8)prAdapter->aucIndicationEventBuffer, u4LenOfUsedBuffer);

    kalIndicateStatusAndComplete(prAdapter->prGlueInfo,
        WLAN_STATUS_MEDIA_SPECIFIC_INDICATION,
        (PVOID) prAdapter->aucIndicationEventBuffer,
        u4LenOfUsedBuffer);
} /* rsnGeneratePmkidIndication */
#endif

#if CFG_SUPPORT_WPS2
/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to generate WSC IE for
 *        associate request frame.
 *
 * \param[in] prMsduInfo The TX MSDU whose frame buffer the IE is appended to
 *
 * \retval The append WSC IE length
 *
 * \note
 * Called by: AIS module, Associate request
 */
/*----------------------------------------------------------------------------*/
VOID
rsnGenerateWSCIE (
    IN P_ADAPTER_T prAdapter,
    IN P_MSDU_INFO_T prMsduInfo
    )
{
    PUINT_8 pucBuffer;

    ASSERT(prAdapter);
    ASSERT(prMsduInfo);

    /* The WSC IE is only appended for the AIS network. */
    if (prMsduInfo->ucNetworkType != NETWORK_TYPE_AIS_INDEX)
        return;

    pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket +
        (UINT_32)prMsduInfo->u2FrameLength);

    /* ASSOC INFO IE ID: 221 :0xDD */
    if (prAdapter->prGlueInfo->u2WSCAssocInfoIELen) {
        kalMemCopy(pucBuffer,
            &prAdapter->prGlueInfo->aucWSCAssocInfoIE,
            prAdapter->prGlueInfo->u2WSCAssocInfoIELen);
        prMsduInfo->u2FrameLength += prAdapter->prGlueInfo->u2WSCAssocInfoIELen;
    }
}
#endif

#if CFG_SUPPORT_802_11W
/*----------------------------------------------------------------------------*/
/*!
 * \brief to check if the Bip Key installed or not
 *
 * \param[in]
 *           prAdapter
 *
 * \return
 *           TRUE
 *           FALSE
 */
/*----------------------------------------------------------------------------*/
UINT_32
rsnCheckBipKeyInstalled (
    IN P_ADAPTER_T prAdapter,
    IN P_STA_RECORD_T prStaRec
    )
{
    /* Only the AIS network tracks BIP (IGTK) key installation. */
    if (prStaRec && prStaRec->ucNetTypeIndex == (UINT_8)NETWORK_TYPE_AIS_INDEX)
        return prAdapter->rWifiVar.rAisSpecificBssInfo.fgBipKeyInstalled;
    else
        return FALSE;
}

/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to check the Sa query timeout.
 *
 * \retval 1 the SA Query timed out (pending state cleared, disconnect indicated)
 * \retval 0 the SA Query is still within its time window
 *
 * \note
 * Called by: AIS module, Handle by Sa Quert timeout
 */
/*----------------------------------------------------------------------------*/
UINT_8
rsnCheckSaQueryTimeout (
    IN P_ADAPTER_T prAdapter
    )
{
    P_AIS_SPECIFIC_BSS_INFO_T prBssSpecInfo;
    UINT_32 now;

    prBssSpecInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;
    ASSERT(prBssSpecInfo);

    GET_CURRENT_SYSTIME(&now);

    /* 1000 TU without any answer since the first pending SA Query request
       means the AP never responded: treat the association as dead. */
    if (CHECK_FOR_TIMEOUT(now, prBssSpecInfo->u4SaQueryStart,
            TU_TO_MSEC(1000))) {
        LOG_FUNC("association SA Query timed out\n");

        /* Drop the whole list of outstanding transaction identifiers. */
        prBssSpecInfo->ucSaQueryTimedOut = 1;
        kalMemFree(prBssSpecInfo->pucSaQueryTransId, VIR_MEM_TYPE,
            prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN);
        prBssSpecInfo->pucSaQueryTransId = NULL;
        prBssSpecInfo->u4SaQueryCount = 0;
        cnmTimerStopTimer(prAdapter, &prBssSpecInfo->rSaQueryTimer);

        /* Re-connect */
        kalIndicateStatusAndComplete(prAdapter->prGlueInfo,
            WLAN_STATUS_MEDIA_DISCONNECT, NULL, 0);
        return 1;
    }

    return 0;
}

/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to start the 802.11w sa query timer.
* * * \note * Called by: AIS module, Handle Rx mgmt request */ /*----------------------------------------------------------------------------*/ void rsnStartSaQueryTimer ( IN P_ADAPTER_T prAdapter ) { P_BSS_INFO_T prBssInfo; P_AIS_SPECIFIC_BSS_INFO_T prBssSpecInfo; P_MSDU_INFO_T prMsduInfo; P_ACTION_SA_QUERY_FRAME prTxFrame; UINT_16 u2PayloadLen; PUINT_8 pucTmp = NULL; UINT_8 ucTransId[ACTION_SA_QUERY_TR_ID_LEN]; prBssInfo = &prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX]; ASSERT(prBssInfo); prBssSpecInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo; ASSERT(prBssSpecInfo); LOG_FUNC("MFP: Start Sa Query\n"); if (prBssSpecInfo->u4SaQueryCount > 0 && rsnCheckSaQueryTimeout(prAdapter)) { LOG_FUNC("MFP: u4SaQueryCount count =%d\n", prBssSpecInfo->u4SaQueryCount); return; } prMsduInfo = (P_MSDU_INFO_T) cnmMgtPktAlloc(prAdapter, MAC_TX_RESERVED_FIELD + PUBLIC_ACTION_MAX_LEN); if (!prMsduInfo) return; prTxFrame = (P_ACTION_SA_QUERY_FRAME) ((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD); prTxFrame->u2FrameCtrl = MAC_FRAME_ACTION; prTxFrame->u2FrameCtrl |= MASK_FC_PROTECTED_FRAME; COPY_MAC_ADDR(prTxFrame->aucDestAddr, prBssInfo->aucBSSID); COPY_MAC_ADDR(prTxFrame->aucSrcAddr, prBssInfo->aucOwnMacAddr); COPY_MAC_ADDR(prTxFrame->aucBSSID, prBssInfo->aucBSSID); prTxFrame->ucCategory = CATEGORY_SA_QUERT_ACTION; prTxFrame->ucAction = ACTION_SA_QUERY_REQUEST; if (prBssSpecInfo->u4SaQueryCount == 0) { GET_CURRENT_SYSTIME(&prBssSpecInfo->u4SaQueryStart); } if (prBssSpecInfo->u4SaQueryCount) { pucTmp = kalMemAlloc(prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN, VIR_MEM_TYPE); if (!pucTmp) { DBGLOG(RSN, INFO, ("MFP: Fail to alloc tmp buffer for backup sa query id\n")); return; } kalMemCopy(pucTmp, prBssSpecInfo->pucSaQueryTransId, prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN); } kalMemFree(prBssSpecInfo->pucSaQueryTransId, VIR_MEM_TYPE, prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN); ucTransId[0] = (UINT_8)(kalRandomNumber() & 0xFF); 
ucTransId[1] = (UINT_8)(kalRandomNumber() & 0xFF); kalMemCopy(prTxFrame->ucTransId, ucTransId, ACTION_SA_QUERY_TR_ID_LEN); prBssSpecInfo->u4SaQueryCount++; prBssSpecInfo->pucSaQueryTransId = kalMemAlloc(prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN, VIR_MEM_TYPE); if (!prBssSpecInfo->pucSaQueryTransId) { DBGLOG(RSN, INFO, ("MFP: Fail to alloc buffer for sa query id list\n")); return; } if (pucTmp) { kalMemCopy(prBssSpecInfo->pucSaQueryTransId, pucTmp, (prBssSpecInfo->u4SaQueryCount - 1) * ACTION_SA_QUERY_TR_ID_LEN); kalMemCopy(&prBssSpecInfo->pucSaQueryTransId[(prBssSpecInfo->u4SaQueryCount - 1) * ACTION_SA_QUERY_TR_ID_LEN], ucTransId, ACTION_SA_QUERY_TR_ID_LEN); kalMemFree(pucTmp, VIR_MEM_TYPE, (prBssSpecInfo->u4SaQueryCount - 1) * ACTION_SA_QUERY_TR_ID_LEN); } else { kalMemCopy(prBssSpecInfo->pucSaQueryTransId, ucTransId, ACTION_SA_QUERY_TR_ID_LEN); } u2PayloadLen = 2 + ACTION_SA_QUERY_TR_ID_LEN; //4 Update information of MSDU_INFO_T prMsduInfo->ucPacketType = HIF_TX_PACKET_TYPE_MGMT; /* Management frame */ prMsduInfo->ucStaRecIndex = prBssInfo->prStaRecOfAP->ucIndex; prMsduInfo->ucNetworkType = prBssInfo->ucNetTypeIndex; prMsduInfo->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN; prMsduInfo->fgIs802_1x = FALSE; prMsduInfo->fgIs802_11 = TRUE; prMsduInfo->u2FrameLength = WLAN_MAC_MGMT_HEADER_LEN + u2PayloadLen; prMsduInfo->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter); prMsduInfo->pfTxDoneHandler = NULL; prMsduInfo->fgIsBasicRate = FALSE; //4 Enqueue the frame to send this action frame. nicTxEnqueueMsdu(prAdapter, prMsduInfo); DBGLOG(RSN, TRACE, ("Set SA Query timer %d (%d sec)\n", prBssSpecInfo->u4SaQueryCount, prBssInfo->u2ObssScanInterval)); cnmTimerStartTimer(prAdapter, &prBssSpecInfo->rSaQueryTimer, TU_TO_MSEC(201)); } /*----------------------------------------------------------------------------*/ /*! * * \brief This routine is called to start the 802.11w sa query. 
* * * \note * Called by: AIS module, Handle Rx mgmt request */ /*----------------------------------------------------------------------------*/ void rsnStartSaQuery ( IN P_ADAPTER_T prAdapter ) { rsnStartSaQueryTimer(prAdapter); } /*----------------------------------------------------------------------------*/ /*! * * \brief This routine is called to stop the 802.11w sa query. * * * \note * Called by: AIS module, Handle Rx mgmt request */ /*----------------------------------------------------------------------------*/ void rsnStopSaQuery ( IN P_ADAPTER_T prAdapter ) { P_AIS_SPECIFIC_BSS_INFO_T prBssSpecInfo; prBssSpecInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo; ASSERT(prBssSpecInfo); cnmTimerStopTimer(prAdapter, &prBssSpecInfo->rSaQueryTimer); kalMemFree(prBssSpecInfo->pucSaQueryTransId, VIR_MEM_TYPE, prBssSpecInfo->u4SaQueryCount * ACTION_SA_QUERY_TR_ID_LEN); prBssSpecInfo->pucSaQueryTransId = NULL; prBssSpecInfo->u4SaQueryCount = 0; } /*----------------------------------------------------------------------------*/ /*! * * \brief This routine is called to process the 802.11w sa query action frame. 
 *
 *
 * \note
 * Called by: AIS module, Handle Rx mgmt request
 */
/*----------------------------------------------------------------------------*/
void
rsnSaQueryRequest (
    IN P_ADAPTER_T prAdapter,
    IN P_SW_RFB_T prSwRfb
    )
{
    P_BSS_INFO_T prBssInfo;
    P_MSDU_INFO_T prMsduInfo;
    P_ACTION_SA_QUERY_FRAME prRxFrame = NULL;
    UINT_16 u2PayloadLen;
    P_STA_RECORD_T prStaRec;
    P_ACTION_SA_QUERY_FRAME prTxFrame;

    prBssInfo = &prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_AIS_INDEX];
    ASSERT(prBssInfo);

    prRxFrame = (P_ACTION_SA_QUERY_FRAME)prSwRfb->pvHeader;
    if (!prRxFrame)
        return;

    /* NOTE(review): cnmGetStaRecByIndex() may return NULL; prStaRec is
       dereferenced in the traces below without a check - verify that
       callers guarantee a valid STA record index. */
    prStaRec = cnmGetStaRecByIndex(prAdapter, prSwRfb->ucStaRecIdx);

    DBGLOG(RSN, TRACE, ("IEEE 802.11: Received SA Query Request from "
        MACSTR"\n", MAC2STR(prStaRec->aucMacAddr)));
    DBGLOG_MEM8(RSN, TRACE, prRxFrame->ucTransId, ACTION_SA_QUERY_TR_ID_LEN);

    /* Requests received while not associated are ignored. */
    if (kalGetMediaStateIndicated(prAdapter->prGlueInfo) ==
        PARAM_MEDIA_STATE_DISCONNECTED) {
        DBGLOG(RSN, TRACE, ("IEEE 802.11: Ignore SA Query Request "
            "from unassociated STA " MACSTR"\n", MAC2STR(prStaRec->aucMacAddr)));
        return;
    }

    DBGLOG(RSN, TRACE, ("IEEE 802.11: Sending SA Query Response to "
        MACSTR"\n", MAC2STR(prStaRec->aucMacAddr)));

    prMsduInfo = (P_MSDU_INFO_T) cnmMgtPktAlloc(prAdapter,
        MAC_TX_RESERVED_FIELD + PUBLIC_ACTION_MAX_LEN);
    if (!prMsduInfo)
        return;

    prTxFrame = (P_ACTION_SA_QUERY_FRAME)
        ((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD);

    prTxFrame->u2FrameCtrl = MAC_FRAME_ACTION;
    /* SA Query always with protected */
    prTxFrame->u2FrameCtrl |= MASK_FC_PROTECTED_FRAME;
    COPY_MAC_ADDR(prTxFrame->aucDestAddr, prBssInfo->aucBSSID);
    COPY_MAC_ADDR(prTxFrame->aucSrcAddr, prBssInfo->aucOwnMacAddr);
    COPY_MAC_ADDR(prTxFrame->aucBSSID, prBssInfo->aucBSSID);
    prTxFrame->ucCategory = CATEGORY_SA_QUERT_ACTION;
    prTxFrame->ucAction = ACTION_SA_QUERY_RESPONSE;

    /* Echo the transaction identifier of the request back to the peer. */
    kalMemCopy(prTxFrame->ucTransId, prRxFrame->ucTransId,
        ACTION_SA_QUERY_TR_ID_LEN);

    u2PayloadLen = 2 + ACTION_SA_QUERY_TR_ID_LEN;

    //4 Update information of MSDU_INFO_T
    prMsduInfo->ucPacketType = HIF_TX_PACKET_TYPE_MGMT;    /* Management frame */
    prMsduInfo->ucStaRecIndex = prBssInfo->prStaRecOfAP->ucIndex;
    prMsduInfo->ucNetworkType = prBssInfo->ucNetTypeIndex;
    prMsduInfo->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN;
    prMsduInfo->fgIs802_1x = FALSE;
    prMsduInfo->fgIs802_11 = TRUE;
    prMsduInfo->u2FrameLength = WLAN_MAC_MGMT_HEADER_LEN + u2PayloadLen;
    prMsduInfo->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter);
    prMsduInfo->pfTxDoneHandler = NULL;
    prMsduInfo->fgIsBasicRate = FALSE;

    //4 Enqueue the frame to send this action frame.
    nicTxEnqueueMsdu(prAdapter, prMsduInfo);
}

/*----------------------------------------------------------------------------*/
/*!
 *
 * \brief This routine is called to process the 802.11w sa query action frame.
 *
 *
 * \note
 * Called by: AIS module, Handle Rx mgmt request
 */
/*----------------------------------------------------------------------------*/
void
rsnSaQueryAction (
    IN P_ADAPTER_T prAdapter,
    IN P_SW_RFB_T prSwRfb
    )
{
    P_AIS_SPECIFIC_BSS_INFO_T prBssSpecInfo;
    P_ACTION_SA_QUERY_FRAME prRxFrame;
    P_STA_RECORD_T prStaRec;
    UINT_32 i;

    prBssSpecInfo = &prAdapter->rWifiVar.rAisSpecificBssInfo;
    ASSERT(prBssSpecInfo);

    prRxFrame = (P_ACTION_SA_QUERY_FRAME) prSwRfb->pvHeader;
    prStaRec = cnmGetStaRecByIndex(prAdapter, prSwRfb->ucStaRecIdx);

    /* Frames too short to carry a transaction identifier are dropped. */
    if (prSwRfb->u2PacketLen < ACTION_SA_QUERY_TR_ID_LEN) {
        DBGLOG(RSN, TRACE, ("IEEE 802.11: Too short SA Query Action "
            "frame (len=%lu)\n", (unsigned long) prSwRfb->u2PacketLen));
        return;
    }

    if (prRxFrame->ucAction == ACTION_SA_QUERY_REQUEST) {
        rsnSaQueryRequest(prAdapter, prSwRfb);
        return;
    }

    if (prRxFrame->ucAction != ACTION_SA_QUERY_RESPONSE) {
        DBGLOG(RSN, TRACE, ("IEEE 802.11: Unexpected SA Query "
            "Action %d\n", prRxFrame->ucAction));
        return;
    }

    DBGLOG(RSN, TRACE, ("IEEE 802.11: Received SA Query Response from "
        MACSTR"\n", MAC2STR(prStaRec->aucMacAddr)));
    DBGLOG_MEM8(RSN, TRACE, prRxFrame->ucTransId, ACTION_SA_QUERY_TR_ID_LEN);

    /* MLME-SAQuery.confirm */
    /* Match the response against one of the pending transaction ids. */
    for (i = 0; i < prBssSpecInfo->u4SaQueryCount; i++) {
        if
(kalMemCmp(prBssSpecInfo->pucSaQueryTransId + i * ACTION_SA_QUERY_TR_ID_LEN, prRxFrame->ucTransId, ACTION_SA_QUERY_TR_ID_LEN) == 0) break; } if (i >= prBssSpecInfo->u4SaQueryCount) { DBGLOG(RSN, TRACE, ("IEEE 802.11: No matching SA Query " "transaction identifier found\n")); return; } DBGLOG(RSN, TRACE, ("Reply to pending SA Query received\n")); rsnStopSaQuery(prAdapter); } /*----------------------------------------------------------------------------*/ /*! * * \brief This routine is called to process the 802.11w mgmt frame. * * * \note * Called by: AIS module, Handle Rx mgmt request */ /*----------------------------------------------------------------------------*/ BOOLEAN rsnCheckRxMgmt ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, IN UINT_8 ucSubtype ) { P_HIF_RX_HEADER_T prHifRxHdr; BOOLEAN fgUnicast = TRUE; BOOLEAN fgRobustAction = FALSE; prHifRxHdr = prSwRfb->prHifRxHdr; if ((HIF_RX_HDR_GET_NETWORK_IDX(prHifRxHdr) == NETWORK_TYPE_AIS_INDEX) && prAdapter->rWifiVar.rAisSpecificBssInfo.fgMgmtProtection /* Use MFP */) { P_WLAN_ASSOC_REQ_FRAME_T prAssocReqFrame; prAssocReqFrame = (P_WLAN_ASSOC_REQ_FRAME_T) prSwRfb->pvHeader; if (prAssocReqFrame->aucDestAddr[0] & BIT(0)) fgUnicast = FALSE; LOG_FUNC("QM RX MGT: rsnCheckRxMgmt = %d 0x%x %d ucSubtype=%x\n", fgUnicast, prHifRxHdr->ucReserved, (prHifRxHdr->ucReserved & CONTROL_FLAG_UC_MGMT_NO_ENC), ucSubtype); if (prHifRxHdr->ucReserved & CONTROL_FLAG_UC_MGMT_NO_ENC) { /* "Dropped unprotected Robust Action frame from an MFP STA" */ /* exclude Public Action */ if (ucSubtype == 13 /* 0x1011: MAC_FRAME_ACTION */) { UINT_8 ucAction = *prSwRfb->pucRecvBuff; if (ucAction != CATEGORY_PUBLIC_ACTION && ucAction != CATEGORY_HT_ACTION) { #if DBG && CFG_RX_PKTS_DUMP LOG_FUNC("QM RX MGT: UnProtected Robust Action frame = %d\n", ucAction); #endif fgRobustAction = TRUE; return TRUE; } } if (fgUnicast && ((ucSubtype == 10 /* 0x1010: MAC_FRAME_DISASSOC */) || (ucSubtype == 12 /* 0x1100: MAC_FRAME_DEAUTH */))) { LOG_FUNC("QM RX MGT: 
rsnStartSaQuery\n"); /* MFP test plan 5.3.3.5 */ rsnStartSaQuery(prAdapter); return TRUE; } } #if 0 else { if (fgUnicast && ((ucSubtype == MAC_FRAME_DISASSOC) || (ucSubtype == MAC_FRAME_DEAUTH))) { /* This done by function handler */ //kalIndicateStatusAndComplete(prAdapter->prGlueInfo, // WLAN_STATUS_MEDIA_DISCONNECT, // NULL, // 0); } } #endif } return FALSE; } #endif
gpl-2.0
myhro/debian-linux-kernel-gzip
drivers/cpufreq/exynos4x12-cpufreq.c
145
6829
/*
 * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4X12 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "exynos-cpufreq.h"

/* Clocks looked up in exynos4x12_cpufreq_init() and used for APLL reparenting. */
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;

/* Driver-wide DVFS info, cached so the set_clkdiv/set_apll helpers can reach
 * the mapped CMU registers. */
static struct exynos_dvfs_info *cpufreq;

/* Per-OPP voltages in uV, indexed by the L0..L13 levels of the freq table. */
static unsigned int exynos4x12_volt_table[] = {
	1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
	1000000, 987500, 975000, 950000, 925000, 900000, 900000
};

/* cpufreq OPP table; only the 1.5 GHz entry is flagged as a boost frequency. */
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
	{CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
	{0, L1, 1400 * 1000},
	{0, L2, 1300 * 1000},
	{0, L3, 1200 * 1000},
	{0, L4, 1100 * 1000},
	{0, L5, 1000 * 1000},
	{0, L6, 900 * 1000},
	{0, L7, 800 * 1000},
	{0, L8, 700 * 1000},
	{0, L9, 600 * 1000},
	{0, L10, 500 * 1000},
	{0, L11, 400 * 1000},
	{0, L12, 300 * 1000},
	{0, L13, 200 * 1000},
	{0, 0, CPUFREQ_TABLE_END},
};

/* Points at apll_freq_4212 or apll_freq_4412 depending on detected SoC. */
static struct apll_freq *apll_freq_4x12;

static struct apll_freq apll_freq_4212[] = {
	/*
	 * values:
	 * freq
	 * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
	 * clock divider for COPY, HPM, RESERVED
	 * PLL M, P, S
	 */
	APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
	APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
	APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
	APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
	APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
	APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
	APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
	APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
	APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
	APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
	APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
	APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
	APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
	APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
};

static struct apll_freq apll_freq_4412[] = {
	/*
	 * values:
	 * freq
	 * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
	 * clock divider for COPY, HPM, CORES
	 * PLL M, P, S
	 */
	APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
	APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
	APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
	APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
	APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
	APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
	APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
	APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
	APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
	APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
	APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
	APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
	APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
	APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
};

/*
 * Program the CPU0/CPU1 clock dividers for the given OPP index and poll the
 * status registers until the divider change has completed.
 */
static void exynos4x12_set_clkdiv(unsigned int div_index)
{
	unsigned int tmp;

	/* Change Divider - CPU0 */
	tmp = apll_freq_4x12[div_index].clk_div_cpu0;

	__raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);

	/* Busy-wait until every CPU0 divider status nibble reports idle. */
	while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
	       & 0x11111111)
		cpu_relax();

	/* Change Divider - CPU1 */
	tmp = apll_freq_4x12[div_index].clk_div_cpu1;

	__raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);

	do {
		cpu_relax();
		tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
	} while (tmp != 0x0);
}

/*
 * Re-lock the APLL at the rate for the given OPP index: park ARMCLK on MPLL,
 * change the APLL rate, then switch ARMCLK back to APLL, polling the mux
 * status register after each reparent.
 */
static void exynos4x12_set_apll(unsigned int index)
{
	unsigned int tmp, freq = apll_freq_4x12[index].freq;

	/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
	clk_set_parent(moutcore, mout_mpll);

	do {
		cpu_relax();
		tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
			>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
		tmp &= 0x7;
	} while (tmp != 0x2);	/* 0x2 == MPLL selected */

	clk_set_rate(mout_apll, freq * 1000);

	/* MUX_CORE_SEL = APLL */
	clk_set_parent(moutcore, mout_apll);

	do {
		cpu_relax();
		tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
		tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
	} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}

/*
 * Transition between two OPP indices. Order matters: when speeding up
 * (old_index > new_index, since lower index == higher frequency) the dividers
 * are widened before the PLL is raised; when slowing down, the PLL is lowered
 * first. Equal indices are a no-op.
 */
static void exynos4x12_set_frequency(unsigned int old_index,
				     unsigned int new_index)
{
	if (old_index > new_index) {
		exynos4x12_set_clkdiv(new_index);
		exynos4x12_set_apll(new_index);
	} else if (old_index < new_index) {
		exynos4x12_set_apll(new_index);
		exynos4x12_set_clkdiv(new_index);
	}
}

/*
 * Probe-time initialization: map the CMU registers via DT, look up the
 * ARMCLK/mux clocks, select the 4212 or 4412 APLL table, and fill in the
 * exynos_dvfs_info callbacks/tables consumed by the common exynos-cpufreq
 * layer. Returns 0 on success or a negative errno.
 */
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
	struct device_node *np;
	unsigned long rate;

	/*
	 * HACK: This is a temporary workaround to get access to clock
	 * controller registers directly and remove static mappings and
	 * dependencies on platform headers. It is necessary to enable
	 * Exynos multi-platform support and will be removed together with
	 * this whole driver as soon as Exynos gets migrated to use
	 * cpufreq-cpu0 driver.
	 */
	np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
	if (!np) {
		pr_err("%s: failed to find clock controller DT node\n",
			__func__);
		return -ENODEV;
	}

	/* NOTE(review): np's refcount from of_find_compatible_node() is
	 * never dropped (no of_node_put), and the error paths below do not
	 * iounmap(info->cmu_regs) — both leak; confirm against upstream. */
	info->cmu_regs = of_iomap(np, 0);
	if (!info->cmu_regs) {
		pr_err("%s: failed to map CMU registers\n", __func__);
		return -EFAULT;
	}

	cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	moutcore = clk_get(NULL, "moutcore");
	if (IS_ERR(moutcore))
		goto err_moutcore;

	mout_mpll = clk_get(NULL, "mout_mpll");
	if (IS_ERR(mout_mpll))
		goto err_mout_mpll;
	rate = clk_get_rate(mout_mpll) / 1000;

	mout_apll = clk_get(NULL, "mout_apll");
	if (IS_ERR(mout_apll))
		goto err_mout_apll;

	if (info->type == EXYNOS_SOC_4212)
		apll_freq_4x12 = apll_freq_4212;
	else
		apll_freq_4x12 = apll_freq_4412;

	info->mpll_freq_khz = rate;
	/* 800Mhz */
	info->pll_safe_idx = L7;
	info->cpu_clk = cpu_clk;
	info->volt_table = exynos4x12_volt_table;
	info->freq_table = exynos4x12_freq_table;
	info->set_freq = exynos4x12_set_frequency;

	cpufreq = info;

	return 0;

err_mout_apll:
	clk_put(mout_mpll);
err_mout_mpll:
	clk_put(moutcore);
err_moutcore:
	clk_put(cpu_clk);

	pr_debug("%s: failed initialization\n", __func__);
	return -EINVAL;
}
gpl-2.0
XileForce/Vindicator-S6-Uni
drivers/gator/gator_cookies.c
145
11676
/**
 * Copyright (C) ARM Limited 2010-2014. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

/* must be power of 2 */
#define COOKIEMAP_ENTRIES 1024
/* must be a power of 2 - 512/4 = 128 entries */
#define TRANSLATE_BUFFER_SIZE 512
#define TRANSLATE_TEXT_SIZE 256
#define MAX_COLLISIONS 2

/* CRC32 lookup table, built lazily in cookies_initialize(). */
static uint32_t *gator_crc32_table;
/* Index mask for the per-CPU translate ring buffer (entries - 1). */
static unsigned int translate_buffer_mask;

/* One queued name-translation request: the task plus its candidate text. */
struct cookie_args {
	struct task_struct *task;
	const char *text;
};

/* Per-CPU state: scratch text buffer, next cookie id, the cookie hash map
 * (keys/values), and the ring buffer of pending translation requests. */
static DEFINE_PER_CPU(char *, translate_text);
static DEFINE_PER_CPU(uint32_t, cookie_next_key);
static DEFINE_PER_CPU(uint64_t *, cookie_keys);
static DEFINE_PER_CPU(uint32_t *, cookie_values);
static DEFINE_PER_CPU(int, translate_buffer_read);
static DEFINE_PER_CPU(int, translate_buffer_write);
static DEFINE_PER_CPU(struct cookie_args *, translate_buffer);

static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq);
static void wq_cookie_handler(struct work_struct *unused);
static DECLARE_WORK(cookie_work, wq_cookie_handler);
static struct timer_list app_process_wake_up_timer;
static void app_process_wake_up_handler(unsigned long unused_data);

/*
 * Hash a 64-bit key into a bucket base index of the cookie map. The result
 * is a multiple of MAX_COLLISIONS; the bucket occupies the next
 * MAX_COLLISIONS slots.
 */
static uint32_t cookiemap_code(uint64_t value64)
{
	uint32_t value = (uint32_t)((value64 >> 32) + value64);
	uint32_t cookiecode = (value >> 24) & 0xff;

	cookiecode = cookiecode * 31 + ((value >> 16) & 0xff);
	cookiecode = cookiecode * 31 + ((value >> 8) & 0xff);
	cookiecode = cookiecode * 31 + ((value >> 0) & 0xff);
	cookiecode &= (COOKIEMAP_ENTRIES - 1);
	return cookiecode * MAX_COLLISIONS;
}

/* Standard byte-wise CRC32 of a NUL-terminated string, using the
 * table built in cookies_initialize(). */
static uint32_t gator_chksum_crc32(const char *data)
{
	register unsigned long crc;
	const unsigned char *block = data;
	int i, length = strlen(data);

	crc = 0xFFFFFFFF;
	for (i = 0; i < length; i++)
		crc = ((crc >> 8) & 0x00FFFFFF) ^ gator_crc32_table[(crc ^ *block++) & 0xFF];

	return (crc ^ 0xFFFFFFFF);
}

/*
 * Exists
 * Pre:  [0][1][v][3]..[n-1]
 * Post: [v][0][1][3]..[n-1]
 *
 * Look up key in this CPU's cookie map. On a hit the entry is moved to the
 * front of its bucket (move-to-front LRU within MAX_COLLISIONS slots) and
 * its value returned; returns 0 on a miss.
 */
static uint32_t cookiemap_exists(uint64_t key)
{
	unsigned long x, flags, retval = 0;
	int cpu = get_physical_cpu();
	uint32_t cookiecode = cookiemap_code(key);
	uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
	uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);

	/* Can be called from interrupt handler or from work queue */
	local_irq_save(flags);
	for (x = 0; x < MAX_COLLISIONS; x++) {
		if (keys[x] == key) {
			uint32_t value = values[x];

			/* Shift earlier entries down, then put the hit first. */
			for (; x > 0; x--) {
				keys[x] = keys[x - 1];
				values[x] = values[x - 1];
			}
			keys[0] = key;
			values[0] = value;
			retval = value;
			break;
		}
	}
	local_irq_restore(flags);

	return retval;
}

/*
 * Add
 * Pre:  [0][1][2][3]..[n-1]
 * Post: [v][0][1][2]..[n-2]
 *
 * Insert key/value at the front of its bucket, evicting the oldest entry.
 * Caller is expected to hold off interrupts (see get_cookie()).
 */
static void cookiemap_add(uint64_t key, uint32_t value)
{
	int cpu = get_physical_cpu();
	int cookiecode = cookiemap_code(key);
	uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
	uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
	int x;

	for (x = MAX_COLLISIONS - 1; x > 0; x--) {
		keys[x] = keys[x - 1];
		values[x] = values[x - 1];
	}
	keys[0] = key;
	values[0] = value;
}

#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * Enqueue a translation request on this CPU's ring buffer, taking a task
 * reference (kernels >= 2.6.39) so the task survives until the work queue
 * consumes the entry. Silently drops the request if the ring is full.
 */
static void translate_buffer_write_args(int cpu, struct task_struct *task, const char *text)
{
	unsigned long flags;
	int write;
	int next_write;
	struct cookie_args *args;

	local_irq_save(flags);

	write = per_cpu(translate_buffer_write, cpu);
	next_write = (write + 1) & translate_buffer_mask;

	/* At least one entry must always remain available as when read == write, the queue is empty not full */
	if (next_write != per_cpu(translate_buffer_read, cpu)) {
		args = &per_cpu(translate_buffer, cpu)[write];
		args->task = task;
		args->text = text;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
		get_task_struct(task);
#endif
		per_cpu(translate_buffer_write, cpu) = next_write;
	}

	local_irq_restore(flags);
}
#endif

/* Dequeue one translation request from this CPU's ring buffer.
 * Caller must have verified the ring is non-empty. */
static void translate_buffer_read_args(int cpu, struct cookie_args *args)
{
	unsigned long flags;
	int read;

	local_irq_save(flags);

	read = per_cpu(translate_buffer_read, cpu);
	*args = per_cpu(translate_buffer, cpu)[read];
	per_cpu(translate_buffer_read, cpu) = (read + 1) & translate_buffer_mask;

	local_irq_restore(flags);
}

/*
 * Work-queue side of name translation: drain this CPU's ring buffer,
 * resolving each queued task to a cookie (from_wq=true allows sleeping)
 * and emitting a link record. Only runs while gator is started; holds
 * start_mutex to serialize against start/stop.
 */
static void wq_cookie_handler(struct work_struct *unused)
{
	struct cookie_args args;
	int cpu = get_physical_cpu(), cookie;

	mutex_lock(&start_mutex);

	if (gator_started != 0) {
		while (per_cpu(translate_buffer_read, cpu) != per_cpu(translate_buffer_write, cpu)) {
			translate_buffer_read_args(cpu, &args);
			cookie = get_cookie(cpu, args.task, args.text, true);
			marshal_link(cookie, args.task->tgid, args.task->pid);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
			put_task_struct(args.task);
#endif
		}
	}

	mutex_unlock(&start_mutex);
}

static void app_process_wake_up_handler(unsigned long unused_data)
{
	/* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
	schedule_work(&cookie_work);
}

/* Retrieve full name from proc/pid/cmdline for java processes on Android */
/*
 * Returns 1 and repoints *text at the per-CPU scratch buffer when the
 * cmdline was read; returns 0 when the request was deferred to the work
 * queue, the mm was unavailable, or the name is still a startup
 * placeholder ("zygote" / "<pre-initialized>").
 */
static int translate_app_process(const char **text, int cpu, struct task_struct *task, bool from_wq)
{
	void *maddr;
	unsigned int len;
	unsigned long addr;
	struct mm_struct *mm;
	struct page *page = NULL;
	struct vm_area_struct *page_vma;
	int bytes, offset, retval = 0;
	char *buf = per_cpu(translate_text, cpu);

#ifndef CONFIG_PREEMPT_RT_FULL
	/* Push work into a work queue if in atomic context as the kernel
	 * functions below might sleep. Rely on the in_interrupt variable
	 * rather than in_irq() or in_interrupt() kernel functions, as the
	 * value of these functions seems inconsistent during a context
	 * switch between android/linux versions
	 */
	if (!from_wq) {
		/* Check if already in buffer */
		int pos = per_cpu(translate_buffer_read, cpu);

		while (pos != per_cpu(translate_buffer_write, cpu)) {
			if (per_cpu(translate_buffer, cpu)[pos].task == task)
				goto out;
			pos = (pos + 1) & translate_buffer_mask;
		}

		translate_buffer_write_args(cpu, task, *text);

		/* Not safe to call in RT-Preempt full in schedule switch context */
		mod_timer(&app_process_wake_up_timer, jiffies + 1);
		goto out;
	}
#endif

	mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto outmm;
	addr = mm->arg_start;
	len = mm->arg_end - mm->arg_start;

	if (len > TRANSLATE_TEXT_SIZE)
		len = TRANSLATE_TEXT_SIZE;

	down_read(&mm->mmap_sem);
	while (len) {
		if (get_user_pages(task, mm, addr, 1, 0, 1, &page, &page_vma) <= 0)
			goto outsem;

		maddr = kmap(page);
		offset = addr & (PAGE_SIZE - 1);
		bytes = len;
		if (bytes > PAGE_SIZE - offset)
			bytes = PAGE_SIZE - offset;

		copy_from_user_page(page_vma, page, addr, buf, maddr + offset, bytes);

		/* release page allocated by get_user_pages() */
		kunmap(page);
		page_cache_release(page);

		len -= bytes;
		buf += bytes;
		addr += bytes;

		*text = per_cpu(translate_text, cpu);
		retval = 1;
	}

	/* On app_process startup, /proc/pid/cmdline is initially "zygote" then "<pre-initialized>" but changes after an initial startup period */
	if (strcmp(*text, "zygote") == 0 || strcmp(*text, "<pre-initialized>") == 0)
		retval = 0;

outsem:
	up_read(&mm->mmap_sem);
outmm:
	mmput(mm);
out:
	return retval;
}

static const char APP_PROCESS[] = "app_process";

/*
 * Map (tgid, text) to a cookie id. The key combines the CRC32 of the name
 * with the thread-group id; a cached hit returns immediately. Android
 * app_process names are first expanded via translate_app_process(). New
 * cookies are allocated per-CPU in strides of nr_cpu_ids so ids never
 * collide across CPUs. Returns UNRESOLVED_COOKIE when the name cannot be
 * resolved or marshalled.
 */
static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq)
{
	unsigned long flags, cookie;
	uint64_t key;

	key = gator_chksum_crc32(text);
	key = (key << 32) | (uint32_t)task->tgid;

	cookie = cookiemap_exists(key);
	if (cookie)
		return cookie;

	/* On 64-bit android app_process can be app_process32 or app_process64 */
	if (strncmp(text, APP_PROCESS, sizeof(APP_PROCESS) - 1) == 0) {
		if (!translate_app_process(&text, cpu, task, from_wq))
			return UNRESOLVED_COOKIE;
	}

	/* Can be called from interrupt handler or from work queue or from scheduler trace */
	local_irq_save(flags);

	cookie = UNRESOLVED_COOKIE;
	if (marshal_cookie_header(text)) {
		cookie = per_cpu(cookie_next_key, cpu) += nr_cpu_ids;
		cookiemap_add(key, cookie);
		marshal_cookie(cookie, text);
	}

	local_irq_restore(flags);

	return cookie;
}

/* Cookie for the executable image of a task; NO_COOKIE for kernel threads,
 * UNRESOLVED_COOKIE when no exe_file is available. */
static int get_exec_cookie(int cpu, struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	const char *text;

	/* kernel threads have no address space */
	if (!mm)
		return NO_COOKIE;

	if (task && task->mm && task->mm->exe_file) {
		text = task->mm->exe_file->f_path.dentry->d_name.name;
		return get_cookie(cpu, task, text, false);
	}

	return UNRESOLVED_COOKIE;
}

/*
 * Cookie for the mapping containing addr in task's address space, plus the
 * offset of addr within that mapping (file offset for file-backed VMAs,
 * the raw address for anonymous ones).
 */
static unsigned long get_address_cookie(int cpu, struct task_struct *task, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	const char *text;

	if (!mm)
		return cookie;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			text = vma->vm_file->f_path.dentry->d_name.name;
			cookie = get_cookie(cpu, task, text, false);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = UNRESOLVED_COOKIE;

	return cookie;
}

/*
 * Allocate all per-CPU cookie state and build the CRC32 table.
 * NOTE(review): on a partial failure this returns -ENOMEM without freeing
 * what was already allocated — presumably the caller invokes
 * cookies_release() on error; confirm.
 */
static int cookies_initialize(void)
{
	uint32_t crc, poly;
	int i, j, cpu, size, err = 0;

	translate_buffer_mask = TRANSLATE_BUFFER_SIZE / sizeof(per_cpu(translate_buffer, 0)[0]) - 1;

	for_each_present_cpu(cpu) {
		per_cpu(cookie_next_key, cpu) = nr_cpu_ids + cpu;

		size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint64_t);
		per_cpu(cookie_keys, cpu) = kmalloc(size, GFP_KERNEL);
		if (!per_cpu(cookie_keys, cpu)) {
			err = -ENOMEM;
			goto cookie_setup_error;
		}
		memset(per_cpu(cookie_keys, cpu), 0, size);

		size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint32_t);
		per_cpu(cookie_values, cpu) = kmalloc(size, GFP_KERNEL);
		if (!per_cpu(cookie_values, cpu)) {
			err = -ENOMEM;
			goto cookie_setup_error;
		}
		memset(per_cpu(cookie_values, cpu), 0, size);

		per_cpu(translate_buffer, cpu) = kmalloc(TRANSLATE_BUFFER_SIZE, GFP_KERNEL);
		if (!per_cpu(translate_buffer, cpu)) {
			err = -ENOMEM;
			goto cookie_setup_error;
		}

		per_cpu(translate_buffer_write, cpu) = 0;
		per_cpu(translate_buffer_read, cpu) = 0;

		per_cpu(translate_text, cpu) = kmalloc(TRANSLATE_TEXT_SIZE, GFP_KERNEL);
		if (!per_cpu(translate_text, cpu)) {
			err = -ENOMEM;
			goto cookie_setup_error;
		}
	}

	/* build CRC32 table */
	poly = 0x04c11db7;
	gator_crc32_table = kmalloc(256 * sizeof(*gator_crc32_table), GFP_KERNEL);
	if (!gator_crc32_table) {
		err = -ENOMEM;
		goto cookie_setup_error;
	}
	for (i = 0; i < 256; i++) {
		crc = i;
		for (j = 8; j > 0; j--) {
			if (crc & 1)
				crc = (crc >> 1) ^ poly;
			else
				crc >>= 1;
		}
		gator_crc32_table[i] = crc;
	}

	setup_timer(&app_process_wake_up_timer, app_process_wake_up_handler, 0);

cookie_setup_error:
	return err;
}

/* Free everything cookies_initialize() allocated; kfree(NULL) is a no-op,
 * so this is safe after a partial initialization. */
static void cookies_release(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		kfree(per_cpu(cookie_keys, cpu));
		per_cpu(cookie_keys, cpu) = NULL;

		kfree(per_cpu(cookie_values, cpu));
		per_cpu(cookie_values, cpu) = NULL;

		kfree(per_cpu(translate_buffer, cpu));
		per_cpu(translate_buffer, cpu) = NULL;
		per_cpu(translate_buffer_read, cpu) = 0;
		per_cpu(translate_buffer_write, cpu) = 0;

		kfree(per_cpu(translate_text, cpu));
		per_cpu(translate_text, cpu) = NULL;
	}

	del_timer_sync(&app_process_wake_up_timer);
	kfree(gator_crc32_table);
	gator_crc32_table = NULL;
}
gpl-2.0
spiderworthy/linux
drivers/scsi/device_handler/scsi_dh_alua.c
657
22715
/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "1.3"

/* Asymmetric access states as reported in an RTPG descriptor (byte 0, low nibble) */
#define TPGS_STATE_OPTIMIZED		0x0
#define TPGS_STATE_NONOPTIMIZED		0x1
#define TPGS_STATE_STANDBY		0x2
#define TPGS_STATE_UNAVAILABLE		0x3
#define TPGS_STATE_LBA_DEPENDENT	0x4
#define TPGS_STATE_OFFLINE		0xe
#define TPGS_STATE_TRANSITIONING	0xf

/* Supported-states bitmask as reported in an RTPG descriptor (byte 1) */
#define TPGS_SUPPORT_NONE		0x00
#define TPGS_SUPPORT_OPTIMIZED		0x01
#define TPGS_SUPPORT_NONOPTIMIZED	0x02
#define TPGS_SUPPORT_STANDBY		0x04
#define TPGS_SUPPORT_UNAVAILABLE	0x08
#define TPGS_SUPPORT_LBA_DEPENDENT	0x10
#define TPGS_SUPPORT_OFFLINE		0x40
#define TPGS_SUPPORT_TRANSITION		0x80

/* RTPG response format field (returned data byte 4) */
#define RTPG_FMT_MASK			0x70
#define RTPG_FMT_EXT_HDR		0x10

/* TPGS mode reported by the device (INQUIRY); UNINITIALIZED = not probed yet */
#define TPGS_MODE_UNINITIALIZED		-1
#define TPGS_MODE_NONE			0x0
#define TPGS_MODE_IMPLICIT		0x1
#define TPGS_MODE_EXPLICIT		0x2

#define ALUA_INQUIRY_SIZE		36
#define ALUA_FAILOVER_TIMEOUT		60	/* used as seconds: multiplied by HZ below */
#define ALUA_FAILOVER_RETRIES		5

/* flags passed from user level */
#define ALUA_OPTIMIZE_STPG		1

/*
 * Per-device ALUA state, embedded in the generic scsi_dh_data so it can
 * be recovered from sdev->scsi_dh_data via container_of().
 */
struct alua_dh_data {
	struct scsi_dh_data	dh_data;
	int			group_id;	/* target port group of this path (-1 if unknown) */
	int			rel_port;	/* relative target port id (-1 if unknown) */
	int			tpgs;		/* TPGS_MODE_* reported by the device */
	int			state;		/* last observed TPGS_STATE_* for this path */
	int			pref;		/* preferred-path bit from the RTPG descriptor */
	unsigned		flags;		/* used for optimizing STPG */
	unsigned char		inq[ALUA_INQUIRY_SIZE];	/* built-in fallback buffer */
	unsigned char		*buff;		/* current data buffer (inq or kmalloc'd) */
	int			bufflen;
	unsigned char		transition_tmo;	/* transition timeout from extended RTPG header */
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	int			senselen;
	struct scsi_device	*sdev;
	activate_complete	callback_fn;	/* completion for async activate (STPG) */
	void			*callback_data;
};

#define ALUA_POLICY_SWITCH_CURRENT	0
#define ALUA_POLICY_SWITCH_ALL		1

static char print_alua_state(int);
static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);

static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
{
	return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
}

/*
 * realloc_buffer - grow h->buff to at least @len bytes.
 *
 * On allocation failure falls back to the embedded inquiry buffer and
 * returns 1; returns 0 on success. Never frees the embedded buffer.
 */
static int realloc_buffer(struct alua_dh_data *h, unsigned len)
{
	if (h->buff && h->buff != h->inq)
		kfree(h->buff);

	h->buff = kmalloc(len, GFP_NOIO);
	if (!h->buff) {
		h->buff = h->inq;
		h->bufflen = ALUA_INQUIRY_SIZE;
		return 1;
	}
	h->bufflen = len;
	return 0;
}

/*
 * get_alua_req - allocate and prepare a block-PC request for an ALUA command.
 *
 * Maps @buffer/@buflen for the transfer, marks the request fail-fast and
 * sets the common retry count/timeout. Returns NULL on any failure.
 * Caller owns the request and must blk_put_request() it.
 */
static struct request *get_alua_req(struct scsi_device *sdev,
				    void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (IS_ERR(rq)) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: blk_get_request failed\n", __func__);
		return NULL;
	}
	blk_rq_set_block_pc(rq);

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
			    "%s: blk_rq_map_kern failed\n", __func__);
		return NULL;
	}

	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = ALUA_FAILOVER_RETRIES;
	rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;

	return rq;
}

/*
 * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
 * @sdev: sdev the command should be sent to
 *
 * Executes synchronously into h->buff. On -EIO the sense data is kept in
 * h->sense/h->senselen and SCSI_DH_IO is returned.
 */
static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
{
	struct request *rq;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
	if (!rq)
		goto done;

	/* Prepare the command: INQUIRY with EVPD=1, page 0x83 (device identification) */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = 0x83;
	rq->cmd[4] = h->bufflen;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = h->senselen = 0;

	err = blk_execute_rq(rq->q, NULL, rq, 1);
	if (err == -EIO) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: evpd inquiry failed with %x\n",
			    ALUA_DH_NAME, rq->errors);
		h->senselen = rq->sense_len;
		err = SCSI_DH_IO;
	}
	blk_put_request(rq);
done:
	return err;
}

/*
 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
 * @sdev: sdev the command should be sent to
 *
 * When @rtpg_ext_hdr_req is set the extended header format is requested
 * (MI_EXT_HDR_PARAM_FMT); see the fallback handling in alua_rtpg().
 */
static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
			    bool rtpg_ext_hdr_req)
{
	struct request *rq;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = MAINTENANCE_IN;
	if (rtpg_ext_hdr_req)
		rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
	else
		rq->cmd[1] = MI_REPORT_TARGET_PGS;
	/* allocation length, big-endian */
	rq->cmd[6] = (h->bufflen >> 24) & 0xff;
	rq->cmd[7] = (h->bufflen >> 16) & 0xff;
	rq->cmd[8] = (h->bufflen >> 8) & 0xff;
	rq->cmd[9] = h->bufflen & 0xff;
	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = h->senselen = 0;

	err = blk_execute_rq(rq->q, NULL, rq, 1);
	if (err == -EIO) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: rtpg failed with %x\n",
			    ALUA_DH_NAME, rq->errors);
		h->senselen = rq->sense_len;
		err = SCSI_DH_IO;
	}
	blk_put_request(rq);
done:
	return err;
}

/*
 * alua_stpg - Evaluate SET TARGET GROUP STATES
 * @sdev: the device to be evaluated
 * @state: the new target group state
 *
 * Send a SET TARGET GROUP STATES command to the device.
 * We only have to test here if we should resubmit the command;
 * any other error is assumed as a failure.
 */
static void stpg_endio(struct request *req, int error)
{
	struct alua_dh_data *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	unsigned err = SCSI_DH_OK;

	/* Transport/host level failure trumps any sense data */
	if (host_byte(req->errors) != DID_OK ||
	    msg_byte(req->errors) != COMMAND_COMPLETE) {
		err = SCSI_DH_IO;
		goto done;
	}

	if (req->sense_len > 0) {
		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					   &sense_hdr);
		if (!err) {
			/* sense buffer could not be parsed */
			err = SCSI_DH_IO;
			goto done;
		}
		err = alua_check_sense(h->sdev, &sense_hdr);
		if (err == ADD_TO_MLQUEUE) {
			/* retryable condition (e.g. state transition) */
			err = SCSI_DH_RETRY;
			goto done;
		}
		sdev_printk(KERN_INFO, h->sdev,
			    "%s: stpg sense code: %02x/%02x/%02x\n",
			    ALUA_DH_NAME, sense_hdr.sense_key,
			    sense_hdr.asc, sense_hdr.ascq);
		err = SCSI_DH_IO;
	} else if (error)
		err = SCSI_DH_IO;

	if (err == SCSI_DH_OK) {
		/* STPG succeeded: the path is now active/optimized */
		h->state = TPGS_STATE_OPTIMIZED;
		sdev_printk(KERN_INFO, h->sdev,
			    "%s: port group %02x switched to state %c\n",
			    ALUA_DH_NAME, h->group_id,
			    print_alua_state(h->state));
	}
done:
	req->end_io_data = NULL;
	__blk_put_request(req->q, req);
	/* complete the activate request queued by alua_activate() */
	if (h->callback_fn) {
		h->callback_fn(h->callback_data, err);
		h->callback_fn = h->callback_data = NULL;
	}
	return;
}

/*
 * submit_stpg - Issue a SET TARGET GROUP STATES command
 *
 * Currently we're only setting the current target port group state
 * to 'active/optimized' and let the array firmware figure out
 * the states of the remaining groups.
 */
static unsigned submit_stpg(struct alua_dh_data *h)
{
	struct request *rq;
	int stpg_len = 8;
	struct scsi_device *sdev = h->sdev;

	/* Prepare the data buffer: one 4-byte header + one 4-byte descriptor */
	memset(h->buff, 0, stpg_len);
	h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
	h->buff[6] = (h->group_id >> 8) & 0xff;
	h->buff[7] = h->group_id & 0xff;

	rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
	if (!rq)
		return SCSI_DH_RES_TEMP_UNAVAIL;

	/* Prepare the command. */
	rq->cmd[0] = MAINTENANCE_OUT;
	rq->cmd[1] = MO_SET_TARGET_PGS;
	rq->cmd[6] = (stpg_len >> 24) & 0xff;
	rq->cmd[7] = (stpg_len >> 16) & 0xff;
	rq->cmd[8] = (stpg_len >> 8) & 0xff;
	rq->cmd[9] = stpg_len & 0xff;
	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = h->senselen = 0;
	rq->end_io_data = h;

	/* asynchronous: completion is handled in stpg_endio() */
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
	return SCSI_DH_OK;
}

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_OK;

	h->tpgs = scsi_device_tpgs(sdev);
	switch (h->tpgs) {
	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev,
			    "%s: supports implicit and explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_EXPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	default:
		h->tpgs = TPGS_MODE_NONE;
		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
			    ALUA_DH_NAME);
		err = SCSI_DH_DEV_UNSUPP;
		break;
	}

	return err;
}

/*
 * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identificators.
 */
static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int len;
	unsigned err;
	unsigned char *d;

 retry:
	err = submit_vpd_inquiry(sdev, h);

	if (err != SCSI_DH_OK)
		return err;

	/* Check if vpd page exceeds initial buffer */
	len = (h->buff[2] << 8) + h->buff[3] + 4;
	if (len > h->bufflen) {
		/* Resubmit with the correct length */
		if (realloc_buffer(h, len)) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",
				    ALUA_DH_NAME);
			/* Temporary failure, bypass */
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	/*
	 * Now look for the correct descriptor.
	 * Walk the identification descriptor list; each entry is
	 * 4 header bytes plus d[3] payload bytes.
	 */
	d = h->buff + 4;
	while (d < h->buff + len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			h->rel_port = (d[6] << 8) + d[7];
			break;
		case 0x5:
			/* Target port group */
			h->group_id = (d[6] << 8) + d[7];
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}

	if (h->group_id == -1) {
		/*
		 * Internal error; TPGS supported but required
		 * VPD identification descriptors not present.
		 * Disable ALUA support
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No target port descriptors found\n",
			    ALUA_DH_NAME);
		h->state = TPGS_STATE_OPTIMIZED;
		h->tpgs = TPGS_MODE_NONE;
		err = SCSI_DH_DEV_UNSUPP;
	} else {
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %02x rel port %02x\n",
			    ALUA_DH_NAME, h->group_id, h->rel_port);
	}

	return err;
}

/* Map a TPGS_STATE_* value to the single-character code used in log messages. */
static char print_alua_state(int state)
{
	switch (state) {
	case TPGS_STATE_OPTIMIZED:
		return 'A';
	case TPGS_STATE_NONOPTIMIZED:
		return 'N';
	case TPGS_STATE_STANDBY:
		return 'S';
	case TPGS_STATE_UNAVAILABLE:
		return 'U';
	case TPGS_STATE_LBA_DEPENDENT:
		return 'L';
	case TPGS_STATE_OFFLINE:
		return 'O';
	case TPGS_STATE_TRANSITIONING:
		return 'T';
	default:
		return 'X';
	}
}

/*
 * alua_check_sense - classify ALUA-related sense data.
 *
 * Returns ADD_TO_MLQUEUE for retryable conditions, SUCCESS for states
 * where the command completed against an inaccessible path, FAILED to
 * engage the error handler, or SCSI_RETURN_NOT_HANDLED otherwise.
 */
static int alua_check_sense(struct scsi_device *sdev,
			    struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
			/*
			 * LUN Not Accessible -- Target port in standby state
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
			/*
			 * LUN Not Accessible -- Target port in unavailable state
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
			/*
			 * LUN Not Ready -- Offline
			 */
			return SUCCESS;
		if (sdev->allow_restart &&
		    sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
			/*
			 * if the device is not started, we need to wake
			 * the error handler to start the motor
			 */
			return FAILED;
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
			/*
			 * Device internal reset
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
			/*
			 * Mode Parameters Changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
			/*
			 * ALUA state changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
			/*
			 * Implicit ALUA state transition failed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
			/*
			 * Inquiry data has changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
			/*
			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
			 * when switching controllers on targets like
			 * Intel Multi-Flex. We can just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}

	return SCSI_RETURN_NOT_HANDLED;
}

/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
*/ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition) { struct scsi_sense_hdr sense_hdr; int len, k, off, valid_states = 0; unsigned char *ucp; unsigned err; bool rtpg_ext_hdr_req = 1; unsigned long expiry, interval = 0; unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; if (!h->transition_tmo) expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ); else expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ); retry: err = submit_rtpg(sdev, h, rtpg_ext_hdr_req); if (err == SCSI_DH_IO && h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) return SCSI_DH_IO; /* * submit_rtpg() has failed on existing arrays * when requesting extended header info, and * the array doesn't support extended headers, * even though it shouldn't according to T10. * The retry without rtpg_ext_hdr_req set * handles this. */ if (rtpg_ext_hdr_req == 1 && sense_hdr.sense_key == ILLEGAL_REQUEST && sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) { rtpg_ext_hdr_req = 0; goto retry; } err = alua_check_sense(sdev, &sense_hdr); if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry)) goto retry; sdev_printk(KERN_INFO, sdev, "%s: rtpg sense code %02x/%02x/%02x\n", ALUA_DH_NAME, sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); err = SCSI_DH_IO; } if (err != SCSI_DH_OK) return err; len = (h->buff[0] << 24) + (h->buff[1] << 16) + (h->buff[2] << 8) + h->buff[3] + 4; if (len > h->bufflen) { /* Resubmit with the correct length */ if (realloc_buffer(h, len)) { sdev_printk(KERN_WARNING, sdev, "%s: kmalloc buffer failed\n",__func__); /* Temporary failure, bypass */ return SCSI_DH_DEV_TEMP_BUSY; } goto retry; } orig_transition_tmo = h->transition_tmo; if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0) h->transition_tmo = h->buff[5]; else h->transition_tmo = ALUA_FAILOVER_TIMEOUT; if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) { 
sdev_printk(KERN_INFO, sdev, "%s: transition timeout set to %d seconds\n", ALUA_DH_NAME, h->transition_tmo); expiry = jiffies + h->transition_tmo * HZ; } if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) tpg_desc_tbl_off = 8; else tpg_desc_tbl_off = 4; for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off; k < len; k += off, ucp += off) { if (h->group_id == (ucp[2] << 8) + ucp[3]) { h->state = ucp[0] & 0x0f; h->pref = ucp[0] >> 7; valid_states = ucp[1]; } off = 8 + (ucp[7] * 4); } sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, h->group_id, print_alua_state(h->state), h->pref ? "preferred" : "non-preferred", valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', valid_states&TPGS_SUPPORT_STANDBY?'S':'s', valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); switch (h->state) { case TPGS_STATE_TRANSITIONING: if (wait_for_transition) { if (time_before(jiffies, expiry)) { /* State transition, retry */ interval += 2000; msleep(interval); goto retry; } err = SCSI_DH_RETRY; } else { err = SCSI_DH_OK; } /* Transitioning time exceeded, set port to standby */ h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: /* Path unusable */ err = SCSI_DH_DEV_OFFLINED; break; default: /* Useable path if active */ err = SCSI_DH_OK; break; } return err; } /* * alua_initialize - Initialize ALUA state * @sdev: the device to be initialized * * For the prep_fn to work correctly we have * to initialize the ALUA state for the device. 
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err;

	err = alua_check_tpgs(sdev, h);
	if (err != SCSI_DH_OK)
		goto out;

	err = alua_vpd_inquiry(sdev, h);
	if (err != SCSI_DH_OK)
		goto out;

	err = alua_rtpg(sdev, h, 0);
	if (err != SCSI_DH_OK)
		goto out;

out:
	return err;
}

/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * params - parameters in the following format
 *      "no_of_params\0param1\0param2\0param3\0...\0"
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *     hardware_handler        "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = get_alua_data(sdev);
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;

	/* exactly one parameter is expected */
	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	/* advance past the NUL-terminated count to the first parameter */
	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	if (optimize)
		h->flags |= ALUA_OPTIMIZE_STPG;
	else
		h->flags &= ~ALUA_OPTIMIZE_STPG;

	return result;
}

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We're currently switching the port group to be activated only and
 * let the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			 activate_complete fn, void *data)
{
	struct alua_dh_data *h = get_alua_data(sdev);
	int err = SCSI_DH_OK;
	int stpg = 0;

	err = alua_rtpg(sdev, h, 1);
	if (err != SCSI_DH_OK)
		goto out;

	if (optimize_stpg)
		h->flags |= ALUA_OPTIMIZE_STPG;

	/* Decide whether an explicit STPG is needed for this path's state */
	if (h->tpgs & TPGS_MODE_EXPLICIT) {
		switch (h->state) {
		case TPGS_STATE_NONOPTIMIZED:
			stpg = 1;
			/* with the optimize flag, a non-preferred path on an
			 * implicit-capable array is used as-is */
			if ((h->flags & ALUA_OPTIMIZE_STPG) &&
			    (!h->pref) &&
			    (h->tpgs & TPGS_MODE_IMPLICIT))
				stpg = 0;
			break;
		case TPGS_STATE_STANDBY:
		case TPGS_STATE_UNAVAILABLE:
			stpg = 1;
			break;
		case TPGS_STATE_OFFLINE:
			err = SCSI_DH_IO;
			break;
		case TPGS_STATE_TRANSITIONING:
			err = SCSI_DH_RETRY;
			break;
		default:
			break;
		}
	}

	if (stpg) {
		/* asynchronous: fn is invoked from stpg_endio() */
		h->callback_fn = fn;
		h->callback_data = data;
		err = submit_stpg(h);
		if (err == SCSI_DH_OK)
			return 0;
		h->callback_fn = h->callback_data = NULL;
	}

out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = get_alua_data(sdev);
	int ret = BLKPREP_OK;

	if (h->state == TPGS_STATE_TRANSITIONING)
		ret = BLKPREP_DEFER;
	else if (h->state != TPGS_STATE_OPTIMIZED &&
		 h->state != TPGS_STATE_NONOPTIMIZED &&
		 h->state != TPGS_STATE_LBA_DEPENDENT) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

/* Attach only to devices that report TPGS support in INQUIRY */
static bool alua_match(struct scsi_device *sdev)
{
	return (scsi_device_tpgs(sdev) != 0);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);
	h->tpgs = TPGS_MODE_UNINITIALIZED;
	h->state = TPGS_STATE_OPTIMIZED;
	h->group_id = -1;
	h->rel_port = -1;
	h->buff = h->inq;	/* start with the embedded buffer */
	h->bufflen = ALUA_INQUIRY_SIZE;
	h->sdev = sdev;

	/* an offlined path is still attached; other errors reject the device */
	err = alua_initialize(sdev, h);
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
	return &h->dh_data;
failed:
	kfree(h);
	return ERR_PTR(-EINVAL);
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = get_alua_data(sdev);

	if (h->buff && h->inq != h->buff)
		kfree(h->buff);
	kfree(h);
}

static struct scsi_device_handler alua_dh = {
	.name		= ALUA_DH_NAME,
	.module		= THIS_MODULE,
	.attach		= alua_bus_attach,
	.detach		= alua_bus_detach,
	.prep_fn	= alua_prep_fn,
	.check_sense	= alua_check_sense,
	.activate	= alua_activate,
	.set_params	= alua_set_params,
	.match		= alua_match,
};

static int __init alua_init(void)
{
	int r;

	r = scsi_register_device_handler(&alua_dh);
	if (r != 0)
		printk(KERN_ERR "%s: Failed to register scsi device handler",
			ALUA_DH_NAME);
	return r;
}

static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);
gpl-2.0
Metallium-Devices/android_kernel_google_msm
arch/arm/mach-msm/devices-msm7x27.c
1169
19795
/*
 * Copyright (C) 2008 Google, Inc.
 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* Platform device and resource tables for MSM7x27 on-SoC peripherals. */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <mach/kgsl.h>
#include <linux/regulator/machine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <asm/clkdev.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/board.h>

#include "devices.h"
#include "footswitch.h"
#include "acpuclock.h"

#include <asm/mach/flash.h>
#include <asm/mach/mmc.h>
#include <mach/msm_hsusb.h>
#include <mach/usbdiag.h>
#include <mach/rpc_hsusb.h>
#include "irq.h"
#include "pm.h"

/* Legacy (non-DM) UART1/UART2 blocks */
static struct resource resources_uart1[] = {
	{ .start = INT_UART1, .end = INT_UART1, .flags = IORESOURCE_IRQ, },
	{ .start = MSM7XXX_UART1_PHYS, .end = MSM7XXX_UART1_PHYS + MSM7XXX_UART1_SIZE - 1, .flags = IORESOURCE_MEM, },
};

static struct resource resources_uart2[] = {
	{ .start = INT_UART2, .end = INT_UART2, .flags = IORESOURCE_IRQ, },
	{ .start = MSM7XXX_UART2_PHYS, .end = MSM7XXX_UART2_PHYS + MSM7XXX_UART2_SIZE - 1, .flags = IORESOURCE_MEM, },
};

struct platform_device msm_device_uart1 = {
	.name		= "msm_serial",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(resources_uart1),
	.resource	= resources_uart1,
};

struct platform_device msm_device_uart2 = {
	.name		= "msm_serial",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(resources_uart2),
	.resource	= resources_uart2,
};

static struct resource resources_adsp[] = {
	{ .start = INT_ADSP_A9_A11, .end = INT_ADSP_A9_A11, .flags = IORESOURCE_IRQ, },
};

struct platform_device msm_adsp_device = {
	.name		= "msm_adsp",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_adsp),
	.resource	= resources_adsp,
};

#define MSM_UART1DM_PHYS      0xA0200000
#define MSM_UART2DM_PHYS      0xA0300000

/* High-speed UARTDM blocks with data-mover (DMA) channels/CRCIs */
static struct resource msm_uart1_dm_resources[] = {
	{ .start = MSM_UART1DM_PHYS, .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_UART1DM_IRQ, .end = INT_UART1DM_IRQ, .flags = IORESOURCE_IRQ, },
	{ .start = INT_UART1DM_RX, .end = INT_UART1DM_RX, .flags = IORESOURCE_IRQ, },
	{ .start = DMOV_HSUART1_TX_CHAN, .end = DMOV_HSUART1_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, },
	{ .start = DMOV_HSUART1_TX_CRCI, .end = DMOV_HSUART1_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, },
};

static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32);

struct platform_device msm_device_uart_dm1 = {
	.name		= "msm_serial_hs",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(msm_uart1_dm_resources),
	.resource	= msm_uart1_dm_resources,
	.dev		= {
		.dma_mask		= &msm_uart_dm1_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};

static struct resource msm_uart2_dm_resources[] = {
	{ .start = MSM_UART2DM_PHYS, .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_UART2DM_IRQ, .end = INT_UART2DM_IRQ, .flags = IORESOURCE_IRQ, },
	{ .start = INT_UART2DM_RX, .end = INT_UART2DM_RX, .flags = IORESOURCE_IRQ, },
	{ .start = DMOV_HSUART2_TX_CHAN, .end = DMOV_HSUART2_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, },
	{ .start = DMOV_HSUART2_TX_CRCI, .end = DMOV_HSUART2_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, },
};

static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32);

struct platform_device msm_device_uart_dm2 = {
	.name		= "msm_serial_hs",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(msm_uart2_dm_resources),
	.resource	= msm_uart2_dm_resources,
	.dev		= {
		.dma_mask		= &msm_uart_dm2_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};

#define MSM_I2C_SIZE          SZ_4K
#define MSM_I2C_PHYS          0xA9900000

static struct resource resources_i2c[] = {
	{ .start = MSM_I2C_PHYS, .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_PWB_I2C, .end = INT_PWB_I2C, .flags = IORESOURCE_IRQ, },
};

struct platform_device msm_device_i2c = {
	.name		= "msm_i2c",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(resources_i2c),
	.resource	= resources_i2c,
};

#define MSM_HSUSB_PHYS        0xA0800000

/*
 * All HSUSB-related devices share the same controller register window
 * and interrupt; the variants differ only in which driver binds to them.
 */
static struct resource resources_hsusb_otg[] = {
	{ .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, },
};

static u64 dma_mask = 0xffffffffULL;

struct platform_device msm_device_hsusb_otg = {
	.name		= "msm_hsusb_otg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_hsusb_otg),
	.resource	= resources_hsusb_otg,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= 0xffffffffULL,
	},
};

static struct resource resources_hsusb_peripheral[] = {
	{ .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, },
};

static struct resource resources_gadget_peripheral[] = {
	{ .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, },
};

struct platform_device msm_device_hsusb_peripheral = {
	.name		= "msm_hsusb_peripheral",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_hsusb_peripheral),
	.resource	= resources_hsusb_peripheral,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= 0xffffffffULL,
	},
};

struct platform_device msm_device_gadget_peripheral = {
	.name		= "msm_hsusb",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_gadget_peripheral),
	.resource	= resources_gadget_peripheral,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= 0xffffffffULL,
	},
};

static struct resource resources_hsusb_host[] = {
	{ .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, },
};

struct platform_device msm_device_hsusb_host = {
	.name		= "msm_hsusb_host",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(resources_hsusb_host),
	.resource	= resources_hsusb_host,
	.dev		= {
		.dma_mask		= &dma_mask,
		.coherent_dma_mask	= 0xffffffffULL,
	},
};

static struct platform_device *msm_host_devices[] = {
	&msm_device_hsusb_host,
};

/*
 * msm_add_host - register a USB host controller with board platform data.
 * @host: index into msm_host_devices
 * @plat: board-specific host platform data (attached before registration)
 *
 * NOTE(review): @host is not range-checked against the array size here;
 * callers are presumably board files passing 0 — confirm.
 */
int msm_add_host(unsigned int host, struct msm_usb_host_platform_data *plat)
{
	struct platform_device	*pdev;

	pdev = msm_host_devices[host];
	if (!pdev)
		return -ENODEV;
	pdev->dev.platform_data = plat;
	return platform_device_register(pdev);
}

struct platform_device asoc_msm_pcm = {
	.name	= "msm-dsp-audio",
	.id	= 0,
};

struct platform_device asoc_msm_dai0 = {
	.name	= "msm-codec-dai",
	.id	= 0,
};

struct platform_device asoc_msm_dai1 = {
	.name	= "msm-cpu-dai",
	.id	= 0,
};

#define MSM_NAND_PHYS		0xA0A00000

static struct resource resources_nand[] = {
	[0] = {
		.name	= "msm_nand_dmac",
		.start	= DMOV_NAND_CHAN,
		.end	= DMOV_NAND_CHAN,
		.flags	= IORESOURCE_DMA,
	},
	[1] = {
		.name	= "msm_nand_phys",
		.start	= MSM_NAND_PHYS,
		.end	= MSM_NAND_PHYS + 0x7FF,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource resources_otg[] = {
	{ .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, },
	{ .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, },
};

struct platform_device msm_device_otg = {
	.name		= "msm_otg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_otg),
	.resource	= resources_otg,
	.dev		= {
		.coherent_dma_mask	= 0xffffffffULL,
	},
};

/* Partition table is filled in by the board file / bootloader parsing */
struct flash_platform_data msm_nand_data = {
	.parts		= NULL,
	.nr_parts	= 0,
};

struct platform_device msm_device_nand = {
	.name		= "msm_nand",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_nand),
	.resource	= resources_nand,
	.dev		= {
		.platform_data	= &msm_nand_data,
	},
};

struct platform_device msm_device_smd = {
	.name	= "msm_smd",
	.id	= -1,
};

static struct resource msm_dmov_resource[] = {
	{ .start = INT_ADM_AARM, .flags = IORESOURCE_IRQ, },
	{ .start = 0xA9700000, .end = 0xA9700000 + SZ_4K - 1, .flags = IORESOURCE_MEM, },
};

static struct msm_dmov_pdata msm_dmov_pdata = {
	.sd		= 3,
	.sd_size	= 0x400,
};

struct platform_device msm_device_dmov = {
	.name		= "msm_dmov",
	.id		= -1,
	.resource	= msm_dmov_resource,
	.num_resources	= ARRAY_SIZE(msm_dmov_resource),
	.dev		= {
		.platform_data	= &msm_dmov_pdata,
	},
};

/* Hooks the generic MSM PM code into this SoC's interrupt controller */
static struct msm_pm_irq_calls msm7x27_pm_irq_calls = {
	.irq_pending		= msm_irq_pending,
	.idle_sleep_allowed	= msm_irq_idle_sleep_allowed,
	.enter_sleep1		= msm_irq_enter_sleep1,
	.enter_sleep2		= msm_irq_enter_sleep2,
	.exit_sleep1		= msm_irq_exit_sleep1,
	.exit_sleep2		= msm_irq_exit_sleep2,
	.exit_sleep3		= msm_irq_exit_sleep3,
};

void __init msm_pm_register_irqs(void)
{
	msm_pm_set_irq_extns(&msm7x27_pm_irq_calls);
}

static struct acpuclk_pdata msm7x27_acpuclk_pdata = {
	.max_speed_delta_khz = 400000,
};

struct platform_device msm7x27_device_acpuclk = {
	.name		= "acpuclk-7627",
	.id		= -1,
	.dev.platform_data = &msm7x27_acpuclk_pdata,
};

#define MSM_SDC1_BASE         0xA0400000
#define MSM_SDC2_BASE         0xA0500000
#define MSM_SDC3_BASE         0xA0600000
#define MSM_SDC4_BASE         0xA0700000

static struct resource resources_sdc1[] = {
	{ .name = "core_mem", .start = MSM_SDC1_BASE, .end = MSM_SDC1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, },
	{ .name = "core_irq", .start = INT_SDC1_0, .end = INT_SDC1_1, .flags = IORESOURCE_IRQ, },
	{ .name = "dma_chnl", .start = DMOV_SDC1_CHAN, .end = DMOV_SDC1_CHAN, .flags = IORESOURCE_DMA, },
	{ .name = "dma_crci", .start = DMOV_SDC1_CRCI, .end = DMOV_SDC1_CRCI, .flags = IORESOURCE_DMA, }
};

static struct resource resources_sdc2[] = {
	{ .name = "core_mem", .start = MSM_SDC2_BASE, .end = MSM_SDC2_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, },
	{ .name = "core_irq", .start = INT_SDC2_0, .end = INT_SDC2_1, .flags = IORESOURCE_IRQ, },
	{ .name = "dma_chnl", .start = DMOV_SDC2_CHAN, .end = DMOV_SDC2_CHAN, .flags = IORESOURCE_DMA, },
	{ .name = "dma_crci", .start = DMOV_SDC2_CRCI, .end = DMOV_SDC2_CRCI, .flags = IORESOURCE_DMA, }
};
static struct resource resources_sdc3[] = { { .name = "core_mem", .start = MSM_SDC3_BASE, .end = MSM_SDC3_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC3_0, .end = INT_SDC3_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC3_CHAN, .end = DMOV_SDC3_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC3_CRCI, .end = DMOV_SDC3_CRCI, .flags = IORESOURCE_DMA, }, }; static struct resource resources_sdc4[] = { { .name = "core_mem", .start = MSM_SDC4_BASE, .end = MSM_SDC4_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC4_0, .end = INT_SDC4_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC4_CHAN, .end = DMOV_SDC4_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC4_CRCI, .end = DMOV_SDC4_CRCI, .flags = IORESOURCE_DMA, }, }; struct platform_device msm_device_sdc1 = { .name = "msm_sdcc", .id = 1, .num_resources = ARRAY_SIZE(resources_sdc1), .resource = resources_sdc1, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc2 = { .name = "msm_sdcc", .id = 2, .num_resources = ARRAY_SIZE(resources_sdc2), .resource = resources_sdc2, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc3 = { .name = "msm_sdcc", .id = 3, .num_resources = ARRAY_SIZE(resources_sdc3), .resource = resources_sdc3, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc4 = { .name = "msm_sdcc", .id = 4, .num_resources = ARRAY_SIZE(resources_sdc4), .resource = resources_sdc4, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct platform_device *msm_sdcc_devices[] __initdata = { &msm_device_sdc1, &msm_device_sdc2, &msm_device_sdc3, &msm_device_sdc4, }; int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat) { struct platform_device *pdev; if (controller < 1 || controller > 4) return -EINVAL; pdev = msm_sdcc_devices[controller-1]; 
pdev->dev.platform_data = plat; return platform_device_register(pdev); } #if defined(CONFIG_FB_MSM_MDP40) #define MDP_BASE 0xA3F00000 #define PMDH_BASE 0xAD600000 #define EMDH_BASE 0xAD700000 #define TVENC_BASE 0xAD400000 #else #define MDP_BASE 0xAA200000 #define PMDH_BASE 0xAA600000 #define EMDH_BASE 0xAA700000 #define TVENC_BASE 0xAA400000 #endif static struct resource msm_mdp_resources[] = { { .name = "mdp", .start = MDP_BASE, .end = MDP_BASE + 0x000F0000 - 1, .flags = IORESOURCE_MEM, }, { .start = INT_MDP, .end = INT_MDP, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_mddi_resources[] = { { .name = "pmdh", .start = PMDH_BASE, .end = PMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_mddi_ext_resources[] = { { .name = "emdh", .start = EMDH_BASE, .end = EMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_ebi2_lcd_resources[] = { { .name = "base", .start = 0xa0d00000, .end = 0xa0d00000 + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd01", .start = 0x98000000, .end = 0x98000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd02", .start = 0x9c000000, .end = 0x9c000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, }; static struct resource msm_tvenc_resources[] = { { .name = "tvenc", .start = TVENC_BASE, .end = TVENC_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device msm_mdp_device = { .name = "mdp", .id = 0, .num_resources = ARRAY_SIZE(msm_mdp_resources), .resource = msm_mdp_resources, }; static struct platform_device msm_mddi_device = { .name = "mddi", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_resources), .resource = msm_mddi_resources, }; static struct platform_device msm_mddi_ext_device = { .name = "mddi_ext", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_ext_resources), .resource = msm_mddi_ext_resources, }; static struct platform_device msm_ebi2_lcd_device = { .name = "ebi2_lcd", .id = 0, .num_resources = 
ARRAY_SIZE(msm_ebi2_lcd_resources), .resource = msm_ebi2_lcd_resources, }; static struct platform_device msm_lcdc_device = { .name = "lcdc", .id = 0, }; static struct platform_device msm_tvenc_device = { .name = "tvenc", .id = 0, .num_resources = ARRAY_SIZE(msm_tvenc_resources), .resource = msm_tvenc_resources, }; /* TSIF begin */ #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) #define MSM_TSIF_PHYS (0xa0100000) #define MSM_TSIF_SIZE (0x200) static struct resource tsif_resources[] = { [0] = { .flags = IORESOURCE_IRQ, .start = INT_TSIF_IRQ, .end = INT_TSIF_IRQ, }, [1] = { .flags = IORESOURCE_MEM, .start = MSM_TSIF_PHYS, .end = MSM_TSIF_PHYS + MSM_TSIF_SIZE - 1, }, [2] = { .flags = IORESOURCE_DMA, .start = DMOV_TSIF_CHAN, .end = DMOV_TSIF_CRCI, }, }; static void tsif_release(struct device *dev) { dev_info(dev, "release\n"); } struct platform_device msm_device_tsif = { .name = "msm_tsif", .id = 0, .num_resources = ARRAY_SIZE(tsif_resources), .resource = tsif_resources, .dev = { .release = tsif_release, }, }; #endif /* defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) */ /* TSIF end */ #define MSM_TSSC_PHYS 0xAA300000 static struct resource resources_tssc[] = { { .start = MSM_TSSC_PHYS, .end = MSM_TSSC_PHYS + SZ_4K - 1, .name = "tssc", .flags = IORESOURCE_MEM, }, { .start = INT_TCHSCRN1, .end = INT_TCHSCRN1, .name = "tssc1", .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, }, { .start = INT_TCHSCRN2, .end = INT_TCHSCRN2, .name = "tssc2", .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, }, }; struct platform_device msm_device_tssc = { .name = "msm_touchscreen", .id = 0, .num_resources = ARRAY_SIZE(resources_tssc), .resource = resources_tssc, }; static void __init msm_register_device(struct platform_device *pdev, void *data) { int ret; pdev->dev.platform_data = data; ret = platform_device_register(pdev); if (ret) dev_err(&pdev->dev, "%s: platform_device_register() failed = %d\n", __func__, ret); } void __init msm_fb_register_device(char *name, void *data) { if 
(!strncmp(name, "mdp", 3)) msm_register_device(&msm_mdp_device, data); else if (!strncmp(name, "pmdh", 4)) msm_register_device(&msm_mddi_device, data); else if (!strncmp(name, "emdh", 4)) msm_register_device(&msm_mddi_ext_device, data); else if (!strncmp(name, "ebi2", 4)) msm_register_device(&msm_ebi2_lcd_device, data); else if (!strncmp(name, "tvenc", 5)) msm_register_device(&msm_tvenc_device, data); else if (!strncmp(name, "lcdc", 4)) msm_register_device(&msm_lcdc_device, data); else printk(KERN_ERR "%s: unknown device! %s\n", __func__, name); } static struct platform_device msm_camera_device = { .name = "msm_camera", .id = 0, }; void __init msm_camera_register_device(void *res, uint32_t num, void *data) { msm_camera_device.num_resources = num; msm_camera_device.resource = res; msm_register_device(&msm_camera_device, data); } static struct resource kgsl_3d0_resources[] = { { .name = KGSL_3D0_REG_MEMORY, .start = 0xA0000000, .end = 0xA001ffff, .flags = IORESOURCE_MEM, }, { .name = KGSL_3D0_IRQ, .start = INT_GRAPHICS, .end = INT_GRAPHICS, .flags = IORESOURCE_IRQ, }, }; static struct kgsl_device_platform_data kgsl_3d0_pdata = { /* bus_freq has been set to 160000 for power savings. 
* OEMs may modify the value at their discretion for performance * The appropriate maximum replacement for 160000 is: * msm7x2x_clock_data.max_axi_khz */ .pwrlevel = { { .gpu_freq = 0, .bus_freq = 160000000, }, }, .init_level = 0, .num_levels = 1, .set_grp_async = NULL, .idle_timeout = HZ, .strtstp_sleepwake = true, .clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM, }; struct platform_device msm_kgsl_3d0 = { .name = "kgsl-3d0", .id = 0, .num_resources = ARRAY_SIZE(kgsl_3d0_resources), .resource = kgsl_3d0_resources, .dev = { .platform_data = &kgsl_3d0_pdata, }, }; struct platform_device *msm_footswitch_devices[] = { FS_PCOM(FS_GFX3D, "vdd", "kgsl-3d0.0"), }; unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); static struct resource gpio_resources[] = { { .start = INT_GPIO_GROUP1, .flags = IORESOURCE_IRQ, }, { .start = INT_GPIO_GROUP2, .flags = IORESOURCE_IRQ, }, }; static struct platform_device msm_device_gpio = { .name = "msmgpio", .id = -1, .resource = gpio_resources, .num_resources = ARRAY_SIZE(gpio_resources), }; static int __init msm7627_init_gpio(void) { platform_device_register(&msm_device_gpio); return 0; } postcore_initcall(msm7627_init_gpio);
gpl-2.0
allan888/Linux_anti_malware_file_system
drivers/mtd/maps/bfin-async-flash.c
1425
5073
/* * drivers/mtd/maps/bfin-async-flash.c * * Handle the case where flash memory and ethernet mac/phy are * mapped onto the same async bank. The BF533-STAMP does this * for example. All board-specific configuration goes in your * board resources file. * * Copyright 2000 Nicolas Pitre <nico@fluxnic.net> * Copyright 2005-2008 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/blackfin.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/unaligned.h> #define pr_devinit(fmt, args...) \ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); }) #define DRIVER_NAME "bfin-async-flash" struct async_state { struct mtd_info *mtd; struct map_info map; int enet_flash_pin; uint32_t flash_ambctl0, flash_ambctl1; uint32_t save_ambctl0, save_ambctl1; unsigned long irq_flags; }; static void switch_to_flash(struct async_state *state) { local_irq_save(state->irq_flags); gpio_set_value(state->enet_flash_pin, 0); state->save_ambctl0 = bfin_read_EBIU_AMBCTL0(); state->save_ambctl1 = bfin_read_EBIU_AMBCTL1(); bfin_write_EBIU_AMBCTL0(state->flash_ambctl0); bfin_write_EBIU_AMBCTL1(state->flash_ambctl1); SSYNC(); } static void switch_back(struct async_state *state) { bfin_write_EBIU_AMBCTL0(state->save_ambctl0); bfin_write_EBIU_AMBCTL1(state->save_ambctl1); SSYNC(); gpio_set_value(state->enet_flash_pin, 1); local_irq_restore(state->irq_flags); } static map_word bfin_flash_read(struct map_info *map, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t word; map_word test; switch_to_flash(state); word = readw(map->virt + ofs); switch_back(state); test.x[0] = word; return test; } static void 
bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(to, map->virt + from, len); switch_back(state); } static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t d; d = d1.x[0]; switch_to_flash(state); writew(d, map->virt + ofs); SSYNC(); switch_back(state); } static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(map->virt + to, from, len); SSYNC(); switch_back(state); } static const char * const part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; static int bfin_flash_probe(struct platform_device *pdev) { struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev); struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1); struct async_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; state->map.name = DRIVER_NAME; state->map.read = bfin_flash_read; state->map.copy_from = bfin_flash_copy_from; state->map.write = bfin_flash_write; state->map.copy_to = bfin_flash_copy_to; state->map.bankwidth = pdata->width; state->map.size = resource_size(memory); state->map.virt = (void __iomem *)memory->start; state->map.phys = memory->start; state->map.map_priv_1 = (unsigned long)state; state->enet_flash_pin = platform_get_irq(pdev, 0); state->flash_ambctl0 = flash_ambctl->start; state->flash_ambctl1 = flash_ambctl->end; if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) { pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin); kfree(state); return -EBUSY; } gpio_direction_output(state->enet_flash_pin, 1); pr_devinit(KERN_NOTICE 
DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8); state->mtd = do_map_probe(memory->name, &state->map); if (!state->mtd) { gpio_free(state->enet_flash_pin); kfree(state); return -ENXIO; } mtd_device_parse_register(state->mtd, part_probe_types, NULL, pdata->parts, pdata->nr_parts); platform_set_drvdata(pdev, state); return 0; } static int bfin_flash_remove(struct platform_device *pdev) { struct async_state *state = platform_get_drvdata(pdev); gpio_free(state->enet_flash_pin); mtd_device_unregister(state->mtd); map_destroy(state->mtd); kfree(state); return 0; } static struct platform_driver bfin_flash_driver = { .probe = bfin_flash_probe, .remove = bfin_flash_remove, .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(bfin_flash_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
gpl-2.0
tomdean1/linux
drivers/scsi/libfc/fc_lport.c
1937
57757
/* * Copyright(c) 2007 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * PORT LOCKING NOTES * * These comments only apply to the 'port code' which consists of the lport, * disc and rport blocks. * * MOTIVATION * * The lport, disc and rport blocks all have mutexes that are used to protect * those objects. The main motivation for these locks is to prevent from * having an lport reset just before we send a frame. In that scenario the * lport's FID would get set to zero and then we'd send a frame with an * invalid SID. We also need to ensure that states don't change unexpectedly * while processing another state. * * HIERARCHY * * The following hierarchy defines the locking rules. A greater lock * may be held before acquiring a lesser lock, but a lesser lock should never * be held while attempting to acquire a greater lock. Here is the hierarchy- * * lport > disc, lport > rport, disc > rport * * CALLBACKS * * The callbacks cause complications with this scheme. There is a callback * from the rport (to either lport or disc) and a callback from disc * (to the lport). * * As rports exit the rport state machine a callback is made to the owner of * the rport to notify success or failure. 
Since the callback is likely to * cause the lport or disc to grab its lock we cannot hold the rport lock * while making the callback. To ensure that the rport is not free'd while * processing the callback the rport callbacks are serialized through a * single-threaded workqueue. An rport would never be free'd while in a * callback handler because no other rport work in this queue can be executed * at the same time. * * When discovery succeeds or fails a callback is made to the lport as * notification. Currently, successful discovery causes the lport to take no * action. A failure will cause the lport to reset. There is likely a circular * locking problem with this implementation. */ /* * LPORT LOCKING * * The critical sections protected by the lport's mutex are quite broad and * may be improved upon in the future. The lport code and its locking doesn't * influence the I/O path, so excessive locking doesn't penalize I/O * performance. * * The strategy is to lock whenever processing a request or response. Note * that every _enter_* function corresponds to a state change. They generally * change the lports state and then send a request out on the wire. We lock * before calling any of these functions to protect that state change. This * means that the entry points into the lport block manage the locks while * the state machine can transition between states (i.e. _enter_* functions) * while always staying protected. * * When handling responses we also hold the lport mutex broadly. When the * lport receives the response frame it locks the mutex and then calls the * appropriate handler for the particuar response. Generally a response will * trigger a state change and so the lock must already be held. * * Retries also have to consider the locking. The retries occur from a work * context and the work function will lock the lport and then retry the state * (i.e. _enter_* function). 
*/ #include <linux/timer.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <scsi/fc/fc_gs.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include <linux/scatterlist.h> #include "fc_libfc.h" /* Fabric IDs to use for point-to-point mode, chosen on whims. */ #define FC_LOCAL_PTP_FID_LO 0x010101 #define FC_LOCAL_PTP_FID_HI 0x010102 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); static void fc_lport_enter_logo(struct fc_lport *); static void fc_lport_enter_fdmi(struct fc_lport *lport); static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state); static const char *fc_lport_state_names[] = { [LPORT_ST_DISABLED] = "disabled", [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", [LPORT_ST_RNN_ID] = "RNN_ID", [LPORT_ST_RSNN_NN] = "RSNN_NN", [LPORT_ST_RSPN_ID] = "RSPN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", [LPORT_ST_RFF_ID] = "RFF_ID", [LPORT_ST_FDMI] = "FDMI", [LPORT_ST_RHBA] = "RHBA", [LPORT_ST_RPA] = "RPA", [LPORT_ST_DHBA] = "DHBA", [LPORT_ST_DPRT] = "DPRT", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", [LPORT_ST_LOGO] = "LOGO", [LPORT_ST_RESET] = "reset", }; /** * struct fc_bsg_info - FC Passthrough managemet structure * @job: The passthrough job * @lport: The local port to pass through a command * @rsp_code: The expected response code * @sg: job->reply_payload.sg_list * @nents: job->reply_payload.sg_cnt * @offset: The offset into the response data */ struct fc_bsg_info { struct fc_bsg_job *job; struct fc_lport *lport; u16 rsp_code; struct scatterlist *sg; u32 nents; size_t offset; 
}; /** * fc_frame_drop() - Dummy frame handler * @lport: The local port the frame was received on * @fp: The received frame */ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) { fc_frame_free(fp); return 0; } /** * fc_lport_rport_callback() - Event handler for rport events * @lport: The lport which is receiving the event * @rdata: private remote port data * @event: The event that occurred * * Locking Note: The rport lock should not be held when calling * this function. */ static void fc_lport_rport_callback(struct fc_lport *lport, struct fc_rport_priv *rdata, enum fc_rport_event event) { FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event, rdata->ids.port_id); mutex_lock(&lport->lp_mutex); switch (event) { case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { lport->dns_rdata = rdata; fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); } else if (lport->state == LPORT_ST_FDMI) { lport->ms_rdata = rdata; fc_lport_enter_ms(lport, LPORT_ST_DHBA); } else { FC_LPORT_DBG(lport, "Received an READY event " "on port (%6.6x) for the directory " "server, but the lport is not " "in the DNS or FDMI state, it's in the " "%d state", rdata->ids.port_id, lport->state); lport->tt.rport_logoff(rdata); } break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: if (rdata->ids.port_id == FC_FID_DIR_SERV) lport->dns_rdata = NULL; else if (rdata->ids.port_id == FC_FID_MGMT_SERV) lport->ms_rdata = NULL; break; case RPORT_EV_NONE: break; } mutex_unlock(&lport->lp_mutex); } /** * fc_lport_state() - Return a string which represents the lport's state * @lport: The lport whose state is to converted to a string */ static const char *fc_lport_state(struct fc_lport *lport) { const char *cp; cp = fc_lport_state_names[lport->state]; if (!cp) cp = "unknown"; return cp; } /** * fc_lport_ptp_setup() - Create an rport for point-to-point mode * @lport: The lport to attach the ptp rport to * @remote_fid: The FID of the ptp rport * @remote_wwpn: The WWPN of the 
ptp rport * @remote_wwnn: The WWNN of the ptp rport */ static void fc_lport_ptp_setup(struct fc_lport *lport, u32 remote_fid, u64 remote_wwpn, u64 remote_wwnn) { mutex_lock(&lport->disc.disc_mutex); if (lport->ptp_rdata) { lport->tt.rport_logoff(lport->ptp_rdata); kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); } lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); kref_get(&lport->ptp_rdata->kref); lport->ptp_rdata->ids.port_name = remote_wwpn; lport->ptp_rdata->ids.node_name = remote_wwnn; mutex_unlock(&lport->disc.disc_mutex); lport->tt.rport_login(lport->ptp_rdata); fc_lport_enter_ready(lport); } /** * fc_get_host_port_state() - Return the port state of the given Scsi_Host * @shost: The SCSI host whose port state is to be determined */ void fc_get_host_port_state(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); mutex_lock(&lport->lp_mutex); if (!lport->link_up) fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; else switch (lport->state) { case LPORT_ST_READY: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; default: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; } mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_get_host_port_state); /** * fc_get_host_speed() - Return the speed of the given Scsi_Host * @shost: The SCSI host whose port speed is to be determined */ void fc_get_host_speed(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); fc_host_speed(shost) = lport->link_speed; } EXPORT_SYMBOL(fc_get_host_speed); /** * fc_get_host_stats() - Return the Scsi_Host's statistics * @shost: The SCSI host whose statistics are to be returned */ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *fc_stats; struct fc_lport *lport = shost_priv(shost); struct timespec v0, v1; unsigned int cpu; u64 fcp_in_bytes = 0; u64 fcp_out_bytes = 0; fc_stats = &lport->host_stats; memset(fc_stats, 0, sizeof(struct fc_host_statistics)); jiffies_to_timespec(jiffies, 
&v0); jiffies_to_timespec(lport->boot_time, &v1); fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); for_each_possible_cpu(cpu) { struct fc_stats *stats; stats = per_cpu_ptr(lport->stats, cpu); fc_stats->tx_frames += stats->TxFrames; fc_stats->tx_words += stats->TxWords; fc_stats->rx_frames += stats->RxFrames; fc_stats->rx_words += stats->RxWords; fc_stats->error_frames += stats->ErrorFrames; fc_stats->invalid_crc_count += stats->InvalidCRCCount; fc_stats->fcp_input_requests += stats->InputRequests; fc_stats->fcp_output_requests += stats->OutputRequests; fc_stats->fcp_control_requests += stats->ControlRequests; fcp_in_bytes += stats->InputBytes; fcp_out_bytes += stats->OutputBytes; fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails; fc_stats->fcp_packet_aborts += stats->FcpPktAborts; fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails; fc_stats->link_failure_count += stats->LinkFailureCount; } fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); fc_stats->lip_count = -1; fc_stats->nos_count = -1; fc_stats->loss_of_sync_count = -1; fc_stats->loss_of_signal_count = -1; fc_stats->prim_seq_protocol_err_count = -1; fc_stats->dumped_frames = -1; /* update exches stats */ fc_exch_update_stats(lport); return fc_stats; } EXPORT_SYMBOL(fc_get_host_stats); /** * fc_lport_flogi_fill() - Fill in FLOGI command for request * @lport: The local port the FLOGI is for * @flogi: The FLOGI command * @op: The opcode */ static void fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, unsigned int op) { struct fc_els_csp *sp; struct fc_els_cssp *cp; memset(flogi, 0, sizeof(*flogi)); flogi->fl_cmd = (u8) op; put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); sp = &flogi->fl_csp; sp->sp_hi_ver = 0x20; sp->sp_lo_ver = 0x20; sp->sp_bb_cred = htons(10); /* this gets set by gateway */ sp->sp_bb_data = htons((u16) 
lport->mfs); cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); if (op != ELS_FLOGI) { sp->sp_features = htons(FC_SP_FT_CIRO); sp->sp_tot_seq = htons(255); /* seq. we accept */ sp->sp_rel_off = htons(0x1f); sp->sp_e_d_tov = htonl(lport->e_d_tov); cp->cp_rdfs = htons((u16) lport->mfs); cp->cp_con_seq = htons(255); cp->cp_open_seq = 1; } } /** * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port * @lport: The local port to add a new FC-4 type to * @type: The new FC-4 type */ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) { __be32 *mp; mp = &lport->fcts.ff_type_map[type / FC_NS_BPW]; *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW)); } /** * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. * @lport: Fibre Channel local port receiving the RLIR * @fp: The RLIR request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) { FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", fc_lport_state(lport)); lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_frame_free(fp); } /** * fc_lport_recv_echo_req() - Handle received ECHO request * @lport: The local port receiving the ECHO * @fp: ECHO request frame * * Locking Note: The lport lock is expected to be held before calling * this function. 
*/ static void fc_lport_recv_echo_req(struct fc_lport *lport, struct fc_frame *in_fp) { struct fc_frame *fp; unsigned int len; void *pp; void *dp; FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", fc_lport_state(lport)); len = fr_len(in_fp) - sizeof(struct fc_frame_header); pp = fc_frame_payload_get(in_fp, len); if (len < sizeof(__be32)) len = sizeof(__be32); fp = fc_frame_alloc(lport, len); if (fp) { dp = fc_frame_payload_get(fp, len); memcpy(dp, pp, len); *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); } fc_frame_free(in_fp); } /** * fc_lport_recv_rnid_req() - Handle received Request Node ID data request * @lport: The local port receiving the RNID * @fp: The RNID request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_rnid_req(struct fc_lport *lport, struct fc_frame *in_fp) { struct fc_frame *fp; struct fc_els_rnid *req; struct { struct fc_els_rnid_resp rnid; struct fc_els_rnid_cid cid; struct fc_els_rnid_gen gen; } *rp; struct fc_seq_els_data rjt_data; u8 fmt; size_t len; FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", fc_lport_state(lport)); req = fc_frame_payload_get(in_fp, sizeof(*req)); if (!req) { rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); } else { fmt = req->rnid_fmt; len = sizeof(*rp); if (fmt != ELS_RNIDF_GEN || ntohl(lport->rnid_gen.rnid_atype) == 0) { fmt = ELS_RNIDF_NONE; /* nothing to provide */ len -= sizeof(rp->gen); } fp = fc_frame_alloc(lport, len); if (fp) { rp = fc_frame_payload_get(fp, len); memset(rp, 0, len); rp->rnid.rnid_cmd = ELS_LS_ACC; rp->rnid.rnid_fmt = fmt; rp->rnid.rnid_cid_len = sizeof(rp->cid); rp->cid.rnid_wwpn = htonll(lport->wwpn); rp->cid.rnid_wwnn = htonll(lport->wwnn); if (fmt == ELS_RNIDF_GEN) { rp->rnid.rnid_sid_len = sizeof(rp->gen); memcpy(&rp->gen, 
&lport->rnid_gen, sizeof(rp->gen)); } fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); } } fc_frame_free(in_fp); } /** * fc_lport_recv_logo_req() - Handle received fabric LOGO request * @lport: The local port receiving the LOGO * @fp: The LOGO request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_lport_enter_reset(lport); fc_frame_free(fp); } /** * fc_fabric_login() - Start the lport state machine * @lport: The local port that should log into the fabric * * Locking Note: This function should not be called * with the lport lock held. */ int fc_fabric_login(struct fc_lport *lport) { int rc = -1; mutex_lock(&lport->lp_mutex); if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) { fc_lport_state_enter(lport, LPORT_ST_RESET); fc_lport_enter_reset(lport); rc = 0; } mutex_unlock(&lport->lp_mutex); return rc; } EXPORT_SYMBOL(fc_fabric_login); /** * __fc_linkup() - Handler for transport linkup events * @lport: The lport whose link is up * * Locking: must be called with the lp_mutex held */ void __fc_linkup(struct fc_lport *lport) { if (!lport->link_up) { lport->link_up = 1; if (lport->state == LPORT_ST_RESET) fc_lport_enter_flogi(lport); } } /** * fc_linkup() - Handler for transport linkup events * @lport: The local port whose link is up */ void fc_linkup(struct fc_lport *lport) { printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n", lport->host->host_no, lport->port_id); mutex_lock(&lport->lp_mutex); __fc_linkup(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkup); /** * __fc_linkdown() - Handler for transport linkdown events * @lport: The lport whose link is down * * Locking: must be called with the lp_mutex held */ void __fc_linkdown(struct fc_lport *lport) { if (lport->link_up) { lport->link_up = 0; 
fc_lport_enter_reset(lport); lport->tt.fcp_cleanup(lport); } } /** * fc_linkdown() - Handler for transport linkdown events * @lport: The local port whose link is down */ void fc_linkdown(struct fc_lport *lport) { printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n", lport->host->host_no, lport->port_id); mutex_lock(&lport->lp_mutex); __fc_linkdown(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkdown); /** * fc_fabric_logoff() - Logout of the fabric * @lport: The local port to logoff the fabric * * Return value: * 0 for success, -1 for failure */ int fc_fabric_logoff(struct fc_lport *lport) { lport->tt.disc_stop_final(lport); mutex_lock(&lport->lp_mutex); if (lport->dns_rdata) lport->tt.rport_logoff(lport->dns_rdata); mutex_unlock(&lport->lp_mutex); lport->tt.rport_flush_queue(); mutex_lock(&lport->lp_mutex); fc_lport_enter_logo(lport); mutex_unlock(&lport->lp_mutex); cancel_delayed_work_sync(&lport->retry_work); return 0; } EXPORT_SYMBOL(fc_fabric_logoff); /** * fc_lport_destroy() - Unregister a fc_lport * @lport: The local port to unregister * * Note: * exit routine for fc_lport instance * clean-up all the allocated memory * and free up other system resources. 
* */ int fc_lport_destroy(struct fc_lport *lport) { mutex_lock(&lport->lp_mutex); lport->state = LPORT_ST_DISABLED; lport->link_up = 0; lport->tt.frame_send = fc_frame_drop; mutex_unlock(&lport->lp_mutex); lport->tt.fcp_abort_io(lport); lport->tt.disc_stop_final(lport); lport->tt.exch_mgr_reset(lport, 0, 0); cancel_delayed_work_sync(&lport->retry_work); fc_fc4_del_lport(lport); return 0; } EXPORT_SYMBOL(fc_lport_destroy); /** * fc_set_mfs() - Set the maximum frame size for a local port * @lport: The local port to set the MFS for * @mfs: The new MFS */ int fc_set_mfs(struct fc_lport *lport, u32 mfs) { unsigned int old_mfs; int rc = -EINVAL; mutex_lock(&lport->lp_mutex); old_mfs = lport->mfs; if (mfs >= FC_MIN_MAX_FRAME) { mfs &= ~3; if (mfs > FC_MAX_FRAME) mfs = FC_MAX_FRAME; mfs -= sizeof(struct fc_frame_header); lport->mfs = mfs; rc = 0; } if (!rc && mfs < old_mfs) fc_lport_enter_reset(lport); mutex_unlock(&lport->lp_mutex); return rc; } EXPORT_SYMBOL(fc_set_mfs); /** * fc_lport_disc_callback() - Callback for discovery events * @lport: The local port receiving the event * @event: The discovery event */ static void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) { switch (event) { case DISC_EV_SUCCESS: FC_LPORT_DBG(lport, "Discovery succeeded\n"); break; case DISC_EV_FAILED: printk(KERN_ERR "host%d: libfc: " "Discovery failed for port (%6.6x)\n", lport->host->host_no, lport->port_id); mutex_lock(&lport->lp_mutex); fc_lport_enter_reset(lport); mutex_unlock(&lport->lp_mutex); break; case DISC_EV_NONE: WARN_ON(1); break; } } /** * fc_rport_enter_ready() - Enter the ready state and start discovery * @lport: The local port that is ready * * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	/* In point-to-point mode (ptp_rdata set) discovery is skipped. */
	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - set the local port Port ID
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp:      The frame containing the incoming request, or NULL.
 *
 * Records the ID on the lport and the fc_host, then gives the LLD a
 * chance to react via the optional lport_set_port_id template hook.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
 * discovery to be skipped.
 */
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
	mutex_lock(&lport->lp_mutex);

	fc_lport_set_port_id(lport, port_id, NULL);

	/* A non-zero ID assigned while resetting or logging in means ready. */
	switch (lport->state) {
	case LPORT_ST_RESET:
	case LPORT_ST_FLOGI:
		if (port_id)
			fc_lport_enter_ready(lport);
		break;
	default:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);

/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN. * * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_flogi_req(struct fc_lport *lport, struct fc_frame *rx_fp) { struct fc_frame *fp; struct fc_frame_header *fh; struct fc_els_flogi *flp; struct fc_els_flogi *new_flp; u64 remote_wwpn; u32 remote_fid; u32 local_fid; FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", fc_lport_state(lport)); remote_fid = fc_frame_sid(rx_fp); flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); if (!flp) goto out; remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); if (remote_wwpn == lport->wwpn) { printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " "with same WWPN %16.16llx\n", lport->host->host_no, remote_wwpn); goto out; } FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn); /* * XXX what is the right thing to do for FIDs? * The originator might expect our S_ID to be 0xfffffe. * But if so, both of us could end up with the same FID. */ local_fid = FC_LOCAL_PTP_FID_LO; if (remote_wwpn < lport->wwpn) { local_fid = FC_LOCAL_PTP_FID_HI; if (!remote_fid || remote_fid == local_fid) remote_fid = FC_LOCAL_PTP_FID_LO; } else if (!remote_fid) { remote_fid = FC_LOCAL_PTP_FID_HI; } fc_lport_set_port_id(lport, local_fid, rx_fp); fp = fc_frame_alloc(lport, sizeof(*flp)); if (fp) { new_flp = fc_frame_payload_get(fp, sizeof(*flp)); fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); new_flp->fl_cmd = (u8) ELS_LS_ACC; /* * Send the response. If this fails, the originator should * repeat the sequence. 
*/ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); fh = fc_frame_header_get(fp); hton24(fh->fh_s_id, local_fid); hton24(fh->fh_d_id, remote_fid); lport->tt.frame_send(lport, fp); } else { fc_lport_error(lport, fp); } fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, get_unaligned_be64(&flp->fl_wwnn)); out: fc_frame_free(rx_fp); } /** * fc_lport_recv_els_req() - The generic lport ELS request handler * @lport: The local port that received the request * @fp: The request frame * * This function will see if the lport handles the request or * if an rport should handle the request. * * Locking Note: This function should not be called with the lport * lock held because it will grab the lock. */ static void fc_lport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { void (*recv)(struct fc_lport *, struct fc_frame *); mutex_lock(&lport->lp_mutex); /* * Handle special ELS cases like FLOGI, LOGO, and * RSCN here. These don't require a session. * Even if we had a session, it might not be ready. */ if (!lport->link_up) fc_frame_free(fp); else { /* * Check opcode. 
*/ recv = lport->tt.rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: if (!lport->point_to_multipoint) recv = fc_lport_recv_flogi_req; break; case ELS_LOGO: if (fc_frame_sid(fp) == FC_FID_FLOGI) recv = fc_lport_recv_logo_req; break; case ELS_RSCN: recv = lport->tt.disc_recv_req; break; case ELS_ECHO: recv = fc_lport_recv_echo_req; break; case ELS_RLIR: recv = fc_lport_recv_rlir_req; break; case ELS_RNID: recv = fc_lport_recv_rnid_req; break; } recv(lport, fp); } mutex_unlock(&lport->lp_mutex); } static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, const struct fc_els_spp *spp_in, struct fc_els_spp *spp_out) { return FC_SPP_RESP_INVL; } struct fc4_prov fc_lport_els_prov = { .prli = fc_lport_els_prli, .recv = fc_lport_recv_els_req, }; /** * fc_lport_recv_req() - The generic lport request handler * @lport: The lport that received the request * @fp: The frame the request is in * * Locking Note: This function should not be called with the lport * lock held because it may grab the lock. */ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_seq *sp = fr_seq(fp); struct fc4_prov *prov; /* * Use RCU read lock and module_lock to be sure module doesn't * deregister and get unloaded while we're calling it. * try_module_get() is inlined and accepts a NULL parameter. * Only ELSes and FCP target ops should come through here. * The locking is unfortunate, and a better scheme is being sought. 
*/ rcu_read_lock(); if (fh->fh_type >= FC_FC4_PROV_SIZE) goto drop; prov = rcu_dereference(fc_passive_prov[fh->fh_type]); if (!prov || !try_module_get(prov->module)) goto drop; rcu_read_unlock(); prov->recv(lport, fp); module_put(prov->module); return; drop: rcu_read_unlock(); FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); fc_frame_free(fp); if (sp) lport->tt.exch_done(sp); } /** * fc_lport_reset() - Reset a local port * @lport: The local port which should be reset * * Locking Note: This functions should not be called with the * lport lock held. */ int fc_lport_reset(struct fc_lport *lport) { cancel_delayed_work_sync(&lport->retry_work); mutex_lock(&lport->lp_mutex); fc_lport_enter_reset(lport); mutex_unlock(&lport->lp_mutex); return 0; } EXPORT_SYMBOL(fc_lport_reset); /** * fc_lport_reset_locked() - Reset the local port w/ the lport lock held * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_reset_locked(struct fc_lport *lport) { if (lport->dns_rdata) lport->tt.rport_logoff(lport->dns_rdata); if (lport->ptp_rdata) { lport->tt.rport_logoff(lport->ptp_rdata); kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); lport->ptp_rdata = NULL; } lport->tt.disc_stop(lport); lport->tt.exch_mgr_reset(lport, 0, 0); fc_host_fabric_name(lport->host) = 0; if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) fc_lport_set_port_id(lport, 0, NULL); } /** * fc_lport_enter_reset() - Reset the local port * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/ static void fc_lport_enter_reset(struct fc_lport *lport) { FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", fc_lport_state(lport)); if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) return; if (lport->vport) { if (lport->link_up) fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); else fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); } fc_lport_state_enter(lport, LPORT_ST_RESET); fc_host_post_event(lport->host, fc_get_event_number(), FCH_EVT_LIPRESET, 0); fc_vports_linkchange(lport); fc_lport_reset_locked(lport); if (lport->link_up) fc_lport_enter_flogi(lport); } /** * fc_lport_enter_disabled() - Disable the local port * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_enter_disabled(struct fc_lport *lport) { FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_DISABLED); fc_vports_linkchange(lport); fc_lport_reset_locked(lport); } /** * fc_lport_error() - Handler for any errors * @lport: The local port that the error was on * @fp: The error code encoded in a frame pointer * * If the error was caused by a resource allocation failure * then wait for half a second and retry, otherwise retry * after the e_d_tov time. */ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) { unsigned long delay = 0; FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport), lport->retry_count); if (PTR_ERR(fp) == -FC_EX_CLOSED) return; /* * Memory allocation failure, or the exchange timed out * or we received LS_RJT. 
* Retry after delay */ if (lport->retry_count < lport->max_retry_count) { lport->retry_count++; if (!fp) delay = msecs_to_jiffies(500); else delay = msecs_to_jiffies(lport->e_d_tov); schedule_delayed_work(&lport->retry_work, delay); } else fc_lport_enter_reset(lport); } /** * fc_lport_ns_resp() - Handle response to a name server * registration exchange * @sp: current sequence in exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance * * Locking Note: This function will be called without the lport lock * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { FC_LPORT_DBG(lport, "Received a name server response, " "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_lport_error(lport, fp); goto err; } fh = fc_frame_header_get(fp); ct = fc_frame_payload_get(fp, sizeof(*ct)); if (fh && ct && fh->fh_type == FC_TYPE_CT && ct->ct_fs_type == FC_FST_DIR && ct->ct_fs_subtype == FC_NS_SUBTYPE && ntohs(ct->ct_cmd) == FC_FS_ACC) switch (lport->state) { case LPORT_ST_RNN_ID: fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); break; case LPORT_ST_RSNN_NN: fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); break; case LPORT_ST_RSPN_ID: fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); break; case LPORT_ST_RFT_ID: fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); break; case LPORT_ST_RFF_ID: if (lport->fdmi_enabled) fc_lport_enter_fdmi(lport); else fc_lport_enter_scr(lport); break; default: /* should have already been caught by state checks */ break; } else fc_lport_error(lport, fp); out: fc_frame_free(fp); err: 
mutex_unlock(&lport->lp_mutex); } /** * fc_lport_ms_resp() - Handle response to a management server * exchange * @sp: current sequence in exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance * * Locking Note: This function will be called without the lport lock * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) { FC_LPORT_DBG(lport, "Received a management server response, " "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_lport_error(lport, fp); goto err; } fh = fc_frame_header_get(fp); ct = fc_frame_payload_get(fp, sizeof(*ct)); if (fh && ct && fh->fh_type == FC_TYPE_CT && ct->ct_fs_type == FC_FST_MGMT && ct->ct_fs_subtype == FC_FDMI_SUBTYPE) { FC_LPORT_DBG(lport, "Received a management server response, " "reason=%d explain=%d\n", ct->ct_reason, ct->ct_explan); switch (lport->state) { case LPORT_ST_RHBA: if (ntohs(ct->ct_cmd) == FC_FS_ACC) fc_lport_enter_ms(lport, LPORT_ST_RPA); else /* Error Skip RPA */ fc_lport_enter_scr(lport); break; case LPORT_ST_RPA: fc_lport_enter_scr(lport); break; case LPORT_ST_DPRT: fc_lport_enter_ms(lport, LPORT_ST_RHBA); break; case LPORT_ST_DHBA: fc_lport_enter_ms(lport, LPORT_ST_DPRT); break; default: /* should have already been caught by state checks */ break; } } else { /* Invalid Frame? 
*/ fc_lport_error(lport, fp); } out: fc_frame_free(fp); err: mutex_unlock(&lport->lp_mutex); } /** * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request * @sp: current sequence in SCR exchange * @fp: response frame * @lp_arg: Fibre Channel lport port instance that sent the registration request * * Locking Note: This function will be called without the lport lock * held, but it will lock, call an _enter_* function or fc_lport_error * and then unlock the lport. */ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; u8 op; FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); if (lport->state != LPORT_ST_SCR) { FC_LPORT_DBG(lport, "Received a SCR response, but in state " "%s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_lport_error(lport, fp); goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) fc_lport_enter_ready(lport); else fc_lport_error(lport, fp); out: fc_frame_free(fp); err: mutex_unlock(&lport->lp_mutex); } /** * fc_lport_enter_scr() - Send a SCR (State Change Register) request * @lport: The local port to register for state changes * * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		/* NULL fp signals an allocation failure to the error path */
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: name-server registration state to enter (selects the NS command)
 *
 * Builds and sends the CT request matching @state (RNN_ID, RSNN_NN,
 * RSPN_ID, RFT_ID or RFF_ID). Symbolic-name registrations are skipped
 * (straight to RFT_ID) when no symbolic name is configured.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	/* Pick the NS command and total payload size for this state. */
	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

/* Remote-port event callback used for the dNS and FDMI server rports. */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	/* rport_create requires the discovery mutex, not the lport mutex. */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ms() - management server commands
 * @lport: Fibre Channel local port to register
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
*/ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) { struct fc_frame *fp; enum fc_fdmi_req cmd; int size = sizeof(struct fc_ct_hdr); size_t len; int numattrs; FC_LPORT_DBG(lport, "Entered %s state from %s state\n", fc_lport_state_names[state], fc_lport_state(lport)); fc_lport_state_enter(lport, state); switch (state) { case LPORT_ST_RHBA: cmd = FC_FDMI_RHBA; /* Number of HBA Attributes */ numattrs = 10; len = sizeof(struct fc_fdmi_rhba); len -= sizeof(struct fc_fdmi_attr_entry); len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); len += FC_FDMI_HBA_ATTR_NODENAME_LEN; len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN; len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN; len += FC_FDMI_HBA_ATTR_MODEL_LEN; len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN; len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN; len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN; len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN; len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN; len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN; size += len; break; case LPORT_ST_RPA: cmd = FC_FDMI_RPA; /* Number of Port Attributes */ numattrs = 6; len = sizeof(struct fc_fdmi_rpa); len -= sizeof(struct fc_fdmi_attr_entry); len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN); len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN; len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN; len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN; len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN; len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN; len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN; size += len; break; case LPORT_ST_DPRT: cmd = FC_FDMI_DPRT; len = sizeof(struct fc_fdmi_dprt); size += len; break; case LPORT_ST_DHBA: cmd = FC_FDMI_DHBA; len = sizeof(struct fc_fdmi_dhba); size += len; break; default: fc_lport_error(lport, NULL); return; } FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n", cmd, (int)len, size); fp = fc_frame_alloc(lport, size); if (!fp) { fc_lport_error(lport, fp); return; } if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd, fc_lport_ms_resp, lport, 3 * lport->r_a_tov)) 
fc_lport_error(lport, fp); } /** * fc_rport_enter_fdmi() - Create a fc_rport for the management server * @lport: The local port requesting a remote port for the management server * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_enter_fdmi(struct fc_lport *lport) { struct fc_rport_priv *rdata; FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n", fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_FDMI); mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV); mutex_unlock(&lport->disc.disc_mutex); if (!rdata) goto err; rdata->ops = &fc_lport_rport_ops; lport->tt.rport_login(rdata); return; err: fc_lport_error(lport, NULL); } /** * fc_lport_timeout() - Handler for the retry_work timer * @work: The work struct of the local port */ static void fc_lport_timeout(struct work_struct *work) { struct fc_lport *lport = container_of(work, struct fc_lport, retry_work.work); mutex_lock(&lport->lp_mutex); switch (lport->state) { case LPORT_ST_DISABLED: break; case LPORT_ST_READY: break; case LPORT_ST_RESET: break; case LPORT_ST_FLOGI: fc_lport_enter_flogi(lport); break; case LPORT_ST_DNS: fc_lport_enter_dns(lport); break; case LPORT_ST_RNN_ID: case LPORT_ST_RSNN_NN: case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: case LPORT_ST_RFF_ID: fc_lport_enter_ns(lport, lport->state); break; case LPORT_ST_FDMI: fc_lport_enter_fdmi(lport); break; case LPORT_ST_RHBA: case LPORT_ST_RPA: case LPORT_ST_DHBA: case LPORT_ST_DPRT: FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n", fc_lport_state(lport)); /* fall thru */ case LPORT_ST_SCR: fc_lport_enter_scr(lport); break; case LPORT_ST_LOGO: fc_lport_enter_logo(lport); break; } mutex_unlock(&lport->lp_mutex); } /** * fc_lport_logo_resp() - Handle response to LOGO request * @sp: The sequence that the LOGO was on * @fp: The LOGO frame * @lp_arg: The lport port that received the LOGO request * * Locking Note: This function will be called 
without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* Exchange closed: nothing to free, nothing to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);
	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointer: skip fc_frame_free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:     The sequence that the FLOGI was on
 * @fp:     The FLOGI response frame
 * @lp_arg: The lport port that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
*/ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; struct fc_els_flogi *flp; u32 did; u16 csp_flags; unsigned int r_a_tov; unsigned int e_d_tov; u16 mfs; FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); if (lport->state != LPORT_ST_FLOGI) { FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " "%s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_lport_error(lport, fp); goto err; } fh = fc_frame_header_get(fp); did = fc_frame_did(fp); if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 || fc_frame_payload_op(fp) != ELS_LS_ACC) { FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); fc_lport_error(lport, fp); goto err; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { FC_LPORT_DBG(lport, "FLOGI bad response\n"); fc_lport_error(lport, fp); goto err; } mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); goto err; } if (mfs <= lport->mfs) { lport->mfs = mfs; fc_host_maxframe_size(lport->host) = mfs; } csp_flags = ntohs(flp->fl_csp.sp_features); r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); if (csp_flags & FC_SP_FT_EDTR) e_d_tov /= 1000000; lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); if ((csp_flags & FC_SP_FT_FPORT) == 0) { if (e_d_tov > lport->e_d_tov) lport->e_d_tov = e_d_tov; lport->r_a_tov = 2 * e_d_tov; fc_lport_set_port_id(lport, did, fp); printk(KERN_INFO "host%d: libfc: " "Port (%6.6x) entered " "point-to-point mode\n", lport->host->host_no, did); fc_lport_ptp_setup(lport, fc_frame_sid(fp), get_unaligned_be64( &flp->fl_wwpn), get_unaligned_be64( &flp->fl_wwnn)); } else { lport->e_d_tov = 
e_d_tov; lport->r_a_tov = r_a_tov; fc_host_fabric_name(lport->host) = get_unaligned_be64(&flp->fl_wwnn); fc_lport_set_port_id(lport, did, fp); fc_lport_enter_dns(lport); } out: fc_frame_free(fp); err: mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_lport_flogi_resp); /** * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager * @lport: Fibre Channel local port to be logged in to the fabric * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_enter_flogi(struct fc_lport *lport) { struct fc_frame *fp; FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_FLOGI); if (lport->point_to_multipoint) { if (lport->port_id) fc_lport_enter_ready(lport); return; } fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) return fc_lport_error(lport, fp); if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, lport->vport ? ELS_FDISC : ELS_FLOGI, fc_lport_flogi_resp, lport, lport->vport ? 
2 * lport->r_a_tov : lport->e_d_tov)) fc_lport_error(lport, NULL); } /** * fc_lport_config() - Configure a fc_lport * @lport: The local port to be configured */ int fc_lport_config(struct fc_lport *lport) { INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); fc_lport_state_enter(lport, LPORT_ST_DISABLED); fc_lport_add_fc4_type(lport, FC_TYPE_FCP); fc_lport_add_fc4_type(lport, FC_TYPE_CT); fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); return 0; } EXPORT_SYMBOL(fc_lport_config); /** * fc_lport_init() - Initialize the lport layer for a local port * @lport: The local port to initialize the exchange layer for */ int fc_lport_init(struct fc_lport *lport) { if (!lport->tt.lport_recv) lport->tt.lport_recv = fc_lport_recv_req; if (!lport->tt.lport_reset) lport->tt.lport_reset = fc_lport_reset; fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; fc_host_node_name(lport->host) = lport->wwnn; fc_host_port_name(lport->host) = lport->wwpn; fc_host_supported_classes(lport->host) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(lport->host), 0, sizeof(fc_host_supported_fc4s(lport->host))); fc_host_supported_fc4s(lport->host)[2] = 1; fc_host_supported_fc4s(lport->host)[7] = 1; /* This value is also unchanging */ memset(fc_host_active_fc4s(lport->host), 0, sizeof(fc_host_active_fc4s(lport->host))); fc_host_active_fc4s(lport->host)[2] = 1; fc_host_active_fc4s(lport->host)[7] = 1; fc_host_maxframe_size(lport->host) = lport->mfs; fc_host_supported_speeds(lport->host) = 0; if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; fc_fc4_add_lport(lport); return 0; } EXPORT_SYMBOL(fc_lport_init); /** * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests * @sp: The sequence for the FC Passthrough response * @fp: The response frame * @info_arg: The BSG info 
that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		/* Exchange failed before any frame arrived: fail the job. */
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	/* Copy this frame's payload into the job's scatterlist. */
	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, NULL);

	/*
	 * Last frame of the final sequence: clamp the received length to
	 * the buffer the caller supplied and complete the job.
	 */
	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_els_request() - Send ELS passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination port id
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
*/
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	/* Copy the caller's ELS payload from the sg list into the frame. */
	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	/* Build the ELS request header by hand. */
	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	/* Context freed by fc_lport_bsg_resp() on completion. */
	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
*/ static int fc_lport_ct_request(struct fc_bsg_job *job, struct fc_lport *lport, u32 did, u32 tov) { struct fc_bsg_info *info; struct fc_frame *fp; struct fc_frame_header *fh; struct fc_ct_req *ct; size_t len; fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + job->request_payload.payload_len); if (!fp) return -ENOMEM; len = job->request_payload.payload_len; ct = fc_frame_payload_get(fp, len); sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, ct, len); fh = fc_frame_header_get(fp); fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; hton24(fh->fh_d_id, did); hton24(fh->fh_s_id, lport->port_id); fh->fh_type = FC_TYPE_CT; hton24(fh->fh_f_ctl, FC_FCTL_REQ); fh->fh_cs_ctl = 0; fh->fh_df_ctl = 0; fh->fh_parm_offset = 0; info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); if (!info) { fc_frame_free(fp); return -ENOMEM; } info->job = job; info->lport = lport; info->rsp_code = FC_FS_ACC; info->nents = job->reply_payload.sg_cnt; info->sg = job->reply_payload.sg_list; if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, NULL, info, tov)) { kfree(info); return -ECOMM; } return 0; } /** * fc_lport_bsg_request() - The common entry point for sending * FC Passthrough requests * @job: The BSG passthrough job */ int fc_lport_bsg_request(struct fc_bsg_job *job) { struct request *rsp = job->req->next_rq; struct Scsi_Host *shost = job->shost; struct fc_lport *lport = shost_priv(shost); struct fc_rport *rport; struct fc_rport_priv *rdata; int rc = -EINVAL; u32 did; job->reply->reply_payload_rcv_len = 0; if (rsp) rsp->resid_len = job->reply_payload.payload_len; mutex_lock(&lport->lp_mutex); switch (job->request->msgcode) { case FC_BSG_RPT_ELS: rport = job->rport; if (!rport) break; rdata = rport->dd_data; rc = fc_lport_els_request(job, lport, rport->port_id, rdata->e_d_tov); break; case FC_BSG_RPT_CT: rport = job->rport; if (!rport) break; rdata = rport->dd_data; rc = fc_lport_ct_request(job, lport, rport->port_id, rdata->e_d_tov); break; case FC_BSG_HST_CT: 
did = ntoh24(job->request->rqst_data.h_ct.port_id); if (did == FC_FID_DIR_SERV) rdata = lport->dns_rdata; else rdata = lport->tt.rport_lookup(lport, did); if (!rdata) break; rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); break; case FC_BSG_HST_ELS_NOLOGIN: did = ntoh24(job->request->rqst_data.h_els.port_id); rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); break; } mutex_unlock(&lport->lp_mutex); return rc; } EXPORT_SYMBOL(fc_lport_bsg_request);
gpl-2.0
civato/KK_Xplorer-9005
drivers/video/msm/hdmi_msm.c
2193
145125
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* #define DEBUG */ #define DEV_DBG_PREFIX "HDMI: " /* #define REG_DUMP */ #define CEC_MSG_PRINT #define TOGGLE_CEC_HARDWARE_FSM #include <linux/types.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/mutex.h> #include <mach/msm_hdmi_audio.h> #include <mach/clk.h> #include <mach/msm_iomap.h> #include <mach/socinfo.h> #include "msm_fb.h" #include "hdmi_msm.h" /* Supported HDMI Audio channels */ #define MSM_HDMI_AUDIO_CHANNEL_2 0 #define MSM_HDMI_AUDIO_CHANNEL_4 1 #define MSM_HDMI_AUDIO_CHANNEL_6 2 #define MSM_HDMI_AUDIO_CHANNEL_8 3 #define MSM_HDMI_AUDIO_CHANNEL_MAX 4 #define MSM_HDMI_AUDIO_CHANNEL_FORCE_32BIT 0x7FFFFFFF /* Supported HDMI Audio sample rates */ #define MSM_HDMI_SAMPLE_RATE_32KHZ 0 #define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1 #define MSM_HDMI_SAMPLE_RATE_48KHZ 2 #define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3 #define MSM_HDMI_SAMPLE_RATE_96KHZ 4 #define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5 #define MSM_HDMI_SAMPLE_RATE_192KHZ 6 #define MSM_HDMI_SAMPLE_RATE_MAX 7 #define MSM_HDMI_SAMPLE_RATE_FORCE_32BIT 0x7FFFFFFF static int msm_hdmi_sample_rate = MSM_HDMI_SAMPLE_RATE_48KHZ; /* HDMI/HDCP Registers */ #define HDCP_DDC_STATUS 0x0128 #define HDCP_DDC_CTRL_0 0x0120 #define HDCP_DDC_CTRL_1 0x0124 #define HDMI_DDC_CTRL 0x020C #define HPD_EVENT_OFFLINE 0 #define HPD_EVENT_ONLINE 1 #define SWITCH_SET_HDMI_AUDIO(d, force) \ do {\ if (!hdmi_msm_is_dvi_mode() &&\ ((force) ||\ (external_common_state->audio_sdev.state != (d)))) {\ 
switch_set_state(&external_common_state->audio_sdev,\ (d));\ DEV_INFO("%s: hdmi_audio state switched to %d\n",\ __func__,\ external_common_state->audio_sdev.state);\ } \ } while (0) struct workqueue_struct *hdmi_work_queue; struct hdmi_msm_state_type *hdmi_msm_state; /* Enable HDCP by default */ static bool hdcp_feature_on = true; DEFINE_MUTEX(hdmi_msm_state_mutex); EXPORT_SYMBOL(hdmi_msm_state_mutex); static DEFINE_MUTEX(hdcp_auth_state_mutex); static void hdmi_msm_dump_regs(const char *prefix); static void hdmi_msm_hdcp_enable(void); static void hdmi_msm_turn_on(void); static int hdmi_msm_audio_off(void); static int hdmi_msm_read_edid(void); static void hdmi_msm_hpd_off(void); static boolean hdmi_msm_is_dvi_mode(void); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT static void hdmi_msm_cec_line_latch_detect(void); #ifdef TOGGLE_CEC_HARDWARE_FSM static boolean msg_send_complete = TRUE; static boolean msg_recv_complete = TRUE; #endif #define HDMI_MSM_CEC_REFTIMER_REFTIMER_ENABLE BIT(16) #define HDMI_MSM_CEC_REFTIMER_REFTIMER(___t) (((___t)&0xFFFF) << 0) #define HDMI_MSM_CEC_TIME_SIGNAL_FREE_TIME(___t) (((___t)&0x1FF) << 7) #define HDMI_MSM_CEC_TIME_ENABLE BIT(0) #define HDMI_MSM_CEC_ADDR_LOGICAL_ADDR(___la) (((___la)&0xFF) << 0) #define HDMI_MSM_CEC_CTRL_LINE_OE BIT(9) #define HDMI_MSM_CEC_CTRL_FRAME_SIZE(___sz) (((___sz)&0x1F) << 4) #define HDMI_MSM_CEC_CTRL_SOFT_RESET BIT(2) #define HDMI_MSM_CEC_CTRL_SEND_TRIG BIT(1) #define HDMI_MSM_CEC_CTRL_ENABLE BIT(0) #define HDMI_MSM_CEC_INT_FRAME_RD_DONE_MASK BIT(7) #define HDMI_MSM_CEC_INT_FRAME_RD_DONE_ACK BIT(6) #define HDMI_MSM_CEC_INT_FRAME_RD_DONE_INT BIT(6) #define HDMI_MSM_CEC_INT_MONITOR_MASK BIT(5) #define HDMI_MSM_CEC_INT_MONITOR_ACK BIT(4) #define HDMI_MSM_CEC_INT_MONITOR_INT BIT(4) #define HDMI_MSM_CEC_INT_FRAME_ERROR_MASK BIT(3) #define HDMI_MSM_CEC_INT_FRAME_ERROR_ACK BIT(2) #define HDMI_MSM_CEC_INT_FRAME_ERROR_INT BIT(2) #define HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK BIT(1) #define 
HDMI_MSM_CEC_INT_FRAME_WR_DONE_ACK BIT(0) #define HDMI_MSM_CEC_INT_FRAME_WR_DONE_INT BIT(0) #define HDMI_MSM_CEC_FRAME_WR_SUCCESS(___st) (((___st)&0xB) ==\ (HDMI_MSM_CEC_INT_FRAME_WR_DONE_INT |\ HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK |\ HDMI_MSM_CEC_INT_FRAME_ERROR_MASK)) #define HDMI_MSM_CEC_RETRANSMIT_NUM(___num) (((___num)&0xF) << 4) #define HDMI_MSM_CEC_RETRANSMIT_ENABLE BIT(0) #define HDMI_MSM_CEC_WR_DATA_DATA(___d) (((___d)&0xFF) << 8) void hdmi_msm_cec_init(void) { /* 0x02A8 CEC_REFTIMER */ HDMI_OUTP(0x02A8, HDMI_MSM_CEC_REFTIMER_REFTIMER_ENABLE | HDMI_MSM_CEC_REFTIMER_REFTIMER(27 * 50) ); /* * 0x02A0 CEC_ADDR * Starting with a default address of 4 */ HDMI_OUTP(0x02A0, HDMI_MSM_CEC_ADDR_LOGICAL_ADDR(4)); hdmi_msm_state->first_monitor = 0; hdmi_msm_state->fsm_reset_done = false; /* 0x029C CEC_INT */ /* Enable CEC interrupts */ HDMI_OUTP(0x029C, \ HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK \ | HDMI_MSM_CEC_INT_FRAME_ERROR_MASK \ | HDMI_MSM_CEC_INT_MONITOR_MASK \ | HDMI_MSM_CEC_INT_FRAME_RD_DONE_MASK); HDMI_OUTP(0x02B0, 0x7FF << 4 | 1); /* * Slight adjustment to logic 1 low periods on read, * CEC Test 8.2-3 was failing, 8 for the * BIT_1_ERR_RANGE_HI = 8 => 750us, the test used 775us, * so increased this to 9 which => 800us. 
*/ /* * CEC latch up issue - To fire monitor interrupt * for every start of message */ HDMI_OUTP(0x02E0, 0x880000); /* * Slight adjustment to logic 0 low period on write */ HDMI_OUTP(0x02DC, 0x8888A888); /* * Enable Signal Free Time counter and set to 7 bit periods */ HDMI_OUTP(0x02A4, 0x1 | (7 * 0x30) << 7); /* 0x028C CEC_CTRL */ HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); } void hdmi_msm_cec_write_logical_addr(int addr) { /* 0x02A0 CEC_ADDR * LOGICAL_ADDR 7:0 NUM */ HDMI_OUTP(0x02A0, addr & 0xFF); } void hdmi_msm_dump_cec_msg(struct hdmi_msm_cec_msg *msg) { #ifdef CEC_MSG_PRINT int i; DEV_DBG("sender_id : %d", msg->sender_id); DEV_DBG("recvr_id : %d", msg->recvr_id); if (msg->frame_size < 2) { DEV_DBG("polling message"); return; } DEV_DBG("opcode : %02x", msg->opcode); for (i = 0; i < msg->frame_size - 2; i++) DEV_DBG("operand(%2d) : %02x", i + 1, msg->operand[i]); #endif /* CEC_MSG_PRINT */ } void hdmi_msm_cec_msg_send(struct hdmi_msm_cec_msg *msg) { int i; uint32 timeout_count = 1; int retry = 10; boolean frameType = (msg->recvr_id == 15 ? BIT(0) : 0); mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->fsm_reset_done = false; mutex_unlock(&hdmi_msm_state_mutex); #ifdef TOGGLE_CEC_HARDWARE_FSM msg_send_complete = FALSE; #endif INIT_COMPLETION(hdmi_msm_state->cec_frame_wr_done); hdmi_msm_state->cec_frame_wr_status = 0; /* 0x0294 HDMI_MSM_CEC_RETRANSMIT */ HDMI_OUTP(0x0294, #ifdef DRVR_ONLY_CECT_NO_DAEMON HDMI_MSM_CEC_RETRANSMIT_NUM(msg->retransmit) | (msg->retransmit > 0) ? HDMI_MSM_CEC_RETRANSMIT_ENABLE : 0); #else HDMI_MSM_CEC_RETRANSMIT_NUM(0) | HDMI_MSM_CEC_RETRANSMIT_ENABLE); #endif /* 0x028C CEC_CTRL */ HDMI_OUTP(0x028C, 0x1 | msg->frame_size << 4); /* 0x0290 CEC_WR_DATA */ /* header block */ HDMI_OUTP(0x0290, HDMI_MSM_CEC_WR_DATA_DATA(msg->sender_id << 4 | msg->recvr_id) | frameType); /* data block 0 : opcode */ HDMI_OUTP(0x0290, HDMI_MSM_CEC_WR_DATA_DATA(msg->frame_size < 2 ? 
0 : msg->opcode) | frameType); /* data block 1-14 : operand 0-13 */ for (i = 0; i < msg->frame_size - 1; i++) HDMI_OUTP(0x0290, HDMI_MSM_CEC_WR_DATA_DATA(msg->operand[i]) | (msg->recvr_id == 15 ? BIT(0) : 0)); for (; i < 14; i++) HDMI_OUTP(0x0290, HDMI_MSM_CEC_WR_DATA_DATA(0) | (msg->recvr_id == 15 ? BIT(0) : 0)); while ((HDMI_INP(0x0298) & 1) && retry--) { DEV_DBG("CEC line is busy(%d)\n", retry); schedule(); } /* 0x028C CEC_CTRL */ HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_LINE_OE | HDMI_MSM_CEC_CTRL_FRAME_SIZE(msg->frame_size) | HDMI_MSM_CEC_CTRL_SEND_TRIG | HDMI_MSM_CEC_CTRL_ENABLE); timeout_count = wait_for_completion_interruptible_timeout( &hdmi_msm_state->cec_frame_wr_done, HZ); if (!timeout_count) { hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_TMOUT; DEV_ERR("%s: timedout", __func__); hdmi_msm_dump_cec_msg(msg); } else { DEV_DBG("CEC write frame done (frame len=%d)", msg->frame_size); hdmi_msm_dump_cec_msg(msg); } #ifdef TOGGLE_CEC_HARDWARE_FSM if (!msg_recv_complete) { /* Toggle CEC hardware FSM */ HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); msg_recv_complete = TRUE; } msg_send_complete = TRUE; #else HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); #endif } void hdmi_msm_cec_line_latch_detect(void) { /* * CECT 9-5-1 * The timer period needs to be changed to appropriate value */ /* * Timedout without RD_DONE, WR_DONE or ERR_INT * Toggle CEC hardware FSM */ mutex_lock(&hdmi_msm_state_mutex); if (hdmi_msm_state->first_monitor == 1) { DEV_WARN("CEC line is probably latched up - CECT 9-5-1"); if (!msg_recv_complete) hdmi_msm_state->fsm_reset_done = true; HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); hdmi_msm_state->first_monitor = 0; } mutex_unlock(&hdmi_msm_state_mutex); } void hdmi_msm_cec_msg_recv(void) { uint32 data; int i; #ifdef DRVR_ONLY_CECT_NO_DAEMON struct hdmi_msm_cec_msg temp_msg; #endif mutex_lock(&hdmi_msm_state_mutex); if (hdmi_msm_state->cec_queue_wr == 
hdmi_msm_state->cec_queue_rd && hdmi_msm_state->cec_queue_full) { mutex_unlock(&hdmi_msm_state_mutex); DEV_ERR("CEC message queue is overflowing\n"); #ifdef DRVR_ONLY_CECT_NO_DAEMON /* * Without CEC daemon: * Compliance tests fail once the queue gets filled up. * so reset the pointers to the start of the queue. */ hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start; hdmi_msm_state->cec_queue_rd = hdmi_msm_state->cec_queue_start; hdmi_msm_state->cec_queue_full = false; #else return; #endif } if (hdmi_msm_state->cec_queue_wr == NULL) { DEV_ERR("%s: wp is NULL\n", __func__); return; } mutex_unlock(&hdmi_msm_state_mutex); /* 0x02AC CEC_RD_DATA */ data = HDMI_INP(0x02AC); hdmi_msm_state->cec_queue_wr->sender_id = (data & 0xF0) >> 4; hdmi_msm_state->cec_queue_wr->recvr_id = (data & 0x0F); hdmi_msm_state->cec_queue_wr->frame_size = (data & 0x1F00) >> 8; DEV_DBG("Recvd init=[%u] dest=[%u] size=[%u]\n", hdmi_msm_state->cec_queue_wr->sender_id, hdmi_msm_state->cec_queue_wr->recvr_id, hdmi_msm_state->cec_queue_wr->frame_size); if (hdmi_msm_state->cec_queue_wr->frame_size < 1) { DEV_ERR("%s: invalid message (frame length = %d)", __func__, hdmi_msm_state->cec_queue_wr->frame_size); return; } else if (hdmi_msm_state->cec_queue_wr->frame_size == 1) { DEV_DBG("%s: polling message (dest[%x] <- init[%x])", __func__, hdmi_msm_state->cec_queue_wr->recvr_id, hdmi_msm_state->cec_queue_wr->sender_id); return; } /* data block 0 : opcode */ data = HDMI_INP(0x02AC); hdmi_msm_state->cec_queue_wr->opcode = data & 0xFF; /* data block 1-14 : operand 0-13 */ for (i = 0; i < hdmi_msm_state->cec_queue_wr->frame_size - 2; i++) { data = HDMI_INP(0x02AC); hdmi_msm_state->cec_queue_wr->operand[i] = data & 0xFF; } for (; i < 14; i++) hdmi_msm_state->cec_queue_wr->operand[i] = 0; DEV_DBG("CEC read frame done\n"); DEV_DBG("=======================================\n"); hdmi_msm_dump_cec_msg(hdmi_msm_state->cec_queue_wr); DEV_DBG("=======================================\n"); #ifdef 
DRVR_ONLY_CECT_NO_DAEMON switch (hdmi_msm_state->cec_queue_wr->opcode) { case 0x64: /* Set OSD String */ DEV_INFO("Recvd OSD Str=[%x]\n",\ hdmi_msm_state->cec_queue_wr->operand[3]); break; case 0x83: /* Give Phy Addr */ DEV_INFO("Recvd a Give Phy Addr cmd\n"); memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); /* Setup a frame for sending out phy addr */ temp_msg.sender_id = 0x4; /* Broadcast */ temp_msg.recvr_id = 0xf; temp_msg.opcode = 0x84; i = 0; temp_msg.operand[i++] = 0x10; temp_msg.operand[i++] = 0x00; temp_msg.operand[i++] = 0x04; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; case 0xFF: /* Abort */ DEV_INFO("Recvd an abort cmd 0xFF\n"); memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /*feature abort */ temp_msg.opcode = 0x00; temp_msg.operand[i++] = hdmi_msm_state->cec_queue_wr->opcode; /*reason for abort = "Refused" */ temp_msg.operand[i++] = 0x04; temp_msg.frame_size = i + 2; hdmi_msm_dump_cec_msg(&temp_msg); hdmi_msm_cec_msg_send(&temp_msg); break; case 0x046: /* Give OSD name */ DEV_INFO("Recvd cmd 0x046\n"); memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* OSD Name */ temp_msg.opcode = 0x47; /* Display control byte */ temp_msg.operand[i++] = 0x00; temp_msg.operand[i++] = 'H'; temp_msg.operand[i++] = 'e'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = ' '; temp_msg.operand[i++] = 'W'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = 'r'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'd'; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; case 0x08F: /* Give Device Power status */ DEV_INFO("Recvd a Power status message\n"); memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = 
hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* OSD String */ temp_msg.opcode = 0x90; temp_msg.operand[i++] = 'H'; temp_msg.operand[i++] = 'e'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = ' '; temp_msg.operand[i++] = 'W'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = 'r'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'd'; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; case 0x080: /* Routing Change cmd */ case 0x086: /* Set Stream Path */ DEV_INFO("Recvd Set Stream\n"); memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; /*Broadcast this message*/ temp_msg.recvr_id = 0xf; i = 0; temp_msg.opcode = 0x82; /* Active Source */ temp_msg.operand[i++] = 0x10; temp_msg.operand[i++] = 0x00; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); /* * sending <Image View On> message */ memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* opcode for Image View On */ temp_msg.opcode = 0x04; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; case 0x44: /* User Control Pressed */ DEV_INFO("User Control Pressed\n"); break; case 0x45: /* User Control Released */ DEV_INFO("User Control Released\n"); break; default: DEV_INFO("Recvd an unknown cmd = [%u]\n", hdmi_msm_state->cec_queue_wr->opcode); #ifdef __SEND_ABORT__ memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* opcode for feature abort */ temp_msg.opcode = 0x00; temp_msg.operand[i++] = hdmi_msm_state->cec_queue_wr->opcode; /*reason for abort = "Unrecognized opcode" */ temp_msg.operand[i++] = 0x00; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; #else memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; 
temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* OSD String */ temp_msg.opcode = 0x64; temp_msg.operand[i++] = 0x0; temp_msg.operand[i++] = 'H'; temp_msg.operand[i++] = 'e'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = ' '; temp_msg.operand[i++] = 'W'; temp_msg.operand[i++] = 'o'; temp_msg.operand[i++] = 'r'; temp_msg.operand[i++] = 'l'; temp_msg.operand[i++] = 'd'; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); break; #endif /* __SEND_ABORT__ */ } #endif /* DRVR_ONLY_CECT_NO_DAEMON */ mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->cec_queue_wr++; if (hdmi_msm_state->cec_queue_wr == CEC_QUEUE_END) hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start; if (hdmi_msm_state->cec_queue_wr == hdmi_msm_state->cec_queue_rd) hdmi_msm_state->cec_queue_full = true; mutex_unlock(&hdmi_msm_state_mutex); DEV_DBG("Exiting %s()\n", __func__); } void hdmi_msm_cec_one_touch_play(void) { struct hdmi_msm_cec_msg temp_msg; uint32 i = 0; memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; /* * Broadcast this message */ temp_msg.recvr_id = 0xf; i = 0; /* Active Source */ temp_msg.opcode = 0x82; temp_msg.operand[i++] = 0x10; temp_msg.operand[i++] = 0x00; /*temp_msg.operand[i++] = 0x04;*/ temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); /* * sending <Image View On> message */ memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg)); temp_msg.sender_id = 0x4; temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id; i = 0; /* Image View On */ temp_msg.opcode = 0x04; temp_msg.frame_size = i + 2; hdmi_msm_cec_msg_send(&temp_msg); } #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */ uint32 hdmi_msm_get_io_base(void) { return (uint32)MSM_HDMI_BASE; } EXPORT_SYMBOL(hdmi_msm_get_io_base); /* Table indicating the video format supported by the HDMI TX Core v1.0 */ /* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 
148.5MHz */ static void hdmi_msm_setup_video_mode_lut(void) { HDMI_SETUP_LUT(640x480p60_4_3); HDMI_SETUP_LUT(720x480p60_4_3); HDMI_SETUP_LUT(720x480p60_16_9); HDMI_SETUP_LUT(1280x720p60_16_9); HDMI_SETUP_LUT(1920x1080i60_16_9); HDMI_SETUP_LUT(1440x480i60_4_3); HDMI_SETUP_LUT(1440x480i60_16_9); HDMI_SETUP_LUT(1920x1080p60_16_9); HDMI_SETUP_LUT(720x576p50_4_3); HDMI_SETUP_LUT(720x576p50_16_9); HDMI_SETUP_LUT(1280x720p50_16_9); HDMI_SETUP_LUT(1440x576i50_4_3); HDMI_SETUP_LUT(1440x576i50_16_9); HDMI_SETUP_LUT(1920x1080p50_16_9); HDMI_SETUP_LUT(1920x1080p24_16_9); HDMI_SETUP_LUT(1920x1080p25_16_9); HDMI_SETUP_LUT(1920x1080p30_16_9); HDMI_SETUP_LUT(1280x1024p60_5_4); } #ifdef PORT_DEBUG const char *hdmi_msm_name(uint32 offset) { switch (offset) { case 0x0000: return "CTRL"; case 0x0020: return "AUDIO_PKT_CTRL1"; case 0x0024: return "ACR_PKT_CTRL"; case 0x0028: return "VBI_PKT_CTRL"; case 0x002C: return "INFOFRAME_CTRL0"; #ifdef CONFIG_FB_MSM_HDMI_3D case 0x0034: return "GEN_PKT_CTRL"; #endif case 0x003C: return "ACP"; case 0x0040: return "GC"; case 0x0044: return "AUDIO_PKT_CTRL2"; case 0x0048: return "ISRC1_0"; case 0x004C: return "ISRC1_1"; case 0x0050: return "ISRC1_2"; case 0x0054: return "ISRC1_3"; case 0x0058: return "ISRC1_4"; case 0x005C: return "ISRC2_0"; case 0x0060: return "ISRC2_1"; case 0x0064: return "ISRC2_2"; case 0x0068: return "ISRC2_3"; case 0x006C: return "AVI_INFO0"; case 0x0070: return "AVI_INFO1"; case 0x0074: return "AVI_INFO2"; case 0x0078: return "AVI_INFO3"; #ifdef CONFIG_FB_MSM_HDMI_3D case 0x0084: return "GENERIC0_HDR"; case 0x0088: return "GENERIC0_0"; case 0x008C: return "GENERIC0_1"; #endif case 0x00C4: return "ACR_32_0"; case 0x00C8: return "ACR_32_1"; case 0x00CC: return "ACR_44_0"; case 0x00D0: return "ACR_44_1"; case 0x00D4: return "ACR_48_0"; case 0x00D8: return "ACR_48_1"; case 0x00E4: return "AUDIO_INFO0"; case 0x00E8: return "AUDIO_INFO1"; case 0x0110: return "HDCP_CTRL"; case 0x0114: return "HDCP_DEBUG_CTRL"; case 0x0118: return 
"HDCP_INT_CTRL"; case 0x011C: return "HDCP_LINK0_STATUS"; case 0x012C: return "HDCP_ENTROPY_CTRL0"; case 0x0130: return "HDCP_RESET"; case 0x0134: return "HDCP_RCVPORT_DATA0"; case 0x0138: return "HDCP_RCVPORT_DATA1"; case 0x013C: return "HDCP_RCVPORT_DATA2"; case 0x0144: return "HDCP_RCVPORT_DATA3"; case 0x0148: return "HDCP_RCVPORT_DATA4"; case 0x014C: return "HDCP_RCVPORT_DATA5"; case 0x0150: return "HDCP_RCVPORT_DATA6"; case 0x0168: return "HDCP_RCVPORT_DATA12"; case 0x01D0: return "AUDIO_CFG"; case 0x0208: return "USEC_REFTIMER"; case 0x020C: return "DDC_CTRL"; case 0x0214: return "DDC_INT_CTRL"; case 0x0218: return "DDC_SW_STATUS"; case 0x021C: return "DDC_HW_STATUS"; case 0x0220: return "DDC_SPEED"; case 0x0224: return "DDC_SETUP"; case 0x0228: return "DDC_TRANS0"; case 0x022C: return "DDC_TRANS1"; case 0x0238: return "DDC_DATA"; case 0x0250: return "HPD_INT_STATUS"; case 0x0254: return "HPD_INT_CTRL"; case 0x0258: return "HPD_CTRL"; case 0x025C: return "HDCP_ENTROPY_CTRL1"; case 0x027C: return "DDC_REF"; case 0x0284: return "HDCP_SW_UPPER_AKSV"; case 0x0288: return "HDCP_SW_LOWER_AKSV"; case 0x02B4: return "ACTIVE_H"; case 0x02B8: return "ACTIVE_V"; case 0x02BC: return "ACTIVE_V_F2"; case 0x02C0: return "TOTAL"; case 0x02C4: return "V_TOTAL_F2"; case 0x02C8: return "FRAME_CTRL"; case 0x02CC: return "AUD_INT"; case 0x0300: return "PHY_REG0"; case 0x0304: return "PHY_REG1"; case 0x0308: return "PHY_REG2"; case 0x030C: return "PHY_REG3"; case 0x0310: return "PHY_REG4"; case 0x0314: return "PHY_REG5"; case 0x0318: return "PHY_REG6"; case 0x031C: return "PHY_REG7"; case 0x0320: return "PHY_REG8"; case 0x0324: return "PHY_REG9"; case 0x0328: return "PHY_REG10"; case 0x032C: return "PHY_REG11"; case 0x0330: return "PHY_REG12"; default: return "???"; } } void hdmi_outp(uint32 offset, uint32 value) { uint32 in_val; outpdw(MSM_HDMI_BASE+offset, value); in_val = inpdw(MSM_HDMI_BASE+offset); DEV_DBG("HDMI[%04x] => %08x [%08x] %s\n", offset, value, in_val, 
hdmi_msm_name(offset)); } uint32 hdmi_inp(uint32 offset) { uint32 value = inpdw(MSM_HDMI_BASE+offset); DEV_DBG("HDMI[%04x] <= %08x %s\n", offset, value, hdmi_msm_name(offset)); return value; } #endif /* DEBUG */ static void hdmi_msm_turn_on(void); static int hdmi_msm_audio_off(void); static int hdmi_msm_read_edid(void); static void hdmi_msm_hpd_off(void); static bool hdmi_ready(void) { return MSM_HDMI_BASE && hdmi_msm_state && hdmi_msm_state->hdmi_app_clk && hdmi_msm_state->hpd_initialized; } static void hdmi_msm_send_event(boolean on) { char *envp[2]; /* QDSP OFF preceding the HPD event notification */ envp[0] = "HDCP_STATE=FAIL"; envp[1] = NULL; DEV_ERR("hdmi: HDMI HPD: QDSP OFF\n"); kobject_uevent_env(external_common_state->uevent_kobj, KOBJ_CHANGE, envp); if (on) { /* Build EDID table */ hdmi_msm_read_edid(); switch_set_state(&external_common_state->sdev, 1); DEV_INFO("%s: hdmi state switched to %d\n", __func__, external_common_state->sdev.state); DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n"); kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE); if (!hdmi_msm_state->hdcp_enable) { /* Send Audio for HDMI Compliance Cases*/ envp[0] = "HDCP_STATE=PASS"; envp[1] = NULL; DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n"); kobject_uevent_env(external_common_state->uevent_kobj, KOBJ_CHANGE, envp); } } else { switch_set_state(&external_common_state->sdev, 0); DEV_INFO("%s: hdmi state switch to %d\n", __func__, external_common_state->sdev.state); DEV_INFO("hdmi: HDMI HPD: sense DISCONNECTED: send OFFLINE\n"); kobject_uevent(external_common_state->uevent_kobj, KOBJ_OFFLINE); } if (!completion_done(&hdmi_msm_state->hpd_event_processed)) complete(&hdmi_msm_state->hpd_event_processed); } static void hdmi_msm_hpd_state_work(struct work_struct *work) { if (!hdmi_ready()) { DEV_ERR("hdmi: %s: ignored, probe failed\n", __func__); return; } hdmi_msm_send_event(external_common_state->hpd_state); } #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT static void 
hdmi_msm_cec_latch_work(struct work_struct *work) { hdmi_msm_cec_line_latch_detect(); } #endif static void hdcp_deauthenticate(void); static void hdmi_msm_hdcp_reauth_work(struct work_struct *work) { if (!hdmi_msm_state->hdcp_enable) { DEV_DBG("%s: HDCP not enabled\n", __func__); return; } /* Don't process recursive actions */ mutex_lock(&hdmi_msm_state_mutex); if (hdmi_msm_state->hdcp_activating) { mutex_unlock(&hdmi_msm_state_mutex); return; } mutex_unlock(&hdmi_msm_state_mutex); /* * Reauth=>deauth, hdcp_auth * hdcp_auth=>turn_on() which calls * HDMI Core reset without informing the Audio QDSP * this can do bad things to video playback on the HDTV * Therefore, as surprising as it may sound do reauth * only if the device is HDCP-capable */ hdcp_deauthenticate(); mutex_lock(&hdcp_auth_state_mutex); hdmi_msm_state->reauth = TRUE; mutex_unlock(&hdcp_auth_state_mutex); mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2); } static void hdmi_msm_hdcp_work(struct work_struct *work) { if (!hdmi_msm_state->hdcp_enable) { DEV_DBG("%s: HDCP not enabled\n", __func__); return; } /* Only re-enable if cable still connected */ mutex_lock(&external_common_state_hpd_mutex); if (external_common_state->hpd_state && !(hdmi_msm_state->full_auth_done)) { mutex_unlock(&external_common_state_hpd_mutex); if (hdmi_msm_state->reauth == TRUE) { DEV_DBG("%s: Starting HDCP re-authentication\n", __func__); hdmi_msm_turn_on(); } else { DEV_DBG("%s: Starting HDCP authentication\n", __func__); hdmi_msm_hdcp_enable(); } } else { mutex_unlock(&external_common_state_hpd_mutex); DEV_DBG("%s: HDMI not connected or HDCP already active\n", __func__); hdmi_msm_state->reauth = FALSE; } } int hdmi_msm_process_hdcp_interrupts(void) { int rc = -1; uint32 hdcp_int_val; char *envp[2]; if (!hdmi_msm_state->hdcp_enable) { DEV_DBG("%s: HDCP not enabled\n", __func__); return -EINVAL; } /* HDCP_INT_CTRL[0x0118] * [0] AUTH_SUCCESS_INT [R] HDCP Authentication Success * interrupt status * [1] AUTH_SUCCESS_ACK [W] 
Acknowledge bit for HDCP * Authentication Success bit - write 1 to clear * [2] AUTH_SUCCESS_MASK [R/W] Mask bit for HDCP Authentication * Success interrupt - set to 1 to enable interrupt */ hdcp_int_val = HDMI_INP_ND(0x0118); if ((hdcp_int_val & (1 << 2)) && (hdcp_int_val & (1 << 0))) { /* AUTH_SUCCESS_INT */ HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 1)) & ~(1 << 0)); DEV_INFO("HDCP: AUTH_SUCCESS_INT received\n"); complete_all(&hdmi_msm_state->hdcp_success_done); return 0; } /* [4] AUTH_FAIL_INT [R] HDCP Authentication Lost * interrupt Status * [5] AUTH_FAIL_ACK [W] Acknowledge bit for HDCP * Authentication Lost bit - write 1 to clear * [6] AUTH_FAIL_MASK [R/W] Mask bit fo HDCP Authentication * Lost interrupt set to 1 to enable interrupt * [7] AUTH_FAIL_INFO_ACK [W] Acknowledge bit for HDCP * Authentication Failure Info field - write 1 to clear */ if ((hdcp_int_val & (1 << 6)) && (hdcp_int_val & (1 << 4))) { /* AUTH_FAIL_INT */ /* Clear and Disable */ uint32 link_status = HDMI_INP_ND(0x011C); HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 5)) & ~((1 << 6) | (1 << 4))); DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n", link_status); if (hdmi_msm_state->full_auth_done) { SWITCH_SET_HDMI_AUDIO(0, 0); envp[0] = "HDCP_STATE=FAIL"; envp[1] = NULL; DEV_INFO("HDMI HPD:QDSP OFF\n"); kobject_uevent_env(external_common_state->uevent_kobj, KOBJ_CHANGE, envp); mutex_lock(&hdcp_auth_state_mutex); hdmi_msm_state->full_auth_done = FALSE; mutex_unlock(&hdcp_auth_state_mutex); /* Calling reauth only when authentication * is sucessful or else we always go into * the reauth loop. 
Also, No need to reauthenticate * if authentication failed because of cable disconnect */ if (((link_status & 0xF0) >> 4) != 0x7) { DEV_DBG("Reauthenticate From %s HDCP FAIL INT ", __func__); queue_work(hdmi_work_queue, &hdmi_msm_state->hdcp_reauth_work); } else { DEV_INFO("HDCP: HDMI cable disconnected\n"); } } /* Clear AUTH_FAIL_INFO as well */ HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 7))); return 0; } /* [8] DDC_XFER_REQ_INT [R] HDCP DDC Transfer Request * interrupt status * [9] DDC_XFER_REQ_ACK [W] Acknowledge bit for HDCP DDC * Transfer Request bit - write 1 to clear * [10] DDC_XFER_REQ_MASK [R/W] Mask bit for HDCP DDC Transfer * Request interrupt - set to 1 to enable interrupt */ if ((hdcp_int_val & (1 << 10)) && (hdcp_int_val & (1 << 8))) { /* DDC_XFER_REQ_INT */ HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 9)) & ~(1 << 8)); if (!(hdcp_int_val & (1 << 12))) return 0; } /* [12] DDC_XFER_DONE_INT [R] HDCP DDC Transfer done interrupt * status * [13] DDC_XFER_DONE_ACK [W] Acknowledge bit for HDCP DDC * Transfer done bit - write 1 to clear * [14] DDC_XFER_DONE_MASK [R/W] Mask bit for HDCP DDC Transfer * done interrupt - set to 1 to enable interrupt */ if ((hdcp_int_val & (1 << 14)) && (hdcp_int_val & (1 << 12))) { /* DDC_XFER_DONE_INT */ HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 13)) & ~(1 << 12)); DEV_INFO("HDCP: DDC_XFER_DONE received\n"); return 0; } return rc; } static irqreturn_t hdmi_msm_isr(int irq, void *dev_id) { uint32 hpd_int_status; uint32 hpd_int_ctrl; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT uint32 cec_intr_status; #endif uint32 ddc_int_ctrl; uint32 audio_int_val; static uint32 fifo_urun_int_occurred; static uint32 sample_drop_int_occurred; const uint32 occurrence_limit = 5; if (!hdmi_ready()) { DEV_DBG("ISR ignored, probe failed\n"); return IRQ_HANDLED; } /* Process HPD Interrupt */ /* HDMI_HPD_INT_STATUS[0x0250] */ hpd_int_status = HDMI_INP_ND(0x0250); /* HDMI_HPD_INT_CTRL[0x0254] */ hpd_int_ctrl = HDMI_INP_ND(0x0254); if ((hpd_int_ctrl & (1 
<< 2)) && (hpd_int_status & (1 << 0))) { /* * Got HPD interrupt. Ack the interrupt and disable any * further HPD interrupts until we process this interrupt. */ HDMI_OUTP(0x0254, ((hpd_int_ctrl | (BIT(0))) & ~BIT(2))); external_common_state->hpd_state = (HDMI_INP(0x0250) & BIT(1)) >> 1; DEV_DBG("%s: Queuing work to handle HPD %s event\n", __func__, external_common_state->hpd_state ? "connect" : "disconnect"); queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work); return IRQ_HANDLED; } /* Process DDC Interrupts */ /* HDMI_DDC_INT_CTRL[0x0214] */ ddc_int_ctrl = HDMI_INP_ND(0x0214); if ((ddc_int_ctrl & (1 << 2)) && (ddc_int_ctrl & (1 << 0))) { /* SW_DONE INT occured, clr it */ HDMI_OUTP_ND(0x0214, ddc_int_ctrl | (1 << 1)); complete(&hdmi_msm_state->ddc_sw_done); return IRQ_HANDLED; } /* FIFO Underrun Int is enabled */ /* HDMI_AUD_INT[0x02CC] * [3] AUD_SAM_DROP_MASK [R/W] * [2] AUD_SAM_DROP_ACK [W], AUD_SAM_DROP_INT [R] * [1] AUD_FIFO_URUN_MASK [R/W] * [0] AUD_FIFO_URUN_ACK [W], AUD_FIFO_URUN_INT [R] */ audio_int_val = HDMI_INP_ND(0x02CC); if ((audio_int_val & (1 << 1)) && (audio_int_val & (1 << 0))) { /* FIFO Underrun occured, clr it */ HDMI_OUTP(0x02CC, audio_int_val | (1 << 0)); ++fifo_urun_int_occurred; DEV_INFO("HDMI AUD_FIFO_URUN: %d\n", fifo_urun_int_occurred); if (fifo_urun_int_occurred >= occurrence_limit) { HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 1)); DEV_INFO("HDMI AUD_FIFO_URUN: INT has been disabled " "by the ISR after %d occurences...\n", fifo_urun_int_occurred); } return IRQ_HANDLED; } /* Audio Sample Drop int is enabled */ if ((audio_int_val & (1 << 3)) && (audio_int_val & (1 << 2))) { /* Audio Sample Drop occured, clr it */ HDMI_OUTP(0x02CC, audio_int_val | (1 << 2)); DEV_DBG("%s: AUD_SAM_DROP", __func__); ++sample_drop_int_occurred; if (sample_drop_int_occurred >= occurrence_limit) { HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 3)); DEV_INFO("HDMI AUD_SAM_DROP: INT has been disabled " "by the ISR after %d occurences...\n", 
sample_drop_int_occurred); } return IRQ_HANDLED; } if (!hdmi_msm_process_hdcp_interrupts()) return IRQ_HANDLED; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT /* Process CEC Interrupt */ /* HDMI_MSM_CEC_INT[0x029C] */ cec_intr_status = HDMI_INP_ND(0x029C); DEV_DBG("cec interrupt status is [%u]\n", cec_intr_status); if (HDMI_MSM_CEC_FRAME_WR_SUCCESS(cec_intr_status)) { DEV_DBG("CEC_IRQ_FRAME_WR_DONE\n"); HDMI_OUTP(0x029C, cec_intr_status | HDMI_MSM_CEC_INT_FRAME_WR_DONE_ACK); mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_DONE; hdmi_msm_state->first_monitor = 0; del_timer(&hdmi_msm_state->cec_read_timer); mutex_unlock(&hdmi_msm_state_mutex); complete(&hdmi_msm_state->cec_frame_wr_done); return IRQ_HANDLED; } if ((cec_intr_status & (1 << 2)) && (cec_intr_status & (1 << 3))) { DEV_DBG("CEC_IRQ_FRAME_ERROR\n"); #ifdef TOGGLE_CEC_HARDWARE_FSM /* Toggle CEC hardware FSM */ HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); #endif HDMI_OUTP(0x029C, cec_intr_status); mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->first_monitor = 0; del_timer(&hdmi_msm_state->cec_read_timer); hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_ERROR; mutex_unlock(&hdmi_msm_state_mutex); complete(&hdmi_msm_state->cec_frame_wr_done); return IRQ_HANDLED; } if ((cec_intr_status & (1 << 4)) && (cec_intr_status & (1 << 5))) { DEV_DBG("CEC_IRQ_MONITOR\n"); HDMI_OUTP(0x029C, cec_intr_status | HDMI_MSM_CEC_INT_MONITOR_ACK); /* * CECT 9-5-1 * On the first occassion start a timer * for few hundred ms, if it expires then * reset the CEC block else go on with * frame transactions as usual. 
* Below adds hdmi_msm_cec_msg_recv() as an * item into the work queue instead of running in * interrupt context */ mutex_lock(&hdmi_msm_state_mutex); if (hdmi_msm_state->first_monitor == 0) { /* This timer might have to be changed * worst case theoritical = * 16 bytes * 8 * 2.7msec = 346 msec */ mod_timer(&hdmi_msm_state->cec_read_timer, jiffies + HZ/2); hdmi_msm_state->first_monitor = 1; } mutex_unlock(&hdmi_msm_state_mutex); return IRQ_HANDLED; } if ((cec_intr_status & (1 << 6)) && (cec_intr_status & (1 << 7))) { DEV_DBG("CEC_IRQ_FRAME_RD_DONE\n"); mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->first_monitor = 0; del_timer(&hdmi_msm_state->cec_read_timer); mutex_unlock(&hdmi_msm_state_mutex); HDMI_OUTP(0x029C, cec_intr_status | HDMI_MSM_CEC_INT_FRAME_RD_DONE_ACK); hdmi_msm_cec_msg_recv(); #ifdef TOGGLE_CEC_HARDWARE_FSM if (!msg_send_complete) msg_recv_complete = FALSE; else { /* Toggle CEC hardware FSM */ HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); } #else HDMI_OUTP(0x028C, 0x0); HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE); #endif return IRQ_HANDLED; } #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */ DEV_DBG("%s: HPD<Ctrl=%04x, State=%04x>, ddc_int_ctrl=%04x, " "aud_int=%04x, cec_intr_status=%04x\n", __func__, hpd_int_ctrl, hpd_int_status, ddc_int_ctrl, audio_int_val, HDMI_INP_ND(0x029C)); return IRQ_HANDLED; } static int check_hdmi_features(void) { /* RAW_FEAT_CONFIG_ROW0_LSB */ uint32 val = inpdw(QFPROM_BASE + 0x0238); /* HDMI_DISABLE */ boolean hdmi_disabled = (val & 0x00200000) >> 21; /* HDCP_DISABLE */ boolean hdcp_disabled = (val & 0x00400000) >> 22; DEV_DBG("Features <val:0x%08x, HDMI:%s, HDCP:%s>\n", val, hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? 
"OFF" : "ON");

	if (hdmi_disabled) {
		DEV_ERR("ERROR: HDMI disabled\n");
		return -ENODEV;
	}

	if (hdcp_disabled)
		DEV_WARN("WARNING: HDCP disabled\n");

	return 0;
}

/*
 * Report whether the HDCP feature fuse allows HDCP.
 * Reads RAW_FEAT_CONFIG_ROW0_LSB and tests the HDCP_DISABLE bit
 * (bit 22): returns TRUE when the fuse is NOT blown, i.e. HDCP is
 * available.
 */
static boolean hdmi_msm_has_hdcp(void)
{
	/* RAW_FEAT_CONFIG_ROW0_LSB, HDCP_DISABLE */
	return (inpdw(QFPROM_BASE + 0x0238) & 0x00400000) ? FALSE : TRUE;
}

/* TRUE when the HDMI core block is enabled: HDMI_CTRL[0] (ENABLE). */
static boolean hdmi_msm_is_power_on(void)
{
	/* HDMI_CTRL, ENABLE */
	return (HDMI_INP_ND(0x0000) & 0x00000001) ? TRUE : FALSE;
}

/* 1.2.1.2.1 DVI Operation
 * HDMI compliance requires the HDMI core to support DVI as well. In DVI
 * operation there are no preambles, guardbands, VBI or audio packets
 * transmitted; the TMDS encoding of video data remains the same as
 * HDMI. DVI mode is selected by programming the HDMI_DVI_SEL field of
 * the HDMI_CTRL register to 0.
 */
static boolean hdmi_msm_is_dvi_mode(void)
{
	/* HDMI_CTRL, HDMI_DVI_SEL: bit clear means DVI mode */
	return (HDMI_INP_ND(0x0000) & 0x00000002) ? FALSE : TRUE;
}

/*
 * Program HDMI_CTRL (offset 0x0000) to enable/disable the HDMI core
 * and select HDMI vs DVI signalling.
 *
 * power_on == TRUE:
 *   - DVI sink (hdmi_sink == 0): the core is first enabled with
 *     HDMI_DVI_SEL (bit 1) set -- plus encryption (bit 2) when HDCP is
 *     enabled -- then bit 1 is cleared before the final write below,
 *     selecting DVI signalling.  The double register write is
 *     intentional; keep the ordering.
 *   - HDMI sink: one write with ENABLE + HDMI_DVI_SEL (and encryption
 *     when HDCP is enabled).
 * power_on == FALSE: core disabled, HDMI_DVI_SEL left set.
 */
void hdmi_msm_set_mode(boolean power_on)
{
	uint32 reg_val = 0;

	if (power_on) {
		/* ENABLE */
		reg_val |= 0x00000001; /* Enable the block */
		if (external_common_state->hdmi_sink == 0) {
			/* HDMI_DVI_SEL */
			reg_val |= 0x00000002;
			if (hdmi_msm_state->hdcp_enable)
				/* HDMI Encryption */
				reg_val |= 0x00000004;
			/* HDMI_CTRL */
			HDMI_OUTP(0x0000, reg_val);
			/* HDMI_DVI_SEL */
			reg_val &= ~0x00000002;
		} else {
			if (hdmi_msm_state->hdcp_enable)
				/* HDMI_Encryption_ON */
				reg_val |= 0x00000006;
			else
				reg_val |= 0x00000002;
		}
	} else
		reg_val = 0x00000002;

	/* HDMI_CTRL */
	HDMI_OUTP(0x0000, reg_val);
	DEV_DBG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
			power_on ? "Enable" : "Disable", reg_val);
}

/*
 * One-time DDC (I2C-over-HDMI) controller setup: clock prescale and
 * SDA sampling threshold, clock-stall timeout, and the HDCP DDC
 * reference timer.
 */
static void msm_hdmi_init_ddc(void)
{
	/* 0x0220 HDMI_DDC_SPEED
	   [31:16] PRESCALE  prescale = (m * xtal_frequency) /
	           (desired_i2c_speed), where m is multiply
	           factor, default: m = 1
	   [1:0]   THRESHOLD Select threshold to use to determine whether
	           value sampled on SDA is a 1 or 0.
	           Specified in terms of the ratio between the number
	           of sampled ones and the total number of times SDA
	           is sampled.
	 * 0x0: >0
	 * 0x1: 1/4 of total samples
	 * 0x2: 1/2 of total samples
	 * 0x3: 3/4 of total samples
	 */
	/* Configure the Pre-Scale multiplier
	 * Configure the Threshold
	 */
	HDMI_OUTP_ND(0x0220, (10 << 16) | (2 << 0));

	/*
	 * 0x0224 HDMI_DDC_SETUP
	 * Setting 31:24 bits : Time units to wait before timeout
	 * when clock is being stalled by external sink device
	 */
	HDMI_OUTP_ND(0x0224, 0xff000000);

	/* 0x027C HDMI_DDC_REF
	   [6]    REFTIMER_ENABLE  Enable the timer
	    * 0: Disable
	    * 1: Enable
	   [15:0] REFTIMER  Value to set the register in order to generate
	          DDC strobe. This register counts on HDCP application
	          clock */
	/* Enable reference timer -- 27 micro-seconds */
	HDMI_OUTP_ND(0x027C, (1 << 16) | (27 << 0));
}

/*
 * Clear any pending DDC SW_DONE interrupt and (re)enable it.
 * Polls HDMI_DDC_INT_CTRL until the pending bit clears, bounded by a
 * fixed iteration budget (not wall-clock time).
 * Returns 0 on success, -ETIMEDOUT if the interrupt never cleared.
 */
static int hdmi_msm_ddc_clear_irq(const char *what)
{
	const uint32 time_out = 0xFFFF;
	uint32 time_out_count, reg_val;

	/* clear pending and enable interrupt */
	time_out_count = time_out;
	do {
		--time_out_count;
		/* HDMI_DDC_INT_CTRL[0x0214]
		   [2] SW_DONE_MK   Mask bit for SW_DONE_INT.
		       Set to 1 to enable interrupt.
		   [1] SW_DONE_ACK  WRITE ONLY. Acknowledge bit for
		       SW_DONE_INT. Write 1 to clear interrupt.
		   [0] SW_DONE_INT  READ ONLY. SW_DONE interrupt status */
		/* Clear and Enable DDC interrupt */
		/* Write */
		HDMI_OUTP_ND(0x0214, (1 << 2) | (1 << 1));
		/* Read back */
		reg_val = HDMI_INP_ND(0x0214);
	} while ((reg_val & 0x1) && time_out_count);

	if (!time_out_count) {
		DEV_ERR("%s[%s]: timedout\n", __func__, what);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Write data_len bytes from data_buf to DDC slave dev_addr at register
 * 'offset', retrying up to 10 times on timeout or NACK.
 * Returns 0 on success, -EINVAL/-ETIMEDOUT/-EIO on failure.
 */
static int hdmi_msm_ddc_write(uint32 dev_addr, uint32 offset,
	const uint8 *data_buf, uint32 data_len, const char *what)
{
	uint32 reg_val, ndx;
	int status = 0, retry = 10;
	uint32 time_out_count;

	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s[%s]: invalid input paramter\n", __func__, what);
		goto error;
	}

again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;

	/* Ensure Device Address has LSB cleared: I2C R/W bit = 0, i.e.
	 * a slave-address WRITE */
	dev_addr &= 0xFE;

	/* 0x0238 HDMI_DDC_DATA
	   [31]    INDEX_WRITE  WRITE ONLY. Set while writing to also
	           load the INDEX field.
	   [23:16] INDEX        Read/write index into the DDC buffer.
	                        Writable only when INDEX_WRITE=1.
	   [15:8]  DATA         Use to fill or read the DDC buffer.
	   [0]     DATA_RW      0: Write, 1: Read (buffer access mode;
	           the buffer address auto-increments either way). */
	/* 1. Write to HDMI_I2C_DATA with the following fields set in
	 * order to handle portion #1
	 * DATA_RW = 0x0 (write)
	 * DATA = linkAddress (primary link address and writing)
	 * INDEX = 0x0 (initial offset into buffer)
	 * INDEX_WRITE = 0x1 (setting initial offset)
	 */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));

	/* 2. Write to HDMI_I2C_DATA with the following fields set in
	 * order to handle portion #2
	 * DATA_RW = 0x0 (write)
	 * DATA = offsetAddress
	 * INDEX = 0x0
	 * INDEX_WRITE = 0x0 (auto-increment by hardware)
	 */
	HDMI_OUTP_ND(0x0238, offset << 8);

	/* 3.
Write to HDMI_I2C_DATA with the following fields set in order
	 * to handle portion #3
	 * DATA_RW = 0x0 (write)
	 * DATA = data_buf[ndx]
	 * INDEX = 0x0
	 * INDEX_WRITE = 0x0 (auto-increment by hardware)
	 */
	for (ndx = 0; ndx < data_len; ++ndx)
		HDMI_OUTP_ND(0x0238, ((uint32)data_buf[ndx]) << 8);

	/* Data setup is complete, now setup the transaction
	 * characteristics */

	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0          Byte count for first transaction
	           (excluding the first byte, which is usually the
	           address).
	   [13]    STOP0         1: send STOP after first transaction.
	   [12]    START0        1: send START before first transaction.
	   [8]     STOP_ON_NACK0 1: on NACK stop ALL transactions and
	           send STOP (the current transaction always stops).
	   [0]     RW0           0: WRITE, 1: READ.  Controls HDMI_DDC
	           behaviour only; the bus R/W bit is the LSB of the
	           address byte programmed into the DDC buffer. */
	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields
	 * set in order to handle characteristics of portion #1 and
	 * portion #2
	 * RW0 = 0x0 (write)
	 * START0 = 0x1 (insert START bit)
	 * STOP0 = 0x0 (do NOT insert STOP bit)
	 * CNT0 = 0x1 (single byte transaction excluding address)
	 */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));

	/* 0x022C HDMI_DDC_TRANS1 -- same layout as TRANS0 for the
	 * second transaction (CNT1/STOP1/START1/STOP_ON_NACK1/RW1). */
	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields
	 * set in order to handle characteristics of portion #3.
	 * Comment corrected to match the value actually written:
	 * RW1 = 0x0 (write, continues the same write transfer)
	 * START1 = 0x0 (no repeated START)
	 * STOP1 = 0x1 (insert STOP bit)
	 * CNT1 = data_len - 1 (payload bytes after the register offset)
	 */
	HDMI_OUTP_ND(0x022C, (1 << 13) | ((data_len-1) << 16));

	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL
	   [21:20] TRANSACTION_CNT  Number of transactions in this
	           transfer: 0x0..0x3 selects transaction0..transactionN.
	   [3]     SW_STATUS_RESET  Write 1 to reset HDMI_DDC_SW_STATUS
	           flags (SW_DONE, ABORTED, TIMEOUT, SW_INTERRUPTED,
	           BUFFER_OVERFLOW, STOPPED_ON_NACK, NACK0..NACK3).
	   [2]     SEND_RESET       1: send reset sequence (9 clocks, no
	           data) before the first transaction.
	   [1]     SOFT_RESET       Write 1 to reset DDC controller.
	   [0]     GO               WRITE ONLY. Write 1 to start the DDC
	           transfer. */
	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
	 * Note that NOTHING has been transmitted on the DDC lines up to
	 * this point.
	 * TRANSACTION_CNT = 0x1 (execute transaction0 followed by
	 * transaction1)
	 * GO = 0x1 (kicks off hardware)
	 */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));

	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* Ack SW_DONE and leave it masked (write ACK bit only) */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s[%s]: failed timout, retry=%d\n",
				__func__, what, retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s[%s]: timedout, DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n",
			__func__, what,
			HDMI_INP_ND(0x0218),
			HDMI_INP_ND(0x021C),
			HDMI_INP_ND(0x0214));
		goto error;
	}

	/* Read DDC status and keep only the NACK0..NACK3 bits */
	reg_val = HDMI_INP_ND(0x0218);
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;

	/* Check if any NACK occurred */
	if (reg_val) {
		/* light reset while retries remain, full soft reset on
		 * the last attempt */
		if (retry > 1)
			HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		else
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s[%s]: failed NACK=%08x, retry=%d\n",
				__func__, what, reg_val, retry);
			msleep(100);
			goto again;
		}
		status = -EIO;
		DEV_ERR("%s[%s]: failed NACK: %08x\n", __func__,
			what, reg_val);
		goto error;
	}

	DEV_DBG("%s[%s] success\n", __func__, what);

error:
	return status;
}

/*
 * Read data_len bytes into data_buf from DDC slave dev_addr at
 * register 'offset', requesting request_len bytes on the wire
 * (request_len >= data_len lets callers over-fetch, e.g. rounding up
 * to a 32-byte multiple).  Retries up to 'retry' times on timeout or
 * NACK.  Returns 0 on success or a negative errno.
 */
static int hdmi_msm_ddc_read_retry(uint32 dev_addr, uint32 offset,
	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
	const char *what)
{
	uint32 reg_val, ndx;
	int status = 0;
	uint32 time_out_count;
	int log_retry_fail = retry != 1;

	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s: invalid input paramter\n", __func__);
		goto error;
	}

again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;

	/* Ensure Device Address has LSB cleared: the transfer starts
	 * with an I2C WRITE of the register offset */
	dev_addr &= 0xFE;

	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE  WRITE ONLY. To write index field, set this
	        bit to 1 while writing HDMI_DDC_DATA.
[23:16] INDEX  Use to set index into DDC buffer for next read
	        or current write, or to read index of current read or
	        next write. Writable only when INDEX_WRITE=1.
	   [15:8]  DATA     Use to fill or read the DDC buffer.
	   [0]     DATA_RW  0: Write, 1: Read (buffer access mode). */
	/* 1. Write to HDMI_I2C_DATA with the following fields set in
	 * order to handle portion #1
	 * DATA_RW = 0x0 (write)
	 * DATA = linkAddress (primary link address and writing)
	 * INDEX = 0x0 (initial offset into buffer)
	 * INDEX_WRITE = 0x1 (setting initial offset)
	 */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));

	/* 2. Write to HDMI_I2C_DATA with the following fields set in
	 * order to handle portion #2
	 * DATA_RW = 0x0 (write)
	 * DATA = offsetAddress
	 * INDEX = 0x0
	 * INDEX_WRITE = 0x0 (auto-increment by hardware)
	 */
	HDMI_OUTP_ND(0x0238, offset << 8);

	/* 3. Write to HDMI_I2C_DATA with the following fields set in
	 * order to handle portion #3
	 * DATA_RW = 0x0 (write)
	 * DATA = linkAddress + 1 (primary link address 0x74 and
	 *        reading)
	 * INDEX = 0x0
	 * INDEX_WRITE = 0x0 (auto-increment by hardware)
	 */
	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);

	/* Data setup is complete, now setup the transaction
	 * characteristics */

	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0          Byte count for first transaction
	           (excluding the first byte, which is usually the
	           address).
	   [13]    STOP0         1: send STOP after first transaction.
	   [12]    START0        1: send START before first transaction.
	   [8]     STOP_ON_NACK0 1: on NACK stop ALL transactions and
	           send STOP (the current transaction always stops).
	   [0]     RW0           0: WRITE, 1: READ.  Controls HDMI_DDC
	           behaviour only; the bus R/W bit is the LSB of the
	           address byte in the DDC buffer. */
	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields
	 * set in order to handle characteristics of portion #1 and
	 * portion #2
	 * RW0 = 0x0 (write)
	 * START0 = 0x1 (insert START bit)
	 * STOP0 = 0x0 (do NOT insert STOP bit)
	 * CNT0 = 0x1 (single byte transaction excluding address)
	 */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));

	/* 0x022C HDMI_DDC_TRANS1 -- same layout as TRANS0 for the
	 * second transaction (CNT1/STOP1/START1/STOP_ON_NACK1/RW1). */
	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields
	 * set in order to handle characteristics of portion #3
	 * RW1 = 0x1 (read)
	 * START1 = 0x1 (insert START bit)
	 * STOP1 = 0x1 (insert STOP bit)
	 * CNT1 = request_len (it's 128 (0x80) for a blk read)
	 */
	HDMI_OUTP_ND(0x022C, 1 | (1 << 12) | (1 << 13) | (request_len << 16));

	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL
	   [21:20] TRANSACTION_CNT  Number of transactions to be done in
	           current transfer.
	    * 0x0: transaction0 only
	    * 0x1: transaction0, transaction1
	    * 0x2: transaction0, transaction1, transaction2
	    * 0x3: transaction0, transaction1, transaction2, transaction3
	   [3]     SW_STATUS_RESET  Write 1 to reset HDMI_DDC_SW_STATUS
	           flags.
	   [2]     SEND_RESET       1: send reset sequence (9 clocks, no
	           data) before the first transaction.
	   [1]     SOFT_RESET       Write 1 to reset DDC controller.
	   [0]     GO               WRITE ONLY. Write 1 to start the DDC
	           transfer. */
	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
	 * Note that NOTHING has been transmitted on the DDC lines up to
	 * this point.
	 * TRANSACTION_CNT = 0x1 (execute transaction0 followed by
	 * transaction1)
	 * SEND_RESET = Set to 1 to send reset sequence
	 * GO = 0x1 (kicks off hardware)
	 */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));

	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* Ack SW_DONE and leave it masked (write ACK bit only) */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s: failed timout, retry=%d\n",
				__func__, retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s: timedout(7), DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n",
			__func__,
			HDMI_INP(0x0218),
			HDMI_INP(0x021C),
			HDMI_INP(0x0214));
		goto error;
	}

	/* Read DDC status and keep only the NACK0..NACK3 bits */
	reg_val = HDMI_INP_ND(0x0218);
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;

	/* Check if any NACK occurred */
	if (reg_val) {
		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		if (retry == 1)
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
				"dev-addr=0x%02x, offset=0x%02x, "
				"length=%d\n",
				__func__, what, reg_val, retry,
				dev_addr, offset, data_len);
			goto again;
		}
		status = -EIO;
		if (log_retry_fail)
			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
				"offset=0x%02x, length=%d\n",
				__func__, what, reg_val,
				dev_addr, offset, data_len);
		goto error;
	}

	/* 0x0238 HDMI_DDC_DATA -- see field description above. */
	/* 8. ALL data is now available and waiting in the DDC buffer.
	 * Read HDMI_I2C_DATA with the following fields set
	 * RW = 0x1 (read)
	 * DATA = BCAPS (this is field where data is pulled from)
	 * INDEX = 0x3 (where the data has been placed in buffer by
	 *         hardware)
	 * INDEX_WRITE = 0x1 (explicitly define offset)
	 */
	/* Write this data to DDC buffer */
	HDMI_OUTP_ND(0x0238, 0x1 | (3 << 16) | (1 << 31));

	/* Discard first byte */
	HDMI_INP_ND(0x0238);
	for (ndx = 0; ndx < data_len; ++ndx) {
		reg_val = HDMI_INP_ND(0x0238);
		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
	}

	DEV_DBG("%s[%s] success\n", __func__, what);

error:
	return status;
}

/*
 * EDID segment read (E-DDC): like hdmi_msm_ddc_read_retry() but a
 * segment-pointer write (slave 0x60) is prepended so EDID blocks
 * beyond the first two can be fetched.  NOTE(review): seg_addr and
 * seg_num are hard-coded to segment 1, so this reaches blocks 2 and 3
 * only -- confirm against callers before reusing for higher blocks.
 */
static int hdmi_msm_ddc_read_edid_seg(uint32 dev_addr, uint32 offset,
	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
	const char *what)
{
	uint32 reg_val, ndx;
	int status = 0;
	uint32 time_out_count;
	int log_retry_fail = retry != 1;
	int seg_addr = 0x60, seg_num = 0x01;

	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s: invalid input paramter\n", __func__);
		goto error;
	}

again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;

	/* Ensure Device Address has LSB cleared: the transfer starts
	 * with an I2C WRITE phase */
	dev_addr &= 0xFE;

	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE  WRITE ONLY. To write index field, set this
	        bit to 1 while writing HDMI_DDC_DATA.
[23:16] INDEX  Use to set index into DDC buffer for next read
	        or current write, or to read index of current read or
	        next write. Writable only when INDEX_WRITE=1.
	   [15:8]  DATA     Use to fill or read the DDC buffer.
	   [0]     DATA_RW  0: Write, 1: Read (buffer access mode). */
	/* 1. Segment-pointer slave address (E-DDC, 0x60):
	 * DATA_RW = 0x0 (write), INDEX = 0x0,
	 * INDEX_WRITE = 0x1 (setting initial buffer offset) */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (seg_addr << 8));

	/* 2. Segment number to select (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, seg_num << 8);

	/* 3. EDID device address (write phase), the register offset,
	 * then the device address again with the R/W bit set for the
	 * read phase (all auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, dev_addr << 8);
	HDMI_OUTP_ND(0x0238, offset << 8);
	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);

	/* Data setup is complete, now setup the transaction
	 * characteristics */

	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0          Byte count for first transaction
	           (excluding the first byte, which is usually the
	           address).
	   [13]    STOP0         1: send STOP after first transaction.
	   [12]    START0        1: send START before first transaction.
	   [8]     STOP_ON_NACK0 1: on NACK stop ALL transactions and
	           send STOP (the current transaction always stops).
	   [0]     RW0           0: WRITE, 1: READ. */
	/* 4. TRANSACTION0: the segment-pointer write
	 * RW0 = 0x0 (write), START0 = 0x1, STOP0 = 0x0,
	 * CNT0 = 0x1 (single byte transaction excluding address)
	 */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));

	/* 0x022C HDMI_DDC_TRANS1 -- same layout as TRANS0. */
	/* 5. TRANSACTION1: the EDID offset write
	 * RW1 = 0x0 (write), START1 = 0x1, STOP1 = 0x0, CNT1 = 0x1
	 * (comment corrected: original text was copy-pasted from the
	 * read transaction)
	 */
	HDMI_OUTP_ND(0x022C, (1 << 12) | (1 << 16));

	/* 0x0230 HDMI_DDC_TRANS2 -- same layout as TRANS0
	 * (CNT2/STOP2/START2/STOP_ON_NACK2/RW2). */
	/* 6. TRANSACTION2: the actual EDID block read
	 * RW2 = 0x1 (read), START2 = 0x1, STOP2 = 0x1,
	 * CNT2 = request_len (it's 128 (0x80) for a blk read)
	 */
	HDMI_OUTP_ND(0x0230, 1 | (1 << 12) | (1 << 13) | (request_len << 16));

	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL -- see field description above.
	 * Write to HDMI_I2C_CONTROL to kick off the hardware; note that
	 * NOTHING has been transmitted on the DDC lines up to this
	 * point.
	 * TRANSACTION_CNT = 0x2 (execute transaction0 through
	 * transaction2)
	 * GO = 0x1 (kicks off hardware)
	 */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (2 << 20));

	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* Ack SW_DONE and leave it masked (write ACK bit only) */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s: failed timout, retry=%d\n",
				__func__, retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s: timedout(7), DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n",
			__func__,
			HDMI_INP(0x0218),
			HDMI_INP(0x021C),
			HDMI_INP(0x0214));
		goto error;
	}

	/* Read DDC status and keep only the NACK0..NACK3 bits */
	reg_val = HDMI_INP_ND(0x0218);
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;

	/* Check if any NACK occurred */
	if (reg_val) {
		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		if (retry == 1)
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
				"dev-addr=0x%02x, offset=0x%02x, "
				"length=%d\n",
				__func__, what, reg_val, retry,
				dev_addr, offset, data_len);
			goto again;
		}
		status = -EIO;
		if (log_retry_fail)
			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
				"offset=0x%02x, length=%d\n",
				__func__, what, reg_val,
				dev_addr, offset, data_len);
		goto error;
	}

	/* 0x0238 HDMI_DDC_DATA -- see field description above. */
	/* 8. ALL data is now available and waiting in the DDC buffer.
* Read HDMI_I2C_DATA with the following fields set
	 * RW = 0x1 (read)
	 * DATA = BCAPS (this is field where data is pulled from)
	 * INDEX = 0x5 (where the data has been placed in buffer by
	 *         hardware)
	 * INDEX_WRITE = 0x1 (explicitly define offset)
	 */
	/* Write this data to DDC buffer */
	HDMI_OUTP_ND(0x0238, 0x1 | (5 << 16) | (1 << 31));

	/* Discard first byte */
	HDMI_INP_ND(0x0238);
	for (ndx = 0; ndx < data_len; ++ndx) {
		reg_val = HDMI_INP_ND(0x0238);
		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
	}

	DEV_DBG("%s[%s] success\n", __func__, what);

error:
	return status;
}

/*
 * Read data_len bytes from a DDC slave.  First tries an exact-length
 * read; on failure retries either with the same length (no_align) or
 * with the on-wire request rounded up to the next 32-byte multiple,
 * which some sinks require.  Returns 0 on success or a negative errno.
 */
static int hdmi_msm_ddc_read(uint32 dev_addr, uint32 offset,
	uint8 *data_buf, uint32 data_len, int retry, const char *what,
	boolean no_align)
{
	int ret = hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf,
		data_len, data_len, retry, what);
	if (!ret)
		return 0;
	if (no_align) {
		return hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf,
			data_len, data_len, retry, what);
	} else {
		return hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf,
			data_len, 32 * ((data_len + 31) / 32), retry, what);
	}
}

/*
 * Read one 128-byte EDID block into edid_buf.  Blocks 0/1 use plain
 * DDC reads; blocks 2 and above go through the E-DDC segment-pointer
 * path.  On failure the block is re-read in progressively smaller
 * chunks (64, 32, 16 bytes) for sinks that cannot handle full-block
 * reads.  Returns the last DDC status (0 on success).
 */
static int hdmi_msm_read_edid_block(int block, uint8 *edid_buf)
{
	int i, rc = 0;
	int block_size = 0x80;

	do {
		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
			block, block_size);
		for (i = 0; i < 0x80; i += block_size) {
			/* Read EDID twice with 32bit alignment too */
			if (block < 2) {
				rc = hdmi_msm_ddc_read(0xA0, block*0x80 + i,
					edid_buf+i, block_size, 1,
					"EDID", FALSE);
			} else {
				rc = hdmi_msm_ddc_read_edid_seg(0xA0,
					block*0x80 + i, edid_buf+i, block_size,
					block_size, 1, "EDID");
			}
			if (rc)
				break;
		}
		block_size /= 2;
	} while (rc && (block_size >= 16));

	return rc;
}

/*
 * Initialise the DDC engine, hand the block reader to the common HDMI
 * code, and parse the sink's EDID.  Requires the HDMI core to be
 * powered on.  Returns 0 on success or a negative errno.
 */
static int hdmi_msm_read_edid(void)
{
	int status;

	msm_hdmi_init_ddc();
	/* Looks like we need to turn on HDMI engine before any
	 * DDC transaction */
	if (!hdmi_msm_is_power_on()) {
		DEV_ERR("%s: failed: HDMI power is off", __func__);
		status = -ENXIO;
		goto error;
	}

	external_common_state->read_edid_block = hdmi_msm_read_edid_block;
	status =
hdmi_common_read_edid();
	if (!status)
		DEV_DBG("EDID: successfully read\n");

error:
	return status;
}

/* Decode an HDCP link AUTH_FAIL_INFO code into a human-readable log
 * message.  No-op when HDCP is not enabled. */
static void hdcp_auth_info(uint32 auth_info)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}

	switch (auth_info) {
	case 0:
		DEV_INFO("%s: None", __func__);
		break;
	case 1:
		DEV_INFO("%s: Software Disabled Authentication", __func__);
		break;
	case 2:
		DEV_INFO("%s: An Written", __func__);
		break;
	case 3:
		DEV_INFO("%s: Invalid Aksv", __func__);
		break;
	case 4:
		DEV_INFO("%s: Invalid Bksv", __func__);
		break;
	case 5:
		DEV_INFO("%s: RI Mismatch (including RO)", __func__);
		break;
	case 6:
		DEV_INFO("%s: consecutive Pj Mismatches", __func__);
		break;
	case 7:
		DEV_INFO("%s: HPD Disconnect", __func__);
		break;
	case 8:
	default:
		DEV_INFO("%s: Reserved", __func__);
		break;
	}
}

/* Decode the HDCP key-block state into a log message.  No-op when
 * HDCP is not enabled. */
static void hdcp_key_state(uint32 key_state)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}

	switch (key_state) {
	case 0:
		DEV_WARN("%s: No HDCP Keys", __func__);
		break;
	case 1:
		DEV_WARN("%s: Not Checked", __func__);
		break;
	case 2:
		DEV_DBG("%s: Checking", __func__);
		break;
	case 3:
		DEV_DBG("%s: HDCP Keys Valid", __func__);
		break;
	case 4:
		DEV_WARN("%s: AKSV not valid", __func__);
		break;
	case 5:
		DEV_WARN("%s: Checksum Mismatch", __func__);
		break;
	case 6:
		/* NOTE(review): the adjacent string literals concatenate
		 * to "Production AKSVwith ..." -- a space is missing at
		 * the join.  Runtime string left unchanged here
		 * (doc-only edit). */
		DEV_DBG("%s: Production AKSV"
			"with ENABLE_USER_DEFINED_AN=1", __func__);
		break;
	case 7:
	default:
		DEV_INFO("%s: Reserved", __func__);
		break;
	}
}

/* Count the set bits across the first 'len' bytes of 'array'
 * (population count).  Presumably used for HDCP KSV validation, where
 * a valid KSV carries exactly 20 ones -- verify against callers. */
static int hdmi_msm_count_one(uint8 *array, uint8 len)
{
	int i, j, count = 0;
	for (i = 0; i < len; i++)
		for (j = 0; j < 8; j++)
			count += (((array[i] >> j) & 0x1) ?
1 : 0); return count; } static void hdcp_deauthenticate(void) { int hdcp_link_status = HDMI_INP(0x011C); if (!hdmi_msm_state->hdcp_enable) { DEV_DBG("%s: HDCP not enabled\n", __func__); return; } /* Disable HDCP interrupts */ HDMI_OUTP(0x0118, 0x0); mutex_lock(&hdcp_auth_state_mutex); external_common_state->hdcp_active = FALSE; mutex_unlock(&hdcp_auth_state_mutex); /* 0x0130 HDCP_RESET [0] LINK0_DEAUTHENTICATE */ HDMI_OUTP(0x0130, 0x1); /* 0x0110 HDCP_CTRL [8] ENCRYPTION_ENABLE [0] ENABLE */ /* encryption_enable = 0 | hdcp block enable = 1 */ HDMI_OUTP(0x0110, 0x0); if (hdcp_link_status & 0x00000004) hdcp_auth_info((hdcp_link_status & 0x000000F0) >> 4); } static void check_and_clear_HDCP_DDC_Failure(void) { int hdcp_ddc_ctrl1_reg; int hdcp_ddc_status; int failure; int nack0; if (!hdmi_msm_state->hdcp_enable) { DEV_DBG("%s: HDCP not enabled\n", __func__); return; } /* * Check for any DDC transfer failures * 0x0128 HDCP_DDC_STATUS * [16] FAILED Indicates that the last HDCP HW DDC transer * failed. This occurs when a transfer is * attempted with HDCP DDC disabled * (HDCP_DDC_DISABLE=1) or the number of retries * match HDCP_DDC_RETRY_CNT * * [14] NACK0 Indicates that the last HDCP HW DDC transfer * was aborted due to a NACK on the first * transaction - cleared by writing 0 to GO bit */ hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS); failure = (hdcp_ddc_status >> 16) & 0x1; nack0 = (hdcp_ddc_status >> 14) & 0x1; DEV_DBG("%s: On Entry: HDCP_DDC_STATUS = 0x%x, FAILURE = %d," "NACK0 = %d\n", __func__ , hdcp_ddc_status, failure, nack0); if (failure == 0x1) { /* * Indicates that the last HDCP HW DDC transfer failed. * This occurs when a transfer is attempted with HDCP DDC * disabled (HDCP_DDC_DISABLE=1) or the number of retries * matches HDCP_DDC_RETRY_CNT. * Failure occured, let's clear it. */ DEV_INFO("%s: DDC failure detected. 
HDCP_DDC_STATUS=0x%08x\n", __func__, hdcp_ddc_status); /* * First, Disable DDC * 0x0120 HDCP_DDC_CTRL_0 * [0] DDC_DISABLE Determines whether HDCP Ri and Pj reads * are done unassisted by hardware or by * software via HDMI_DDC (HDCP provides * interrupts to request software * transfers) * 0 : Use Hardware DDC * 1 : Use Software DDC */ HDMI_OUTP(HDCP_DDC_CTRL_0, 0x1); /* * ACK the Failure to Clear it * 0x0124 HDCP_DDC_CTRL_1 * [0] DDC_FAILED_ACK Write 1 to clear * HDCP_STATUS.HDCP_DDC_FAILED */ hdcp_ddc_ctrl1_reg = HDMI_INP(HDCP_DDC_CTRL_1); HDMI_OUTP(HDCP_DDC_CTRL_1, hdcp_ddc_ctrl1_reg | 0x1); /* Check if the FAILURE got Cleared */ hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS); hdcp_ddc_status = (hdcp_ddc_status >> 16) & 0x1; if (hdcp_ddc_status == 0x0) { DEV_INFO("%s: HDCP DDC Failure has been cleared\n", __func__); } else { DEV_WARN("%s: Error: HDCP DDC Failure DID NOT get" "cleared\n", __func__); } /* Re-Enable HDCP DDC */ HDMI_OUTP(HDCP_DDC_CTRL_0, 0x0); } if (nack0 == 0x1) { /* * 0x020C HDMI_DDC_CTRL * [3] SW_STATUS_RESET Write 1 to reset HDMI_DDC_SW_STATUS * flags, will reset SW_DONE, ABORTED, * TIMEOUT, SW_INTERRUPTED, * BUFFER_OVERFLOW, STOPPED_ON_NACK, NACK0, * NACK1, NACK2, NACK3 */ HDMI_OUTP_ND(HDMI_DDC_CTRL, HDMI_INP(HDMI_DDC_CTRL) | (0x1 << 3)); msleep(20); HDMI_OUTP_ND(HDMI_DDC_CTRL, HDMI_INP(HDMI_DDC_CTRL) & ~(0x1 << 3)); } hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS); failure = (hdcp_ddc_status >> 16) & 0x1; nack0 = (hdcp_ddc_status >> 14) & 0x1; DEV_DBG("%s: On Exit: HDCP_DDC_STATUS = 0x%x, FAILURE = %d," "NACK0 = %d\n", __func__ , hdcp_ddc_status, failure, nack0); } static int hdcp_authentication_part1(void) { int ret = 0; boolean is_match; boolean is_part1_done = FALSE; uint32 timeout_count; uint8 bcaps; uint8 aksv[5]; uint32 qfprom_aksv_0, qfprom_aksv_1, link0_aksv_0, link0_aksv_1; uint8 bksv[5]; uint32 link0_bksv_0, link0_bksv_1; uint8 an[8]; uint32 link0_an_0, link0_an_1; uint32 hpd_int_status, hpd_int_ctrl; static uint8 buf[0xFF]; memset(buf, 0, 
sizeof(buf));

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}

	if (!is_part1_done) {
		is_part1_done = TRUE;

		/* Fetch aksv from QFprom, this info should be public. */
		qfprom_aksv_0 = inpdw(QFPROM_BASE + 0x000060D8);
		qfprom_aksv_1 = inpdw(QFPROM_BASE + 0x000060DC);

		/* copy an and aksv to byte arrays for transmission */
		aksv[0] = qfprom_aksv_0 & 0xFF;
		aksv[1] = (qfprom_aksv_0 >> 8) & 0xFF;
		aksv[2] = (qfprom_aksv_0 >> 16) & 0xFF;
		aksv[3] = (qfprom_aksv_0 >> 24) & 0xFF;
		aksv[4] = qfprom_aksv_1 & 0xFF;

		/* check there are 20 ones in AKSV */
		if (hdmi_msm_count_one(aksv, 5) != 20) {
			DEV_ERR("HDCP: AKSV read from QFPROM doesn't have "
				"20 1's and 20 0's, FAIL (AKSV=%02x%08x)\n",
				qfprom_aksv_1, qfprom_aksv_0);
			ret = -EINVAL;
			goto error;
		}
		DEV_DBG("HDCP: AKSV=%02x%08x\n", qfprom_aksv_1, qfprom_aksv_0);

		/* 0x0288 HDCP_SW_LOWER_AKSV
			[31:0] LOWER_AKSV */
		/* 0x0284 HDCP_SW_UPPER_AKSV
			[7:0] UPPER_AKSV */

		/* This is the lower 32 bits of the SW
		 * injected AKSV value(AKSV[31:0]) read
		 * from the EFUSE. It is needed for HDCP
		 * authentication and must be written
		 * before enabling HDCP. */
		HDMI_OUTP(0x0288, qfprom_aksv_0);
		HDMI_OUTP(0x0284, qfprom_aksv_1);

		msm_hdmi_init_ddc();

		/* read Bcaps at 0x40 in HDCP Port */
		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps",
			TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
			    __LINE__);
			goto error;
		}
		DEV_DBG("HDCP: Bcaps=%02x\n", bcaps);

		/* HDCP setup prior to HDCP enabled */

		/* 0x0148 HDCP_RCVPORT_DATA4
			[15:8] LINK0_AINFO
			[7:0] LINK0_AKSV_1 */
		/* LINK0_AINFO	= 0x2 FEATURE 1.1 on.
		 *		= 0x0 FEATURE 1.1 off*/
		HDMI_OUTP(0x0148, 0x0);

		/* 0x012C HDCP_ENTROPY_CTRL0
			[31:0] BITS_OF_INFLUENCE_0 */
		/* 0x025C HDCP_ENTROPY_CTRL1
			[31:0] BITS_OF_INFLUENCE_1 */
		HDMI_OUTP(0x012C, 0xB1FFB0FF);
		HDMI_OUTP(0x025C, 0xF00DFACE);

		/* 0x0114 HDCP_DEBUG_CTRL
			[2]	DEBUG_RNG_CIPHER
			else default 0 */
		HDMI_OUTP(0x0114, HDMI_INP(0x0114) & 0xFFFFFFFB);

		/* 0x0110 HDCP_CTRL
			[8] ENCRYPTION_ENABLE
			[0] ENABLE */
		/* Enable HDCP. Encryption should be enabled after reading R0 */
		HDMI_OUTP(0x0110, BIT(0));

		/*
		 * Check to see if a HDCP DDC Failure is indicated in
		 * HDCP_DDC_STATUS. If yes, clear it.
		 */
		check_and_clear_HDCP_DDC_Failure();

		/* 0x0118 HDCP_INT_CTRL
		 *    [2] AUTH_SUCCESS_MASK	[R/W] Mask bit for\
		 *				HDCP Authentication
		 *		Success interrupt - set to 1 to enable interrupt
		 *
		 *    [6] AUTH_FAIL_MASK	[R/W] Mask bit for HDCP
		 *				Authentication
		 *		Lost interrupt set to 1 to enable interrupt
		 *
		 *    [7] AUTH_FAIL_INFO_ACK	[W] Acknwledge bit for HDCP
		 *		Auth Failure Info field - write 1 to clear
		 *
		 *   [10] DDC_XFER_REQ_MASK	[R/W] Mask bit for HDCP\
		 *				DDC Transfer
		 *		Request interrupt - set to 1 to enable interrupt
		 *
		 *   [14] DDC_XFER_DONE_MASK	[R/W] Mask bit for HDCP\
		 *				DDC Transfer
		 *		done interrupt - set to 1 to enable interrupt
		 */
		/* enable all HDCP ints */
		HDMI_OUTP(0x0118, (1 << 2) | (1 << 6) | (1 << 7));

		/* 0x011C HDCP_LINK0_STATUS
			[8] AN_0_READY
			[9] AN_1_READY */
		/* wait for an0 and an1 ready bits to be set in LINK0_STATUS */
		mutex_lock(&hdcp_auth_state_mutex);
		timeout_count = 100;
		while (((HDMI_INP_ND(0x011C) & (0x3 << 8)) != (0x3 << 8))
			&& timeout_count--)
			msleep(20);
		if (!timeout_count) {
			ret = -ETIMEDOUT;
			DEV_ERR("%s(%d): timedout, An0=%d, An1=%d\n",
				__func__, __LINE__,
				(HDMI_INP_ND(0x011C) & BIT(8)) >> 8,
				(HDMI_INP_ND(0x011C) & BIT(9)) >> 9);
			mutex_unlock(&hdcp_auth_state_mutex);
			goto error;
		}

		/* 0x0168 HDCP_RCVPORT_DATA12
		   [23:8] BSTATUS
		   [7:0] BCAPS */
		HDMI_OUTP(0x0168, bcaps);

		/* 0x014C HDCP_RCVPORT_DATA5
			[31:0] LINK0_AN_0 */
		/* read an0 calculation */
		link0_an_0 = HDMI_INP(0x014C);

		/* 0x0150 HDCP_RCVPORT_DATA6
			[31:0] LINK0_AN_1 */
		/* read an1 calculation */
		link0_an_1 = HDMI_INP(0x0150);
		mutex_unlock(&hdcp_auth_state_mutex);

		/* three bits 28..30 */
		hdcp_key_state((HDMI_INP(0x011C) >> 28) & 0x7);

		/* 0x0144 HDCP_RCVPORT_DATA3
			[31:0] LINK0_AKSV_0 public key
		   0x0148 HDCP_RCVPORT_DATA4
			[15:8] LINK0_AINFO
			[7:0]  LINK0_AKSV_1 public key */
		link0_aksv_0 = HDMI_INP(0x0144);
		link0_aksv_1 = HDMI_INP(0x0148);

		/* copy an and aksv to byte arrays for transmission */
		aksv[0] = link0_aksv_0 & 0xFF;
		aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
		aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
		aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
		aksv[4] = link0_aksv_1 & 0xFF;

		an[0] = link0_an_0 & 0xFF;
		an[1] = (link0_an_0 >> 8) & 0xFF;
		an[2] = (link0_an_0 >> 16) & 0xFF;
		an[3] = (link0_an_0 >> 24) & 0xFF;
		an[4] = link0_an_1 & 0xFF;
		an[5] = (link0_an_1 >> 8) & 0xFF;
		an[6] = (link0_an_1 >> 16) & 0xFF;
		an[7] = (link0_an_1 >> 24) & 0xFF;

		/* Write An 8 bytes to offset 0x18 */
		ret = hdmi_msm_ddc_write(0x74, 0x18, an, 8, "An");
		if (ret) {
			DEV_ERR("%s(%d): Write An failed", __func__,
			    __LINE__);
			goto error;
		}

		/* Write Aksv 5 bytes to offset 0x10 */
		ret = hdmi_msm_ddc_write(0x74, 0x10, aksv, 5, "Aksv");
		if (ret) {
			DEV_ERR("%s(%d): Write Aksv failed", __func__,
			    __LINE__);
			goto error;
		}
		DEV_DBG("HDCP: Link0-AKSV=%02x%08x\n",
			link0_aksv_1 & 0xFF, link0_aksv_0);

		/* Read Bksv 5 bytes at 0x00 in HDCP port */
		ret = hdmi_msm_ddc_read(0x74, 0x00, bksv, 5, 5, "Bksv", TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read BKSV failed", __func__,
			    __LINE__);
			goto error;
		}
		/* check there are 20 ones in BKSV */
		if (hdmi_msm_count_one(bksv, 5) != 20) {
			DEV_ERR("HDCP: BKSV read from Sink doesn't have "
				"20 1's and 20 0's, FAIL (BKSV="
				"%02x%02x%02x%02x%02x)\n",
				bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
			ret = -EINVAL;
			goto error;
		}

		link0_bksv_0 = bksv[3];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
		link0_bksv_1 = bksv[4];
		DEV_DBG("HDCP: BKSV=%02x%08x\n", link0_bksv_1, link0_bksv_0);

		/* 0x0134 HDCP_RCVPORT_DATA0
			[31:0] LINK0_BKSV_0 */
		HDMI_OUTP(0x0134, link0_bksv_0);
		/* 0x0138 HDCP_RCVPORT_DATA1
			[31:0] LINK0_BKSV_1 */
		HDMI_OUTP(0x0138, link0_bksv_1);
		DEV_DBG("HDCP: Link0-BKSV=%02x%08x\n", link0_bksv_1,
		    link0_bksv_0);

		/* HDMI_HPD_INT_STATUS[0x0250] */
		hpd_int_status = HDMI_INP_ND(0x0250);
		/* HDMI_HPD_INT_CTRL[0x0254] */
		hpd_int_ctrl = HDMI_INP_ND(0x0254);
		DEV_DBG("[SR-DEUG]: HPD_INTR_CTRL=[%u] HPD_INTR_STATUS=[%u] "
			"before reading R0'\n", hpd_int_ctrl, hpd_int_status);

		/*
		 * HDCP Compliace Test case 1B-01:
		 * Wait here until all the ksv bytes have been
		 * read from the KSV FIFO register.
		 */
		msleep(125);

		/* Reading R0' 2 bytes at offset 0x08 */
		ret = hdmi_msm_ddc_read(0x74, 0x08, buf, 2, 5, "RO'", TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read RO's failed", __func__,
			    __LINE__);
			goto error;
		}

		DEV_DBG("HDCP: R0'=%02x%02x\n", buf[1], buf[0]);
		INIT_COMPLETION(hdmi_msm_state->hdcp_success_done);
		/* 0x013C HDCP_RCVPORT_DATA2_0
			[15:0] LINK0_RI */
		HDMI_OUTP(0x013C, (((uint32)buf[1]) << 8) | buf[0]);

		timeout_count = wait_for_completion_interruptible_timeout(
			&hdmi_msm_state->hdcp_success_done, HZ*2);

		if (!timeout_count) {
			ret = -ETIMEDOUT;
			is_match = HDMI_INP(0x011C) & BIT(12);
			DEV_ERR("%s(%d): timedout, Link0=<%s>\n", __func__,
			  __LINE__,
			  is_match ? "RI_MATCH" : "No RI Match INTR in time");
			if (!is_match)
				goto error;
		}

		/* 0x011C HDCP_LINK0_STATUS
			[12] RI_MATCHES	[0] MISMATCH, [1] MATCH
			[0] AUTH_SUCCESS */
		/* Checking for RI, R0 Match */
		/* RI_MATCHES */
		if ((HDMI_INP(0x011C) & BIT(12)) != BIT(12)) {
			ret = -EINVAL;
			DEV_ERR("%s: HDCP_LINK0_STATUS[RI_MATCHES]: "
			    "MISMATCH\n", __func__);
			goto error;
		}

		/* Enable HDCP Encryption */
		HDMI_OUTP(0x0110, BIT(0) | BIT(8));

		DEV_INFO("HDCP: authentication part I, successful\n");
		is_part1_done = FALSE;
		return 0;
error:
		DEV_ERR("[%s]: HDCP Reauthentication\n", __func__);
		is_part1_done = FALSE;
		return ret;
	} else {
		return 1;
	}
}

/*
 * Read the five 32-bit V' SHA-1 verification words (H0..H4) from the
 * sink over DDC (offsets 0x20..0x30) and mirror each into the
 * corresponding HDCP_RCVPORT_DATA7..11 register for HW comparison.
 */
static int hdmi_msm_transfer_v_h(void)
{
	/* Read V'.HO 4 Byte at offset 0x20 */
	char what[20];
	int ret;
	uint8 buf[4];

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}

	snprintf(what, sizeof(what), "V' H0");
	ret = hdmi_msm_ddc_read(0x74, 0x20, buf, 4, 5, what, TRUE);
	if (ret) {
		DEV_ERR("%s: Read %s failed", __func__, what);
		return ret;
	}
	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
			buf[0] , buf[1] , buf[2] , buf[3]);

	/* 0x0154 HDCP_RCVPORT_DATA7
	   [31:0] V_HO */
	HDMI_OUTP(0x0154 ,
		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));

	snprintf(what, sizeof(what), "V' H1");
	ret = hdmi_msm_ddc_read(0x74, 0x24, buf, 4, 5, what, TRUE);
	if (ret) {
		DEV_ERR("%s: Read %s failed", __func__, what);
		return ret;
	}
	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
			buf[0] , buf[1] , buf[2] , buf[3]);

	/* 0x0158 HDCP_RCVPORT_ DATA8
	   [31:0] V_H1 */
	HDMI_OUTP(0x0158,
		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));

	snprintf(what, sizeof(what), "V' H2");
	ret = hdmi_msm_ddc_read(0x74, 0x28, buf, 4, 5, what, TRUE);
	if (ret) {
		DEV_ERR("%s: Read %s failed", __func__, what);
		return ret;
	}
	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
			buf[0] , buf[1] , buf[2] , buf[3]);

	/* 0x015c HDCP_RCVPORT_DATA9
	   [31:0] V_H2 */
	HDMI_OUTP(0x015c ,
		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));

	snprintf(what, sizeof(what), "V' H3");
	ret = hdmi_msm_ddc_read(0x74, 0x2c, buf, 4, 5, what, TRUE);
	if (ret) {
		DEV_ERR("%s: Read %s failed", __func__, what);
		return ret;
	}
	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
			buf[0] , buf[1] , buf[2] , buf[3]);

	/* 0x0160 HDCP_RCVPORT_DATA10
	   [31:0] V_H3 */
	HDMI_OUTP(0x0160,
		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));

	snprintf(what, sizeof(what), "V' H4");
	ret = hdmi_msm_ddc_read(0x74, 0x30, buf, 4, 5, what, TRUE);
	if (ret) {
		DEV_ERR("%s: Read %s failed", __func__, what);
		return ret;
	}
	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
			buf[0] , buf[1] , buf[2] , buf[3]);

	/* 0x0164 HDCP_RCVPORT_DATA11
	   [31:0] V_H4 */
	HDMI_OUTP(0x0164,
		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));

	return 0;
}

/*
 * HDCP authentication part II (repeater path): wait for the repeater's
 * READY bit, pull BSTATUS and the downstream KSV FIFO over DDC, feed
 * the KSV list through the HW SHA-1 engine and wait for V to match V'.
 * Returns 0 on success, negative errno on failure/timeout.
 */
static int hdcp_authentication_part2(void)
{
	int ret = 0;
	uint32 timeout_count;
	int i = 0;
	int cnt = 0;
	uint bstatus;
	uint8 bcaps;
	uint32 down_stream_devices;
	uint32 ksv_bytes;

	static uint8 buf[0xFF];
	static uint8 kvs_fifo[5 * 127];

	boolean max_devs_exceeded = 0;
	boolean max_cascade_exceeded = 0;

	boolean ksv_done = FALSE;

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}

	memset(buf, 0, sizeof(buf));
	memset(kvs_fifo, 0, sizeof(kvs_fifo));

	/* wait until READY bit is set in bcaps */
	timeout_count = 50;
	do {
		timeout_count--;
		/* read bcaps 1 Byte at offset 0x40 */
		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 1,
		    "Bcaps", FALSE);
		if (ret) {
			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
			    __LINE__);
			goto error;
		}
		msleep(100);
	} while ((0 == (bcaps & 0x20)) && timeout_count); /* READY (Bit 5) */

	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s:timedout(1)", __func__);
		goto error;
	}

	/* read bstatus 2 bytes at offset 0x41 */
	ret = hdmi_msm_ddc_read(0x74, 0x41, buf, 2, 5, "Bstatus", FALSE);
	if (ret) {
		DEV_ERR("%s(%d): Read Bstatus failed", __func__, __LINE__);
		goto error;
	}
	bstatus = buf[1];
	bstatus = (bstatus << 8) | buf[0];
	/* 0x0168 HDCP_RCVPORT_DATA12
	   [7:0] BCAPS
	   [23:8] BSTATUS */
	HDMI_OUTP(0x0168, bcaps | (bstatus << 8));
	/* BSTATUS [6:0] DEVICE_COUNT Number of HDMI device attached to
	   repeater - see HDCP spec */
	down_stream_devices = bstatus & 0x7F;

	if (down_stream_devices == 0x0) {
		/* There isn't any devices attaced to the Repeater */
		DEV_ERR("%s: there isn't any devices attached to the "
		    "Repeater\n", __func__);
		ret = -EINVAL;
		goto error;
	}

	/*
	 * HDCP Compliance 1B-05:
	 * Check if no. of devices connected to repeater
	 * exceed max_devices_connected from bit 7 of Bstatus.
	 */
	max_devs_exceeded = (bstatus & 0x80) >> 7;
	if (max_devs_exceeded == 0x01) {
		DEV_ERR("%s: Number of devs connected to repeater "
		    "exceeds max_devs\n", __func__);
		ret = -EINVAL;
		goto hdcp_error;
	}

	/*
	 * HDCP Compliance 1B-06:
	 * Check if no. of cascade connected to repeater
	 * exceed max_cascade_connected from bit 11 of Bstatus.
	 */
	max_cascade_exceeded = (bstatus & 0x800) >> 11;
	if (max_cascade_exceeded == 0x01) {
		DEV_ERR("%s: Number of cascade connected to repeater "
		    "exceeds max_cascade\n", __func__);
		ret = -EINVAL;
		goto hdcp_error;
	}

	/* Read KSV FIFO over DDC
	 * Key Slection vector FIFO
	 * Used to pull downstream KSVs from HDCP Repeaters.
	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
	 *   auto incrementing access.
	 * All bytes read as 0x00 for HDCP Receivers that are not
	 *   HDCP Repeaters (REPEATER == 0). */
	ksv_bytes = 5 * down_stream_devices;
	/* Reading KSV FIFO / KSV FIFO */
	ksv_done = FALSE;

	ret = hdmi_msm_ddc_read(0x74, 0x43, kvs_fifo, ksv_bytes, 5,
	"KSV FIFO", TRUE);
	/*
	 * NOTE(review): the DDC read above is issued only once; this loop
	 * only sleeps and re-tests the same 'ret', so a failed read can
	 * never succeed on a later iteration — verify intent.
	 */
	do {
		if (ret) {
			DEV_ERR("%s(%d): Read KSV FIFO failed",
			    __func__, __LINE__);
			/*
			 * HDCP Compliace Test case 1B-01:
			 * Wait here until all the ksv bytes have been
			 * read from the KSV FIFO register.
			 */
			msleep(25);
		} else {
			ksv_done = TRUE;
		}
		cnt++;
	} while (!ksv_done && cnt != 20);

	if (ksv_done == FALSE)
		goto error;

	ret = hdmi_msm_transfer_v_h();
	if (ret)
		goto error;

	/* Next: Write KSV FIFO to HDCP_SHA_DATA.
	 * This is done 1 byte at time starting with the LSB.
	 * On the very last byte write,
	 * the HDCP_SHA_DATA_DONE bit[0]
	 */

	/* 0x023C HDCP_SHA_CTRL
	   [0] RESET	[0] Enable, [1] Reset
	   [4] SELECT	[0] DIGA_HDCP, [1] DIGB_HDCP */
	/* reset SHA engine */
	HDMI_OUTP(0x023C, 1);
	/* enable SHA engine, SEL=DIGA_HDCP */
	HDMI_OUTP(0x023C, 0);

	for (i = 0; i < ksv_bytes - 1; i++) {
		/* Write KSV byte and do not set DONE bit[0] */
		HDMI_OUTP_ND(0x0244, kvs_fifo[i] << 16);

		/* Once 64 bytes have been written, we need to poll for
		 * HDCP_SHA_BLOCK_DONE before writing any further
		 */
		if (i && !((i+1)%64)) {
			timeout_count = 100;
			while (!(HDMI_INP_ND(0x0240) & 0x1)
			    && (--timeout_count)) {
				DEV_DBG("HDCP Auth Part II: Waiting for the "
				    "computation of the current 64 byte to "
				    "complete. HDCP_SHA_STATUS=%08x. "
				    "timeout_count=%d\n",
				     HDMI_INP_ND(0x0240), timeout_count);
				msleep(20);
			}
			if (!timeout_count) {
				ret = -ETIMEDOUT;
				DEV_ERR("%s(%d): timedout", __func__,
				    __LINE__);
				goto error;
			}
		}

	}

	/* Write l to DONE bit[0] */
	HDMI_OUTP_ND(0x0244, (kvs_fifo[ksv_bytes - 1] << 16) | 0x1);

	/* 0x0240 HDCP_SHA_STATUS
	   [4] COMP_DONE */
	/* Now wait for HDCP_SHA_COMP_DONE */
	timeout_count = 100;
	while ((0x10 != (HDMI_INP_ND(0x0240) & 0xFFFFFF10))
	    && --timeout_count)
		msleep(20);

	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s(%d): timedout", __func__, __LINE__);
		goto error;
	}

	/* 0x011C HDCP_LINK0_STATUS
	   [20] V_MATCHES */
	timeout_count = 100;
	while (((HDMI_INP_ND(0x011C) & (1 << 20)) != (1 << 20))
	    && --timeout_count) {
		msleep(20);
	}

	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s(%d): timedout", __func__, __LINE__);
		goto error;
	}

	DEV_INFO("HDCP: authentication part II, successful\n");

hdcp_error:
error:
	return ret;
}

/*
 * HDCP authentication part III: repeatedly verify that
 * HDCP_LINK0_STATUS still reports the fully-authenticated pattern
 * (keys valid, R0 done, RI match, auth success, plus V match when a
 * repeater was found).  Returns 0 on success, -EINVAL on mismatch.
 *
 * NOTE(review): this busy-polls 3000 times with no delay between
 * reads — confirm that is intentional rather than a missed msleep.
 */
static int hdcp_authentication_part3(uint32 found_repeater)
{
	int ret = 0;
	int poll = 3000;

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}

	while (poll) {
		/* 0x011C HDCP_LINK0_STATUS
		   [30:28]  KEYS_STATE = 3 = "Valid"
		   [24] RO_COMPUTATION_DONE	[0] Not Done, [1] Done
		   [20] V_MATCHES		[0] Mismtach, [1] Match
		   [12] RI_MATCHES		[0] Mismatch, [1] Match
		   [0] AUTH_SUCCESS */
		if (HDMI_INP_ND(0x011C) != (0x31001001 |
		    (found_repeater << 20))) {
			DEV_ERR("HDCP: autentication part III, FAILED, "
			    "Link Status=%08x\n", HDMI_INP(0x011C));
			ret = -EINVAL;
			goto error;
		}
		poll--;
	}

	DEV_INFO("HDCP: authentication part III, successful\n");

error:
	return ret;
}

/*
 * Run the full HDCP authentication sequence (parts I-III), update the
 * driver/external auth state, and on success notify userspace via a
 * HDCP_STATE=PASS uevent.  On failure either deauthenticate (if HPD
 * dropped mid-auth) or queue a re-authentication work item.
 */
static void hdmi_msm_hdcp_enable(void)
{
	int ret = 0;
	uint8 bcaps;
	uint32 found_repeater = 0x0;
	char *envp[2];

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_INFO("%s: HDCP NOT ENABLED\n", __func__);
		return;
	}

	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->hdcp_activating = TRUE;
	mutex_unlock(&hdmi_msm_state_mutex);

	mutex_lock(&hdcp_auth_state_mutex);
	/* This flag prevents other threads from re-authenticating
	 * after we've just authenticated (i.e., finished part3)
	 * We probably need to protect this in a mutex lock */
	hdmi_msm_state->full_auth_done = FALSE;
	mutex_unlock(&hdcp_auth_state_mutex);

	/* Disable HDCP before we start part1 */
	HDMI_OUTP(0x0110, 0x0);

	/* PART I Authentication*/
	ret = hdcp_authentication_part1();
	if (ret)
		goto error;

	/* PART II Authentication*/
	/* read Bcaps at 0x40 in HDCP Port */
	ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps", FALSE);
	if (ret) {
		DEV_ERR("%s(%d): Read Bcaps failed\n", __func__, __LINE__);
		goto error;
	}
	DEV_DBG("HDCP: Bcaps=0x%02x (%s)\n", bcaps,
		(bcaps & BIT(6)) ?
"repeater" : "no repeater"); /* if REPEATER (Bit 6), perform Part2 Authentication */ if (bcaps & BIT(6)) { found_repeater = 0x1; ret = hdcp_authentication_part2(); if (ret) goto error; } else DEV_INFO("HDCP: authentication part II skipped, no repeater\n"); /* PART III Authentication*/ ret = hdcp_authentication_part3(found_repeater); if (ret) goto error; mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->hdcp_activating = FALSE; mutex_unlock(&hdmi_msm_state_mutex); mutex_lock(&hdcp_auth_state_mutex); /* * This flag prevents other threads from re-authenticating * after we've just authenticated (i.e., finished part3) */ hdmi_msm_state->full_auth_done = TRUE; external_common_state->hdcp_active = TRUE; mutex_unlock(&hdcp_auth_state_mutex); if (!hdmi_msm_is_dvi_mode()) { DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n"); envp[0] = "HDCP_STATE=PASS"; envp[1] = NULL; kobject_uevent_env(external_common_state->uevent_kobj, KOBJ_CHANGE, envp); SWITCH_SET_HDMI_AUDIO(1, 0); } return; error: if (hdmi_msm_state->hpd_during_auth) { DEV_WARN("Calling Deauthentication: HPD occured during " "authentication from [%s]\n", __func__); hdcp_deauthenticate(); mutex_lock(&hdcp_auth_state_mutex); hdmi_msm_state->hpd_during_auth = FALSE; mutex_unlock(&hdcp_auth_state_mutex); } else { DEV_WARN("[DEV_DBG]: Calling reauth from [%s]\n", __func__); if (hdmi_msm_state->panel_power_on) queue_work(hdmi_work_queue, &hdmi_msm_state->hdcp_reauth_work); } mutex_lock(&hdmi_msm_state_mutex); hdmi_msm_state->hdcp_activating = FALSE; mutex_unlock(&hdmi_msm_state_mutex); } static void hdmi_msm_video_setup(int video_format) { uint32 total_v = 0; uint32 total_h = 0; uint32 start_h = 0; uint32 end_h = 0; uint32 start_v = 0; uint32 end_v = 0; const struct hdmi_disp_mode_timing_type *timing = hdmi_common_get_supported_mode(video_format); /* timing register setup */ if (timing == NULL) { DEV_ERR("video format not supported: %d\n", video_format); return; } /* Hsync Total and Vsync Total */ total_h = timing->active_h + 
timing->front_porch_h + timing->back_porch_h + timing->pulse_width_h - 1; total_v = timing->active_v + timing->front_porch_v + timing->back_porch_v + timing->pulse_width_v - 1; /* 0x02C0 HDMI_TOTAL [27:16] V_TOTAL Vertical Total [11:0] H_TOTAL Horizontal Total */ HDMI_OUTP(0x02C0, ((total_v << 16) & 0x0FFF0000) | ((total_h << 0) & 0x00000FFF)); /* Hsync Start and Hsync End */ start_h = timing->back_porch_h + timing->pulse_width_h; end_h = (total_h + 1) - timing->front_porch_h; /* 0x02B4 HDMI_ACTIVE_H [27:16] END Horizontal end [11:0] START Horizontal start */ HDMI_OUTP(0x02B4, ((end_h << 16) & 0x0FFF0000) | ((start_h << 0) & 0x00000FFF)); start_v = timing->back_porch_v + timing->pulse_width_v - 1; end_v = total_v - timing->front_porch_v; /* 0x02B8 HDMI_ACTIVE_V [27:16] END Vertical end [11:0] START Vertical start */ HDMI_OUTP(0x02B8, ((end_v << 16) & 0x0FFF0000) | ((start_v << 0) & 0x00000FFF)); if (timing->interlaced) { /* 0x02C4 HDMI_V_TOTAL_F2 [11:0] V_TOTAL_F2 Vertical total for field2 */ HDMI_OUTP(0x02C4, ((total_v + 1) << 0) & 0x00000FFF); /* 0x02BC HDMI_ACTIVE_V_F2 [27:16] END_F2 Vertical end for field2 [11:0] START_F2 Vertical start for Field2 */ HDMI_OUTP(0x02BC, (((start_v + 1) << 0) & 0x00000FFF) | (((end_v + 1) << 16) & 0x0FFF0000)); } else { /* HDMI_V_TOTAL_F2 */ HDMI_OUTP(0x02C4, 0); /* HDMI_ACTIVE_V_F2 */ HDMI_OUTP(0x02BC, 0); } hdmi_frame_ctrl_cfg(timing); } struct hdmi_msm_audio_acr { uint32 n; /* N parameter for clock regeneration */ uint32 cts; /* CTS parameter for clock regeneration */ }; struct hdmi_msm_audio_arcs { uint32 pclk; struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX]; }; #define HDMI_MSM_AUDIO_ARCS(pclk, ...) 
{ pclk, __VA_ARGS__ } /* Audio constants lookup table for hdmi_msm_audio_acr_setup */ /* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */ static const struct hdmi_msm_audio_arcs hdmi_msm_audio_acr_lut[] = { /* 25.200MHz */ HDMI_MSM_AUDIO_ARCS(25200, { {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000}, {12288, 25200}, {25088, 28000}, {24576, 25200} }), /* 27.000MHz */ HDMI_MSM_AUDIO_ARCS(27000, { {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000}, {12288, 27000}, {25088, 30000}, {24576, 27000} }), /* 27.027MHz */ HDMI_MSM_AUDIO_ARCS(27030, { {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030}, {12288, 27027}, {25088, 30030}, {24576, 27027} }), /* 74.250MHz */ HDMI_MSM_AUDIO_ARCS(74250, { {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500}, {12288, 74250}, {25088, 82500}, {24576, 74250} }), /* 148.500MHz */ HDMI_MSM_AUDIO_ARCS(148500, { {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000}, {12288, 148500}, {25088, 165000}, {24576, 148500} }), }; static void hdmi_msm_audio_acr_setup(boolean enabled, int video_format, int audio_sample_rate, int num_of_channels) { /* Read first before writing */ /* HDMI_ACR_PKT_CTRL[0x0024] */ uint32 acr_pck_ctrl_reg = HDMI_INP(0x0024); /* Clear N/CTS selection bits */ acr_pck_ctrl_reg &= ~(3 << 4); if (enabled) { const struct hdmi_disp_mode_timing_type *timing = hdmi_common_get_supported_mode(video_format); const struct hdmi_msm_audio_arcs *audio_arc = &hdmi_msm_audio_acr_lut[0]; const int lut_size = sizeof(hdmi_msm_audio_acr_lut) /sizeof(*hdmi_msm_audio_acr_lut); uint32 i, n, cts, layout, multiplier, aud_pck_ctrl_2_reg; if (timing == NULL) { DEV_WARN("%s: video format %d not supported\n", __func__, video_format); return; } for (i = 0; i < lut_size; audio_arc = &hdmi_msm_audio_acr_lut[++i]) { if (audio_arc->pclk == timing->pixel_freq) break; } if (i >= lut_size) { DEV_WARN("%s: pixel clock %d not supported\n", __func__, timing->pixel_freq); return; } n = 
audio_arc->lut[audio_sample_rate].n; cts = audio_arc->lut[audio_sample_rate].cts; layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1; if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio_sample_rate)) { multiplier = 4; n >>= 2; /* divide N by 4 and use multiplier */ } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio_sample_rate)) { multiplier = 2; n >>= 1; /* divide N by 2 and use multiplier */ } else { multiplier = 1; } DEV_DBG("%s: n=%u, cts=%u, layout=%u\n", __func__, n, cts, layout); /* AUDIO_PRIORITY | SOURCE */ acr_pck_ctrl_reg |= 0x80000100; /* N_MULTIPLE(multiplier) */ acr_pck_ctrl_reg |= (multiplier & 7) << 16; if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate)) { /* SELECT(3) */ acr_pck_ctrl_reg |= 3 << 4; /* CTS_48 */ cts <<= 12; /* CTS: need to determine how many fractional bits */ /* HDMI_ACR_48_0 */ HDMI_OUTP(0x00D4, cts); /* N */ /* HDMI_ACR_48_1 */ HDMI_OUTP(0x00D8, n); } else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio_sample_rate) || (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio_sample_rate)) { /* SELECT(2) */ acr_pck_ctrl_reg |= 2 << 4; /* CTS_44 */ cts <<= 12; /* CTS: need to determine how many fractional bits */ /* HDMI_ACR_44_0 */ HDMI_OUTP(0x00CC, cts); /* N */ /* HDMI_ACR_44_1 */ HDMI_OUTP(0x00D0, n); } else { /* default to 32k */ /* SELECT(1) */ acr_pck_ctrl_reg |= 1 << 4; /* CTS_32 */ cts <<= 12; /* CTS: need to determine how many fractional bits */ /* HDMI_ACR_32_0 */ HDMI_OUTP(0x00C4, cts); /* N */ /* HDMI_ACR_32_1 */ HDMI_OUTP(0x00C8, n); } /* Payload layout depends on number of audio channels */ /* LAYOUT_SEL(layout) */ aud_pck_ctrl_2_reg = 1 | (layout << 1); /* override | layout */ /* HDMI_AUDIO_PKT_CTRL2[0x00044] */ HDMI_OUTP(0x00044, aud_pck_ctrl_2_reg); /* SEND | CONT 
*/ acr_pck_ctrl_reg |= 0x00000003; } else { /* ~(SEND | CONT) */ acr_pck_ctrl_reg &= ~0x00000003; } /* HDMI_ACR_PKT_CTRL[0x0024] */ HDMI_OUTP(0x0024, acr_pck_ctrl_reg); } static void hdmi_msm_outpdw_chk(uint32 offset, uint32 data) { uint32 check, i = 0; #ifdef DEBUG HDMI_OUTP(offset, data); #endif do { outpdw(MSM_HDMI_BASE+offset, data); check = inpdw(MSM_HDMI_BASE+offset); } while (check != data && i++ < 10); if (check != data) DEV_ERR("%s: failed addr=%08x, data=%x, check=%x", __func__, offset, data, check); } static void hdmi_msm_rmw32or(uint32 offset, uint32 data) { uint32 reg_data; reg_data = inpdw(MSM_HDMI_BASE+offset); reg_data = inpdw(MSM_HDMI_BASE+offset); hdmi_msm_outpdw_chk(offset, reg_data | data); } #define HDMI_AUDIO_CFG 0x01D0 #define HDMI_AUDIO_ENGINE_ENABLE 1 #define HDMI_AUDIO_FIFO_MASK 0x000000F0 #define HDMI_AUDIO_FIFO_WATERMARK_SHIFT 4 #define HDMI_AUDIO_FIFO_MAX_WATER_MARK 8 int hdmi_audio_enable(bool on , u32 fifo_water_mark) { u32 hdmi_audio_config; hdmi_audio_config = HDMI_INP(HDMI_AUDIO_CFG); if (on) { if (fifo_water_mark > HDMI_AUDIO_FIFO_MAX_WATER_MARK) { pr_err("%s : HDMI audio fifo water mark can not be more" " than %u\n", __func__, HDMI_AUDIO_FIFO_MAX_WATER_MARK); return -EINVAL; } /* * Enable HDMI Audio engine. * MUST be enabled after Audio DMA is enabled. 
*/ hdmi_audio_config &= ~(HDMI_AUDIO_FIFO_MASK); hdmi_audio_config |= (HDMI_AUDIO_ENGINE_ENABLE | (fifo_water_mark << HDMI_AUDIO_FIFO_WATERMARK_SHIFT)); } else hdmi_audio_config &= ~(HDMI_AUDIO_ENGINE_ENABLE); HDMI_OUTP(HDMI_AUDIO_CFG, hdmi_audio_config); mb(); pr_info("%s :HDMI_AUDIO_CFG 0x%08x\n", __func__, HDMI_INP(HDMI_AUDIO_CFG)); return 0; } EXPORT_SYMBOL(hdmi_audio_enable); #define HDMI_AUDIO_PKT_CTRL 0x0020 #define HDMI_AUDIO_SAMPLE_SEND_ENABLE 1 int hdmi_audio_packet_enable(bool on) { u32 hdmi_audio_pkt_ctrl; hdmi_audio_pkt_ctrl = HDMI_INP(HDMI_AUDIO_PKT_CTRL); if (on) hdmi_audio_pkt_ctrl |= HDMI_AUDIO_SAMPLE_SEND_ENABLE; else hdmi_audio_pkt_ctrl &= ~(HDMI_AUDIO_SAMPLE_SEND_ENABLE); HDMI_OUTP(HDMI_AUDIO_PKT_CTRL, hdmi_audio_pkt_ctrl); mb(); pr_info("%s : HDMI_AUDIO_PKT_CTRL 0x%08x\n", __func__, HDMI_INP(HDMI_AUDIO_PKT_CTRL)); return 0; } EXPORT_SYMBOL(hdmi_audio_packet_enable); /* TO-DO: return -EINVAL when num_of_channels and channel_allocation * does not match CEA 861-D spec. */ int hdmi_msm_audio_info_setup(bool enabled, u32 num_of_channels, u32 channel_allocation, u32 level_shift, bool down_mix) { uint32 channel_count = 1; /* Default to 2 channels -> See Table 17 in CEA-D spec */ uint32 check_sum, audio_info_0_reg, audio_info_1_reg; uint32 audio_info_ctrl_reg; u32 aud_pck_ctrl_2_reg; u32 layout; layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 
0 : 1; aud_pck_ctrl_2_reg = 1 | (layout << 1); HDMI_OUTP(0x00044, aud_pck_ctrl_2_reg); /* Please see table 20 Audio InfoFrame in HDMI spec FL = front left FC = front Center FR = front right FLC = front left center FRC = front right center RL = rear left RC = rear center RR = rear right RLC = rear left center RRC = rear right center LFE = low frequency effect */ /* Read first then write because it is bundled with other controls */ /* HDMI_INFOFRAME_CTRL0[0x002C] */ audio_info_ctrl_reg = HDMI_INP(0x002C); if (enabled) { switch (num_of_channels) { case MSM_HDMI_AUDIO_CHANNEL_2: channel_allocation = 0; /* Default to FR,FL */ break; case MSM_HDMI_AUDIO_CHANNEL_4: channel_count = 3; /* FC,LFE,FR,FL */ channel_allocation = 0x3; break; case MSM_HDMI_AUDIO_CHANNEL_6: channel_count = 5; /* RR,RL,FC,LFE,FR,FL */ channel_allocation = 0xB; break; case MSM_HDMI_AUDIO_CHANNEL_8: channel_count = 7; /* FRC,FLC,RR,RL,FC,LFE,FR,FL */ channel_allocation = 0x1f; break; default: pr_err("%s(): Unsupported num_of_channels = %u\n", __func__, num_of_channels); return -EINVAL; break; } /* Program the Channel-Speaker allocation */ audio_info_1_reg = 0; /* CA(channel_allocation) */ audio_info_1_reg |= channel_allocation & 0xff; /* Program the Level shifter */ /* LSV(level_shift) */ audio_info_1_reg |= (level_shift << 11) & 0x00007800; /* Program the Down-mix Inhibit Flag */ /* DM_INH(down_mix) */ audio_info_1_reg |= (down_mix << 15) & 0x00008000; /* HDMI_AUDIO_INFO1[0x00E8] */ HDMI_OUTP(0x00E8, audio_info_1_reg); /* Calculate CheckSum Sum of all the bytes in the Audio Info Packet bytes (See table 8.4 in HDMI spec) */ check_sum = 0; /* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_TYPE[0x84] */ check_sum += 0x84; /* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_VERSION[0x01] */ check_sum += 1; /* HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH[0x0A] */ check_sum += 0x0A; check_sum += channel_count; check_sum += channel_allocation; /* See Table 8.5 in HDMI spec */ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7; 
check_sum &= 0xFF; check_sum = (uint8) (256 - check_sum); audio_info_0_reg = 0; /* CHECKSUM(check_sum) */ audio_info_0_reg |= check_sum & 0xff; /* CC(channel_count) */ audio_info_0_reg |= (channel_count << 8) & 0x00000700; /* HDMI_AUDIO_INFO0[0x00E4] */ HDMI_OUTP(0x00E4, audio_info_0_reg); /* Set these flags */ /* AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT | AUDIO_INFO_SEND */ audio_info_ctrl_reg |= 0x000000F0; } else { /* Clear these flags */ /* ~(AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT | AUDIO_INFO_SEND) */ audio_info_ctrl_reg &= ~0x000000F0; } /* HDMI_INFOFRAME_CTRL0[0x002C] */ HDMI_OUTP(0x002C, audio_info_ctrl_reg); hdmi_msm_dump_regs("HDMI-AUDIO-ON: "); return 0; } EXPORT_SYMBOL(hdmi_msm_audio_info_setup); static void hdmi_msm_en_gc_packet(boolean av_mute_is_requested) { /* HDMI_GC[0x0040] */ HDMI_OUTP(0x0040, av_mute_is_requested ? 1 : 0); /* GC packet enable (every frame) */ /* HDMI_VBI_PKT_CTRL[0x0028] */ hdmi_msm_rmw32or(0x0028, 3 << 4); } #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT static void hdmi_msm_en_isrc_packet(boolean isrc_is_continued) { static const char isrc_psuedo_data[] = "ISRC1:0123456789isrc2=ABCDEFGHIJ"; const uint32 * isrc_data = (const uint32 *) isrc_psuedo_data; /* ISRC_STATUS =0b010 | ISRC_CONTINUE | ISRC_VALID */ /* HDMI_ISRC1_0[0x00048] */ HDMI_OUTP(0x00048, 2 | (isrc_is_continued ? 
1 : 0) << 6 | 0 << 7); /* HDMI_ISRC1_1[0x004C] */ HDMI_OUTP(0x004C, *isrc_data++); /* HDMI_ISRC1_2[0x0050] */ HDMI_OUTP(0x0050, *isrc_data++); /* HDMI_ISRC1_3[0x0054] */ HDMI_OUTP(0x0054, *isrc_data++); /* HDMI_ISRC1_4[0x0058] */ HDMI_OUTP(0x0058, *isrc_data++); /* HDMI_ISRC2_0[0x005C] */ HDMI_OUTP(0x005C, *isrc_data++); /* HDMI_ISRC2_1[0x0060] */ HDMI_OUTP(0x0060, *isrc_data++); /* HDMI_ISRC2_2[0x0064] */ HDMI_OUTP(0x0064, *isrc_data++); /* HDMI_ISRC2_3[0x0068] */ HDMI_OUTP(0x0068, *isrc_data); /* HDMI_VBI_PKT_CTRL[0x0028] */ /* ISRC Send + Continuous */ hdmi_msm_rmw32or(0x0028, 3 << 8); } #else static void hdmi_msm_en_isrc_packet(boolean isrc_is_continued) { /* * Until end-to-end support for various audio packets */ } #endif #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT static void hdmi_msm_en_acp_packet(uint32 byte1) { /* HDMI_ACP[0x003C] */ HDMI_OUTP(0x003C, 2 | 1 << 8 | byte1 << 16); /* HDMI_VBI_PKT_CTRL[0x0028] */ /* ACP send, s/w source */ hdmi_msm_rmw32or(0x0028, 3 << 12); } #else static void hdmi_msm_en_acp_packet(uint32 byte1) { /* * Until end-to-end support for various audio packets */ } #endif int hdmi_msm_audio_get_sample_rate(void) { return msm_hdmi_sample_rate; } EXPORT_SYMBOL(hdmi_msm_audio_get_sample_rate); void hdmi_msm_audio_sample_rate_reset(int rate) { if (msm_hdmi_sample_rate == rate) return; msm_hdmi_sample_rate = rate; if (hdmi_msm_state->hdcp_enable) hdcp_deauthenticate(); else hdmi_msm_turn_on(); } EXPORT_SYMBOL(hdmi_msm_audio_sample_rate_reset); static void hdmi_msm_audio_setup(void) { const int channels = MSM_HDMI_AUDIO_CHANNEL_2; /* (0) for clr_avmute, (1) for set_avmute */ hdmi_msm_en_gc_packet(0); /* (0) for isrc1 only, (1) for isrc1 and isrc2 */ hdmi_msm_en_isrc_packet(1); /* arbitrary bit pattern for byte1 */ hdmi_msm_en_acp_packet(0x5a); DEV_DBG("Not setting ACP, ISRC1, ISRC2 packets\n"); hdmi_msm_audio_acr_setup(TRUE, external_common_state->video_resolution, msm_hdmi_sample_rate, channels); hdmi_msm_audio_info_setup(TRUE, 
channels, 0, 0, FALSE);

	/* Turn on Audio FIFO and SAM DROP ISR */
	HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) | BIT(1) | BIT(3));
	DEV_INFO("HDMI Audio: Enabled\n");
}

/*
 * Disable the HDMI audio path.
 *
 * Polls AUDIO CFG (0x01D0) bit 0 for up to 5s (50 x 100ms) waiting for
 * the audio engine to go idle; once per second it re-asserts the audio
 * switch "off" state to prompt userspace to release the engine.  The
 * audio InfoFrame and ACR packets are then disabled regardless of
 * whether the engine stopped.
 *
 * NOTE(review): "%08xi" below looks like a typo for "%08x" (stray 'i'
 * printed after the hex value) -- runtime string, confirm before
 * changing.
 *
 * Always returns 0.
 */
static int hdmi_msm_audio_off(void)
{
	uint32 audio_cfg;
	int i, timeout_val = 50;

	for (i = 0; (i < timeout_val) &&
		((audio_cfg = HDMI_INP_ND(0x01D0)) & BIT(0)); i++) {
		DEV_DBG("%s: %d times: AUDIO CFG is %08xi\n",
			__func__, i+1, audio_cfg);
		if (!((i+1) % 10)) {
			DEV_ERR("%s: audio still on after %d sec. try again\n",
				__func__, (i+1)/10);
			SWITCH_SET_HDMI_AUDIO(0, 1);
		}
		msleep(100);
	}

	if (i == timeout_val)
		DEV_ERR("%s: Error: cannot turn off audio engine\n",
			__func__);

	hdmi_msm_audio_info_setup(FALSE, 0, 0, 0, FALSE);
	hdmi_msm_audio_acr_setup(FALSE, 0, 0, 0);

	DEV_INFO("HDMI Audio: Disabled\n");
	return 0;
}

/*
 * Per-format AVI InfoFrame byte values, indexed [data byte][mode]
 * where mode is selected from video_resolution in
 * hdmi_msm_avi_info_frame().  (17th column is mode 16,
 * 1280x1024p60_5_4 -- see the switch below.)
 */
static uint8 hdmi_msm_avi_iframe_lut[][17] = {
/*	480p60	480i60	576p50	576i50	720p60	720p50	1080p60	1080i60
	1080p50	1080i50	1080p24	1080p30	1080p25	640x480p 480p60_16_9
	576p50_4_3 */
	{0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},	/*00*/
	{0x18, 0x18, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
	 0x28, 0x28, 0x28, 0x28, 0x18, 0x28, 0x18, 0x08},	/*01*/
	{0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x88, 0x00, 0x04, 0x04},	/*02*/
	{0x02, 0x06, 0x11, 0x15, 0x04, 0x13, 0x10, 0x05, 0x1F,
	 0x14, 0x20, 0x22, 0x21, 0x01, 0x03, 0x11, 0x00},	/*03*/
	{0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
	 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},	/*04*/
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},	/*05*/
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},	/*06*/
	{0xE1, 0xE1, 0x41, 0x41, 0xD1, 0xd1, 0x39, 0x39, 0x39,
	 0x39, 0x39, 0x39, 0x39, 0xe1, 0xE1, 0x41, 0x01},	/*07*/
	{0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x01, 0x01, 0x02, 0x04},	/*08*/
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*09*/ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*10*/ {0xD1, 0xD1, 0xD1, 0xD1, 0x01, 0x01, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0xD1, 0xD1, 0x01}, /*11*/ {0x02, 0x02, 0x02, 0x02, 0x05, 0x05, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x02, 0x02, 0x02, 0x05} /*12*/ }; static void hdmi_msm_avi_info_frame(void) { /* two header + length + 13 data */ uint8 aviInfoFrame[16]; uint8 checksum; uint32 sum; uint32 regVal; int i; int mode = 0; boolean use_ce_scan_info = TRUE; switch (external_common_state->video_resolution) { case HDMI_VFRMT_720x480p60_4_3: mode = 0; break; case HDMI_VFRMT_720x480i60_16_9: mode = 1; break; case HDMI_VFRMT_720x576p50_16_9: mode = 2; break; case HDMI_VFRMT_720x576i50_16_9: mode = 3; break; case HDMI_VFRMT_1280x720p60_16_9: mode = 4; break; case HDMI_VFRMT_1280x720p50_16_9: mode = 5; break; case HDMI_VFRMT_1920x1080p60_16_9: mode = 6; break; case HDMI_VFRMT_1920x1080i60_16_9: mode = 7; break; case HDMI_VFRMT_1920x1080p50_16_9: mode = 8; break; case HDMI_VFRMT_1920x1080i50_16_9: mode = 9; break; case HDMI_VFRMT_1920x1080p24_16_9: mode = 10; break; case HDMI_VFRMT_1920x1080p30_16_9: mode = 11; break; case HDMI_VFRMT_1920x1080p25_16_9: mode = 12; break; case HDMI_VFRMT_640x480p60_4_3: mode = 13; break; case HDMI_VFRMT_720x480p60_16_9: mode = 14; break; case HDMI_VFRMT_720x576p50_4_3: mode = 15; break; case HDMI_VFRMT_1280x1024p60_5_4: mode = 16; break; default: DEV_INFO("%s: mode %d not supported\n", __func__, external_common_state->video_resolution); return; } /* InfoFrame Type = 82 */ aviInfoFrame[0] = 0x82; /* Version = 2 */ aviInfoFrame[1] = 2; /* Length of AVI InfoFrame = 13 */ aviInfoFrame[2] = 13; /* Data Byte 01: 0 Y1 Y0 A0 B1 B0 S1 S0 */ aviInfoFrame[3] = hdmi_msm_avi_iframe_lut[0][mode]; /* * If the sink specified support for both underscan/overscan * then, by default, set the underscan bit. 
* Only checking underscan support for preferred format and cea formats */ if ((external_common_state->video_resolution == external_common_state->preferred_video_format)) { use_ce_scan_info = FALSE; switch (external_common_state->pt_scan_info) { case 0: /* * Need to use the info specified for the corresponding * IT or CE format */ DEV_DBG("%s: No underscan information specified for the" " preferred video format\n", __func__); use_ce_scan_info = TRUE; break; case 3: DEV_DBG("%s: Setting underscan bit for the preferred" " video format\n", __func__); aviInfoFrame[3] |= 0x02; break; default: DEV_DBG("%s: Underscan information not set for the" " preferred video format\n", __func__); break; } } if (use_ce_scan_info) { if (3 == external_common_state->ce_scan_info) { DEV_DBG("%s: Setting underscan bit for the CE video" " format\n", __func__); aviInfoFrame[3] |= 0x02; } else { DEV_DBG("%s: Not setting underscan bit for the CE video" " format\n", __func__); } } /* Data Byte 02: C1 C0 M1 M0 R3 R2 R1 R0 */ aviInfoFrame[4] = hdmi_msm_avi_iframe_lut[1][mode]; /* Data Byte 03: ITC EC2 EC1 EC0 Q1 Q0 SC1 SC0 */ aviInfoFrame[5] = hdmi_msm_avi_iframe_lut[2][mode]; /* Data Byte 04: 0 VIC6 VIC5 VIC4 VIC3 VIC2 VIC1 VIC0 */ aviInfoFrame[6] = hdmi_msm_avi_iframe_lut[3][mode]; /* Data Byte 05: 0 0 0 0 PR3 PR2 PR1 PR0 */ aviInfoFrame[7] = hdmi_msm_avi_iframe_lut[4][mode]; /* Data Byte 06: LSB Line No of End of Top Bar */ aviInfoFrame[8] = hdmi_msm_avi_iframe_lut[5][mode]; /* Data Byte 07: MSB Line No of End of Top Bar */ aviInfoFrame[9] = hdmi_msm_avi_iframe_lut[6][mode]; /* Data Byte 08: LSB Line No of Start of Bottom Bar */ aviInfoFrame[10] = hdmi_msm_avi_iframe_lut[7][mode]; /* Data Byte 09: MSB Line No of Start of Bottom Bar */ aviInfoFrame[11] = hdmi_msm_avi_iframe_lut[8][mode]; /* Data Byte 10: LSB Pixel Number of End of Left Bar */ aviInfoFrame[12] = hdmi_msm_avi_iframe_lut[9][mode]; /* Data Byte 11: MSB Pixel Number of End of Left Bar */ aviInfoFrame[13] = 
hdmi_msm_avi_iframe_lut[10][mode]; /* Data Byte 12: LSB Pixel Number of Start of Right Bar */ aviInfoFrame[14] = hdmi_msm_avi_iframe_lut[11][mode]; /* Data Byte 13: MSB Pixel Number of Start of Right Bar */ aviInfoFrame[15] = hdmi_msm_avi_iframe_lut[12][mode]; sum = 0; for (i = 0; i < 16; i++) sum += aviInfoFrame[i]; sum &= 0xFF; sum = 256 - sum; checksum = (uint8) sum; regVal = aviInfoFrame[5]; regVal = regVal << 8 | aviInfoFrame[4]; regVal = regVal << 8 | aviInfoFrame[3]; regVal = regVal << 8 | checksum; HDMI_OUTP(0x006C, regVal); regVal = aviInfoFrame[9]; regVal = regVal << 8 | aviInfoFrame[8]; regVal = regVal << 8 | aviInfoFrame[7]; regVal = regVal << 8 | aviInfoFrame[6]; HDMI_OUTP(0x0070, regVal); regVal = aviInfoFrame[13]; regVal = regVal << 8 | aviInfoFrame[12]; regVal = regVal << 8 | aviInfoFrame[11]; regVal = regVal << 8 | aviInfoFrame[10]; HDMI_OUTP(0x0074, regVal); regVal = aviInfoFrame[1]; regVal = regVal << 16 | aviInfoFrame[15]; regVal = regVal << 8 | aviInfoFrame[14]; HDMI_OUTP(0x0078, regVal); /* INFOFRAME_CTRL0[0x002C] */ /* 0x3 for AVI InfFrame enable (every frame) */ HDMI_OUTP(0x002C, HDMI_INP(0x002C) | 0x00000003L); } #ifdef CONFIG_FB_MSM_HDMI_3D static void hdmi_msm_vendor_infoframe_packetsetup(void) { uint32 packet_header = 0; uint32 check_sum = 0; uint32 packet_payload = 0; if (!external_common_state->format_3d) { HDMI_OUTP(0x0034, 0); return; } /* 0x0084 GENERIC0_HDR * HB0 7:0 NUM * HB1 15:8 NUM * HB2 23:16 NUM */ /* Setup Packet header and payload */ /* 0x81 VS_INFO_FRAME_ID 0x01 VS_INFO_FRAME_VERSION 0x1B VS_INFO_FRAME_PAYLOAD_LENGTH */ packet_header = 0x81 | (0x01 << 8) | (0x1B << 16); HDMI_OUTP(0x0084, packet_header); check_sum = packet_header & 0xff; check_sum += (packet_header >> 8) & 0xff; check_sum += (packet_header >> 16) & 0xff; /* 0x008C GENERIC0_1 * BYTE4 7:0 NUM * BYTE5 15:8 NUM * BYTE6 23:16 NUM * BYTE7 31:24 NUM */ /* 0x02 VS_INFO_FRAME_3D_PRESENT */ packet_payload = 0x02 << 5; switch (external_common_state->format_3d) { case 
1: /* 0b1000 VIDEO_3D_FORMAT_SIDE_BY_SIDE_HALF */ packet_payload |= (0x08 << 8) << 4; break; case 2: /* 0b0110 VIDEO_3D_FORMAT_TOP_AND_BOTTOM_HALF */ packet_payload |= (0x06 << 8) << 4; break; } HDMI_OUTP(0x008C, packet_payload); check_sum += packet_payload & 0xff; check_sum += (packet_payload >> 8) & 0xff; #define IEEE_REGISTRATION_ID 0xC03 /* Next 3 bytes are IEEE Registration Identifcation */ /* 0x0088 GENERIC0_0 * BYTE0 7:0 NUM (checksum) * BYTE1 15:8 NUM * BYTE2 23:16 NUM * BYTE3 31:24 NUM */ check_sum += IEEE_REGISTRATION_ID & 0xff; check_sum += (IEEE_REGISTRATION_ID >> 8) & 0xff; check_sum += (IEEE_REGISTRATION_ID >> 16) & 0xff; HDMI_OUTP(0x0088, (0x100 - (0xff & check_sum)) | ((IEEE_REGISTRATION_ID & 0xff) << 8) | (((IEEE_REGISTRATION_ID >> 8) & 0xff) << 16) | (((IEEE_REGISTRATION_ID >> 16) & 0xff) << 24)); /* 0x0034 GEN_PKT_CTRL * GENERIC0_SEND 0 0 = Disable Generic0 Packet Transmission * 1 = Enable Generic0 Packet Transmission * GENERIC0_CONT 1 0 = Send Generic0 Packet on next frame only * 1 = Send Generic0 Packet on every frame * GENERIC0_UPDATE 2 NUM * GENERIC1_SEND 4 0 = Disable Generic1 Packet Transmission * 1 = Enable Generic1 Packet Transmission * GENERIC1_CONT 5 0 = Send Generic1 Packet on next frame only * 1 = Send Generic1 Packet on every frame * GENERIC0_LINE 21:16 NUM * GENERIC1_LINE 29:24 NUM */ /* GENERIC0_LINE | GENERIC0_UPDATE | GENERIC0_CONT | GENERIC0_SEND * Setup HDMI TX generic packet control * Enable this packet to transmit every frame * Enable this packet to transmit every frame * Enable HDMI TX engine to transmit Generic packet 0 */ HDMI_OUTP(0x0034, (1 << 16) | (1 << 2) | BIT(1) | BIT(0)); } static void hdmi_msm_switch_3d(boolean on) { mutex_lock(&external_common_state_hpd_mutex); if (external_common_state->hpd_state) hdmi_msm_vendor_infoframe_packetsetup(); mutex_unlock(&external_common_state_hpd_mutex); } #endif #define IFRAME_CHECKSUM_32(d) \ ((d & 0xff) + ((d >> 8) & 0xff) + \ ((d >> 16) & 0xff) + ((d >> 24) & 0xff)) static void 
hdmi_msm_spd_infoframe_packetsetup(void) { uint32 packet_header = 0; uint32 check_sum = 0; uint32 packet_payload = 0; uint32 packet_control = 0; uint8 *vendor_name = external_common_state->spd_vendor_name; uint8 *product_description = external_common_state->spd_product_description; /* 0x00A4 GENERIC1_HDR * HB0 7:0 NUM * HB1 15:8 NUM * HB2 23:16 NUM */ /* Setup Packet header and payload */ /* 0x83 InfoFrame Type Code 0x01 InfoFrame Version Number 0x19 Length of Source Product Description InfoFrame */ packet_header = 0x83 | (0x01 << 8) | (0x19 << 16); HDMI_OUTP(0x00A4, packet_header); check_sum += IFRAME_CHECKSUM_32(packet_header); /* 0x00AC GENERIC1_1 * BYTE4 7:0 VENDOR_NAME[3] * BYTE5 15:8 VENDOR_NAME[4] * BYTE6 23:16 VENDOR_NAME[5] * BYTE7 31:24 VENDOR_NAME[6] */ packet_payload = (vendor_name[3] & 0x7f) | ((vendor_name[4] & 0x7f) << 8) | ((vendor_name[5] & 0x7f) << 16) | ((vendor_name[6] & 0x7f) << 24); HDMI_OUTP(0x00AC, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* Product Description (7-bit ASCII code) */ /* 0x00B0 GENERIC1_2 * BYTE8 7:0 VENDOR_NAME[7] * BYTE9 15:8 PRODUCT_NAME[ 0] * BYTE10 23:16 PRODUCT_NAME[ 1] * BYTE11 31:24 PRODUCT_NAME[ 2] */ packet_payload = (vendor_name[7] & 0x7f) | ((product_description[0] & 0x7f) << 8) | ((product_description[1] & 0x7f) << 16) | ((product_description[2] & 0x7f) << 24); HDMI_OUTP(0x00B0, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* 0x00B4 GENERIC1_3 * BYTE12 7:0 PRODUCT_NAME[ 3] * BYTE13 15:8 PRODUCT_NAME[ 4] * BYTE14 23:16 PRODUCT_NAME[ 5] * BYTE15 31:24 PRODUCT_NAME[ 6] */ packet_payload = (product_description[3] & 0x7f) | ((product_description[4] & 0x7f) << 8) | ((product_description[5] & 0x7f) << 16) | ((product_description[6] & 0x7f) << 24); HDMI_OUTP(0x00B4, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* 0x00B8 GENERIC1_4 * BYTE16 7:0 PRODUCT_NAME[ 7] * BYTE17 15:8 PRODUCT_NAME[ 8] * BYTE18 23:16 PRODUCT_NAME[ 9] * BYTE19 31:24 PRODUCT_NAME[10] */ 
packet_payload = (product_description[7] & 0x7f) | ((product_description[8] & 0x7f) << 8) | ((product_description[9] & 0x7f) << 16) | ((product_description[10] & 0x7f) << 24); HDMI_OUTP(0x00B8, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* 0x00BC GENERIC1_5 * BYTE20 7:0 PRODUCT_NAME[11] * BYTE21 15:8 PRODUCT_NAME[12] * BYTE22 23:16 PRODUCT_NAME[13] * BYTE23 31:24 PRODUCT_NAME[14] */ packet_payload = (product_description[11] & 0x7f) | ((product_description[12] & 0x7f) << 8) | ((product_description[13] & 0x7f) << 16) | ((product_description[14] & 0x7f) << 24); HDMI_OUTP(0x00BC, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* 0x00C0 GENERIC1_6 * BYTE24 7:0 PRODUCT_NAME[15] * BYTE25 15:8 Source Device Information * BYTE26 23:16 NUM * BYTE27 31:24 NUM */ /* Source Device Information * 00h unknown * 01h Digital STB * 02h DVD * 03h D-VHS * 04h HDD Video * 05h DVC * 06h DSC * 07h Video CD * 08h Game * 09h PC general */ packet_payload = (product_description[15] & 0x7f) | 0x00 << 8; HDMI_OUTP(0x00C0, packet_payload); check_sum += IFRAME_CHECKSUM_32(packet_payload); /* Vendor Name (7bit ASCII code) */ /* 0x00A8 GENERIC1_0 * BYTE0 7:0 CheckSum * BYTE1 15:8 VENDOR_NAME[0] * BYTE2 23:16 VENDOR_NAME[1] * BYTE3 31:24 VENDOR_NAME[2] */ packet_payload = ((vendor_name[0] & 0x7f) << 8) | ((vendor_name[1] & 0x7f) << 16) | ((vendor_name[2] & 0x7f) << 24); check_sum += IFRAME_CHECKSUM_32(packet_payload); packet_payload |= ((0x100 - (0xff & check_sum)) & 0xff); HDMI_OUTP(0x00A8, packet_payload); /* GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND * Setup HDMI TX generic packet control * Enable this packet to transmit every frame * Enable HDMI TX engine to transmit Generic packet 1 */ packet_control = HDMI_INP_ND(0x0034); packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4)); HDMI_OUTP(0x0034, packet_control); } int hdmi_msm_clk(int on) { int rc; DEV_DBG("HDMI Clk: %s\n", on ? 
"Enable" : "Disable");

	if (on) {
		/*
		 * Enable order: core clock first, then the master and
		 * slave interface clocks.
		 *
		 * NOTE(review): the error paths below return without
		 * unwinding clocks already enabled (a failure on
		 * hdmi_m_pclk leaves hdmi_app_clk prepared/enabled) --
		 * verify whether callers compensate before changing.
		 */
		rc = clk_prepare_enable(hdmi_msm_state->hdmi_app_clk);
		if (rc) {
			DEV_ERR("'hdmi_app_clk' clock enable failed, rc=%d\n",
				rc);
			return rc;
		}

		rc = clk_prepare_enable(hdmi_msm_state->hdmi_m_pclk);
		if (rc) {
			DEV_ERR("'hdmi_m_pclk' clock enable failed, rc=%d\n",
				rc);
			return rc;
		}

		rc = clk_prepare_enable(hdmi_msm_state->hdmi_s_pclk);
		if (rc) {
			DEV_ERR("'hdmi_s_pclk' clock enable failed, rc=%d\n",
				rc);
			return rc;
		}
	} else {
		clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
		clk_disable_unprepare(hdmi_msm_state->hdmi_m_pclk);
		clk_disable_unprepare(hdmi_msm_state->hdmi_s_pclk);
	}

	return 0;
}

/*
 * (Re)initialize the HDMI core for the current video resolution:
 * wait for the audio engine to idle, reset the core, re-init the PHY,
 * program video timing, and (for non-DVI sinks) bring the audio path
 * and InfoFrames up.
 */
static void hdmi_msm_turn_on(void)
{
	uint32 audio_pkt_ctrl, audio_cfg;
	/*
	 * Number of wait iterations for QDSP to disable Audio Engine
	 * before resetting HDMI core
	 */
	int i = 10;

	audio_pkt_ctrl = HDMI_INP_ND(0x0020);
	audio_cfg = HDMI_INP_ND(0x01D0);

	/*
	 * Checking BIT[0] of AUDIO PACKET CONTROL and
	 * AUDIO CONFIGURATION register
	 */
	while (((audio_pkt_ctrl & 0x00000001) ||
		(audio_cfg & 0x00000001)) && (i--)) {
		audio_pkt_ctrl = HDMI_INP_ND(0x0020);
		audio_cfg = HDMI_INP_ND(0x01D0);
		DEV_DBG("%d times :: HDMI AUDIO PACKET is %08x and "
			"AUDIO CFG is %08x", i, audio_pkt_ctrl, audio_cfg);
		msleep(20);
	}

	hdmi_msm_set_mode(FALSE);

	/* Core reset is serialized against the HDCP auth state. */
	mutex_lock(&hdcp_auth_state_mutex);
	hdmi_msm_reset_core();
	mutex_unlock(&hdcp_auth_state_mutex);

	hdmi_msm_init_phy(external_common_state->video_resolution);
	/* HDMI_USEC_REFTIMER[0x0208] */
	HDMI_OUTP(0x0208, 0x0001001B);

	hdmi_msm_set_mode(TRUE);

	hdmi_msm_video_setup(external_common_state->video_resolution);
	if (!hdmi_msm_is_dvi_mode()) {
		hdmi_msm_audio_setup();

		/*
		 * Send the audio switch device notification if HDCP is
		 * not enabled. Otherwise, the notification would be
		 * sent after HDCP authentication is successful.
*/
		if (!hdmi_msm_state->hdcp_enable)
			SWITCH_SET_HDMI_AUDIO(1, 0);
	}

	hdmi_msm_avi_info_frame();
#ifdef CONFIG_FB_MSM_HDMI_3D
	hdmi_msm_vendor_infoframe_packetsetup();
#endif
	hdmi_msm_spd_infoframe_packetsetup();

	/* Restart HDCP authentication when a reauth was requested. */
	if (hdmi_msm_state->hdcp_enable && hdmi_msm_state->reauth) {
		hdmi_msm_hdcp_enable();
		hdmi_msm_state->reauth = FALSE;
	}

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
	/* re-initialize CEC if enabled */
	mutex_lock(&hdmi_msm_state_mutex);
	if (hdmi_msm_state->cec_enabled == true) {
		hdmi_msm_cec_init();
		hdmi_msm_cec_write_logical_addr(
			hdmi_msm_state->cec_logical_addr);
	}
	mutex_unlock(&hdmi_msm_state_mutex);
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */

	DEV_INFO("HDMI Core: Initialized\n");
}

/*
 * HDCP retry timer callback: defer the actual work to the workqueue
 * (timer context cannot sleep).
 */
static void hdmi_msm_hdcp_timer(unsigned long data)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}

	queue_work(hdmi_work_queue, &hdmi_msm_state->hdcp_work);
}

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
/* CEC read timer callback: hand latch detection to the workqueue. */
static void hdmi_msm_cec_read_timer_func(unsigned long data)
{
	queue_work(hdmi_work_queue, &hdmi_msm_state->cec_latch_detect_work);
}
#endif

/*
 * Arm HPD sensing for the opposite of the current cable state (listen
 * for connect while disconnected, and vice versa).  If the sense line
 * (0x0250 bit 1) already sits at the level we are about to listen for,
 * toggle the HPD circuit so the edge is not missed.
 */
static void hdmi_msm_hpd_polarity_setup(void)
{
	u32 cable_sense;
	bool polarity = !external_common_state->hpd_state;
	bool trigger = false;

	if (polarity)
		HDMI_OUTP(0x0254, BIT(2) | BIT(1));
	else
		HDMI_OUTP(0x0254, BIT(2));

	cable_sense = (HDMI_INP(0x0250) & BIT(1)) >> 1;
	if (cable_sense == polarity)
		trigger = true;

	DEV_DBG("%s: listen=%s, sense=%s, trigger=%s\n", __func__,
		polarity ? "connect" : "disconnect",
		cable_sense ? "connect" : "disconnect",
		trigger ?
"Yes" : "No"); if (trigger) { u32 reg_val = HDMI_INP(0x0258); /* Toggle HPD circuit to trigger HPD sense */ HDMI_OUTP(0x0258, reg_val & ~BIT(28)); HDMI_OUTP(0x0258, reg_val | BIT(28)); } } static void hdmi_msm_hpd_off(void) { int rc = 0; if (!hdmi_msm_state->hpd_initialized) { DEV_DBG("%s: HPD is already OFF, returning\n", __func__); return; } DEV_DBG("%s: (timer, 5V, IRQ off)\n", __func__); disable_irq(hdmi_msm_state->irq); /* Disable HPD interrupt */ HDMI_OUTP(0x0254, 0); DEV_DBG("%s: Disabling HPD_CTRLd\n", __func__); hdmi_msm_set_mode(FALSE); hdmi_msm_state->pd->enable_5v(0); hdmi_msm_clk(0); rc = hdmi_msm_state->pd->gpio_config(0); if (rc != 0) DEV_INFO("%s: Failed to disable GPIOs. Error=%d\n", __func__, rc); hdmi_msm_state->hpd_initialized = FALSE; } static void hdmi_msm_dump_regs(const char *prefix) { #ifdef REG_DUMP print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4, (void *)MSM_HDMI_BASE, 0x0334, false); #endif } static int hdmi_msm_hpd_on(void) { static int phy_reset_done; uint32 hpd_ctrl; int rc = 0; if (hdmi_msm_state->hpd_initialized) { DEV_DBG("%s: HPD is already ON\n", __func__); } else { rc = hdmi_msm_state->pd->gpio_config(1); if (rc) { DEV_ERR("%s: Failed to enable GPIOs. Error=%d\n", __func__, rc); goto error1; } rc = hdmi_msm_clk(1); if (rc) { DEV_ERR("%s: Failed to enable clocks. Error=%d\n", __func__, rc); goto error2; } rc = hdmi_msm_state->pd->enable_5v(1); if (rc) { DEV_ERR("%s: Failed to enable 5V regulator. 
Error=%d\n", __func__, rc); goto error3; } hdmi_msm_dump_regs("HDMI-INIT: "); hdmi_msm_set_mode(FALSE); if (!phy_reset_done) { hdmi_phy_reset(); phy_reset_done = 1; } hdmi_msm_set_mode(TRUE); /* HDMI_USEC_REFTIMER[0x0208] */ HDMI_OUTP(0x0208, 0x0001001B); /* Set up HPD state variables */ mutex_lock(&external_common_state_hpd_mutex); external_common_state->hpd_state = 0; mutex_unlock(&external_common_state_hpd_mutex); mutex_lock(&hdmi_msm_state_mutex); mutex_unlock(&hdmi_msm_state_mutex); enable_irq(hdmi_msm_state->irq); hdmi_msm_state->hpd_initialized = TRUE; /* set timeout to 4.1ms (max) for hardware debounce */ hpd_ctrl = HDMI_INP(0x0258) | 0x1FFF; /* Turn on HPD HW circuit */ HDMI_OUTP(0x0258, hpd_ctrl | BIT(28)); /* Set HPD cable sense polarity */ hdmi_msm_hpd_polarity_setup(); } DEV_DBG("%s: (IRQ, 5V on)\n", __func__); return 0; error3: hdmi_msm_clk(0); error2: hdmi_msm_state->pd->gpio_config(0); error1: return rc; } static int hdmi_msm_power_ctrl(boolean enable) { int rc = 0; int time = 0; if (enable) { /* * Enable HPD only if the UI option is on or if * HDMI is configured as the primary display */ if (hdmi_prim_display || external_common_state->hpd_feature_on) { DEV_DBG("%s: Turning HPD ciruitry on\n", __func__); rc = hdmi_msm_hpd_on(); if (rc) { DEV_ERR("%s: HPD ON FAILED\n", __func__); return rc; } /* Wait for HPD initialization to complete */ INIT_COMPLETION(hdmi_msm_state->hpd_event_processed); time = wait_for_completion_interruptible_timeout( &hdmi_msm_state->hpd_event_processed, HZ); if (!time && !external_common_state->hpd_state) { DEV_DBG("%s: cable not detected\n", __func__); queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work); } } } else { DEV_DBG("%s: Turning HPD ciruitry off\n", __func__); hdmi_msm_hpd_off(); } return rc; } static int hdmi_msm_power_on(struct platform_device *pdev) { struct msm_fb_data_type *mfd = platform_get_drvdata(pdev); int ret = 0; bool changed; if (!hdmi_ready()) { DEV_ERR("%s: HDMI/HPD not initialized\n", 
__func__); return ret; } if (!external_common_state->hpd_state) { DEV_DBG("%s:HDMI cable not connected\n", __func__); goto error; } /* Only start transmission with supported resolution */ changed = hdmi_common_get_video_format_from_drv_data(mfd); if (changed || external_common_state->default_res_supported) { mutex_lock(&external_common_state_hpd_mutex); if (external_common_state->hpd_state && hdmi_msm_is_power_on()) { mutex_unlock(&external_common_state_hpd_mutex); DEV_INFO("HDMI cable connected %s(%dx%d, %d)\n", __func__, mfd->var_xres, mfd->var_yres, mfd->var_pixclock); hdmi_msm_turn_on(); hdmi_msm_state->panel_power_on = TRUE; if (hdmi_msm_state->hdcp_enable) { /* Kick off HDCP Authentication */ mutex_lock(&hdcp_auth_state_mutex); hdmi_msm_state->reauth = FALSE; hdmi_msm_state->full_auth_done = FALSE; mutex_unlock(&hdcp_auth_state_mutex); mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2); } } else { mutex_unlock(&external_common_state_hpd_mutex); } hdmi_msm_dump_regs("HDMI-ON: "); DEV_INFO("power=%s DVI= %s\n", hdmi_msm_is_power_on() ? "ON" : "OFF" , hdmi_msm_is_dvi_mode() ? "ON" : "OFF"); } else { DEV_ERR("%s: Video fmt %d not supp. 
Returning\n", __func__, external_common_state->video_resolution); } error: /* Set HPD cable sense polarity */ hdmi_msm_hpd_polarity_setup(); return ret; } void mhl_connect_api(boolean on) { char *envp[2]; /* Simulating a HPD event based on MHL event */ if (on) { hdmi_msm_read_edid(); hdmi_msm_state->reauth = FALSE ; /* Build EDID table */ hdmi_msm_turn_on(); DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n"); kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE); envp[0] = 0; if (!hdmi_msm_state->hdcp_enable) { /* Send Audio for HDMI Compliance Cases*/ envp[0] = "HDCP_STATE=PASS"; envp[1] = NULL; DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n"); kobject_uevent_env(external_common_state->uevent_kobj, KOBJ_CHANGE, envp); switch_set_state(&external_common_state->sdev, 1); DEV_INFO("%s: hdmi state switched to %d\n", __func__, external_common_state->sdev.state); } else { hdmi_msm_hdcp_enable(); } } else { DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n"); kobject_uevent(external_common_state->uevent_kobj, KOBJ_OFFLINE); switch_set_state(&external_common_state->sdev, 0); DEV_INFO("%s: hdmi state switched to %d\n", __func__, external_common_state->sdev.state); } } EXPORT_SYMBOL(mhl_connect_api); /* Note that power-off will also be called when the cable-remove event is * processed on the user-space and as a result the framebuffer is powered * down. However, we are still required to be able to detect a cable-insert * event; so for now leave the HDMI engine running; so that the HPD IRQ is * still being processed. */ static int hdmi_msm_power_off(struct platform_device *pdev) { int ret = 0; if (!hdmi_ready()) { DEV_ERR("%s: HDMI/HPD not initialized\n", __func__); return ret; } if (!hdmi_msm_state->panel_power_on) { DEV_DBG("%s: panel not ON\n", __func__); goto error; } if (hdmi_msm_state->hdcp_enable) { if (hdmi_msm_state->hdcp_activating) { /* * Let the HDCP work know that we got an HPD * disconnect so that it can stop the * reauthentication loop. 
*/
			mutex_lock(&hdcp_auth_state_mutex);
			hdmi_msm_state->hpd_during_auth = TRUE;
			mutex_unlock(&hdcp_auth_state_mutex);
		}

		/*
		 * Cancel any pending reauth attempts.
		 * If one is ongoing, wait for it to finish
		 */
		cancel_work_sync(&hdmi_msm_state->hdcp_reauth_work);
		cancel_work_sync(&hdmi_msm_state->hdcp_work);
		del_timer_sync(&hdmi_msm_state->hdcp_timer);
		hdmi_msm_state->reauth = FALSE;

		hdcp_deauthenticate();
	}

	/* Drop the audio switch state before tearing audio down. */
	SWITCH_SET_HDMI_AUDIO(0, 0);

	if (!hdmi_msm_is_dvi_mode())
		hdmi_msm_audio_off();

	hdmi_msm_powerdown_phy();

	hdmi_msm_state->panel_power_on = FALSE;
	DEV_INFO("power: OFF (audio off)\n");

	/* Unblock anyone waiting on the HPD event completion. */
	if (!completion_done(&hdmi_msm_state->hpd_event_processed))
		complete(&hdmi_msm_state->hpd_event_processed);

error:
	/* Set HPD cable sense polarity */
	hdmi_msm_hpd_polarity_setup();

	return ret;
}

/* True when the board platform data enabled MHL for this device. */
bool mhl_is_enabled(void)
{
	return hdmi_msm_state->is_mhl_enabled;
}

/*
 * Enable or disable the HDCP feature based on the hdcp_feature_on
 * module flag and hardware capability.  When enabled, set up (but do
 * not start) the HDCP retry timer and work items; when disabled, kill
 * the timer.  Mirrors the result into
 * external_common_state->present_hdcp.
 */
void hdmi_msm_config_hdcp_feature(void)
{
	if (hdcp_feature_on && hdmi_msm_has_hdcp()) {
		init_timer(&hdmi_msm_state->hdcp_timer);
		hdmi_msm_state->hdcp_timer.function = hdmi_msm_hdcp_timer;
		hdmi_msm_state->hdcp_timer.data = (uint32)NULL;
		/* expires is re-armed via mod_timer() before actual use */
		hdmi_msm_state->hdcp_timer.expires = 0xffffffffL;

		init_completion(&hdmi_msm_state->hdcp_success_done);
		INIT_WORK(&hdmi_msm_state->hdcp_reauth_work,
			hdmi_msm_hdcp_reauth_work);
		INIT_WORK(&hdmi_msm_state->hdcp_work, hdmi_msm_hdcp_work);
		hdmi_msm_state->hdcp_enable = TRUE;
	} else {
		del_timer(&hdmi_msm_state->hdcp_timer);
		hdmi_msm_state->hdcp_enable = FALSE;
	}

	external_common_state->present_hdcp = hdmi_msm_state->hdcp_enable;
	DEV_INFO("%s: HDCP Feature: %s\n", __func__,
		hdmi_msm_state->hdcp_enable ?
"Enabled" : "Disabled"); } static void hdmi_msm_update_panel_info(struct msm_fb_data_type *mfd) { if (!mfd) return; if (hdmi_common_get_video_format_from_drv_data(mfd)) hdmi_common_init_panel_info(&mfd->panel_info); } static bool hdmi_msm_cable_connected(void) { return hdmi_msm_state->hpd_initialized && external_common_state->hpd_state; } static int __devinit hdmi_msm_probe(struct platform_device *pdev) { int rc; struct platform_device *fb_dev; struct msm_fb_data_type *mfd = NULL; if (!hdmi_msm_state) { pr_err("%s: hdmi_msm_state is NULL\n", __func__); return -ENOMEM; } external_common_state->dev = &pdev->dev; DEV_DBG("probe\n"); if (pdev->id == 0) { struct resource *res; #define GET_RES(name, mode) do { \ res = platform_get_resource_byname(pdev, mode, name); \ if (!res) { \ DEV_ERR("'" name "' resource not found\n"); \ rc = -ENODEV; \ goto error; \ } \ } while (0) #define IO_REMAP(var, name) do { \ GET_RES(name, IORESOURCE_MEM); \ var = ioremap(res->start, resource_size(res)); \ if (!var) { \ DEV_ERR("'" name "' ioremap failed\n"); \ rc = -ENOMEM; \ goto error; \ } \ } while (0) #define GET_IRQ(var, name) do { \ GET_RES(name, IORESOURCE_IRQ); \ var = res->start; \ } while (0) IO_REMAP(hdmi_msm_state->qfprom_io, "hdmi_msm_qfprom_addr"); hdmi_msm_state->hdmi_io = MSM_HDMI_BASE; GET_IRQ(hdmi_msm_state->irq, "hdmi_msm_irq"); hdmi_msm_state->pd = pdev->dev.platform_data; #undef GET_RES #undef IO_REMAP #undef GET_IRQ return 0; } hdmi_msm_state->hdmi_app_clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(hdmi_msm_state->hdmi_app_clk)) { DEV_ERR("'core_clk' clk not found\n"); rc = IS_ERR(hdmi_msm_state->hdmi_app_clk); goto error; } hdmi_msm_state->hdmi_m_pclk = clk_get(&pdev->dev, "master_iface_clk"); if (IS_ERR(hdmi_msm_state->hdmi_m_pclk)) { DEV_ERR("'master_iface_clk' clk not found\n"); rc = IS_ERR(hdmi_msm_state->hdmi_m_pclk); goto error; } hdmi_msm_state->hdmi_s_pclk = clk_get(&pdev->dev, "slave_iface_clk"); if (IS_ERR(hdmi_msm_state->hdmi_s_pclk)) { 
DEV_ERR("'slave_iface_clk' clk not found\n"); rc = IS_ERR(hdmi_msm_state->hdmi_s_pclk); goto error; } hdmi_msm_state->is_mhl_enabled = hdmi_msm_state->pd->is_mhl_enabled; rc = check_hdmi_features(); if (rc) { DEV_ERR("Init FAILED: check_hdmi_features rc=%d\n", rc); goto error; } if (!hdmi_msm_state->pd->core_power) { DEV_ERR("Init FAILED: core_power function missing\n"); rc = -ENODEV; goto error; } if (!hdmi_msm_state->pd->enable_5v) { DEV_ERR("Init FAILED: enable_5v function missing\n"); rc = -ENODEV; goto error; } if (!hdmi_msm_state->pd->cec_power) { DEV_ERR("Init FAILED: cec_power function missing\n"); rc = -ENODEV; goto error; } rc = request_threaded_irq(hdmi_msm_state->irq, NULL, &hdmi_msm_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "hdmi_msm_isr", NULL); if (rc) { DEV_ERR("Init FAILED: IRQ request, rc=%d\n", rc); goto error; } disable_irq(hdmi_msm_state->irq); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT init_timer(&hdmi_msm_state->cec_read_timer); hdmi_msm_state->cec_read_timer.function = hdmi_msm_cec_read_timer_func; hdmi_msm_state->cec_read_timer.data = (uint32)NULL; hdmi_msm_state->cec_read_timer.expires = 0xffffffffL; #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */ fb_dev = msm_fb_add_device(pdev); if (fb_dev) { rc = external_common_state_create(fb_dev); if (rc) { DEV_ERR("Init FAILED: hdmi_msm_state_create, rc=%d\n", rc); goto error; } } else DEV_ERR("Init FAILED: failed to add fb device\n"); mfd = platform_get_drvdata(fb_dev); mfd->update_panel_info = hdmi_msm_update_panel_info; mfd->is_panel_ready = hdmi_msm_cable_connected; if (hdmi_prim_display) { rc = hdmi_msm_hpd_on(); if (rc) goto error; } hdmi_msm_config_hdcp_feature(); /* Initialize hdmi node and register with switch driver */ if (hdmi_prim_display) external_common_state->sdev.name = "hdmi_as_primary"; else external_common_state->sdev.name = "hdmi"; if (switch_dev_register(&external_common_state->sdev) < 0) { DEV_ERR("Hdmi switch registration failed\n"); rc = -ENODEV; goto error; } 
external_common_state->audio_sdev.name = "hdmi_audio";
	if (switch_dev_register(&external_common_state->audio_sdev) < 0) {
		DEV_ERR("Hdmi audio switch registration failed\n");
		/* roll back the display switch registered just before */
		switch_dev_unregister(&external_common_state->sdev);
		rc = -ENODEV;
		goto error;
	}

	/* Set the default video resolution for MHL-enabled display */
	if (hdmi_msm_state->is_mhl_enabled) {
		DEV_DBG("MHL Enabled. Restricting default video resolution\n");
		external_common_state->video_resolution =
			HDMI_VFRMT_1920x1080p30_16_9;
	}

	return 0;

error:
	/* Shared failure path: release everything acquired by probe. */
	if (hdmi_msm_state->qfprom_io)
		iounmap(hdmi_msm_state->qfprom_io);
	hdmi_msm_state->qfprom_io = NULL;

	if (hdmi_msm_state->hdmi_io)
		iounmap(hdmi_msm_state->hdmi_io);
	hdmi_msm_state->hdmi_io = NULL;

	external_common_state_remove();

	if (hdmi_msm_state->hdmi_app_clk)
		clk_put(hdmi_msm_state->hdmi_app_clk);
	if (hdmi_msm_state->hdmi_m_pclk)
		clk_put(hdmi_msm_state->hdmi_m_pclk);
	if (hdmi_msm_state->hdmi_s_pclk)
		clk_put(hdmi_msm_state->hdmi_s_pclk);
	hdmi_msm_state->hdmi_app_clk = NULL;
	hdmi_msm_state->hdmi_m_pclk = NULL;
	hdmi_msm_state->hdmi_s_pclk = NULL;
	return rc;
}

/*
 * Platform remove: tear down in reverse of probe -- switch devices,
 * HPD, IRQ, IO mappings, common state, clocks, then the driver state
 * itself.
 */
static int __devexit hdmi_msm_remove(struct platform_device *pdev)
{
	DEV_INFO("HDMI device: remove\n");

	DEV_INFO("HDMI HPD: OFF\n");

	/* Unregister hdmi node from switch driver */
	switch_dev_unregister(&external_common_state->sdev);
	switch_dev_unregister(&external_common_state->audio_sdev);

	hdmi_msm_hpd_off();
	free_irq(hdmi_msm_state->irq, NULL);

	if (hdmi_msm_state->qfprom_io)
		iounmap(hdmi_msm_state->qfprom_io);
	hdmi_msm_state->qfprom_io = NULL;

	if (hdmi_msm_state->hdmi_io)
		iounmap(hdmi_msm_state->hdmi_io);
	hdmi_msm_state->hdmi_io = NULL;

	external_common_state_remove();

	if (hdmi_msm_state->hdmi_app_clk)
		clk_put(hdmi_msm_state->hdmi_app_clk);
	if (hdmi_msm_state->hdmi_m_pclk)
		clk_put(hdmi_msm_state->hdmi_m_pclk);
	if (hdmi_msm_state->hdmi_s_pclk)
		clk_put(hdmi_msm_state->hdmi_s_pclk);
	hdmi_msm_state->hdmi_app_clk = NULL;
	hdmi_msm_state->hdmi_m_pclk = NULL;
	hdmi_msm_state->hdmi_s_pclk = NULL;

	kfree(hdmi_msm_state);
hdmi_msm_state = NULL; return 0; } static int hdmi_msm_hpd_feature(int on) { int rc = 0; DEV_INFO("%s: %d\n", __func__, on); if (on) { rc = hdmi_msm_hpd_on(); } else { if (external_common_state->hpd_state) { external_common_state->hpd_state = 0; /* Send offline event to switch OFF HDMI and HAL FD */ hdmi_msm_send_event(HPD_EVENT_OFFLINE); /* Wait for HDMI and FD to close */ INIT_COMPLETION(hdmi_msm_state->hpd_event_processed); wait_for_completion_interruptible_timeout( &hdmi_msm_state->hpd_event_processed, HZ); } hdmi_msm_hpd_off(); /* Set HDMI switch node to 0 on HPD feature disable */ switch_set_state(&external_common_state->sdev, 0); DEV_INFO("%s: hdmi state switched to %d\n", __func__, external_common_state->sdev.state); } return rc; } static struct platform_driver this_driver = { .probe = hdmi_msm_probe, .remove = hdmi_msm_remove, .driver.name = "hdmi_msm", }; static struct msm_fb_panel_data hdmi_msm_panel_data = { .on = hdmi_msm_power_on, .off = hdmi_msm_power_off, .power_ctrl = hdmi_msm_power_ctrl, }; static struct platform_device this_device = { .name = "hdmi_msm", .id = 1, .dev.platform_data = &hdmi_msm_panel_data, }; static int __init hdmi_msm_init(void) { int rc; if (msm_fb_detect_client("hdmi_msm")) return 0; #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY hdmi_prim_display = 1; #endif hdmi_msm_setup_video_mode_lut(); hdmi_msm_state = kzalloc(sizeof(*hdmi_msm_state), GFP_KERNEL); if (!hdmi_msm_state) { pr_err("hdmi_msm_init FAILED: out of memory\n"); rc = -ENOMEM; goto init_exit; } external_common_state = &hdmi_msm_state->common; if (hdmi_prim_display && hdmi_prim_resolution) external_common_state->video_resolution = hdmi_prim_resolution - 1; else external_common_state->video_resolution = HDMI_VFRMT_1920x1080p60_16_9; #ifdef CONFIG_FB_MSM_HDMI_3D external_common_state->switch_3d = hdmi_msm_switch_3d; #endif memset(external_common_state->spd_vendor_name, 0, sizeof(external_common_state->spd_vendor_name)); memset(external_common_state->spd_product_description, 0, 
sizeof(external_common_state->spd_product_description)); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT hdmi_msm_state->cec_queue_start = kzalloc(sizeof(struct hdmi_msm_cec_msg)*CEC_QUEUE_SIZE, GFP_KERNEL); if (!hdmi_msm_state->cec_queue_start) { pr_err("hdmi_msm_init FAILED: CEC queue out of memory\n"); rc = -ENOMEM; goto init_exit; } hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start; hdmi_msm_state->cec_queue_rd = hdmi_msm_state->cec_queue_start; hdmi_msm_state->cec_queue_full = false; #endif /* * Create your work queue * allocs and returns ptr */ hdmi_work_queue = create_workqueue("hdmi_hdcp"); external_common_state->hpd_feature = hdmi_msm_hpd_feature; rc = platform_driver_register(&this_driver); if (rc) { pr_err("hdmi_msm_init FAILED: platform_driver_register rc=%d\n", rc); goto init_exit; } hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info); init_completion(&hdmi_msm_state->ddc_sw_done); init_completion(&hdmi_msm_state->hpd_event_processed); INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT INIT_WORK(&hdmi_msm_state->cec_latch_detect_work, hdmi_msm_cec_latch_work); init_completion(&hdmi_msm_state->cec_frame_wr_done); init_completion(&hdmi_msm_state->cec_line_latch_wait); #endif rc = platform_device_register(&this_device); if (rc) { pr_err("hdmi_msm_init FAILED: platform_device_register rc=%d\n", rc); platform_driver_unregister(&this_driver); goto init_exit; } pr_debug("%s: success:" #ifdef DEBUG " DEBUG" #else " RELEASE" #endif " AUDIO EDID HPD HDCP" " DVI" #ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT ":0" #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT */ "\n", __func__); return 0; init_exit: kfree(hdmi_msm_state); hdmi_msm_state = NULL; return rc; } static void __exit hdmi_msm_exit(void) { platform_device_unregister(&this_device); platform_driver_unregister(&this_driver); } static int set_hdcp_feature_on(const char *val, const struct kernel_param *kp) { int rv = 
param_set_bool(val, kp); if (rv) return rv; pr_debug("%s: HDCP feature = %d\n", __func__, hdcp_feature_on); if (hdmi_msm_state) { if ((HDMI_INP(0x0250) & 0x2)) { pr_err("%s: Unable to set HDCP feature", __func__); pr_err("%s: HDMI panel is currently turned on", __func__); } else if (hdcp_feature_on != hdmi_msm_state->hdcp_enable) { hdmi_msm_config_hdcp_feature(); } } return 0; } static struct kernel_param_ops hdcp_feature_on_param_ops = { .set = set_hdcp_feature_on, .get = param_get_bool, }; module_param_cb(hdcp, &hdcp_feature_on_param_ops, &hdcp_feature_on, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(hdcp, "Enable or Disable HDCP"); module_init(hdmi_msm_init); module_exit(hdmi_msm_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.3"); MODULE_AUTHOR("Qualcomm Innovation Center, Inc."); MODULE_DESCRIPTION("HDMI MSM TX driver");
gpl-2.0
urumican/linux-3.8.8
arch/arm/mach-pxa/am300epd.c
2449
6607
/* * am300epd.c -- Platform device for AM300 EPD kit * * Copyright (C) 2008, Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * This work was made possible by help and equipment support from E-Ink * Corporation. http://support.eink.com/community * * This driver is written to be used with the Broadsheet display controller. * on the AM300 EPD prototype kit/development kit with an E-Ink 800x600 * Vizplex EPD on a Gumstix board using the Broadsheet interface board. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <mach/gumstix.h> #include <mach/mfp-pxa25x.h> #include <linux/platform_data/video-pxafb.h> #include "generic.h" #include <video/broadsheetfb.h> static unsigned int panel_type = 6; static struct platform_device *am300_device; static struct broadsheet_board am300_board; static unsigned long am300_pin_config[] __initdata = { GPIO16_GPIO, GPIO17_GPIO, GPIO32_GPIO, GPIO48_GPIO, GPIO49_GPIO, GPIO51_GPIO, GPIO74_GPIO, GPIO75_GPIO, GPIO76_GPIO, GPIO77_GPIO, /* this is the 16-bit hdb bus 58-73 */ GPIO58_GPIO, GPIO59_GPIO, GPIO60_GPIO, GPIO61_GPIO, GPIO62_GPIO, GPIO63_GPIO, GPIO64_GPIO, GPIO65_GPIO, GPIO66_GPIO, GPIO67_GPIO, GPIO68_GPIO, GPIO69_GPIO, GPIO70_GPIO, GPIO71_GPIO, GPIO72_GPIO, GPIO73_GPIO, }; /* register offsets for gpio control */ #define PWR_GPIO_PIN 16 #define CFG_GPIO_PIN 17 #define RDY_GPIO_PIN 32 #define DC_GPIO_PIN 48 #define RST_GPIO_PIN 49 #define LED_GPIO_PIN 51 #define RD_GPIO_PIN 74 #define WR_GPIO_PIN 75 #define CS_GPIO_PIN 76 #define IRQ_GPIO_PIN 77 /* hdb bus */ #define DB0_GPIO_PIN 58 #define DB15_GPIO_PIN 73 static int gpios[] = { PWR_GPIO_PIN, CFG_GPIO_PIN, 
RDY_GPIO_PIN, DC_GPIO_PIN, RST_GPIO_PIN, RD_GPIO_PIN, WR_GPIO_PIN, CS_GPIO_PIN, IRQ_GPIO_PIN, LED_GPIO_PIN }; static char *gpio_names[] = { "PWR", "CFG", "RDY", "DC", "RST", "RD", "WR", "CS", "IRQ", "LED" }; static int am300_wait_event(struct broadsheetfb_par *par) { /* todo: improve err recovery */ wait_event(par->waitq, gpio_get_value(RDY_GPIO_PIN)); return 0; } static int am300_init_gpio_regs(struct broadsheetfb_par *par) { int i; int err; char dbname[8]; for (i = 0; i < ARRAY_SIZE(gpios); i++) { err = gpio_request(gpios[i], gpio_names[i]); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err); goto err_req_gpio; } } /* we also need to take care of the hdb bus */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) { sprintf(dbname, "DB%d", i); err = gpio_request(i, dbname); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %d, err=%d\n", i, err); goto err_req_gpio2; } } /* setup the outputs and init values */ gpio_direction_output(PWR_GPIO_PIN, 0); gpio_direction_output(CFG_GPIO_PIN, 1); gpio_direction_output(DC_GPIO_PIN, 0); gpio_direction_output(RD_GPIO_PIN, 1); gpio_direction_output(WR_GPIO_PIN, 1); gpio_direction_output(CS_GPIO_PIN, 1); gpio_direction_output(RST_GPIO_PIN, 0); /* setup the inputs */ gpio_direction_input(RDY_GPIO_PIN); gpio_direction_input(IRQ_GPIO_PIN); /* start the hdb bus as an input */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_direction_output(i, 0); /* go into command mode */ gpio_set_value(CFG_GPIO_PIN, 1); gpio_set_value(RST_GPIO_PIN, 0); msleep(10); gpio_set_value(RST_GPIO_PIN, 1); msleep(10); am300_wait_event(par); return 0; err_req_gpio2: while (--i >= DB0_GPIO_PIN) gpio_free(i); i = ARRAY_SIZE(gpios); err_req_gpio: while (--i >= 0) gpio_free(gpios[i]); return err; } static int am300_init_board(struct broadsheetfb_par *par) { return am300_init_gpio_regs(par); } static void am300_cleanup(struct broadsheetfb_par *par) { int i; free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), 
par); for (i = 0; i < ARRAY_SIZE(gpios); i++) gpio_free(gpios[i]); for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_free(i); } static u16 am300_get_hdb(struct broadsheetfb_par *par) { u16 res = 0; int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) res |= (gpio_get_value(DB0_GPIO_PIN + i)) ? (1 << i) : 0; return res; } static void am300_set_hdb(struct broadsheetfb_par *par, u16 data) { int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) gpio_set_value(DB0_GPIO_PIN + i, (data >> i) & 0x01); } static void am300_set_ctl(struct broadsheetfb_par *par, unsigned char bit, u8 state) { switch (bit) { case BS_CS: gpio_set_value(CS_GPIO_PIN, state); break; case BS_DC: gpio_set_value(DC_GPIO_PIN, state); break; case BS_WR: gpio_set_value(WR_GPIO_PIN, state); break; } } static int am300_get_panel_type(void) { return panel_type; } static irqreturn_t am300_handle_irq(int irq, void *dev_id) { struct broadsheetfb_par *par = dev_id; wake_up(&par->waitq); return IRQ_HANDLED; } static int am300_setup_irq(struct fb_info *info) { int ret; struct broadsheetfb_par *par = info->par; ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am300_handle_irq, IRQF_DISABLED|IRQF_TRIGGER_RISING, "AM300", par); if (ret) dev_err(&am300_device->dev, "request_irq failed: %d\n", ret); return ret; } static struct broadsheet_board am300_board = { .owner = THIS_MODULE, .init = am300_init_board, .cleanup = am300_cleanup, .set_hdb = am300_set_hdb, .get_hdb = am300_get_hdb, .set_ctl = am300_set_ctl, .wait_for_rdy = am300_wait_event, .get_panel_type = am300_get_panel_type, .setup_irq = am300_setup_irq, }; int __init am300_init(void) { int ret; pxa2xx_mfp_config(ARRAY_AND_SIZE(am300_pin_config)); /* request our platform independent driver */ request_module("broadsheetfb"); am300_device = platform_device_alloc("broadsheetfb", -1); if (!am300_device) return -ENOMEM; /* the am300_board that will be seen by broadsheetfb is a copy */ platform_device_add_data(am300_device, &am300_board, 
sizeof(am300_board)); ret = platform_device_add(am300_device); if (ret) { platform_device_put(am300_device); return ret; } return 0; } module_param(panel_type, uint, 0); MODULE_PARM_DESC(panel_type, "Select the panel type: 37, 6, 97"); MODULE_DESCRIPTION("board driver for am300 epd kit"); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL");
gpl-2.0
NovaFusion/twrp_kernel
arch/x86/kernel/cpu/mcheck/mce_intel.c
2961
5586
/* * Intel specific MCE features. * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> * Copyright (C) 2008, 2009 Intel Corporation * Author: Andi Kleen */ #include <linux/gfp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/sched.h> #include <asm/apic.h> #include <asm/processor.h> #include <asm/msr.h> #include <asm/mce.h> /* * Support for Intel Correct Machine Check Interrupts. This allows * the CPU to raise an interrupt when a corrected machine check happened. * Normally we pick those up using a regular polling timer. * Also supports reliable discovery of shared banks. */ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); /* * cmci_discover_lock protects against parallel discovery attempts * which could race against each other. */ static DEFINE_SPINLOCK(cmci_discover_lock); #define CMCI_THRESHOLD 1 static int cmci_supported(int *banks) { u64 cap; if (mce_cmci_disabled || mce_ignore_ce) return 0; /* * Vendor check is not strictly needed, but the initial * initialization is vendor keyed and this * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return 0; if (!cpu_has_apic || lapic_get_maxlvt() < 6) return 0; rdmsrl(MSR_IA32_MCG_CAP, cap); *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff); return !!(cap & MCG_CMCI_P); } /* * The interrupt handler. This is called on every event. * Just call the poller directly to log any events. * This could in theory increase the threshold under high load, * but doesn't for now. */ static void intel_threshold_interrupt(void) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); mce_notify_irq(); } static void print_update(char *type, int *hdr, int num) { if (*hdr == 0) printk(KERN_INFO "CPU %d MCA banks", smp_processor_id()); *hdr = 1; printk(KERN_CONT " %s:%d", type, num); } /* * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks * on this CPU. 
Use the algorithm recommended in the SDM to discover shared * banks. */ static void cmci_discover(int banks, int boot) { unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); unsigned long flags; int hdr = 0; int i; spin_lock_irqsave(&cmci_discover_lock, flags); for (i = 0; i < banks; i++) { u64 val; if (test_bit(i, owned)) continue; rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Already owned by someone else? */ if (val & MCI_CTL2_CMCI_EN) { if (test_and_clear_bit(i, owned) && !boot) print_update("SHD", &hdr, i); __clear_bit(i, __get_cpu_var(mce_poll_banks)); continue; } val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD; wrmsrl(MSR_IA32_MCx_CTL2(i), val); rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Did the enable bit stick? -- the bank supports CMCI */ if (val & MCI_CTL2_CMCI_EN) { if (!test_and_set_bit(i, owned) && !boot) print_update("CMCI", &hdr, i); __clear_bit(i, __get_cpu_var(mce_poll_banks)); } else { WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); } } spin_unlock_irqrestore(&cmci_discover_lock, flags); if (hdr) printk(KERN_CONT "\n"); } /* * Just in case we missed an event during initialization check * all the CMCI owned banks. */ void cmci_recheck(void) { unsigned long flags; int banks; if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) return; local_irq_save(flags); machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); local_irq_restore(flags); } /* * Disable CMCI on this CPU for all banks it owns when it goes down. * This allows other CPUs to claim the banks on rediscovery. 
*/ void cmci_clear(void) { unsigned long flags; int i; int banks; u64 val; if (!cmci_supported(&banks)) return; spin_lock_irqsave(&cmci_discover_lock, flags); for (i = 0; i < banks; i++) { if (!test_bit(i, __get_cpu_var(mce_banks_owned))) continue; /* Disable CMCI */ rdmsrl(MSR_IA32_MCx_CTL2(i), val); val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK); wrmsrl(MSR_IA32_MCx_CTL2(i), val); __clear_bit(i, __get_cpu_var(mce_banks_owned)); } spin_unlock_irqrestore(&cmci_discover_lock, flags); } /* * After a CPU went down cycle through all the others and rediscover * Must run in process context. */ void cmci_rediscover(int dying) { int banks; int cpu; cpumask_var_t old; if (!cmci_supported(&banks)) return; if (!alloc_cpumask_var(&old, GFP_KERNEL)) return; cpumask_copy(old, &current->cpus_allowed); for_each_online_cpu(cpu) { if (cpu == dying) continue; if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) continue; /* Recheck banks in case CPUs don't all have the same */ if (cmci_supported(&banks)) cmci_discover(banks, 0); } set_cpus_allowed_ptr(current, old); free_cpumask_var(old); } /* * Reenable CMCI on this CPU in case a CPU down failed. */ void cmci_reenable(void) { int banks; if (cmci_supported(&banks)) cmci_discover(banks, 0); } static void intel_init_cmci(void) { int banks; if (!cmci_supported(&banks)) return; mce_threshold_vector = intel_threshold_interrupt; cmci_discover(banks, 1); /* * For CPU #0 this runs with still disabled APIC, but that's * ok because only the vector is set up. We still do another * check for the banks later for CPU #0 just to make sure * to not miss any events. */ apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED); cmci_recheck(); } void mce_intel_feature_init(struct cpuinfo_x86 *c) { intel_init_thermal(c); intel_init_cmci(); }
gpl-2.0
widz4rd/WIDzard-A850L
drivers/net/wireless/bcmdhd/wl_linux_mon.c
3473
11534
/* * Broadcom Dongle Host Driver (DHD), Linux monitor network interface * * Copyright (C) 1999-2012, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: dhd_linux_mon.c 280623 2011-08-30 14:49:39Z $ */ #include <osl.h> #include <linux/string.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/ieee80211.h> #include <linux/rtnetlink.h> #include <net/ieee80211_radiotap.h> #include <wlioctl.h> #include <bcmutils.h> #include <dhd_dbg.h> #include <dngl_stats.h> #include <dhd.h> typedef enum monitor_states { MONITOR_STATE_DEINIT = 0x0, MONITOR_STATE_INIT = 0x1, MONITOR_STATE_INTERFACE_ADDED = 0x2, MONITOR_STATE_INTERFACE_DELETED = 0x4 } monitor_states_t; int dhd_add_monitor(char *name, struct net_device **new_ndev); extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); int dhd_del_monitor(struct net_device *ndev); int dhd_monitor_init(void *dhd_pub); int dhd_monitor_uninit(void); /** * Local declarations and defintions (not exposed) */ #ifndef DHD_MAX_IFS #define DHD_MAX_IFS 16 #endif #define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__) #define MON_TRACE MON_PRINT typedef struct monitor_interface { int radiotap_enabled; struct net_device* real_ndev; /* The real interface that the monitor is on */ struct net_device* mon_ndev; } monitor_interface; typedef struct dhd_linux_monitor { void *dhd_pub; monitor_states_t monitor_state; monitor_interface mon_if[DHD_MAX_IFS]; struct mutex lock; /* lock to protect mon_if */ } dhd_linux_monitor_t; static dhd_linux_monitor_t g_monitor; static struct net_device* lookup_real_netdev(char *name); static monitor_interface* ndev_to_monif(struct net_device *ndev); static int dhd_mon_if_open(struct net_device *ndev); static int dhd_mon_if_stop(struct net_device *ndev); static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); static void dhd_mon_if_set_multicast_list(struct net_device *ndev); static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); static const struct net_device_ops dhd_mon_if_ops = { .ndo_open = 
dhd_mon_if_open, .ndo_stop = dhd_mon_if_stop, .ndo_start_xmit = dhd_mon_if_subif_start_xmit, #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) .ndo_set_rx_mode = dhd_mon_if_set_multicast_list, #else .ndo_set_multicast_list = dhd_mon_if_set_multicast_list, #endif .ndo_set_mac_address = dhd_mon_if_change_mac, }; /** * Local static function defintions */ /* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0" * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") */ static struct net_device* lookup_real_netdev(char *name) { struct net_device *ndev_found = NULL; int i; int len = 0; int last_name_len = 0; struct net_device *ndev; /* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0", * Once mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and corresponding mon * iface would be mon-p2p0-0. */ for (i = 0; i < DHD_MAX_IFS; i++) { ndev = dhd_idx2net(g_monitor.dhd_pub, i); /* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it * it matches, then this netdev is the corresponding real_netdev. */ if (ndev && strstr(ndev->name, "p2p-p2p0")) { len = strlen("p2p"); } else { /* if p2p- is not present, then the IFNAMSIZ have reached and name * would have got reset. 
In this casse,look for p2p0-x in mon-p2p0-x */ len = 0; } if (ndev && strstr(name, (ndev->name + len))) { if (strlen(ndev->name) > last_name_len) { ndev_found = ndev; last_name_len = strlen(ndev->name); } } } return ndev_found; } static monitor_interface* ndev_to_monif(struct net_device *ndev) { int i; for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev) return &g_monitor.mon_if[i]; } return NULL; } static int dhd_mon_if_open(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_stop(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret = 0; int rtap_len; int qos_len = 0; int dot11_hdr_len = 24; int snap_len = 6; unsigned char *pdata; unsigned short frame_ctl; unsigned char src_mac_addr[6]; unsigned char dst_mac_addr[6]; struct ieee80211_hdr *dot11_hdr; struct ieee80211_radiotap_header *rtap_hdr; monitor_interface* mon_if; MON_PRINT("enter\n"); mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); goto fail; } if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; if (unlikely(rtap_hdr->it_version)) goto fail; rtap_len = ieee80211_get_radiotap_len(skb->data); if (unlikely(skb->len < rtap_len)) goto fail; MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); /* Skip the ratio tap header */ skb_pull(skb, rtap_len); dot11_hdr = (struct ieee80211_hdr *)skb->data; frame_ctl = le16_to_cpu(dot11_hdr->frame_control); /* Check if the QoS bit is set */ if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { /* Check if this ia a Wireless Distribution System (WDS) frame * which has 4 MAC addresses */ if (dot11_hdr->frame_control & 0x0080) qos_len = 2; if ((dot11_hdr->frame_control & 0x0300) == 0x0300) 
dot11_hdr_len += 6; memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for * for two MAC addresses */ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); pdata = (unsigned char*)skb->data; memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr)); PKTSETPRIO(skb, 0); MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); /* Use the real net device to transmit the packet */ ret = dhd_start_xmit(skb, mon_if->real_ndev); return ret; } fail: dev_kfree_skb(skb); return 0; } static void dhd_mon_if_set_multicast_list(struct net_device *ndev) { monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } else { MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); } } static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) { int ret = 0; monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } else { MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); } return ret; } /** * Global function definitions (declared in dhd_linux_mon.h) */ int dhd_add_monitor(char *name, struct net_device **new_ndev) { int i; int idx = -1; int ret = 0; struct net_device* ndev = NULL; dhd_linux_monitor_t **dhd_mon; mutex_lock(&g_monitor.lock); MON_TRACE("enter, if name: %s\n", name); if (!name || !new_ndev) { MON_PRINT("invalid parameters\n"); ret = -EINVAL; goto out; } /* * Find a vacancy */ for (i = 0; i < DHD_MAX_IFS; i++) if (g_monitor.mon_if[i].mon_ndev == NULL) { idx = i; break; } if (idx == -1) { MON_PRINT("exceeds 
maximum interfaces\n"); ret = -EFAULT; goto out; } ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); if (!ndev) { MON_PRINT("failed to allocate memory\n"); ret = -ENOMEM; goto out; } ndev->type = ARPHRD_IEEE80211_RADIOTAP; strncpy(ndev->name, name, IFNAMSIZ); ndev->name[IFNAMSIZ - 1] = 0; ndev->netdev_ops = &dhd_mon_if_ops; ret = register_netdevice(ndev); if (ret) { MON_PRINT(" register_netdevice failed (%d)\n", ret); goto out; } *new_ndev = ndev; g_monitor.mon_if[idx].radiotap_enabled = TRUE; g_monitor.mon_if[idx].mon_ndev = ndev; g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); *dhd_mon = &g_monitor; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; MON_PRINT("net device returned: 0x%p\n", ndev); MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); out: if (ret && ndev) free_netdev(ndev); mutex_unlock(&g_monitor.lock); return ret; } int dhd_del_monitor(struct net_device *ndev) { int i; bool rollback_lock = false; if (!ndev) return -EINVAL; mutex_lock(&g_monitor.lock); for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev || g_monitor.mon_if[i].real_ndev == ndev) { g_monitor.mon_if[i].real_ndev = NULL; if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(g_monitor.mon_if[i].mon_ndev); free_netdev(g_monitor.mon_if[i].mon_ndev); g_monitor.mon_if[i].mon_ndev = NULL; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; break; } } if (rollback_lock) { rtnl_lock(); rollback_lock = false; } if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED) MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 
0x%p\n", ndev); mutex_unlock(&g_monitor.lock); return 0; } int dhd_monitor_init(void *dhd_pub) { if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { g_monitor.dhd_pub = dhd_pub; mutex_init(&g_monitor.lock); g_monitor.monitor_state = MONITOR_STATE_INIT; } return 0; } int dhd_monitor_uninit(void) { int i; struct net_device *ndev; bool rollback_lock = false; mutex_lock(&g_monitor.lock); if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { for (i = 0; i < DHD_MAX_IFS; i++) { ndev = g_monitor.mon_if[i].mon_ndev; if (ndev) { if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(ndev); free_netdev(ndev); g_monitor.mon_if[i].real_ndev = NULL; g_monitor.mon_if[i].mon_ndev = NULL; if (rollback_lock) { rtnl_lock(); rollback_lock = false; } } } g_monitor.monitor_state = MONITOR_STATE_DEINIT; } mutex_unlock(&g_monitor.lock); return 0; }
gpl-2.0
skinner12/SkeRneL
drivers/hwmon/ltc4245.c
3473
16437
/* * Driver for Linear Technology LTC4245 I2C Multiple Supply Hot Swap Controller * * Copyright (C) 2008 Ira W. Snyder <iws@ovro.caltech.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This driver is based on the ds1621 and ina209 drivers. * * Datasheet: * http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/i2c/ltc4245.h> /* Here are names of the chip's registers (a.k.a. commands) */ enum ltc4245_cmd { LTC4245_STATUS = 0x00, /* readonly */ LTC4245_ALERT = 0x01, LTC4245_CONTROL = 0x02, LTC4245_ON = 0x03, LTC4245_FAULT1 = 0x04, LTC4245_FAULT2 = 0x05, LTC4245_GPIO = 0x06, LTC4245_ADCADR = 0x07, LTC4245_12VIN = 0x10, LTC4245_12VSENSE = 0x11, LTC4245_12VOUT = 0x12, LTC4245_5VIN = 0x13, LTC4245_5VSENSE = 0x14, LTC4245_5VOUT = 0x15, LTC4245_3VIN = 0x16, LTC4245_3VSENSE = 0x17, LTC4245_3VOUT = 0x18, LTC4245_VEEIN = 0x19, LTC4245_VEESENSE = 0x1a, LTC4245_VEEOUT = 0x1b, LTC4245_GPIOADC = 0x1c, }; struct ltc4245_data { struct device *hwmon_dev; struct mutex update_lock; bool valid; unsigned long last_updated; /* in jiffies */ /* Control registers */ u8 cregs[0x08]; /* Voltage registers */ u8 vregs[0x0d]; /* GPIO ADC registers */ bool use_extra_gpios; int gpios[3]; }; /* * Update the readings from the GPIO pins. If the driver has been configured to * sample all GPIO's as analog voltages, a round-robin sampling method is used. * Otherwise, only the configured GPIO pin is sampled. 
* * LOCKING: must hold data->update_lock */ static void ltc4245_update_gpios(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ltc4245_data *data = i2c_get_clientdata(client); u8 gpio_curr, gpio_next, gpio_reg; int i; /* no extra gpio support, we're basically done */ if (!data->use_extra_gpios) { data->gpios[0] = data->vregs[LTC4245_GPIOADC - 0x10]; return; } /* * If the last reading was too long ago, then we mark all old GPIO * readings as stale by setting them to -EAGAIN */ if (time_after(jiffies, data->last_updated + 5 * HZ)) { dev_dbg(&client->dev, "Marking GPIOs invalid\n"); for (i = 0; i < ARRAY_SIZE(data->gpios); i++) data->gpios[i] = -EAGAIN; } /* * Get the current GPIO pin * * The datasheet calls these GPIO[1-3], but we'll calculate the zero * based array index instead, and call them GPIO[0-2]. This is much * easier to think about. */ gpio_curr = (data->cregs[LTC4245_GPIO] & 0xc0) >> 6; if (gpio_curr > 0) gpio_curr -= 1; /* Read the GPIO voltage from the GPIOADC register */ data->gpios[gpio_curr] = data->vregs[LTC4245_GPIOADC - 0x10]; /* Find the next GPIO pin to read */ gpio_next = (gpio_curr + 1) % ARRAY_SIZE(data->gpios); /* * Calculate the correct setting for the GPIO register so it will * sample the next GPIO pin */ gpio_reg = (data->cregs[LTC4245_GPIO] & 0x3f) | ((gpio_next + 1) << 6); /* Update the GPIO register */ i2c_smbus_write_byte_data(client, LTC4245_GPIO, gpio_reg); /* Update saved data */ data->cregs[LTC4245_GPIO] = gpio_reg; } static struct ltc4245_data *ltc4245_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ltc4245_data *data = i2c_get_clientdata(client); s32 val; int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { dev_dbg(&client->dev, "Starting ltc4245 update\n"); /* Read control registers -- 0x00 to 0x07 */ for (i = 0; i < ARRAY_SIZE(data->cregs); i++) { val = i2c_smbus_read_byte_data(client, i); if (unlikely(val 
< 0)) data->cregs[i] = 0; else data->cregs[i] = val; } /* Read voltage registers -- 0x10 to 0x1c */ for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { val = i2c_smbus_read_byte_data(client, i+0x10); if (unlikely(val < 0)) data->vregs[i] = 0; else data->vregs[i] = val; } /* Update GPIO readings */ ltc4245_update_gpios(dev); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* Return the voltage from the given register in millivolts */ static int ltc4245_get_voltage(struct device *dev, u8 reg) { struct ltc4245_data *data = ltc4245_update_device(dev); const u8 regval = data->vregs[reg - 0x10]; u32 voltage = 0; switch (reg) { case LTC4245_12VIN: case LTC4245_12VOUT: voltage = regval * 55; break; case LTC4245_5VIN: case LTC4245_5VOUT: voltage = regval * 22; break; case LTC4245_3VIN: case LTC4245_3VOUT: voltage = regval * 15; break; case LTC4245_VEEIN: case LTC4245_VEEOUT: voltage = regval * -55; break; case LTC4245_GPIOADC: voltage = regval * 10; break; default: /* If we get here, the developer messed up */ WARN_ON_ONCE(1); break; } return voltage; } /* Return the current in the given sense register in milliAmperes */ static unsigned int ltc4245_get_current(struct device *dev, u8 reg) { struct ltc4245_data *data = ltc4245_update_device(dev); const u8 regval = data->vregs[reg - 0x10]; unsigned int voltage; unsigned int curr; /* The strange looking conversions that follow are fixed-point * math, since we cannot do floating point in the kernel. * * Step 1: convert sense register to microVolts * Step 2: convert voltage to milliAmperes * * If you play around with the V=IR equation, you come up with * the following: X uV / Y mOhm == Z mA * * With the resistors that are fractions of a milliOhm, we multiply * the voltage and resistance by 10, to shift the decimal point. * Now we can use the normal division operator again. 
*/ switch (reg) { case LTC4245_12VSENSE: voltage = regval * 250; /* voltage in uV */ curr = voltage / 50; /* sense resistor 50 mOhm */ break; case LTC4245_5VSENSE: voltage = regval * 125; /* voltage in uV */ curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */ break; case LTC4245_3VSENSE: voltage = regval * 125; /* voltage in uV */ curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */ break; case LTC4245_VEESENSE: voltage = regval * 250; /* voltage in uV */ curr = voltage / 100; /* sense resistor 100 mOhm */ break; default: /* If we get here, the developer messed up */ WARN_ON_ONCE(1); curr = 0; break; } return curr; } static ssize_t ltc4245_show_voltage(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); const int voltage = ltc4245_get_voltage(dev, attr->index); return snprintf(buf, PAGE_SIZE, "%d\n", voltage); } static ssize_t ltc4245_show_current(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); const unsigned int curr = ltc4245_get_current(dev, attr->index); return snprintf(buf, PAGE_SIZE, "%u\n", curr); } static ssize_t ltc4245_show_power(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); const unsigned int curr = ltc4245_get_current(dev, attr->index); const int output_voltage = ltc4245_get_voltage(dev, attr->index+1); /* current in mA * voltage in mV == power in uW */ const unsigned int power = abs(output_voltage * curr); return snprintf(buf, PAGE_SIZE, "%u\n", power); } static ssize_t ltc4245_show_alarm(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da); struct ltc4245_data *data = ltc4245_update_device(dev); const u8 reg = data->cregs[attr->index]; const u32 mask = attr->nr; return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 
1 : 0); } static ssize_t ltc4245_show_gpio(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ltc4245_data *data = ltc4245_update_device(dev); int val = data->gpios[attr->index]; /* handle stale GPIO's */ if (val < 0) return val; /* Convert to millivolts and print */ return snprintf(buf, PAGE_SIZE, "%u\n", val * 10); } /* These macros are used below in constructing device attribute objects * for use with sysfs_create_group() to make a sysfs device file * for each register. */ #define LTC4245_VOLTAGE(name, ltc4245_cmd_idx) \ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \ ltc4245_show_voltage, NULL, ltc4245_cmd_idx) #define LTC4245_CURRENT(name, ltc4245_cmd_idx) \ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \ ltc4245_show_current, NULL, ltc4245_cmd_idx) #define LTC4245_POWER(name, ltc4245_cmd_idx) \ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \ ltc4245_show_power, NULL, ltc4245_cmd_idx) #define LTC4245_ALARM(name, mask, reg) \ static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \ ltc4245_show_alarm, NULL, (mask), reg) #define LTC4245_GPIO_VOLTAGE(name, gpio_num) \ static SENSOR_DEVICE_ATTR(name, S_IRUGO, \ ltc4245_show_gpio, NULL, gpio_num) /* Construct a sensor_device_attribute structure for each register */ /* Input voltages */ LTC4245_VOLTAGE(in1_input, LTC4245_12VIN); LTC4245_VOLTAGE(in2_input, LTC4245_5VIN); LTC4245_VOLTAGE(in3_input, LTC4245_3VIN); LTC4245_VOLTAGE(in4_input, LTC4245_VEEIN); /* Input undervoltage alarms */ LTC4245_ALARM(in1_min_alarm, (1 << 0), LTC4245_FAULT1); LTC4245_ALARM(in2_min_alarm, (1 << 1), LTC4245_FAULT1); LTC4245_ALARM(in3_min_alarm, (1 << 2), LTC4245_FAULT1); LTC4245_ALARM(in4_min_alarm, (1 << 3), LTC4245_FAULT1); /* Currents (via sense resistor) */ LTC4245_CURRENT(curr1_input, LTC4245_12VSENSE); LTC4245_CURRENT(curr2_input, LTC4245_5VSENSE); LTC4245_CURRENT(curr3_input, LTC4245_3VSENSE); LTC4245_CURRENT(curr4_input, LTC4245_VEESENSE); /* Overcurrent alarms */ 
LTC4245_ALARM(curr1_max_alarm, (1 << 4), LTC4245_FAULT1);
LTC4245_ALARM(curr2_max_alarm, (1 << 5), LTC4245_FAULT1);
LTC4245_ALARM(curr3_max_alarm, (1 << 6), LTC4245_FAULT1);
LTC4245_ALARM(curr4_max_alarm, (1 << 7), LTC4245_FAULT1);

/* Output voltages */
LTC4245_VOLTAGE(in5_input, LTC4245_12VOUT);
LTC4245_VOLTAGE(in6_input, LTC4245_5VOUT);
LTC4245_VOLTAGE(in7_input, LTC4245_3VOUT);
LTC4245_VOLTAGE(in8_input, LTC4245_VEEOUT);

/* Power Bad alarms */
LTC4245_ALARM(in5_min_alarm, (1 << 0), LTC4245_FAULT2);
LTC4245_ALARM(in6_min_alarm, (1 << 1), LTC4245_FAULT2);
LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);

/* GPIO voltages */
LTC4245_GPIO_VOLTAGE(in9_input, 0);
LTC4245_GPIO_VOLTAGE(in10_input, 1);
LTC4245_GPIO_VOLTAGE(in11_input, 2);

/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
LTC4245_POWER(power2_input, LTC4245_5VSENSE);
LTC4245_POWER(power3_input, LTC4245_3VSENSE);
LTC4245_POWER(power4_input, LTC4245_VEESENSE);

/* Finally, construct an array of pointers to members of the above objects,
 * as required for sysfs_create_group()
 */

/* Attributes every LTC4245 exposes; in9 is GPIO1, always present */
static struct attribute *ltc4245_std_attributes[] = {
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,

	&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_min_alarm.dev_attr.attr,

	&sensor_dev_attr_curr1_input.dev_attr.attr,
	&sensor_dev_attr_curr2_input.dev_attr.attr,
	&sensor_dev_attr_curr3_input.dev_attr.attr,
	&sensor_dev_attr_curr4_input.dev_attr.attr,

	&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
	&sensor_dev_attr_curr4_max_alarm.dev_attr.attr,

	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in7_input.dev_attr.attr,
	&sensor_dev_attr_in8_input.dev_attr.attr,

	&sensor_dev_attr_in5_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in6_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in7_min_alarm.dev_attr.attr,
	&sensor_dev_attr_in8_min_alarm.dev_attr.attr,

	&sensor_dev_attr_in9_input.dev_attr.attr,

	&sensor_dev_attr_power1_input.dev_attr.attr,
	&sensor_dev_attr_power2_input.dev_attr.attr,
	&sensor_dev_attr_power3_input.dev_attr.attr,
	&sensor_dev_attr_power4_input.dev_attr.attr,

	NULL,
};

/* Extra GPIO ADC inputs, only registered when use_extra_gpios is set */
static struct attribute *ltc4245_gpio_attributes[] = {
	&sensor_dev_attr_in10_input.dev_attr.attr,
	&sensor_dev_attr_in11_input.dev_attr.attr,
	NULL,
};

static const struct attribute_group ltc4245_std_group = {
	.attrs = ltc4245_std_attributes,
};

static const struct attribute_group ltc4245_gpio_group = {
	.attrs = ltc4245_gpio_attributes,
};

/*
 * Register the standard attribute group and, when extra-GPIO mode is
 * enabled, the gpio group as well. On failure nothing stays registered.
 * Returns 0 or the sysfs_create_group() error.
 */
static int ltc4245_sysfs_create_groups(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);
	struct device *dev = &client->dev;
	int ret;

	/* register the standard sysfs attributes */
	ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
	if (ret) {
		dev_err(dev, "unable to register standard attributes\n");
		return ret;
	}

	/* if we're using the extra gpio support, register its attributes */
	if (data->use_extra_gpios) {
		ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
		if (ret) {
			dev_err(dev, "unable to register gpio attributes\n");
			sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
			return ret;
		}
	}

	return 0;
}

/* Undo ltc4245_sysfs_create_groups() in reverse order */
static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);
	struct device *dev = &client->dev;

	if (data->use_extra_gpios)
		sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);

	sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
}

/*
 * Decide whether the extra GPIO inputs should be exposed.
 * Platform data wins; otherwise the "ltc4245,use-extra-gpios" DT
 * property enables them when the kernel has OF support.
 */
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
{
	struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev);
#ifdef CONFIG_OF
	struct device_node *np = client->dev.of_node;
#endif

	/* prefer platform data */
	if (pdata)
		return pdata->use_extra_gpios;

#ifdef CONFIG_OF
	/* fallback on OF */
	if (of_find_property(np, "ltc4245,use-extra-gpios", NULL))
		return true;
#endif

	return false;
}

/*
 * Bind the driver to an LTC4245: allocate client state, clear the fault
 * registers, publish the sysfs attributes and register with hwmon.
 * Unwinds fully on any failure (goto-cleanup chain below).
 */
static int ltc4245_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = client->adapter;
	struct ltc4245_data *data;
	int ret;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out_kzalloc;
	}

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);
	data->use_extra_gpios = ltc4245_use_extra_gpios(client);

	/* Initialize the LTC4245 chip */
	i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
	i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);

	/* Register sysfs hooks */
	ret = ltc4245_sysfs_create_groups(client);
	if (ret)
		goto out_sysfs_create_groups;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		goto out_hwmon_device_register;
	}

	return 0;

out_hwmon_device_register:
	ltc4245_sysfs_remove_groups(client);
out_sysfs_create_groups:
	kfree(data);
out_kzalloc:
	return ret;
}

/* Tear down in the opposite order of probe */
static int ltc4245_remove(struct i2c_client *client)
{
	struct ltc4245_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	ltc4245_sysfs_remove_groups(client);
	kfree(data);

	return 0;
}

static const struct i2c_device_id ltc4245_id[] = {
	{ "ltc4245", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4245_id);

/* This is the driver that will be inserted */
static struct i2c_driver ltc4245_driver = {
	.driver = {
		.name	= "ltc4245",
	},
	.probe		= ltc4245_probe,
	.remove		= ltc4245_remove,
	.id_table	= ltc4245_id,
};

static int __init ltc4245_init(void)
{
	return i2c_add_driver(&ltc4245_driver);
}

static void __exit ltc4245_exit(void)
{
	i2c_del_driver(&ltc4245_driver);
}

MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("LTC4245 driver");
MODULE_LICENSE("GPL");

module_init(ltc4245_init);
module_exit(ltc4245_exit);
gpl-2.0
peat-psuwit/android_kernel_lge_w7ds
arch/x86/kernel/process.c
3985
17574
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

#ifdef CONFIG_X86_64
/* Per-CPU flag: set while this CPU is inside the idle loop */
static DEFINE_PER_CPU(unsigned char, is_idle);
#endif

/* Slab cache for the extended (xstate/FPU) portion of thread state */
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * Fork-time arch hook: struct-copy the parent's task_struct and give
 * the child its own FPU state buffer when the parent has one allocated.
 * Returns 0 on success or -ENOMEM from fpu_alloc().
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

/* Release a task's FPU/xstate buffer back to task_xstate_cachep */
void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

/* Free a thread_info: drop the owning task's xstate, then the stack pages */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}

/* Create the xstate slab cache; SLAB_PANIC means no error path needed */
void arch_task_cache_init(void)
{
        task_xstate_cachep =
        	kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	/* If this task had an ioperm() bitmap, detach and free it */
	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

/* Dump registers plus a stack trace starting at the trapped frame */
void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

/*
 * Print the pid/comm/taint/kernel-version banner plus DMI system
 * identification; shared by the 32- and 64-bit register dumpers.
 */
void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}

/* Reset per-thread debug, TLS and FPU state (e.g. across exec) */
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

/* Set CR4.TSD so userspace RDTSC faults on this CPU */
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

/* Disable userspace RDTSC for the current task (see set_tsc_mode()) */
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

/* Clear CR4.TSD so userspace RDTSC works again on this CPU */
static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
*/ hard_enable_TSC(); preempt_enable(); } int get_tsc_mode(unsigned long adr) { unsigned int val; if (test_thread_flag(TIF_NOTSC)) val = PR_TSC_SIGSEGV; else val = PR_TSC_ENABLE; return put_user(val, (unsigned int __user *)adr); } int set_tsc_mode(unsigned int val) { if (val == PR_TSC_SIGSEGV) disable_TSC(); else if (val == PR_TSC_ENABLE) enable_TSC(); else return -EINVAL; return 0; } void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss) { struct thread_struct *prev, *next; prev = &prev_p->thread; next = &next_p->thread; if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^ test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); debugctl &= ~DEBUGCTLMSR_BTF; if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) debugctl |= DEBUGCTLMSR_BTF; update_debugctlmsr(debugctl); } if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ test_tsk_thread_flag(next_p, TIF_NOTSC)) { /* prev and next are different */ if (test_tsk_thread_flag(next_p, TIF_NOTSC)) hard_disable_TSC(); else hard_enable_TSC(); } if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { /* * Copy the relevant range of the IO bitmap. * Normally this is 128 bytes or less: */ memcpy(tss->io_bitmap, next->io_bitmap_ptr, max(prev->io_bitmap_max, next->io_bitmap_max)); } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { /* * Clear any possible leftover bits: */ memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); } propagate_user_return_notify(prev_p, next_p); } int sys_fork(struct pt_regs *regs) { return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); } /* * This is trivial, and on the face of it looks like it * could equally well be done in user mode. * * Not so, for quite unobvious reasons - register pressure. * In user mode vfork() cannot have a stack frame, and if * done by calling the "clone()" system call directly, you * do not have enough call-clobbered registers to hold all * the information you need. 
*/ int sys_vfork(struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); } long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) { if (!newsp) newsp = regs->sp; return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } /* * This gets run with %si containing the * function to call, and %di containing * the "args". */ extern void kernel_thread_helper(void); /* * Create a kernel thread */ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.si = (unsigned long) fn; regs.di = (unsigned long) arg; #ifdef CONFIG_X86_32 regs.ds = __USER_DS; regs.es = __USER_DS; regs.fs = __KERNEL_PERCPU; regs.gs = __KERNEL_STACK_CANARY; #else regs.ss = __KERNEL_DS; #endif regs.orig_ax = -1; regs.ip = (unsigned long) kernel_thread_helper; regs.cs = __KERNEL_CS | get_kernel_rpl(); regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; /* Ok, create the new process.. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } EXPORT_SYMBOL(kernel_thread); /* * sys_execve() executes a new program. */ long sys_execve(const char __user *name, const char __user *const __user *argv, const char __user *const __user *envp, struct pt_regs *regs) { long error; char *filename; filename = getname(name); error = PTR_ERR(filename); if (IS_ERR(filename)) return error; error = do_execve(filename, argv, envp, regs); #ifdef CONFIG_X86_32 if (error == 0) { /* Make sure we don't return using sysenter.. */ set_thread_flag(TIF_IRET); } #endif putname(filename); return error; } /* * Idle related variables and functions */ unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); /* * Powermanagement idle function, if any.. 
*/ void (*pm_idle)(void); #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(pm_idle); #endif static inline int hlt_use_halt(void) { return 1; } #ifndef CONFIG_SMP static inline void play_dead(void) { BUG(); } #endif #ifdef CONFIG_X86_64 void enter_idle(void) { percpu_write(is_idle, 1); idle_notifier_call_chain(IDLE_START); } static void __exit_idle(void) { if (x86_test_and_clear_bit_percpu(0, is_idle) == 0) return; idle_notifier_call_chain(IDLE_END); } /* Called from interrupts to signify idle end */ void exit_idle(void) { /* idle loop has pid 0 */ if (current->pid) return; __exit_idle(); } #endif /* * The idle thread. There's no useful work to be * done, so just try to conserve power and have a * low exit latency (ie sit in a loop waiting for * somebody to say that they'd like to reschedule) */ void cpu_idle(void) { /* * If we're the non-boot CPU, nothing set the stack canary up * for us. CPU0 already has it initialized but no harm in * doing it again. This is a good place for updating it, as * we wont ever return from this function (so the invalid * canaries already on the stack wont ever trigger). */ boot_init_stack_canary(); current_thread_info()->status |= TS_POLLING; while (1) { tick_nohz_idle_enter(); while (!need_resched()) { rmb(); if (cpu_is_offline(smp_processor_id())) play_dead(); /* * Idle routines should keep interrupts disabled * from here on, until they go to idle. * Otherwise, idle callbacks can misfire. */ local_touch_nmi(); local_irq_disable(); enter_idle(); /* Don't trace irqs off for idle */ stop_critical_timings(); /* enter_idle() needs rcu for notifiers */ rcu_idle_enter(); if (cpuidle_idle_call()) pm_idle(); rcu_idle_exit(); start_critical_timings(); /* In many cases the interrupt that ended idle has already called exit_idle. But some idle loops can be woken up without interrupt. */ __exit_idle(); } tick_nohz_idle_exit(); preempt_enable_no_resched(); schedule(); preempt_disable(); } } /* * We use this if we don't have any better * idle routine.. 
*/ void default_idle(void) { if (hlt_use_halt()) { trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); trace_cpu_idle_rcuidle(1, smp_processor_id()); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we * test NEED_RESCHED: */ smp_mb(); if (!need_resched()) safe_halt(); /* enables interrupts racelessly */ else local_irq_enable(); current_thread_info()->status |= TS_POLLING; trace_power_end_rcuidle(smp_processor_id()); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else { local_irq_enable(); /* loop is done by the caller */ cpu_relax(); } } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(default_idle); #endif bool set_pm_idle_to_default(void) { bool ret = !!pm_idle; pm_idle = default_idle; return ret; } void stop_this_cpu(void *dummy) { local_irq_disable(); /* * Remove this CPU: */ set_cpu_online(smp_processor_id(), false); disable_local_APIC(); for (;;) { if (hlt_works(smp_processor_id())) halt(); } } static void do_nothing(void *unused) { } /* * cpu_idle_wait - Used to ensure that all the CPUs discard old value of * pm_idle and update to new pm_idle value. Required while changing pm_idle * handler on SMP systems. * * Caller must have changed pm_idle to the new value before the call. Old * pm_idle value will not be used by any CPU after the return of this function. 
*/ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); /* Default MONITOR/MWAIT with no hints, used for default C1 state */ static void mwait_idle(void) { if (!need_resched()) { trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); trace_cpu_idle_rcuidle(1, smp_processor_id()); if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)&current_thread_info()->flags); __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __sti_mwait(0, 0); else local_irq_enable(); trace_power_end_rcuidle(smp_processor_id()); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else local_irq_enable(); } /* * On SMP it's slightly faster (but much more power-consuming!) * to poll the ->work.need_resched flag instead of waiting for the * cross-CPU IPI to arrive. Use this option with caution. */ static void poll_idle(void) { trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id()); trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); while (!need_resched()) cpu_relax(); trace_power_end_rcuidle(smp_processor_id()); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } /* * mwait selection logic: * * It depends on the CPU. For AMD CPUs that support MWAIT this is * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings * then depend on a clock divisor and current Pstate of the core. If * all cores of a processor are in halt state (C1) the processor can * enter the C1E (C1 enhanced) state. If mwait is used this will never * happen. * * idle=mwait overrides this decision and forces the usage of mwait. 
*/ #define MWAIT_INFO 0x05 #define MWAIT_ECX_EXTENDED_INFO 0x01 #define MWAIT_EDX_C1 0xf0 int mwait_usable(const struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; if (boot_option_idle_override == IDLE_FORCE_MWAIT) return 1; if (c->cpuid_level < MWAIT_INFO) return 0; cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx); /* Check, whether EDX has extended info about MWAIT */ if (!(ecx & MWAIT_ECX_EXTENDED_INFO)) return 1; /* * edx enumeratios MONITOR/MWAIT extensions. Check, whether * C1 supports MWAIT */ return (edx & MWAIT_EDX_C1); } bool amd_e400_c1e_detected; EXPORT_SYMBOL(amd_e400_c1e_detected); static cpumask_var_t amd_e400_c1e_mask; void amd_e400_remove_cpu(int cpu) { if (amd_e400_c1e_mask != NULL) cpumask_clear_cpu(cpu, amd_e400_c1e_mask); } /* * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt * pending message MSR. If we detect C1E, then we handle it the same * way as C3 power states (local apic timer and TSC stop) */ static void amd_e400_idle(void) { if (need_resched()) return; if (!amd_e400_c1e_detected) { u32 lo, hi; rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); if (lo & K8_INTP_C1E_ACTIVE_MASK) { amd_e400_c1e_detected = true; if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halt in AMD C1E"); printk(KERN_INFO "System has AMD C1E enabled\n"); } } if (amd_e400_c1e_detected) { int cpu = smp_processor_id(); if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) { cpumask_set_cpu(cpu, amd_e400_c1e_mask); /* * Force broadcast so ACPI can not interfere. */ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &cpu); printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", cpu); } clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); default_idle(); /* * The switch back from broadcast mode needs to be * called with interrupts disabled. 
*/ local_irq_disable(); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); local_irq_enable(); } else default_idle(); } void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP if (pm_idle == poll_idle && smp_num_siblings > 1) { printk_once(KERN_WARNING "WARNING: polling idle and HT enabled," " performance may degrade.\n"); } #endif if (pm_idle) return; if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { /* * One CPU supports mwait => All CPUs supports mwait */ printk(KERN_INFO "using mwait in idle threads.\n"); pm_idle = mwait_idle; } else if (cpu_has_amd_erratum(amd_erratum_400)) { /* E400: APIC timer interrupt does not wake up CPU from C1e */ printk(KERN_INFO "using AMD E400 aware idle routine\n"); pm_idle = amd_e400_idle; } else pm_idle = default_idle; } void __init init_amd_e400_c1e_mask(void) { /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */ if (pm_idle == amd_e400_idle) zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL); } static int __init idle_setup(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "poll")) { printk("using polling idle threads.\n"); pm_idle = poll_idle; boot_option_idle_override = IDLE_POLL; } else if (!strcmp(str, "mwait")) { boot_option_idle_override = IDLE_FORCE_MWAIT; WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n"); } else if (!strcmp(str, "halt")) { /* * When the boot option of idle=halt is added, halt is * forced to be used for CPU idle. In such case CPU C2/C3 * won't be used again. * To continue to load the CPU idle driver, don't touch * the boot_option_idle_override. */ pm_idle = default_idle; boot_option_idle_override = IDLE_HALT; } else if (!strcmp(str, "nomwait")) { /* * If the boot option of "idle=nomwait" is added, * it means that mwait will be disabled for CPU C2/C3 * states. In such case it won't touch the variable * of boot_option_idle_override. 
*/ boot_option_idle_override = IDLE_NOMWAIT; } else return -1; return 0; } early_param("idle", idle_setup); unsigned long arch_align_stack(unsigned long sp) { if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) sp -= get_random_int() % 8192; return sp & ~0xf; } unsigned long arch_randomize_brk(struct mm_struct *mm) { unsigned long range_end = mm->brk + 0x02000000; return randomize_range(mm->brk, range_end, 0) ? : mm->brk; }
gpl-2.0
cretin45/htc-kernel-ruby
drivers/uwb/lc-dev.c
3985
13007
/*
 * Ultra Wide Band
 * Life cycle of devices
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * FIXME: docs
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/random.h>
#include "uwb-internal.h"

/* We initialize addresses to 0xff (invalid, as it is bcast) */
static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
{
	memset(&addr->data, 0xff, sizeof(addr->data));
}

static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
{
	memset(&addr->data, 0xff, sizeof(addr->data));
}

/* @returns !0 if a device @addr is a broadcast address */
static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
{
	static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
	return !uwb_dev_addr_cmp(addr, &bcast);
}

/*
 * Add callback @new to be called when an event occurs in @rc.
 *
 * Returns 0, or -ERESTARTSYS if interrupted while waiting for the
 * notification chain mutex.
 */
int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
{
	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
		return -ERESTARTSYS;
	list_add(&new->list_node, &rc->notifs_chain.list);
	mutex_unlock(&rc->notifs_chain.mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(uwb_notifs_register);

/*
 * Remove event handler (callback)
 *
 * Returns 0, or -ERESTARTSYS if interrupted before the handler could
 * be unlinked.
 */
int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
{
	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
		return -ERESTARTSYS;
	list_del(&entry->list_node);
	mutex_unlock(&rc->notifs_chain.mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(uwb_notifs_deregister);

/*
 * Notify all event handlers of a given event on @rc
 *
 * We are called with a valid reference to the device, or NULL if the
 * event is not for a particular event (e.g., a BG join event).
 *
 * NOTE(review): if the mutex acquisition is interrupted by a signal the
 * notification is silently dropped -- callers cannot tell.
 */
void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
{
	struct uwb_notifs_handler *handler;
	if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
		return;
	if (!list_empty(&rc->notifs_chain.list)) {
		list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
			handler->cb(handler->data, uwb_dev, event);
		}
	}
	mutex_unlock(&rc->notifs_chain.mutex);
}

/*
 * Release the backing device of a uwb_dev that has been dynamically allocated.
 *
 * Drops the beacon-cache-entry reference taken at onair time; the 0x69
 * poison fill helps catch use-after-free of the uwb_dev.
 */
static void uwb_dev_sys_release(struct device *dev)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	uwb_bce_put(uwb_dev->bce);
	memset(uwb_dev, 0x69, sizeof(*uwb_dev));
	kfree(uwb_dev);
}

/*
 * Initialize a UWB device instance
 *
 * Alloc, zero and call this function.
 */
void uwb_dev_init(struct uwb_dev *uwb_dev)
{
	mutex_init(&uwb_dev->mutex);
	device_initialize(&uwb_dev->dev);
	uwb_dev->dev.release = uwb_dev_sys_release;
	uwb_dev_addr_init(&uwb_dev->dev_addr);
	uwb_mac_addr_init(&uwb_dev->mac_addr);
	bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
}

/* sysfs: print the device's EUI-48 (MAC) address */
static ssize_t uwb_dev_EUI_48_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	char addr[UWB_ADDR_STRSIZE];

	uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
	return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);

/* sysfs: print the device's 16-bit device address */
static ssize_t uwb_dev_DevAddr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	char addr[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
	return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);

/*
 * Show the BPST of this device.
 *
 * Calculated from the receive time of the device's beacon and its
 * slot number.
 */
static ssize_t uwb_dev_BPST_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_beca_e *bce;
	struct uwb_beacon_frame *bf;
	u16 bpst;

	bce = uwb_dev->bce;
	mutex_lock(&bce->mutex);
	bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
	bpst = bce->be->wBPSTOffset
		- (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
	mutex_unlock(&bce->mutex);

	return sprintf(buf, "%d\n", bpst);
}
static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);

/*
 * Show the IEs a device is beaconing
 *
 * We need to access the beacon cache, so we just lock it really
 * quick, print the IEs and unlock.
 *
 * We have a reference on the cache entry, so that should be
 * quite safe.
*/ static ssize_t uwb_dev_IEs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE); } static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL); static ssize_t uwb_dev_LQE_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_beca_e *bce = uwb_dev->bce; size_t result; mutex_lock(&bce->mutex); result = stats_show(&uwb_dev->bce->lqe_stats, buf); mutex_unlock(&bce->mutex); return result; } static ssize_t uwb_dev_LQE_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_beca_e *bce = uwb_dev->bce; ssize_t result; mutex_lock(&bce->mutex); result = stats_store(&uwb_dev->bce->lqe_stats, buf, size); mutex_unlock(&bce->mutex); return result; } static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store); static ssize_t uwb_dev_RSSI_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_beca_e *bce = uwb_dev->bce; size_t result; mutex_lock(&bce->mutex); result = stats_show(&uwb_dev->bce->rssi_stats, buf); mutex_unlock(&bce->mutex); return result; } static ssize_t uwb_dev_RSSI_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_beca_e *bce = uwb_dev->bce; ssize_t result; mutex_lock(&bce->mutex); result = stats_store(&uwb_dev->bce->rssi_stats, buf, size); mutex_unlock(&bce->mutex); return result; } static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store); static struct attribute *dev_attrs[] = { &dev_attr_EUI_48.attr, &dev_attr_DevAddr.attr, &dev_attr_BPST.attr, &dev_attr_IEs.attr, &dev_attr_LQE.attr, &dev_attr_RSSI.attr, NULL, }; static struct attribute_group dev_attr_group = { .attrs = 
dev_attrs, }; static const struct attribute_group *groups[] = { &dev_attr_group, NULL, }; /** * Device SYSFS registration * * */ static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) { struct device *dev; dev = &uwb_dev->dev; /* Device sysfs files are only useful for neighbor devices not local radio controllers. */ if (&uwb_dev->rc->uwb_dev != uwb_dev) dev->groups = groups; dev->parent = parent_dev; dev_set_drvdata(dev, uwb_dev); return device_add(dev); } static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) { dev_set_drvdata(&uwb_dev->dev, NULL); device_del(&uwb_dev->dev); } /** * Register and initialize a new UWB device * * Did you call uwb_dev_init() on it? * * @parent_rc: is the parent radio controller who has the link to the * device. When registering the UWB device that is a UWB * Radio Controller, we point back to it. * * If registering the device that is part of a radio, caller has set * rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will * be allocated. 
*/ int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, struct uwb_rc *parent_rc) { int result; struct device *dev; BUG_ON(uwb_dev == NULL); BUG_ON(parent_dev == NULL); BUG_ON(parent_rc == NULL); mutex_lock(&uwb_dev->mutex); dev = &uwb_dev->dev; uwb_dev->rc = parent_rc; result = __uwb_dev_sys_add(uwb_dev, parent_dev); if (result < 0) printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n", dev_name(dev), result); mutex_unlock(&uwb_dev->mutex); return result; } void uwb_dev_rm(struct uwb_dev *uwb_dev) { mutex_lock(&uwb_dev->mutex); __uwb_dev_sys_rm(uwb_dev); mutex_unlock(&uwb_dev->mutex); } static int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev) { struct uwb_dev *target_uwb_dev = __target_uwb_dev; struct uwb_dev *uwb_dev = to_uwb_dev(dev); if (uwb_dev == target_uwb_dev) { uwb_dev_get(uwb_dev); return 1; } else return 0; } /** * Given a UWB device descriptor, validate and refcount it * * @returns NULL if the device does not exist or is quiescing; the ptr to * it otherwise. */ struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev) { if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev)) return uwb_dev; else return NULL; } EXPORT_SYMBOL_GPL(uwb_dev_try_get); /** * Remove a device from the system [grunt for other functions] */ int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) { struct device *dev = &uwb_dev->dev; char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", macbuf, devbuf, rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", rc ? dev_name(rc->uwb_dev.dev.parent) : ""); uwb_dev_rm(uwb_dev); list_del(&uwb_dev->bce->node); uwb_bce_put(uwb_dev->bce); uwb_dev_put(uwb_dev); /* for the creation in _onair() */ return 0; } /** * A device went off the air, clean up after it! 
* * This is called by the UWB Daemon (through the beacon purge function * uwb_bcn_cache_purge) when it is detected that a device has been in * radio silence for a while. * * If this device is actually a local radio controller we don't need * to go through the offair process, as it is not registered as that. * * NOTE: uwb_bcn_cache.mutex is held! */ void uwbd_dev_offair(struct uwb_beca_e *bce) { struct uwb_dev *uwb_dev; uwb_dev = bce->uwb_dev; if (uwb_dev) { uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR); __uwb_dev_offair(uwb_dev, uwb_dev->rc); } } /** * A device went on the air, start it up! * * This is called by the UWB Daemon when it is detected that a device * has popped up in the radio range of the radio controller. * * It will just create the freaking device, register the beacon and * stuff and yatla, done. * * * NOTE: uwb_beca.mutex is held, bce->mutex is held */ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) { int result; struct device *dev = &rc->uwb_dev.dev; struct uwb_dev *uwb_dev; char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr); uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr); uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL); if (uwb_dev == NULL) { dev_err(dev, "new device %s: Cannot allocate memory\n", macbuf); return; } uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ uwb_dev->mac_addr = *bce->mac_addr; uwb_dev->dev_addr = bce->dev_addr; dev_set_name(&uwb_dev->dev, macbuf); result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); if (result < 0) { dev_err(dev, "new device %s: cannot instantiate device\n", macbuf); goto error_dev_add; } /* plug the beacon cache */ bce->uwb_dev = uwb_dev; uwb_dev->bce = bce; uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, dev_name(rc->uwb_dev.dev.parent)); uwb_notify(rc, uwb_dev, 
UWB_NOTIF_ONAIR); return; error_dev_add: kfree(uwb_dev); return; } /** * Iterate over the list of UWB devices, calling a @function on each * * See docs for bus_for_each().... * * @rc: radio controller for the devices. * @function: function to call. * @priv: data to pass to @function. * @returns: 0 if no invocation of function() returned a value * different to zero. That value otherwise. */ int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv) { return device_for_each_child(&rc->uwb_dev.dev, priv, function); } EXPORT_SYMBOL_GPL(uwb_dev_for_each);
gpl-2.0
Hybridmax/G92X_Mystery_Kernel
drivers/media/usb/gspca/m5602/m5602_po1030.c
4241
12685
/* * Driver for the po1030 sensor * * Copyright (c) 2008 Erik Andrén * Copyright (c) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project. * Copyright (c) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_po1030.h" static int po1030_s_ctrl(struct v4l2_ctrl *ctrl); static void po1030_dump_registers(struct sd *sd); static struct v4l2_pix_format po1030_modes[] = { { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 } }; static const struct v4l2_ctrl_ops po1030_ctrl_ops = { .s_ctrl = po1030_s_ctrl, }; static const struct v4l2_ctrl_config po1030_greenbal_cfg = { .ops = &po1030_ctrl_ops, .id = M5602_V4L2_CID_GREEN_BALANCE, .name = "Green Balance", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 255, .step = 1, .def = PO1030_GREEN_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER, }; int po1030_probe(struct sd *sd) { u8 dev_id_h = 0, i; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { if (force_sensor == PO1030_SENSOR) { pr_info("Forcing a %s sensor\n", po1030.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this * one */ return -ENODEV; } PDEBUG(D_PROBE, "Probing for a po1030 sensor"); /* Run the pre-init to actually probe the unit */ for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { u8 data = preinit_po1030[i][2]; if (preinit_po1030[i][0] == SENSOR) m5602_write_sensor(sd, preinit_po1030[i][1], &data, 1); else m5602_write_bridge(sd, preinit_po1030[i][1], data); } if (m5602_read_sensor(sd, 
PO1030_DEVID_H, &dev_id_h, 1)) return -ENODEV; if (dev_id_h == 0x30) { pr_info("Detected a po1030 sensor\n"); goto sensor_found; } return -ENODEV; sensor_found: sd->gspca_dev.cam.cam_mode = po1030_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes); return 0; } int po1030_init(struct sd *sd) { int i, err = 0; /* Init the sensor */ for (i = 0; i < ARRAY_SIZE(init_po1030) && !err; i++) { u8 data[2] = {0x00, 0x00}; switch (init_po1030[i][0]) { case BRIDGE: err = m5602_write_bridge(sd, init_po1030[i][1], init_po1030[i][2]); break; case SENSOR: data[0] = init_po1030[i][2]; err = m5602_write_sensor(sd, init_po1030[i][1], data, 1); break; default: pr_info("Invalid stream command, exiting init\n"); return -EINVAL; } } if (err < 0) return err; if (dump_sensor) po1030_dump_registers(sd); return 0; } int po1030_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; sd->gspca_dev.vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 9); sd->auto_white_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0); sd->green_bal = v4l2_ctrl_new_custom(hdl, &po1030_greenbal_cfg, NULL); sd->red_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 255, 1, PO1030_RED_GAIN_DEFAULT); sd->blue_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 255, 1, PO1030_BLUE_GAIN_DEFAULT); sd->autoexpo = v4l2_ctrl_new_std_menu(hdl, &po1030_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_MANUAL); sd->expo = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0x2ff, 1, PO1030_EXPOSURE_DEFAULT); sd->gain = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_GAIN, 0, 0x4f, 1, PO1030_GLOBAL_GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_auto_cluster(4, 
&sd->auto_white_bal, 0, false); v4l2_ctrl_auto_cluster(2, &sd->autoexpo, 0, false); v4l2_ctrl_cluster(2, &sd->hflip); return 0; } int po1030_start(struct sd *sd) { struct cam *cam = &sd->gspca_dev.cam; int i, err = 0; int width = cam->cam_mode[sd->gspca_dev.curr_mode].width; int height = cam->cam_mode[sd->gspca_dev.curr_mode].height; int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv; u8 data; switch (width) { case 320: data = PO1030_SUBSAMPLING; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) return err; data = (width + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 1) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 1) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 6; width -= 1; break; case 640: data = 0; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 7) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) return err; data = (width + 7) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 12; width -= 2; break; } err = m5602_write_bridge(sd, M5602_XB_SENSOR_TYPE, 0x0c); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_LINE_OF_FRAME_H, 0x81); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_PIX_OF_LINE_H, 0x82); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0x01); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 
((ver_offs >> 8) & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); return err; } static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; PDEBUG(D_CONF, "Set exposure to %d", val & 0xffff); i2c_data = ((val & 0xff00) >> 8); PDEBUG(D_CONF, "Set exposure to high byte to 0x%x", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_H, &i2c_data, 1); if (err < 0) return err; i2c_data = (val & 0xff); PDEBUG(D_CONF, "Set exposure to low byte to 0x%x", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_M, &i2c_data, 1); return err; } static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; PDEBUG(D_CONF, "Set global gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_GLOBALGAIN, &i2c_data, 1); return err; } static int po1030_set_hvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; PDEBUG(D_CONF, "Set hvflip %d %d", sd->hflip->val, sd->vflip->val); 
err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); if (err < 0) return err; i2c_data = (0x3f & i2c_data) | (sd->hflip->val << 7) | (sd->vflip->val << 6); err = m5602_write_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); return err; } static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; PDEBUG(D_CONF, "Set red gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_RED_GAIN, &i2c_data, 1); return err; } static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; PDEBUG(D_CONF, "Set blue gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_BLUE_GAIN, &i2c_data, 1); return err; } static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; PDEBUG(D_CONF, "Set green gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_GREEN_1_GAIN, &i2c_data, 1); if (err < 0) return err; return m5602_write_sensor(sd, PO1030_GREEN_2_GAIN, &i2c_data, 1); } static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; PDEBUG(D_CONF, "Set auto white balance to %d", val); i2c_data = (i2c_data & 0xfe) | (val & 0x01); err = m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); return err; } static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; PDEBUG(D_CONF, "Set auto exposure to %d", val); val = (val == V4L2_EXPOSURE_AUTO); i2c_data = (i2c_data & 0xfd) | ((val & 0x01) << 1); return m5602_write_sensor(sd, PO1030_AUTOCTRL1, 
&i2c_data, 1); } void po1030_disconnect(struct sd *sd) { sd->sensor = NULL; } static int po1030_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; int err; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: err = po1030_set_auto_white_balance(gspca_dev, ctrl->val); if (err || ctrl->val) return err; err = po1030_set_green_balance(gspca_dev, sd->green_bal->val); if (err) return err; err = po1030_set_red_balance(gspca_dev, sd->red_bal->val); if (err) return err; err = po1030_set_blue_balance(gspca_dev, sd->blue_bal->val); break; case V4L2_CID_EXPOSURE_AUTO: err = po1030_set_auto_exposure(gspca_dev, ctrl->val); if (err || ctrl->val == V4L2_EXPOSURE_AUTO) return err; err = po1030_set_exposure(gspca_dev, sd->expo->val); break; case V4L2_CID_GAIN: err = po1030_set_gain(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: err = po1030_set_hvflip(gspca_dev); break; default: return -EINVAL; } return err; } static void po1030_dump_registers(struct sd *sd) { int address; u8 value = 0; pr_info("Dumping the po1030 sensor core registers\n"); for (address = 0; address < 0x7f; address++) { m5602_read_sensor(sd, address, &value, 1); pr_info("register 0x%x contains 0x%x\n", address, value); } pr_info("po1030 register state dump complete\n"); pr_info("Probing for which registers that are read/write\n"); for (address = 0; address < 0xff; address++) { u8 old_value, ctrl_value; u8 test_value[2] = {0xff, 0xff}; m5602_read_sensor(sd, address, &old_value, 1); m5602_write_sensor(sd, address, test_value, 1); m5602_read_sensor(sd, address, &ctrl_value, 1); if (ctrl_value == test_value[0]) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original value */ m5602_write_sensor(sd, address, &old_value, 1); } }
gpl-2.0
zjgeer/linux-80211n-csitool
drivers/media/video/ivtv/ivtv-irq.c
5009
31903
/* interrupt handling Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-queue.h" #include "ivtv-udma.h" #include "ivtv-irq.h" #include "ivtv-mailbox.h" #include "ivtv-vbi.h" #include "ivtv-yuv.h" #include <media/v4l2-event.h> #define DMA_MAGIC_COOKIE 0x000001fe static void ivtv_dma_dec_start(struct ivtv_stream *s); static const int ivtv_stream_map[] = { IVTV_ENC_STREAM_TYPE_MPG, IVTV_ENC_STREAM_TYPE_YUV, IVTV_ENC_STREAM_TYPE_PCM, IVTV_ENC_STREAM_TYPE_VBI, }; static void ivtv_pio_work_handler(struct ivtv *itv) { struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream]; struct ivtv_buffer *buf; int i = 0; IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n"); if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS || s->vdev == NULL || !ivtv_use_pio(s)) { itv->cur_pio_stream = -1; /* trigger PIO complete user interrupt */ write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); return; } IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name); list_for_each_entry(buf, &s->q_dma.list, list) { u32 size = s->sg_processing[i].size & 0x3ffff; /* Copy the data from the card to the buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { memcpy_fromio(buf->buf, itv->dec_mem + 
s->sg_processing[i].src - IVTV_DECODER_OFFSET, size); } else { memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size); } i++; if (i == s->sg_processing_size) break; } write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); } void ivtv_irq_work_handler(struct kthread_work *work) { struct ivtv *itv = container_of(work, struct ivtv, irq_work); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) ivtv_pio_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags)) ivtv_vbi_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags)) ivtv_yuv_work_handler(itv); } /* Determine the required DMA size, setup enough buffers in the predma queue and actually copy the data from the card to the buffers in case a PIO transfer is required for this stream. */ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA]) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf; u32 bytes_needed = 0; u32 offset, size; u32 UVoffset = 0, UVsize = 0; int skip_bufs = s->q_predma.buffers; int idx = s->sg_pending_size; int rc; /* sanity checks */ if (s->vdev == NULL) { IVTV_DEBUG_WARN("Stream %s not started\n", s->name); return -1; } if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { IVTV_DEBUG_WARN("Stream %s not open\n", s->name); return -1; } /* determine offset, size and PTS for the various streams */ switch (s->type) { case IVTV_ENC_STREAM_TYPE_MPG: offset = data[1]; size = data[2]; s->pending_pts = 0; break; case IVTV_ENC_STREAM_TYPE_YUV: offset = data[1]; size = data[2]; UVoffset = data[3]; UVsize = data[4]; s->pending_pts = ((u64) data[5] << 32) | data[6]; break; case IVTV_ENC_STREAM_TYPE_PCM: offset = data[1] + 12; size = data[2] - 12; s->pending_pts = read_dec(offset - 8) | ((u64)(read_dec(offset - 12)) << 32); if (itv->has_cx23415) offset += IVTV_DECODER_OFFSET; break; case IVTV_ENC_STREAM_TYPE_VBI: size = itv->vbi.enc_size * itv->vbi.fpi; offset = read_enc(itv->vbi.enc_start - 4) + 12; if 
(offset == 12) { IVTV_DEBUG_INFO("VBI offset == 0\n"); return -1; } s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32); break; case IVTV_DEC_STREAM_TYPE_VBI: size = read_dec(itv->vbi.dec_start + 4) + 8; offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start; s->pending_pts = 0; offset += IVTV_DECODER_OFFSET; break; default: /* shouldn't happen */ return -1; } /* if this is the start of the DMA then fill in the magic cookie */ if (s->sg_pending_size == 0 && ivtv_use_dma(s)) { if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET); write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET); } else { s->pending_backup = read_enc(offset); write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset); } s->pending_offset = offset; } bytes_needed = size; if (s->type == IVTV_ENC_STREAM_TYPE_YUV) { /* The size for the Y samples needs to be rounded upwards to a multiple of the buf_size. The UV samples then start in the next buffer. */ bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size); bytes_needed += UVsize; } IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n", ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset); rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed); if (rc < 0) { /* Insufficient buffers */ IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n", bytes_needed, s->name); return -1; } if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) { IVTV_WARN("All %s stream buffers are full. 
Dropping data.\n", s->name); IVTV_WARN("Cause: the application is not reading fast enough.\n"); } s->buffers_stolen = rc; /* got the buffers, now fill in sg_pending */ buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); memset(buf->buf, 0, 128); list_for_each_entry(buf, &s->q_predma.list, list) { if (skip_bufs-- > 0) continue; s->sg_pending[idx].dst = buf->dma_handle; s->sg_pending[idx].src = offset; s->sg_pending[idx].size = s->buf_size; buf->bytesused = min(size, s->buf_size); buf->dma_xfer_cnt = s->dma_xfer_cnt; s->q_predma.bytesused += buf->bytesused; size -= buf->bytesused; offset += s->buf_size; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); if (size == 0) { /* YUV */ /* process the UV section */ offset = UVoffset; size = UVsize; } idx++; } s->sg_pending_size = idx; return 0; } static void dma_post(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf = NULL; struct list_head *p; u32 offset; __le32 *u32buf; int x = 0; IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? 
"PIO" : "DMA", s->name, s->dma_offset); list_for_each(p, &s->q_dma.list) { buf = list_entry(p, struct ivtv_buffer, list); u32buf = (__le32 *)buf->buf; /* Sync Buffer */ ivtv_buf_sync_for_cpu(s, buf); if (x == 0 && ivtv_use_dma(s)) { offset = s->dma_last_offset; if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) { for (offset = 0; offset < 64; offset++) { if (u32buf[offset] == DMA_MAGIC_COOKIE) { break; } } offset *= 4; if (offset == 256) { IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name); offset = s->dma_last_offset; } if (s->dma_last_offset != offset) IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset); s->dma_last_offset = offset; } if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET); } else { write_enc_sync(0, s->dma_offset); } if (offset) { buf->bytesused -= offset; memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset); } *u32buf = cpu_to_le32(s->dma_backup); } x++; /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */ if (s->type == IVTV_ENC_STREAM_TYPE_MPG || s->type == IVTV_ENC_STREAM_TYPE_VBI) buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP; } if (buf) buf->bytesused += s->dma_last_offset; if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) { list_for_each_entry(buf, &s->q_dma.list, list) { /* Parse and Groom VBI Data */ s->q_dma.bytesused -= buf->bytesused; ivtv_process_vbi_data(itv, buf, 0, s->type); s->q_dma.bytesused += buf->bytesused; } if (s->fh == NULL) { ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); return; } } ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); if (s->fh) wake_up(&s->waitq); } void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) { struct ivtv *itv = s->itv; struct yuv_playback_info *yi = &itv->yuv_info; u8 frame = yi->draw_frame; struct yuv_frame_info *f = &yi->new_frame_info[frame]; struct ivtv_buffer *buf; u32 
y_size = 720 * ((f->src_h + 31) & ~31); u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET; int y_done = 0; int bytes_written = 0; unsigned long flags = 0; int idx = 0; IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset); /* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { if (yi->blanking_dmaptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; } offset += 720 * 16; idx++; } list_for_each_entry(buf, &s->q_predma.list, list) { /* YUV UV Offset from Y Buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && (bytes_written + buf->bytesused) >= y_size) { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = y_size - bytes_written; offset = uv_offset; if (s->sg_pending[idx].size != buf->bytesused) { idx++; s->sg_pending[idx].src = buf->dma_handle + s->sg_pending[idx - 1].size; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused - s->sg_pending[idx - 1].size; offset += s->sg_pending[idx].size; } y_done = 1; } else { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused; offset += buf->bytesused; } bytes_written += buf->bytesused; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); idx++; } s->sg_pending_size = idx; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); if (lock) spin_lock_irqsave(&itv->dma_reg_lock, flags); if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) { ivtv_dma_dec_start(s); } else { set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); } if (lock) spin_unlock_irqrestore(&itv->dma_reg_lock, flags); } static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = 
cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_DECDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } /* start the encoder DMA */ static void ivtv_dma_enc_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; int i; IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name); if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); if (ivtv_use_dma(s)) s->sg_pending[s->sg_pending_size - 1].size += 256; /* If this is an MPEG stream, and VBI data is also pending, then append the VBI DMA to the MPEG DMA and transfer both sets of data at once. VBI DMA is a second class citizen compared to MPEG and mixing them together will confuse the firmware (the end of a VBI DMA is seen as the end of a MPEG DMA, thus effectively dropping an MPEG frame). So instead we make sure we only use the MPEG DMA to transfer the VBI DMA if both are in use. This way no conflicts occur. 
*/ clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size && s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) { ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused); if (ivtv_use_dma(s_vbi)) s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256; for (i = 0; i < s_vbi->sg_pending_size; i++) { s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i]; } s_vbi->dma_offset = s_vbi->pending_offset; s_vbi->sg_pending_size = 0; s_vbi->dma_xfer_cnt++; set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name); } s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; s->dma_offset = s->pending_offset; s->dma_backup = s->pending_backup; s->dma_pts = s->pending_pts; if (ivtv_use_pio(s)) { set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); set_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = s->type; } else { itv->dma_retries = 0; ivtv_dma_enc_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } } static void ivtv_dma_dec_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name); itv->dma_retries = 0; ivtv_dma_dec_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } static void ivtv_irq_dma_read(struct ivtv *itv) { struct ivtv_stream *s = NULL; struct ivtv_buffer *buf; int hw_stream_type = 0; IVTV_DEBUG_HI_IRQ("DEC 
DMA READ\n"); del_timer(&itv->dma_timer); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) return; if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (read_reg(IVTV_REG_DMASTATUS) & 0x14) { IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n", read_reg(IVTV_REG_DMASTATUS), s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_dec_start_xfer(s); return; } if (s->type == IVTV_DEC_STREAM_TYPE_YUV) hw_stream_type = 2; IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused); /* For some reason must kick the firmware, like PIO mode, I think this tells the firmware we are done and the size of the xfer so it can calculate what we need next. 
I think we can do this part ourselves but would have to fully calculate xfer info ourselves and not use interrupts */ ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused, hw_stream_type); /* Free last DMA call */ while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) { ivtv_buf_sync_for_cpu(s, buf); ivtv_enqueue(s, buf, &s->q_free); } wake_up(&s->waitq); } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_dma_complete(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream); del_timer(&itv->dma_timer); if (itv->cur_dma_stream < 0) return; s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (data[0] & 0x18) { IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0], s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. 
*/ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_enc_start_xfer(s); return; } clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; dma_post(s); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } s->sg_processing_size = 0; s->sg_processed = 0; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_pio_complete(struct ivtv *itv) { struct ivtv_stream *s; if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) { itv->cur_pio_stream = -1; return; } s = &itv->streams[itv->cur_pio_stream]; IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name); clear_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = -1; dma_post(s); if (s->type == IVTV_ENC_STREAM_TYPE_MPG) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0); else if (s->type == IVTV_ENC_STREAM_TYPE_YUV) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1); else if (s->type == IVTV_ENC_STREAM_TYPE_PCM) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2); clear_bit(IVTV_F_I_PIO, &itv->i_flags); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } wake_up(&itv->dma_waitq); } static void ivtv_irq_dma_err(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; u32 status; del_timer(&itv->dma_timer); ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); status = read_reg(IVTV_REG_DMASTATUS); IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], status, itv->cur_dma_stream); /* * We do *not* write back to the IVTV_REG_DMASTATUS register to * clear the error status, if either the encoder write (0x02) or * decoder read (0x01) bus master DMA operation do not indicate * completed. We can race with the DMA engine, which may have * transitioned to completed status *after* we read the register. 
* Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the * DMA engine has completed, will cause the DMA engine to stop working. */ status &= 0x3; if (status == 0x3) write_reg(status, IVTV_REG_DMASTATUS); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { /* retry */ /* * FIXME - handle cases of DMA error similar to * encoder below, except conditioned on status & 0x1 */ ivtv_dma_dec_start(s); return; } else { if ((status & 0x2) == 0) { /* * CX2341x Bus Master DMA write is ongoing. * Reset the timer and let it complete. */ itv->dma_timer.expires = jiffies + msecs_to_jiffies(600); add_timer(&itv->dma_timer); return; } if (itv->dma_retries < 3) { /* * CX2341x Bus Master DMA write has ended. * Retry the write, starting with the first * xfer segment. Just retrying the current * segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; ivtv_dma_enc_start_xfer(s); return; } /* Too many retries, give up on this one */ } } if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { ivtv_udma_start(itv); return; } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_start_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* Get DMA destination and size arguments from card */ ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data); IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]); if (data[0] > 2 || data[1] == 0 || data[2] == 0) { IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n", data[0], data[1], data[2]); return; } s = &itv->streams[ivtv_stream_map[data[0]]]; if (!stream_enc_dma_append(s, data)) { set_bit(ivtv_use_pio(s) ? 
IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } } static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n"); s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; if (!stream_enc_dma_append(s, data)) set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n"); if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) && !stream_enc_dma_append(s, data)) { set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags); } } static void ivtv_irq_dec_data_req(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* YUV or MPG */ if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data); itv->dma_data_req_size = 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); itv->dma_data_req_offset = data[1]; if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0) ivtv_yuv_frame_complete(itv); s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; } else { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data); itv->dma_data_req_size = min_t(u32, data[2], 0x10000); itv->dma_data_req_offset = data[1]; s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; } IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused, itv->dma_data_req_offset, itv->dma_data_req_size); if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) { set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); } else { if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) ivtv_yuv_setup_stream_frame(itv); clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size); ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0); } } static void ivtv_irq_vsync(struct ivtv *itv) { /* The 
vsync interrupt is unusual in that it won't clear until * the end of the first line for the current field, at which * point it clears itself. This can result in repeated vsync * interrupts, or a missed vsync. Read some of the registers * to determine the line being displayed and ensure we handle * one vsync per frame. */ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1; struct yuv_playback_info *yi = &itv->yuv_info; int last_dma_frame = atomic_read(&yi->next_dma_frame); struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame]; if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n"); if (((frame ^ f->sync_field) == 0 && ((itv->last_vsync_field & 1) ^ f->sync_field)) || (frame != (itv->last_vsync_field & 1) && !f->interlaced)) { int next_dma_frame = last_dma_frame; if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) { if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) { write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); write_reg(yuv_offset[next_dma_frame] >> 4, 0x834); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS; atomic_set(&yi->next_dma_frame, next_dma_frame); yi->fields_lapsed = -1; yi->running = 1; } } } if (frame != (itv->last_vsync_field & 1)) { static const struct v4l2_event evtop = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_TOP, }; static const struct v4l2_event evbottom = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_BOTTOM, }; struct ivtv_stream *s = ivtv_get_output_stream(itv); itv->last_vsync_field += 1; if (frame == 0) { clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } else { set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) { set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags); wake_up(&itv->event_waitq); if (s) 
wake_up(&s->waitq); } if (s && s->vdev) v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom); wake_up(&itv->vsync_waitq); /* Send VBI to saa7127 */ if (frame && (itv->output_mode == OUT_PASSTHROUGH || test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) { set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } /* Check if we need to update the yuv registers */ if (yi->running && (yi->yuv_forced_update || f->update)) { if (!f->update) { last_dma_frame = (u8)(atomic_read(&yi->next_dma_frame) - 1) % IVTV_YUV_BUFFERS; f = &yi->new_frame_info[last_dma_frame]; } if (f->src_w) { yi->update_frame = last_dma_frame; f->update = 0; yi->yuv_forced_update = 0; set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } } yi->fields_lapsed++; } } #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT) irqreturn_t ivtv_irq_handler(int irq, void *dev_id) { struct ivtv *itv = (struct ivtv *)dev_id; u32 combo; u32 stat; int i; u8 vsync_force = 0; spin_lock(&itv->dma_reg_lock); /* get contents of irq status register */ stat = read_reg(IVTV_REG_IRQSTATUS); combo = ~itv->irqmask & stat; /* Clear out IRQ */ if (combo) write_reg(combo, IVTV_REG_IRQSTATUS); if (0 == combo) { /* The vsync interrupt is unusual and clears itself. If we * took too long, we may have missed it. 
Do some checks */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { /* vsync is enabled, see if we're in a new field */ if ((itv->last_vsync_field & 1) != (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) { /* New field, looks like we missed it */ IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16); vsync_force = 1; } } if (!vsync_force) { /* No Vsync expected, wasn't for us */ spin_unlock(&itv->dma_reg_lock); return IRQ_NONE; } } /* Exclude interrupts noted below from the output, otherwise the log is flooded with these messages */ if (combo & ~0xff6d0400) IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo); if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n"); } if (combo & IVTV_IRQ_DMA_READ) { ivtv_irq_dma_read(itv); } if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { ivtv_irq_enc_dma_complete(itv); } if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) { ivtv_irq_enc_pio_complete(itv); } if (combo & IVTV_IRQ_DMA_ERR) { ivtv_irq_dma_err(itv); } if (combo & IVTV_IRQ_ENC_START_CAP) { ivtv_irq_enc_start_cap(itv); } if (combo & IVTV_IRQ_ENC_VBI_CAP) { ivtv_irq_enc_vbi_cap(itv); } if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { ivtv_irq_dec_vbi_reinsert(itv); } if (combo & IVTV_IRQ_ENC_EOS) { IVTV_DEBUG_IRQ("ENC EOS\n"); set_bit(IVTV_F_I_EOS, &itv->i_flags); wake_up(&itv->eos_waitq); } if (combo & IVTV_IRQ_DEC_DATA_REQ) { ivtv_irq_dec_data_req(itv); } /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { ivtv_irq_vsync(itv); } if (combo & IVTV_IRQ_ENC_VIM_RST) { IVTV_DEBUG_IRQ("VIM RST\n"); /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */ } if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { IVTV_DEBUG_INFO("Stereo mode changed\n"); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if 
(!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) continue; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_dec_start(s); else ivtv_dma_enc_start(s); break; } if (i == IVTV_MAX_STREAMS && test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) ivtv_udma_start(itv); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags)) continue; if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_enc_start(s); break; } } if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { queue_kthread_work(&itv->irq_worker, &itv->irq_work); } spin_unlock(&itv->dma_reg_lock); /* If we've just handled a 'forced' vsync, it's safest to say it * wasn't ours. Another device may have triggered it at just * the right time. */ return vsync_force ? IRQ_NONE : IRQ_HANDLED; } void ivtv_unfinished_dma(unsigned long arg) { struct ivtv *itv = (struct ivtv *)arg; if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) return; IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); }
gpl-2.0
Arc-Team/android_kernel_htc_a11
drivers/usb/host/ehci-ppc-of.c
5521
6383
/* * EHCI HCD (Host Controller Driver) for USB. * * Bus Glue for PPC On-Chip EHCI driver on the of_platform bus * Tested on AMCC PPC 440EPx * * Valentine Barshak <vbarshak@ru.mvista.com> * * Based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> * * This file is licenced under the GPL. */ #include <linux/signal.h> #include <linux/of.h> #include <linux/of_platform.h> /* called during probe() after chip reset completes */ static int ehci_ppc_of_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; retval = ehci_halt(ehci); if (retval) return retval; retval = ehci_init(hcd); if (retval) return retval; ehci->sbrn = 0x20; return ehci_reset(ehci); } static const struct hc_driver ehci_ppc_of_hc_driver = { .description = hcd_name, .product_desc = "OF EHCI", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, /* * basic lifecycle operations */ .reset = ehci_ppc_of_setup, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /* * 440EPx Errata USBH_3 * Fix: Enable Break Memory Transfer (BMT) in INSNREG3 */ #define PPC440EPX_EHCI0_INSREG_BMT (0x1 << 0) static int __devinit ppc44x_enable_bmt(struct device_node *dn) { __iomem u32 *insreg_virt; insreg_virt = of_iomap(dn, 1); if (!insreg_virt) return -EINVAL; 
out_be32(insreg_virt + 3, PPC440EPX_EHCI0_INSREG_BMT); iounmap(insreg_virt); return 0; } static int __devinit ehci_hcd_ppc_of_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci = NULL; struct resource res; int irq; int rv; struct device_node *np; if (usb_disabled()) return -ENODEV; dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n"); rv = of_address_to_resource(dn, 0, &res); if (rv) return rv; hcd = usb_create_hcd(&ehci_ppc_of_hc_driver, &op->dev, "PPC-OF USB"); if (!hcd) return -ENOMEM; hcd->rsrc_start = res.start; hcd->rsrc_len = resource_size(&res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } ehci = hcd_to_ehci(hcd); np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx"); if (np != NULL) { /* claim we really affected by usb23 erratum */ if (!of_address_to_resource(np, 0, &res)) ehci->ohci_hcctrl_reg = ioremap(res.start + OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); else pr_debug("%s: no ohci offset in fdt\n", __FILE__); if (!ehci->ohci_hcctrl_reg) { pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__); } else { ehci->has_amcc_usb23 = 1; } } if (of_get_property(dn, "big-endian", NULL)) { ehci->big_endian_mmio = 1; ehci->big_endian_desc = 1; } if (of_get_property(dn, "big-endian-regs", NULL)) ehci->big_endian_mmio = 1; if (of_get_property(dn, "big-endian-desc", NULL)) ehci->big_endian_desc = 1; ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); /* cache this readonly data; minimize chip reads */ ehci->hcs_params 
= ehci_readl(ehci, &ehci->caps->hcs_params); if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) { rv = ppc44x_enable_bmt(dn); ehci_dbg(ehci, "Break Memory Transfer (BMT) is %senabled!\n", rv ? "NOT ": ""); } rv = usb_add_hcd(hcd, irq, 0); if (rv) goto err_ehci; return 0; err_ehci: if (ehci->has_amcc_usb23) iounmap(ehci->ohci_hcctrl_reg); iounmap(hcd->regs); err_ioremap: irq_dispose_mapping(irq); err_irq: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err_rmr: usb_put_hcd(hcd); return rv; } static int ehci_hcd_ppc_of_remove(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct device_node *np; struct resource res; dev_set_drvdata(&op->dev, NULL); dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n"); usb_remove_hcd(hcd); iounmap(hcd->regs); irq_dispose_mapping(hcd->irq); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); /* use request_mem_region to test if the ohci driver is loaded. if so * ensure the ohci core is operational. 
*/ if (ehci->has_amcc_usb23) { np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx"); if (np != NULL) { if (!of_address_to_resource(np, 0, &res)) if (!request_mem_region(res.start, 0x4, hcd_name)) set_ohci_hcfs(ehci, 1); else release_mem_region(res.start, 0x4); else pr_debug("%s: no ohci offset in fdt\n", __FILE__); of_node_put(np); } iounmap(ehci->ohci_hcctrl_reg); } usb_put_hcd(hcd); return 0; } static void ehci_hcd_ppc_of_shutdown(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static const struct of_device_id ehci_hcd_ppc_of_match[] = { { .compatible = "usb-ehci", }, {}, }; MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct platform_driver ehci_hcd_ppc_of_driver = { .probe = ehci_hcd_ppc_of_probe, .remove = ehci_hcd_ppc_of_remove, .shutdown = ehci_hcd_ppc_of_shutdown, .driver = { .name = "ppc-of-ehci", .owner = THIS_MODULE, .of_match_table = ehci_hcd_ppc_of_match, }, };
gpl-2.0
pranav01/kernel_sprout
drivers/media/rc/keymaps/rc-dib0700-nec.c
7313
3393
/* rc-dvb0700-big.c - Keytable for devices in dvb0700 * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * TODO: This table is a real mess, as it merges RC codes from several * devices into a big table. It also has both RC-5 and NEC codes inside. * It should be broken into small tables, and the protocols should properly * be indentificated. * * The table were imported from dib0700_devices.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> static struct rc_map_table dib0700_nec_table[] = { /* Key codes for the Pixelview SBTVD remote */ { 0x866b13, KEY_MUTE }, { 0x866b12, KEY_POWER }, { 0x866b01, KEY_1 }, { 0x866b02, KEY_2 }, { 0x866b03, KEY_3 }, { 0x866b04, KEY_4 }, { 0x866b05, KEY_5 }, { 0x866b06, KEY_6 }, { 0x866b07, KEY_7 }, { 0x866b08, KEY_8 }, { 0x866b09, KEY_9 }, { 0x866b00, KEY_0 }, { 0x866b0d, KEY_CHANNELUP }, { 0x866b19, KEY_CHANNELDOWN }, { 0x866b10, KEY_VOLUMEUP }, { 0x866b0c, KEY_VOLUMEDOWN }, { 0x866b0a, KEY_CAMERA }, { 0x866b0b, KEY_ZOOM }, { 0x866b1b, KEY_BACKSPACE }, { 0x866b15, KEY_ENTER }, { 0x866b1d, KEY_UP }, { 0x866b1e, KEY_DOWN }, { 0x866b0e, KEY_LEFT }, { 0x866b0f, KEY_RIGHT }, { 0x866b18, KEY_RECORD }, { 0x866b1a, KEY_STOP }, /* Key codes for the EvolutePC TVWay+ remote */ { 0x7a00, KEY_MENU }, { 0x7a01, KEY_RECORD }, { 0x7a02, KEY_PLAY }, { 0x7a03, KEY_STOP }, { 0x7a10, KEY_CHANNELUP }, { 0x7a11, KEY_CHANNELDOWN }, { 0x7a12, KEY_VOLUMEUP }, { 0x7a13, KEY_VOLUMEDOWN }, { 0x7a40, KEY_POWER }, { 0x7a41, KEY_MUTE }, /* Key codes for the Elgato EyeTV Diversity silver remote */ { 0x4501, KEY_POWER }, { 0x4502, KEY_MUTE }, { 0x4503, KEY_1 }, { 0x4504, KEY_2 }, { 0x4505, KEY_3 }, { 0x4506, KEY_4 }, { 0x4507, KEY_5 }, { 0x4508, KEY_6 }, { 0x4509, KEY_7 }, { 0x450a, KEY_8 }, { 
0x450b, KEY_9 }, { 0x450c, KEY_LAST }, { 0x450d, KEY_0 }, { 0x450e, KEY_ENTER }, { 0x450f, KEY_RED }, { 0x4510, KEY_CHANNELUP }, { 0x4511, KEY_GREEN }, { 0x4512, KEY_VOLUMEDOWN }, { 0x4513, KEY_OK }, { 0x4514, KEY_VOLUMEUP }, { 0x4515, KEY_YELLOW }, { 0x4516, KEY_CHANNELDOWN }, { 0x4517, KEY_BLUE }, { 0x4518, KEY_LEFT }, /* Skip backwards */ { 0x4519, KEY_PLAYPAUSE }, { 0x451a, KEY_RIGHT }, /* Skip forward */ { 0x451b, KEY_REWIND }, { 0x451c, KEY_L }, /* Live */ { 0x451d, KEY_FASTFORWARD }, { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */ { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */ { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */ { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */ { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */ }; static struct rc_map_list dib0700_nec_map = { .map = { .scan = dib0700_nec_table, .size = ARRAY_SIZE(dib0700_nec_table), .rc_type = RC_TYPE_NEC, .name = RC_MAP_DIB0700_NEC_TABLE, } }; static int __init init_rc_map(void) { return rc_map_register(&dib0700_nec_map); } static void __exit exit_rc_map(void) { rc_map_unregister(&dib0700_nec_map); } module_init(init_rc_map) module_exit(exit_rc_map) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Pauliecoon/android_kernel_motorola_shamu_benzoCore
arch/powerpc/platforms/ps3/device-init.c
8849
23553
/* * PS3 device registration routines. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/freezer.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/reboot.h> #include <asm/firmware.h> #include <asm/lv1call.h> #include <asm/ps3stor.h> #include "platform.h" static int __init ps3_register_lpm_devices(void) { int result; u64 tmp1; u64 tmp2; struct ps3_system_bus_device *dev; pr_debug(" -> %s:%d\n", __func__, __LINE__); dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->match_id = PS3_MATCH_ID_LPM; dev->dev_type = PS3_DEVICE_TYPE_LPM; /* The current lpm driver only supports a single BE processor. 
*/ result = ps3_repository_read_be_node_id(0, &dev->lpm.node_id); if (result) { pr_debug("%s:%d: ps3_repository_read_be_node_id failed \n", __func__, __LINE__); goto fail_read_repo; } result = ps3_repository_read_lpm_privileges(dev->lpm.node_id, &tmp1, &dev->lpm.rights); if (result) { pr_debug("%s:%d: ps3_repository_read_lpm_privleges failed \n", __func__, __LINE__); goto fail_read_repo; } lv1_get_logical_partition_id(&tmp2); if (tmp1 != tmp2) { pr_debug("%s:%d: wrong lpar\n", __func__, __LINE__); result = -ENODEV; goto fail_rights; } if (!(dev->lpm.rights & PS3_LPM_RIGHTS_USE_LPM)) { pr_debug("%s:%d: don't have rights to use lpm\n", __func__, __LINE__); result = -EPERM; goto fail_rights; } pr_debug("%s:%d: pu_id %llu, rights %llu(%llxh)\n", __func__, __LINE__, dev->lpm.pu_id, dev->lpm.rights, dev->lpm.rights); result = ps3_repository_read_pu_id(0, &dev->lpm.pu_id); if (result) { pr_debug("%s:%d: ps3_repository_read_pu_id failed \n", __func__, __LINE__); goto fail_read_repo; } result = ps3_system_bus_device_register(dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; fail_register: fail_rights: fail_read_repo: kfree(dev); pr_debug(" <- %s:%d: failed\n", __func__, __LINE__); return result; } /** * ps3_setup_gelic_device - Setup and register a gelic device instance. * * Allocates memory for a struct ps3_system_bus_device instance, initialises the * structure members, and registers the device instance with the system bus. 
*/ static int __init ps3_setup_gelic_device( const struct ps3_repository_device *repo) { int result; struct layout { struct ps3_system_bus_device dev; struct ps3_dma_region d_region; } *p; pr_debug(" -> %s:%d\n", __func__, __LINE__); BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB); BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_GELIC); p = kzalloc(sizeof(struct layout), GFP_KERNEL); if (!p) { result = -ENOMEM; goto fail_malloc; } p->dev.match_id = PS3_MATCH_ID_GELIC; p->dev.dev_type = PS3_DEVICE_TYPE_SB; p->dev.bus_id = repo->bus_id; p->dev.dev_id = repo->dev_id; p->dev.d_region = &p->d_region; result = ps3_repository_find_interrupt(repo, PS3_INTERRUPT_TYPE_EVENT_PORT, &p->dev.interrupt_id); if (result) { pr_debug("%s:%d ps3_repository_find_interrupt failed\n", __func__, __LINE__); goto fail_find_interrupt; } BUG_ON(p->dev.interrupt_id != 0); result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0); if (result) { pr_debug("%s:%d ps3_dma_region_init failed\n", __func__, __LINE__); goto fail_dma_init; } result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; fail_device_register: fail_dma_init: fail_find_interrupt: kfree(p); fail_malloc: pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__); return result; } static int __init_refok ps3_setup_uhc_device( const struct ps3_repository_device *repo, enum ps3_match_id match_id, enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) { int result; struct layout { struct ps3_system_bus_device dev; struct ps3_dma_region d_region; struct ps3_mmio_region m_region; } *p; u64 bus_addr; u64 len; pr_debug(" -> %s:%d\n", __func__, __LINE__); BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB); BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_USB); p = kzalloc(sizeof(struct layout), GFP_KERNEL); if (!p) { result = -ENOMEM; goto fail_malloc; } 
p->dev.match_id = match_id; p->dev.dev_type = PS3_DEVICE_TYPE_SB; p->dev.bus_id = repo->bus_id; p->dev.dev_id = repo->dev_id; p->dev.d_region = &p->d_region; p->dev.m_region = &p->m_region; result = ps3_repository_find_interrupt(repo, interrupt_type, &p->dev.interrupt_id); if (result) { pr_debug("%s:%d ps3_repository_find_interrupt failed\n", __func__, __LINE__); goto fail_find_interrupt; } result = ps3_repository_find_reg(repo, reg_type, &bus_addr, &len); if (result) { pr_debug("%s:%d ps3_repository_find_reg failed\n", __func__, __LINE__); goto fail_find_reg; } result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K, PS3_DMA_INTERNAL, NULL, 0); if (result) { pr_debug("%s:%d ps3_dma_region_init failed\n", __func__, __LINE__); goto fail_dma_init; } result = ps3_mmio_region_init(&p->dev, p->dev.m_region, bus_addr, len, PS3_MMIO_4K); if (result) { pr_debug("%s:%d ps3_mmio_region_init failed\n", __func__, __LINE__); goto fail_mmio_init; } result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; fail_device_register: fail_mmio_init: fail_dma_init: fail_find_reg: fail_find_interrupt: kfree(p); fail_malloc: pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__); return result; } static int __init ps3_setup_ehci_device( const struct ps3_repository_device *repo) { return ps3_setup_uhc_device(repo, PS3_MATCH_ID_EHCI, PS3_INTERRUPT_TYPE_SB_EHCI, PS3_REG_TYPE_SB_EHCI); } static int __init ps3_setup_ohci_device( const struct ps3_repository_device *repo) { return ps3_setup_uhc_device(repo, PS3_MATCH_ID_OHCI, PS3_INTERRUPT_TYPE_SB_OHCI, PS3_REG_TYPE_SB_OHCI); } static int __init ps3_setup_vuart_device(enum ps3_match_id match_id, unsigned int port_number) { int result; struct layout { struct ps3_system_bus_device dev; } *p; pr_debug(" -> %s:%d: match_id %u, port %u\n", __func__, __LINE__, match_id, 
port_number); p = kzalloc(sizeof(struct layout), GFP_KERNEL); if (!p) return -ENOMEM; p->dev.match_id = match_id; p->dev.dev_type = PS3_DEVICE_TYPE_VUART; p->dev.port_number = port_number; result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; fail_device_register: kfree(p); pr_debug(" <- %s:%d fail\n", __func__, __LINE__); return result; } static int ps3_setup_storage_dev(const struct ps3_repository_device *repo, enum ps3_match_id match_id) { int result; struct ps3_storage_device *p; u64 port, blk_size, num_blocks; unsigned int num_regions, i; pr_debug(" -> %s:%u: match_id %u\n", __func__, __LINE__, match_id); result = ps3_repository_read_stor_dev_info(repo->bus_index, repo->dev_index, &port, &blk_size, &num_blocks, &num_regions); if (result) { printk(KERN_ERR "%s:%u: _read_stor_dev_info failed %d\n", __func__, __LINE__, result); return -ENODEV; } pr_debug("%s:%u: (%u:%u:%u): port %llu blk_size %llu num_blocks %llu " "num_regions %u\n", __func__, __LINE__, repo->bus_index, repo->dev_index, repo->dev_type, port, blk_size, num_blocks, num_regions); p = kzalloc(sizeof(struct ps3_storage_device) + num_regions * sizeof(struct ps3_storage_region), GFP_KERNEL); if (!p) { result = -ENOMEM; goto fail_malloc; } p->sbd.match_id = match_id; p->sbd.dev_type = PS3_DEVICE_TYPE_SB; p->sbd.bus_id = repo->bus_id; p->sbd.dev_id = repo->dev_id; p->sbd.d_region = &p->dma_region; p->blk_size = blk_size; p->num_regions = num_regions; result = ps3_repository_find_interrupt(repo, PS3_INTERRUPT_TYPE_EVENT_PORT, &p->sbd.interrupt_id); if (result) { printk(KERN_ERR "%s:%u: find_interrupt failed %d\n", __func__, __LINE__, result); result = -ENODEV; goto fail_find_interrupt; } for (i = 0; i < num_regions; i++) { unsigned int id; u64 start, size; result = ps3_repository_read_stor_dev_region(repo->bus_index, repo->dev_index, 
i, &id, &start, &size); if (result) { printk(KERN_ERR "%s:%u: read_stor_dev_region failed %d\n", __func__, __LINE__, result); result = -ENODEV; goto fail_read_region; } pr_debug("%s:%u: region %u: id %u start %llu size %llu\n", __func__, __LINE__, i, id, start, size); p->regions[i].id = id; p->regions[i].start = start; p->regions[i].size = size; } result = ps3_system_bus_device_register(&p->sbd); if (result) { pr_debug("%s:%u ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%u\n", __func__, __LINE__); return 0; fail_device_register: fail_read_region: fail_find_interrupt: kfree(p); fail_malloc: pr_debug(" <- %s:%u: fail.\n", __func__, __LINE__); return result; } static int __init ps3_register_vuart_devices(void) { int result; unsigned int port_number; pr_debug(" -> %s:%d\n", __func__, __LINE__); result = ps3_repository_read_vuart_av_port(&port_number); if (result) port_number = 0; /* av default */ result = ps3_setup_vuart_device(PS3_MATCH_ID_AV_SETTINGS, port_number); WARN_ON(result); result = ps3_repository_read_vuart_sysmgr_port(&port_number); if (result) port_number = 2; /* sysmgr default */ result = ps3_setup_vuart_device(PS3_MATCH_ID_SYSTEM_MANAGER, port_number); WARN_ON(result); pr_debug(" <- %s:%d\n", __func__, __LINE__); return result; } static int __init ps3_register_sound_devices(void) { int result; struct layout { struct ps3_system_bus_device dev; struct ps3_dma_region d_region; struct ps3_mmio_region m_region; } *p; pr_debug(" -> %s:%d\n", __func__, __LINE__); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; p->dev.match_id = PS3_MATCH_ID_SOUND; p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; p->dev.d_region = &p->d_region; p->dev.m_region = &p->m_region; result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; 
fail_device_register: kfree(p); pr_debug(" <- %s:%d failed\n", __func__, __LINE__); return result; } static int __init ps3_register_graphics_devices(void) { int result; struct layout { struct ps3_system_bus_device dev; } *p; pr_debug(" -> %s:%d\n", __func__, __LINE__); p = kzalloc(sizeof(struct layout), GFP_KERNEL); if (!p) return -ENOMEM; p->dev.match_id = PS3_MATCH_ID_GPU; p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB; p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; fail_device_register: kfree(p); pr_debug(" <- %s:%d failed\n", __func__, __LINE__); return result; } static int __init ps3_register_ramdisk_device(void) { int result; struct layout { struct ps3_system_bus_device dev; } *p; pr_debug(" -> %s:%d\n", __func__, __LINE__); p = kzalloc(sizeof(struct layout), GFP_KERNEL); if (!p) return -ENOMEM; p->dev.match_id = PS3_MATCH_ID_GPU; p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK; p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; result = ps3_system_bus_device_register(&p->dev); if (result) { pr_debug("%s:%d ps3_system_bus_device_register failed\n", __func__, __LINE__); goto fail_device_register; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; fail_device_register: kfree(p); pr_debug(" <- %s:%d failed\n", __func__, __LINE__); return result; } /** * ps3_setup_dynamic_device - Setup a dynamic device from the repository */ static int ps3_setup_dynamic_device(const struct ps3_repository_device *repo) { int result; switch (repo->dev_type) { case PS3_DEV_TYPE_STOR_DISK: result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); /* Some devices are not accessible from the Other OS lpar. 
*/ if (result == -ENODEV) { result = 0; pr_debug("%s:%u: not accessible\n", __func__, __LINE__); } if (result) pr_debug("%s:%u ps3_setup_storage_dev failed\n", __func__, __LINE__); break; case PS3_DEV_TYPE_STOR_ROM: result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_ROM); if (result) pr_debug("%s:%u ps3_setup_storage_dev failed\n", __func__, __LINE__); break; case PS3_DEV_TYPE_STOR_FLASH: result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_FLASH); if (result) pr_debug("%s:%u ps3_setup_storage_dev failed\n", __func__, __LINE__); break; default: result = 0; pr_debug("%s:%u: unsupported dev_type %u\n", __func__, __LINE__, repo->dev_type); } return result; } /** * ps3_setup_static_device - Setup a static device from the repository */ static int __init ps3_setup_static_device(const struct ps3_repository_device *repo) { int result; switch (repo->dev_type) { case PS3_DEV_TYPE_SB_GELIC: result = ps3_setup_gelic_device(repo); if (result) { pr_debug("%s:%d ps3_setup_gelic_device failed\n", __func__, __LINE__); } break; case PS3_DEV_TYPE_SB_USB: /* Each USB device has both an EHCI and an OHCI HC */ result = ps3_setup_ehci_device(repo); if (result) { pr_debug("%s:%d ps3_setup_ehci_device failed\n", __func__, __LINE__); } result = ps3_setup_ohci_device(repo); if (result) { pr_debug("%s:%d ps3_setup_ohci_device failed\n", __func__, __LINE__); } break; default: return ps3_setup_dynamic_device(repo); } return result; } static void ps3_find_and_add_device(u64 bus_id, u64 dev_id) { struct ps3_repository_device repo; int res; unsigned int retries; unsigned long rem; /* * On some firmware versions (e.g. 
1.90), the device may not show up * in the repository immediately */ for (retries = 0; retries < 10; retries++) { res = ps3_repository_find_device_by_id(&repo, bus_id, dev_id); if (!res) goto found; rem = msleep_interruptible(100); if (rem) break; } pr_warning("%s:%u: device %llu:%llu not found\n", __func__, __LINE__, bus_id, dev_id); return; found: if (retries) pr_debug("%s:%u: device %llu:%llu found after %u retries\n", __func__, __LINE__, bus_id, dev_id, retries); ps3_setup_dynamic_device(&repo); return; } #define PS3_NOTIFICATION_DEV_ID ULONG_MAX #define PS3_NOTIFICATION_INTERRUPT_ID 0 struct ps3_notification_device { struct ps3_system_bus_device sbd; spinlock_t lock; u64 tag; u64 lv1_status; struct completion done; }; enum ps3_notify_type { notify_device_ready = 0, notify_region_probe = 1, notify_region_update = 2, }; struct ps3_notify_cmd { u64 operation_code; /* must be zero */ u64 event_mask; /* OR of 1UL << enum ps3_notify_type */ }; struct ps3_notify_event { u64 event_type; /* enum ps3_notify_type */ u64 bus_id; u64 dev_id; u64 dev_type; u64 dev_port; }; static irqreturn_t ps3_notification_interrupt(int irq, void *data) { struct ps3_notification_device *dev = data; int res; u64 tag, status; spin_lock(&dev->lock); res = lv1_storage_get_async_status(PS3_NOTIFICATION_DEV_ID, &tag, &status); if (tag != dev->tag) pr_err("%s:%u: tag mismatch, got %llx, expected %llx\n", __func__, __LINE__, tag, dev->tag); if (res) { pr_err("%s:%u: res %d status 0x%llx\n", __func__, __LINE__, res, status); } else { pr_debug("%s:%u: completed, status 0x%llx\n", __func__, __LINE__, status); dev->lv1_status = status; complete(&dev->done); } spin_unlock(&dev->lock); return IRQ_HANDLED; } static int ps3_notification_read_write(struct ps3_notification_device *dev, u64 lpar, int write) { const char *op = write ? "write" : "read"; unsigned long flags; int res; init_completion(&dev->done); spin_lock_irqsave(&dev->lock, flags); res = write ? 
lv1_storage_write(dev->sbd.dev_id, 0, 0, 1, 0, lpar, &dev->tag) : lv1_storage_read(dev->sbd.dev_id, 0, 0, 1, 0, lpar, &dev->tag); spin_unlock_irqrestore(&dev->lock, flags); if (res) { pr_err("%s:%u: %s failed %d\n", __func__, __LINE__, op, res); return -EPERM; } pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); res = wait_event_interruptible(dev->done.wait, dev->done.done || kthread_should_stop()); if (kthread_should_stop()) res = -EINTR; if (res) { pr_debug("%s:%u: interrupted %s\n", __func__, __LINE__, op); return res; } if (dev->lv1_status) { pr_err("%s:%u: %s not completed, status 0x%llx\n", __func__, __LINE__, op, dev->lv1_status); return -EIO; } pr_debug("%s:%u: notification %s completed\n", __func__, __LINE__, op); return 0; } static struct task_struct *probe_task; /** * ps3_probe_thread - Background repository probing at system startup. * * This implementation only supports background probing on a single bus. * It uses the hypervisor's storage device notification mechanism to wait until * a storage device is ready. The device notification mechanism uses a * pseudo device to asynchronously notify the guest when storage devices become * ready. The notification device has a block size of 512 bytes. 
*/ static int ps3_probe_thread(void *data) { struct ps3_notification_device dev; int res; unsigned int irq; u64 lpar; void *buf; struct ps3_notify_cmd *notify_cmd; struct ps3_notify_event *notify_event; pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__); buf = kzalloc(512, GFP_KERNEL); if (!buf) return -ENOMEM; lpar = ps3_mm_phys_to_lpar(__pa(buf)); notify_cmd = buf; notify_event = buf; /* dummy system bus device */ dev.sbd.bus_id = (u64)data; dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0); if (res) { pr_err("%s:%u: lv1_open_device failed %s\n", __func__, __LINE__, ps3_result(res)); goto fail_free; } res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY, &irq); if (res) { pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n", __func__, __LINE__, res); goto fail_close_device; } spin_lock_init(&dev.lock); res = request_irq(irq, ps3_notification_interrupt, 0, "ps3_notification", &dev); if (res) { pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, res); goto fail_sb_event_receive_port_destroy; } /* Setup and write the request for device notification. */ notify_cmd->operation_code = 0; /* must be zero */ notify_cmd->event_mask = 1UL << notify_region_probe; res = ps3_notification_read_write(&dev, lpar, 1); if (res) goto fail_free_irq; /* Loop here processing the requested notification events. 
*/ do { try_to_freeze(); memset(notify_event, 0, sizeof(*notify_event)); res = ps3_notification_read_write(&dev, lpar, 0); if (res) break; pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu" " type %llu port %llu\n", __func__, __LINE__, notify_event->event_type, notify_event->bus_id, notify_event->dev_id, notify_event->dev_type, notify_event->dev_port); if (notify_event->event_type != notify_region_probe || notify_event->bus_id != dev.sbd.bus_id) { pr_warning("%s:%u: bad notify_event: event %llu, " "dev_id %llu, dev_type %llu\n", __func__, __LINE__, notify_event->event_type, notify_event->dev_id, notify_event->dev_type); continue; } ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id); } while (!kthread_should_stop()); fail_free_irq: free_irq(irq, &dev); fail_sb_event_receive_port_destroy: ps3_sb_event_receive_port_destroy(&dev.sbd, irq); fail_close_device: lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id); fail_free: kfree(buf); probe_task = NULL; pr_debug(" <- %s:%u: kthread finished\n", __func__, __LINE__); return 0; } /** * ps3_stop_probe_thread - Stops the background probe thread. * */ static int ps3_stop_probe_thread(struct notifier_block *nb, unsigned long code, void *data) { if (probe_task) kthread_stop(probe_task); return 0; } static struct notifier_block nb = { .notifier_call = ps3_stop_probe_thread }; /** * ps3_start_probe_thread - Starts the background probe thread. 
* */ static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type) { int result; struct task_struct *task; struct ps3_repository_device repo; pr_debug(" -> %s:%d\n", __func__, __LINE__); memset(&repo, 0, sizeof(repo)); repo.bus_type = bus_type; result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index); if (result) { printk(KERN_ERR "%s: Cannot find bus (%d)\n", __func__, result); return -ENODEV; } result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id); if (result) { printk(KERN_ERR "%s: read_bus_id failed %d\n", __func__, result); return -ENODEV; } task = kthread_run(ps3_probe_thread, (void *)repo.bus_id, "ps3-probe-%u", bus_type); if (IS_ERR(task)) { result = PTR_ERR(task); printk(KERN_ERR "%s: kthread_run failed %d\n", __func__, result); return result; } probe_task = task; register_reboot_notifier(&nb); pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; } /** * ps3_register_devices - Probe the system and register devices found. * * A device_initcall() routine. */ static int __init ps3_register_devices(void) { int result; if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; pr_debug(" -> %s:%d\n", __func__, __LINE__); /* ps3_repository_dump_bus_info(); */ result = ps3_start_probe_thread(PS3_BUS_TYPE_STORAGE); ps3_register_vuart_devices(); ps3_register_graphics_devices(); ps3_repository_find_devices(PS3_BUS_TYPE_SB, ps3_setup_static_device); ps3_register_sound_devices(); ps3_register_lpm_devices(); ps3_register_ramdisk_device(); pr_debug(" <- %s:%d\n", __func__, __LINE__); return 0; } device_initcall(ps3_register_devices);
gpl-2.0
GustavoRD78/78Kernel-Z3-Kit-Kat
arch/mips/kernel/watch.c
9361
4633
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 David Daney */ #include <linux/sched.h> #include <asm/processor.h> #include <asm/watch.h> /* * Install the watch registers for the current thread. A maximum of * four registers are installed although the machine may have more. */ void mips_install_watch_registers(void) { struct mips3264_watch_reg_state *watches = &current->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: write_c0_watchlo3(watches->watchlo[3]); /* Write 1 to the I, R, and W bits to clear them, and 1 to G so all ASIDs are trapped. */ write_c0_watchhi3(0x40000007 | watches->watchhi[3]); case 3: write_c0_watchlo2(watches->watchlo[2]); write_c0_watchhi2(0x40000007 | watches->watchhi[2]); case 2: write_c0_watchlo1(watches->watchlo[1]); write_c0_watchhi1(0x40000007 | watches->watchhi[1]); case 1: write_c0_watchlo0(watches->watchlo[0]); write_c0_watchhi0(0x40000007 | watches->watchhi[0]); } } /* * Read back the watchhi registers so the user space debugger has * access to the I, R, and W bits. A maximum of four registers are * read although the machine may have more. */ void mips_read_watch_registers(void) { struct mips3264_watch_reg_state *watches = &current->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff); case 3: watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff); case 2: watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff); case 1: watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff); } if (current_cpu_data.watch_reg_use_cnt == 1 && (watches->watchhi[0] & 7) == 0) { /* Pathological case of release 1 architecture that * doesn't set the condition bits. 
We assume that * since we got here, the watch condition was met and * signal that the conditions requested in watchlo * were met. */ watches->watchhi[0] |= (watches->watchlo[0] & 7); } } /* * Disable all watch registers. Although only four registers are * installed, all are cleared to eliminate the possibility of endless * looping in the watch handler. */ void mips_clear_watch_registers(void) { switch (current_cpu_data.watch_reg_count) { default: BUG(); case 8: write_c0_watchlo7(0); case 7: write_c0_watchlo6(0); case 6: write_c0_watchlo5(0); case 5: write_c0_watchlo4(0); case 4: write_c0_watchlo3(0); case 3: write_c0_watchlo2(0); case 2: write_c0_watchlo1(0); case 1: write_c0_watchlo0(0); } } __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) { unsigned int t; if ((c->options & MIPS_CPU_WATCH) == 0) return; /* * Check which of the I,R and W bits are supported, then * disable the register. */ write_c0_watchlo0(7); t = read_c0_watchlo0(); write_c0_watchlo0(0); c->watch_reg_masks[0] = t & 7; /* Write the mask bits and read them back to determine which * can be used. 
*/ c->watch_reg_count = 1; c->watch_reg_use_cnt = 1; t = read_c0_watchhi0(); write_c0_watchhi0(t | 0xff8); t = read_c0_watchhi0(); c->watch_reg_masks[0] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo1(7); t = read_c0_watchlo1(); write_c0_watchlo1(0); c->watch_reg_masks[1] = t & 7; c->watch_reg_count = 2; c->watch_reg_use_cnt = 2; t = read_c0_watchhi1(); write_c0_watchhi1(t | 0xff8); t = read_c0_watchhi1(); c->watch_reg_masks[1] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo2(7); t = read_c0_watchlo2(); write_c0_watchlo2(0); c->watch_reg_masks[2] = t & 7; c->watch_reg_count = 3; c->watch_reg_use_cnt = 3; t = read_c0_watchhi2(); write_c0_watchhi2(t | 0xff8); t = read_c0_watchhi2(); c->watch_reg_masks[2] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo3(7); t = read_c0_watchlo3(); write_c0_watchlo3(0); c->watch_reg_masks[3] = t & 7; c->watch_reg_count = 4; c->watch_reg_use_cnt = 4; t = read_c0_watchhi3(); write_c0_watchhi3(t | 0xff8); t = read_c0_watchhi3(); c->watch_reg_masks[3] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; /* We use at most 4, but probe and report up to 8. */ c->watch_reg_count = 5; t = read_c0_watchhi4(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 6; t = read_c0_watchhi5(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 7; t = read_c0_watchhi6(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 8; }
gpl-2.0
leilihh/linux
drivers/media/usb/tm6000/tm6000-i2c.c
9617
8434
/* * tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - Fix SMBus Read Byte command * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/usb.h> #include <linux/i2c.h> #include "tm6000.h" #include "tm6000-regs.h" #include <media/v4l2-common.h> #include <media/tuner.h> #include "tuner-xc2028.h" /* ----------------------------------------------------------- */ static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); #define i2c_dprintk(lvl, fmt, args...) 
if (i2c_debug >= lvl) do { \ printk(KERN_DEBUG "%s at %s: " fmt, \ dev->name, __func__, ##args); } while (0) static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr, __u8 reg, char *buf, int len) { int rc; unsigned int i2c_packet_limit = 16; if (dev->dev_type == TM6010) i2c_packet_limit = 80; if (!buf) return -1; if (len < 1 || len > i2c_packet_limit) { printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n", len, i2c_packet_limit); return -1; } /* capture mutex */ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len); if (rc < 0) { /* release mutex */ return rc; } /* release mutex */ return rc; } /* Generic read - doesn't work fine with 16bit registers */ static int tm6000_i2c_recv_regs(struct tm6000_core *dev, unsigned char addr, __u8 reg, char *buf, int len) { int rc; u8 b[2]; unsigned int i2c_packet_limit = 16; if (dev->dev_type == TM6010) i2c_packet_limit = 64; if (!buf) return -1; if (len < 1 || len > i2c_packet_limit) { printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n", len, i2c_packet_limit); return -1; } /* capture mutex */ if ((dev->caps.has_zl10353) && (dev->demod_addr << 1 == addr) && (reg % 2 == 0)) { /* * Workaround an I2C bug when reading from zl10353 */ reg -= 1; len += 1; rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, b, len); *buf = b[1]; } else { rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len); } /* release mutex */ return rc; } /* * read from a 16bit register * for example xc2028, xc3028 or xc3028L */ static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr, __u16 reg, char *buf, int len) { int rc; unsigned char ureg; if (!buf || len != 2) return -1; /* capture mutex */ if (dev->dev_type == TM6010) { ureg = reg 
& 0xFF; rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN, addr | (reg & 0xFF00), 0, &ureg, 1); if (rc < 0) { /* release mutex */ return rc; } rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_35_AFTEK_TUNER_READ, reg, 0, buf, len); } else { rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, REQ_14_SET_GET_I2C_WR2_RDN, addr, reg, buf, len); } /* release mutex */ return rc; } static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct tm6000_core *dev = i2c_adap->algo_data; int addr, rc, i, byte; if (num <= 0) return 0; for (i = 0; i < num; i++) { addr = (msgs[i].addr << 1) & 0xff; i2c_dprintk(2, "%s %s addr=0x%x len=%d:", (msgs[i].flags & I2C_M_RD) ? "read" : "write", i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len); if (msgs[i].flags & I2C_M_RD) { /* read request without preceding register selection */ /* * The TM6000 only supports a read transaction * immediately after a 1 or 2 byte write to select * a register. We cannot fulfil this request. */ i2c_dprintk(2, " read without preceding write not" " supported"); rc = -EOPNOTSUPP; goto err; } else if (i + 1 < num && msgs[i].len <= 2 && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { /* 1 or 2 byte write followed by a read */ if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); i2c_dprintk(2, "; joined to read %s len=%d:", i == num - 2 ? 
"stop" : "nonstop", msgs[i + 1].len); if (msgs[i].len == 2) { rc = tm6000_i2c_recv_regs16(dev, addr, msgs[i].buf[0] << 8 | msgs[i].buf[1], msgs[i + 1].buf, msgs[i + 1].len); } else { rc = tm6000_i2c_recv_regs(dev, addr, msgs[i].buf[0], msgs[i + 1].buf, msgs[i + 1].len); } i++; if (addr == dev->tuner_addr << 1) { tm6000_set_reg(dev, REQ_50_SET_START, 0, 0); tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0); } if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); } else { /* write bytes */ if (i2c_debug >= 2) for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0], msgs[i].buf + 1, msgs[i].len - 1); } if (i2c_debug >= 2) printk(KERN_CONT "\n"); if (rc < 0) goto err; } return num; err: i2c_dprintk(2, " ERROR: %i\n", rc); return rc; } static int tm6000_i2c_eeprom(struct tm6000_core *dev) { int i, rc; unsigned char *p = dev->eedata; unsigned char bytes[17]; dev->i2c_client.addr = 0xa0 >> 1; dev->eedata_size = 0; bytes[16] = '\0'; for (i = 0; i < sizeof(dev->eedata); ) { *p = i; rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1); if (rc < 1) { if (p == dev->eedata) goto noeeprom; else { printk(KERN_WARNING "%s: i2c eeprom read error (err=%d)\n", dev->name, rc); } return -EINVAL; } dev->eedata_size++; p++; if (0 == (i % 16)) printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i); printk(KERN_CONT " %02x", dev->eedata[i]); if ((dev->eedata[i] >= ' ') && (dev->eedata[i] <= 'z')) bytes[i%16] = dev->eedata[i]; else bytes[i%16] = '.'; i++; if (0 == (i % 16)) { bytes[16] = '\0'; printk(KERN_CONT " %s\n", bytes); } } if (0 != (i%16)) { bytes[i%16] = '\0'; for (i %= 16; i < 16; i++) printk(KERN_CONT " "); printk(KERN_CONT " %s\n", bytes); } return 0; noeeprom: printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n", dev->name, rc); return -EINVAL; } /* ----------------------------------------------------------- */ /* * functionality() */ static u32 
functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm tm6000_algo = { .master_xfer = tm6000_i2c_xfer, .functionality = functionality, }; /* ----------------------------------------------------------- */ /* * tm6000_i2c_register() * register i2c bus */ int tm6000_i2c_register(struct tm6000_core *dev) { int rc; dev->i2c_adap.owner = THIS_MODULE; dev->i2c_adap.algo = &tm6000_algo; dev->i2c_adap.dev.parent = &dev->udev->dev; strlcpy(dev->i2c_adap.name, dev->name, sizeof(dev->i2c_adap.name)); dev->i2c_adap.algo_data = dev; i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev); rc = i2c_add_adapter(&dev->i2c_adap); if (rc) return rc; dev->i2c_client.adapter = &dev->i2c_adap; strlcpy(dev->i2c_client.name, "tm6000 internal", I2C_NAME_SIZE); tm6000_i2c_eeprom(dev); return 0; } /* * tm6000_i2c_unregister() * unregister i2c_bus */ int tm6000_i2c_unregister(struct tm6000_core *dev) { i2c_del_adapter(&dev->i2c_adap); return 0; }
gpl-2.0
extremetempz/Wingray-Kernel
arch/tile/kernel/smp.c
146
5963
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * TILE SMP support routines. */ #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <asm/cacheflush.h> HV_Topology smp_topology __write_once; EXPORT_SYMBOL(smp_topology); #if CHIP_HAS_IPI() static unsigned long __iomem *ipi_mappings[NR_CPUS]; #endif /* * Top-level send_IPI*() functions to send messages to other cpus. */ /* Set by smp_send_stop() to avoid recursive panics. */ static int stopping_cpus; static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag) { int sent = 0; while (sent < nrecip) { int rc = hv_send_message(recip, nrecip, (HV_VirtAddr)&tag, sizeof(tag)); if (rc < 0) { if (!stopping_cpus) /* avoid recursive panic */ panic("hv_send_message returned %d", rc); break; } WARN_ONCE(rc == 0, "hv_send_message() returned zero\n"); sent += rc; } } void send_IPI_single(int cpu, int tag) { HV_Recipient recip = { .y = cpu / smp_width, .x = cpu % smp_width, .state = HV_TO_BE_SENT }; __send_IPI_many(&recip, 1, tag); } void send_IPI_many(const struct cpumask *mask, int tag) { HV_Recipient recip[NR_CPUS]; int cpu; int nrecip = 0; int my_cpu = smp_processor_id(); for_each_cpu(cpu, mask) { HV_Recipient *r; BUG_ON(cpu == my_cpu); r = &recip[nrecip++]; r->y = cpu / smp_width; r->x = cpu % smp_width; r->state = HV_TO_BE_SENT; } __send_IPI_many(recip, nrecip, tag); } void send_IPI_allbutself(int tag) { struct cpumask mask; cpumask_copy(&mask, cpu_online_mask); 
cpumask_clear_cpu(smp_processor_id(), &mask); send_IPI_many(&mask, tag); } /* * Provide smp_call_function_mask, but also run function locally * if specified in the mask. */ void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *), void *info, bool wait) { int cpu = get_cpu(); smp_call_function_many(mask, func, info, wait); if (cpumask_test_cpu(cpu, mask)) { local_irq_disable(); func(info); local_irq_enable(); } put_cpu(); } /* * Functions related to starting/stopping cpus. */ /* Handler to start the current cpu. */ static void smp_start_cpu_interrupt(void) { get_irq_regs()->pc = start_cpu_function_addr; } /* Handler to stop the current cpu. */ static void smp_stop_cpu_interrupt(void) { set_cpu_online(smp_processor_id(), 0); arch_local_irq_disable_all(); for (;;) asm("nap"); } /* This function calls the 'stop' function on all other CPUs in the system. */ void smp_send_stop(void) { stopping_cpus = 1; send_IPI_allbutself(MSG_TAG_STOP_CPU); } /* * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. */ void evaluate_message(int tag) { switch (tag) { case MSG_TAG_START_CPU: /* Start up a cpu */ smp_start_cpu_interrupt(); break; case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPU's */ smp_stop_cpu_interrupt(); break; case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */ generic_smp_call_function_interrupt(); break; case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */ generic_smp_call_function_single_interrupt(); break; default: panic("Unknown IPI message tag %d", tag); break; } } /* * flush_icache_range() code uses smp_call_function(). 
*/ struct ipi_flush { unsigned long start; unsigned long end; }; static void ipi_flush_icache_range(void *info) { struct ipi_flush *flush = (struct ipi_flush *) info; __flush_icache_range(flush->start, flush->end); } void flush_icache_range(unsigned long start, unsigned long end) { struct ipi_flush flush = { start, end }; preempt_disable(); on_each_cpu(ipi_flush_icache_range, &flush, 1); preempt_enable(); } /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ static irqreturn_t handle_reschedule_ipi(int irq, void *token) { /* * Nothing to do here; when we return from interrupt, the * rescheduling will occur there. But do bump the interrupt * profiler count in the meantime. */ __get_cpu_var(irq_stat).irq_resched_count++; return IRQ_HANDLED; } static struct irqaction resched_action = { .handler = handle_reschedule_ipi, .name = "resched", .dev_id = handle_reschedule_ipi /* unique token */, }; void __init ipi_init(void) { #if CHIP_HAS_IPI() int cpu; /* Map IPI trigger MMIO addresses. */ for_each_possible_cpu(cpu) { HV_Coord tile; HV_PTE pte; unsigned long offset; tile.x = cpu_x(cpu); tile.y = cpu_y(cpu); if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0) panic("Failed to initialize IPI for cpu %d\n", cpu); offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); } #endif /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */ tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU); BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); } #if CHIP_HAS_IPI() void smp_send_reschedule(int cpu) { WARN_ON(cpu_is_offline(cpu)); /* * We just want to do an MMIO store. The traditional writeq() * functions aren't really correct here, since they're always * directed at the PCI shim. For now, just do a raw store, * casting away the __iomem attribute. 
*/ ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0; } #else void smp_send_reschedule(int cpu) { HV_Coord coord; WARN_ON(cpu_is_offline(cpu)); coord.y = cpu_y(cpu); coord.x = cpu_x(cpu); hv_trigger_ipi(coord, IRQ_RESCHEDULE); } #endif /* CHIP_HAS_IPI() */
gpl-2.0
tohenk/android_kernel_samsung_smdk4x12
drivers/gpu/mali400/r3p2/mali/common/mali_gp_scheduler.c
146
16655
/*
 * Copyright (C) 2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "mali_gp_scheduler.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_scheduler.h"
#include "mali_gp.h"
#include "mali_gp_job.h"
#include "mali_group.h"
#include "mali_pm.h"
#include "mali_kernel_utilization.h"

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
#include <linux/sched.h>
#include <trace/events/gpu.h>
#endif

enum mali_gp_slot_state {
	MALI_GP_SLOT_STATE_IDLE,
	MALI_GP_SLOT_STATE_WORKING,
	MALI_GP_SLOT_STATE_DISABLED,
};

/* A render slot is an entity which jobs can be scheduled onto */
struct mali_gp_slot {
	struct mali_group *group;
	/*
	 * We keep track of the state here as well as in the group object
	 * so we don't need to take the group lock so often
	 * (and also avoid clutter with the working lock).
	 */
	enum mali_gp_slot_state state;
	u32 returned_cookie;
};

static u32 gp_version = 0;
static _MALI_OSK_LIST_HEAD(job_queue);	/* List of jobs with some unscheduled work */
static struct mali_gp_slot slot;	/* The single GP slot (there is only one GP core) */

/* Variables to allow safe pausing of the scheduler */
static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
static u32 pause_count = 0;		/* Protected by gp_scheduler_lock */

static mali_bool mali_gp_scheduler_is_suspended(void);
static void mali_gp_scheduler_job_queued(void);
static void mali_gp_scheduler_job_completed(void);

static _mali_osk_lock_t *gp_scheduler_lock = NULL;
/* Contains tid of thread that locked the scheduler or 0, if not locked */

/*
 * Initialize the GP scheduler: set up the job queue, the scheduler spinlock,
 * the pause wait queue, and locate the (single) GP group/core.
 * Returns _MALI_OSK_ERR_NOMEM if a primitive could not be allocated.
 */
_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
{
	u32 num_groups;
	u32 i;

	_MALI_OSK_INIT_LIST_HEAD(&job_queue);

	gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == gp_scheduler_lock) {
		return _MALI_OSK_ERR_NOMEM;
	}

	gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
	if (NULL == gp_scheduler_working_wait_queue) {
		_mali_osk_lock_term(gp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Find all the available GP cores */
	num_groups = mali_group_get_glob_num_groups();
	for (i = 0; i < num_groups; i++) {
		struct mali_group *group = mali_group_get_glob_group(i);
		struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
		if (NULL != gp_core) {
			if (0 == gp_version) {
				/* Retrieve GP version */
				gp_version = mali_gp_core_get_version(gp_core);
			}
			slot.group = group;
			slot.state = MALI_GP_SLOT_STATE_IDLE;
			break; /* There is only one GP, no point in looking for more */
		}
	}

	return _MALI_OSK_ERR_OK;
}

/*
 * Tear down the GP scheduler. The slot must be idle or disabled
 * (no job may be running when this is called).
 */
void mali_gp_scheduler_terminate(void)
{
	MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state || MALI_GP_SLOT_STATE_DISABLED == slot.state);
	MALI_DEBUG_ASSERT_POINTER(slot.group);
	mali_group_delete(slot.group);

	_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
	_mali_osk_lock_term(gp_scheduler_lock);
}

MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
{
	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW)) {
		/* Non-interruptable lock failed: this should never happen. */
		MALI_DEBUG_ASSERT(0);
	}
	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
}

MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
{
	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
	_mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
}

#ifdef DEBUG
MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
{
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
}
#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
#else
#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
#endif

/*
 * Pop the next queued job (if any) and start it on the GP group.
 * Takes and releases both the scheduler lock and the group lock.
 * No-op while paused, while the slot is busy, or when the queue is empty.
 */
static void mali_gp_scheduler_schedule(void)
{
	struct mali_gp_job *job;

	mali_gp_scheduler_lock();

	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue)) {
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
				     pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
		mali_gp_scheduler_unlock();
		return; /* Nothing to do, so early out */
	}

	/* Get (and remove) next job in queue */
	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
	_mali_osk_list_del(&job->list);

	/* Mark slot as busy */
	slot.state = MALI_GP_SLOT_STATE_WORKING;

	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));

	mali_group_lock(slot.group);
	mali_group_start_gp_job(slot.group, job);
	mali_group_unlock(slot.group);
}

/*
 * Variant of mali_gp_scheduler_schedule() used from the job-done path.
 * Called with BOTH the group lock and the scheduler lock held;
 * RELEASES the scheduler lock before returning (on every path).
 */
static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
{
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);

	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue)) {
		mali_gp_scheduler_unlock();
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
				     pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(), 0, 0, 0);
#endif
		return; /* Nothing to do, so early out */
	}

	/* Get (and remove) next job in queue */
	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
	_mali_osk_list_del(&job->list);

	/* Mark slot as busy */
	slot.state = MALI_GP_SLOT_STATE_WORKING;

	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));

	/* Caller already holds the group lock; start directly. */
	mali_group_start_gp_job(slot.group, job);
}

/*
 * Fill in the user-space result buffer for a finished job, send the
 * "finished" notification to the owning session, and delete the job.
 * Consumes job->finished_notification and the job itself.
 */
static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
{
	_mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;

	_mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
	jobres->user_job_ptr = mali_gp_job_get_user_id(job);
	if (MALI_TRUE == success) {
		jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
	} else {
		jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
	}

	jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
	jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
	jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);

	mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
	job->finished_notification = NULL;

	mali_gp_job_delete(job);
}

/*
 * Completion callback: return the job result to user space, free the slot,
 * wake anyone waiting for the scheduler to pause, and try to schedule the
 * next job on this group. Called with the group lock held.
 */
void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
{
	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n",
			     mali_gp_job_get_id(job), job, success ? "success" : "failure"));

	mali_gp_scheduler_return_job_to_user(job, success);

	mali_gp_scheduler_lock();

	/* Mark slot as idle again */
	slot.state = MALI_GP_SLOT_STATE_IDLE;

	/* If paused, then this was the last job, so wake up sleeping workers */
	if (pause_count > 0) {
		_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
	}

	/* Releases the scheduler lock on every path. */
	mali_gp_scheduler_schedule_on_group(group);

	/* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
	mali_gp_scheduler_job_completed();
}

/*
 * Out-of-memory callback for a running GP job: hand the pre-allocated
 * "suspended" notification (with the job's resume cookie) to the session
 * so user space can grow the heap and resume the job.
 */
void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
{
	_mali_uk_gp_job_suspended_s *jobres;
	_mali_osk_notification_t *notification;

	mali_gp_scheduler_lock();

	notification = job->oom_notification;
	job->oom_notification = NULL;
	slot.returned_cookie = mali_gp_job_get_id(job);

	jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
	jobres->user_job_ptr = mali_gp_job_get_user_id(job);
	jobres->cookie = mali_gp_job_get_id(job);

	mali_gp_scheduler_unlock();

	jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;

	mali_session_send_notification(mali_gp_job_get_session(job), notification);

	/*
	 * If this function failed, then we could return the job to user space right away,
	 * but there is a job timer anyway that will do that eventually.
	 * This is not exactly a common case anyway.
	 */
}

/*
 * Pause the scheduler and block until the currently running job (if any)
 * has finished. Each call must be balanced with mali_gp_scheduler_resume().
 */
void mali_gp_scheduler_suspend(void)
{
	mali_gp_scheduler_lock();
	pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
	mali_gp_scheduler_unlock();

	_mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
}

/*
 * Undo one mali_gp_scheduler_suspend(); when the pause count drops to zero,
 * kick the scheduler to pick up any jobs queued while paused.
 */
void mali_gp_scheduler_resume(void)
{
	mali_bool do_schedule = MALI_FALSE;

	mali_gp_scheduler_lock();
	pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
	/*
	 * Sample the decision while still holding the lock. The previous code
	 * re-read pause_count after unlocking, an unsynchronized read that
	 * races with concurrent suspend()/resume() callers. (A spurious
	 * schedule would be harmless -- mali_gp_scheduler_schedule() re-checks
	 * pause_count under the lock -- but the read itself was racy.)
	 */
	if (0 == pause_count) {
		do_schedule = MALI_TRUE;
	}
	mali_gp_scheduler_unlock();

	if (do_schedule) {
		mali_gp_scheduler_schedule();
	}
}

/*
 * User-space entry point: create a GP job from the uk arguments, queue it,
 * and attempt to schedule it. Returns _MALI_OSK_ERR_NOMEM if the job could
 * not be allocated.
 */
_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(uargs);
	MALI_DEBUG_ASSERT_POINTER(ctx);

	session = (struct mali_session_data *)ctx;

	job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
	if (NULL == job) {
		return _MALI_OSK_ERR_NOMEM;
	}

#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
	mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
	return _MALI_OSK_ERR_OK;
#endif

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
#endif

	/* Takes a PM reference before the job becomes visible in the queue. */
	mali_gp_scheduler_job_queued();

	mali_gp_scheduler_lock();
	_mali_osk_list_addtail(&job->list, &job_queue);
	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	return _MALI_OSK_ERR_OK;
}

/* Report the number of GP cores; this scheduler supports exactly one. */
_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
{
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
	args->number_of_cores = 1;
	return _MALI_OSK_ERR_OK;
}

/* Report the GP core version cached at initialization time. */
_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
{
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
	args->version = gp_version;
	return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args) { struct mali_session_data *session; struct mali_gp_job *resumed_job; _mali_osk_notification_t *new_notification = 0; MALI_DEBUG_ASSERT_POINTER(args); if (NULL == args->ctx) { return _MALI_OSK_ERR_INVALID_ARGS; } session = (struct mali_session_data*)args->ctx; if (NULL == session) { return _MALI_OSK_ERR_FAULT; } if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) { new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s)); if (NULL == new_notification) { MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n")); mali_group_lock(slot.group); mali_group_abort_gp_job(slot.group, args->cookie); mali_group_unlock(slot.group); return _MALI_OSK_ERR_FAULT; } } mali_group_lock(slot.group); if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) { MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1])); resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]); if (NULL != resumed_job) { resumed_job->oom_notification = new_notification; mali_group_unlock(slot.group); return _MALI_OSK_ERR_OK; } else { mali_group_unlock(slot.group); _mali_osk_notification_delete(new_notification); return _MALI_OSK_ERR_FAULT; } } MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie)); mali_group_abort_gp_job(slot.group, args->cookie); mali_group_unlock(slot.group); return _MALI_OSK_ERR_OK; } void mali_gp_scheduler_abort_session(struct mali_session_data *session) { struct mali_gp_job *job, *tmp; mali_gp_scheduler_lock(); MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session)); /* Check queue for jobs and remove */ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list) 
{ if (mali_gp_job_get_session(job) == session) { MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job)); _mali_osk_list_del(&(job->list)); mali_gp_job_delete(job); mali_gp_scheduler_job_completed(); } } mali_gp_scheduler_unlock(); mali_group_abort_session(slot.group, session); } static mali_bool mali_gp_scheduler_is_suspended(void) { mali_bool ret; mali_gp_scheduler_lock(); ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE; mali_gp_scheduler_unlock(); return ret; } #if MALI_STATE_TRACKING u32 mali_gp_scheduler_dump_state(char *buf, u32 size) { int n = 0; n += _mali_osk_snprintf(buf + n, size - n, "GP\n"); n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty"); n += mali_group_dump_state(slot.group, buf + n, size - n); n += _mali_osk_snprintf(buf + n, size - n, "\n"); return n; } #endif void mali_gp_scheduler_reset_all_groups(void) { if (NULL != slot.group) { mali_group_lock(slot.group); mali_group_reset(slot.group); mali_group_unlock(slot.group); } } void mali_gp_scheduler_zap_all_active(struct mali_session_data *session) { if (NULL != slot.group) { mali_group_zap_session(slot.group, session); } } void mali_gp_scheduler_enable_group(struct mali_group *group) { MALI_DEBUG_ASSERT_POINTER(group); MALI_DEBUG_ASSERT(slot.group == group); MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group)); mali_group_lock(group); if (MALI_GROUP_STATE_DISABLED != group->state) { mali_group_unlock(group); MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group)); return; } mali_gp_scheduler_lock(); MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state); MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state); slot.state = MALI_GP_SLOT_STATE_IDLE; group->state = MALI_GROUP_STATE_IDLE; mali_group_power_on_group(group); mali_group_reset(group); mali_gp_scheduler_unlock(); mali_group_unlock(group); /* Pick up any jobs that might 
have been queued while the GP group was disabled. */ mali_gp_scheduler_schedule(); } void mali_gp_scheduler_disable_group(struct mali_group *group) { MALI_DEBUG_ASSERT_POINTER(group); MALI_DEBUG_ASSERT(slot.group == group); MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group)); mali_gp_scheduler_suspend(); mali_group_lock(group); mali_gp_scheduler_lock(); MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state || MALI_GROUP_STATE_DISABLED == group->state); if (MALI_GROUP_STATE_DISABLED == group->state) { MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state); MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group)); } else { MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state); slot.state = MALI_GP_SLOT_STATE_DISABLED; group->state = MALI_GROUP_STATE_DISABLED; mali_group_power_off_group(group); } mali_gp_scheduler_unlock(); mali_group_unlock(group); mali_gp_scheduler_resume(); } static void mali_gp_scheduler_job_queued(void) { /* We hold a PM reference for every job we hold queued (and running) */ _mali_osk_pm_dev_ref_add(); if (mali_utilization_enabled()) { /* * We cheat a little bit by counting the PP as busy from the time a GP job is queued. * This will be fine because we only loose the tiny idle gap between jobs, but * we will instead get less utilization work to do (less locks taken) */ mali_utilization_gp_start(); } } static void mali_gp_scheduler_job_completed(void) { /* Release the PM reference we got in the mali_gp_scheduler_job_queued() function */ _mali_osk_pm_dev_ref_dec(); if (mali_utilization_enabled()) { mali_utilization_gp_end(); } }
gpl-2.0
303750856/linux-3.1
drivers/tty/serial/zs.c
402
31205
/* * zs.c: Serial port driver for IOASIC DECstations. * * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras. * Derived from drivers/macintosh/macserial.c by Harald Koerfgen. * * DECstation changes * Copyright (C) 1998-2000 Harald Koerfgen * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki * * For the rest of the code the original Copyright applies: * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * * * Note: for IOASIC systems the wiring is as follows: * * mouse/keyboard: * DIN-7 MJ-4 signal SCC * 2 1 TxD <- A.TxD * 3 4 RxD -> A.RxD * * EIA-232/EIA-423: * DB-25 MMJ-6 signal SCC * 2 2 TxD <- B.TxD * 3 5 RxD -> B.RxD * 4 RTS <- ~A.RTS * 5 CTS -> ~B.CTS * 6 6 DSR -> ~A.SYNC * 8 CD -> ~B.DCD * 12 DSRS(DCE) -> ~A.CTS (*) * 15 TxC -> B.TxC * 17 RxC -> B.RxC * 20 1 DTR <- ~A.DTR * 22 RI -> ~A.DCD * 23 DSRS(DTE) <- ~B.RTS * * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE) * is shared with DSRS(DTE) at pin 23. * * As you can immediately notice the wiring of the RTS, DTR and DSR signals * is a bit odd. This makes the handling of port B unnecessarily * complicated and prevents the use of some automatic modes of operation. */ #if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/bug.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irqflags.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/spinlock.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/types.h> #include <linux/atomic.h> #include <asm/system.h> #include <asm/dec/interrupts.h> #include <asm/dec/ioasic_addrs.h> #include <asm/dec/system.h> #include "zs.h" MODULE_AUTHOR("Maciej W. 
Rozycki <macro@linux-mips.org>"); MODULE_DESCRIPTION("DECstation Z85C30 serial driver"); MODULE_LICENSE("GPL"); static char zs_name[] __initdata = "DECstation Z85C30 serial driver version "; static char zs_version[] __initdata = "0.10"; /* * It would be nice to dynamically allocate everything that * depends on ZS_NUM_SCCS, so we could support any number of * Z85C30s, but for now... */ #define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */ #define ZS_NUM_CHAN 2 /* 2 channels per chip. */ #define ZS_CHAN_A 0 /* Index of the channel A. */ #define ZS_CHAN_B 1 /* Index of the channel B. */ #define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */ #define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */ #define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte of the 16-bit IOBUS. */ #define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */ #define to_zport(uport) container_of(uport, struct zs_port, port) struct zs_parms { resource_size_t scc[ZS_NUM_SCCS]; int irq[ZS_NUM_SCCS]; }; static struct zs_scc zs_sccs[ZS_NUM_SCCS]; static u8 zs_init_regs[ZS_NUM_REGS] __initdata = { 0, /* write 0 */ PAR_SPEC, /* write 1 */ 0, /* write 2 */ 0, /* write 3 */ X16CLK | SB1, /* write 4 */ 0, /* write 5 */ 0, 0, 0, /* write 6, 7, 8 */ MIE | DLC | NV, /* write 9 */ NRZ, /* write 10 */ TCBR | RCBR, /* write 11 */ 0, 0, /* BRG time constant, write 12 + 13 */ BRSRC | BRENABL, /* write 14 */ 0, /* write 15 */ }; /* * Debugging. */ #undef ZS_DEBUG_REGS /* * Reading and writing Z85C30 registers. 
*/ static void recovery_delay(void) { udelay(2); } static u8 read_zsreg(struct zs_port *zport, int reg) { void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; u8 retval; if (reg != 0) { writeb(reg & 0xf, control); fast_iob(); recovery_delay(); } retval = readb(control); recovery_delay(); return retval; } static void write_zsreg(struct zs_port *zport, int reg, u8 value) { void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; if (reg != 0) { writeb(reg & 0xf, control); fast_iob(); recovery_delay(); } writeb(value, control); fast_iob(); recovery_delay(); return; } static u8 read_zsdata(struct zs_port *zport) { void __iomem *data = zport->port.membase + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; u8 retval; retval = readb(data); recovery_delay(); return retval; } static void write_zsdata(struct zs_port *zport, u8 value) { void __iomem *data = zport->port.membase + ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; writeb(value, data); fast_iob(); recovery_delay(); return; } #ifdef ZS_DEBUG_REGS void zs_dump(void) { struct zs_port *zport; int i, j; for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN]; if (!zport->scc) continue; for (j = 0; j < 16; j++) printk("W%-2d = 0x%02x\t", j, zport->regs[j]); printk("\n"); for (j = 0; j < 16; j++) printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j)); printk("\n\n"); } } #endif static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq) { if (irq) spin_lock_irq(lock); else spin_lock(lock); } static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq) { if (irq) spin_unlock_irq(lock); else spin_unlock(lock); } static int zs_receive_drain(struct zs_port *zport) { int loops = 10000; while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops) read_zsdata(zport); return loops; } static int zs_transmit_drain(struct zs_port *zport, int irq) { struct zs_scc *scc = zport->scc; int loops = 10000; while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) { 
zs_spin_unlock_cond_irq(&scc->zlock, irq); udelay(2); zs_spin_lock_cond_irq(&scc->zlock, irq); } return loops; } static int zs_line_drain(struct zs_port *zport, int irq) { struct zs_scc *scc = zport->scc; int loops = 10000; while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) { zs_spin_unlock_cond_irq(&scc->zlock, irq); udelay(2); zs_spin_lock_cond_irq(&scc->zlock, irq); } return loops; } static void load_zsregs(struct zs_port *zport, u8 *regs, int irq) { /* Let the current transmission finish. */ zs_line_drain(zport, irq); /* Load 'em up. */ write_zsreg(zport, R3, regs[3] & ~RxENABLE); write_zsreg(zport, R5, regs[5] & ~TxENAB); write_zsreg(zport, R4, regs[4]); write_zsreg(zport, R9, regs[9]); write_zsreg(zport, R1, regs[1]); write_zsreg(zport, R2, regs[2]); write_zsreg(zport, R10, regs[10]); write_zsreg(zport, R14, regs[14] & ~BRENABL); write_zsreg(zport, R11, regs[11]); write_zsreg(zport, R12, regs[12]); write_zsreg(zport, R13, regs[13]); write_zsreg(zport, R14, regs[14]); write_zsreg(zport, R15, regs[15]); if (regs[3] & RxENABLE) write_zsreg(zport, R3, regs[3]); if (regs[5] & TxENAB) write_zsreg(zport, R5, regs[5]); return; } /* * Status handling routines. */ /* * zs_tx_empty() -- get the transmitter empty status * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. */ static unsigned int zs_tx_empty(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; u8 status; spin_lock_irqsave(&scc->zlock, flags); status = read_zsreg(zport, R1); spin_unlock_irqrestore(&scc->zlock, flags); return status & ALL_SNT ? 
TIOCSER_TEMT : 0; } static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a, struct zs_port *zport_b) { u8 status_a, status_b; unsigned int mctrl; status_a = read_zsreg(zport_a, R0); status_b = read_zsreg(zport_b, R0); mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) | ((status_b & DCD) ? TIOCM_CAR : 0) | ((status_a & DCD) ? TIOCM_RNG : 0) | ((status_a & SYNC_HUNT) ? TIOCM_DSR : 0); return mctrl; } static unsigned int zs_raw_get_mctrl(struct zs_port *zport) { struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0; } static unsigned int zs_raw_xor_mctrl(struct zs_port *zport) { struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; unsigned int mmask, mctrl, delta; u8 mask_a, mask_b; if (zport == zport_a) return 0; mask_a = zport_a->regs[15]; mask_b = zport->regs[15]; mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) | ((mask_b & DCDIE) ? TIOCM_CAR : 0) | ((mask_a & DCDIE) ? TIOCM_RNG : 0) | ((mask_a & SYNCIE) ? TIOCM_DSR : 0); mctrl = zport->mctrl; if (mmask) { mctrl &= ~mmask; mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask; } delta = mctrl ^ zport->mctrl; if (delta) zport->mctrl = mctrl; return delta; } static unsigned int zs_get_mctrl(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned int mctrl; spin_lock(&scc->zlock); mctrl = zs_raw_get_mctrl(zport); spin_unlock(&scc->zlock); return mctrl; } static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; u8 oldloop, newloop; spin_lock(&scc->zlock); if (zport != zport_a) { if (mctrl & TIOCM_DTR) zport_a->regs[5] |= DTR; else zport_a->regs[5] &= ~DTR; if (mctrl & TIOCM_RTS) zport_a->regs[5] |= RTS; else zport_a->regs[5] &= ~RTS; write_zsreg(zport_a, R5, zport_a->regs[5]); } /* Rarely modified, so don't poke at hardware unless necessary. 
*/ oldloop = zport->regs[14]; newloop = oldloop; if (mctrl & TIOCM_LOOP) newloop |= LOOPBAK; else newloop &= ~LOOPBAK; if (newloop != oldloop) { zport->regs[14] = newloop; write_zsreg(zport, R14, zport->regs[14]); } spin_unlock(&scc->zlock); } static void zs_raw_stop_tx(struct zs_port *zport) { write_zsreg(zport, R0, RES_Tx_P); zport->tx_stopped = 1; } static void zs_stop_tx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); zs_raw_stop_tx(zport); spin_unlock(&scc->zlock); } static void zs_raw_transmit_chars(struct zs_port *); static void zs_start_tx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); if (zport->tx_stopped) { zs_transmit_drain(zport, 0); zport->tx_stopped = 0; zs_raw_transmit_chars(zport); } spin_unlock(&scc->zlock); } static void zs_stop_rx(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; spin_lock(&scc->zlock); zport->regs[15] &= ~BRKIE; zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB); zport->regs[1] |= RxINT_DISAB; if (zport != zport_a) { /* A-side DCD tracks RI and SYNC tracks DSR. */ zport_a->regs[15] &= ~(DCDIE | SYNCIE); write_zsreg(zport_a, R15, zport_a->regs[15]); if (!(zport_a->regs[15] & BRKIE)) { zport_a->regs[1] &= ~EXT_INT_ENAB; write_zsreg(zport_a, R1, zport_a->regs[1]); } /* This-side DCD tracks DCD and CTS tracks CTS. */ zport->regs[15] &= ~(DCDIE | CTSIE); zport->regs[1] &= ~EXT_INT_ENAB; } else { /* DCD tracks RI and SYNC tracks DSR for the B side. 
*/ if (!(zport->regs[15] & (DCDIE | SYNCIE))) zport->regs[1] &= ~EXT_INT_ENAB; } write_zsreg(zport, R15, zport->regs[15]); write_zsreg(zport, R1, zport->regs[1]); spin_unlock(&scc->zlock); } static void zs_enable_ms(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; if (zport == zport_a) return; spin_lock(&scc->zlock); /* Clear Ext interrupts if not being handled already. */ if (!(zport_a->regs[1] & EXT_INT_ENAB)) write_zsreg(zport_a, R0, RES_EXT_INT); /* A-side DCD tracks RI and SYNC tracks DSR. */ zport_a->regs[1] |= EXT_INT_ENAB; zport_a->regs[15] |= DCDIE | SYNCIE; /* This-side DCD tracks DCD and CTS tracks CTS. */ zport->regs[15] |= DCDIE | CTSIE; zs_raw_xor_mctrl(zport); write_zsreg(zport_a, R1, zport_a->regs[1]); write_zsreg(zport_a, R15, zport_a->regs[15]); write_zsreg(zport, R15, zport->regs[15]); spin_unlock(&scc->zlock); } static void zs_break_ctl(struct uart_port *uport, int break_state) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); if (break_state == -1) zport->regs[5] |= SND_BRK; else zport->regs[5] &= ~SND_BRK; write_zsreg(zport, R5, zport->regs[5]); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Interrupt handling routines. */ #define Rx_BRK 0x0100 /* BREAK event software flag. */ #define Rx_SYS 0x0200 /* SysRq event software flag. 
*/ static void zs_receive_chars(struct zs_port *zport) { struct uart_port *uport = &zport->port; struct zs_scc *scc = zport->scc; struct uart_icount *icount; unsigned int avail, status, ch, flag; int count; for (count = 16; count; count--) { spin_lock(&scc->zlock); avail = read_zsreg(zport, R0) & Rx_CH_AV; spin_unlock(&scc->zlock); if (!avail) break; spin_lock(&scc->zlock); status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR); ch = read_zsdata(zport); spin_unlock(&scc->zlock); flag = TTY_NORMAL; icount = &uport->icount; icount->rx++; /* Handle the null char got when BREAK is removed. */ if (!ch) status |= zport->tty_break; if (unlikely(status & (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) { zport->tty_break = 0; /* Reset the error indication. */ if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) { spin_lock(&scc->zlock); write_zsreg(zport, R0, ERR_RES); spin_unlock(&scc->zlock); } if (status & (Rx_SYS | Rx_BRK)) { icount->brk++; /* SysRq discards the null char. */ if (status & Rx_SYS) continue; } else if (status & FRM_ERR) icount->frame++; else if (status & PAR_ERR) icount->parity++; if (status & Rx_OVR) icount->overrun++; status &= uport->read_status_mask; if (status & Rx_BRK) flag = TTY_BREAK; else if (status & FRM_ERR) flag = TTY_FRAME; else if (status & PAR_ERR) flag = TTY_PARITY; } if (uart_handle_sysrq_char(uport, ch)) continue; uart_insert_char(uport, status, Rx_OVR, ch, flag); } tty_flip_buffer_push(uport->state->port.tty); } static void zs_raw_transmit_chars(struct zs_port *zport) { struct circ_buf *xmit = &zport->port.state->xmit; /* XON/XOFF chars. */ if (zport->port.x_char) { write_zsdata(zport, zport->port.x_char); zport->port.icount.tx++; zport->port.x_char = 0; return; } /* If nothing to do or stopped or hardware stopped. */ if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) { zs_raw_stop_tx(zport); return; } /* Send char. 
*/ write_zsdata(zport, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); zport->port.icount.tx++; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&zport->port); /* Are we are done? */ if (uart_circ_empty(xmit)) zs_raw_stop_tx(zport); } static void zs_transmit_chars(struct zs_port *zport) { struct zs_scc *scc = zport->scc; spin_lock(&scc->zlock); zs_raw_transmit_chars(zport); spin_unlock(&scc->zlock); } static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) { struct uart_port *uport = &zport->port; struct zs_scc *scc = zport->scc; unsigned int delta; u8 status, brk; spin_lock(&scc->zlock); /* Get status from Read Register 0. */ status = read_zsreg(zport, R0); if (zport->regs[15] & BRKIE) { brk = status & BRK_ABRT; if (brk && !zport->brk) { spin_unlock(&scc->zlock); if (uart_handle_break(uport)) zport->tty_break = Rx_SYS; else zport->tty_break = Rx_BRK; spin_lock(&scc->zlock); } zport->brk = brk; } if (zport != zport_a) { delta = zs_raw_xor_mctrl(zport); spin_unlock(&scc->zlock); if (delta & TIOCM_CTS) uart_handle_cts_change(uport, zport->mctrl & TIOCM_CTS); if (delta & TIOCM_CAR) uart_handle_dcd_change(uport, zport->mctrl & TIOCM_CAR); if (delta & TIOCM_RNG) uport->icount.dsr++; if (delta & TIOCM_DSR) uport->icount.rng++; if (delta) wake_up_interruptible(&uport->state->port.delta_msr_wait); spin_lock(&scc->zlock); } /* Clear the status condition... */ write_zsreg(zport, R0, RES_EXT_INT); spin_unlock(&scc->zlock); } /* * This is the Z85C30 driver's generic interrupt routine. */ static irqreturn_t zs_interrupt(int irq, void *dev_id) { struct zs_scc *scc = dev_id; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; struct zs_port *zport_b = &scc->zport[ZS_CHAN_B]; irqreturn_t status = IRQ_NONE; u8 zs_intreg; int count; /* * NOTE: The read register 3, which holds the irq status, * does so for both channels on each chip. 
Although * the status value itself must be read from the A * channel and is only valid when read from channel A. * Yes... broken hardware... */ for (count = 16; count; count--) { spin_lock(&scc->zlock); zs_intreg = read_zsreg(zport_a, R3); spin_unlock(&scc->zlock); if (!zs_intreg) break; /* * We do not like losing characters, so we prioritise * interrupt sources a little bit differently than * the SCC would, was it allowed to. */ if (zs_intreg & CHBRxIP) zs_receive_chars(zport_b); if (zs_intreg & CHARxIP) zs_receive_chars(zport_a); if (zs_intreg & CHBEXT) zs_status_handle(zport_b, zport_a); if (zs_intreg & CHAEXT) zs_status_handle(zport_a, zport_a); if (zs_intreg & CHBTxIP) zs_transmit_chars(zport_b); if (zs_intreg & CHATxIP) zs_transmit_chars(zport_a); status = IRQ_HANDLED; } return status; } /* * Finally, routines used to initialize the serial port. */ static int zs_startup(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; int irq_guard; int ret; irq_guard = atomic_add_return(1, &scc->irq_guard); if (irq_guard == 1) { ret = request_irq(zport->port.irq, zs_interrupt, IRQF_SHARED, "scc", scc); if (ret) { atomic_add(-1, &scc->irq_guard); printk(KERN_ERR "zs: can't get irq %d\n", zport->port.irq); return ret; } } spin_lock_irqsave(&scc->zlock, flags); /* Clear the receive FIFO. */ zs_receive_drain(zport); /* Clear the interrupt registers. */ write_zsreg(zport, R0, ERR_RES); write_zsreg(zport, R0, RES_Tx_P); /* But Ext only if not being handled already. */ if (!(zport->regs[1] & EXT_INT_ENAB)) write_zsreg(zport, R0, RES_EXT_INT); /* Finally, enable sequencing and interrupts. 
*/ zport->regs[1] &= ~RxINT_MASK; zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; zport->regs[3] |= RxENABLE; zport->regs[15] |= BRKIE; write_zsreg(zport, R1, zport->regs[1]); write_zsreg(zport, R3, zport->regs[3]); write_zsreg(zport, R5, zport->regs[5]); write_zsreg(zport, R15, zport->regs[15]); /* Record the current state of RR0. */ zport->mctrl = zs_raw_get_mctrl(zport); zport->brk = read_zsreg(zport, R0) & BRK_ABRT; zport->tx_stopped = 1; spin_unlock_irqrestore(&scc->zlock, flags); return 0; } static void zs_shutdown(struct uart_port *uport) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; unsigned long flags; int irq_guard; spin_lock_irqsave(&scc->zlock, flags); zport->regs[3] &= ~RxENABLE; write_zsreg(zport, R5, zport->regs[5]); write_zsreg(zport, R3, zport->regs[3]); spin_unlock_irqrestore(&scc->zlock, flags); irq_guard = atomic_add_return(-1, &scc->irq_guard); if (!irq_guard) free_irq(zport->port.irq, scc); } static void zs_reset(struct zs_port *zport) { struct zs_scc *scc = zport->scc; int irq; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); if (!scc->initialised) { /* Reset the pointer first, just in case... */ read_zsreg(zport, R0); /* And let the current transmission finish. */ zs_line_drain(zport, irq); write_zsreg(zport, R9, FHWRES); udelay(10); write_zsreg(zport, R9, 0); scc->initialised = 1; } load_zsregs(zport, zport->regs, irq); spin_unlock_irqrestore(&scc->zlock, flags); } static void zs_set_termios(struct uart_port *uport, struct ktermios *termios, struct ktermios *old_termios) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; struct zs_port *zport_a = &scc->zport[ZS_CHAN_A]; int irq; unsigned int baud, brg; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); /* Byte size. 
*/ zport->regs[3] &= ~RxNBITS_MASK; zport->regs[5] &= ~TxNBITS_MASK; switch (termios->c_cflag & CSIZE) { case CS5: zport->regs[3] |= Rx5; zport->regs[5] |= Tx5; break; case CS6: zport->regs[3] |= Rx6; zport->regs[5] |= Tx6; break; case CS7: zport->regs[3] |= Rx7; zport->regs[5] |= Tx7; break; case CS8: default: zport->regs[3] |= Rx8; zport->regs[5] |= Tx8; break; } /* Parity and stop bits. */ zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN); if (termios->c_cflag & CSTOPB) zport->regs[4] |= SB2; else zport->regs[4] |= SB1; if (termios->c_cflag & PARENB) zport->regs[4] |= PAR_ENA; if (!(termios->c_cflag & PARODD)) zport->regs[4] |= PAR_EVEN; switch (zport->clk_mode) { case 64: zport->regs[4] |= X64CLK; break; case 32: zport->regs[4] |= X32CLK; break; case 16: zport->regs[4] |= X16CLK; break; case 1: zport->regs[4] |= X1CLK; break; default: BUG(); } baud = uart_get_baud_rate(uport, termios, old_termios, 0, uport->uartclk / zport->clk_mode / 4); brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode); zport->regs[12] = brg & 0xff; zport->regs[13] = (brg >> 8) & 0xff; uart_update_timeout(uport, termios->c_cflag, baud); uport->read_status_mask = Rx_OVR; if (termios->c_iflag & INPCK) uport->read_status_mask |= FRM_ERR | PAR_ERR; if (termios->c_iflag & (BRKINT | PARMRK)) uport->read_status_mask |= Rx_BRK; uport->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) uport->ignore_status_mask |= FRM_ERR | PAR_ERR; if (termios->c_iflag & IGNBRK) { uport->ignore_status_mask |= Rx_BRK; if (termios->c_iflag & IGNPAR) uport->ignore_status_mask |= Rx_OVR; } if (termios->c_cflag & CREAD) zport->regs[3] |= RxENABLE; else zport->regs[3] &= ~RxENABLE; if (zport != zport_a) { if (!(termios->c_cflag & CLOCAL)) { zport->regs[15] |= DCDIE; } else zport->regs[15] &= ~DCDIE; if (termios->c_cflag & CRTSCTS) { zport->regs[15] |= CTSIE; } else zport->regs[15] &= ~CTSIE; zs_raw_xor_mctrl(zport); } /* Load up the new values. 
*/ load_zsregs(zport, zport->regs, irq); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Hack alert! * Required solely so that the initial PROM-based console * works undisturbed in parallel with this one. */ static void zs_pm(struct uart_port *uport, unsigned int state, unsigned int oldstate) { struct zs_port *zport = to_zport(uport); if (state < 3) zport->regs[5] |= TxENAB; else zport->regs[5] &= ~TxENAB; write_zsreg(zport, R5, zport->regs[5]); } static const char *zs_type(struct uart_port *uport) { return "Z85C30 SCC"; } static void zs_release_port(struct uart_port *uport) { iounmap(uport->membase); uport->membase = 0; release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); } static int zs_map_port(struct uart_port *uport) { if (!uport->membase) uport->membase = ioremap_nocache(uport->mapbase, ZS_CHAN_IO_SIZE); if (!uport->membase) { printk(KERN_ERR "zs: Cannot map MMIO\n"); return -ENOMEM; } return 0; } static int zs_request_port(struct uart_port *uport) { int ret; if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) { printk(KERN_ERR "zs: Unable to reserve MMIO resource\n"); return -EBUSY; } ret = zs_map_port(uport); if (ret) { release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); return ret; } return 0; } static void zs_config_port(struct uart_port *uport, int flags) { struct zs_port *zport = to_zport(uport); if (flags & UART_CONFIG_TYPE) { if (zs_request_port(uport)) return; uport->type = PORT_ZS; zs_reset(zport); } } static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser) { struct zs_port *zport = to_zport(uport); int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS) ret = -EINVAL; if (ser->irq != uport->irq) ret = -EINVAL; if (ser->baud_base != uport->uartclk / zport->clk_mode / 4) ret = -EINVAL; return ret; } static struct uart_ops zs_ops = { .tx_empty = zs_tx_empty, .set_mctrl = zs_set_mctrl, .get_mctrl = zs_get_mctrl, .stop_tx = zs_stop_tx, .start_tx = zs_start_tx, .stop_rx = zs_stop_rx, .enable_ms = 
zs_enable_ms, .break_ctl = zs_break_ctl, .startup = zs_startup, .shutdown = zs_shutdown, .set_termios = zs_set_termios, .pm = zs_pm, .type = zs_type, .release_port = zs_release_port, .request_port = zs_request_port, .config_port = zs_config_port, .verify_port = zs_verify_port, }; /* * Initialize Z85C30 port structures. */ static int __init zs_probe_sccs(void) { static int probed; struct zs_parms zs_parms; int chip, side, irq; int n_chips = 0; int i; if (probed) return 0; irq = dec_interrupt[DEC_IRQ_SCC0]; if (irq >= 0) { zs_parms.scc[n_chips] = IOASIC_SCC0; zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0]; n_chips++; } irq = dec_interrupt[DEC_IRQ_SCC1]; if (irq >= 0) { zs_parms.scc[n_chips] = IOASIC_SCC1; zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1]; n_chips++; } if (!n_chips) return -ENXIO; probed = 1; for (chip = 0; chip < n_chips; chip++) { spin_lock_init(&zs_sccs[chip].zlock); for (side = 0; side < ZS_NUM_CHAN; side++) { struct zs_port *zport = &zs_sccs[chip].zport[side]; struct uart_port *uport = &zport->port; zport->scc = &zs_sccs[chip]; zport->clk_mode = 16; uport->irq = zs_parms.irq[chip]; uport->uartclk = ZS_CLOCK; uport->fifosize = 1; uport->iotype = UPIO_MEM; uport->flags = UPF_BOOT_AUTOCONF; uport->ops = &zs_ops; uport->line = chip * ZS_NUM_CHAN + side; uport->mapbase = dec_kn_slot_base + zs_parms.scc[chip] + (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE; for (i = 0; i < ZS_NUM_REGS; i++) zport->regs[i] = zs_init_regs[i]; } } return 0; } #ifdef CONFIG_SERIAL_ZS_CONSOLE static void zs_console_putchar(struct uart_port *uport, int ch) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; int irq; unsigned long flags; spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); if (zs_transmit_drain(zport, irq)) write_zsdata(zport, ch); spin_unlock_irqrestore(&scc->zlock, flags); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... 
*/ static void zs_console_write(struct console *co, const char *s, unsigned int count) { int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; struct zs_port *zport = &zs_sccs[chip].zport[side]; struct zs_scc *scc = zport->scc; unsigned long flags; u8 txint, txenb; int irq; /* Disable transmit interrupts and enable the transmitter. */ spin_lock_irqsave(&scc->zlock, flags); txint = zport->regs[1]; txenb = zport->regs[5]; if (txint & TxINT_ENAB) { zport->regs[1] = txint & ~TxINT_ENAB; write_zsreg(zport, R1, zport->regs[1]); } if (!(txenb & TxENAB)) { zport->regs[5] = txenb | TxENAB; write_zsreg(zport, R5, zport->regs[5]); } spin_unlock_irqrestore(&scc->zlock, flags); uart_console_write(&zport->port, s, count, zs_console_putchar); /* Restore transmit interrupts and the transmitter enable. */ spin_lock_irqsave(&scc->zlock, flags); irq = !irqs_disabled_flags(flags); zs_line_drain(zport, irq); if (!(txenb & TxENAB)) { zport->regs[5] &= ~TxENAB; write_zsreg(zport, R5, zport->regs[5]); } if (txint & TxINT_ENAB) { zport->regs[1] |= TxINT_ENAB; write_zsreg(zport, R1, zport->regs[1]); } spin_unlock_irqrestore(&scc->zlock, flags); } /* * Setup serial console baud/bits/parity. We do two things here: * - construct a cflag setting for the first uart_open() * - initialise the serial port * Return non-zero if we didn't find a serial port. 
*/ static int __init zs_console_setup(struct console *co, char *options) { int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN; struct zs_port *zport = &zs_sccs[chip].zport[side]; struct uart_port *uport = &zport->port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; ret = zs_map_port(uport); if (ret) return ret; zs_reset(zport); zs_pm(uport, 0, -1); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(uport, co, baud, parity, bits, flow); } static struct uart_driver zs_reg; static struct console zs_console = { .name = "ttyS", .write = zs_console_write, .device = uart_console_device, .setup = zs_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &zs_reg, }; /* * Register console. */ static int __init zs_serial_console_init(void) { int ret; ret = zs_probe_sccs(); if (ret) return ret; register_console(&zs_console); return 0; } console_initcall(zs_serial_console_init); #define SERIAL_ZS_CONSOLE &zs_console #else #define SERIAL_ZS_CONSOLE NULL #endif /* CONFIG_SERIAL_ZS_CONSOLE */ static struct uart_driver zs_reg = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = ZS_NUM_SCCS * ZS_NUM_CHAN, .cons = SERIAL_ZS_CONSOLE, }; /* zs_init inits the driver. */ static int __init zs_init(void) { int i, ret; pr_info("%s%s\n", zs_name, zs_version); /* Find out how many Z85C30 SCCs we have. 
*/ ret = zs_probe_sccs(); if (ret) return ret; ret = uart_register_driver(&zs_reg); if (ret) return ret; for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; struct uart_port *uport = &zport->port; if (zport->scc) uart_add_one_port(&zs_reg, uport); } return 0; } static void __exit zs_exit(void) { int i; for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) { struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; struct uart_port *uport = &zport->port; if (zport->scc) uart_remove_one_port(&zs_reg, uport); } uart_unregister_driver(&zs_reg); } module_init(zs_init); module_exit(zs_exit);
gpl-2.0
Klagopsalmer/linux
drivers/staging/rtl8188eu/core/rtw_xmit.c
402
58046
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_XMIT_C_ #include <osdep_service.h> #include <drv_types.h> #include <wifi.h> #include <osdep_intf.h> #include <linux/vmalloc.h> static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; static void _init_txservq(struct tx_servq *ptxservq) { INIT_LIST_HEAD(&ptxservq->tx_pending); _rtw_init_queue(&ptxservq->sta_pending); ptxservq->qcnt = 0; } void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv) { memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv)); spin_lock_init(&psta_xmitpriv->lock); _init_txservq(&psta_xmitpriv->be_q); _init_txservq(&psta_xmitpriv->bk_q); _init_txservq(&psta_xmitpriv->vi_q); _init_txservq(&psta_xmitpriv->vo_q); INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz); INIT_LIST_HEAD(&psta_xmitpriv->apsd); } s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) { int i; struct xmit_buf *pxmitbuf; struct xmit_frame *pxframe; int res = _SUCCESS; u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ; u32 num_xmit_extbuf = NR_XMIT_EXTBUFF; /* We don't need to memset padapter->XXX to 
zero, because adapter is allocated by vzalloc(). */ spin_lock_init(&pxmitpriv->lock); sema_init(&pxmitpriv->xmit_sema, 0); sema_init(&pxmitpriv->terminate_xmitthread_sema, 0); /* Please insert all the queue initializaiton using _rtw_init_queue below */ pxmitpriv->adapter = padapter; _rtw_init_queue(&pxmitpriv->be_pending); _rtw_init_queue(&pxmitpriv->bk_pending); _rtw_init_queue(&pxmitpriv->vi_pending); _rtw_init_queue(&pxmitpriv->vo_pending); _rtw_init_queue(&pxmitpriv->bm_pending); _rtw_init_queue(&pxmitpriv->free_xmit_queue); /* Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME, and initialize free_xmit_frame below. Please also apply free_txobj to link_up all the xmit_frames... */ pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4); if (pxmitpriv->pallocated_frame_buf == NULL) { pxmitpriv->pxmit_frame_buf = NULL; RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n")); res = _FAIL; goto exit; } pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4); /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */ /* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */ pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf; for (i = 0; i < NR_XMITFRAME; i++) { INIT_LIST_HEAD(&(pxframe->list)); pxframe->padapter = padapter; pxframe->frame_tag = NULL_FRAMETAG; pxframe->pkt = NULL; pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue)); pxframe++; } pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME; pxmitpriv->frag_len = MAX_FRAG_THRESHOLD; /* init xmit_buf */ _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue); _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue); pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4); if (pxmitpriv->pallocated_xmitbuf == NULL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n")); res = _FAIL; goto 
exit; } pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4); /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */ /* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; for (i = 0; i < NR_XMITBUFF; i++) { INIT_LIST_HEAD(&pxmitbuf->list); pxmitbuf->priv_data = NULL; pxmitbuf->padapter = padapter; pxmitbuf->ext_tag = false; /* Tx buf allocation may fail sometimes, so sleep and retry. */ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); if (res == _FAIL) { msleep(10); res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); if (res == _FAIL) { goto exit; } } pxmitbuf->flags = XMIT_VO_QUEUE; list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue)); pxmitbuf++; } pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF; /* Init xmit extension buff */ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue); pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4); if (pxmitpriv->pallocated_xmit_extbuf == NULL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n")); res = _FAIL; goto exit; } pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4); pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; for (i = 0; i < num_xmit_extbuf; i++) { INIT_LIST_HEAD(&pxmitbuf->list); pxmitbuf->priv_data = NULL; pxmitbuf->padapter = padapter; pxmitbuf->ext_tag = true; res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ); if (res == _FAIL) { res = _FAIL; goto exit; } list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue)); pxmitbuf++; } pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; rtw_alloc_hwxmits(padapter); rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); for (i = 0; i < 4; i++) pxmitpriv->wmm_para_seq[i] = i; pxmitpriv->txirp_cnt = 1; 
sema_init(&(pxmitpriv->tx_retevt), 0); /* per AC pending irp */ pxmitpriv->beq_cnt = 0; pxmitpriv->bkq_cnt = 0; pxmitpriv->viq_cnt = 0; pxmitpriv->voq_cnt = 0; pxmitpriv->ack_tx = false; mutex_init(&pxmitpriv->ack_tx_mutex); rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0); rtw_hal_init_xmit_priv(padapter); exit: return res; } void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv) { int i; struct adapter *padapter = pxmitpriv->adapter; struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf; struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ; u32 num_xmit_extbuf = NR_XMIT_EXTBUFF; if (pxmitpriv->pxmit_frame_buf == NULL) return; for (i = 0; i < NR_XMITFRAME; i++) { rtw_os_xmit_complete(padapter, pxmitframe); pxmitframe++; } for (i = 0; i < NR_XMITBUFF; i++) { rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); pxmitbuf++; } if (pxmitpriv->pallocated_frame_buf) vfree(pxmitpriv->pallocated_frame_buf); if (pxmitpriv->pallocated_xmitbuf) vfree(pxmitpriv->pallocated_xmitbuf); /* free xmit extension buff */ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; for (i = 0; i < num_xmit_extbuf; i++) { rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ)); pxmitbuf++; } if (pxmitpriv->pallocated_xmit_extbuf) { vfree(pxmitpriv->pallocated_xmit_extbuf); } rtw_free_hwxmits(padapter); mutex_destroy(&pxmitpriv->ack_tx_mutex); } static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe) { u32 sz; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct sta_info *psta = pattrib->psta; struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); if (pattrib->nr_frags != 1) sz = padapter->xmitpriv.frag_len; else /* no frag */ sz = pattrib->last_txcmdsz; /* (1) RTS_Threshold is compared to the MPDU, not MSDU. 
*/ /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */ /* Other fragments are protected by previous fragment. */ /* So we only need to check the length of first fragment. */ if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) { if (sz > padapter->registrypriv.rts_thresh) { pattrib->vcs_mode = RTS_CTS; } else { if (psta->rtsen) pattrib->vcs_mode = RTS_CTS; else if (psta->cts2self) pattrib->vcs_mode = CTS_TO_SELF; else pattrib->vcs_mode = NONE_VCS; } } else { while (true) { /* IOT action */ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && pattrib->ampdu_en && (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) { pattrib->vcs_mode = CTS_TO_SELF; break; } /* check ERP protection */ if (psta->rtsen || psta->cts2self) { if (psta->rtsen) pattrib->vcs_mode = RTS_CTS; else if (psta->cts2self) pattrib->vcs_mode = CTS_TO_SELF; break; } /* check HT op mode */ if (pattrib->ht_en) { u8 htopmode = pmlmeinfo->HT_protection; if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) || (!pmlmeext->cur_bwmode && htopmode == 3)) { pattrib->vcs_mode = RTS_CTS; break; } } /* check rts */ if (sz > padapter->registrypriv.rts_thresh) { pattrib->vcs_mode = RTS_CTS; break; } /* to do list: check MIMO power save condition. 
*/ /* check AMPDU aggregation for TXOP */ if (pattrib->ampdu_en) { pattrib->vcs_mode = RTS_CTS; break; } pattrib->vcs_mode = NONE_VCS; break; } } } static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta) { /*if (psta->rtsen) pattrib->vcs_mode = RTS_CTS; else if (psta->cts2self) pattrib->vcs_mode = CTS_TO_SELF; else pattrib->vcs_mode = NONE_VCS;*/ pattrib->mdata = 0; pattrib->eosp = 0; pattrib->triggered = 0; /* qos_en, ht_en, init rate, , bw, ch_offset, sgi */ pattrib->qos_en = psta->qos_option; pattrib->raid = psta->raid; pattrib->ht_en = psta->htpriv.ht_option; pattrib->bwmode = psta->htpriv.bwmode; pattrib->ch_offset = psta->htpriv.ch_offset; pattrib->sgi = psta->htpriv.sgi; pattrib->ampdu_en = false; pattrib->retry_ctrl = false; } u8 qos_acm(u8 acm_mask, u8 priority) { u8 change_priority = priority; switch (priority) { case 0: case 3: if (acm_mask & BIT(1)) change_priority = 1; break; case 1: case 2: break; case 4: case 5: if (acm_mask & BIT(2)) change_priority = 0; break; case 6: case 7: if (acm_mask & BIT(3)) change_priority = 5; break; default: DBG_88E("qos_acm(): invalid pattrib->priority: %d!!!\n", priority); break; } return change_priority; } static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib) { struct ethhdr etherhdr; struct iphdr ip_hdr; s32 user_prio = 0; _rtw_open_pktfile(ppktfile->pkt, ppktfile); _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN); /* get user_prio from IP hdr */ if (pattrib->ether_type == 0x0800) { _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr)); /* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */ user_prio = ip_hdr.tos >> 5; } else if (pattrib->ether_type == 0x888e) { /* "When priority processing of data frames is supported, */ /* a STA's SME should send EAPOL-Key frames at the highest priority." 
*/ user_prio = 7; } pattrib->priority = user_prio; pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN; pattrib->subtype = WIFI_QOS_DATA_TYPE; } static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib) { struct pkt_file pktfile; struct sta_info *psta = NULL; struct ethhdr etherhdr; int bmcast; struct sta_priv *pstapriv = &padapter->stapriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; int res = _SUCCESS; _rtw_open_pktfile(pkt, &pktfile); _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN); pattrib->ether_type = ntohs(etherhdr.h_proto); memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN); memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN); pattrib->pctrl = 0; if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN); } pattrib->pktlen = pktfile.pkt_len; if (ETH_P_IP == pattrib->ether_type) { /* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */ /* to prevent DHCP protocol fail */ u8 tmp[24]; _rtw_pktfile_read(&pktfile, &tmp[0], 24); pattrib->dhcp_pkt = 0; if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */ if (ETH_P_IP == pattrib->ether_type) {/* IP header */ if (((tmp[21] == 68) && (tmp[23] == 67)) || ((tmp[21] == 67) && (tmp[23] == 68))) { /* 68 : UDP BOOTP client */ /* 67 : UDP BOOTP server */ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====================== update_attrib: get DHCP Packet\n")); /* Use low rate to 
send DHCP packet. */ pattrib->dhcp_pkt = 1; } } } } else if (0x888e == pattrib->ether_type) { DBG_88E_LEVEL(_drv_info_, "send eapol packet\n"); } if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1)) rtw_set_scan_deny(padapter, 3000); /* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */ if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1)) rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1); bmcast = IS_MCAST(pattrib->ra); /* get sta_info */ if (bmcast) { psta = rtw_get_bcmc_stainfo(padapter); } else { psta = rtw_get_stainfo(pstapriv, pattrib->ra); if (psta == NULL) { /* if we cannot get psta => drrp the pkt */ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra: %pM\n", (pattrib->ra))); res = _FAIL; goto exit; } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) && (!(psta->state & _FW_LINKED))) { res = _FAIL; goto exit; } } if (psta) { pattrib->mac_id = psta->mac_id; /* DBG_88E("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */ pattrib->psta = psta; } else { /* if we cannot get psta => drop the pkt */ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:%pM\n", (pattrib->ra))); res = _FAIL; goto exit; } pattrib->ack_policy = 0; /* get ether_hdr_len */ pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? 
(14 + 4): 14; vlan tag */ pattrib->hdrlen = WLAN_HDR_A3_LEN; pattrib->subtype = WIFI_DATA_TYPE; pattrib->priority = 0; if (check_fwstate(pmlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) { if (psta->qos_option) set_qos(&pktfile, pattrib); } else { if (pqospriv->qos_option) { set_qos(&pktfile, pattrib); if (pmlmepriv->acm_mask != 0) pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority); } } if (psta->ieee8021x_blocked) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\n psta->ieee8021x_blocked == true\n")); pattrib->encrypt = 0; if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n", pattrib->ether_type)); res = _FAIL; goto exit; } } else { GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast); switch (psecuritypriv->dot11AuthAlgrthm) { case dot11AuthAlgrthm_Open: case dot11AuthAlgrthm_Shared: case dot11AuthAlgrthm_Auto: pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex; break; case dot11AuthAlgrthm_8021X: if (bmcast) pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid; else pattrib->key_idx = 0; break; default: pattrib->key_idx = 0; break; } } switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: pattrib->iv_len = 4; pattrib->icv_len = 4; break; case _TKIP_: pattrib->iv_len = 8; pattrib->icv_len = 4; if (padapter->securitypriv.busetkipkey == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npadapter->securitypriv.busetkipkey(%d) == _FAIL drop packet\n", padapter->securitypriv.busetkipkey)); res = _FAIL; goto exit; } break; case _AES_: RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("pattrib->encrypt=%d (_AES_)\n", pattrib->encrypt)); pattrib->iv_len = 8; pattrib->icv_len = 8; break; default: pattrib->iv_len = 0; pattrib->icv_len = 0; break; } RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: encrypt=%d securitypriv.sw_encrypt=%d\n", 
pattrib->encrypt, padapter->securitypriv.sw_encrypt)); if (pattrib->encrypt && (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted)) { pattrib->bswenc = true; RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("update_attrib: encrypt=%d securitypriv.hw_decrypted=%d bswenc = true\n", pattrib->encrypt, padapter->securitypriv.sw_encrypt)); } else { pattrib->bswenc = false; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: bswenc = false\n")); } update_attrib_phy_info(pattrib, psta); exit: return res; } static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitframe) { int curfragnum, length; u8 *pframe, *payload, mic[8]; struct mic_data micdata; struct sta_info *stainfo; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; u8 priority[4] = {0x0, 0x0, 0x0, 0x0}; u8 hw_hdr_offset = 0; int bmcst = IS_MCAST(pattrib->ra); if (pattrib->psta) stainfo = pattrib->psta; else stainfo = rtw_get_stainfo(&padapter->stapriv , &pattrib->ra[0]); hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ); if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */ /* encode mic code */ if (stainfo != NULL) { u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; pframe = pxmitframe->buf_addr + hw_hdr_offset; if (bmcst) { if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) return _FAIL; /* start to calculate the mic code */ rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey); } else { if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16)) { /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */ /* msleep(10); */ return _FAIL; } /* start to calculate the mic code */ rtw_secmicsetkey(&micdata, 
&stainfo->dot11tkiptxmickey.skey[0]); } if (pframe[1]&1) { /* ToDS == 1 */ rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */ if (pframe[1]&2) /* From Ds == 1 */ rtw_secmicappend(&micdata, &pframe[24], 6); else rtw_secmicappend(&micdata, &pframe[10], 6); } else { /* ToDS == 0 */ rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */ if (pframe[1]&2) /* From Ds == 1 */ rtw_secmicappend(&micdata, &pframe[16], 6); else rtw_secmicappend(&micdata, &pframe[10], 6); } if (pattrib->qos_en) priority[0] = (u8)pxmitframe->attrib.priority; rtw_secmicappend(&micdata, &priority[0], 4); payload = pframe; for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) { payload = (u8 *)round_up((size_t)(payload), 4); RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n", curfragnum, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5), *(payload+6), *(payload+7))); payload = payload+pattrib->hdrlen+pattrib->iv_len; RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d pattrib->hdrlen=%d pattrib->iv_len=%d", curfragnum, pattrib->hdrlen, pattrib->iv_len)); if ((curfragnum+1) == pattrib->nr_frags) { length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0); rtw_secmicappend(&micdata, payload, length); payload = payload+length; } else { length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? 
pattrib->icv_len : 0); rtw_secmicappend(&micdata, payload, length); payload = payload+length+pattrib->icv_len; RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len)); } } rtw_secgetmic(&micdata, &(mic[0])); RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n")); RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n", pattrib->last_txcmdsz)); RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\ mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n", mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7])); /* add mic code and add the mic code length in last_txcmdsz */ memcpy(payload, &(mic[0]), 8); pattrib->last_txcmdsz += 8; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n")); payload = payload-pattrib->last_txcmdsz+8; for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8) RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ", *(payload+curfragnum), *(payload+curfragnum+1), *(payload+curfragnum+2), *(payload+curfragnum+3), *(payload+curfragnum+4), *(payload+curfragnum+5), *(payload+curfragnum+6), *(payload+curfragnum+7))); } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n")); } } return _SUCCESS; } static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe) { struct pkt_attrib *pattrib = &pxmitframe->attrib; if (pattrib->bswenc) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n")); switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: rtw_wep_encrypt(padapter, (u8 *)pxmitframe); break; case _TKIP_: rtw_tkip_encrypt(padapter, (u8 *)pxmitframe); break; case _AES_: rtw_aes_encrypt(padapter, (u8 
*)pxmitframe); break; default: break; } } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n")); } return _SUCCESS; } s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib) { u16 *qc; struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; u8 qos_option = false; int res = _SUCCESS; __le16 *fctrl = &pwlanhdr->frame_ctl; struct sta_info *psta; int bmcst = IS_MCAST(pattrib->ra); if (pattrib->psta) { psta = pattrib->psta; } else { if (bmcst) { psta = rtw_get_bcmc_stainfo(padapter); } else { psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); } } memset(hdr, 0, WLANHDR_OFFSET); SetFrameSubType(fctrl, pattrib->subtype); if (pattrib->subtype & WIFI_DATA_TYPE) { if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) { /* to_ds = 1, fr_ds = 0; */ /* Data transfer to AP */ SetToDs(fctrl); memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN); if (pqospriv->qos_option) qos_option = true; } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* to_ds = 0, fr_ds = 1; */ SetFrDs(fctrl); memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN); if (psta->qos_option) qos_option = true; } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN); memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); if (psta->qos_option) qos_option = true; } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv))); res = _FAIL; goto exit; } if (pattrib->mdata) SetMData(fctrl); if (pattrib->encrypt) 
SetPrivacy(fctrl); if (qos_option) { qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); if (pattrib->priority) SetPriority(qc, pattrib->priority); SetEOSP(qc, pattrib->eosp); SetAckpolicy(qc, pattrib->ack_policy); } /* TODO: fill HT Control Field */ /* Update Seq Num will be handled by f/w */ if (psta) { psta->sta_xmitpriv.txseq_tid[pattrib->priority]++; psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF; pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority]; SetSeqNum(hdr, pattrib->seqnum); /* check if enable ampdu */ if (pattrib->ht_en && psta->htpriv.ampdu_enable) { if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) pattrib->ampdu_en = true; } /* re-check if enable ampdu by BA_starting_seqctrl */ if (pattrib->ampdu_en) { u16 tx_seq; tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f]; /* check BA_starting_seqctrl */ if (SN_LESS(pattrib->seqnum, tx_seq)) { pattrib->ampdu_en = false;/* AGG BK */ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) { psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff; pattrib->ampdu_en = true;/* AGG EN */ } else { psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff; pattrib->ampdu_en = true;/* AGG EN */ } } } } exit: return res; } s32 rtw_txframes_pending(struct adapter *padapter) { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; return (!list_empty(&pxmitpriv->be_pending.queue) || !list_empty(&pxmitpriv->bk_pending.queue) || !list_empty(&pxmitpriv->vi_pending.queue) || !list_empty(&pxmitpriv->vo_pending.queue)); } s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib) { struct sta_info *psta; struct tx_servq *ptxservq; int priority = pattrib->priority; psta = pattrib->psta; switch (priority) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); break; case 0: case 3: default: ptxservq = 
&(psta->sta_xmitpriv.be_q); break; } return ptxservq->qcnt; } /* * Calculate wlan 802.11 packet MAX size from pkt_attrib * This function doesn't consider fragment case */ u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib) { u32 len = 0; len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */ len += SNAP_SIZE + sizeof(u16); /* LLC */ len += pattrib->pktlen; if (pattrib->encrypt == _TKIP_) len += 8; /* MIC */ len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */ return len; } /* This sub-routine will perform all the following: 1. remove 802.3 header. 2. create wlan_header, based on the info in pxmitframe 3. append sta's iv/ext-iv 4. append LLC 5. move frag chunk from pframe to pxmitframe->mem 6. apply sw-encrypt, if necessary. */ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe) { struct pkt_file pktfile; s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz; size_t addr; u8 *pframe, *mem_start; u8 hw_hdr_offset; struct sta_info *psta; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; u8 *pbuf_start; s32 bmcst = IS_MCAST(pattrib->ra); s32 res = _SUCCESS; psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if (psta == NULL) return _FAIL; if (pxmitframe->buf_addr == NULL) { DBG_88E("==> %s buf_addr == NULL\n", __func__); return _FAIL; } pbuf_start = pxmitframe->buf_addr; hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ); mem_start = pbuf_start + hw_hdr_offset; if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n")); DBG_88E("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n"); res = _FAIL; goto exit; } _rtw_open_pktfile(pkt, &pktfile); _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen); frg_inx = 0; frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */ while (1) { llc_sz = 0; 
mpdu_len = frg_len; pframe = mem_start; SetMFrag(mem_start); pframe += pattrib->hdrlen; mpdu_len -= pattrib->hdrlen; /* adding icv, if necessary... */ if (pattrib->iv_len) { switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); break; case _TKIP_: if (bmcst) TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else TKIP_IV(pattrib->iv, psta->dot11txpn, 0); break; case _AES_: if (bmcst) AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx); else AES_IV(pattrib->iv, psta->dot11txpn, 0); break; } memcpy(pframe, pattrib->iv, pattrib->iv_len); RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("rtw_xmitframe_coalesce: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n", padapter->securitypriv.dot11PrivacyKeyIndex, pattrib->iv[3], *pframe, *(pframe+1), *(pframe+2), *(pframe+3))); pframe += pattrib->iv_len; mpdu_len -= pattrib->iv_len; } if (frg_inx == 0) { llc_sz = rtw_put_snap(pframe, pattrib->ether_type); pframe += llc_sz; mpdu_len -= llc_sz; } if ((pattrib->icv_len > 0) && (pattrib->bswenc)) { mpdu_len -= pattrib->icv_len; } if (bmcst) { /* don't do fragment to broadcat/multicast packets */ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen); } else { mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len); } pframe += mem_sz; if ((pattrib->icv_len > 0) && (pattrib->bswenc)) { memcpy(pframe, pattrib->icv, pattrib->icv_len); pframe += pattrib->icv_len; } frg_inx++; if (bmcst || rtw_endofpktfile(&pktfile)) { pattrib->nr_frags = frg_inx; pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) + ((pattrib->bswenc) ? 
pattrib->icv_len : 0) + mem_sz; ClearMFrag(mem_start); break; } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__)); } addr = (size_t)(pframe); mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset; memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen); } if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n")); DBG_88E("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"); res = _FAIL; goto exit; } xmitframe_swencrypt(padapter, pxmitframe); if (!bmcst) update_attrib_vcs_info(padapter, pxmitframe); else pattrib->vcs_mode = NONE_VCS; exit: return res; } /* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header * IEEE LLC/SNAP header contains 8 octets * First 3 octets comprise the LLC portion * SNAP portion, 5 octets, is divided into two fields: * Organizationally Unique Identifier(OUI), 3 octets, * type, defined by that organization, 2 octets. 
*/
/*
 * Write an IEEE 802.2 LLC/SNAP header for ethertype @h_proto at @data.
 * Returns the number of bytes written (SNAP_SIZE + 2-byte ethertype).
 */
s32 rtw_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *snap;
	u8 *oui;

	snap = (struct ieee80211_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;
	/* 0x8137 (IPX) and 0x80f3 (AppleTalk AARP) use the 802.1H OUI,
	 * everything else gets the RFC 1042 encapsulation OUI. */
	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];
	/* ethertype follows the SNAP header in network byte order */
	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
	return SNAP_SIZE + sizeof(u16);
}

/*
 * Recompute the virtual-carrier-sense (RTS/CTS) policy from the ERP IE in
 * @ie/@ie_len. With AUTO_VCS, protection bit 1 of the ERP info octet selects
 * RTS_CTS or CTS_TO_SELF per the configured vcs_type; otherwise NONE_VCS.
 */
void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
{
	uint protection;
	u8 *perp;
	int erp_len;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct registry_priv *pregistrypriv = &padapter->registrypriv;

	switch (pxmitpriv->vcs_setting) {
	case DISABLE_VCS:
		pxmitpriv->vcs = NONE_VCS;
		break;
	case ENABLE_VCS:
		/* keep the currently configured vcs mode */
		break;
	case AUTO_VCS:
	default:
		perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
		if (perp == NULL) {
			pxmitpriv->vcs = NONE_VCS;
		} else {
			/* ERP info octet, bit 1 = "use protection" */
			protection = (*(perp + 2)) & BIT(1);
			if (protection) {
				if (pregistrypriv->vcs_type == RTS_CTS)
					pxmitpriv->vcs = RTS_CTS;
				else
					pxmitpriv->vcs = CTS_TO_SELF;
			} else {
				pxmitpriv->vcs = NONE_VCS;
			}
		}
		break;
	}
}

/*
 * Account @sz transmitted bytes for a completed DATA frame: global tx_bytes,
 * the link-detect tx-ok counter, and (when a station is attached to the
 * frame) that station's per-STA packet/byte stats.
 */
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
{
	struct sta_info *psta = NULL;
	struct stainfo_stats *pstats = NULL;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	/* low nibble of frame_tag carries the frame category */
	if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
		pxmitpriv->tx_bytes += sz;
		pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pxmitframe->agg_num;
		psta = pxmitframe->attrib.psta;
		if (psta) {
			pstats = &psta->sta_stats;
			pstats->tx_pkts += pxmitframe->agg_num;
			pstats->tx_bytes += sz;
		}
	}
}

/*
 * Pop one xmit_buf from the free extension-buffer queue, or return NULL if
 * the queue is empty. Runs under the queue's irqsave lock.
 */
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
{
	unsigned long irql;
	struct xmit_buf *pxmitbuf = NULL;
	struct list_head *plist, *phead;
	struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;

	spin_lock_irqsave(&pfree_queue->lock, irql);
	if (list_empty(&pfree_queue->queue)) {
		pxmitbuf = NULL;
	} else {
		phead =
get_list_head(pfree_queue); plist = phead->next; pxmitbuf = container_of(plist, struct xmit_buf, list); list_del_init(&(pxmitbuf->list)); } if (pxmitbuf != NULL) { pxmitpriv->free_xmit_extbuf_cnt--; pxmitbuf->priv_data = NULL; /* pxmitbuf->ext_tag = true; */ if (pxmitbuf->sctx) { DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); } } spin_unlock_irqrestore(&pfree_queue->lock, irql); return pxmitbuf; } s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { unsigned long irql; struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue; if (pxmitbuf == NULL) return _FAIL; spin_lock_irqsave(&pfree_queue->lock, irql); list_del_init(&pxmitbuf->list); list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue)); pxmitpriv->free_xmit_extbuf_cnt++; spin_unlock_irqrestore(&pfree_queue->lock, irql); return _SUCCESS; } struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv) { unsigned long irql; struct xmit_buf *pxmitbuf = NULL; struct list_head *plist, *phead; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; /* DBG_88E("+rtw_alloc_xmitbuf\n"); */ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql); if (list_empty(&pfree_xmitbuf_queue->queue)) { pxmitbuf = NULL; } else { phead = get_list_head(pfree_xmitbuf_queue); plist = phead->next; pxmitbuf = container_of(plist, struct xmit_buf, list); list_del_init(&(pxmitbuf->list)); } if (pxmitbuf != NULL) { pxmitpriv->free_xmitbuf_cnt--; pxmitbuf->priv_data = NULL; if (pxmitbuf->sctx) { DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); } } spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql); return pxmitbuf; } s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { unsigned long irql; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; if (pxmitbuf == NULL) return _FAIL; if (pxmitbuf->sctx) { 
DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE); } if (pxmitbuf->ext_tag) { rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf); } else { spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql); list_del_init(&pxmitbuf->list); list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue)); pxmitpriv->free_xmitbuf_cnt++; spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql); } return _SUCCESS; } /* Calling context: 1. OS_TXENTRY 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack) If we turn on USE_RXTHREAD, then, no need for critical section. Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... Must be very very cautious... */ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */ { /* Please remember to use all the osdep_service api, and lock/unlock or _enter/_exit critical to protect pfree_xmit_queue */ struct xmit_frame *pxframe = NULL; struct list_head *plist, *phead; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; spin_lock_bh(&pfree_xmit_queue->lock); if (list_empty(&pfree_xmit_queue->queue)) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt)); pxframe = NULL; } else { phead = get_list_head(pfree_xmit_queue); plist = phead->next; pxframe = container_of(plist, struct xmit_frame, list); list_del_init(&(pxframe->list)); } if (pxframe != NULL) { /* default value setting */ pxmitpriv->free_xmitframe_cnt--; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt)); pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib)); /* pxframe->attrib.psta = NULL; */ pxframe->frame_tag = DATA_FRAMETAG; pxframe->pkt = NULL; pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */ pxframe->agg_num = 1; pxframe->ack_report = 0; } 
spin_unlock_bh(&pfree_xmit_queue->lock); return pxframe; } s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe) { struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; struct adapter *padapter = pxmitpriv->adapter; struct sk_buff *pndis_pkt = NULL; if (pxmitframe == NULL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n")); goto exit; } spin_lock_bh(&pfree_xmit_queue->lock); list_del_init(&pxmitframe->list); if (pxmitframe->pkt) { pndis_pkt = pxmitframe->pkt; pxmitframe->pkt = NULL; } list_add_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue)); pxmitpriv->free_xmitframe_cnt++; RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt)); spin_unlock_bh(&pfree_xmit_queue->lock); if (pndis_pkt) rtw_os_pkt_complete(padapter, pndis_pkt); exit: return _SUCCESS; } void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue) { struct list_head *plist, *phead; struct xmit_frame *pxmitframe; spin_lock_bh(&(pframequeue->lock)); phead = get_list_head(pframequeue); plist = phead->next; while (phead != plist) { pxmitframe = container_of(plist, struct xmit_frame, list); plist = plist->next; rtw_free_xmitframe(pxmitpriv, pxmitframe); } spin_unlock_bh(&(pframequeue->lock)); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) { if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n")); /* pxmitframe->pkt = NULL; */ return _FAIL; } return _SUCCESS; } static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, struct __queue *pframe_queue) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; xmitframe_phead = get_list_head(pframe_queue); 
xmitframe_plist = xmitframe_phead->next; if (xmitframe_phead != xmitframe_plist) { pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = xmitframe_plist->next; list_del_init(&pxmitframe->list); ptxservq->qcnt--; } return pxmitframe; } struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry) { struct list_head *sta_plist, *sta_phead; struct hw_xmit *phwxmit; struct tx_servq *ptxservq = NULL; struct __queue *pframe_queue = NULL; struct xmit_frame *pxmitframe = NULL; struct adapter *padapter = pxmitpriv->adapter; struct registry_priv *pregpriv = &padapter->registrypriv; int i, inx[4]; inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; if (pregpriv->wifi_spec == 1) { int j; for (j = 0; j < 4; j++) inx[j] = pxmitpriv->wmm_para_seq[j]; } spin_lock_bh(&pxmitpriv->lock); for (i = 0; i < entry; i++) { phwxmit = phwxmit_i + inx[i]; sta_phead = get_list_head(phwxmit->sta_queue); sta_plist = sta_phead->next; while (sta_phead != sta_plist) { ptxservq = container_of(sta_plist, struct tx_servq, tx_pending); pframe_queue = &ptxservq->sta_pending; pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue); if (pxmitframe) { phwxmit->accnt--; /* Remove sta node when there are no pending packets. 
*/ if (list_empty(&pframe_queue->queue)) /* must be done after get_next and before break */ list_del_init(&ptxservq->tx_pending); goto exit; } sta_plist = sta_plist->next; } } exit: spin_unlock_bh(&pxmitpriv->lock); return pxmitframe; } struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, int up, u8 *ac) { struct tx_servq *ptxservq; switch (up) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); *(ac) = 3; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n")); break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); *(ac) = 1; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n")); break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); *(ac) = 0; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n")); break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); *(ac) = 2; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n")); break; } return ptxservq; } /* * Will enqueue pxmitframe to the proper queue, * and indicate it to xx_pending list..... 
*/
/*
 * Enqueue @pxmitframe on the per-station service queue matching its
 * priority, and link that service queue onto the proper hw_xmit (AC)
 * pending list if it was idle. Returns _SUCCESS, or _FAIL when no
 * sta_info can be resolved for the frame's receiver address.
 */
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
	u8 ac_index;
	struct sta_info *psta;
	struct tx_servq *ptxservq;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
	int res = _SUCCESS;

	/* prefer the station cached in the attrib; else look it up by RA */
	if (pattrib->psta) {
		psta = pattrib->psta;
	} else {
		psta = rtw_get_stainfo(pstapriv, pattrib->ra);
	}
	if (psta == NULL) {
		res = _FAIL;
		DBG_88E("rtw_xmit_classifier: psta == NULL\n");
		RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmit_classifier: psta == NULL\n"));
		goto exit;
	}
	ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
	/* first frame for this STA/AC: attach the service queue to the AC list */
	if (list_empty(&ptxservq->tx_pending))
		list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
	list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
	ptxservq->qcnt++;
	phwxmits[ac_index].accnt++;
exit:
	return res;
}

/*
 * Allocate the HWXMIT_ENTRY hw_xmit descriptors and bind each slot to its
 * AC pending queue (0=VO, 1=VI, 2=BE, 3=BK).
 * NOTE(review): the kcalloc result is not checked; on allocation failure
 * the hwxmits[i] writes below dereference NULL — confirm whether callers
 * can tolerate an early return here, since the void interface cannot
 * report the failure.
 */
void rtw_alloc_hwxmits(struct adapter *padapter)
{
	struct hw_xmit *hwxmits;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
	pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
				     sizeof(struct hw_xmit), GFP_KERNEL);
	hwxmits = pxmitpriv->hwxmits;
	hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
	hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
	hwxmits[2].sta_queue = &pxmitpriv->be_pending;
	hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
}

/* Release the hw_xmit array allocated by rtw_alloc_hwxmits(). */
void rtw_free_hwxmits(struct adapter *padapter)
{
	struct hw_xmit *hwxmits;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	hwxmits = pxmitpriv->hwxmits;
	kfree(hwxmits);
}

/* Zero the pending-frame accounting counter of @entry hw_xmit slots. */
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
{
	int i;

	for (i = 0; i < entry; i++, phwxmit++)
		phwxmit->accnt = 0;
}

/*
 * Map the frame's queue-select value (qsel) to a hardware queue index:
 * UP 0/3 -> BE, 1/2 -> BK, 4/5 -> VI, 6/7 -> VO, plus the special
 * beacon/high/management selectors (continued on the next chunk).
 */
u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
{
	u32 addr;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	switch (pattrib->qsel) {
	case 0:
	case 3:
		addr = BE_QUEUE_INX;
		break;
	case 1:
	case 2:
		addr = BK_QUEUE_INX;
		break;
	case 4:
	case 5:
		addr = VI_QUEUE_INX;
break; case 6: case 7: addr = VO_QUEUE_INX; break; case 0x10: addr = BCN_QUEUE_INX; break; case 0x11:/* BC/MC in PS (HIQ) */ addr = HIGH_QUEUE_INX; break; case 0x12: default: addr = MGT_QUEUE_INX; break; } return addr; } static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib) { u8 qsel; qsel = pattrib->priority; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("### do_queue_select priority=%d , qsel = %d\n", pattrib->priority , qsel)); pattrib->qsel = qsel; } /* * The main transmit(tx) entry * * Return * 1 enqueue * 0 success, hardware will handle this xmit frame(packet) * <0 fail */ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt) { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe = NULL; s32 res; pxmitframe = rtw_alloc_xmitframe(pxmitpriv); if (pxmitframe == NULL) { RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n")); DBG_88E("DBG_TX_DROP_FRAME %s no more pxmitframe\n", __func__); return -1; } res = update_attrib(padapter, *ppkt, &pxmitframe->attrib); if (res == _FAIL) { RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n")); rtw_free_xmitframe(pxmitpriv, pxmitframe); return -1; } pxmitframe->pkt = *ppkt; rtw_led_control(padapter, LED_CTL_TX); do_queue_select(padapter, &pxmitframe->attrib); #ifdef CONFIG_88EU_AP_MODE spin_lock_bh(&pxmitpriv->lock); if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) { spin_unlock_bh(&pxmitpriv->lock); return 1; } spin_unlock_bh(&pxmitpriv->lock); #endif if (rtw_hal_xmit(padapter, pxmitframe) == false) return 1; return 0; } #if defined(CONFIG_88EU_AP_MODE) int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe) { int ret = false; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int bmcst = IS_MCAST(pattrib->ra); if 
(check_fwstate(pmlmepriv, WIFI_AP_STATE) == false) return ret; if (pattrib->psta) psta = pattrib->psta; else psta = rtw_get_stainfo(pstapriv, pattrib->ra); if (psta == NULL) return ret; if (pattrib->triggered == 1) { if (bmcst) pattrib->qsel = 0x11;/* HIQ */ return ret; } if (bmcst) { spin_lock_bh(&psta->sleep_q.lock); if (pstapriv->sta_dz_bitmap) {/* if any one sta is in ps mode */ list_del_init(&pxmitframe->list); list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q)); psta->sleepq_len++; pstapriv->tim_bitmap |= BIT(0);/* */ pstapriv->sta_dz_bitmap |= BIT(0); update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after update bcn */ ret = true; } spin_unlock_bh(&psta->sleep_q.lock); return ret; } spin_lock_bh(&psta->sleep_q.lock); if (psta->state&WIFI_SLEEP_STATE) { u8 wmmps_ac = 0; if (pstapriv->sta_dz_bitmap&BIT(psta->aid)) { list_del_init(&pxmitframe->list); list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q)); psta->sleepq_len++; switch (pattrib->priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(0); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(0); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(0); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(0); break; } if (wmmps_ac) psta->sleepq_ac_len++; if (((psta->has_legacy_ac) && (!wmmps_ac)) || ((!psta->has_legacy_ac) && (wmmps_ac))) { pstapriv->tim_bitmap |= BIT(psta->aid); if (psta->sleepq_len == 1) { /* update BCN for TIM IE */ update_beacon(padapter, _TIM_IE_, NULL, false); } } ret = true; } } spin_unlock_bh(&psta->sleep_q.lock); return ret; } static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue) { struct list_head *plist, *phead; u8 ac_index; struct tx_servq *ptxservq; struct pkt_attrib *pattrib; struct xmit_frame *pxmitframe; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; phead = get_list_head(pframequeue); plist = phead->next; while (phead != plist) { pxmitframe 
= container_of(plist, struct xmit_frame, list); plist = plist->next; xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe); pattrib = &pxmitframe->attrib; ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); ptxservq->qcnt--; phwxmits[ac_index].accnt--; } } void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta) { struct sta_info *psta_bmc; struct sta_xmit_priv *pstaxmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; pstaxmitpriv = &psta->sta_xmitpriv; /* for BC/MC Frames */ psta_bmc = rtw_get_bcmc_stainfo(padapter); spin_lock_bh(&pxmitpriv->lock); psta->state |= WIFI_SLEEP_STATE; pstapriv->sta_dz_bitmap |= BIT(psta->aid); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); /* for BC/MC Frames */ pstaxmitpriv = &psta_bmc->sta_xmitpriv; dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); spin_unlock_bh(&pxmitpriv->lock); } void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) { u8 update_mask = 0, wmmps_ac = 0; struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; spin_lock_bh(&psta->sleep_q.lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = xmitframe_phead->next; while (xmitframe_phead != xmitframe_plist) { pxmitframe = 
container_of(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = xmitframe_plist->next; list_del_init(&pxmitframe->list); switch (pxmitframe->attrib.priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(1); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(1); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(1); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(1); break; } psta->sleepq_len--; if (psta->sleepq_len > 0) pxmitframe->attrib.mdata = 1; else pxmitframe->attrib.mdata = 0; if (wmmps_ac) { psta->sleepq_ac_len--; if (psta->sleepq_ac_len > 0) { pxmitframe->attrib.mdata = 1; pxmitframe->attrib.eosp = 0; } else { pxmitframe->attrib.mdata = 0; pxmitframe->attrib.eosp = 1; } } pxmitframe->attrib.triggered = 1; spin_unlock_bh(&psta->sleep_q.lock); if (rtw_hal_xmit(padapter, pxmitframe)) rtw_os_xmit_complete(padapter, pxmitframe); spin_lock_bh(&psta->sleep_q.lock); } if (psta->sleepq_len == 0) { pstapriv->tim_bitmap &= ~BIT(psta->aid); update_mask = BIT(0); if (psta->state&WIFI_SLEEP_STATE) psta->state ^= WIFI_SLEEP_STATE; if (psta->state & WIFI_STA_ALIVE_CHK_STATE) { psta->expire_to = pstapriv->expire_to; psta->state ^= WIFI_STA_ALIVE_CHK_STATE; } pstapriv->sta_dz_bitmap &= ~BIT(psta->aid); } spin_unlock_bh(&psta->sleep_q.lock); /* for BC/MC Frames */ psta_bmc = rtw_get_bcmc_stainfo(padapter); if (!psta_bmc) return; if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no any sta in ps mode */ spin_lock_bh(&psta_bmc->sleep_q.lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); xmitframe_plist = xmitframe_phead->next; while (xmitframe_phead != xmitframe_plist) { pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = xmitframe_plist->next; list_del_init(&pxmitframe->list); psta_bmc->sleepq_len--; if (psta_bmc->sleepq_len > 0) pxmitframe->attrib.mdata = 1; else pxmitframe->attrib.mdata = 0; pxmitframe->attrib.triggered = 1; spin_unlock_bh(&psta_bmc->sleep_q.lock); if (rtw_hal_xmit(padapter, 
pxmitframe)) rtw_os_xmit_complete(padapter, pxmitframe); spin_lock_bh(&psta_bmc->sleep_q.lock); } if (psta_bmc->sleepq_len == 0) { pstapriv->tim_bitmap &= ~BIT(0); pstapriv->sta_dz_bitmap &= ~BIT(0); update_mask |= BIT(1); } spin_unlock_bh(&psta_bmc->sleep_q.lock); } if (update_mask) update_beacon(padapter, _TIM_IE_, NULL, false); } void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta) { u8 wmmps_ac = 0; struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; spin_lock_bh(&psta->sleep_q.lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = xmitframe_phead->next; while (xmitframe_phead != xmitframe_plist) { pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list); xmitframe_plist = xmitframe_plist->next; switch (pxmitframe->attrib.priority) { case 1: case 2: wmmps_ac = psta->uapsd_bk&BIT(1); break; case 4: case 5: wmmps_ac = psta->uapsd_vi&BIT(1); break; case 6: case 7: wmmps_ac = psta->uapsd_vo&BIT(1); break; case 0: case 3: default: wmmps_ac = psta->uapsd_be&BIT(1); break; } if (!wmmps_ac) continue; list_del_init(&pxmitframe->list); psta->sleepq_len--; psta->sleepq_ac_len--; if (psta->sleepq_ac_len > 0) { pxmitframe->attrib.mdata = 1; pxmitframe->attrib.eosp = 0; } else { pxmitframe->attrib.mdata = 0; pxmitframe->attrib.eosp = 1; } pxmitframe->attrib.triggered = 1; if (rtw_hal_xmit(padapter, pxmitframe) == true) rtw_os_xmit_complete(padapter, pxmitframe); if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) { pstapriv->tim_bitmap &= ~BIT(psta->aid); /* update BCN for TIM IE */ update_beacon(padapter, _TIM_IE_, NULL, false); } } spin_unlock_bh(&psta->sleep_q.lock); } #endif void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms) { sctx->timeout_ms = timeout_ms; sctx->submit_time = jiffies; init_completion(&sctx->done); sctx->status = RTW_SCTX_SUBMITTED; } int rtw_sctx_wait(struct submit_ctx *sctx) { 
int ret = _FAIL;	/* body of rtw_sctx_wait(), opened in the previous chunk */
	unsigned long expire;
	int status = 0;

	/* timeout_ms == 0 means wait forever */
	expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
	if (!wait_for_completion_timeout(&sctx->done, expire)) {
		/* timeout, do something?? */
		status = RTW_SCTX_DONE_TIMEOUT;
		DBG_88E("%s timeout\n", __func__);
	} else {
		status = sctx->status;
	}

	if (status == RTW_SCTX_DONE_SUCCESS)
		ret = _SUCCESS;

	return ret;
}

/*
 * Return true for completion statuses that are worth a diagnostic print
 * (buffer alloc/free recycling, driver stop, device removal, unknown).
 * NOTE(review): "waring" looks like a typo for "warning"; left as-is since
 * other callers of this static helper may exist outside this chunk.
 */
static bool rtw_sctx_chk_waring_status(int status)
{
	switch (status) {
	case RTW_SCTX_DONE_UNKNOWN:
	case RTW_SCTX_DONE_BUF_ALLOC:
	case RTW_SCTX_DONE_BUF_FREE:
	case RTW_SCTX_DONE_DRV_STOP:
	case RTW_SCTX_DONE_DEV_REMOVE:
		return true;
	default:
		return false;
	}
}

/*
 * Complete a submit context with @status, waking any rtw_sctx_wait()er,
 * and clear the caller's pointer so the context cannot be completed twice.
 * No-op when *sctx is already NULL.
 */
void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
{
	if (*sctx) {
		if (rtw_sctx_chk_waring_status(status))
			DBG_88E("%s status:%d\n", __func__, status);
		(*sctx)->status = status;
		complete(&((*sctx)->done));
		*sctx = NULL;
	}
}

/* Complete a submit context with the success status. */
void rtw_sctx_done(struct submit_ctx **sctx)
{
	rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
}

/*
 * Arm the xmit-priv ack context and block up to @timeout_ms for the tx-ack
 * completion. Returns _SUCCESS only when the context completed with
 * RTW_SCTX_DONE_SUCCESS (via rtw_sctx_wait()).
 */
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
	struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;

	pack_tx_ops->submit_time = jiffies;
	pack_tx_ops->timeout_ms = timeout_ms;
	pack_tx_ops->status = RTW_SCTX_SUBMITTED;

	return rtw_sctx_wait(pack_tx_ops);
}

/*
 * Complete the pending tx-ack context with @status, but only when an
 * ack-tx wait is actually outstanding (pxmitpriv->ack_tx set).
 */
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
{
	struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;

	if (pxmitpriv->ack_tx)
		rtw_sctx_done_err(&pack_tx_ops, status);
	else
		DBG_88E("%s ack_tx not set\n", __func__);
}
gpl-2.0
shabinp555/https-github.com-torvalds-linux
drivers/gpu/drm/radeon/atombios_crtc.c
658
72465
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"

/*
 * atombios_overscan_setup - program the SetCRTC_OverScan atom table.
 *
 * Computes per-edge overscan borders for the given rmx (scaler) type:
 * RMX_CENTER centers the mode inside the adjusted mode, RMX_ASPECT
 * letter/pillar-boxes based on the aspect-ratio cross products, and
 * RMX_FULL (and the default) just applies the CRTC's configured
 * h/v borders.  The result is handed to the AtomBIOS table.
 */
static void atombios_overscan_setup(struct drm_crtc *crtc,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
	int a1, a2;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = radeon_crtc->crtc_id;

	switch (radeon_crtc->rmx_type) {
	case RMX_CENTER:
		/* split the difference between adjusted and native mode evenly */
		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
		break;
	case RMX_ASPECT:
		/* cross-multiply to compare aspect ratios without division */
		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;

		if (a1 > a2) {
			/* source is wider: pillarbox left/right */
			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
		} else if (a2 > a1) {
			/* source is taller: letterbox top/bottom */
			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
		}
		break;
	case RMX_FULL:
	default:
		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
		break;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_scaler_setup - program the EnableScaler atom table.
 *
 * Picks the scaler/TV-standard configuration for this CRTC.  Pre-AVIVO
 * parts only have a scaler on CRTC 0, hence the early return for other
 * CRTCs.  RV515..R580 additionally need a forced TV scaler setup after
 * the table runs (atom_rv515_force_tv_scaler).
 */
static void atombios_scaler_setup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	ENABLE_SCALER_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	/* fixme - fill in enc_priv for atom dac */
	enum radeon_tv_std tv_std = TV_STD_NTSC;
	bool is_tv = false, is_cv = false;

	/* pre-AVIVO chips only have a scaler on the first crtc */
	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
		return;

	if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
		tv_std = tv_dac->tv_std;
		is_tv = true;
	}

	memset(&args, 0, sizeof(args));

	args.ucScaler = radeon_crtc->crtc_id;

	if (is_tv) {
		/* map the radeon TV standard onto the atom TV standard */
		switch (tv_std) {
		case TV_STD_NTSC:
		default:
			args.ucTVStandard = ATOM_TV_NTSC;
			break;
		case TV_STD_PAL:
			args.ucTVStandard = ATOM_TV_PAL;
			break;
		case TV_STD_PAL_M:
			args.ucTVStandard = ATOM_TV_PALM;
			break;
		case TV_STD_PAL_60:
			args.ucTVStandard = ATOM_TV_PAL60;
			break;
		case TV_STD_NTSC_J:
			args.ucTVStandard = ATOM_TV_NTSCJ;
			break;
		case TV_STD_SCART_PAL:
			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
			break;
		case TV_STD_SECAM:
			args.ucTVStandard = ATOM_TV_SECAM;
			break;
		case TV_STD_PAL_CN:
			args.ucTVStandard = ATOM_TV_PALCN;
			break;
		}
		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
	} else if (is_cv) {
		/* NOTE(review): is_cv is never set true in this path; the
		 * component-video branch looks currently unreachable. */
		args.ucTVStandard = ATOM_TV_CV;
		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
	} else {
		switch (radeon_crtc->rmx_type) {
		case RMX_FULL:
			args.ucEnable = ATOM_SCALER_EXPANSION;
			break;
		case RMX_CENTER:
			args.ucEnable = ATOM_SCALER_CENTER;
			break;
		case RMX_ASPECT:
			args.ucEnable = ATOM_SCALER_EXPANSION;
			break;
		default:
			if (ASIC_IS_AVIVO(rdev))
				args.ucEnable = ATOM_SCALER_DISABLE;
			else
				args.ucEnable = ATOM_SCALER_CENTER;
			break;
		}
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	if ((is_tv || is_cv)
	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
		atom_rv515_force_tv_scaler(rdev, radeon_crtc);
	}
}

/*
 * atombios_lock_crtc - (un)lock the CRTC double-buffered registers
 * via the UpdateCRTC_DoubleBufferRegisters atom table.
 * @lock: nonzero locks, zero unlocks (passed straight through as ucEnable).
 */
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int index =
	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
	ENABLE_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = radeon_crtc->crtc_id;
	args.ucEnable = lock;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_enable_crtc - enable/disable the CRTC via the EnableCRTC
 * atom table.  @state is ATOM_ENABLE or ATOM_DISABLE.
 */
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
	ENABLE_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = radeon_crtc->crtc_id;
	args.ucEnable = state;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_enable_crtc_memreq - gate CRTC memory requests via the
 * EnableCRTCMemReq atom table (DCE3..DCE5 only, see dpms below).
 */
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev =
		dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
	ENABLE_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = radeon_crtc->crtc_id;
	args.ucEnable = state;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* per-CRTC VGA control registers, indexed by crtc_id (up to 6 on evergreen) */
static const u32 vga_control_regs[6] =
{
	AVIVO_D1VGA_CONTROL,
	AVIVO_D2VGA_CONTROL,
	EVERGREEN_D3VGA_CONTROL,
	EVERGREEN_D4VGA_CONTROL,
	EVERGREEN_D5VGA_CONTROL,
	EVERGREEN_D6VGA_CONTROL,
};

/*
 * atombios_blank_crtc - blank/unblank the CRTC via the BlankCRTC atom
 * table.  On DCE8 the VGA control register for this crtc is temporarily
 * set (low bit) around the table call and then restored afterwards —
 * presumably a workaround required by the DCE8 atom tables (TODO confirm).
 */
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
	BLANK_CRTC_PS_ALLOCATION args;
	u32 vga_control = 0;

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE8(rdev)) {
		vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
	}

	args.ucCRTC = radeon_crtc->crtc_id;
	args.ucBlanking = state;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	if (ASIC_IS_DCE8(rdev)) {
		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
	}
}

/*
 * atombios_powergate_crtc - power-gate the display pipe via the
 * EnableDispPowerGating atom table.  @state is ATOM_ENABLE/ATOM_DISABLE.
 */
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;

	memset(&args, 0, sizeof(args));

	args.ucDispPipeId = radeon_crtc->crtc_id;
	args.ucEnable = state;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_crtc_dpms - DRM dpms entry point for atom CRTCs.
 *
 * ON: enable the crtc, (DCE3..DCE5) enable its memory requests, unblank,
 * notify vblank of the modeset and reload the LUT.  STANDBY/SUSPEND/OFF
 * undo the same steps in reverse order.  The memreq table only exists on
 * DCE3+ and is gone again on DCE6+, hence the version check.
 * Finally recompute power-management clocks to match the new dpms state.
 */
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		radeon_crtc->enabled = true;
		atombios_enable_crtc(crtc, ATOM_ENABLE);
		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
		atombios_blank_crtc(crtc, ATOM_DISABLE);
		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
		radeon_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
		if (radeon_crtc->enabled)
			atombios_blank_crtc(crtc, ATOM_ENABLE);
		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
		atombios_enable_crtc(crtc, ATOM_DISABLE);
		radeon_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	radeon_pm_compute_clocks(rdev);
}

/*
 * atombios_set_crtc_dtd_timing - program CRTC timing using the
 * SetCRTC_UsingDTDTiming atom table.  Sizes/blanking are adjusted for
 * the configured h/v borders; sync offsets are expressed relative to
 * the end of the active region, as the DTD format expects.
 */
static void
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
			     struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
	u16 misc = 0;

	memset(&args, 0, sizeof(args));
	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
	args.usH_Blanking_Time =
		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
	args.usV_Blanking_Time =
		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
	args.usH_SyncOffset =
		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
	args.usH_SyncWidth =
		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
	args.usV_SyncOffset =
		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
	args.usV_SyncWidth =
		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
	args.ucH_Border = radeon_crtc->h_border;
	args.ucV_Border = radeon_crtc->v_border;

	/* translate DRM mode flags into atom misc bits */
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		misc |= ATOM_VSYNC_POLARITY;
	if (mode->flags &
	    DRM_MODE_FLAG_NHSYNC)
		misc |= ATOM_HSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_CSYNC)
		misc |= ATOM_COMPOSITESYNC;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		misc |= ATOM_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		misc |= ATOM_DOUBLE_CLOCK_MODE;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;

	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
	args.ucCRTC = radeon_crtc->crtc_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_crtc_set_timing - program CRTC timing using the
 * SetCRTC_Timing atom table (totals/display/sync given directly, unlike
 * the DTD variant above which uses sizes and blanking periods).
 */
static void atombios_crtc_set_timing(struct drm_crtc *crtc,
				     struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
	u16 misc = 0;

	memset(&args, 0, sizeof(args));
	args.usH_Total = cpu_to_le16(mode->crtc_htotal);
	args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
	args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
	args.usH_SyncWidth =
		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
	args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
	args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
	args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
	args.usV_SyncWidth =
		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);

	args.ucOverscanRight = radeon_crtc->h_border;
	args.ucOverscanLeft = radeon_crtc->h_border;
	args.ucOverscanBottom = radeon_crtc->v_border;
	args.ucOverscanTop = radeon_crtc->v_border;

	/* translate DRM mode flags into atom misc bits */
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		misc |= ATOM_VSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		misc |= ATOM_HSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_CSYNC)
		misc |= ATOM_COMPOSITESYNC;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		misc |= ATOM_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		misc |= ATOM_DOUBLE_CLOCK_MODE;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;

	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
	args.ucCRTC = radeon_crtc->crtc_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_disable_ss - turn off spread spectrum on a PLL by direct
 * register writes (no atom table).  DCE4 and AVIVO use different
 * registers; DCPLL/invalid PLL ids are left untouched.
 */
static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
{
	u32 ss_cntl;

	if (ASIC_IS_DCE4(rdev)) {
		switch (pll_id) {
		case ATOM_PPLL1:
			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
			WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
			break;
		case ATOM_PPLL2:
			ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
			WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
			break;
		case ATOM_DCPLL:
		case ATOM_PPLL_INVALID:
			return;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		switch (pll_id) {
		case ATOM_PPLL1:
			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
			ss_cntl &= ~1;
			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
			break;
		case ATOM_PPLL2:
			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
			ss_cntl &= ~1;
			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
			break;
		case ATOM_DCPLL:
		case ATOM_PPLL_INVALID:
			return;
		}
	}
}

/* parameter layouts for the various EnableSpreadSpectrumOnPPLL revisions */
union atom_enable_ss {
	ENABLE_LVDS_SS_PARAMETERS lvds_ss;
	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};

/*
 * atombios_crtc_program_ss - enable/disable spread spectrum on a PLL,
 * choosing the parameter revision by display-engine generation
 * (DCE5/DCE4/DCE3/AVIVO/legacy).  See body for the shared-PLL guard.
 */
static void atombios_crtc_program_ss(struct radeon_device *rdev,
				     int enable,
				     int pll_id,
				     int crtc_id,
				     struct radeon_atom_ss *ss)
{
	unsigned i;
	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
	union atom_enable_ss args;

	if (enable) {
		/* Don't mess with SS if percentage is 0 or external ss.
		 * SS is already disabled previously, and disabling it
		 * again can cause display problems if the pll is already
		 * programmed.
		 */
		if (ss->percentage == 0)
			return;
		if (ss->type & ATOM_EXTERNAL_SS_MASK)
			return;
	} else {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->mode_info.crtcs[i] &&
			    rdev->mode_info.crtcs[i]->enabled &&
			    i != crtc_id &&
			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
				/* one other crtc is using this pll don't turn
				 * off spread spectrum as it might turn off
				 * display on active crtc
				 */
				return;
			}
		}
	}

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE5(rdev)) {
		/* v3: PLL selection is encoded into the SS type field */
		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
		args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
		switch (pll_id) {
		case ATOM_PPLL1:
			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
			break;
		case ATOM_PPLL2:
			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
			break;
		case ATOM_DCPLL:
			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
			break;
		case ATOM_PPLL_INVALID:
			return;
		}
		args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
		args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
		args.v3.ucEnable = enable;
	} else if (ASIC_IS_DCE4(rdev)) {
		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
		args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
		switch (pll_id) {
		case ATOM_PPLL1:
			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
			break;
		case ATOM_PPLL2:
			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
			break;
		case ATOM_DCPLL:
			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
			break;
		case ATOM_PPLL_INVALID:
			return;
		}
		args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
		args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
		args.v2.ucEnable = enable;
	} else if (ASIC_IS_DCE3(rdev)) {
		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
		args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
		args.v1.ucSpreadSpectrumStep = ss->step;
		args.v1.ucSpreadSpectrumDelay = ss->delay;
		args.v1.ucSpreadSpectrumRange = ss->range;
		args.v1.ucPpll = pll_id;
		args.v1.ucEnable = enable;
	} else if (ASIC_IS_AVIVO(rdev)) {
		/* the LVDS-style tables have no disable path; fall back to
		 * direct register writes when turning SS off */
		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
			atombios_disable_ss(rdev, pll_id);
			return;
		}
		args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
		args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
		args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
		args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
		args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
		args.lvds_ss_2.ucEnable = enable;
	} else {
		if (enable == ATOM_DISABLE) {
			atombios_disable_ss(rdev, pll_id);
			return;
		}
		args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
		args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
		/* step and delay share one packed byte in the v1 LVDS layout */
		args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
		args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
		args.lvds_ss.ucEnable = enable;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* parameter layouts for the AdjustDisplayPll table revisions */
union adjust_pixel_clock {
	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};

/*
 * atombios_adjust_pll - compute the adjusted pixel clock and pll flags
 * for a mode.  Sets radeon_crtc->pll_flags as a side effect, applies
 * deep-color scaling for HDMI, and on DCE3+ consults the AdjustDisplayPll
 * atom table (which may also dictate ref/post dividers).
 * Returns the adjusted clock in kHz units matching mode->clock.
 */
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder = radeon_crtc->encoder;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	u32 adjusted_clock = mode->clock;
	int encoder_mode = atombios_get_encoder_mode(encoder);
	u32 dp_clock = mode->clock;
	u32 clock = mode->clock;
	int bpc = radeon_crtc->bpc;
	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);

	/* reset the pll flags */
	radeon_crtc->pll_flags = 0;

	if (ASIC_IS_AVIVO(rdev)) {
		if ((rdev->family == CHIP_RS600) ||
		    (rdev->family == CHIP_RS690) ||
		    (rdev->family == CHIP_RS740))
			radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
						   RADEON_PLL_PREFER_CLOSEST_LOWER);

		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
		else
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;

		if (rdev->family < CHIP_RV770)
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
		/* use frac fb div on APUs */
		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
		/* use frac fb div on RS780/RS880 */
		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
	} else {
		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;

		if (mode->clock > 200000) /* range limits??? */
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
		else
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
	}

	/* pick up the DP link clock from the connector for LCD/DFP paths */
	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
		if (connector) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			struct radeon_connector_atom_dig *dig_connector =
				radeon_connector->con_priv;

			dp_clock = dig_connector->dp_clock;
		}
	}

	if (radeon_encoder->is_mst_encoder) {
		struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
		struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;

		dp_clock = dig_connector->dp_clock;
	}

	/* use recommended ref_div for ss */
	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
		if (radeon_crtc->ss_enabled) {
			if (radeon_crtc->ss.refdiv) {
				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
				if (ASIC_IS_AVIVO(rdev))
					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
			}
		}
	}

	if (ASIC_IS_AVIVO(rdev)) {
		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
			adjusted_clock = mode->clock * 2;
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
	} else {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
			radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
	}

	/* adjust pll for deep color modes */
	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
		switch (bpc) {
		case 8:
		default:
			break;
		case 10:
			clock = (clock * 5) / 4;	/* 1.25x for 30bpp */
			break;
		case 12:
			clock = (clock * 3) / 2;	/* 1.5x for 36bpp */
			break;
		case 16:
			clock = clock * 2;		/* 2x for 48bpp */
			break;
		}
	}

	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
	 * accordingly based on the encoder/transmitter to work around
	 * special hw requirements.
	 */
	if (ASIC_IS_DCE3(rdev)) {
		union adjust_pixel_clock args;
		u8 frev, crev;
		int index;

		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
					   &crev))
			return adjusted_clock;

		memset(&args, 0, sizeof(args));

		switch (frev) {
		case 1:
			switch (crev) {
			case 1:
			case 2:
				/* table I/O is in 10 kHz units */
				args.v1.usPixelClock = cpu_to_le16(clock / 10);
				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
				args.v1.ucEncodeMode = encoder_mode;
				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
					args.v1.ucConfig |=
						ADJUST_DISPLAY_CONFIG_SS_ENABLE;

				atom_execute_table(rdev->mode_info.atom_context,
						   index, (uint32_t *)&args);
				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
				break;
			case 3:
				args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
				args.v3.sInput.ucEncodeMode = encoder_mode;
				args.v3.sInput.ucDispPllConfig = 0;
				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_SS_ENABLE;
				if (ENCODER_MODE_IS_DP(encoder_mode)) {
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_COHERENT_MODE;
					/* 16200 or 27000 */
					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
				} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
					if (dig->coherent_mode)
						args.v3.sInput.ucDispPllConfig |=
							DISPPLL_CONFIG_COHERENT_MODE;
					if (is_duallink)
						args.v3.sInput.ucDispPllConfig |=
							DISPPLL_CONFIG_DUAL_LINK;
				}
				if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
				    ENCODER_OBJECT_ID_NONE)
					args.v3.sInput.ucExtTransmitterID =
						radeon_encoder_get_dp_bridge_encoder_id(encoder);
				else
					args.v3.sInput.ucExtTransmitterID = 0;

				atom_execute_table(rdev->mode_info.atom_context,
						   index, (uint32_t *)&args);
				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
				/* the table may hand back mandatory dividers */
				if (args.v3.sOutput.ucRefDiv) {
					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
					radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
					radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
				}
				if (args.v3.sOutput.ucPostDiv) {
					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
					radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
					radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
				}
				break;
			default:
				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
				return adjusted_clock;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return adjusted_clock;
		}
	}
	return adjusted_clock;
}

/* parameter layouts for the SetPixelClock table revisions */
union set_pixel_clock {
	SET_PIXEL_CLOCK_PS_ALLOCATION base;
	PIXEL_CLOCK_PARAMETERS v1;
	PIXEL_CLOCK_PARAMETERS_V2 v2;
	PIXEL_CLOCK_PARAMETERS_V3 v3;
	PIXEL_CLOCK_PARAMETERS_V5 v5;
	PIXEL_CLOCK_PARAMETERS_V6 v6;
};

/* on DCE5, make sure the voltage is high enough to support the
 * required disp clk.
 */
static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
					   u32 dispclk)
{
	u8 frev, crev;
	int index;
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
				   &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 5:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v5.ucCRTC = ATOM_CRTC_INVALID;
			args.v5.usPixelClock = cpu_to_le16(dispclk);
			args.v5.ucPpll = ATOM_DCPLL;
			break;
		case 6:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
			if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
				args.v6.ucPpll = ATOM_EXT_PLL1;
			else if (ASIC_IS_DCE6(rdev))
				args.v6.ucPpll = ATOM_PPLL0;
			else
				args.v6.ucPpll = ATOM_DCPLL;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_crtc_program_pll - run the SetPixelClock atom table with
 * precomputed dividers.  The parameter struct revision (v1/v2/v3/v5/v6)
 * is discovered from the table header; clocks are passed to the table
 * in 10 kHz units, hence the "/ 10" conversions.
 */
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
				      u32 crtc_id,
				      int pll_id,
				      u32 encoder_mode,
				      u32 encoder_id,
				      u32 clock,
				      u32 ref_div,
				      u32 fb_div,
				      u32 frac_fb_div,
				      u32 post_div,
				      int bpc,
				      bool ss_enabled,
				      struct radeon_atom_ss *ss)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u8 frev, crev;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
				   &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			if (clock == ATOM_DISABLE)
				return;
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.usRefDiv = cpu_to_le16(ref_div);
			args.v1.usFbDiv = cpu_to_le16(fb_div);
			args.v1.ucFracFbDiv = frac_fb_div;
			args.v1.ucPostDiv = post_div;
			args.v1.ucPpll = pll_id;
			args.v1.ucCRTC = crtc_id;
			args.v1.ucRefDivSrc = 1;
			break;
		case 2:
			args.v2.usPixelClock = cpu_to_le16(clock / 10);
			args.v2.usRefDiv = cpu_to_le16(ref_div);
			args.v2.usFbDiv = cpu_to_le16(fb_div);
			args.v2.ucFracFbDiv = frac_fb_div;
			args.v2.ucPostDiv = post_div;
			args.v2.ucPpll = pll_id;
			args.v2.ucCRTC = crtc_id;
			args.v2.ucRefDivSrc = 1;
			break;
		case 3:
			args.v3.usPixelClock = cpu_to_le16(clock / 10);
			args.v3.usRefDiv = cpu_to_le16(ref_div);
			args.v3.usFbDiv = cpu_to_le16(fb_div);
			args.v3.ucFracFbDiv = frac_fb_div;
			args.v3.ucPostDiv = post_div;
			args.v3.ucPpll = pll_id;
			if (crtc_id == ATOM_CRTC2)
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
			else
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
			args.v3.ucTransmitterId = encoder_id;
			args.v3.ucEncoderMode = encoder_mode;
			break;
		case 5:
			args.v5.ucCRTC = crtc_id;
			args.v5.usPixelClock = cpu_to_le16(clock / 10);
			args.v5.ucRefDiv = ref_div;
			args.v5.usFbDiv = cpu_to_le16(fb_div);
			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v5.ucPostDiv = post_div;
			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
					break;
				case 10:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
					break;
				case 12:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
					break;
				}
			}
			args.v5.ucTransmitterID = encoder_id;
			args.v5.ucEncoderMode = encoder_mode;
			args.v5.ucPpll = pll_id;
			break;
		case 6:
			/* v6 packs the crtc id into the top byte of the clock */
			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
			args.v6.ucRefDiv = ref_div;
			args.v6.usFbDiv = cpu_to_le16(fb_div);
			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v6.ucPostDiv = post_div;
			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
					break;
				case 12:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
					break;
				case 16:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
					break;
				}
			}
			args.v6.ucTransmitterID = encoder_id;
			args.v6.ucEncoderMode = encoder_mode;
			args.v6.ucPpll = pll_id;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_crtc_prepare_pll - gather bpc and spread-spectrum settings
 * for the CRTC's encoder/connector and compute the adjusted pixel clock
 * (stored in radeon_crtc->adjusted_clock).  Always returns true.
 */
static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc,
				      struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);

	/* defaults: 8 bpc, no spread spectrum */
	radeon_crtc->bpc = 8;
	radeon_crtc->ss_enabled = false;

	if (radeon_encoder->is_mst_encoder) {
		radeon_dp_mst_prepare_pll(crtc, mode);
	} else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
	    (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
		struct drm_connector *connector =
			radeon_get_connector_for_encoder(radeon_crtc->encoder);
		struct radeon_connector *radeon_connector =
			to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;
		int dp_clock;

		/* Assign mode clock for hdmi deep color max clock limit check */
		radeon_connector->pixelclock_for_modeset = mode->clock;
		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);

		/* look up spread-spectrum info per encoder mode */
		switch (encoder_mode) {
		case ATOM_ENCODER_MODE_DP_MST:
		case ATOM_ENCODER_MODE_DP:
			/* DP/eDP */
			dp_clock = dig_connector->dp_clock / 10;
			if (ASIC_IS_DCE4(rdev))
				radeon_crtc->ss_enabled =
					radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
									 ASIC_INTERNAL_SS_ON_DP,
									 dp_clock);
			else {
				if (dp_clock == 16200) {
					radeon_crtc->ss_enabled =
						radeon_atombios_get_ppll_ss_info(rdev,
										 &radeon_crtc->ss,
										 ATOM_DP_SS_ID2);
					if (!radeon_crtc->ss_enabled)
						radeon_crtc->ss_enabled =
							radeon_atombios_get_ppll_ss_info(rdev,
											 &radeon_crtc->ss,
											 ATOM_DP_SS_ID1);
				} else {
					radeon_crtc->ss_enabled =
						radeon_atombios_get_ppll_ss_info(rdev,
										 &radeon_crtc->ss,
										 ATOM_DP_SS_ID1);
				}
				/* disable spread spectrum on DCE3 DP */
				radeon_crtc->ss_enabled = false;
			}
			break;
		case ATOM_ENCODER_MODE_LVDS:
			if (ASIC_IS_DCE4(rdev))
				radeon_crtc->ss_enabled =
					radeon_atombios_get_asic_ss_info(rdev,
									 &radeon_crtc->ss,
									 dig->lcd_ss_id,
									 mode->clock / 10);
			else
				radeon_crtc->ss_enabled =
					radeon_atombios_get_ppll_ss_info(rdev,
									 &radeon_crtc->ss,
									 dig->lcd_ss_id);
			break;
		case ATOM_ENCODER_MODE_DVI:
			if (ASIC_IS_DCE4(rdev))
				radeon_crtc->ss_enabled =
					radeon_atombios_get_asic_ss_info(rdev,
									 &radeon_crtc->ss,
									 ASIC_INTERNAL_SS_ON_TMDS,
									 mode->clock / 10);
			break;
		case ATOM_ENCODER_MODE_HDMI:
			if (ASIC_IS_DCE4(rdev))
				radeon_crtc->ss_enabled =
					radeon_atombios_get_asic_ss_info(rdev,
									 &radeon_crtc->ss,
									 ASIC_INTERNAL_SS_ON_HDMI,
									 mode->clock / 10);
			break;
		default:
			break;
		}
	}

	/* adjust pixel clock as needed */
	radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);

	return true;
}

/*
 * atombios_crtc_set_pll - compute PLL dividers for the prepared clock
 * and program them, bracketing the SetPixelClock call with an SS
 * disable/enable so spread spectrum is off while the pll is retrained.
 * On DCE4 the ss amount/step are derived here from the computed dividers.
 */
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	u32 pll_clock = mode->clock;
	u32 clock = mode->clock;
	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
	struct radeon_pll *pll;
	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);

	/* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
	if (ASIC_IS_DCE5(rdev) &&
	    (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
	    (radeon_crtc->bpc > 8))
		clock = radeon_crtc->adjusted_clock;

	switch (radeon_crtc->pll_id) {
	case ATOM_PPLL1:
		pll = &rdev->clock.p1pll;
		break;
	case ATOM_PPLL2:
		pll = &rdev->clock.p2pll;
		break;
	case ATOM_DCPLL:
	case ATOM_PPLL_INVALID:
	default:
		pll = &rdev->clock.dcpll;
		break;
	}

	/* update pll params */
	pll->flags = radeon_crtc->pll_flags;
	pll->reference_div = radeon_crtc->pll_reference_div;
	pll->post_div = radeon_crtc->pll_post_div;

	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
		/* TV seems to prefer the legacy algo on some boards */
		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
					  &fb_div, &frac_fb_div, &ref_div, &post_div);
	else if (ASIC_IS_AVIVO(rdev))
		radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
					 &fb_div, &frac_fb_div, &ref_div, &post_div);
	else
		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
					  &fb_div, &frac_fb_div, &ref_div, &post_div);

	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
				 radeon_crtc->crtc_id, &radeon_crtc->ss);

	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
				  encoder_mode, radeon_encoder->encoder_id, clock,
				  ref_div, fb_div, frac_fb_div, post_div,
				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);

	if (radeon_crtc->ss_enabled) {
		/* calculate ss amount and step size */
		if (ASIC_IS_DCE4(rdev)) {
			u32 step_size;
			u32 amount = (((fb_div * 10) + frac_fb_div) *
				      (u32)radeon_crtc->ss.percentage) /
				(100 * (u32)radeon_crtc->ss.percentage_divider);
			/* pack the integer and fractional parts of the amount */
			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
			radeon_crtc->ss.amount |= ((amount - (amount / 10)) <<
						   ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
			/* centre spread uses twice the step of down spread */
			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
				step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
					(125 * 25 * pll->reference_freq / 100);
			else
				step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
					(125 * 25 * pll->reference_freq / 100);
			radeon_crtc->ss.step = step_size;
		}

		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
					 radeon_crtc->crtc_id, &radeon_crtc->ss);
	}
}

/*
 * dce4_crtc_do_set_base - program the DCE4+ display surface (scanout
 * base address, format, tiling) for this CRTC.
 * NOTE(review): this function continues beyond the visible chunk.
 */
static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 int x, int y, int atomic)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_framebuffer *radeon_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct radeon_bo *rbo;
	uint64_t fb_location;
	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
	unsigned bankw, bankh, mtaspect, tile_split;
	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n"); return 0; } if (atomic) { radeon_fb = to_radeon_framebuffer(fb); target_fb = fb; } else { radeon_fb = to_radeon_framebuffer(crtc->primary->fb); target_fb = crtc->primary->fb; } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ obj = radeon_fb->obj; rbo = gem_to_radeon_bo(obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; if (atomic) fb_location = radeon_bo_gpu_offset(rbo); else { r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); return -EINVAL; } } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); switch (target_fb->pixel_format) { case DRM_FORMAT_C8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); break; case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_ARGB1555: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; case DRM_FORMAT_BGRX5551: case DRM_FORMAT_BGRA5551: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; case DRM_FORMAT_RGB565: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: fb_format = 
(EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); #endif break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); #endif /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_BGRA1010102: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); #endif /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->pixel_format)); return -EINVAL; } if (tiling_flags & RADEON_TILING_MACRO) { evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); /* Set NUM_BANKS. */ if (rdev->family >= CHIP_TAHITI) { unsigned index, num_banks; if (rdev->family >= CHIP_BONAIRE) { unsigned tileb, tile_split_bytes; /* Calculate the macrotile mode index. 
*/ tile_split_bytes = 64 << tile_split; tileb = 8 * 8 * target_fb->bits_per_pixel / 8; tileb = min(tile_split_bytes, tileb); for (index = 0; tileb > 64; index++) tileb >>= 1; if (index >= 16) { DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", target_fb->bits_per_pixel, tile_split); return -EINVAL; } num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; } else { switch (target_fb->bits_per_pixel) { case 8: index = 10; break; case 16: index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; break; default: case 32: index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; break; } num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; } fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { /* NI and older. */ if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; switch ((tmp & 0xf0) >> 4) { case 0: /* 4 banks */ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); break; case 1: /* 8 banks */ default: fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); break; case 2: /* 16 banks */ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); break; } } fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); if (rdev->family >= CHIP_BONAIRE) { /* XXX need to know more about the surface tiling mode */ fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING); } } else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); if (rdev->family >= CHIP_BONAIRE) { /* Read the pipe config from the 2D TILED SCANOUT mode. * It should be the same for the other modes too, but not all * modes set the pipe config field. 
*/ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f; fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config); } else if ((rdev->family == CHIP_TAHITI) || (rdev->family == CHIP_PITCAIRN)) fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); else if ((rdev->family == CHIP_VERDE) || (rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); switch (radeon_crtc->crtc_id) { case 0: WREG32(AVIVO_D1VGA_CONTROL, 0); break; case 1: WREG32(AVIVO_D2VGA_CONTROL, 0); break; case 2: WREG32(EVERGREEN_D3VGA_CONTROL, 0); break; case 3: WREG32(EVERGREEN_D4VGA_CONTROL, 0); break; case 4: WREG32(EVERGREEN_D5VGA_CONTROL, 0); break; case 5: WREG32(EVERGREEN_D6VGA_CONTROL, 0); break; default: break; } WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); /* * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to * retain the full precision throughout the pipeline. */ WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset, (bypass_lut ? 
EVERGREEN_LUT_10BIT_BYPASS_EN : 0), ~EVERGREEN_LUT_10BIT_BYPASS_EN); if (bypass_lut) DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); if (rdev->family >= CHIP_BONAIRE) WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, target_fb->height); else WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, target_fb->height); x &= ~3; y &= ~1; WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y); viewport_w = crtc->mode.hdisplay; viewport_h = (crtc->mode.vdisplay + 1) & ~1; if ((rdev->family >= CHIP_BONAIRE) && (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) viewport_h *= 2; WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); /* pageflip setup */ /* make sure flip is at vb rather than hb */ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); /* set pageflip to happen only at start of vblank interval (front porch) */ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } /* Bytes 
per pixel may have changed */
	radeon_bandwidth_update(rdev);

	return 0;
}

/*
 * avivo_crtc_do_set_base - program the AVIVO/R600-era D1GRPH scanout surface.
 *
 * Pins @fb (or crtc->primary->fb when !@atomic) into VRAM, translates its
 * pixel format and tiling flags into D1GRPH control bits, and writes the
 * surface address, pitch, and viewport registers at this crtc's register
 * offset.  When !@atomic and the framebuffer changed, the old buffer is
 * unpinned at the end.  Returns 0 on success or a negative errno.
 */
static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  int x, int y, int atomic)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_framebuffer *radeon_fb;
	struct drm_gem_object *obj;
	struct radeon_bo *rbo;
	struct drm_framebuffer *target_fb;
	uint64_t fb_location;
	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		radeon_fb = to_radeon_framebuffer(fb);
		target_fb = fb;
	} else {
		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	obj = radeon_fb->obj;
	rbo = gem_to_radeon_bo(obj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	if (atomic)
		fb_location = radeon_bo_gpu_offset(rbo);
	else {
		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			radeon_bo_unreserve(rbo);
			return -EINVAL;
		}
	}
	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(rbo);

	/* Map DRM fourcc -> D1GRPH depth/format bits; on big-endian hosts
	 * also request the matching byte-swap mode.
	 */
	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
		    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
#ifdef __BIG_ENDIAN
		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
		break;
	case DRM_FORMAT_XRGB1555:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
#ifdef __BIG_ENDIAN
		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
#ifdef __BIG_ENDIAN
		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
#ifdef __BIG_ENDIAN
		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
#ifdef __BIG_ENDIAN
		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}

	/* R600+ uses array-mode bits; older AVIVO uses dedicated tiling bits */
	if (rdev->family >= CHIP_R600) {
		if (tiling_flags & RADEON_TILING_MACRO)
			fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
		else if (tiling_flags & RADEON_TILING_MICRO)
			fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;

		if (tiling_flags & RADEON_TILING_MICRO)
			fb_format |= AVIVO_D1GRPH_TILED;
	}

	/* shut off the VGA emulation path for this crtc */
	if (radeon_crtc->crtc_id == 0)
		WREG32(AVIVO_D1VGA_CONTROL, 0);
	else
		WREG32(AVIVO_D2VGA_CONTROL, 0);

	/* RV770+ surfaces can live above 4GB: program the high address bits */
	if (rdev->family >= CHIP_RV770) {
		if (radeon_crtc->crtc_id) {
			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
			       upper_32_bits(fb_location));
			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
			       upper_32_bits(fb_location));
		} else {
			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
			       upper_32_bits(fb_location));
			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
			       upper_32_bits(fb_location));
		}
	}
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32) fb_location);
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
	       radeon_crtc->crtc_offset, (u32) fb_location);
	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
	if (rdev->family >= CHIP_R600)
		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);

	/* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
		 (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0),
		 ~AVIVO_LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);

	WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
	       target_fb->height);
	/* viewport start must be 4-pixel / 2-line aligned */
	x &= ~3;
	y &= ~1;
	WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* pageflip setup */
	/* make sure flip is at vb rather than hb */
	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);

	/* set pageflip to happen only at start of vblank interval (front porch) */
	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);

	/* release the buffer that was previously being scanned out, if any */
	if (!atomic && fb && fb != crtc->primary->fb) {
		radeon_fb = to_radeon_framebuffer(fb);
		rbo = gem_to_radeon_bo(radeon_fb->obj);
		r = radeon_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	radeon_bandwidth_update(rdev);

	return 0;
}

int
atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
		       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* non-atomic path: dispatch on display-block generation */
	if (ASIC_IS_DCE4(rdev))
		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
	else if (ASIC_IS_AVIVO(rdev))
		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
	else
		return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

/* atomic (kms fbcon / panic) variant: fb is assumed already pinned */
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE4(rdev))
		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
	else if (ASIC_IS_AVIVO(rdev))
		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
	else
		return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}

/* properly set additional regs when using atombios */
static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	u32 disp_merge_cntl;

	switch (radeon_crtc->crtc_id) {
	case 0:
		/* clear the RGB offset so the crtc scans out from 0 */
		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
		break;
	case 1:
		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
		/* mirror crtc2 sync timings into the FP2 registers */
		WREG32(RADEON_FP_H2_SYNC_STRT_WID,
		       RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
		WREG32(RADEON_FP_V2_SYNC_STRT_WID,
		       RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
		break;
	}
}

/**
 * radeon_get_pll_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
*/
static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct radeon_crtc *test_radeon_crtc;
	u32 pll_in_use = 0;

	/* collect the pll ids claimed by every crtc other than @crtc */
	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_radeon_crtc = to_radeon_crtc(test_crtc);
		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_radeon_crtc->pll_id);
	}
	return pll_in_use;
}

/**
 * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct radeon_crtc *test_radeon_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_radeon_crtc = to_radeon_crtc(test_crtc);
		if (test_radeon_crtc->encoder &&
		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_radeon_crtc->pll_id;
		}
	}
	/* no other DP crtc holds a pll yet */
	return ATOM_PPLL_INVALID;
}

/**
 * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
*/
static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct radeon_crtc *test_radeon_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = radeon_crtc->adjusted_clock;
	/* no pll has been computed for this crtc yet -> nothing to share */
	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_radeon_crtc = to_radeon_crtc(test_crtc);
		if (test_radeon_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_radeon_crtc->connector == radeon_crtc->connector) {
				/* if we are, return that pll */
				if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_radeon_crtc->pll_id;
			}
			/* for non-DP check the clock: share only when both the
			 * requested and the pll-adjusted clocks match and the
			 * spread-spectrum setting agrees
			 */
			test_adjusted_clock = test_radeon_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_radeon_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
 * monitors a dedicated PPLL must be used.  If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself.  If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
* * Asic specific PLL information * * DCE 8.x * KB/KV * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) * CI * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC * * DCE 6.1 * - PPLL2 is only available to UNIPHYA (both DP and non-DP) * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP) * * DCE 6.0 * - PPLL0 is available to all UNIPHY (DP only) * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC * * DCE 5.0 * - DCPLL is available to all UNIPHY (DP only) * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC * * DCE 3.0/4.0/4.1 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC * */ static int radeon_atom_pick_pll(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder); u32 pll_in_use; int pll; if (ASIC_IS_DCE8(rdev)) { if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { if (rdev->clock.dp_extclk) /* skip PPLL programming if using ext clock */ return ATOM_PPLL_INVALID; else { /* use the same PPLL for all DP monitors */ pll = radeon_get_shared_dp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } /* otherwise, pick one of the plls */ if ((rdev->family == CHIP_KABINI) || (rdev->family == CHIP_MULLINS)) { /* KB/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { /* CI/KV has PPLL0, PPLL1, and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << 
ATOM_PPLL2))) return ATOM_PPLL2; if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; if (!(pll_in_use & (1 << ATOM_PPLL0))) return ATOM_PPLL0; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } } else if (ASIC_IS_DCE61(rdev)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) && (dig->linkb == false)) /* UNIPHY A uses PPLL2 */ return ATOM_PPLL2; else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { /* UNIPHY B/C/D/E/F */ if (rdev->clock.dp_extclk) /* skip PPLL programming if using ext clock */ return ATOM_PPLL_INVALID; else { /* use the same PPLL for all DP monitors */ pll = radeon_get_shared_dp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } /* UNIPHY B/C/D/E/F */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL0))) return ATOM_PPLL0; if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else if (ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { if (rdev->clock.dp_extclk) /* skip PPLL programming if using ext clock */ return ATOM_PPLL_INVALID; } pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else if (ASIC_IS_DCE4(rdev)) { /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, * depending on the asic: * DCE4: PPLL or ext clock * DCE5: PPLL, DCPLL, or ext clock * DCE6: PPLL, PPLL0, or ext clock * * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip * PPLL/DCPLL programming and 
only program the DP DTO for the * crtc virtual pixel clock. */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { if (rdev->clock.dp_extclk) /* skip PPLL programming if using ext clock */ return ATOM_PPLL_INVALID; else if (ASIC_IS_DCE6(rdev)) /* use PPLL0 for all DP */ return ATOM_PPLL0; else if (ASIC_IS_DCE5(rdev)) /* use DCPLL for all DP */ return ATOM_DCPLL; else { /* use the same PPLL for all DP monitors */ pll = radeon_get_shared_dp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) return pll; } /* all other cases */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ /* some atombios (observed in some DCE2/DCE3) code have a bug, * the matching btw pll and crtc is done through * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the * pll (1 or 2) to select which register to write. ie if using * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2 * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to * choose which value to write. Which is reverse order from * register logic. So only case that works is when pllid is * same as crtcid or when both pll and crtc are enabled and * both use same clock. 
*
 * So just return crtc id as if crtc and pll were hard linked
 * together even if they aren't
 */
		return radeon_crtc->crtc_id;
	}
}

/*
 * radeon_atom_disp_eng_pll_init - program the display engine clock at init.
 *
 * On DCE4+ the display engine pll (DCPLL) is set to the bios default
 * dispclk; on DCE4 spread spectrum is disabled around the change and
 * re-enabled afterwards when the bios tables report it for the DCPLL.
 */
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
{
	/* always set DCPLL */
	if (ASIC_IS_DCE6(rdev))
		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
	else if (ASIC_IS_DCE4(rdev)) {
		struct radeon_atom_ss ss;
		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
								   ASIC_INTERNAL_SS_ON_DCPLL,
								   rdev->clock.default_dispclk);
		if (ss_enabled)
			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
		/* XXX: DCE5, make sure voltage, dispclk is high enough */
		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
		if (ss_enabled)
			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
	}

}

/*
 * atombios_crtc_mode_set - full modeset entry point (drm crtc helper).
 *
 * Programs the pll, the crtc timing (path depends on asic generation and
 * whether a TV/CV encoder is active), the scanout base, overscan and
 * scaler, then caches the adjusted mode for dpm.  Returns -EINVAL when
 * mode_fixup did not produce an adjusted clock.
 */
int atombios_crtc_mode_set(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode,
			   int x, int y, struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	bool is_tvcv = false;

	if (radeon_encoder->active_device &
	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
		is_tvcv = true;

	if (!radeon_crtc->adjusted_clock)
		return -EINVAL;

	atombios_crtc_set_pll(crtc, adjusted_mode);

	if (ASIC_IS_DCE4(rdev))
		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
	else if (ASIC_IS_AVIVO(rdev)) {
		if (is_tvcv)
			atombios_crtc_set_timing(crtc, adjusted_mode);
		else
			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
	} else {
		atombios_crtc_set_timing(crtc, adjusted_mode);
		if (radeon_crtc->crtc_id == 0)
			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
		radeon_legacy_atom_fixup(crtc);
	}
	atombios_crtc_set_base(crtc, x, y, old_fb);
	atombios_overscan_setup(crtc, mode, adjusted_mode);
	atombios_scaler_setup(crtc);
	radeon_cursor_reset(crtc);
	/* update the hw version for dpm */
	radeon_crtc->hw_mode =
*adjusted_mode;

	return 0;
}

/*
 * atombios_crtc_mode_fixup - validate/adjust the mode before a modeset.
 *
 * Caches the encoder/connector driving this crtc, propagates the
 * encoder's output_csc, runs the scaling and pll-prepare fixups, and
 * allocates a PPLL.  Returns false to reject the modeset when no
 * encoder/connector is attached, a fixup fails, or no PPLL could be
 * allocated for a non-DP encoder.
 */
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			radeon_crtc->encoder = encoder;
			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
		radeon_crtc->encoder = NULL;
		radeon_crtc->connector = NULL;
		return false;
	}
	if (radeon_crtc->encoder) {
		struct radeon_encoder *radeon_encoder =
			to_radeon_encoder(radeon_crtc->encoder);

		radeon_crtc->output_csc = radeon_encoder->output_csc;
	}
	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
		return false;

	return true;
}

/* helper .prepare: lock the crtc and turn it off before reprogramming */
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* disable crtc pair power gating before programming */
	if (ASIC_IS_DCE6(rdev))
		atombios_powergate_crtc(crtc, ATOM_DISABLE);

	atombios_lock_crtc(crtc, ATOM_ENABLE);
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

/* helper .commit: turn the crtc back on and release the lock */
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	atombios_lock_crtc(crtc, ATOM_DISABLE);
}

/*
 * atombios_crtc_disable - fully shut down a crtc: dpms off, unpin the
 * scanout buffer, disable the GRPH block, power gate (DCE6) and release
 * the crtc's pll unless another enabled crtc still uses it.
 */
static void atombios_crtc_disable(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
struct radeon_atom_ss ss;
	int i;

	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	/* unpin the framebuffer that was being scanned out */
	if (crtc->primary->fb) {
		int r;
		struct radeon_framebuffer *radeon_fb;
		struct radeon_bo *rbo;

		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
		rbo = gem_to_radeon_bo(radeon_fb->obj);
		r = radeon_bo_reserve(rbo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve rbo before unpin\n");
		else {
			radeon_bo_unpin(rbo);
			radeon_bo_unreserve(rbo);
		}
	}
	/* disable the GRPH */
	if (ASIC_IS_DCE4(rdev))
		WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
	else if (ASIC_IS_AVIVO(rdev))
		WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);

	if (ASIC_IS_DCE6(rdev))
		atombios_powergate_crtc(crtc, ATOM_ENABLE);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i] &&
		    rdev->mode_info.crtcs[i]->enabled &&
		    i != radeon_crtc->crtc_id &&
		    radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (radeon_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id,
					  radeon_crtc->pll_id,
					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0,
					  false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((rdev->family == CHIP_ARUBA) ||
		    (rdev->family == CHIP_KAVERI) ||
		    (rdev->family == CHIP_BONAIRE) ||
		    (rdev->family == CHIP_HAWAII))
			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id,
						  radeon_crtc->pll_id,
						  0, 0, ATOM_DISABLE, 0, 0, 0,
						  0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* mark the crtc's pll/encoder/connector state as free */
	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
	radeon_crtc->adjusted_clock = 0;
	radeon_crtc->encoder = NULL;
	radeon_crtc->connector = NULL;
}

static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
	.dpms = atombios_crtc_dpms,
	.mode_fixup = atombios_crtc_mode_fixup,
	.mode_set = atombios_crtc_mode_set,
	.mode_set_base = atombios_crtc_set_base,
	.mode_set_base_atomic = atombios_crtc_set_base_atomic,
	.prepare = atombios_crtc_prepare,
	.commit = atombios_crtc_commit,
	.load_lut = radeon_crtc_load_lut,
	.disable = atombios_crtc_disable,
};

/*
 * radeon_atombios_init_crtc - set up a crtc's register offset and initial
 * pll/encoder/connector state, then register the atombios helper funcs.
 */
void radeon_atombios_init_crtc(struct drm_device *dev,
			       struct radeon_crtc *radeon_crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE4(rdev)) {
		switch (radeon_crtc->crtc_id) {
		case 0:
		default:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
			break;
		case 1:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
			break;
		case 2:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
			break;
		case 3:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
			break;
		case 4:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
			break;
		case 5:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
			break;
		}
	} else {
		/* pre-DCE4: crtc1 registers sit at a fixed delta from crtc0 */
		if (radeon_crtc->crtc_id == 1)
			radeon_crtc->crtc_offset =
				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
		else
			radeon_crtc->crtc_offset = 0;
	}
	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
	radeon_crtc->adjusted_clock = 0;
	radeon_crtc->encoder = NULL;
	radeon_crtc->connector = NULL;
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
gpl-2.0
jforge/linux
arch/mips/cavium-octeon/oct_ilm.c
1938
4797
#include <linux/fs.h> #include <linux/interrupt.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-ciu-defs.h> #include <asm/octeon/cvmx.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/seq_file.h> #define TIMER_NUM 3 static bool reset_stats; struct latency_info { u64 io_interval; u64 cpu_interval; u64 timer_start1; u64 timer_start2; u64 max_latency; u64 min_latency; u64 latency_sum; u64 average_latency; u64 interrupt_cnt; }; static struct latency_info li; static struct dentry *dir; static int show_latency(struct seq_file *m, void *v) { u64 cpuclk, avg, max, min; struct latency_info curr_li = li; cpuclk = octeon_get_clock_rate(); max = (curr_li.max_latency * 1000000000) / cpuclk; min = (curr_li.min_latency * 1000000000) / cpuclk; avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt); seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n", curr_li.interrupt_cnt, avg, max, min); return 0; } static int oct_ilm_open(struct inode *inode, struct file *file) { return single_open(file, show_latency, NULL); } static const struct file_operations oct_ilm_ops = { .open = oct_ilm_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int reset_statistics(void *data, u64 value) { reset_stats = true; return 0; } DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); static int init_debufs(void) { struct dentry *show_dentry; dir = debugfs_create_dir("oct_ilm", 0); if (!dir) { pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n"); return -1; } show_dentry = debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops); if (!show_dentry) { pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n"); return -1; } show_dentry = debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops); if (!show_dentry) { pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n"); return -1; } return 0; } static 
void init_latency_info(struct latency_info *li, int startup) { /* interval in milli seconds after which the interrupt will * be triggered */ int interval = 1; if (startup) { /* Calculating by the amounts io clock and cpu clock would * increment in interval amount of ms */ li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000; li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000; } li->timer_start1 = 0; li->timer_start2 = 0; li->max_latency = 0; li->min_latency = (u64)-1; li->latency_sum = 0; li->interrupt_cnt = 0; } static void start_timer(int timer, u64 interval) { union cvmx_ciu_timx timx; unsigned long flags; timx.u64 = 0; timx.s.one_shot = 1; timx.s.len = interval; raw_local_irq_save(flags); li.timer_start1 = read_c0_cvmcount(); cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64); /* Read it back to force wait until register is written. */ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer)); li.timer_start2 = read_c0_cvmcount(); raw_local_irq_restore(flags); } static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id) { u64 last_latency; u64 last_int_cnt; if (reset_stats) { init_latency_info(&li, 0); reset_stats = false; } else { last_int_cnt = read_c0_cvmcount(); last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval); li.interrupt_cnt++; li.latency_sum += last_latency; if (last_latency > li.max_latency) li.max_latency = last_latency; if (last_latency < li.min_latency) li.min_latency = last_latency; } start_timer(TIMER_NUM, li.io_interval); return IRQ_HANDLED; } static void disable_timer(int timer) { union cvmx_ciu_timx timx; timx.s.one_shot = 0; timx.s.len = 0; cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64); /* Read it back to force immediate write of timer register*/ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer)); } static __init int oct_ilm_module_init(void) { int rc; int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM; rc = init_debufs(); if (rc) { WARN(1, "Could not create debugfs entries"); return rc; } rc = request_irq(irq, 
cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD, "oct_ilm", 0); if (rc) { WARN(1, "Could not acquire IRQ %d", irq); goto err_irq; } init_latency_info(&li, 1); start_timer(TIMER_NUM, li.io_interval); return 0; err_irq: debugfs_remove_recursive(dir); return rc; } static __exit void oct_ilm_module_exit(void) { disable_timer(TIMER_NUM); debugfs_remove_recursive(dir); free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0); } module_exit(oct_ilm_module_exit); module_init(oct_ilm_module_init); MODULE_AUTHOR("Venkat Subbiah, Cavium"); MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips."); MODULE_LICENSE("GPL");
gpl-2.0
arter97/odroid
drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
2450
9643
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 ******************************************************************************/
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_hwimg.h"
#include "r8192E_firmware.h"
#include <linux/firmware.h>

/* Compute the command-packet fragmentation threshold used when the
 * firmware image is split into TX command packets.
 */
void firmware_init_param(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_firmware *pfirmware = priv->pFirmware;

	pfirmware->cmdpacket_frag_thresold = GET_COMMAND_PACKET_FRAG_THRESHOLD(
					     MAX_TRANSMIT_BUFFER_SIZE);
}

/*
 * Push a firmware image to the NIC, fragmented into command packets.
 * Each fragment is byte-swapped into an skb (the device expects the
 * 32-bit words reversed) and either transmitted directly or queued on
 * the wait queue when the TX ring is full.
 * Returns false on allocation failure, true otherwise.
 */
static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
			     u32 buffer_len)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool rt_status = true;
	u16 frag_threshold;
	u16 frag_length, frag_offset = 0;
	int i;
	struct rt_firmware *pfirmware = priv->pFirmware;
	struct sk_buff *skb;
	unsigned char *seg_ptr;
	struct cb_desc *tcb_desc;
	u8 bLastIniPkt;

	firmware_init_param(dev);
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;
		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;
		}

		skb = dev_alloc_skb(frag_length + 4);
		/*
		 * Fix: dev_alloc_skb() can return NULL; the old code
		 * dereferenced skb->cb unconditionally.
		 */
		if (!skb)
			return false;
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
		tcb_desc->bLastIniPkt = bLastIniPkt;

		/* Byte-swap each 32-bit word; trailing bytes beyond the
		 * fragment are zero-padded.
		 */
		seg_ptr = skb->data;
		for (i = 0; i < frag_length; i += 4) {
			*seg_ptr++ = ((i+0) < frag_length) ?
				     code_virtual_address[i+3] : 0;
			*seg_ptr++ = ((i+1) < frag_length) ?
				     code_virtual_address[i+2] : 0;
			*seg_ptr++ = ((i+2) < frag_length) ?
				     code_virtual_address[i+1] : 0;
			*seg_ptr++ = ((i+3) < frag_length) ?
				     code_virtual_address[i+0] : 0;
		}
		tcb_desc->txbuf_size = (u16)i;
		skb_put(skb, i);

		if (!priv->rtllib->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		    (!skb_queue_empty(&priv->rtllib->skb_waitQ[tcb_desc->queue_index])) ||
		    (priv->rtllib->queue_stop)) {
			RT_TRACE(COMP_FIRMWARE, "===================> tx "
				 "full!\n");
			skb_queue_tail(&priv->rtllib->skb_waitQ
				       [tcb_desc->queue_index], skb);
		} else {
			priv->rtllib->softmac_hard_start_xmit(skb, dev);
		}

		code_virtual_address += frag_length;
		frag_offset += frag_length;
	} while (frag_offset < buffer_len);

	/* Kick the command queue so the NIC starts processing. */
	write_nic_byte(dev, TPPoll, TPPoll_CQ);

	return rt_status;
}

/*
 * Poll for "put code ok", then power up the on-chip CPU and poll for
 * "boot ready".  Returns false when either poll times out (~200 ms each).
 */
static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
{
	bool rt_status = true;
	u32 CPU_status = 0;
	unsigned long timeout;

	timeout = jiffies + MSECS(200);
	while (time_before(jiffies, timeout)) {
		CPU_status = read_nic_dword(dev, CPU_GEN);
		if (CPU_status & CPU_GEN_PUT_CODE_OK)
			break;
		mdelay(2);
	}

	if (!(CPU_status&CPU_GEN_PUT_CODE_OK)) {
		RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n");
		goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
	} else {
		RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
	}

	/* Release the CPU from standby (only the low byte is written). */
	CPU_status = read_nic_dword(dev, CPU_GEN);
	write_nic_byte(dev, CPU_GEN,
		       (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
	mdelay(1);

	timeout = jiffies + MSECS(200);
	while (time_before(jiffies, timeout)) {
		CPU_status = read_nic_dword(dev, CPU_GEN);
		if (CPU_status&CPU_GEN_BOOT_RDY)
			break;
		mdelay(2);
	}

	if (!(CPU_status&CPU_GEN_BOOT_RDY))
		goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
	else
		RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n");

	return rt_status;

CPUCheckMainCodeOKAndTurnOnCPU_Fail:
	RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
	rt_status = false;
	return rt_status;
}

/* Poll (~20 ms) for the firmware-ready flag after the data image upload. */
static bool CPUcheck_firmware_ready(struct net_device *dev)
{
	bool rt_status = true;
	u32 CPU_status = 0;
	unsigned long timeout;

	timeout = jiffies + MSECS(20);
	while (time_before(jiffies, timeout)) {
		CPU_status = read_nic_dword(dev, CPU_GEN);
		if (CPU_status&CPU_GEN_FIRM_RDY)
			break;
		mdelay(2);
	}

	if (!(CPU_status&CPU_GEN_FIRM_RDY))
		goto CPUCheckFirmwareReady_Fail;
	else
		RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n");

	return rt_status;

CPUCheckFirmwareReady_Fail:
	RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
	rt_status = false;
	return rt_status;
}

/*
 * Advance the firmware state machine after each download step and run
 * the step-specific hardware readiness check.
 */
static bool firmware_check_ready(struct net_device *dev,	u8 load_fw_status)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_firmware *pfirmware = priv->pFirmware;
	bool rt_status  = true;

	switch (load_fw_status) {
	case FW_INIT_STEP0_BOOT:
		pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE;
		break;

	case FW_INIT_STEP1_MAIN:
		pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE;

		rt_status = CPUcheck_maincodeok_turnonCPU(dev);
		if (rt_status)
			pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU;
		else
			RT_TRACE(COMP_FIRMWARE, "CPUcheck_maincodeok_turnon"
				 "CPU fail!\n");
		break;

	case FW_INIT_STEP2_DATA:
		pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE;
		mdelay(1);

		rt_status = CPUcheck_firmware_ready(dev);
		if (rt_status)
			pfirmware->firmware_status = FW_STATUS_5_READY;
		else
			RT_TRACE(COMP_FIRMWARE, "CPUcheck_firmware_ready fail"
				 "(%d)!\n", rt_status);
		break;
	default:
		rt_status = false;
		RT_TRACE(COMP_FIRMWARE, "Unknown firmware status");
		break;
	}

	return rt_status;
}

/*
 * Download the boot/main/data firmware images to the NIC.
 * On a cold start (FW_STATUS_0_INIT) all three images are loaded, reading
 * them via request_firmware() and caching them in pfirmware->firmware_buf
 * so a later firmware reset (FW_STATUS_5_READY) only replays the data
 * image from the cache.  The main image is prefixed with 128 zero bytes.
 * Returns true on success.
 */
bool init_firmware(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	bool rt_status = true;
	u8 *firmware_img_buf[3] = { &Rtl8192PciEFwBootArray[0],
				    &Rtl8192PciEFwMainArray[0],
				    &Rtl8192PciEFwDataArray[0]};
	u32 firmware_img_len[3] = { sizeof(Rtl8192PciEFwBootArray),
				    sizeof(Rtl8192PciEFwMainArray),
				    sizeof(Rtl8192PciEFwDataArray)};
	u32 file_length = 0;
	u8 *mapped_file = NULL;
	u8 init_step = 0;
	enum opt_rst_type rst_opt = OPT_SYSTEM_RESET;
	enum firmware_init_step starting_state = FW_INIT_STEP0_BOOT;
	struct rt_firmware *pfirmware = priv->pFirmware;

	RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n");

	if (pfirmware->firmware_status == FW_STATUS_0_INIT) {
		rst_opt = OPT_SYSTEM_RESET;
		starting_state = FW_INIT_STEP0_BOOT;
	} else if (pfirmware->firmware_status == FW_STATUS_5_READY) {
		rst_opt = OPT_FIRMWARE_RESET;
		starting_state = FW_INIT_STEP2_DATA;
	} else {
		RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined"
			 " firmware state\n");
	}

	priv->firmware_source = FW_SOURCE_IMG_FILE;
	for (init_step = starting_state; init_step <= FW_INIT_STEP2_DATA;
	     init_step++) {
		if (rst_opt == OPT_SYSTEM_RESET) {
			switch (priv->firmware_source) {
			case FW_SOURCE_IMG_FILE:
			{
				if (pfirmware->firmware_buf_size[init_step] == 0) {
					const char *fw_name[3] = {
						RTL8192E_BOOT_IMG_FW,
						RTL8192E_MAIN_IMG_FW,
						RTL8192E_DATA_IMG_FW
					};
					const struct firmware *fw_entry;
					int rc;

					rc = request_firmware(&fw_entry,
							      fw_name[init_step],
							      &priv->pdev->dev);
					if (rc < 0) {
						RT_TRACE(COMP_FIRMWARE,
							 "request firm"
							 "ware fail!\n");
						goto download_firmware_fail;
					}
					if (fw_entry->size >
					    sizeof(pfirmware->firmware_buf[init_step])) {
						RT_TRACE(COMP_FIRMWARE,
							 "img file size "
							 "exceed the container struct "
							 "buffer fail!\n");
						/* Fix candidate noted: fw_entry is
						 * released only below; on this error
						 * path we must release it too. */
						release_firmware(fw_entry);
						goto download_firmware_fail;
					}

					if (init_step != FW_INIT_STEP1_MAIN) {
						memcpy(pfirmware->firmware_buf[init_step],
						       fw_entry->data,
						       fw_entry->size);
						pfirmware->firmware_buf_size[init_step] =
							fw_entry->size;
					} else {
						/* main image: prefix 128 zero bytes */
						memset(pfirmware->firmware_buf[init_step],
						       0, 128);
						memcpy(&pfirmware->firmware_buf[init_step][128],
						       fw_entry->data,
						       fw_entry->size);
						pfirmware->firmware_buf_size[init_step] =
							fw_entry->size + 128;
					}

					if (rst_opt == OPT_SYSTEM_RESET)
						release_firmware(fw_entry);
				}
				mapped_file = pfirmware->firmware_buf[init_step];
				file_length = pfirmware->firmware_buf_size[init_step];
				break;
			}
			case FW_SOURCE_HEADER_FILE:
				mapped_file = firmware_img_buf[init_step];
				file_length = firmware_img_len[init_step];
				if (init_step == FW_INIT_STEP2_DATA) {
					memcpy(pfirmware->firmware_buf[init_step],
					       mapped_file, file_length);
					pfirmware->firmware_buf_size[init_step] =
						file_length;
				}
				break;
			default:
				break;
			}
		} else if (rst_opt == OPT_FIRMWARE_RESET) {
			/* Replay the cached image. */
			mapped_file = pfirmware->firmware_buf[init_step];
			file_length = pfirmware->firmware_buf_size[init_step];
		}

		rt_status = fw_download_code(dev, mapped_file, file_length);
		if (!rt_status)
			goto download_firmware_fail;

		if (!firmware_check_ready(dev, init_step))
			goto download_firmware_fail;
	}

	RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n");
	return rt_status;

download_firmware_fail:
	RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
	rt_status = false;
	return rt_status;
}
gpl-2.0
pinkflozd/android_kernel_motorola_falcon
drivers/staging/octeon/ethernet-tx.c
3986
21562
/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 *********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

/* Per-skb scratch space in skb->cb, used to build PKO gather lists. */
#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority fieid, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

/*
 * Clamp the fetched-and-incremented FAU counter to [0, MAX_SKB_TO_FREE]
 * and undo any over-increment on the hardware counter.  Returns the
 * number of skbs the caller may actually free.
 */
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;
	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}

/* Re-arm the one-shot CIU timer 1 that drives the TX cleanup watchdog. */
static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;
	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

/*
 * Free skbs whose transmission the PKO hardware has completed, for every
 * queue of @dev.  Wakes the netdev queue if it was stopped and re-kicks
 * the watchdog while packets remain outstanding.
 */
void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau+qos*4);

		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			while (skb_to_free > 0) {
				struct sk_buff *t =
					__skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;
				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	/*
	 * NOTE(review): total_freed >= 0 is always true (it starts at 0 and
	 * only non-negative amounts are added) - presumably "> 0" was
	 * intended; confirm before changing, the current form just wakes
	 * the queue more eagerly.
	 */
	if (total_freed >= 0 && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns Always returns NETDEV_TX_OK
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure.  It is larger that one
	 * cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers, If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/* Get the number of skbuffs in use by the hardware */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/* Get the number of skbuffs in use by the hardware */
				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
								       MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
								 priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding.  The kernel should always give
	 * us at least a cache line
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		/* Build a gather list in skb->cb: head segment followed by
		 * one entry per page fragment.
		 */
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
							  fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and get carried over when a
	 * buffer is reused, bad thing may happen.  If in doubt and
	 * you dont need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA.  We don't need the FAU
	 * update anymore
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * cleanup a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
		|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);

	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau+qos*4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core.  */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing.  */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;
		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean = cvmx_fau_fetch_and_add32(
						FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet every 1024 packets for
		 * the pathological case of high traffic on one port
		 * delaying clean up of packets on a different port
		 * that is blocked waiting for the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure

 * Returns Always returns zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		printk_ratelimited("%s: Failed to allocate a work "
				   "queue entry\n", dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configure skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero. Sets of zero later are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP)
		    || (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
 * @dev:    Device being shutdown
 *
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

/* Tasklet body: run the TX free-list cleanup on every registered port. */
static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			cvm_oct_free_tx_skbs(dev);
		}
	}
}

/* CIU timer 1 IRQ: defer the cleanup work to the tasklet. */
static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet.  */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}

/* Hook CIU timer 1 as the TX cleanup watchdog; panics on IRQ failure. */
void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

/* Release the watchdog IRQ acquired in cvm_oct_tx_initialize(). */
void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}
gpl-2.0
LightningZap/sgs4g_lz_kernel
arch/arm/plat-s3c24xx/simtec-audio.c
4242
1823
/* linux/arch/arm/plat-s3c24xx/simtec-audio.c * * Copyright (c) 2009 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * Audio setup for various Simtec S3C24XX implementations * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/device.h> #include <linux/io.h> #include <mach/bast-map.h> #include <mach/bast-irq.h> #include <mach/bast-cpld.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <plat/audio-simtec.h> #include <plat/devs.h> /* platform ops for audio */ static void simtec_audio_startup_lrroute(void) { unsigned int tmp; unsigned long flags; local_irq_save(flags); tmp = __raw_readb(BAST_VA_CTRL1); tmp &= ~BAST_CPLD_CTRL1_LRMASK; tmp |= BAST_CPLD_CTRL1_LRCDAC; __raw_writeb(tmp, BAST_VA_CTRL1); local_irq_restore(flags); } static struct s3c24xx_audio_simtec_pdata simtec_audio_platdata; static char our_name[32]; static struct platform_device simtec_audio_dev = { .name = our_name, .id = -1, .dev = { .parent = &s3c_device_iis.dev, .platform_data = &simtec_audio_platdata, }, }; int __init simtec_audio_add(const char *name, bool has_lr_routing, struct s3c24xx_audio_simtec_pdata *spd) { if (!name) name = "tlv320aic23"; snprintf(our_name, sizeof(our_name)-1, "s3c24xx-simtec-%s", name); /* copy platform data so the source can be __initdata */ if (spd) simtec_audio_platdata = *spd; if (has_lr_routing) simtec_audio_platdata.startup = simtec_audio_startup_lrroute; platform_device_register(&s3c_device_iis); platform_device_register(&simtec_audio_dev); return 0; }
gpl-2.0
fronti90/kernel_lge_geefhd
drivers/video/backlight/atmel-pwm-bl.c
7058
6308
/*
 * Copyright (C) 2008 Atmel Corporation
 *
 * Backlight driver using Atmel PWM peripheral.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
#include <linux/atmel_pwm.h>
#include <linux/atmel-pwm-bl.h>
#include <linux/slab.h>

/* Per-device driver state. */
struct atmel_pwm_bl {
	const struct atmel_pwm_bl_platform_data	*pdata;
	struct backlight_device			*bldev;
	struct platform_device			*pdev;
	struct pwm_channel			pwmc;
	int					gpio_on; /* enable GPIO, -1 if unused */
};

/*
 * Apply the brightness requested in bd->props to the PWM channel.
 *
 * Brightness 0 (or a blanked framebuffer) disables the PWM channel and
 * de-asserts the enable GPIO; otherwise the duty cycle is derived from
 * the brightness, clamped to [pwm_duty_min, pwm_duty_max], honouring
 * pwm_active_low.  Always returns 0.
 */
static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	int intensity = bd->props.brightness;
	int pwm_duty;

	/* Treat a powered-down or blanked display as "off". */
	if (bd->props.power != FB_BLANK_UNBLANK)
		intensity = 0;
	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
		intensity = 0;

	if (pwmbl->pdata->pwm_active_low)
		pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
	else
		pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;

	if (pwm_duty > pwmbl->pdata->pwm_duty_max)
		pwm_duty = pwmbl->pdata->pwm_duty_max;
	if (pwm_duty < pwmbl->pdata->pwm_duty_min)
		pwm_duty = pwmbl->pdata->pwm_duty_min;

	if (!intensity) {
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					0 ^ pwmbl->pdata->on_active_low);
		}
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		pwm_channel_disable(&pwmbl->pwmc);
	} else {
		/* Enable the channel before programming the new duty. */
		pwm_channel_enable(&pwmbl->pwmc);
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					1 ^ pwmbl->pdata->on_active_low);
		}
	}

	return 0;
}

/*
 * Read back the current brightness from the PWM duty register,
 * inverting the mapping used by atmel_pwm_bl_set_intensity().
 */
static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	u8 intensity;

	if (pwmbl->pdata->pwm_active_low) {
		intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
			pwmbl->pdata->pwm_duty_min;
	} else {
		intensity = pwmbl->pdata->pwm_duty_max -
			pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
	}

	return intensity;
}

/*
 * Program the PWM channel: pick the smallest power-of-two prescaler
 * that reaches the requested frequency, set the initial duty from the
 * default brightness, and enable the channel.
 */
static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
{
	unsigned long pwm_rate = pwmbl->pwmc.mck;
	unsigned long prescale = DIV_ROUND_UP(pwm_rate,
			(pwmbl->pdata->pwm_frequency *
			 pwmbl->pdata->pwm_compare_max)) - 1;

	/*
	 * Prescale must be power of two and maximum 0xf in size because of
	 * hardware limit. PWM speed will be:
	 *	PWM module clock speed / (2 ^ prescale).
	 */
	prescale = fls(prescale);
	if (prescale > 0xf)
		prescale = 0xf;

	pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
			pwmbl->pdata->pwm_duty_min +
			pwmbl->bldev->props.brightness);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
			pwmbl->pdata->pwm_compare_max);

	dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
			"(%lu Hz)\n", pwmbl->pwmc.mck /
			pwmbl->pdata->pwm_compare_max /
			(1 << prescale));

	return pwm_channel_enable(&pwmbl->pwmc);
}

static const struct backlight_ops atmel_pwm_bl_ops = {
	.get_brightness = atmel_pwm_bl_get_intensity,
	.update_status  = atmel_pwm_bl_set_intensity,
};

/*
 * Probe: validate platform data, claim the PWM channel and optional
 * enable GPIO (display off by default), register the backlight device
 * and power it up at mid brightness.
 */
static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	const struct atmel_pwm_bl_platform_data *pdata;
	struct backlight_device *bldev;
	struct atmel_pwm_bl *pwmbl;
	int retval;

	pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
	if (!pwmbl)
		return -ENOMEM;

	pwmbl->pdev = pdev;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		retval = -ENODEV;
		goto err_free_mem;
	}

	/* Duty range must fit inside the compare window. */
	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
			pdata->pwm_duty_min > pdata->pwm_duty_max ||
			pdata->pwm_frequency == 0) {
		retval = -EINVAL;
		goto err_free_mem;
	}

	pwmbl->pdata = pdata;
	pwmbl->gpio_on = pdata->gpio_on;

	retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
	if (retval)
		goto err_free_mem;

	if (pwmbl->gpio_on != -1) {
		retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
		if (retval) {
			pwmbl->gpio_on = -1;
			goto err_free_pwm;
		}

		/* Turn display off by default. */
		retval = gpio_direction_output(pwmbl->gpio_on,
				0 ^ pdata->on_active_low);
		if (retval)
			goto err_free_gpio;
	}

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
					  &atmel_pwm_bl_ops, &props);
	if (IS_ERR(bldev)) {
		retval = PTR_ERR(bldev);
		goto err_free_gpio;
	}

	pwmbl->bldev = bldev;

	platform_set_drvdata(pdev, pwmbl);

	/* Power up the backlight by default at middle intensity. */
	bldev->props.power = FB_BLANK_UNBLANK;
	bldev->props.brightness = bldev->props.max_brightness / 2;

	retval = atmel_pwm_bl_init_pwm(pwmbl);
	if (retval)
		goto err_free_bl_dev;

	atmel_pwm_bl_set_intensity(bldev);

	return 0;

err_free_bl_dev:
	platform_set_drvdata(pdev, NULL);
	backlight_device_unregister(bldev);
err_free_gpio:
	if (pwmbl->gpio_on != -1)
		gpio_free(pwmbl->gpio_on);
err_free_pwm:
	pwm_channel_free(&pwmbl->pwmc);
err_free_mem:
	kfree(pwmbl);
	return retval;
}

static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
{
	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);

	if (pwmbl->gpio_on != -1) {
		/*
		 * Honour the enable-GPIO polarity here too: writing a raw
		 * 0 would turn the backlight ON on boards whose enable
		 * pin is active-low.
		 */
		gpio_set_value(pwmbl->gpio_on,
				0 ^ pwmbl->pdata->on_active_low);
		gpio_free(pwmbl->gpio_on);
	}
	pwm_channel_disable(&pwmbl->pwmc);
	pwm_channel_free(&pwmbl->pwmc);
	backlight_device_unregister(pwmbl->bldev);
	platform_set_drvdata(pdev, NULL);
	kfree(pwmbl);

	return 0;
}

static struct platform_driver atmel_pwm_bl_driver = {
	.driver = {
		.name = "atmel-pwm-bl",
	},
	/* REVISIT add suspend() and resume() */
	.remove = __exit_p(atmel_pwm_bl_remove),
};

static int __init atmel_pwm_bl_init(void)
{
	return platform_driver_probe(&atmel_pwm_bl_driver, atmel_pwm_bl_probe);
}
module_init(atmel_pwm_bl_init);

static void __exit atmel_pwm_bl_exit(void)
{
	platform_driver_unregister(&atmel_pwm_bl_driver);
}
module_exit(atmel_pwm_bl_exit);

MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
MODULE_DESCRIPTION("Atmel PWM backlight driver");
MODULE_LICENSE("GPL");
gpl-2.0
csmanjuvijay/usb-next
lib/cordic.c
9874
2546
/* * Copyright (c) 2011 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> #include <linux/cordic.h> #define CORDIC_ANGLE_GEN 39797 #define CORDIC_PRECISION_SHIFT 16 #define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) #define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) #define FLOAT(X) (((X) >= 0) \ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) static const s32 arctan_table[] = { 2949120, 1740967, 919879, 466945, 234379, 117304, 58666, 29335, 14668, 7334, 3667, 1833, 917, 458, 229, 115, 57, 29 }; /* * cordic_calc_iq() - calculates the i/q coordinate for given angle * * theta: angle in degrees for which i/q coordinate is to be calculated * coord: function output parameter holding the i/q coordinate */ struct cordic_iq cordic_calc_iq(s32 theta) { struct cordic_iq coord; s32 angle, valtmp; unsigned iter; int signx = 1; int signtheta; coord.i = CORDIC_ANGLE_GEN; coord.q = 0; angle = 0; theta = FIXED(theta); signtheta = (theta < 0) ? 
-1 : 1; theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) - FIXED(180) * signtheta; if (FLOAT(theta) > 90) { theta -= FIXED(180); signx = -1; } else if (FLOAT(theta) < -90) { theta += FIXED(180); signx = -1; } for (iter = 0; iter < CORDIC_NUM_ITER; iter++) { if (theta > angle) { valtmp = coord.i - (coord.q >> iter); coord.q += (coord.i >> iter); angle += arctan_table[iter]; } else { valtmp = coord.i + (coord.q >> iter); coord.q -= (coord.i >> iter); angle -= arctan_table[iter]; } coord.i = valtmp; } coord.i *= signx; coord.q *= signx; return coord; } EXPORT_SYMBOL(cordic_calc_iq); MODULE_DESCRIPTION("CORDIC algorithm"); MODULE_AUTHOR("Broadcom Corporation"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
ShinySide/HispAsian_Kernel
drivers/input/joystick/cobra.c
9874
6747
/*
 * Copyright (c) 1999-2001 Vojtech Pavlik
 */

/*
 * Creative Labs Blaster GamePad Cobra driver for Linux
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>

#define DRIVER_DESC	"Creative Labs Blaster GamePad Cobra driver"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

#define COBRA_MAX_STROBE	45	/* 45 us max wait for first strobe */
#define COBRA_LENGTH		36	/* packet length in bits */

/* Button bit order as decoded from the packet; 0-terminated. */
static int cobra_btn[] = { BTN_START, BTN_SELECT, BTN_TL, BTN_TR, BTN_X, BTN_Y, BTN_Z, BTN_A, BTN_B, BTN_C, BTN_TL2, BTN_TR2, 0 };

/* Per-gameport driver state; up to two pads share one port. */
struct cobra {
	struct gameport *gameport;	/* the port the pads hang off */
	struct input_dev *dev[2];	/* one input device per pad */
	int reads;			/* poll counter (diagnostics) */
	int bads;			/* failed-read counter */
	unsigned char exists;		/* bitmask of detected pads */
	char phys[2][32];		/* sysfs phys path per pad */
};

/*
 * Bit-bang one 36-bit packet from up to two pads.
 *
 * Each pad signals on its own pair of gameport button lines (clock +
 * data, two bits apart); a clock edge strobes one data bit.  Runs with
 * interrupts off because the strobe timing is microsecond-scale.
 * After capture, the packet is rotated until its fixed sync pattern
 * lines up, then the payload bits are compacted into *data.
 *
 * Returns a bitmask of pads that produced a valid packet.
 */
static unsigned char cobra_read_packet(struct gameport *gameport, unsigned int *data)
{
	unsigned long flags;
	unsigned char u, v, w;
	__u64 buf[2];
	int r[2], t[2];
	int i, j, ret;
	int strobe = gameport_time(gameport, COBRA_MAX_STROBE);

	for (i = 0; i < 2; i++) {
		r[i] = buf[i] = 0;
		t[i] = COBRA_MAX_STROBE;
	}

	local_irq_save(flags);

	u = gameport_read(gameport);

	do {
		t[0]--; t[1]--;
		v = gameport_read(gameport);
		/* w holds the lines that changed; two bits per pad */
		for (i = 0, w = u ^ v; i < 2 && w; i++, w >>= 2)
			if (w & 0x30) {
				/* valid single-line edge within the bit budget */
				if ((w & 0x30) < 0x30 && r[i] < COBRA_LENGTH && t[i] > 0) {
					buf[i] |= (__u64)((w >> 5) & 1) << r[i]++;
					t[i] = strobe;
					u = v;
				} else t[i] = 0;	/* glitch or overrun: give up on this pad */
			}
	} while (t[0] > 0 || t[1] > 0);

	local_irq_restore(flags);

	ret = 0;

	for (i = 0; i < 2; i++) {

		if (r[i] != COBRA_LENGTH) continue;

		/* rotate until the sync bits match the expected pattern */
		for (j = 0; j < COBRA_LENGTH && (buf[i] & 0x04104107f) ^ 0x041041040; j++)
			buf[i] = (buf[i] >> 1) | ((__u64)(buf[i] & 1) << (COBRA_LENGTH - 1));

		if (j < COBRA_LENGTH) ret |= (1 << i);

		/* drop the sync bits, pack the 25 payload bits together */
		data[i] = ((buf[i] >>  7) & 0x000001f) | ((buf[i] >>  8) & 0x00003e0)
			| ((buf[i] >>  9) & 0x0007c00) | ((buf[i] >> 10) & 0x00f8000)
			| ((buf[i] >> 11) & 0x1f00000);
	}

	return ret;
}

/*
 * Poll callback: read one packet and forward axis/button state to the
 * input layer.  A read that doesn't match the detected-pad mask is
 * counted as bad and dropped.
 */
static void cobra_poll(struct gameport *gameport)
{
	struct cobra *cobra = gameport_get_drvdata(gameport);
	struct input_dev *dev;
	unsigned int data[2];
	int i, j, r;

	cobra->reads++;

	if ((r = cobra_read_packet(gameport, data)) != cobra->exists) {
		cobra->bads++;
		return;
	}

	for (i = 0; i < 2; i++)
		if (cobra->exists & r & (1 << i)) {
			dev = cobra->dev[i];

			/* d-pad: two bits per axis, mapped to -1/0/+1 */
			input_report_abs(dev, ABS_X, ((data[i] >> 4) & 1) - ((data[i] >> 3) & 1));
			input_report_abs(dev, ABS_Y, ((data[i] >> 2) & 1) - ((data[i] >> 1) & 1));

			for (j = 0; cobra_btn[j]; j++)
				input_report_key(dev, cobra_btn[j], data[i] & (0x20 << j));

			input_sync(dev);
		}
}

/* Start polling when the first user opens either input device. */
static int cobra_open(struct input_dev *dev)
{
	struct cobra *cobra = input_get_drvdata(dev);

	gameport_start_polling(cobra->gameport);
	return 0;
}

/* Stop polling when the last user closes the input device. */
static void cobra_close(struct input_dev *dev)
{
	struct cobra *cobra = input_get_drvdata(dev);

	gameport_stop_polling(cobra->gameport);
}

/*
 * Detect pads on a newly seen gameport and register one input device
 * per detected pad.  Error paths unwind in reverse order of setup.
 */
static int cobra_connect(struct gameport *gameport, struct gameport_driver *drv)
{
	struct cobra *cobra;
	struct input_dev *input_dev;
	unsigned int data[2];
	int i, j;
	int err;

	cobra = kzalloc(sizeof(struct cobra), GFP_KERNEL);
	if (!cobra)
		return -ENOMEM;

	cobra->gameport = gameport;

	gameport_set_drvdata(gameport, cobra);

	err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
	if (err)
		goto fail1;

	cobra->exists = cobra_read_packet(gameport, data);

	/* pads with the Ext bit set speak an unknown variant: ignore them */
	for (i = 0; i < 2; i++)
		if ((cobra->exists >> i) & data[i] & 1) {
			printk(KERN_WARNING "cobra.c: Device %d on %s has the Ext bit set. ID is: %d"
				" Contact vojtech@ucw.cz\n", i, gameport->phys, (data[i] >> 2) & 7);
			cobra->exists &= ~(1 << i);
		}

	if (!cobra->exists) {
		err = -ENODEV;
		goto fail2;
	}

	gameport_set_poll_handler(gameport, cobra_poll);
	gameport_set_poll_interval(gameport, 20);

	for (i = 0; i < 2; i++) {
		if (~(cobra->exists >> i) & 1)
			continue;

		cobra->dev[i] = input_dev = input_allocate_device();
		if (!input_dev) {
			err = -ENOMEM;
			goto fail3;
		}

		snprintf(cobra->phys[i], sizeof(cobra->phys[i]),
			 "%s/input%d", gameport->phys, i);

		input_dev->name = "Creative Labs Blaster GamePad Cobra";
		input_dev->phys = cobra->phys[i];
		input_dev->id.bustype = BUS_GAMEPORT;
		input_dev->id.vendor = GAMEPORT_ID_VENDOR_CREATIVE;
		input_dev->id.product = 0x0008;
		input_dev->id.version = 0x0100;
		input_dev->dev.parent = &gameport->dev;

		input_set_drvdata(input_dev, cobra);

		input_dev->open = cobra_open;
		input_dev->close = cobra_close;

		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input_set_abs_params(input_dev, ABS_X, -1, 1, 0, 0);
		input_set_abs_params(input_dev, ABS_Y, -1, 1, 0, 0);
		for (j = 0; cobra_btn[j]; j++)
			set_bit(cobra_btn[j], input_dev->keybit);

		err = input_register_device(cobra->dev[i]);
		if (err)
			goto fail4;
	}

	return 0;

 fail4:	input_free_device(cobra->dev[i]);
 fail3:	while (--i >= 0)
		if (cobra->dev[i])
			input_unregister_device(cobra->dev[i]);
 fail2:	gameport_close(gameport);
 fail1:	gameport_set_drvdata(gameport, NULL);
	kfree(cobra);
	return err;
}

/* Tear down all input devices and release the gameport. */
static void cobra_disconnect(struct gameport *gameport)
{
	struct cobra *cobra = gameport_get_drvdata(gameport);
	int i;

	for (i = 0; i < 2; i++)
		if ((cobra->exists >> i) & 1)
			input_unregister_device(cobra->dev[i]);
	gameport_close(gameport);
	gameport_set_drvdata(gameport, NULL);
	kfree(cobra);
}

static struct gameport_driver cobra_drv = {
	.driver		= {
		.name	= "cobra",
	},
	.description	= DRIVER_DESC,
	.connect	= cobra_connect,
	.disconnect	= cobra_disconnect,
};

static int __init cobra_init(void)
{
	return gameport_register_driver(&cobra_drv);
}

static void __exit cobra_exit(void)
{
	gameport_unregister_driver(&cobra_drv);
}

module_init(cobra_init);
module_exit(cobra_exit);
gpl-2.0
SebSUE/InventecMustangXavier
drivers/leds/led-core.c
147
6321
/*
 * LED Class Core
 *
 * Copyright 2005-2006 Openedhand Ltd.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include "leds.h"

DECLARE_RWSEM(leds_list_lock);
EXPORT_SYMBOL_GPL(leds_list_lock);

LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);

/*
 * Software-blink timer callback: toggles the LED between off and its
 * stored blink brightness, re-arming itself with delay_on/delay_off.
 * For one-shot blinks it arms the ONESHOT_STOP flag on the final
 * transition so the next expiry ends the sequence.
 */
static void led_timer_function(unsigned long data)
{
	struct led_classdev *led_cdev = (void *)data;
	unsigned long brightness;
	unsigned long delay;

	/* blinking has been cancelled (a delay was zeroed): force off */
	if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
		led_set_brightness_async(led_cdev, LED_OFF);
		return;
	}

	/* final expiry of a one-shot blink: consume the stop flag */
	if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
		led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
		return;
	}

	brightness = led_get_brightness(led_cdev);
	if (!brightness) {
		/* Time to switch the LED on. */
		if (led_cdev->delayed_set_value) {
			/* pick up a brightness change requested mid-blink */
			led_cdev->blink_brightness =
					led_cdev->delayed_set_value;
			led_cdev->delayed_set_value = 0;
		}
		brightness = led_cdev->blink_brightness;
		delay = led_cdev->blink_delay_on;
	} else {
		/* Store the current brightness value to be able
		 * to restore it when the delay_off period is over.
		 */
		led_cdev->blink_brightness = brightness;
		brightness = LED_OFF;
		delay = led_cdev->blink_delay_off;
	}

	led_set_brightness_async(led_cdev, brightness);

	/* Return in next iteration if led is in one-shot mode and we are in
	 * the final blink state so that the led is toggled each delay_on +
	 * delay_off milliseconds in worst case.
	 */
	if (led_cdev->flags & LED_BLINK_ONESHOT) {
		if (led_cdev->flags & LED_BLINK_INVERT) {
			if (brightness)
				led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
		} else {
			if (!brightness)
				led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
		}
	}

	mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
}

/*
 * Workqueue handler for brightness changes deferred by
 * led_set_brightness(): cancels any software blink, then applies the
 * stored value.
 */
static void set_brightness_delayed(struct work_struct *ws)
{
	struct led_classdev *led_cdev =
		container_of(ws, struct led_classdev, set_brightness_work);

	led_stop_software_blink(led_cdev);

	led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
}

/*
 * Start (or short-circuit) timer-driven blinking.  A zero delay_on
 * means permanently off, a zero delay_off permanently on; otherwise
 * the blink timer is kicked to fire on the next jiffy.
 */
static void led_set_software_blink(struct led_classdev *led_cdev,
				   unsigned long delay_on,
				   unsigned long delay_off)
{
	int current_brightness;

	/* remember the brightness to blink at; default to maximum */
	current_brightness = led_get_brightness(led_cdev);
	if (current_brightness)
		led_cdev->blink_brightness = current_brightness;
	if (!led_cdev->blink_brightness)
		led_cdev->blink_brightness = led_cdev->max_brightness;

	led_cdev->blink_delay_on = delay_on;
	led_cdev->blink_delay_off = delay_off;

	/* never on - just set to off */
	if (!delay_on) {
		led_set_brightness_async(led_cdev, LED_OFF);
		return;
	}

	/* never off - just set to brightness */
	if (!delay_off) {
		led_set_brightness_async(led_cdev,
					 led_cdev->blink_brightness);
		return;
	}

	mod_timer(&led_cdev->blink_timer, jiffies + 1);
}

/*
 * Common entry for blink requests.  Offers the request to the driver's
 * hardware blink_set() hook first (never for one-shot); falls back to
 * software blinking at 1 Hz if no delays were specified.
 */
static void led_blink_setup(struct led_classdev *led_cdev,
		     unsigned long *delay_on,
		     unsigned long *delay_off)
{
	if (!(led_cdev->flags & LED_BLINK_ONESHOT) &&
	    led_cdev->blink_set &&
	    !led_cdev->blink_set(led_cdev, delay_on, delay_off))
		return;

	/* blink with 1 Hz as default if nothing specified */
	if (!*delay_on && !*delay_off)
		*delay_on = *delay_off = 500;

	led_set_software_blink(led_cdev, *delay_on, *delay_off);
}

/* Initialise the deferred-set work and the software blink timer. */
void led_init_core(struct led_classdev *led_cdev)
{
	INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);

	setup_timer(&led_cdev->blink_timer, led_timer_function,
		    (unsigned long)led_cdev);
}
EXPORT_SYMBOL_GPL(led_init_core);

/*
 * Start a continuous blink, cancelling any pending one-shot state
 * first.  delay_on/delay_off may be adjusted by the driver hook.
 */
void led_blink_set(struct led_classdev *led_cdev,
		   unsigned long *delay_on,
		   unsigned long *delay_off)
{
	del_timer_sync(&led_cdev->blink_timer);

	led_cdev->flags &= ~LED_BLINK_ONESHOT;
	led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;

	led_blink_setup(led_cdev, delay_on, delay_off);
}
EXPORT_SYMBOL(led_blink_set);

/*
 * Trigger a single blink cycle.  If a one-shot blink is already in
 * flight it is left to complete.  @invert swaps the on/off order.
 */
void led_blink_set_oneshot(struct led_classdev *led_cdev,
			   unsigned long *delay_on,
			   unsigned long *delay_off,
			   int invert)
{
	if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
	     timer_pending(&led_cdev->blink_timer))
		return;

	led_cdev->flags |= LED_BLINK_ONESHOT;
	led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;

	if (invert)
		led_cdev->flags |= LED_BLINK_INVERT;
	else
		led_cdev->flags &= ~LED_BLINK_INVERT;

	led_blink_setup(led_cdev, delay_on, delay_off);
}
EXPORT_SYMBOL(led_blink_set_oneshot);

/* Stop software blinking and clear the stored delays. */
void led_stop_software_blink(struct led_classdev *led_cdev)
{
	del_timer_sync(&led_cdev->blink_timer);
	led_cdev->blink_delay_on = 0;
	led_cdev->blink_delay_off = 0;
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);

/*
 * Set LED brightness, respecting an active software blink: while
 * blinking, non-zero values are stashed for the timer to pick up and
 * LED_OFF is deferred to the workqueue (which also stops the blink).
 */
void led_set_brightness(struct led_classdev *led_cdev,
			enum led_brightness brightness)
{
	int ret = 0;

	/* delay brightness if soft-blink is active */
	if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
		led_cdev->delayed_set_value = brightness;
		if (brightness == LED_OFF)
			schedule_work(&led_cdev->set_brightness_work);
		return;
	}

	if (led_cdev->flags & SET_BRIGHTNESS_ASYNC) {
		led_set_brightness_async(led_cdev, brightness);
		return;
	} else if (led_cdev->flags & SET_BRIGHTNESS_SYNC)
		ret = led_set_brightness_sync(led_cdev, brightness);
	else
		ret = -EINVAL;

	if (ret < 0)
		dev_dbg(led_cdev->dev,
			"Setting LED brightness failed (%d)\n", ret);
}
EXPORT_SYMBOL(led_set_brightness);

/*
 * Refresh led_cdev->brightness from the driver's brightness_get()
 * hook, if it provides one.  Returns 0 on success or the (negative)
 * value reported by the hook.
 */
int led_update_brightness(struct led_classdev *led_cdev)
{
	int ret = 0;

	if (led_cdev->brightness_get) {
		ret = led_cdev->brightness_get(led_cdev);
		if (ret >= 0) {
			led_cdev->brightness = ret;
			return 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL(led_update_brightness);

/* Caller must ensure led_cdev->led_access held */
void led_sysfs_disable(struct led_classdev *led_cdev)
{
	lockdep_assert_held(&led_cdev->led_access);

	led_cdev->flags |= LED_SYSFS_DISABLE;
}
EXPORT_SYMBOL_GPL(led_sysfs_disable);

/* Caller must ensure led_cdev->led_access held */
void led_sysfs_enable(struct led_classdev *led_cdev)
{
	lockdep_assert_held(&led_cdev->led_access);

	led_cdev->flags &= ~LED_SYSFS_DISABLE;
}
EXPORT_SYMBOL_GPL(led_sysfs_enable);
gpl-2.0
Cryptoo/kernel
drivers/firmware/efi/libstub/efi-stub-helper.c
915
16388
/* * Helper functions used by the EFI stub on multiple * architectures. This should be #included by the EFI stub * implementation files. * * Copyright 2011 Intel Corporation; author Matt Fleming * * This file is part of the Linux kernel, and is made available * under the terms of the GNU General Public License version 2. * */ #include <linux/efi.h> #include <asm/efi.h> #include "efistub.h" /* * Some firmware implementations have problems reading files in one go. * A read chunk size of 1MB seems to work for most platforms. * * Unfortunately, reading files in chunks triggers *other* bugs on some * platforms, so we provide a way to disable this workaround, which can * be done by passing "efi=nochunk" on the EFI boot stub command line. * * If you experience issues with initrd images being corrupt it's worth * trying efi=nochunk, but chunking is enabled by default because there * are far more machines that require the workaround than those that * break with it enabled. */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; /* * Allow the platform to override the allocation granularity: this allows * systems that have the capability to run with a larger page size to deal * with the allocations for initrd and fdt more efficiently. 
*/ #ifndef EFI_ALLOC_ALIGN #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE #endif struct file_info { efi_file_handle_t *handle; u64 size; }; void efi_printk(efi_system_table_t *sys_table_arg, char *str) { char *s8; for (s8 = str; *s8; s8++) { efi_char16_t ch[2] = { 0 }; ch[0] = *s8; if (*s8 == '\n') { efi_char16_t nl[2] = { '\r', 0 }; efi_char16_printk(sys_table_arg, nl); } efi_char16_printk(sys_table_arg, ch); } } efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, efi_memory_desc_t **map, unsigned long *map_size, unsigned long *desc_size, u32 *desc_ver, unsigned long *key_ptr) { efi_memory_desc_t *m = NULL; efi_status_t status; unsigned long key; u32 desc_version; *map_size = sizeof(*m) * 32; again: /* * Add an additional efi_memory_desc_t because we're doing an * allocation which may be in a new descriptor region. */ *map_size += sizeof(*m); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, *map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; *desc_size = 0; key = 0; status = efi_call_early(get_memory_map, map_size, m, &key, desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL) { efi_call_early(free_pool, m); goto again; } if (status != EFI_SUCCESS) efi_call_early(free_pool, m); if (key_ptr && status == EFI_SUCCESS) *key_ptr = key; if (desc_ver && status == EFI_SUCCESS) *desc_ver = desc_version; fail: *map = m; return status; } unsigned long get_dram_base(efi_system_table_t *sys_table_arg) { efi_status_t status; unsigned long map_size; unsigned long membase = EFI_ERROR; struct efi_memory_map map; efi_memory_desc_t *md; status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map, &map_size, &map.desc_size, NULL, NULL); if (status != EFI_SUCCESS) return membase; map.map_end = map.map + map_size; for_each_efi_memory_desc(&map, md) if (md->attribute & EFI_MEMORY_WB) if (membase > md->phys_addr) membase = md->phys_addr; efi_call_early(free_pool, map.map); return membase; } /* * Allocate at the highest possible address that 
is not above 'max'. */ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { unsigned long map_size, desc_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; u64 max_addr = 0; int i; status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, NULL, NULL); if (status != EFI_SUCCESS) goto fail; /* * Enforce minimum alignment that EFI requires when requesting * a specific address. We are doing page-based allocations, * so we must be aligned to a page. */ if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; again: for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; unsigned long m = (unsigned long)map; u64 start, end; desc = (efi_memory_desc_t *)(m + (i * desc_size)); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; if (desc->num_pages < nr_pages) continue; start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); if (end > max) end = max; if ((start + size) > end) continue; if (round_down(end - size, align) < start) continue; start = round_down(end - size, align); /* * Don't allocate at 0x0. It will confuse code that * checks pointers against NULL. */ if (start == 0x0) continue; if (start > max_addr) max_addr = start; } if (!max_addr) status = EFI_NOT_FOUND; else { status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &max_addr); if (status != EFI_SUCCESS) { max = max_addr; max_addr = 0; goto again; } *addr = max_addr; } efi_call_early(free_pool, map); fail: return status; } /* * Allocate at the lowest possible address. 
*/ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr) { unsigned long map_size, desc_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; int i; status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, NULL, NULL); if (status != EFI_SUCCESS) goto fail; /* * Enforce minimum alignment that EFI requires when requesting * a specific address. We are doing page-based allocations, * so we must be aligned to a page. */ if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; unsigned long m = (unsigned long)map; u64 start, end; desc = (efi_memory_desc_t *)(m + (i * desc_size)); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; if (desc->num_pages < nr_pages) continue; start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); /* * Don't allocate at 0x0. It will confuse code that * checks pointers against NULL. Skip the first 8 * bytes so we start at a nice even number. */ if (start == 0x0) start += 8; start = round_up(start, align); if ((start + size) > end) continue; status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &start); if (status == EFI_SUCCESS) { *addr = start; break; } } if (i == map_size / desc_size) status = EFI_NOT_FOUND; efi_call_early(free_pool, map); fail: return status; } void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long addr) { unsigned long nr_pages; if (!size) return; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; efi_call_early(free_pages, addr, nr_pages); } /* * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= * option, e.g. efi=nochunk. 
* * It should be noted that efi= is parsed in two very different * environments, first in the early boot environment of the EFI boot * stub, and subsequently during the kernel boot. */ efi_status_t efi_parse_options(char *cmdline) { char *str; /* * If no EFI parameters were specified on the cmdline we've got * nothing to do. */ str = strstr(cmdline, "efi="); if (!str) return EFI_SUCCESS; /* Skip ahead to first argument */ str += strlen("efi="); /* * Remember, because efi= is also used by the kernel we need to * skip over arguments we don't understand. */ while (*str) { if (!strncmp(str, "nochunk", 7)) { str += strlen("nochunk"); __chunk_size = -1UL; } /* Group words together, delimited by "," */ while (*str && *str != ',') str++; if (*str == ',') str++; } return EFI_SUCCESS; } /* * Check the cmdline for a LILO-style file= arguments. * * We only support loading a file from the same filesystem as * the kernel image. */ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, char *cmd_line, char *option_string, unsigned long max_addr, unsigned long *load_addr, unsigned long *load_size) { struct file_info *files; unsigned long file_addr; u64 file_size_total; efi_file_handle_t *fh = NULL; efi_status_t status; int nr_files; char *str; int i, j, k; file_addr = 0; file_size_total = 0; str = cmd_line; j = 0; /* See close_handles */ if (!load_addr || !load_size) return EFI_INVALID_PARAMETER; *load_addr = 0; *load_size = 0; if (!str || !*str) return EFI_SUCCESS; for (nr_files = 0; *str; nr_files++) { str = strstr(str, option_string); if (!str) break; str += strlen(option_string); /* Skip any leading slashes */ while (*str == '/' || *str == '\\') str++; while (*str && *str != ' ' && *str != '\n') str++; } if (!nr_files) return EFI_SUCCESS; status = efi_call_early(allocate_pool, EFI_LOADER_DATA, nr_files * sizeof(*files), (void **)&files); if (status != EFI_SUCCESS) { pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle 
list\n"); goto fail; } str = cmd_line; for (i = 0; i < nr_files; i++) { struct file_info *file; efi_char16_t filename_16[256]; efi_char16_t *p; str = strstr(str, option_string); if (!str) break; str += strlen(option_string); file = &files[i]; p = filename_16; /* Skip any leading slashes */ while (*str == '/' || *str == '\\') str++; while (*str && *str != ' ' && *str != '\n') { if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16)) break; if (*str == '/') { *p++ = '\\'; str++; } else { *p++ = *str++; } } *p = '\0'; /* Only open the volume once. */ if (!i) { status = efi_open_volume(sys_table_arg, image, (void **)&fh); if (status != EFI_SUCCESS) goto free_files; } status = efi_file_size(sys_table_arg, fh, filename_16, (void **)&file->handle, &file->size); if (status != EFI_SUCCESS) goto close_handles; file_size_total += file->size; } if (file_size_total) { unsigned long addr; /* * Multiple files need to be at consecutive addresses in memory, * so allocate enough memory for all the files. This is used * for loading multiple files. */ status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000, &file_addr, max_addr); if (status != EFI_SUCCESS) { pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n"); goto close_handles; } /* We've run out of free low memory. 
*/ if (file_addr > max_addr) { pr_efi_err(sys_table_arg, "We've run out of free low memory\n"); status = EFI_INVALID_PARAMETER; goto free_file_total; } addr = file_addr; for (j = 0; j < nr_files; j++) { unsigned long size; size = files[j].size; while (size) { unsigned long chunksize; if (size > __chunk_size) chunksize = __chunk_size; else chunksize = size; status = efi_file_read(files[j].handle, &chunksize, (void *)addr); if (status != EFI_SUCCESS) { pr_efi_err(sys_table_arg, "Failed to read file\n"); goto free_file_total; } addr += chunksize; size -= chunksize; } efi_file_close(files[j].handle); } } efi_call_early(free_pool, files); *load_addr = file_addr; *load_size = file_size_total; return status; free_file_total: efi_free(sys_table_arg, file_size_total, file_addr); close_handles: for (k = j; k < i; k++) efi_file_close(files[k].handle); free_files: efi_call_early(free_pool, files); fail: *load_addr = 0; *load_size = 0; return status; } /* * Relocate a kernel image, either compressed or uncompressed. * In the ARM64 case, all kernel images are currently * uncompressed, and as such when we relocate it we need to * allocate additional space for the BSS segment. Any low * memory that this function should avoid needs to be * unavailable in the EFI memory map, as if the preferred * address is not available the lowest available address will * be used. 
*/ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, unsigned long *image_addr, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, unsigned long alignment) { unsigned long cur_image_addr; unsigned long new_addr = 0; efi_status_t status; unsigned long nr_pages; efi_physical_addr_t efi_addr = preferred_addr; if (!image_addr || !image_size || !alloc_size) return EFI_INVALID_PARAMETER; if (alloc_size < image_size) return EFI_INVALID_PARAMETER; cur_image_addr = *image_addr; /* * The EFI firmware loader could have placed the kernel image * anywhere in memory, but the kernel has restrictions on the * max physical address it can run at. Some architectures * also have a prefered address, so first try to relocate * to the preferred address. If that fails, allocate as low * as possible while respecting the required alignment. */ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, nr_pages, &efi_addr); new_addr = efi_addr; /* * If preferred address allocation failed allocate as low as * possible. */ if (status != EFI_SUCCESS) { status = efi_low_alloc(sys_table_arg, alloc_size, alignment, &new_addr); } if (status != EFI_SUCCESS) { pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); return status; } /* * We know source/dest won't overlap since both memory ranges * have been allocated by UEFI, so we can safely use memcpy. */ memcpy((void *)new_addr, (void *)cur_image_addr, image_size); /* Return the new address of the relocated image. */ *image_addr = new_addr; return status; } /* * Get the number of UTF-8 bytes corresponding to an UTF-16 character. * This overestimates for surrogates, but that is okay. */ static int efi_utf8_bytes(u16 c) { return 1 + (c >= 0x80) + (c >= 0x800); } /* * Convert an UTF-16 string, not necessarily null terminated, to UTF-8. 
*/ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) { unsigned int c; while (n--) { c = *src++; if (n && c >= 0xd800 && c <= 0xdbff && *src >= 0xdc00 && *src <= 0xdfff) { c = 0x10000 + ((c & 0x3ff) << 10) + (*src & 0x3ff); src++; n--; } if (c >= 0xd800 && c <= 0xdfff) c = 0xfffd; /* Unmatched surrogate */ if (c < 0x80) { *dst++ = c; continue; } if (c < 0x800) { *dst++ = 0xc0 + (c >> 6); goto t1; } if (c < 0x10000) { *dst++ = 0xe0 + (c >> 12); goto t2; } *dst++ = 0xf0 + (c >> 18); *dst++ = 0x80 + ((c >> 12) & 0x3f); t2: *dst++ = 0x80 + ((c >> 6) & 0x3f); t1: *dst++ = 0x80 + (c & 0x3f); } return dst; } /* * Convert the unicode UEFI command line to ASCII to pass to kernel. * Size of memory allocated return in *cmd_line_len. * Returns NULL on error. */ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, int *cmd_line_len) { const u16 *s2; u8 *s1 = NULL; unsigned long cmdline_addr = 0; int load_options_chars = image->load_options_size / 2; /* UTF-16 */ const u16 *options = image->load_options; int options_bytes = 0; /* UTF-8 bytes */ int options_chars = 0; /* UTF-16 chars */ efi_status_t status; u16 zero = 0; if (options) { s2 = options; while (*s2 && *s2 != '\n' && options_chars < load_options_chars) { options_bytes += efi_utf8_bytes(*s2++); options_chars++; } } if (!options_chars) { /* No command line options, so return empty string*/ options = &zero; } options_bytes++; /* NUL termination */ status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr); if (status != EFI_SUCCESS) return NULL; s1 = (u8 *)cmdline_addr; s2 = (const u16 *)options; s1 = efi_utf16_to_utf8(s1, s2, options_chars); *s1 = '\0'; *cmd_line_len = options_bytes; return (char *)cmdline_addr; }
gpl-2.0
Lyanzh/linux
drivers/net/wireless/orinoco/scan.c
1427
6020
/* Helpers for managing scan queues * * See copyright notice in main.c */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> #include "hermes.h" #include "orinoco.h" #include "main.h" #include "scan.h" #define ZERO_DBM_OFFSET 0x95 #define MAX_SIGNAL_LEVEL 0x8A #define MIN_SIGNAL_LEVEL 0x2F #define SIGNAL_TO_DBM(x) \ (clamp_t(s32, (x), MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL) \ - ZERO_DBM_OFFSET) #define SIGNAL_TO_MBM(x) (SIGNAL_TO_DBM(x) * 100) static int symbol_build_supp_rates(u8 *buf, const __le16 *rates) { int i; u8 rate; buf[0] = WLAN_EID_SUPP_RATES; for (i = 0; i < 5; i++) { rate = le16_to_cpu(rates[i]); /* NULL terminated */ if (rate == 0x0) break; buf[i + 2] = rate; } buf[1] = i; return i + 2; } static int prism_build_supp_rates(u8 *buf, const u8 *rates) { int i; buf[0] = WLAN_EID_SUPP_RATES; for (i = 0; i < 8; i++) { /* NULL terminated */ if (rates[i] == 0x0) break; buf[i + 2] = rates[i]; } buf[1] = i; /* We might still have another 2 rates, which need to go in * extended supported rates */ if (i == 8 && rates[i] > 0) { buf[10] = WLAN_EID_EXT_SUPP_RATES; for (; i < 10; i++) { /* NULL terminated */ if (rates[i] == 0x0) break; buf[i + 2] = rates[i]; } buf[11] = i - 8; } return (i < 8) ? 
i + 2 : i + 4; } static void orinoco_add_hostscan_result(struct orinoco_private *priv, const union hermes_scan_info *bss) { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; struct cfg80211_bss *cbss; u8 *ie; u8 ie_buf[46]; u64 timestamp; s32 signal; u16 capability; u16 beacon_interval; int ie_len; int freq; int len; len = le16_to_cpu(bss->a.essid_len); /* Reconstruct SSID and bitrate IEs to pass up */ ie_buf[0] = WLAN_EID_SSID; ie_buf[1] = len; memcpy(&ie_buf[2], bss->a.essid, len); ie = ie_buf + len + 2; ie_len = ie_buf[1] + 2; switch (priv->firmware_type) { case FIRMWARE_TYPE_SYMBOL: ie_len += symbol_build_supp_rates(ie, bss->s.rates); break; case FIRMWARE_TYPE_INTERSIL: ie_len += prism_build_supp_rates(ie, bss->p.rates); break; case FIRMWARE_TYPE_AGERE: default: break; } freq = ieee80211_channel_to_frequency( le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); if (!channel) { printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", bss->a.channel, freq); return; /* Then ignore it for now */ } timestamp = 0; capability = le16_to_cpu(bss->a.capabilities); beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bss->a.bssid, timestamp, capability, beacon_interval, ie_buf, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } void orinoco_add_extscan_result(struct orinoco_private *priv, struct agere_ext_scan_info *bss, size_t len) { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; struct cfg80211_bss *cbss; const u8 *ie; u64 timestamp; s32 signal; u16 capability; u16 beacon_interval; size_t ie_len; int chan, freq; ie_len = len - sizeof(*bss); ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len); chan = ie ? 
ie[2] : 0; freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); timestamp = le64_to_cpu(bss->timestamp); capability = le16_to_cpu(bss->capabilities); beacon_interval = le16_to_cpu(bss->beacon_interval); ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bss->bssid, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } void orinoco_add_hostscan_results(struct orinoco_private *priv, unsigned char *buf, size_t len) { int offset; /* In the scan data */ size_t atom_len; bool abort = false; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: atom_len = sizeof(struct agere_scan_apinfo); offset = 0; break; case FIRMWARE_TYPE_SYMBOL: /* Lack of documentation necessitates this hack. * Different firmwares have 68 or 76 byte long atoms. * We try modulo first. If the length divides by both, * we check what would be the channel in the second * frame for a 68-byte atom. 76-byte atoms have 0 there. * Valid channel cannot be 0. 
*/ if (len % 76) atom_len = 68; else if (len % 68) atom_len = 76; else if (len >= 1292 && buf[68] == 0) atom_len = 76; else atom_len = 68; offset = 0; break; case FIRMWARE_TYPE_INTERSIL: offset = 4; if (priv->has_hostscan) { atom_len = le16_to_cpup((__le16 *)buf); /* Sanity check for atom_len */ if (atom_len < sizeof(struct prism2_scan_apinfo)) { printk(KERN_ERR "%s: Invalid atom_len in scan " "data: %zu\n", priv->ndev->name, atom_len); abort = true; goto scan_abort; } } else atom_len = offsetof(struct prism2_scan_apinfo, atim); break; default: abort = true; goto scan_abort; } /* Check that we got an whole number of atoms */ if ((len - offset) % atom_len) { printk(KERN_ERR "%s: Unexpected scan data length %zu, " "atom_len %zu, offset %d\n", priv->ndev->name, len, atom_len, offset); abort = true; goto scan_abort; } /* Process the entries one by one */ for (; offset + atom_len <= len; offset += atom_len) { union hermes_scan_info *atom; atom = (union hermes_scan_info *) (buf + offset); orinoco_add_hostscan_result(priv, atom); } scan_abort: if (priv->scan_request) { cfg80211_scan_done(priv->scan_request, abort); priv->scan_request = NULL; } } void orinoco_scan_done(struct orinoco_private *priv, bool abort) { if (priv->scan_request) { cfg80211_scan_done(priv->scan_request, abort); priv->scan_request = NULL; } }
gpl-2.0
xcaliburinhand/I9000-Reoriented-for-I897-Ginger
drivers/staging/dream/qdsp5/adsp_lpm_verify_cmd.c
1683
2043
/* arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c * * Verificion code for aDSP LPM packets from userspace. * * Copyright (c) 2008 QUALCOMM Incorporated * Copyright (C) 2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <mach/qdsp5/qdsp5lpmcmdi.h> #include "adsp.h" int adsp_lpm_verify_cmd(struct msm_adsp_module *module, unsigned int queue_id, void *cmd_data, size_t cmd_size) { uint32_t cmd_id, col_height, input_row_incr, output_row_incr, input_size, output_size; uint32_t size_mask = 0x0fff; lpm_cmd_start *cmd; if (queue_id != QDSP_lpmCommandQueue) { printk(KERN_ERR "adsp: module %s: wrong queue id %d\n", module->name, queue_id); return -1; } cmd = (lpm_cmd_start *)cmd_data; cmd_id = cmd->cmd_id; if (cmd_id == LPM_CMD_START) { if (cmd_size != sizeof(lpm_cmd_start)) { printk(KERN_ERR "adsp: module %s: wrong size %d, expect %d\n", module->name, cmd_size, sizeof(lpm_cmd_start)); return -1; } col_height = cmd->ip_data_cfg_part1 & size_mask; input_row_incr = cmd->ip_data_cfg_part2 & size_mask; output_row_incr = cmd->op_data_cfg_part1 & size_mask; input_size = col_height * input_row_incr; output_size = col_height * output_row_incr; if ((cmd->ip_data_cfg_part4 && adsp_pmem_fixup(module, (void **)(&cmd->ip_data_cfg_part4), input_size)) || (cmd->op_data_cfg_part3 && adsp_pmem_fixup(module, (void **)(&cmd->op_data_cfg_part3), output_size))) return -1; } else if (cmd_id > 1) { printk(KERN_ERR "adsp: module %s: invalid cmd_id %d\n", module->name, cmd_id); return -1; } return 0; }
gpl-2.0
hiikezoe/android_kernel_kyocera_202k
drivers/usb/core/config.c
3731
24739
#include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/usb/hcd.h> #include <linux/usb/quirks.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <asm/byteorder.h> #include "usb.h" #define USB_MAXALTSETTING 128 /* Hard limit */ #define USB_MAXENDPOINTS 30 /* Hard limit */ #define USB_MAXCONFIG 8 /* Arbitrary limit */ static inline const char *plural(int n) { return (n == 1 ? "" : "s"); } static int find_next_descriptor(unsigned char *buffer, int size, int dt1, int dt2, int *num_skipped) { struct usb_descriptor_header *h; int n = 0; unsigned char *buffer0 = buffer; /* Find the next descriptor of type dt1 or dt2 */ while (size > 0) { h = (struct usb_descriptor_header *) buffer; if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2) break; buffer += h->bLength; size -= h->bLength; ++n; } /* Store the number of descriptors skipped and return the * number of bytes skipped */ if (num_skipped) *num_skipped = n; return buffer - buffer0; } static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) { struct usb_ss_ep_comp_descriptor *desc; int max_tx; /* The SuperSpeed endpoint companion descriptor is supposed to * be the first thing immediately following the endpoint descriptor. */ desc = (struct usb_ss_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || size < USB_DT_SS_EP_COMP_SIZE) { dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); /* Fill in some default values. * Leave bmAttributes as zero, which will mean no streams for * bulk, and isoc won't support multiple bursts of packets. * With bursts of only one packet, and a Mult of 1, the max * amount of data moved per endpoint service interval is one * packet. 
*/ ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE; ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; if (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) ep->ss_ep_comp.wBytesPerInterval = ep->desc.wMaxPacketSize; return; } memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); /* Check the various values */ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { dev_warn(ddev, "Control endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 0; } else if (desc->bMaxBurst > 15) { dev_warn(ddev, "Endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to 15\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 15; } if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) && desc->bmAttributes != 0) { dev_warn(ddev, "%s endpoint with bmAttributes = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", usb_endpoint_xfer_control(&ep->desc) ? 
"Control" : "Bulk", desc->bmAttributes, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 0; } else if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) { dev_warn(ddev, "Bulk endpoint with more than 65536 streams in " "config %d interface %d altsetting %d ep %d: " "setting to max\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 16; } else if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) { dev_warn(ddev, "Isoc endpoint has Mult of %d in " "config %d interface %d altsetting %d ep %d: " "setting to 3\n", desc->bmAttributes + 1, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 2; } if (usb_endpoint_xfer_isoc(&ep->desc)) max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * usb_endpoint_maxp(&ep->desc); else if (usb_endpoint_xfer_int(&ep->desc)) max_tx = usb_endpoint_maxp(&ep->desc) * (desc->bMaxBurst + 1); else max_tx = 999999; if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in " "config %d interface %d altsetting %d ep %d: " "setting to %d\n", usb_endpoint_xfer_isoc(&ep->desc) ? 
"Isoc" : "Int", le16_to_cpu(desc->wBytesPerInterval), cfgno, inum, asnum, ep->desc.bEndpointAddress, max_tx); ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); } } static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE) n = USB_DT_ENDPOINT_AUDIO_SIZE; else if (d->bLength >= USB_DT_ENDPOINT_SIZE) n = USB_DT_ENDPOINT_SIZE; else { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint descriptor of length %d, skipping\n", cfgno, inum, asnum, d->bLength); goto skip_to_next_endpoint_or_interface_descriptor; } i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK; if (i >= 16 || i == 0) { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } /* Only store as many endpoints as we have room for */ if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ifp->desc.bNumEndpoints; memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* Fix up bInterval values outside the legal range. Use 32 ms if no * proper value can be guessed. */ i = 0; /* i = min, j = max, n = default */ j = 255; if (usb_endpoint_xfer_int(d)) { i = 1; switch (to_usb_device(ddev)->speed) { case USB_SPEED_SUPER: case USB_SPEED_HIGH: /* Many device manufacturers are using full-speed * bInterval values in high-speed interrupt endpoint * descriptors. Try to fix those and fall back to a * 32 ms default value otherwise. 
*/ n = fls(d->bInterval*8); if (n == 0) n = 9; /* 32 ms = 2^(9-1) uframes */ j = 16; break; default: /* USB_SPEED_FULL or _LOW */ /* For low-speed, 10 ms is the official minimum. * But some "overclocked" devices might want faster * polling so we'll allow it. */ n = 32; break; } } else if (usb_endpoint_xfer_isoc(d)) { i = 1; j = 16; switch (to_usb_device(ddev)->speed) { case USB_SPEED_HIGH: n = 9; /* 32 ms = 2^(9-1) uframes */ break; default: /* USB_SPEED_FULL */ n = 6; /* 32 ms = 2^(6-1) frames */ break; } } if (d->bInterval < i || d->bInterval > j) { dev_warn(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X has an invalid bInterval %d, " "changing to %d\n", cfgno, inum, asnum, d->bEndpointAddress, d->bInterval, n); endpoint->desc.bInterval = n; } /* Some buggy low-speed devices have Bulk endpoints, which is * explicitly forbidden by the USB spec. In an attempt to make * them usable, we will try treating them as Interrupt endpoints. */ if (to_usb_device(ddev)->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) { dev_warn(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X is Bulk; changing to Interrupt\n", cfgno, inum, asnum, d->bEndpointAddress); endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; endpoint->desc.bInterval = 1; if (usb_endpoint_maxp(&endpoint->desc) > 8) endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. High speed HCDs may not * be able to handle that particular bug, so let's warn... 
*/ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { unsigned maxp; maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp); } /* Parse a possible SuperSpeed endpoint companion descriptor */ if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) usb_parse_ss_endpoint_companion(ddev, cfgno, inum, asnum, endpoint, buffer, size); /* Skip over any Class Specific or Vendor Specific descriptors; * find the next endpoint or interface descriptor */ endpoint->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); endpoint->extralen = i; retval = buffer - buffer0 + i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "endpoint"); return retval; skip_to_next_endpoint_or_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } void usb_release_interface_cache(struct kref *ref) { struct usb_interface_cache *intfc = ref_to_usb_interface_cache(ref); int j; for (j = 0; j < intfc->num_altsetting; j++) { struct usb_host_interface *alt = &intfc->altsetting[j]; kfree(alt->endpoint); kfree(alt->string); } kfree(intfc); } static int usb_parse_interface(struct device *ddev, int cfgno, struct usb_host_config *config, unsigned char *buffer, int size, u8 inums[], u8 nalts[]) { unsigned char *buffer0 = buffer; struct usb_interface_descriptor *d; int inum, asnum; struct usb_interface_cache *intfc; struct usb_host_interface *alt; int i, n; int len, retval; int num_ep, num_ep_orig; d = (struct usb_interface_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength < USB_DT_INTERFACE_SIZE) goto skip_to_next_interface_descriptor; /* Which interface entry is this? 
*/ intfc = NULL; inum = d->bInterfaceNumber; for (i = 0; i < config->desc.bNumInterfaces; ++i) { if (inums[i] == inum) { intfc = config->intf_cache[i]; break; } } if (!intfc || intfc->num_altsetting >= nalts[i]) goto skip_to_next_interface_descriptor; /* Check for duplicate altsetting entries */ asnum = d->bAlternateSetting; for ((i = 0, alt = &intfc->altsetting[0]); i < intfc->num_altsetting; (++i, ++alt)) { if (alt->desc.bAlternateSetting == asnum) { dev_warn(ddev, "Duplicate descriptor for config %d " "interface %d altsetting %d, skipping\n", cfgno, inum, asnum); goto skip_to_next_interface_descriptor; } } ++intfc->num_altsetting; memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE); /* Skip over any Class Specific or Vendor Specific descriptors; * find the first endpoint or interface descriptor */ alt->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); alt->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "interface"); buffer += i; size -= i; /* Allocate space for the right(?) 
number of endpoints */ num_ep = num_ep_orig = alt->desc.bNumEndpoints; alt->desc.bNumEndpoints = 0; /* Use as a counter */ if (num_ep > USB_MAXENDPOINTS) { dev_warn(ddev, "too many endpoints for config %d interface %d " "altsetting %d: %d, using maximum allowed: %d\n", cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS); num_ep = USB_MAXENDPOINTS; } if (num_ep > 0) { /* Can't allocate 0 bytes */ len = sizeof(struct usb_host_endpoint) * num_ep; alt->endpoint = kzalloc(len, GFP_KERNEL); if (!alt->endpoint) return -ENOMEM; } /* Parse all the endpoint descriptors */ n = 0; while (size > 0) { if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; buffer += retval; size -= retval; } if (n != num_ep_orig) dev_warn(ddev, "config %d interface %d altsetting %d has %d " "endpoint descriptor%s, different from the interface " "descriptor's value: %d\n", cfgno, inum, asnum, n, plural(n), num_ep_orig); return buffer - buffer0; skip_to_next_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } static int usb_parse_configuration(struct usb_device *dev, int cfgidx, struct usb_host_config *config, unsigned char *buffer, int size) { struct device *ddev = &dev->dev; unsigned char *buffer0 = buffer; int cfgno; int nintf, nintf_orig; int i, j, n; struct usb_interface_cache *intfc; unsigned char *buffer2; int size2; struct usb_descriptor_header *header; int len, retval; u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES]; unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE) { dev_err(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); return -EINVAL; } cfgno = 
config->desc.bConfigurationValue; buffer += config->desc.bLength; size -= config->desc.bLength; nintf = nintf_orig = config->desc.bNumInterfaces; if (nintf > USB_MAXINTERFACES) { dev_warn(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", cfgno, nintf, USB_MAXINTERFACES); nintf = USB_MAXINTERFACES; } /* Go through the descriptors, checking their length and counting the * number of altsettings for each interface */ n = 0; for ((buffer2 = buffer, size2 = size); size2 > 0; (buffer2 += header->bLength, size2 -= header->bLength)) { if (size2 < sizeof(struct usb_descriptor_header)) { dev_warn(ddev, "config %d descriptor has %d excess " "byte%s, ignoring\n", cfgno, size2, plural(size2)); break; } header = (struct usb_descriptor_header *) buffer2; if ((header->bLength > size2) || (header->bLength < 2)) { dev_warn(ddev, "config %d has an invalid descriptor " "of length %d, skipping remainder of the config\n", cfgno, header->bLength); break; } if (header->bDescriptorType == USB_DT_INTERFACE) { struct usb_interface_descriptor *d; int inum; d = (struct usb_interface_descriptor *) header; if (d->bLength < USB_DT_INTERFACE_SIZE) { dev_warn(ddev, "config %d has an invalid " "interface descriptor of length %d, " "skipping\n", cfgno, d->bLength); continue; } inum = d->bInterfaceNumber; if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) && n >= nintf_orig) { dev_warn(ddev, "config %d has more interface " "descriptors, than it declares in " "bNumInterfaces, ignoring interface " "number: %d\n", cfgno, inum); continue; } if (inum >= nintf_orig) dev_warn(ddev, "config %d has an invalid " "interface number: %d but max is %d\n", cfgno, inum, nintf_orig - 1); /* Have we already encountered this interface? 
* Count its altsettings */ for (i = 0; i < n; ++i) { if (inums[i] == inum) break; } if (i < n) { if (nalts[i] < 255) ++nalts[i]; } else if (n < USB_MAXINTERFACES) { inums[n] = inum; nalts[n] = 1; ++n; } } else if (header->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { if (iad_num == USB_MAXIADS) { dev_warn(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { config->intf_assoc[iad_num] = (struct usb_interface_assoc_descriptor *)header; iad_num++; } } else if (header->bDescriptorType == USB_DT_DEVICE || header->bDescriptorType == USB_DT_CONFIG) dev_warn(ddev, "config %d contains an unexpected " "descriptor of type 0x%X, skipping\n", cfgno, header->bDescriptorType); } /* for ((buffer2 = buffer, size2 = size); ...) */ size = buffer2 - buffer; config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); if (n != nintf) dev_warn(ddev, "config %d has %d interface%s, different from " "the descriptor's value: %d\n", cfgno, n, plural(n), nintf_orig); else if (n == 0) dev_warn(ddev, "config %d has no interfaces?\n", cfgno); config->desc.bNumInterfaces = nintf = n; /* Check for missing interface numbers */ for (i = 0; i < nintf; ++i) { for (j = 0; j < nintf; ++j) { if (inums[j] == i) break; } if (j >= nintf) dev_warn(ddev, "config %d has no interface number " "%d\n", cfgno, i); } /* Allocate the usb_interface_caches and altsetting arrays */ for (i = 0; i < nintf; ++i) { j = nalts[i]; if (j > USB_MAXALTSETTING) { dev_warn(ddev, "too many alternate settings for " "config %d interface %d: %d, " "using maximum allowed: %d\n", cfgno, inums[i], j, USB_MAXALTSETTING); nalts[i] = j = USB_MAXALTSETTING; } len = sizeof(*intfc) + sizeof(struct usb_host_interface) * j; config->intf_cache[i] = intfc = kzalloc(len, GFP_KERNEL); if (!intfc) return -ENOMEM; kref_init(&intfc->ref); } /* FIXME: parse the BOS descriptor */ /* Skip over any Class Specific or Vendor Specific descriptors; * find the first interface descriptor 
*/ config->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, &n); config->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "configuration"); buffer += i; size -= i; /* Parse all the interface/altsetting descriptors */ while (size > 0) { retval = usb_parse_interface(ddev, cfgno, config, buffer, size, inums, nalts); if (retval < 0) return retval; buffer += retval; size -= retval; } /* Check for missing altsettings */ for (i = 0; i < nintf; ++i) { intfc = config->intf_cache[i]; for (j = 0; j < intfc->num_altsetting; ++j) { for (n = 0; n < intfc->num_altsetting; ++n) { if (intfc->altsetting[n].desc. bAlternateSetting == j) break; } if (n >= intfc->num_altsetting) dev_warn(ddev, "config %d interface %d has no " "altsetting %d\n", cfgno, inums[i], j); } } return 0; } /* hub-only!! ... and only exported for reset/reinit path. * otherwise used internally on disconnect/destroy path */ void usb_destroy_configuration(struct usb_device *dev) { int c, i; if (!dev->config) return; if (dev->rawdescriptors) { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) kfree(dev->rawdescriptors[i]); kfree(dev->rawdescriptors); dev->rawdescriptors = NULL; } for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { struct usb_host_config *cf = &dev->config[c]; kfree(cf->string); for (i = 0; i < cf->desc.bNumInterfaces; i++) { if (cf->intf_cache[i]) kref_put(&cf->intf_cache[i]->ref, usb_release_interface_cache); } } kfree(dev->config); dev->config = NULL; } /* * Get the USB config descriptors, cache and parse'em * * hub-only!! ... and only in reset path, or usb_new_device() * (used by real hubs and virtual root hubs) * * NOTE: if this is a WUSB device and is not authorized, we skip the * whole thing. A non-authorized USB device has no * configurations. 
*/ int usb_get_configuration(struct usb_device *dev) { struct device *ddev = &dev->dev; int ncfg = dev->descriptor.bNumConfigurations; int result = 0; unsigned int cfgno, length; unsigned char *bigbuffer; struct usb_config_descriptor *desc; cfgno = 0; if (dev->authorized == 0) /* Not really an error */ goto out_not_authorized; result = -ENOMEM; if (ncfg > USB_MAXCONFIG) { dev_warn(ddev, "too many configurations: %d, " "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG); dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; } if (ncfg < 1) { dev_err(ddev, "no configurations\n"); return -EINVAL; } length = ncfg * sizeof(struct usb_host_config); dev->config = kzalloc(length, GFP_KERNEL); if (!dev->config) goto err2; length = ncfg * sizeof(char *); dev->rawdescriptors = kzalloc(length, GFP_KERNEL); if (!dev->rawdescriptors) goto err2; desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL); if (!desc) goto err2; result = 0; for (; cfgno < ncfg; cfgno++) { /* We grab just the first descriptor so we know how long * the whole configuration is */ result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, desc, USB_DT_CONFIG_SIZE); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s: %d\n", cfgno, "start", result); dev_err(ddev, "chopping to %d config(s)\n", cfgno); dev->descriptor.bNumConfigurations = cfgno; break; } else if (result < 4) { dev_err(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, USB_DT_CONFIG_SIZE, result); result = -EINVAL; goto err; } length = max((int) le16_to_cpu(desc->wTotalLength), USB_DT_CONFIG_SIZE); /* Now that we know the length, get the whole thing */ bigbuffer = kmalloc(length, GFP_KERNEL); if (!bigbuffer) { result = -ENOMEM; goto err; } result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s\n", cfgno, "all"); kfree(bigbuffer); goto err; } if (result < length) { dev_warn(ddev, "config index %d 
descriptor too short " "(expected %i, got %i)\n", cfgno, length, result); length = result; } dev->rawdescriptors[cfgno] = bigbuffer; result = usb_parse_configuration(dev, cfgno, &dev->config[cfgno], bigbuffer, length); if (result < 0) { ++cfgno; goto err; } } result = 0; err: kfree(desc); out_not_authorized: dev->descriptor.bNumConfigurations = cfgno; err2: if (result == -ENOMEM) dev_err(ddev, "out of memory\n"); return result; } void usb_release_bos_descriptor(struct usb_device *dev) { if (dev->bos) { kfree(dev->bos->desc); kfree(dev->bos); dev->bos = NULL; } } /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; unsigned char *buffer; int length, total_len, num, i; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); if (!bos) return -ENOMEM; /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); if (ret < USB_DT_BOS_SIZE) { dev_err(ddev, "unable to get BOS descriptor\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); return ret; } length = bos->bLength; total_len = le16_to_cpu(bos->wTotalLength); num = bos->bNumDeviceCaps; kfree(bos); if (total_len < length) return -EINVAL; dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL); if (!dev->bos) return -ENOMEM; /* Now let's get the whole BOS descriptor set */ buffer = kzalloc(total_len, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto err; } dev->bos->desc = (struct usb_bos_descriptor *)buffer; ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len); if (ret < total_len) { dev_err(ddev, "unable to get BOS descriptor set\n"); if (ret >= 0) ret = -ENOMSG; goto err; } total_len -= length; for (i = 0; i < num; i++) { buffer += length; cap = (struct usb_dev_cap_header *)buffer; length = cap->bLength; if (total_len < length) break; total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_warn(ddev, "descriptor 
type invalid, skip\n"); continue; } switch (cap->bDevCapabilityType) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB cap descriptor is handled by wusb */ break; case USB_CAP_TYPE_EXT: dev->bos->ext_cap = (struct usb_ext_cap_descriptor *)buffer; break; case USB_SS_CAP_TYPE: dev->bos->ss_cap = (struct usb_ss_cap_descriptor *)buffer; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = (struct usb_ss_container_id_descriptor *)buffer; break; default: break; } } return 0; err: usb_release_bos_descriptor(dev); return ret; }
gpl-2.0
supercairos/android_kernel_doro_msm8916
arch/mn10300/mm/dma-alloc.c
4243
2052
/* MN10300 Dynamic DMA mapping support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * Derived from: arch/i386/kernel/pci-dma.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <linux/export.h> #include <asm/io.h> static unsigned long pci_sram_allocated = 0xbc000000; void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp) { unsigned long addr; void *ret; pr_debug("dma_alloc_coherent(%s,%zu,%x)\n", dev ? dev_name(dev) : "?", size, gfp); if (0xbe000000 - pci_sram_allocated >= size) { size = (size + 255) & ~255; addr = pci_sram_allocated; pci_sram_allocated += size; ret = (void *) addr; goto done; } /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) gfp |= GFP_DMA; addr = __get_free_pages(gfp, get_order(size)); if (!addr) return NULL; /* map the coherent memory through the uncached memory window */ ret = (void *) (addr | 0x20000000); /* fill the memory with obvious rubbish */ memset((void *) addr, 0xfb, size); /* write back and evict all cache lines covering this region */ mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE); done: *dma_handle = virt_to_bus((void *) addr); printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle); return ret; } EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { unsigned long addr = (unsigned long) vaddr & ~0x20000000; if (addr >= 0x9c000000) return; free_pages(addr, get_order(size)); } EXPORT_SYMBOL(dma_free_coherent);
gpl-2.0
brymaster5000/vivowION
arch/h8300/mm/fault.c
4755
1441
/* * linux/arch/h8300/mm/fault.c * * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, * Copyright (C) 2000 Lineo, Inc. (www.lineo.com) * * Based on: * * linux/arch/m68knommu/mm/fault.c * linux/arch/m68k/mm/fault.c * * Copyright (C) 1995 Hamish Macdonald */ #include <linux/mman.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <asm/system.h> #include <asm/pgtable.h> /* * This routine handles page faults. It determines the problem, and * then passes it off to one of the appropriate routines. * * error_code: * bit 0 == 0 means no page found, 1 means protection fault * bit 1 == 0 means read, 1 means write * * If this routine detects a bad access, it returns 1, otherwise it * returns 0. */ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { #ifdef DEBUG printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n", regs->sr, regs->pc, address, error_code); #endif /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ if ((unsigned long) address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); } else printk(KERN_ALERT "Unable to handle kernel access"); printk(" at virtual address %08lx\n",address); if (!user_mode(regs)) die("Oops", regs, error_code); do_exit(SIGKILL); return 1; }
gpl-2.0
kamarush/yuga_aosp_kernel_lp
fs/qnx6/super_mmi.c
7827
4017
/*
 * QNX6 file system, Linux implementation.
 *
 * Version : 1.0.0
 *
 * History :
 *
 * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release.
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "qnx6.h"

/*
 * Copy the fields shared between an MMI-layout superblock and the
 * regular qnx6 superblock: the scalar header fields one by one, then
 * the three root node structures (Inode, Bitmap, Longfile) wholesale.
 */
static void qnx6_mmi_copy_sb(struct qnx6_super_block *qsb,
		struct qnx6_mmi_super_block *sb)
{
	qsb->sb_magic = sb->sb_magic;
	qsb->sb_checksum = sb->sb_checksum;
	qsb->sb_serial = sb->sb_serial;
	qsb->sb_blocksize = sb->sb_blocksize;
	qsb->sb_num_inodes = sb->sb_num_inodes;
	qsb->sb_free_inodes = sb->sb_free_inodes;
	qsb->sb_num_blocks = sb->sb_num_blocks;
	qsb->sb_free_blocks = sb->sb_free_blocks;

	/* the rest of the superblock is the same */
	memcpy(&qsb->Inode, &sb->Inode, sizeof(sb->Inode));
	memcpy(&qsb->Bitmap, &sb->Bitmap, sizeof(sb->Bitmap));
	memcpy(&qsb->Longfile, &sb->Longfile, sizeof(sb->Longfile));
}

/*
 * Probe and mount an MMI-layout qnx6 filesystem: validate both
 * superblocks (magic + CRC32 over bytes 8..511), pick the one with
 * the higher serial, convert it in place to the regular superblock
 * layout, and hook it into the sb_info.
 *
 * Returns the active superblock on success, NULL on failure (all
 * buffer heads released).
 */
struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, int silent)
{
	struct buffer_head *bh1, *bh2 = NULL;
	struct qnx6_mmi_super_block *sb1, *sb2;
	struct qnx6_super_block *qsb = NULL;
	struct qnx6_sb_info *sbi;
	__u64 offset;

	/* Check the superblock signatures
	   start with the first superblock */
	bh1 = sb_bread(s, 0);
	if (!bh1) {
		printk(KERN_ERR "qnx6: Unable to read first mmi superblock\n");
		return NULL;
	}
	sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;
	sbi = QNX6_SB(s);
	if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) {
		/* NOTE(review): the early exit sits inside the !silent
		 * branch, so a silent mount proceeds past a bad magic and
		 * relies on the checksum check below to reject the device.
		 * Preserved as-is to avoid changing mount behavior. */
		if (!silent) {
			printk(KERN_ERR "qnx6: wrong signature (magic) in"
					" superblock #1.\n");
			goto out;
		}
	}

	/* checksum check - start at byte 8 and end at byte 512 */
	if (fs32_to_cpu(sbi, sb1->sb_checksum) !=
			crc32_be(0, (char *)(bh1->b_data + 8), 504)) {
		printk(KERN_ERR "qnx6: superblock #1 checksum error\n");
		goto out;
	}

	/* calculate second superblock blocknumber */
	offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) +
		QNX6_SUPERBLOCK_AREA / fs32_to_cpu(sbi, sb1->sb_blocksize);

	/* set new blocksize */
	if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) {
		printk(KERN_ERR "qnx6: unable to set blocksize\n");
		goto out;
	}
	/* blocksize invalidates bh - pull it back in */
	brelse(bh1);
	bh1 = sb_bread(s, 0);
	if (!bh1)
		goto out;
	sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;

	/* read second superblock */
	bh2 = sb_bread(s, offset);
	if (!bh2) {
		printk(KERN_ERR "qnx6: unable to read the second superblock\n");
		goto out;
	}
	sb2 = (struct qnx6_mmi_super_block *)bh2->b_data;
	if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) {
		if (!silent)
			printk(KERN_ERR "qnx6: wrong signature (magic) in"
					" superblock #2.\n");
		goto out;
	}

	/* checksum check - start at byte 8 and end at byte 512 */
	if (fs32_to_cpu(sbi, sb2->sb_checksum) !=
			crc32_be(0, (char *)(bh2->b_data + 8), 504)) {
		/* FIX: this failure is in superblock #2; the message
		 * previously said "#1" (copy-paste error). */
		printk(KERN_ERR "qnx6: superblock #2 checksum error\n");
		goto out;
	}

	qsb = kmalloc(sizeof(*qsb), GFP_KERNEL);
	if (!qsb) {
		printk(KERN_ERR "qnx6: unable to allocate memory.\n");
		goto out;
	}

	if (fs64_to_cpu(sbi, sb1->sb_serial) >
					fs64_to_cpu(sbi, sb2->sb_serial)) {
		/* superblock #1 active */
		qnx6_mmi_copy_sb(qsb, sb1);
#ifdef CONFIG_QNX6FS_DEBUG
		qnx6_superblock_debug(qsb, s);
#endif
		memcpy(bh1->b_data, qsb, sizeof(struct qnx6_super_block));

		sbi->sb_buf = bh1;
		sbi->sb = (struct qnx6_super_block *)bh1->b_data;
		brelse(bh2);
		printk(KERN_INFO "qnx6: superblock #1 active\n");
	} else {
		/* superblock #2 active */
		qnx6_mmi_copy_sb(qsb, sb2);
#ifdef CONFIG_QNX6FS_DEBUG
		qnx6_superblock_debug(qsb, s);
#endif
		memcpy(bh2->b_data, qsb, sizeof(struct qnx6_super_block));

		sbi->sb_buf = bh2;
		sbi->sb = (struct qnx6_super_block *)bh2->b_data;
		brelse(bh1);
		printk(KERN_INFO "qnx6: superblock #2 active\n");
	}
	kfree(qsb);

	/* offset for mmi_fs is just SUPERBLOCK_AREA bytes */
	sbi->s_blks_off = QNX6_SUPERBLOCK_AREA / s->s_blocksize;

	/* success */
	return sbi->sb;

out:
	if (bh1 != NULL)
		brelse(bh1);
	if (bh2 != NULL)
		brelse(bh2);
	return NULL;
}
gpl-2.0
mastero9017/Crystal
drivers/staging/media/solo6x10/gpio.c
8339
2804
/* * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/fs.h> #include <asm/uaccess.h> #include "solo6x10.h" static void solo_gpio_mode(struct solo_dev *solo_dev, unsigned int port_mask, unsigned int mode) { int port; unsigned int ret; ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_0); /* To set gpio */ for (port = 0; port < 16; port++) { if (!((1 << port) & port_mask)) continue; ret &= (~(3 << (port << 1))); ret |= ((mode & 3) << (port << 1)); } solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_0, ret); /* To set extended gpio - sensor */ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1); for (port = 0; port < 16; port++) { if (!((1 << (port + 16)) & port_mask)) continue; if (!mode) ret &= ~(1 << port); else ret |= 1 << port; } solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_1, ret); } static void solo_gpio_set(struct solo_dev *solo_dev, unsigned int value) { solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT, solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) | value); } static void solo_gpio_clear(struct solo_dev *solo_dev, unsigned int value) { solo_reg_write(solo_dev, SOLO_GPIO_DATA_OUT, solo_reg_read(solo_dev, SOLO_GPIO_DATA_OUT) & ~value); } static void solo_gpio_config(struct solo_dev 
*solo_dev) { /* Video reset */ solo_gpio_mode(solo_dev, 0x30, 1); solo_gpio_clear(solo_dev, 0x30); udelay(100); solo_gpio_set(solo_dev, 0x30); udelay(100); /* Warning: Don't touch the next line unless you're sure of what * you're doing: first four gpio [0-3] are used for video. */ solo_gpio_mode(solo_dev, 0x0f, 2); /* We use bit 8-15 of SOLO_GPIO_CONFIG_0 for relay purposes */ solo_gpio_mode(solo_dev, 0xff00, 1); /* Initially set relay status to 0 */ solo_gpio_clear(solo_dev, 0xff00); } int solo_gpio_init(struct solo_dev *solo_dev) { solo_gpio_config(solo_dev); return 0; } void solo_gpio_exit(struct solo_dev *solo_dev) { solo_gpio_clear(solo_dev, 0x30); solo_gpio_config(solo_dev); }
gpl-2.0
michael1900/falcon_stock
security/tomoyo/environ.c
9875
3117
/*
 * security/tomoyo/environ.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include "common.h"

/**
 * tomoyo_check_env_acl - Check permission for environment variable's name.
 *
 * @r:   Pointer to "struct tomoyo_request_info".
 * @ptr: Pointer to "struct tomoyo_acl_info".
 *
 * Returns true if the requested name matches this ACL's pattern.
 */
static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
				 const struct tomoyo_acl_info *ptr)
{
	const struct tomoyo_env_acl *acl =
		container_of(ptr, typeof(*acl), head);

	return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
}

/**
 * tomoyo_audit_env_log - Audit environment variable name log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
{
	return tomoyo_supervisor(r, "misc env %s\n",
				 r->param.environ.name->name);
}

/**
 * tomoyo_env_perm - Check permission for environment variable's name.
 *
 * @r:   Pointer to "struct tomoyo_request_info".
 * @env: The name of environment variable.
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
{
	struct tomoyo_path_info name;
	int error;

	/* Empty or absent names are always permitted. */
	if (!env || !*env)
		return 0;
	name.name = env;
	tomoyo_fill_path_info(&name);
	r->param_type = TOMOYO_TYPE_ENV_ACL;
	r->param.environ.name = &name;
	/* Retry while the supervisor asks us to re-evaluate the request. */
	do {
		tomoyo_check_acl(r, tomoyo_check_env_acl);
		error = tomoyo_audit_env_log(r);
	} while (error == TOMOYO_RETRY_REQUEST);
	return error;
}

/**
 * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
 *
 * @a: Pointer to "struct tomoyo_acl_info".
 * @b: Pointer to "struct tomoyo_acl_info".
 *
 * Returns true if @a == @b, false otherwise.  Name equality is a
 * pointer comparison because names are interned via tomoyo_get_name().
 */
static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
				const struct tomoyo_acl_info *b)
{
	const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
	const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);

	return p1->env == p2->env;
}

/**
 * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
 *
 * @param: Pointer to "struct tomoyo_acl_param".
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
static int tomoyo_write_env(struct tomoyo_acl_param *param)
{
	struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
	const char *token = tomoyo_read_token(param);
	int error;

	/* Names may not be malformed and may not embed '='. */
	if (!tomoyo_correct_word(token) || strchr(token, '='))
		return -EINVAL;
	e.env = tomoyo_get_name(token);
	if (!e.env)
		return -ENOMEM;
	error = tomoyo_update_domain(&e.head, sizeof(e), param,
				     tomoyo_same_env_acl, NULL);
	tomoyo_put_name(e.env);
	return error;
}

/**
 * tomoyo_write_misc - Update environment variable list.
 *
 * @param: Pointer to "struct tomoyo_acl_param".
 *
 * Returns 0 on success, negative value otherwise.
 */
int tomoyo_write_misc(struct tomoyo_acl_param *param)
{
	if (tomoyo_str_starts(&param->data, "env "))
		return tomoyo_write_env(param);
	return -EINVAL;
}
gpl-2.0
keitaroht/SO
drivers/scsi/bnx2fc/bnx2fc_hwi.c
148
53529
/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. * This file contains the code that low level functions that interact * with 57712 FCoE firmware. * * Copyright (c) 2008 - 2010 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) */ #include "bnx2fc.h" DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, struct fcoe_kcqe *new_cqe_kcqe); static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe); static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe); static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *conn_destroy); int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) { struct fcoe_kwqe_stat stat_req; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = 0; memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; stat_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); kwqe_arr[0] = (struct kwqe *) &stat_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w * * @hba: adapter structure pointer * * Send down FCoE firmware init KWQEs which initiates the initial handshake * with the f/w. 
* */ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) { struct fcoe_kwqe_init1 fcoe_init1; struct fcoe_kwqe_init2 fcoe_init2; struct fcoe_kwqe_init3 fcoe_init3; struct kwqe *kwqe_arr[3]; int num_kwqes = 3; int rc = 0; if (!hba->cnic) { printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); return -ENODEV; } /* fill init1 KWQE */ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; fcoe_init1.task_list_pbl_addr_hi = (u32) ((u64) hba->task_ctx_bd_dma >> 32); fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; fcoe_init1.flags = (PAGE_SHIFT << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; /* fill init2 KWQE */ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; fcoe_init2.hash_tbl_pbl_addr_hi = (u32) ((u64) hba->hash_tbl_pbl_dma >> 32); fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; fcoe_init2.t2_hash_tbl_addr_hi = (u32) ((u64) hba->t2_hash_tbl_dma >> 32); fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) ((u64) hba->t2_hash_tbl_ptr_dma >> 32); fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; /* fill init3 KWQE */ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); 
fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; kwqe_arr[2] = (struct kwqe *) &fcoe_init3; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) { struct fcoe_kwqe_destroy fcoe_destroy; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = -1; /* fill destroy KWQE */ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct fc_lport *lport = port->lport; struct bnx2fc_hba *hba = port->priv; struct kwqe *kwqe_arr[4]; struct fcoe_kwqe_conn_offload1 ofld_req1; struct fcoe_kwqe_conn_offload2 ofld_req2; struct fcoe_kwqe_conn_offload3 ofld_req3; struct fcoe_kwqe_conn_offload4 ofld_req4; struct fc_rport_priv *rdata = tgt->rdata; struct fc_rport *rport = tgt->rport; int num_kwqes = 4; u32 port_id; int rc = 0; u16 conn_id; /* Initialize offload request 1 structure */ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; ofld_req1.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); conn_id = (u16)tgt->fcoe_conn_id; ofld_req1.fcoe_conn_id = conn_id; ofld_req1.sq_addr_lo = (u32) 
tgt->sq_dma; ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; ofld_req1.rq_first_pbe_addr_hi = (u32)((u64) tgt->rq_dma >> 32); ofld_req1.rq_prod = 0x8000; /* Initialize offload request 2 structure */ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; ofld_req2.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); /* Initialize offload request 3 structure */ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; ofld_req3.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req3.vlan_tag = hba->vlan_id << FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; port_id = fc_host_port_id(lport->host); if (port_id == 0) { BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); return -EINVAL; } /* * Store s_id of the initiator for further reference. 
This will * be used during disable/destroy during linkdown processing as * when the lport is reset, the port_id also is reset to 0 */ tgt->sid = port_id; ofld_req3.s_id[0] = (port_id & 0x000000FF); ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; ofld_req3.d_id[0] = (port_id & 0x000000FF); ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; ofld_req3.tx_total_conc_seqs = rdata->max_seq; ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; ofld_req3.rx_max_fc_pay_len = lport->mfs; ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; ofld_req3.rx_open_seqs_exch_c3 = 1; ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); /* set mul_n_port_ids supported flag to 0, until it is supported */ ofld_req3.flags = 0; /* ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); */ /* Info from PLOGI response */ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 
1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); /* vlan flag */ ofld_req3.flags |= (hba->vlan_enabled << FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); /* C2_VALID and ACK flags are not set as they are not suppported */ /* Initialize offload request 4 structure */ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; ofld_req4.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; /* local mac */ ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; ofld_req4.confq_pbl_base_addr_hi = (u32)((u64) tgt->confq_pbl_dma >> 32); kwqe_arr[0] = (struct kwqe *) &ofld_req1; kwqe_arr[1] = (struct kwqe *) &ofld_req2; kwqe_arr[2] = (struct kwqe *) &ofld_req3; kwqe_arr[3] = (struct kwqe *) &ofld_req4; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_enable_req - initiates FCoE Session enablement * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, struct 
bnx2fc_rport *tgt) { struct kwqe *kwqe_arr[2]; struct bnx2fc_hba *hba = port->priv; struct fcoe_kwqe_conn_enable_disable enbl_req; struct fc_lport *lport = port->lport; struct fc_rport *rport = tgt->rport; int num_kwqes = 1; int rc = 0; u32 port_id; memset(&enbl_req, 0x00, sizeof(struct fcoe_kwqe_conn_enable_disable)); enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; enbl_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; /* local mac */ enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; port_id = fc_host_port_id(lport->host); if (port_id != tgt->sid) { printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," "sid = 0x%x\n", port_id, tgt->sid); port_id = tgt->sid; } enbl_req.s_id[0] = (port_id & 0x000000FF); enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; enbl_req.d_id[0] = (port_id & 0x000000FF); enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; enbl_req.vlan_tag = hba->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; enbl_req.vlan_flag = hba->vlan_enabled; enbl_req.context_id = tgt->context_id; enbl_req.conn_id = tgt->fcoe_conn_id; kwqe_arr[0] = (struct kwqe *) &enbl_req; if (hba->cnic && hba->cnic->submit_kwqes) rc 
= hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_disable_req - initiates FCoE Session disable * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_disable_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct bnx2fc_hba *hba = port->priv; struct fcoe_kwqe_conn_enable_disable disable_req; struct kwqe *kwqe_arr[2]; struct fc_rport *rport = tgt->rport; int num_kwqes = 1; int rc = 0; u32 port_id; memset(&disable_req, 0x00, sizeof(struct fcoe_kwqe_conn_enable_disable)); disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; disable_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; port_id = tgt->sid; disable_req.s_id[0] = (port_id & 0x000000FF); disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; disable_req.d_id[0] = (port_id & 0x000000FF); disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; disable_req.context_id = tgt->context_id; disable_req.conn_id = tgt->fcoe_conn_id; disable_req.vlan_tag = hba->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; disable_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; disable_req.vlan_flag = 
hba->vlan_enabled; kwqe_arr[0] = (struct kwqe *) &disable_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { struct fcoe_kwqe_conn_destroy destroy_req; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = 0; memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; destroy_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); destroy_req.context_id = tgt->context_id; destroy_req.conn_id = tgt->fcoe_conn_id; kwqe_arr[0] = (struct kwqe *) &destroy_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } static void bnx2fc_unsol_els_work(struct work_struct *work) { struct bnx2fc_unsol_els *unsol_els; struct fc_lport *lport; struct fc_frame *fp; unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); lport = unsol_els->lport; fp = unsol_els->fp; fc_exch_recv(lport, fp); kfree(unsol_els); } void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, unsigned char *buf, u32 frame_len, u16 l2_oxid) { struct fcoe_port *port = tgt->port; struct fc_lport *lport = port->lport; struct bnx2fc_unsol_els *unsol_els; struct fc_frame_header *fh; struct fc_frame *fp; struct sk_buff *skb; u32 payload_len; u32 crc; u8 op; unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); if (!unsol_els) { BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); return; } BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", l2_oxid, frame_len); payload_len = frame_len - sizeof(struct fc_frame_header); fp = fc_frame_alloc(lport, payload_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); return; } fh 
= (struct fc_frame_header *) fc_frame_header_get(fp); /* Copy FC Frame header and payload into the frame */ memcpy(fh, buf, frame_len); if (l2_oxid != FC_XID_UNKNOWN) fh->fh_ox_id = htons(l2_oxid); skb = fp_skb(fp); if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { if (fh->fh_type == FC_TYPE_ELS) { op = fc_frame_payload_op(fp); if ((op == ELS_TEST) || (op == ELS_ESTC) || (op == ELS_FAN) || (op == ELS_CSU)) { /* * No need to reply for these * ELS requests */ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); kfree_skb(skb); return; } } crc = fcoe_fc_crc(fp); fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); unsol_els->lport = lport; unsol_els->fp = fp; INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); } else { BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); kfree_skb(skb); } } static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) { u8 num_rq; struct fcoe_err_report_entry *err_entry; unsigned char *rq_data; unsigned char *buf = NULL, *buf1; int i; u16 xid; u32 frame_len, len; struct bnx2fc_cmd *io_req = NULL; struct fcoe_task_ctx_entry *task, *task_page; struct bnx2fc_hba *hba = tgt->port->priv; int task_idx, index; int rc = 0; BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { case FCOE_UNSOLICITED_FRAME_CQE_TYPE: frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; spin_lock_bh(&tgt->tgt_lock); rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); spin_unlock_bh(&tgt->tgt_lock); if (rq_data) { buf = rq_data; } else { buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), GFP_ATOMIC); if (!buf1) { BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); break; } for (i = 0; i < num_rq; i++) { spin_lock_bh(&tgt->tgt_lock); rq_data = 
(unsigned char *) bnx2fc_get_next_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); len = BNX2FC_RQ_BUF_SZ; memcpy(buf1, rq_data, len); buf1 += len; } } bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, FC_XID_UNKNOWN); if (buf != rq_data) kfree(buf); spin_lock_bh(&tgt->tgt_lock); bnx2fc_return_rqe(tgt, num_rq); spin_unlock_bh(&tgt->tgt_lock); break; case FCOE_ERROR_DETECTION_CQE_TYPE: /* * In case of error reporting CQE a single RQ entry * is consumed. */ spin_lock_bh(&tgt->tgt_lock); num_rq = 1; err_entry = (struct fcoe_err_report_entry *) bnx2fc_get_next_rqe(tgt, 1); xid = err_entry->fc_hdr.ox_id; BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", err_entry->err_warn_bitmap_hi, err_entry->err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", err_entry->tx_buf_off, err_entry->rx_buf_off); bnx2fc_return_rqe(tgt, 1); if (xid > BNX2FC_MAX_XID) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); spin_unlock_bh(&tgt->tgt_lock); break; } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; task = &(task_page[index]); io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; if (!io_req) { spin_unlock_bh(&tgt->tgt_lock); break; } if (io_req->cmd_type != BNX2FC_SCSI_CMD) { printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); spin_unlock_bh(&tgt->tgt_lock); break; } if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " "progress.. ignore unsol err\n"); spin_unlock_bh(&tgt->tgt_lock); break; } /* * If ABTS is already in progress, and FW error is * received after that, do not cancel the timeout_work * and let the error recovery continue by explicitly * logging out the target, when the ABTS eventually * times out. 
*/ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { /* * Cancel the timeout_work, as we received IO * completion with FW error. */ if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* timer hold */ rc = bnx2fc_initiate_abts(io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts " "failed. issue cleanup\n"); rc = bnx2fc_initiate_cleanup(io_req); BUG_ON(rc); } } else printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " "in ABTS processing\n", xid); spin_unlock_bh(&tgt->tgt_lock); break; case FCOE_WARNING_DETECTION_CQE_TYPE: /* *In case of warning reporting CQE a single RQ entry * is consumes. */ spin_lock_bh(&tgt->tgt_lock); num_rq = 1; err_entry = (struct fcoe_err_report_entry *) bnx2fc_get_next_rqe(tgt, 1); xid = cpu_to_be16(err_entry->fc_hdr.ox_id); BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", err_entry->err_warn_bitmap_hi, err_entry->err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", err_entry->tx_buf_off, err_entry->rx_buf_off); bnx2fc_return_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); break; default: printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); break; } } void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) { struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct fcoe_port *port = tgt->port; struct bnx2fc_hba *hba = port->priv; struct bnx2fc_cmd *io_req; int task_idx, index; u16 xid; u8 cmd_type; u8 rx_state = 0; u8 num_rq; spin_lock_bh(&tgt->tgt_lock); xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; if (xid >= BNX2FC_MAX_TASKS) { printk(KERN_ALERT PFX "ERROR:xid out of range\n"); spin_unlock_bh(&tgt->tgt_lock); return; } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; task = &(task_page[index]); num_rq = ((task->rx_wr_tx_rd.rx_flags & 
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >> FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT); io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; if (io_req == NULL) { printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); spin_unlock_bh(&tgt->tgt_lock); return; } /* Timestamp IO completion time */ cmd_type = io_req->cmd_type; /* optimized completion path */ if (cmd_type == BNX2FC_SCSI_CMD) { rx_state = ((task->rx_wr_tx_rd.rx_flags & FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >> FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT); if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); spin_unlock_bh(&tgt->tgt_lock); return; } } /* Process other IO completion types */ switch (cmd_type) { case BNX2FC_SCSI_CMD: if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) bnx2fc_process_abts_compl(io_req, task, num_rq); else if (rx_state == FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) bnx2fc_process_cleanup_compl(io_req, task, num_rq); else printk(KERN_ERR PFX "Invalid rx state - %d\n", rx_state); break; case BNX2FC_TASK_MGMT_CMD: BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); bnx2fc_process_tm_compl(io_req, task, num_rq); break; case BNX2FC_ABTS: /* * ABTS request received by firmware. 
ABTS response * will be delivered to the task belonging to the IO * that was aborted */ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); break; case BNX2FC_ELS: BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); bnx2fc_process_els_compl(io_req, task, num_rq); break; case BNX2FC_CLEANUP: BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); break; default: printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); break; } spin_unlock_bh(&tgt->tgt_lock); } struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) { struct bnx2fc_work *work; work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); if (!work) return NULL; INIT_LIST_HEAD(&work->list); work->tgt = tgt; work->wqe = wqe; return work; } int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) { struct fcoe_cqe *cq; u32 cq_cons; struct fcoe_cqe *cqe; u16 wqe; bool more_cqes_found = false; /* * cq_lock is a low contention lock used to protect * the CQ data structure from being freed up during * the upload operation */ spin_lock_bh(&tgt->cq_lock); if (!tgt->cq) { printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); spin_unlock_bh(&tgt->cq_lock); return 0; } cq = tgt->cq; cq_cons = tgt->cq_cons_idx; cqe = &cq[cq_cons]; do { more_cqes_found ^= true; while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == (tgt->cq_curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT)) { /* new entry on the cq */ if (wqe & FCOE_CQE_CQE_TYPE) { /* Unsolicited event notification */ bnx2fc_process_unsol_compl(tgt, wqe); } else { struct bnx2fc_work *work = NULL; struct bnx2fc_percpu_s *fps = NULL; unsigned int cpu = wqe % num_possible_cpus(); fps = &per_cpu(bnx2fc_percpu, cpu); spin_lock_bh(&fps->fp_work_lock); if (unlikely(!fps->iothread)) goto unlock; work = bnx2fc_alloc_work(tgt, wqe); if (work) list_add_tail(&work->list, &fps->work_list); unlock: spin_unlock_bh(&fps->fp_work_lock); /* Pending work request 
completion */ if (fps->iothread && work) wake_up_process(fps->iothread); else bnx2fc_process_cq_compl(tgt, wqe); } cqe++; tgt->cq_cons_idx++; if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { tgt->cq_cons_idx = 0; cqe = cq; tgt->cq_curr_toggle_bit = 1 - tgt->cq_curr_toggle_bit; } } /* Re-arm CQ */ if (more_cqes_found) { tgt->conn_db->cq_arm.lo = -1; wmb(); } } while (more_cqes_found); /* * Commit tgt->cq_cons_idx change to the memory * spin_lock implies full memory barrier, no need to smp_wmb */ spin_unlock_bh(&tgt->cq_lock); return 0; } /** * bnx2fc_fastpath_notification - process global event queue (KCQ) * * @hba: adapter structure pointer * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry * * Fast path event notification handler */ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, struct fcoe_kcqe *new_cqe_kcqe) { u32 conn_id = new_cqe_kcqe->fcoe_conn_id; struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); return; } bnx2fc_process_new_cqes(tgt); } /** * bnx2fc_process_ofld_cmpl - process FCoE session offload completion * * @hba: adapter structure pointer * @ofld_kcqe: connection offload kcqe pointer * * handle session offload completion, enable the session if offload is * successful. */ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe) { struct bnx2fc_rport *tgt; struct fcoe_port *port; u32 conn_id; u32 context_id; int rc; conn_id = ofld_kcqe->fcoe_conn_id; context_id = ofld_kcqe->fcoe_conn_context_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); return; } BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", ofld_kcqe->fcoe_conn_context_id); port = tgt->port; if (hba != tgt->port->priv) { printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); goto ofld_cmpl_err; } /* * cnic has allocated a context_id for this session; use this * while enabling the session. 
*/ tgt->context_id = context_id; if (ofld_kcqe->completion_status) { if (ofld_kcqe->completion_status == FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { printk(KERN_ERR PFX "unable to allocate FCoE context " "resources\n"); set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); } goto ofld_cmpl_err; } else { /* now enable the session */ rc = bnx2fc_send_session_enable_req(port, tgt); if (rc) { printk(KERN_ALERT PFX "enable session failed\n"); goto ofld_cmpl_err; } } return; ofld_cmpl_err: set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } /** * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion * * @hba: adapter structure pointer * @ofld_kcqe: connection offload kcqe pointer * * handle session enable completion, mark the rport as ready */ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe) { struct bnx2fc_rport *tgt; u32 conn_id; u32 context_id; context_id = ofld_kcqe->fcoe_conn_context_id; conn_id = ofld_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); return; } BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", ofld_kcqe->fcoe_conn_context_id); /* * context_id should be the same for this target during offload * and enable */ if (tgt->context_id != context_id) { printk(KERN_ALERT PFX "context id mis-match\n"); return; } if (hba != tgt->port->priv) { printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); goto enbl_cmpl_err; } if (ofld_kcqe->completion_status) { goto enbl_cmpl_err; } else { /* enable successful - rport ready for issuing IOs */ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } return; enbl_cmpl_err: set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, struct 
fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful - mark session offline and wake the
		 * upload waiter */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

/**
 * bnx2fc_process_conn_destroy_cmpl - process FCoE session destroy completion
 *
 * @hba:          adapter structure pointer
 * @destroy_kcqe: connection destroy kcqe pointer
 *
 * On success, marks the rport DESTROYED (clearing DISABLED) and wakes
 * any waiter blocked on the upload wait queue.  On failure only logs;
 * the waiter is deliberately not woken.
 */
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful - session upload is now complete */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

/*
 * Translate an INIT_FUNC KCQE completion error code into a log message.
 * Diagnostic only - no state is changed.
 */
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe
- process KCQE * * @hba: adapter structure pointer * @kcqe: kcqe pointer * @num_cqe: Number of completion queue elements * * Generic KCQ event handler */ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], u32 num_cqe) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; int i = 0; struct fcoe_kcqe *kcqe = NULL; while (i < num_cqe) { kcqe = (struct fcoe_kcqe *) kcq[i++]; switch (kcqe->op_code) { case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: bnx2fc_fastpath_notification(hba, kcqe); break; case FCOE_KCQE_OPCODE_OFFLOAD_CONN: bnx2fc_process_ofld_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_ENABLE_CONN: bnx2fc_process_enable_conn_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_INIT_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { bnx2fc_init_failure(hba, kcqe->completion_status); } else { set_bit(ADAPTER_STATE_UP, &hba->adapter_state); bnx2fc_get_link_state(hba); printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", (u8)hba->pcidev->bus->number); } break; case FCOE_KCQE_OPCODE_DESTROY_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { printk(KERN_ERR PFX "DESTROY failed\n"); } else { printk(KERN_ERR PFX "DESTROY success\n"); } hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; wake_up_interruptible(&hba->destroy_wait); break; case FCOE_KCQE_OPCODE_DISABLE_CONN: bnx2fc_process_conn_disable_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_DESTROY_CONN: bnx2fc_process_conn_destroy_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_STAT_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) printk(KERN_ERR PFX "STAT failed\n"); complete(&hba->stat_req_done); break; case FCOE_KCQE_OPCODE_FCOE_ERROR: /* fall thru */ default: printk(KERN_ALERT PFX "unknown opcode 0x%x\n", kcqe->op_code); } } } void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) { struct fcoe_sqe *sqe; sqe = &tgt->sq[tgt->sq_prod_idx]; /* Fill SQ WQE */ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; sqe->wqe |= tgt->sq_curr_toggle_bit << 
FCOE_SQE_TOGGLE_BIT_SHIFT; /* Advance SQ Prod Idx */ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { tgt->sq_prod_idx = 0; tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; } } void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) { struct b577xx_doorbell_set_prod ev_doorbell; u32 msg; wmb(); memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod)); ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE; ev_doorbell.prod = tgt->sq_prod_idx | (tgt->sq_curr_toggle_bit << 15); ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; msg = *((u32 *)&ev_doorbell); writel(cpu_to_le32(msg), tgt->ctx_base); mmiowb(); } int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) { u32 context_id = tgt->context_id; struct fcoe_port *port = tgt->port; u32 reg_off; resource_size_t reg_base; struct bnx2fc_hba *hba = port->priv; reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = BNX2FC_5771X_DB_PAGE_SIZE * (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; return 0; } char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) { char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) return NULL; tgt->rq_cons_idx += num_items; if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; return buf; } void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) { /* return the rq buffer */ u32 next_prod_idx = tgt->rq_prod_idx + num_items; if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { /* Wrap around RQ */ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; } tgt->rq_prod_idx = next_prod_idx; tgt->conn_db->rq_prod = tgt->rq_prod_idx; } void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid) { u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; struct bnx2fc_rport *tgt = io_req->tgt; u32 
context_id = tgt->context_id; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Tx Write Rx Read */ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; task->tx_wr_rx_rd.init_flags = task_type << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; /* Common */ task->cmn.common_flags = context_id << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; task->cmn.general.cleanup_info.task_id = orig_xid; } void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) { struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); struct bnx2fc_rport *tgt = io_req->tgt; struct fc_frame_header *fc_hdr; u8 task_type = 0; u64 *hdr; u64 temp_hdr[3]; u32 context_id; /* Obtain task_type */ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || (io_req->cmd_type == BNX2FC_ELS)) { task_type = FCOE_TASK_TYPE_MIDPATH; } else if (io_req->cmd_type == BNX2FC_ABTS) { task_type = FCOE_TASK_TYPE_ABTS; } memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Setup the task from io_req for easy reference */ io_req->task = task; BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", io_req->cmd_type, task_type); /* Tx only */ if ((task_type == FCOE_TASK_TYPE_MIDPATH) || (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = (u32)mp_req->mp_req_bd_dma; task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = (u32)((u64)mp_req->mp_req_bd_dma >> 32); task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n", (unsigned long long)mp_req->mp_req_bd_dma); } /* Tx Write Rx Read */ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; task->tx_wr_rx_rd.init_flags = task_type << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << 
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; /* Common */ task->cmn.data_2_trns = io_req->data_xfer_len; context_id = tgt->context_id; task->cmn.common_flags = context_id << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; task->cmn.common_flags |= 1 << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; task->cmn.common_flags |= 1 << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; /* Rx Write Tx Read */ fc_hdr = &(mp_req->req_fc_hdr); if (task_type == FCOE_TASK_TYPE_MIDPATH) { fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); fc_hdr->fh_rx_id = htons(0xffff); task->rx_wr_tx_rd.rx_id = 0xffff; } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); } /* Fill FC Header into middle path buffer */ hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); /* Rx Only */ if (task_type == FCOE_TASK_TYPE_MIDPATH) { task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = (u32)((u64)mp_req->mp_resp_bd_dma >> 32); task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; } } void bnx2fc_init_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) { u8 task_type; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct io_bdt *bd_tbl = io_req->bd_tbl; struct bnx2fc_rport *tgt = io_req->tgt; u64 *fcp_cmnd; u64 tmp_fcp_cmnd[4]; u32 context_id; int cnt, i; int bd_count; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Setup the task from io_req for easy reference */ io_req->task = task; if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) task_type = FCOE_TASK_TYPE_WRITE; else task_type = FCOE_TASK_TYPE_READ; /* Tx only */ if (task_type == FCOE_TASK_TYPE_WRITE) { task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = 
(u32)bd_tbl->bd_tbl_dma; task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = bd_tbl->bd_valid; } /*Tx Write Rx Read */ /* Init state to NORMAL */ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; task->tx_wr_rx_rd.init_flags = task_type << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; /* Common */ task->cmn.data_2_trns = io_req->data_xfer_len; context_id = tgt->context_id; task->cmn.common_flags = context_id << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; task->cmn.common_flags |= 1 << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; task->cmn.common_flags |= 1 << FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; /* Set initiative ownership */ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT; /* Set initial seq counter */ task->cmn.tx_low_seq_cnt = 1; /* Set state to "waiting for the first packet" */ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME; /* Fill FCP_CMND IU */ fcp_cmnd = (u64 *) task->cmn.general.cmd_info.fcp_cmd_payload.opaque; bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); /* swap fcp_cmnd */ cnt = sizeof(struct fcp_cmnd) / sizeof(u64); for (i = 0; i < cnt; i++) { *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); fcp_cmnd++; } /* Rx Write Tx Read */ task->rx_wr_tx_rd.rx_id = 0xffff; /* Rx Only */ if (task_type == FCOE_TASK_TYPE_READ) { bd_count = bd_tbl->bd_valid; if (bd_count == 1) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = fcoe_bd_tbl->buf_len; 
task->tx_wr_rx_rd.init_flags |= 1 << FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; } else { task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = bd_tbl->bd_valid; } } } /** * bnx2fc_setup_task_ctx - allocate and map task context * * @hba: pointer to adapter structure * * allocate memory for task context, and associated BD table to be used * by firmware * */ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) { int rc = 0; struct regpair *task_ctx_bdt; dma_addr_t addr; int i; /* * Allocate task context bd table. A page size of bd table * can map 256 buffers. Each buffer contains 32 task context * entries. Hence the limit with one page is 8192 task context * entries. */ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->task_ctx_bd_dma, GFP_KERNEL); if (!hba->task_ctx_bd_tbl) { printk(KERN_ERR PFX "unable to allocate task context BDT\n"); rc = -1; goto out; } memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE); /* * Allocate task_ctx which is an array of pointers pointing to * a page containing 32 task contexts */ hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), GFP_KERNEL); if (!hba->task_ctx) { printk(KERN_ERR PFX "unable to allocate task context array\n"); rc = -1; goto out1; } /* * Allocate task_ctx_dma which is an array of dma addresses */ hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(dma_addr_t)), GFP_KERNEL); if (!hba->task_ctx_dma) { printk(KERN_ERR PFX "unable to alloc context mapping array\n"); rc = -1; goto out2; } task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->task_ctx_dma[i], GFP_KERNEL); if (!hba->task_ctx[i]) { printk(KERN_ERR PFX "unable to alloc task context\n"); rc = -1; goto out3; } memset(hba->task_ctx[i], 0, 
PAGE_SIZE); addr = (u64)hba->task_ctx_dma[i]; task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); task_ctx_bdt->lo = cpu_to_le32((u32)addr); task_ctx_bdt++; } return 0; out3: for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; out2: kfree(hba->task_ctx); hba->task_ctx = NULL; out1: dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); hba->task_ctx_bd_tbl = NULL; out: return rc; } void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) { int i; if (hba->task_ctx_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); hba->task_ctx_bd_tbl = NULL; } if (hba->task_ctx) { for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx); hba->task_ctx = NULL; } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; } static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) { int i; int segment_count; int hash_table_size; u32 *pbl; segment_count = hba->hash_tbl_segment_count; hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * sizeof(struct fcoe_hash_table_entry); pbl = hba->hash_tbl_pbl; for (i = 0; i < segment_count; ++i) { dma_addr_t dma_address; dma_address = le32_to_cpu(*pbl); ++pbl; dma_address += ((u64)le32_to_cpu(*pbl)) << 32; ++pbl; dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_address); } if (hba->hash_tbl_pbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->hash_tbl_pbl, hba->hash_tbl_pbl_dma); hba->hash_tbl_pbl = NULL; } } static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) { int i; int hash_table_size; int segment_count; int segment_array_size; int dma_segment_array_size; 
dma_addr_t *dma_segment_array; u32 *pbl; hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * sizeof(struct fcoe_hash_table_entry); segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; hba->hash_tbl_segment_count = segment_count; segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); if (!hba->hash_tbl_segments) { printk(KERN_ERR PFX "hash table pointers alloc failed\n"); return -ENOMEM; } dma_segment_array_size = segment_count * sizeof(*dma_segment_array); dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); if (!dma_segment_array) { printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); return -ENOMEM; } for (i = 0; i < segment_count; ++i) { hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, &dma_segment_array[i], GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); while (--i >= 0) { dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_segment_array[i]); hba->hash_tbl_segments[i] = NULL; } kfree(dma_segment_array); return -ENOMEM; } memset(hba->hash_tbl_segments[i], 0, BNX2FC_HASH_TBL_CHUNK_SIZE); } hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->hash_tbl_pbl_dma, GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); kfree(dma_segment_array); return -ENOMEM; } memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); pbl = hba->hash_tbl_pbl; for (i = 0; i < segment_count; ++i) { u64 paddr = dma_segment_array[i]; *pbl = cpu_to_le32((u32) paddr); ++pbl; *pbl = cpu_to_le32((u32) (paddr >> 32)); ++pbl; } pbl = hba->hash_tbl_pbl; i = 0; while (*pbl && *(pbl + 1)) { u32 lo; u32 hi; lo = *pbl; ++pbl; hi = *pbl; ++pbl; ++i; } kfree(dma_segment_array); return 0; } /** * bnx2fc_setup_fw_resc - Allocate and map hash 
table and dummy buffer * * @hba: Pointer to adapter structure * */ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) { u64 addr; u32 mem_size; int i; if (bnx2fc_allocate_hash_table(hba)) return -ENOMEM; mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_ptr_dma, GFP_KERNEL); if (!hba->t2_hash_tbl_ptr) { printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->t2_hash_tbl_ptr, 0x00, mem_size); mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_dma, GFP_KERNEL); if (!hba->t2_hash_tbl) { printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->t2_hash_tbl, 0x00, mem_size); for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { addr = (unsigned long) hba->t2_hash_tbl_dma + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; hba->t2_hash_tbl[i].next.hi = addr >> 32; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->stats_buf_dma, GFP_KERNEL); if (!hba->stats_buffer) { printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->stats_buffer, 0x00, PAGE_SIZE); return 0; } void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) { u32 mem_size; if (hba->stats_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->stats_buffer, hba->stats_buf_dma); hba->stats_buffer = NULL; } if (hba->dummy_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } if 
(hba->t2_hash_tbl_ptr) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl_ptr, hba->t2_hash_tbl_ptr_dma); hba->t2_hash_tbl_ptr = NULL; } if (hba->t2_hash_tbl) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl, hba->t2_hash_tbl_dma); hba->t2_hash_tbl = NULL; } bnx2fc_free_hash_table(hba); }
gpl-2.0
jepler/odroid-linux
drivers/media/i2c/tvaudio.c
148
64596
/* * Driver for simple i2c audio chips. * * Copyright (c) 2000 Gerd Knorr * based on code by: * Eric Sandeen (eric_sandeen@bigfoot.com) * Steve VanDeBogart (vandebo@uclink.berkeley.edu) * Greg Alexander (galexand@acm.org) * * For the TDA9875 part: * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source * and Eric Sandeen * * Copyright(c) 2005-2008 Mauro Carvalho Chehab * - Some cleanups, code fixes, etc * - Convert it to V4L2 API * * This code is placed under the terms of the GNU General Public License * * OPTIONS: * debug - set to 1 if you'd like to see debug messages * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <media/tvaudio.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/i2c-addr.h> /* ---------------------------------------------------------------------- */ /* insmod args */ static int debug; /* insmod parameter */ module_param(debug, int, 0644); MODULE_DESCRIPTION("device driver for various i2c TV sound decoder / audiomux chips"); MODULE_AUTHOR("Eric Sandeen, Steve VanDeBogart, Greg Alexander, Gerd Knorr"); MODULE_LICENSE("GPL"); #define UNSET (-1U) /* ---------------------------------------------------------------------- */ /* our structs */ #define MAXREGS 256 struct CHIPSTATE; typedef int (*getvalue)(int); typedef int (*checkit)(struct CHIPSTATE*); typedef int (*initialize)(struct CHIPSTATE*); typedef int (*getrxsubchans)(struct CHIPSTATE *); typedef void (*setaudmode)(struct CHIPSTATE*, int mode); /* i2c command */ typedef struct AUDIOCMD { int count; /* # of bytes to send */ unsigned char bytes[MAXREGS+1]; /* addr, data, data, ... 
*/ } audiocmd; /* chip description */ struct CHIPDESC { char *name; /* chip name */ int addr_lo, addr_hi; /* i2c address range */ int registers; /* # of registers */ int *insmodopt; checkit checkit; initialize initialize; int flags; #define CHIP_HAS_VOLUME 1 #define CHIP_HAS_BASSTREBLE 2 #define CHIP_HAS_INPUTSEL 4 #define CHIP_NEED_CHECKMODE 8 /* various i2c command sequences */ audiocmd init; /* which register has which value */ int leftreg,rightreg,treblereg,bassreg; /* initialize with (defaults to 65535/65535/32768/32768 */ int leftinit,rightinit,trebleinit,bassinit; /* functions to convert the values (v4l -> chip) */ getvalue volfunc,treblefunc,bassfunc; /* get/set mode */ getrxsubchans getrxsubchans; setaudmode setaudmode; /* input switch register + values for v4l inputs */ int inputreg; int inputmap[4]; int inputmute; int inputmask; }; /* current state of the chip */ struct CHIPSTATE { struct v4l2_subdev sd; /* chip-specific description - should point to an entry at CHIPDESC table */ struct CHIPDESC *desc; /* shadow register set */ audiocmd shadow; /* current settings */ __u16 left, right, treble, bass, muted; int prevmode; int radio; int input; /* thread */ struct task_struct *thread; struct timer_list wt; int audmode; }; static inline struct CHIPSTATE *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct CHIPSTATE, sd); } /* ---------------------------------------------------------------------- */ /* i2c I/O functions */ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char buffer[2]; if (subaddr < 0) { v4l2_dbg(1, debug, sd, "chip_write: 0x%x\n", val); chip->shadow.bytes[1] = val; buffer[0] = val; if (1 != i2c_master_send(c, buffer, 1)) { v4l2_warn(sd, "I/O error (write 0x%x)\n", val); return -1; } } else { if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register: %d\n", subaddr); 
return -EINVAL; } v4l2_dbg(1, debug, sd, "chip_write: reg%d=0x%x\n", subaddr, val); chip->shadow.bytes[subaddr+1] = val; buffer[0] = subaddr; buffer[1] = val; if (2 != i2c_master_send(c, buffer, 2)) { v4l2_warn(sd, "I/O error (write reg%d=0x%x)\n", subaddr, val); return -1; } } return 0; } static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask) { struct v4l2_subdev *sd = &chip->sd; if (mask != 0) { if (subaddr < 0) { val = (chip->shadow.bytes[1] & ~mask) | (val & mask); } else { if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register: %d\n", subaddr); return -EINVAL; } val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask); } } return chip_write(chip, subaddr, val); } static int chip_read(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char buffer; if (1 != i2c_master_recv(c, &buffer, 1)) { v4l2_warn(sd, "I/O error (read)\n"); return -1; } v4l2_dbg(1, debug, sd, "chip_read: 0x%x\n", buffer); return buffer; } static int chip_read2(struct CHIPSTATE *chip, int subaddr) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char write[1]; unsigned char read[1]; struct i2c_msg msgs[2] = { { .addr = c->addr, .len = 1, .buf = write }, { .addr = c->addr, .flags = I2C_M_RD, .len = 1, .buf = read } }; write[0] = subaddr; if (2 != i2c_transfer(c->adapter, msgs, 2)) { v4l2_warn(sd, "I/O error (read2)\n"); return -1; } v4l2_dbg(1, debug, sd, "chip_read2: reg%d=0x%x\n", subaddr, read[0]); return read[0]; } static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); int i; if (0 == cmd->count) return 0; if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register range: %d to %d\n", cmd->bytes[0] + 1, cmd->bytes[0] + 
cmd->count - 1); return -EINVAL; } /* FIXME: it seems that the shadow bytes are wrong bellow !*/ /* update our shadow register set; print bytes if (debug > 0) */ v4l2_dbg(1, debug, sd, "chip_cmd(%s): reg=%d, data:", name, cmd->bytes[0]); for (i = 1; i < cmd->count; i++) { if (debug) printk(KERN_CONT " 0x%x", cmd->bytes[i]); chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i]; } if (debug) printk(KERN_CONT "\n"); /* send data to the chip */ if (cmd->count != i2c_master_send(c, cmd->bytes, cmd->count)) { v4l2_warn(sd, "I/O error (%s)\n", name); return -1; } return 0; } /* ---------------------------------------------------------------------- */ /* kernel thread for doing i2c stuff asyncronly * right now it is used only to check the audio mode (mono/stereo/whatever) * some time after switching to another TV channel, then turn on stereo * if available, ... */ static void chip_thread_wake(unsigned long data) { struct CHIPSTATE *chip = (struct CHIPSTATE*)data; wake_up_process(chip->thread); } static int chip_thread(void *data) { struct CHIPSTATE *chip = data; struct CHIPDESC *desc = chip->desc; struct v4l2_subdev *sd = &chip->sd; int mode, selected; v4l2_dbg(1, debug, sd, "thread started\n"); set_freezable(); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule(); set_current_state(TASK_RUNNING); try_to_freeze(); if (kthread_should_stop()) break; v4l2_dbg(1, debug, sd, "thread wakeup\n"); /* don't do anything for radio */ if (chip->radio) continue; /* have a look what's going on */ mode = desc->getrxsubchans(chip); if (mode == chip->prevmode) continue; /* chip detected a new audio mode - set it */ v4l2_dbg(1, debug, sd, "thread checkmode\n"); chip->prevmode = mode; selected = V4L2_TUNER_MODE_MONO; switch (chip->audmode) { case V4L2_TUNER_MODE_MONO: if (mode & V4L2_TUNER_SUB_LANG1) selected = V4L2_TUNER_MODE_LANG1; break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: if (mode & V4L2_TUNER_SUB_LANG1) selected = 
V4L2_TUNER_MODE_LANG1; else if (mode & V4L2_TUNER_SUB_STEREO) selected = V4L2_TUNER_MODE_STEREO; break; case V4L2_TUNER_MODE_LANG2: if (mode & V4L2_TUNER_SUB_LANG2) selected = V4L2_TUNER_MODE_LANG2; else if (mode & V4L2_TUNER_SUB_STEREO) selected = V4L2_TUNER_MODE_STEREO; break; case V4L2_TUNER_MODE_LANG1_LANG2: if (mode & V4L2_TUNER_SUB_LANG2) selected = V4L2_TUNER_MODE_LANG1_LANG2; else if (mode & V4L2_TUNER_SUB_STEREO) selected = V4L2_TUNER_MODE_STEREO; } desc->setaudmode(chip, selected); /* schedule next check */ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); } v4l2_dbg(1, debug, sd, "thread exiting\n"); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda9840 */ #define TDA9840_SW 0x00 #define TDA9840_LVADJ 0x02 #define TDA9840_STADJ 0x03 #define TDA9840_TEST 0x04 #define TDA9840_MONO 0x10 #define TDA9840_STEREO 0x2a #define TDA9840_DUALA 0x12 #define TDA9840_DUALB 0x1e #define TDA9840_DUALAB 0x1a #define TDA9840_DUALBA 0x16 #define TDA9840_EXTERNAL 0x7a #define TDA9840_DS_DUAL 0x20 /* Dual sound identified */ #define TDA9840_ST_STEREO 0x40 /* Stereo sound identified */ #define TDA9840_PONRES 0x80 /* Power-on reset detected if = 1 */ #define TDA9840_TEST_INT1SN 0x1 /* Integration time 0.5s when set */ #define TDA9840_TEST_INTFU 0x02 /* Disables integrator function */ static int tda9840_getrxsubchans(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int val, mode; val = chip_read(chip); mode = V4L2_TUNER_SUB_MONO; if (val & TDA9840_DS_DUAL) mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; if (val & TDA9840_ST_STEREO) mode = V4L2_TUNER_SUB_STEREO; v4l2_dbg(1, debug, sd, "tda9840_getrxsubchans(): raw chip read: %d, return: %d\n", val, mode); return mode; } static void tda9840_setaudmode(struct CHIPSTATE *chip, int mode) { int update = 1; int t = chip->shadow.bytes[TDA9840_SW + 1] & ~0x7e; switch (mode) { case V4L2_TUNER_MODE_MONO: t |= TDA9840_MONO; 
break; case V4L2_TUNER_MODE_STEREO: t |= TDA9840_STEREO; break; case V4L2_TUNER_MODE_LANG1: t |= TDA9840_DUALA; break; case V4L2_TUNER_MODE_LANG2: t |= TDA9840_DUALB; break; case V4L2_TUNER_MODE_LANG1_LANG2: t |= TDA9840_DUALAB; break; default: update = 0; } if (update) chip_write(chip, TDA9840_SW, t); } static int tda9840_checkit(struct CHIPSTATE *chip) { int rc; rc = chip_read(chip); /* lower 5 bits should be 0 */ return ((rc & 0x1f) == 0) ? 1 : 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda985x */ /* subaddresses for TDA9855 */ #define TDA9855_VR 0x00 /* Volume, right */ #define TDA9855_VL 0x01 /* Volume, left */ #define TDA9855_BA 0x02 /* Bass */ #define TDA9855_TR 0x03 /* Treble */ #define TDA9855_SW 0x04 /* Subwoofer - not connected on DTV2000 */ /* subaddresses for TDA9850 */ #define TDA9850_C4 0x04 /* Control 1 for TDA9850 */ /* subaddesses for both chips */ #define TDA985x_C5 0x05 /* Control 2 for TDA9850, Control 1 for TDA9855 */ #define TDA985x_C6 0x06 /* Control 3 for TDA9850, Control 2 for TDA9855 */ #define TDA985x_C7 0x07 /* Control 4 for TDA9850, Control 3 for TDA9855 */ #define TDA985x_A1 0x08 /* Alignment 1 for both chips */ #define TDA985x_A2 0x09 /* Alignment 2 for both chips */ #define TDA985x_A3 0x0a /* Alignment 3 for both chips */ /* Masks for bits in TDA9855 subaddresses */ /* 0x00 - VR in TDA9855 */ /* 0x01 - VL in TDA9855 */ /* lower 7 bits control gain from -71dB (0x28) to 16dB (0x7f) * in 1dB steps - mute is 0x27 */ /* 0x02 - BA in TDA9855 */ /* lower 5 bits control bass gain from -12dB (0x06) to 16.5dB (0x19) * in .5dB steps - 0 is 0x0E */ /* 0x03 - TR in TDA9855 */ /* 4 bits << 1 control treble gain from -12dB (0x3) to 12dB (0xb) * in 3dB steps - 0 is 0x7 */ /* Masks for bits in both chips' subaddresses */ /* 0x04 - SW in TDA9855, C4/Control 1 in TDA9850 */ /* Unique to TDA9855: */ /* 4 bits << 2 control subwoofer/surround gain from -14db (0x1) 
to 14db (0xf) * in 3dB steps - mute is 0x0 */ /* Unique to TDA9850: */ /* lower 4 bits control stereo noise threshold, over which stereo turns off * set to values of 0x00 through 0x0f for Ster1 through Ster16 */ /* 0x05 - C5 - Control 1 in TDA9855 , Control 2 in TDA9850*/ /* Unique to TDA9855: */ #define TDA9855_MUTE 1<<7 /* GMU, Mute at outputs */ #define TDA9855_AVL 1<<6 /* AVL, Automatic Volume Level */ #define TDA9855_LOUD 1<<5 /* Loudness, 1==off */ #define TDA9855_SUR 1<<3 /* Surround / Subwoofer 1==.5(L-R) 0==.5(L+R) */ /* Bits 0 to 3 select various combinations * of line in and line out, only the * interesting ones are defined */ #define TDA9855_EXT 1<<2 /* Selects inputs LIR and LIL. Pins 41 & 12 */ #define TDA9855_INT 0 /* Selects inputs LOR and LOL. (internal) */ /* Unique to TDA9850: */ /* lower 4 bits contol SAP noise threshold, over which SAP turns off * set to values of 0x00 through 0x0f for SAP1 through SAP16 */ /* 0x06 - C6 - Control 2 in TDA9855, Control 3 in TDA9850 */ /* Common to TDA9855 and TDA9850: */ #define TDA985x_SAP 3<<6 /* Selects SAP output, mute if not received */ #define TDA985x_MONOSAP 2<<6 /* Selects Mono on left, SAP on right */ #define TDA985x_STEREO 1<<6 /* Selects Stereo ouput, mono if not received */ #define TDA985x_MONO 0 /* Forces Mono output */ #define TDA985x_LMU 1<<3 /* Mute (LOR/LOL for 9855, OUTL/OUTR for 9850) */ /* Unique to TDA9855: */ #define TDA9855_TZCM 1<<5 /* If set, don't mute till zero crossing */ #define TDA9855_VZCM 1<<4 /* If set, don't change volume till zero crossing*/ #define TDA9855_LINEAR 0 /* Linear Stereo */ #define TDA9855_PSEUDO 1 /* Pseudo Stereo */ #define TDA9855_SPAT_30 2 /* Spatial Stereo, 30% anti-phase crosstalk */ #define TDA9855_SPAT_50 3 /* Spatial Stereo, 52% anti-phase crosstalk */ #define TDA9855_E_MONO 7 /* Forced mono - mono select elseware, so useless*/ /* 0x07 - C7 - Control 3 in TDA9855, Control 4 in TDA9850 */ /* Common to both TDA9855 and TDA9850: */ /* lower 4 bits control 
/* Volume: map 0..65535 onto the TDA9855 gain codes 0x27 (mute) .. 0x7f. */
static int tda9855_volume(int val)
{
	return val / 0x2e8 + 0x27;
}

/* Bass: map 0..65535 onto the TDA9855 bass codes starting at 0x06. */
static int tda9855_bass(int val)
{
	return val / 0xccc + 0x06;
}

/* Treble: map 0..65535 onto the TDA9855 treble field (4 bits << 1). */
static int tda9855_treble(int val)
{
	return (val / 0x1c71 + 0x3) << 1;
}
TDA9873_PT 0x02 /* Port */ /* Subaddress 0x00: Switching Data * B7..B0: * * B1, B0: Input source selection * 0, 0 internal * 1, 0 external stereo * 0, 1 external mono */ #define TDA9873_INP_MASK 3 #define TDA9873_INTERNAL 0 #define TDA9873_EXT_STEREO 2 #define TDA9873_EXT_MONO 1 /* B3, B2: output signal select * B4 : transmission mode * 0, 0, 1 Mono * 1, 0, 0 Stereo * 1, 1, 1 Stereo (reversed channel) * 0, 0, 0 Dual AB * 0, 0, 1 Dual AA * 0, 1, 0 Dual BB * 0, 1, 1 Dual BA */ #define TDA9873_TR_MASK (7 << 2) #define TDA9873_TR_MONO 4 #define TDA9873_TR_STEREO 1 << 4 #define TDA9873_TR_REVERSE ((1 << 3) | (1 << 2)) #define TDA9873_TR_DUALA 1 << 2 #define TDA9873_TR_DUALB 1 << 3 #define TDA9873_TR_DUALAB 0 /* output level controls * B5: output level switch (0 = reduced gain, 1 = normal gain) * B6: mute (1 = muted) * B7: auto-mute (1 = auto-mute enabled) */ #define TDA9873_GAIN_NORMAL 1 << 5 #define TDA9873_MUTE 1 << 6 #define TDA9873_AUTOMUTE 1 << 7 /* Subaddress 0x01: Adjust/standard */ /* Lower 4 bits (C3..C0) control stereo adjustment on R channel (-0.6 - +0.7 dB) * Recommended value is +0 dB */ #define TDA9873_STEREO_ADJ 0x06 /* 0dB gain */ /* Bits C6..C4 control FM stantard * C6, C5, C4 * 0, 0, 0 B/G (PAL FM) * 0, 0, 1 M * 0, 1, 0 D/K(1) * 0, 1, 1 D/K(2) * 1, 0, 0 D/K(3) * 1, 0, 1 I */ #define TDA9873_BG 0 #define TDA9873_M 1 #define TDA9873_DK1 2 #define TDA9873_DK2 3 #define TDA9873_DK3 4 #define TDA9873_I 5 /* C7 controls identification response time (1=fast/0=normal) */ #define TDA9873_IDR_NORM 0 #define TDA9873_IDR_FAST 1 << 7 /* Subaddress 0x02: Port data */ /* E1, E0 free programmable ports P1/P2 0, 0 both ports low 0, 1 P1 high 1, 0 P2 high 1, 1 both ports high */ #define TDA9873_PORTS 3 /* E2: test port */ #define TDA9873_TST_PORT 1 << 2 /* E5..E3 control mono output channel (together with transmission mode bit B4) * * E5 E4 E3 B4 OUTM * 0 0 0 0 mono * 0 0 1 0 DUAL B * 0 1 0 1 mono (from stereo decoder) */ #define TDA9873_MOUT_MONO 0 #define 
TDA9873_MOUT_FMONO 0 #define TDA9873_MOUT_DUALA 0 #define TDA9873_MOUT_DUALB 1 << 3 #define TDA9873_MOUT_ST 1 << 4 #define TDA9873_MOUT_EXTM ((1 << 4) | (1 << 3)) #define TDA9873_MOUT_EXTL 1 << 5 #define TDA9873_MOUT_EXTR ((1 << 5) | (1 << 3)) #define TDA9873_MOUT_EXTLR ((1 << 5) | (1 << 4)) #define TDA9873_MOUT_MUTE ((1 << 5) | (1 << 4) | (1 << 3)) /* Status bits: (chip read) */ #define TDA9873_PONR 0 /* Power-on reset detected if = 1 */ #define TDA9873_STEREO 2 /* Stereo sound is identified */ #define TDA9873_DUAL 4 /* Dual sound is identified */ static int tda9873_getrxsubchans(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int val,mode; val = chip_read(chip); mode = V4L2_TUNER_SUB_MONO; if (val & TDA9873_STEREO) mode = V4L2_TUNER_SUB_STEREO; if (val & TDA9873_DUAL) mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; v4l2_dbg(1, debug, sd, "tda9873_getrxsubchans(): raw chip read: %d, return: %d\n", val, mode); return mode; } static void tda9873_setaudmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK; /* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */ if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) { v4l2_dbg(1, debug, sd, "tda9873_setaudmode(): external input\n"); return; } v4l2_dbg(1, debug, sd, "tda9873_setaudmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]); v4l2_dbg(1, debug, sd, "tda9873_setaudmode(): sw_data = %d\n", sw_data); switch (mode) { case V4L2_TUNER_MODE_MONO: sw_data |= TDA9873_TR_MONO; break; case V4L2_TUNER_MODE_STEREO: sw_data |= TDA9873_TR_STEREO; break; case V4L2_TUNER_MODE_LANG1: sw_data |= TDA9873_TR_DUALA; break; case V4L2_TUNER_MODE_LANG2: sw_data |= TDA9873_TR_DUALB; break; case V4L2_TUNER_MODE_LANG1_LANG2: sw_data |= TDA9873_TR_DUALAB; break; default: return; } chip_write(chip, TDA9873_SW, sw_data); v4l2_dbg(1, debug, sd, "tda9873_setaudmode(): req. 
mode %d; chip_write: %d\n", mode, sw_data); } static int tda9873_checkit(struct CHIPSTATE *chip) { int rc; if (-1 == (rc = chip_read2(chip,254))) return 0; return (rc & ~0x1f) == 0x80; } /* ---------------------------------------------------------------------- */ /* audio chip description - defines+functions for tda9874h and tda9874a */ /* Dariusz Kowalewski <darekk@automex.pl> */ /* Subaddresses for TDA9874H and TDA9874A (slave rx) */ #define TDA9874A_AGCGR 0x00 /* AGC gain */ #define TDA9874A_GCONR 0x01 /* general config */ #define TDA9874A_MSR 0x02 /* monitor select */ #define TDA9874A_C1FRA 0x03 /* carrier 1 freq. */ #define TDA9874A_C1FRB 0x04 /* carrier 1 freq. */ #define TDA9874A_C1FRC 0x05 /* carrier 1 freq. */ #define TDA9874A_C2FRA 0x06 /* carrier 2 freq. */ #define TDA9874A_C2FRB 0x07 /* carrier 2 freq. */ #define TDA9874A_C2FRC 0x08 /* carrier 2 freq. */ #define TDA9874A_DCR 0x09 /* demodulator config */ #define TDA9874A_FMER 0x0a /* FM de-emphasis */ #define TDA9874A_FMMR 0x0b /* FM dematrix */ #define TDA9874A_C1OLAR 0x0c /* ch.1 output level adj. */ #define TDA9874A_C2OLAR 0x0d /* ch.2 output level adj. */ #define TDA9874A_NCONR 0x0e /* NICAM config */ #define TDA9874A_NOLAR 0x0f /* NICAM output level adj. */ #define TDA9874A_NLELR 0x10 /* NICAM lower error limit */ #define TDA9874A_NUELR 0x11 /* NICAM upper error limit */ #define TDA9874A_AMCONR 0x12 /* audio mute control */ #define TDA9874A_SDACOSR 0x13 /* stereo DAC output select */ #define TDA9874A_AOSR 0x14 /* analog output select */ #define TDA9874A_DAICONR 0x15 /* digital audio interface config */ #define TDA9874A_I2SOSR 0x16 /* I2S-bus output select */ #define TDA9874A_I2SOLAR 0x17 /* I2S-bus output level adj. */ #define TDA9874A_MDACOSR 0x18 /* mono DAC output select (tda9874a) */ #define TDA9874A_ESP 0xFF /* easy standard progr. 
(tda9874a) */ /* Subaddresses for TDA9874H and TDA9874A (slave tx) */ #define TDA9874A_DSR 0x00 /* device status */ #define TDA9874A_NSR 0x01 /* NICAM status */ #define TDA9874A_NECR 0x02 /* NICAM error count */ #define TDA9874A_DR1 0x03 /* add. data LSB */ #define TDA9874A_DR2 0x04 /* add. data MSB */ #define TDA9874A_LLRA 0x05 /* monitor level read-out LSB */ #define TDA9874A_LLRB 0x06 /* monitor level read-out MSB */ #define TDA9874A_SIFLR 0x07 /* SIF level */ #define TDA9874A_TR2 252 /* test reg. 2 */ #define TDA9874A_TR1 253 /* test reg. 1 */ #define TDA9874A_DIC 254 /* device id. code */ #define TDA9874A_SIC 255 /* software id. code */ static int tda9874a_mode = 1; /* 0: A2, 1: NICAM */ static int tda9874a_GCONR = 0xc0; /* default config. input pin: SIFSEL=0 */ static int tda9874a_NCONR = 0x01; /* default NICAM config.: AMSEL=0,AMUTE=1 */ static int tda9874a_ESP = 0x07; /* default standard: NICAM D/K */ static int tda9874a_dic = -1; /* device id. code */ /* insmod options for tda9874a */ static unsigned int tda9874a_SIF = UNSET; static unsigned int tda9874a_AMSEL = UNSET; static unsigned int tda9874a_STD = UNSET; module_param(tda9874a_SIF, int, 0444); module_param(tda9874a_AMSEL, int, 0444); module_param(tda9874a_STD, int, 0444); /* * initialization table for tda9874 decoder: * - carrier 1 freq. registers (3 bytes) * - carrier 2 freq. registers (3 bytes) * - demudulator config register * - FM de-emphasis register (slow identification mode) * Note: frequency registers must be written in single i2c transfer. 
*/ static struct tda9874a_MODES { char *name; audiocmd cmd; } tda9874a_modelist[9] = { { "A2, B/G", /* default */ { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x77,0xA0,0x00, 0x00,0x00 }} }, { "A2, M (Korea)", { 9, { TDA9874A_C1FRA, 0x5D,0xC0,0x00, 0x62,0x6A,0xAA, 0x20,0x22 }} }, { "A2, D/K (1)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x82,0x60,0x00, 0x00,0x00 }} }, { "A2, D/K (2)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x8C,0x75,0x55, 0x00,0x00 }} }, { "A2, D/K (3)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x77,0xA0,0x00, 0x00,0x00 }} }, { "NICAM, I", { 9, { TDA9874A_C1FRA, 0x7D,0x00,0x00, 0x88,0x8A,0xAA, 0x08,0x33 }} }, { "NICAM, B/G", { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x79,0xEA,0xAA, 0x08,0x33 }} }, { "NICAM, D/K", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x08,0x33 }} }, { "NICAM, L", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x09,0x33 }} } }; static int tda9874a_setup(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; chip_write(chip, TDA9874A_AGCGR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_GCONR, tda9874a_GCONR); chip_write(chip, TDA9874A_MSR, (tda9874a_mode) ? 0x03:0x02); if(tda9874a_dic == 0x11) { chip_write(chip, TDA9874A_FMMR, 0x80); } else { /* dic == 0x07 */ chip_cmd(chip,"tda9874_modelist",&tda9874a_modelist[tda9874a_STD].cmd); chip_write(chip, TDA9874A_FMMR, 0x00); } chip_write(chip, TDA9874A_C1OLAR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_C2OLAR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_NCONR, tda9874a_NCONR); chip_write(chip, TDA9874A_NOLAR, 0x00); /* 0 dB */ /* Note: If signal quality is poor you may want to change NICAM */ /* error limit registers (NLELR and NUELR) to some greater values. */ /* Then the sound would remain stereo, but won't be so clear. */ chip_write(chip, TDA9874A_NLELR, 0x14); /* default */ chip_write(chip, TDA9874A_NUELR, 0x50); /* default */ if(tda9874a_dic == 0x11) { chip_write(chip, TDA9874A_AMCONR, 0xf9); chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 
0x81:0x80); chip_write(chip, TDA9874A_AOSR, 0x80); chip_write(chip, TDA9874A_MDACOSR, (tda9874a_mode) ? 0x82:0x80); chip_write(chip, TDA9874A_ESP, tda9874a_ESP); } else { /* dic == 0x07 */ chip_write(chip, TDA9874A_AMCONR, 0xfb); chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80); chip_write(chip, TDA9874A_AOSR, 0x00); /* or 0x10 */ } v4l2_dbg(1, debug, sd, "tda9874a_setup(): %s [0x%02X].\n", tda9874a_modelist[tda9874a_STD].name,tda9874a_STD); return 1; } static int tda9874a_getrxsubchans(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dsr,nsr,mode; int necr; /* just for debugging */ mode = V4L2_TUNER_SUB_MONO; if(-1 == (dsr = chip_read2(chip,TDA9874A_DSR))) return mode; if(-1 == (nsr = chip_read2(chip,TDA9874A_NSR))) return mode; if(-1 == (necr = chip_read2(chip,TDA9874A_NECR))) return mode; /* need to store dsr/nsr somewhere */ chip->shadow.bytes[MAXREGS-2] = dsr; chip->shadow.bytes[MAXREGS-1] = nsr; if(tda9874a_mode) { /* Note: DSR.RSSF and DSR.AMSTAT bits are also checked. * If NICAM auto-muting is enabled, DSR.AMSTAT=1 indicates * that sound has (temporarily) switched from NICAM to * mono FM (or AM) on 1st sound carrier due to high NICAM bit * error count. So in fact there is no stereo in this case :-( * But changing the mode to V4L2_TUNER_MODE_MONO would switch * external 4052 multiplexer in audio_hook(). */ if(nsr & 0x02) /* NSR.S/MB=1 */ mode = V4L2_TUNER_SUB_STEREO; if(nsr & 0x01) /* NSR.D/SB=1 */ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; } else { if(dsr & 0x02) /* DSR.IDSTE=1 */ mode = V4L2_TUNER_SUB_STEREO; if(dsr & 0x04) /* DSR.IDDUA=1 */ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; } v4l2_dbg(1, debug, sd, "tda9874a_getrxsubchans(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n", dsr, nsr, necr, mode); return mode; } static void tda9874a_setaudmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; /* Disable/enable NICAM auto-muting (based on DSR.RSSF status bit). 
*/ /* If auto-muting is disabled, we can hear a signal of degrading quality. */ if (tda9874a_mode) { if(chip->shadow.bytes[MAXREGS-2] & 0x20) /* DSR.RSSF=1 */ tda9874a_NCONR &= 0xfe; /* enable */ else tda9874a_NCONR |= 0x01; /* disable */ chip_write(chip, TDA9874A_NCONR, tda9874a_NCONR); } /* Note: TDA9874A supports automatic FM dematrixing (FMMR register) * and has auto-select function for audio output (AOSR register). * Old TDA9874H doesn't support these features. * TDA9874A also has additional mono output pin (OUTM), which * on same (all?) tv-cards is not used, anyway (as well as MONOIN). */ if(tda9874a_dic == 0x11) { int aosr = 0x80; int mdacosr = (tda9874a_mode) ? 0x82:0x80; switch(mode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_STEREO: break; case V4L2_TUNER_MODE_LANG1: aosr = 0x80; /* auto-select, dual A/A */ mdacosr = (tda9874a_mode) ? 0x82:0x80; break; case V4L2_TUNER_MODE_LANG2: aosr = 0xa0; /* auto-select, dual B/B */ mdacosr = (tda9874a_mode) ? 0x83:0x81; break; case V4L2_TUNER_MODE_LANG1_LANG2: aosr = 0x00; /* always route L to L and R to R */ mdacosr = (tda9874a_mode) ? 0x82:0x80; break; default: return; } chip_write(chip, TDA9874A_AOSR, aosr); chip_write(chip, TDA9874A_MDACOSR, mdacosr); v4l2_dbg(1, debug, sd, "tda9874a_setaudmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n", mode, aosr, mdacosr); } else { /* dic == 0x07 */ int fmmr,aosr; switch(mode) { case V4L2_TUNER_MODE_MONO: fmmr = 0x00; /* mono */ aosr = 0x10; /* A/A */ break; case V4L2_TUNER_MODE_STEREO: if(tda9874a_mode) { fmmr = 0x00; aosr = 0x00; /* handled by NICAM auto-mute */ } else { fmmr = (tda9874a_ESP == 1) ? 
0x05 : 0x04; /* stereo */ aosr = 0x00; } break; case V4L2_TUNER_MODE_LANG1: fmmr = 0x02; /* dual */ aosr = 0x10; /* dual A/A */ break; case V4L2_TUNER_MODE_LANG2: fmmr = 0x02; /* dual */ aosr = 0x20; /* dual B/B */ break; case V4L2_TUNER_MODE_LANG1_LANG2: fmmr = 0x02; /* dual */ aosr = 0x00; /* dual A/B */ break; default: return; } chip_write(chip, TDA9874A_FMMR, fmmr); chip_write(chip, TDA9874A_AOSR, aosr); v4l2_dbg(1, debug, sd, "tda9874a_setaudmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n", mode, fmmr, aosr); } } static int tda9874a_checkit(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dic,sic; /* device id. and software id. codes */ if(-1 == (dic = chip_read2(chip,TDA9874A_DIC))) return 0; if(-1 == (sic = chip_read2(chip,TDA9874A_SIC))) return 0; v4l2_dbg(1, debug, sd, "tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic); if((dic == 0x11)||(dic == 0x07)) { v4l2_info(sd, "found tda9874%s.\n", (dic == 0x11) ? "a" : "h"); tda9874a_dic = dic; /* remember device id. */ return 1; } return 0; /* not found */ } static int tda9874a_initialize(struct CHIPSTATE *chip) { if (tda9874a_SIF > 2) tda9874a_SIF = 1; if (tda9874a_STD >= ARRAY_SIZE(tda9874a_modelist)) tda9874a_STD = 0; if(tda9874a_AMSEL > 1) tda9874a_AMSEL = 0; if(tda9874a_SIF == 1) tda9874a_GCONR = 0xc0; /* sound IF input 1 */ else tda9874a_GCONR = 0xc1; /* sound IF input 2 */ tda9874a_ESP = tda9874a_STD; tda9874a_mode = (tda9874a_STD < 5) ? 
0 : 1; if(tda9874a_AMSEL == 0) tda9874a_NCONR = 0x01; /* auto-mute: analog mono input */ else tda9874a_NCONR = 0x05; /* auto-mute: 1st carrier FM or AM */ tda9874a_setup(chip); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip description - defines+functions for tda9875 */ /* The TDA9875 is made by Philips Semiconductor * http://www.semiconductors.philips.com * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator * */ /* subaddresses for TDA9875 */ #define TDA9875_MUT 0x12 /*General mute (value --> 0b11001100*/ #define TDA9875_CFG 0x01 /* Config register (value --> 0b00000000 */ #define TDA9875_DACOS 0x13 /*DAC i/o select (ADC) 0b0000100*/ #define TDA9875_LOSR 0x16 /*Line output select regirter 0b0100 0001*/ #define TDA9875_CH1V 0x0c /*Channel 1 volume (mute)*/ #define TDA9875_CH2V 0x0d /*Channel 2 volume (mute)*/ #define TDA9875_SC1 0x14 /*SCART 1 in (mono)*/ #define TDA9875_SC2 0x15 /*SCART 2 in (mono)*/ #define TDA9875_ADCIS 0x17 /*ADC input select (mono) 0b0110 000*/ #define TDA9875_AER 0x19 /*Audio effect (AVL+Pseudo) 0b0000 0110*/ #define TDA9875_MCS 0x18 /*Main channel select (DAC) 0b0000100*/ #define TDA9875_MVL 0x1a /* Main volume gauche */ #define TDA9875_MVR 0x1b /* Main volume droite */ #define TDA9875_MBA 0x1d /* Main Basse */ #define TDA9875_MTR 0x1e /* Main treble */ #define TDA9875_ACS 0x1f /* Auxiliary channel select (FM) 0b0000000*/ #define TDA9875_AVL 0x20 /* Auxiliary volume gauche */ #define TDA9875_AVR 0x21 /* Auxiliary volume droite */ #define TDA9875_ABA 0x22 /* Auxiliary Basse */ #define TDA9875_ATR 0x23 /* Auxiliary treble */ #define TDA9875_MSR 0x02 /* Monitor select register */ #define TDA9875_C1MSB 0x03 /* Carrier 1 (FM) frequency register MSB */ #define TDA9875_C1MIB 0x04 /* Carrier 1 (FM) frequency register (16-8]b */ #define TDA9875_C1LSB 0x05 /* Carrier 1 (FM) frequency register LSB */ #define TDA9875_C2MSB 0x06 /* Carrier 2 (nicam) frequency register MSB */ #define 
TDA9875_C2MIB 0x07 /* Carrier 2 (nicam) frequency register (16-8]b */ #define TDA9875_C2LSB 0x08 /* Carrier 2 (nicam) frequency register LSB */ #define TDA9875_DCR 0x09 /* Demodulateur configuration regirter*/ #define TDA9875_DEEM 0x0a /* FM de-emphasis regirter*/ #define TDA9875_FMAT 0x0b /* FM Matrix regirter*/ /* values */ #define TDA9875_MUTE_ON 0xff /* general mute */ #define TDA9875_MUTE_OFF 0xcc /* general no mute */ static int tda9875_initialize(struct CHIPSTATE *chip) { chip_write(chip, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/ chip_write(chip, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/ chip_write(chip, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/ chip_write(chip, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/ chip_write(chip, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/ chip_write(chip, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/ chip_write(chip, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/ chip_write(chip, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/ chip_write(chip, TDA9875_DCR, 0x00); /*Demod config 0x00*/ chip_write(chip, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/ chip_write(chip, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/ chip_write(chip, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/ chip_write(chip, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/ chip_write(chip, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/ chip_write(chip, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */ chip_write(chip, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/ chip_write(chip, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/ chip_write(chip, TDA9875_LOSR, 0x00); /* line out (in:mono)*/ chip_write(chip, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */ chip_write(chip, TDA9875_MCS, 0x44); /* Main ch select (DAC) */ chip_write(chip, TDA9875_MVL, 0x03); /* Vol Main left 10dB */ chip_write(chip, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/ chip_write(chip, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/ chip_write(chip, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/ chip_write(chip, 
TDA9875_ACS, 0x44); /* Aux chan select (dac)*/ chip_write(chip, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/ chip_write(chip, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/ chip_write(chip, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/ chip_write(chip, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/ chip_write(chip, TDA9875_MUT, 0xcc); /* General mute */ return 0; } static int tda9875_volume(int val) { return (unsigned char)(val / 602 - 84); } static int tda9875_bass(int val) { return (unsigned char)(max(-12, val / 2115 - 15)); } static int tda9875_treble(int val) { return (unsigned char)(val / 2622 - 12); } /* ----------------------------------------------------------------------- */ /* *********************** * * i2c interface functions * * *********************** */ static int tda9875_checkit(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dic, rev; dic = chip_read2(chip, 254); rev = chip_read2(chip, 255); if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */ v4l2_info(sd, "found tda9875%s rev. %d.\n", dic == 0 ? 
"" : "A", rev); return 1; } return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tea6420 */ #define TEA6300_VL 0x00 /* volume left */ #define TEA6300_VR 0x01 /* volume right */ #define TEA6300_BA 0x02 /* bass */ #define TEA6300_TR 0x03 /* treble */ #define TEA6300_FA 0x04 /* fader control */ #define TEA6300_S 0x05 /* switch register */ /* values for those registers: */ #define TEA6300_S_SA 0x01 /* stereo A input */ #define TEA6300_S_SB 0x02 /* stereo B */ #define TEA6300_S_SC 0x04 /* stereo C */ #define TEA6300_S_GMU 0x80 /* general mute */ #define TEA6320_V 0x00 /* volume (0-5)/loudness off (6)/zero crossing mute(7) */ #define TEA6320_FFR 0x01 /* fader front right (0-5) */ #define TEA6320_FFL 0x02 /* fader front left (0-5) */ #define TEA6320_FRR 0x03 /* fader rear right (0-5) */ #define TEA6320_FRL 0x04 /* fader rear left (0-5) */ #define TEA6320_BA 0x05 /* bass (0-4) */ #define TEA6320_TR 0x06 /* treble (0-4) */ #define TEA6320_S 0x07 /* switch register */ /* values for those registers: */ #define TEA6320_S_SA 0x07 /* stereo A input */ #define TEA6320_S_SB 0x06 /* stereo B */ #define TEA6320_S_SC 0x05 /* stereo C */ #define TEA6320_S_SD 0x04 /* stereo D */ #define TEA6320_S_GMU 0x80 /* general mute */ #define TEA6420_S_SA 0x00 /* stereo A input */ #define TEA6420_S_SB 0x01 /* stereo B */ #define TEA6420_S_SC 0x02 /* stereo C */ #define TEA6420_S_SD 0x03 /* stereo D */ #define TEA6420_S_SE 0x04 /* stereo E */ #define TEA6420_S_GMU 0x05 /* general mute */ static int tea6300_shift10(int val) { return val >> 10; } static int tea6300_shift12(int val) { return val >> 12; } /* Assumes 16bit input (values 0x3f to 0x0c are unique, values less than */ /* 0x0c mirror those immediately higher) */ static int tea6320_volume(int val) { return (val / (65535/(63-12)) + 12) & 0x3f; } static int tea6320_shift11(int val) { return val >> 11; } static int tea6320_initialize(struct CHIPSTATE * chip) 
{ chip_write(chip, TEA6320_FFR, 0x3f); chip_write(chip, TEA6320_FFL, 0x3f); chip_write(chip, TEA6320_FRR, 0x3f); chip_write(chip, TEA6320_FRL, 0x3f); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda8425 */ #define TDA8425_VL 0x00 /* volume left */ #define TDA8425_VR 0x01 /* volume right */ #define TDA8425_BA 0x02 /* bass */ #define TDA8425_TR 0x03 /* treble */ #define TDA8425_S1 0x08 /* switch functions */ /* values for those registers: */ #define TDA8425_S1_OFF 0xEE /* audio off (mute on) */ #define TDA8425_S1_CH1 0xCE /* audio channel 1 (mute off) - "linear stereo" mode */ #define TDA8425_S1_CH2 0xCF /* audio channel 2 (mute off) - "linear stereo" mode */ #define TDA8425_S1_MU 0x20 /* mute bit */ #define TDA8425_S1_STEREO 0x18 /* stereo bits */ #define TDA8425_S1_STEREO_SPATIAL 0x18 /* spatial stereo */ #define TDA8425_S1_STEREO_LINEAR 0x08 /* linear stereo */ #define TDA8425_S1_STEREO_PSEUDO 0x10 /* pseudo stereo */ #define TDA8425_S1_STEREO_MONO 0x00 /* forced mono */ #define TDA8425_S1_ML 0x06 /* language selector */ #define TDA8425_S1_ML_SOUND_A 0x02 /* sound a */ #define TDA8425_S1_ML_SOUND_B 0x04 /* sound b */ #define TDA8425_S1_ML_STEREO 0x06 /* stereo */ #define TDA8425_S1_IS 0x01 /* channel selector */ static int tda8425_shift10(int val) { return (val >> 10) | 0xc0; } static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; } static void tda8425_setaudmode(struct CHIPSTATE *chip, int mode) { int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1; switch (mode) { case V4L2_TUNER_MODE_LANG1: s1 |= TDA8425_S1_ML_SOUND_A; s1 |= TDA8425_S1_STEREO_PSEUDO; break; case V4L2_TUNER_MODE_LANG2: s1 |= TDA8425_S1_ML_SOUND_B; s1 |= TDA8425_S1_STEREO_PSEUDO; break; case V4L2_TUNER_MODE_LANG1_LANG2: s1 |= TDA8425_S1_ML_STEREO; s1 |= TDA8425_S1_STEREO_LINEAR; break; case V4L2_TUNER_MODE_MONO: s1 |= TDA8425_S1_ML_STEREO; s1 |= TDA8425_S1_STEREO_MONO; break; case 
V4L2_TUNER_MODE_STEREO: s1 |= TDA8425_S1_ML_STEREO; s1 |= TDA8425_S1_STEREO_SPATIAL; break; default: return; } chip_write(chip,TDA8425_S1,s1); } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for pic16c54 (PV951) */ /* the registers of 16C54, I2C sub address. */ #define PIC16C54_REG_KEY_CODE 0x01 /* Not use. */ #define PIC16C54_REG_MISC 0x02 /* bit definition of the RESET register, I2C data. */ #define PIC16C54_MISC_RESET_REMOTE_CTL 0x01 /* bit 0, Reset to receive the key */ /* code of remote controller */ #define PIC16C54_MISC_MTS_MAIN 0x02 /* bit 1 */ #define PIC16C54_MISC_MTS_SAP 0x04 /* bit 2 */ #define PIC16C54_MISC_MTS_BOTH 0x08 /* bit 3 */ #define PIC16C54_MISC_SND_MUTE 0x10 /* bit 4, Mute Audio(Line-in and Tuner) */ #define PIC16C54_MISC_SND_NOTMUTE 0x20 /* bit 5 */ #define PIC16C54_MISC_SWITCH_TUNER 0x40 /* bit 6 , Switch to Line-in */ #define PIC16C54_MISC_SWITCH_LINE 0x80 /* bit 7 , Switch to Tuner */ /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for TA8874Z */ /* write 1st byte */ #define TA8874Z_LED_STE 0x80 #define TA8874Z_LED_BIL 0x40 #define TA8874Z_LED_EXT 0x20 #define TA8874Z_MONO_SET 0x10 #define TA8874Z_MUTE 0x08 #define TA8874Z_F_MONO 0x04 #define TA8874Z_MODE_SUB 0x02 #define TA8874Z_MODE_MAIN 0x01 /* write 2nd byte */ /*#define TA8874Z_TI 0x80 */ /* test mode */ #define TA8874Z_SEPARATION 0x3f #define TA8874Z_SEPARATION_DEFAULT 0x10 /* read */ #define TA8874Z_B1 0x80 #define TA8874Z_B0 0x40 #define TA8874Z_CHAG_FLAG 0x20 /* * B1 B0 * mono L H * stereo L L * BIL H L */ static int ta8874z_getrxsubchans(struct CHIPSTATE *chip) { int val, mode; val = chip_read(chip); mode = V4L2_TUNER_SUB_MONO; if (val & TA8874Z_B1){ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; }else if (!(val & TA8874Z_B0)){ mode = V4L2_TUNER_SUB_STEREO; } /* v4l2_dbg(1, debug, &chip->sd, "ta8874z_getrxsubchans(): raw 
chip read: 0x%02x, return: 0x%02x\n", val, mode); */ return mode; } static audiocmd ta8874z_stereo = { 2, {0, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_mono = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_main = {2, { 0, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_sub = {2, { TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_both = {2, { TA8874Z_MODE_MAIN | TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT}}; static void ta8874z_setaudmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; int update = 1; audiocmd *t = NULL; v4l2_dbg(1, debug, sd, "ta8874z_setaudmode(): mode: 0x%02x\n", mode); switch(mode){ case V4L2_TUNER_MODE_MONO: t = &ta8874z_mono; break; case V4L2_TUNER_MODE_STEREO: t = &ta8874z_stereo; break; case V4L2_TUNER_MODE_LANG1: t = &ta8874z_main; break; case V4L2_TUNER_MODE_LANG2: t = &ta8874z_sub; break; case V4L2_TUNER_MODE_LANG1_LANG2: t = &ta8874z_both; break; default: update = 0; } if(update) chip_cmd(chip, "TA8874Z", t); } static int ta8874z_checkit(struct CHIPSTATE *chip) { int rc; rc = chip_read(chip); return ((rc & 0x1f) == 0x1f) ? 
1 : 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - struct CHIPDESC */ /* insmod options to enable/disable individual audio chips */ static int tda8425 = 1; static int tda9840 = 1; static int tda9850 = 1; static int tda9855 = 1; static int tda9873 = 1; static int tda9874a = 1; static int tda9875 = 1; static int tea6300; /* default 0 - address clash with msp34xx */ static int tea6320; /* default 0 - address clash with msp34xx */ static int tea6420 = 1; static int pic16c54 = 1; static int ta8874z; /* default 0 - address clash with tda9840 */ module_param(tda8425, int, 0444); module_param(tda9840, int, 0444); module_param(tda9850, int, 0444); module_param(tda9855, int, 0444); module_param(tda9873, int, 0444); module_param(tda9874a, int, 0444); module_param(tda9875, int, 0444); module_param(tea6300, int, 0444); module_param(tea6320, int, 0444); module_param(tea6420, int, 0444); module_param(pic16c54, int, 0444); module_param(ta8874z, int, 0444); static struct CHIPDESC chiplist[] = { { .name = "tda9840", .insmodopt = &tda9840, .addr_lo = I2C_ADDR_TDA9840 >> 1, .addr_hi = I2C_ADDR_TDA9840 >> 1, .registers = 5, .flags = CHIP_NEED_CHECKMODE, /* callbacks */ .checkit = tda9840_checkit, .getrxsubchans = tda9840_getrxsubchans, .setaudmode = tda9840_setaudmode, .init = { 2, { TDA9840_TEST, TDA9840_TEST_INT1SN /* ,TDA9840_SW, TDA9840_MONO */} } }, { .name = "tda9873h", .insmodopt = &tda9873, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 3, .flags = CHIP_HAS_INPUTSEL | CHIP_NEED_CHECKMODE, /* callbacks */ .checkit = tda9873_checkit, .getrxsubchans = tda9873_getrxsubchans, .setaudmode = tda9873_setaudmode, .init = { 4, { TDA9873_SW, 0xa4, 0x06, 0x03 } }, .inputreg = TDA9873_SW, .inputmute = TDA9873_MUTE | TDA9873_AUTOMUTE, .inputmap = {0xa0, 0xa2, 0xa0, 0xa0}, .inputmask = TDA9873_INP_MASK|TDA9873_MUTE|TDA9873_AUTOMUTE, }, { .name = "tda9874h/a", .insmodopt = &tda9874a, .addr_lo 
= I2C_ADDR_TDA9874 >> 1, .addr_hi = I2C_ADDR_TDA9874 >> 1, .flags = CHIP_NEED_CHECKMODE, /* callbacks */ .initialize = tda9874a_initialize, .checkit = tda9874a_checkit, .getrxsubchans = tda9874a_getrxsubchans, .setaudmode = tda9874a_setaudmode, }, { .name = "tda9875", .insmodopt = &tda9875, .addr_lo = I2C_ADDR_TDA9875 >> 1, .addr_hi = I2C_ADDR_TDA9875 >> 1, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE, /* callbacks */ .initialize = tda9875_initialize, .checkit = tda9875_checkit, .volfunc = tda9875_volume, .bassfunc = tda9875_bass, .treblefunc = tda9875_treble, .leftreg = TDA9875_MVL, .rightreg = TDA9875_MVR, .bassreg = TDA9875_MBA, .treblereg = TDA9875_MTR, .leftinit = 58880, .rightinit = 58880, }, { .name = "tda9850", .insmodopt = &tda9850, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 11, .getrxsubchans = tda985x_getrxsubchans, .setaudmode = tda985x_setaudmode, .init = { 8, { TDA9850_C4, 0x08, 0x08, TDA985x_STEREO, 0x07, 0x10, 0x10, 0x03 } } }, { .name = "tda9855", .insmodopt = &tda9855, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 11, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE, .leftreg = TDA9855_VL, .rightreg = TDA9855_VR, .bassreg = TDA9855_BA, .treblereg = TDA9855_TR, /* callbacks */ .volfunc = tda9855_volume, .bassfunc = tda9855_bass, .treblefunc = tda9855_treble, .getrxsubchans = tda985x_getrxsubchans, .setaudmode = tda985x_setaudmode, .init = { 12, { 0, 0x6f, 0x6f, 0x0e, 0x07<<1, 0x8<<2, TDA9855_MUTE | TDA9855_AVL | TDA9855_LOUD | TDA9855_INT, TDA985x_STEREO | TDA9855_LINEAR | TDA9855_TZCM | TDA9855_VZCM, 0x07, 0x10, 0x10, 0x03 }} }, { .name = "tea6300", .insmodopt = &tea6300, .addr_lo = I2C_ADDR_TEA6300 >> 1, .addr_hi = I2C_ADDR_TEA6300 >> 1, .registers = 6, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TEA6300_VR, .rightreg = TEA6300_VL, .bassreg = TEA6300_BA, .treblereg = TEA6300_TR, /* callbacks */ .volfunc = tea6300_shift10, .bassfunc 
= tea6300_shift12, .treblefunc = tea6300_shift12, .inputreg = TEA6300_S, .inputmap = { TEA6300_S_SA, TEA6300_S_SB, TEA6300_S_SC }, .inputmute = TEA6300_S_GMU, }, { .name = "tea6320", .insmodopt = &tea6320, .addr_lo = I2C_ADDR_TEA6300 >> 1, .addr_hi = I2C_ADDR_TEA6300 >> 1, .registers = 8, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TEA6320_V, .rightreg = TEA6320_V, .bassreg = TEA6320_BA, .treblereg = TEA6320_TR, /* callbacks */ .initialize = tea6320_initialize, .volfunc = tea6320_volume, .bassfunc = tea6320_shift11, .treblefunc = tea6320_shift11, .inputreg = TEA6320_S, .inputmap = { TEA6320_S_SA, TEA6420_S_SB, TEA6300_S_SC, TEA6320_S_SD }, .inputmute = TEA6300_S_GMU, }, { .name = "tea6420", .insmodopt = &tea6420, .addr_lo = I2C_ADDR_TEA6420 >> 1, .addr_hi = I2C_ADDR_TEA6420 >> 1, .registers = 1, .flags = CHIP_HAS_INPUTSEL, .inputreg = -1, .inputmap = { TEA6420_S_SA, TEA6420_S_SB, TEA6420_S_SC }, .inputmute = TEA6300_S_GMU, }, { .name = "tda8425", .insmodopt = &tda8425, .addr_lo = I2C_ADDR_TDA8425 >> 1, .addr_hi = I2C_ADDR_TDA8425 >> 1, .registers = 9, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TDA8425_VL, .rightreg = TDA8425_VR, .bassreg = TDA8425_BA, .treblereg = TDA8425_TR, /* callbacks */ .volfunc = tda8425_shift10, .bassfunc = tda8425_shift12, .treblefunc = tda8425_shift12, .setaudmode = tda8425_setaudmode, .inputreg = TDA8425_S1, .inputmap = { TDA8425_S1_CH1, TDA8425_S1_CH1, TDA8425_S1_CH1 }, .inputmute = TDA8425_S1_OFF, }, { .name = "pic16c54 (PV951)", .insmodopt = &pic16c54, .addr_lo = I2C_ADDR_PIC16C54 >> 1, .addr_hi = I2C_ADDR_PIC16C54>> 1, .registers = 2, .flags = CHIP_HAS_INPUTSEL, .inputreg = PIC16C54_REG_MISC, .inputmap = {PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_TUNER, PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_LINE, PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_LINE, PIC16C54_MISC_SND_MUTE}, .inputmute = PIC16C54_MISC_SND_MUTE, }, { .name = "ta8874z", .checkit = 
ta8874z_checkit, .insmodopt = &ta8874z, .addr_lo = I2C_ADDR_TDA9840 >> 1, .addr_hi = I2C_ADDR_TDA9840 >> 1, .registers = 2, /* callbacks */ .getrxsubchans = ta8874z_getrxsubchans, .setaudmode = ta8874z_setaudmode, .init = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}}, }, { .name = NULL } /* EOF */ }; /* ---------------------------------------------------------------------- */ static int tvaudio_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (!(desc->flags & CHIP_HAS_INPUTSEL)) break; ctrl->value=chip->muted; return 0; case V4L2_CID_AUDIO_VOLUME: if (!(desc->flags & CHIP_HAS_VOLUME)) break; ctrl->value = max(chip->left,chip->right); return 0; case V4L2_CID_AUDIO_BALANCE: { int volume; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) ctrl->value=(32768*min(chip->left,chip->right))/volume; else ctrl->value=32768; return 0; } case V4L2_CID_AUDIO_BASS: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; ctrl->value = chip->bass; return 0; case V4L2_CID_AUDIO_TREBLE: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; ctrl->value = chip->treble; return 0; } return -EINVAL; } static int tvaudio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (!(desc->flags & CHIP_HAS_INPUTSEL)) break; if (ctrl->value < 0 || ctrl->value >= 2) return -ERANGE; chip->muted = ctrl->value; if (chip->muted) chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask); else chip_write_masked(chip,desc->inputreg, desc->inputmap[chip->input],desc->inputmask); return 0; case V4L2_CID_AUDIO_VOLUME: { int volume,balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) balance=(32768*min(chip->left,chip->right))/volume; else 
balance=32768; volume=ctrl->value; chip->left = (min(65536 - balance,32768) * volume) / 32768; chip->right = (min(balance,volume *(__u16)32768)) / 32768; chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BALANCE: { int volume, balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left, chip->right); balance = ctrl->value; chip->left = (min(65536 - balance, 32768) * volume) / 32768; chip->right = (min(balance, volume * (__u16)32768)) / 32768; chip_write(chip, desc->leftreg, desc->volfunc(chip->left)); chip_write(chip, desc->rightreg, desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BASS: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; chip->bass = ctrl->value; chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass)); return 0; case V4L2_CID_AUDIO_TREBLE: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; chip->treble = ctrl->value; chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble)); return 0; } return -EINVAL; } /* ---------------------------------------------------------------------- */ /* video4linux interface */ static int tvaudio_s_radio(struct v4l2_subdev *sd) { struct CHIPSTATE *chip = to_state(sd); chip->radio = 1; /* del_timer(&chip->wt); */ return 0; } static int tvaudio_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (qc->id) { case V4L2_CID_AUDIO_MUTE: if (desc->flags & CHIP_HAS_INPUTSEL) return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0); break; case V4L2_CID_AUDIO_VOLUME: if (desc->flags & CHIP_HAS_VOLUME) return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880); break; case V4L2_CID_AUDIO_BALANCE: if (desc->flags & CHIP_HAS_VOLUME) return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768); break; case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: if (desc->flags & CHIP_HAS_BASSTREBLE) return 
v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768); break; default: break; } return -EINVAL; } static int tvaudio_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; if (!(desc->flags & CHIP_HAS_INPUTSEL)) return 0; if (input >= 4) return -EINVAL; /* There are four inputs: tuner, radio, extern and intern. */ chip->input = input; if (chip->muted) return 0; chip_write_masked(chip, desc->inputreg, desc->inputmap[chip->input], desc->inputmask); return 0; } static int tvaudio_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; if (!desc->setaudmode) return 0; if (chip->radio) return 0; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: case V4L2_TUNER_MODE_LANG2: case V4L2_TUNER_MODE_LANG1_LANG2: break; default: return -EINVAL; } chip->audmode = vt->audmode; if (chip->thread) wake_up_process(chip->thread); else desc->setaudmode(chip, vt->audmode); return 0; } static int tvaudio_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; if (!desc->getrxsubchans) return 0; if (chip->radio) return 0; vt->audmode = chip->audmode; vt->rxsubchans = desc->getrxsubchans(chip); vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; return 0; } static int tvaudio_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct CHIPSTATE *chip = to_state(sd); chip->radio = 0; return 0; } static int tvaudio_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; /* For chips that provide getrxsubchans and setaudmode, and doesn't automatically follows the stereo carrier, a kthread is created to set the audio standard. 
In this case, when then the video channel is changed, tvaudio starts on MONO mode. After waiting for 2 seconds, the kernel thread is called, to follow whatever audio standard is pointed by the audio carrier. */ if (chip->thread) { desc->setaudmode(chip, V4L2_TUNER_MODE_MONO); chip->prevmode = -1; /* reset previous mode */ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); } return 0; } static int tvaudio_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVAUDIO, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tvaudio_core_ops = { .g_chip_ident = tvaudio_g_chip_ident, .queryctrl = tvaudio_queryctrl, .g_ctrl = tvaudio_g_ctrl, .s_ctrl = tvaudio_s_ctrl, .s_std = tvaudio_s_std, }; static const struct v4l2_subdev_tuner_ops tvaudio_tuner_ops = { .s_radio = tvaudio_s_radio, .s_frequency = tvaudio_s_frequency, .s_tuner = tvaudio_s_tuner, .g_tuner = tvaudio_g_tuner, }; static const struct v4l2_subdev_audio_ops tvaudio_audio_ops = { .s_routing = tvaudio_s_routing, }; static const struct v4l2_subdev_ops tvaudio_ops = { .core = &tvaudio_core_ops, .tuner = &tvaudio_tuner_ops, .audio = &tvaudio_audio_ops, }; /* ----------------------------------------------------------------------- */ /* i2c registration */ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct CHIPSTATE *chip; struct CHIPDESC *desc; struct v4l2_subdev *sd; if (debug) { printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n"); printk(KERN_INFO "tvaudio: known chips: "); for (desc = chiplist; desc->name != NULL; desc++) printk("%s%s", (desc == chiplist) ? 
"" : ", ", desc->name); printk("\n"); } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; sd = &chip->sd; v4l2_i2c_subdev_init(sd, client, &tvaudio_ops); /* find description for the chip */ v4l2_dbg(1, debug, sd, "chip found @ 0x%x\n", client->addr<<1); for (desc = chiplist; desc->name != NULL; desc++) { if (0 == *(desc->insmodopt)) continue; if (client->addr < desc->addr_lo || client->addr > desc->addr_hi) continue; if (desc->checkit && !desc->checkit(chip)) continue; break; } if (desc->name == NULL) { v4l2_dbg(1, debug, sd, "no matching chip description found\n"); kfree(chip); return -EIO; } v4l2_info(sd, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name); if (desc->flags) { v4l2_dbg(1, debug, sd, "matches:%s%s%s.\n", (desc->flags & CHIP_HAS_VOLUME) ? " volume" : "", (desc->flags & CHIP_HAS_BASSTREBLE) ? " bass/treble" : "", (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : ""); } /* fill required data structures */ if (!id) strlcpy(client->name, desc->name, I2C_NAME_SIZE); chip->desc = desc; chip->shadow.count = desc->registers+1; chip->prevmode = -1; chip->audmode = V4L2_TUNER_MODE_LANG1; /* initialization */ if (desc->initialize != NULL) desc->initialize(chip); else chip_cmd(chip, "init", &desc->init); if (desc->flags & CHIP_HAS_VOLUME) { if (!desc->volfunc) { /* This shouldn't be happen. Warn user, but keep working without volume controls */ v4l2_info(sd, "volume callback undefined!\n"); desc->flags &= ~CHIP_HAS_VOLUME; } else { chip->left = desc->leftinit ? desc->leftinit : 65535; chip->right = desc->rightinit ? desc->rightinit : 65535; chip_write(chip, desc->leftreg, desc->volfunc(chip->left)); chip_write(chip, desc->rightreg, desc->volfunc(chip->right)); } } if (desc->flags & CHIP_HAS_BASSTREBLE) { if (!desc->bassfunc || !desc->treblefunc) { /* This shouldn't be happen. 
Warn user, but keep working without bass/treble controls */ v4l2_info(sd, "bass/treble callbacks undefined!\n"); desc->flags &= ~CHIP_HAS_BASSTREBLE; } else { chip->treble = desc->trebleinit ? desc->trebleinit : 32768; chip->bass = desc->bassinit ? desc->bassinit : 32768; chip_write(chip, desc->bassreg, desc->bassfunc(chip->bass)); chip_write(chip, desc->treblereg, desc->treblefunc(chip->treble)); } } chip->thread = NULL; init_timer(&chip->wt); if (desc->flags & CHIP_NEED_CHECKMODE) { if (!desc->getrxsubchans || !desc->setaudmode) { /* This shouldn't be happen. Warn user, but keep working without kthread */ v4l2_info(sd, "set/get mode callbacks undefined!\n"); return 0; } /* start async thread */ chip->wt.function = chip_thread_wake; chip->wt.data = (unsigned long)chip; chip->thread = kthread_run(chip_thread, chip, client->name); if (IS_ERR(chip->thread)) { v4l2_warn(sd, "failed to create kthread\n"); chip->thread = NULL; } } return 0; } static int tvaudio_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct CHIPSTATE *chip = to_state(sd); del_timer_sync(&chip->wt); if (chip->thread) { /* shutdown async thread */ kthread_stop(chip->thread); chip->thread = NULL; } v4l2_device_unregister_subdev(sd); kfree(chip); return 0; } /* This driver supports many devices and the idea is to let the driver detect which device is present. So rather than listing all supported devices here, we pretend to support a single, fake device type. */ static const struct i2c_device_id tvaudio_id[] = { { "tvaudio", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tvaudio_id); static struct i2c_driver tvaudio_driver = { .driver = { .owner = THIS_MODULE, .name = "tvaudio", }, .probe = tvaudio_probe, .remove = tvaudio_remove, .id_table = tvaudio_id, }; module_i2c_driver(tvaudio_driver);
gpl-2.0
pbystrup/CHIP-linux
net/netfilter/xt_TPROXY.c
404
17216
/* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2010 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/checksum.h> #include <net/udp.h> #include <net/tcp.h> #include <net/inet_sock.h> #include <net/inet_hashtables.h> #include <linux/inetdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_TPROXY_HAVE_IPV6 1 #include <net/if_inet6.h> #include <net/addrconf.h> #include <net/inet6_hashtables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <linux/netfilter/xt_TPROXY.h> enum nf_tproxy_lookup_t { NFT_LOOKUP_LISTENER, NFT_LOOKUP_ESTABLISHED, }; static bool tproxy_sk_is_transparent(struct sock *sk) { switch (sk->sk_state) { case TCP_TIME_WAIT: if (inet_twsk(sk)->tw_transparent) return true; break; case TCP_NEW_SYN_RECV: if (inet_rsk(inet_reqsk(sk))->no_srccheck) return true; break; default: if (inet_sk(sk)->transparent) return true; } sock_gen_put(sk); return false; } static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { struct in_device *indev; __be32 laddr; if (user_laddr) return user_laddr; laddr = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); for_primary_ifa(indev) { laddr = ifa->ifa_local; break; } endfor_ifa(indev); rcu_read_unlock(); return laddr ? laddr : daddr; } /* * This is used when the user wants to intercept a connection matching * an explicit iptables rule. 
In this case the sockets are assumed * matching in preference order: * * - match: if there's a fully established connection matching the * _packet_ tuple, it is returned, assuming the redirection * already took place and we process a packet belonging to an * established connection * * - match: if there's a listening socket matching the redirection * (e.g. on-port & on-ip of the connection), it is returned, * regardless if it was bound to 0.0.0.0 or an explicit * address. The reasoning is that if there's an explicit rule, it * does not really matter if the listener is bound to an interface * or to 0. The user already stated that he wants redirection * (since he added the rule). * * Please note that there's an overlap between what a TPROXY target * and a socket match will match. Normally if you have both rules the * "socket" match will be the first one, effectively all packets * belonging to established connections going through that one. */ static inline struct sock * nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, const __be32 saddr, const __be32 daddr, const __be16 sport, const __be16 dport, const struct net_device *in, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; switch (protocol) { case IPPROTO_TCP: switch (lookup_type) { case NFT_LOOKUP_LISTENER: sk = inet_lookup_listener(net, &tcp_hashinfo, saddr, sport, daddr, dport, in->ifindex); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ break; case NFT_LOOKUP_ESTABLISHED: sk = inet_lookup_established(net, &tcp_hashinfo, saddr, sport, daddr, dport, in->ifindex); break; default: BUG(); } break; case IPPROTO_UDP: sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); if (sk) { int connected = (sk->sk_state == TCP_ESTABLISHED); int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY 
needs 0 bound * listeners too */ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || (lookup_type == NFT_LOOKUP_LISTENER && connected)) { sock_put(sk); sk = NULL; } } break; default: WARN_ON(1); sk = NULL; } pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n", protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); return sk; } #ifdef XT_TPROXY_HAVE_IPV6 static inline struct sock * nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, const __be16 sport, const __be16 dport, const struct net_device *in, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; switch (protocol) { case IPPROTO_TCP: switch (lookup_type) { case NFT_LOOKUP_LISTENER: sk = inet6_lookup_listener(net, &tcp_hashinfo, saddr, sport, daddr, ntohs(dport), in->ifindex); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ break; case NFT_LOOKUP_ESTABLISHED: sk = __inet6_lookup_established(net, &tcp_hashinfo, saddr, sport, daddr, ntohs(dport), in->ifindex); break; default: BUG(); } break; case IPPROTO_UDP: sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); if (sk) { int connected = (sk->sk_state == TCP_ESTABLISHED); int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || (lookup_type == NFT_LOOKUP_LISTENER && connected)) { sock_put(sk); sk = NULL; } } break; default: WARN_ON(1); sk = NULL; } pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n", protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk); return sk; } #endif /** * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT 
reopen redirections * @skb: The skb being processed. * @laddr: IPv4 address to redirect to or zero. * @lport: TCP port to redirect to or zero. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait4() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr _hdr, *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr ? laddr : iph->daddr, hp->source, lport ? 
lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk)); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } /* assign a socket to the skb -- consumes sk */ static void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); skb->sk = sk; skb->destructor = sock_edemux; } static unsigned int tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp; struct sock *sk; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr4(skb, laddr, iph->daddr); if (!lport) lport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait4(skb, laddr, lport, sk); else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto 
%hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static unsigned int tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); } static unsigned int tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); } #ifdef XT_TPROXY_HAVE_IPV6 static inline const struct in6_addr * tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, const struct in6_addr *daddr) { struct inet6_dev *indev; struct inet6_ifaddr *ifa; struct in6_addr *laddr; if (!ipv6_addr_any(user_laddr)) return user_laddr; laddr = NULL; rcu_read_lock(); indev = __in6_dev_get(skb->dev); if (indev) list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; laddr = &ifa->addr; break; } rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @tproto: Transport protocol. * @thoff: Transport protocol header offset. * @par: Iptables target parameters. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait6() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. 
*/ static struct sock * tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, const struct xt_action_param *par, struct sock *sk) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr _hdr, *hp; const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr), hp->source, tgi->lport ? tgi->lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk)); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; struct udphdr _hdr, *hp; struct sock *sk; const struct in6_addr *laddr; __be16 lport; int thoff = 0; int tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; } hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n"); return NF_DROP; } /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, par->in, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr); lport = tgi->lport ? 
tgi->lport : hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait6(skb, tproto, thoff, par, sk); else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, laddr, hp->source, lport, par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IP6T_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } #endif static int tproxy_tg4_check(const struct xt_tgchk_param *par) { const struct ipt_ip *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IPT_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } static struct xt_target tproxy_tg_reg[] __read_mostly = { { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v0, .revision = 0, .targetsize = sizeof(struct xt_tproxy_target_info), .checkentry = tproxy_tg4_check, .hooks = 1 << 
NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #ifdef XT_TPROXY_HAVE_IPV6 { .name = "TPROXY", .family = NFPROTO_IPV6, .table = "mangle", .target = tproxy_tg6_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg6_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #endif }; static int __init tproxy_tg_init(void) { nf_defrag_ipv4_enable(); #ifdef XT_TPROXY_HAVE_IPV6 nf_defrag_ipv6_enable(); #endif return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } static void __exit tproxy_tg_exit(void) { xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } module_init(tproxy_tg_init); module_exit(tproxy_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs"); MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module."); MODULE_ALIAS("ipt_TPROXY"); MODULE_ALIAS("ip6t_TPROXY");
gpl-2.0
mtitinger/acme-linux
arch/powerpc/sysdev/mpic_msi.c
1684
2462
/*
 * MSI source-number allocator glue for the MPIC interrupt controller.
 *
 * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */

#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>

#include <sysdev/mpic.h>

/*
 * Mark @hwirq as reserved so the MSI allocator never hands it out.
 * Must be safe to call before mpic_msi_init_allocator() has run.
 */
void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
{
	/* The mpic calls this even when there is no allocator setup */
	if (!mpic->msi_bitmap.bitmap)
		return;

	msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}

#ifdef CONFIG_MPIC_U3_HT_IRQS
/*
 * Fallback used when the device tree carries no MSI reservation data:
 * reserve the source numbers known to be used by the U3/U4 hardware,
 * plus every hwirq that any device-tree node actually maps.
 * Always returns 0.
 */
static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
	irq_hw_number_t hwirq;
	const struct irq_domain_ops *ops = mpic->irqhost->ops;
	struct device_node *np;
	int flags, index, i;
	struct of_phandle_args oirq;

	pr_debug("mpic: found U3, guessing msi allocator setup\n");

	/* Reserve source numbers we know are reserved in the HW.
	 *
	 * This is a bit of a mix of U3 and U4 reserves but that's going
	 * to work fine, we have plenty enough numbers left so let's just
	 * mark anything we don't like reserved.
	 */
	for (i = 0; i < 8; i++)
		msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);

	for (i = 42; i < 46; i++)
		msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);

	for (i = 100; i < 105; i++)
		msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);

	for (i = 124; i < mpic->num_sources; i++)
		msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);

	/*
	 * Walk every device-tree node and reserve each interrupt it
	 * references, translated to a hwirq via the irq domain's
	 * xlate hook.
	 */
	np = NULL;
	while ((np = of_find_all_nodes(np))) {
		pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);

		index = 0;
		while (of_irq_parse_one(np, index++, &oirq) == 0) {
			ops->xlate(mpic->irqhost, NULL, oirq.args,
				   oirq.args_count, &hwirq, &flags);
			msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
		}
	}

	return 0;
}
#else
static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
	/* U3 HT support not configured: report failure unconditionally. */
	return -1;
}
#endif

/*
 * Allocate the MSI bitmap for @mpic and seed it with reserved hwirqs,
 * preferring device-tree reservation data and falling back to the U3
 * heuristic when none is present.  Returns 0 on success, non-zero on
 * failure; the bitmap is freed again on the failure path.
 */
int mpic_msi_init_allocator(struct mpic *mpic)
{
	int rc;

	rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
			      mpic->irqhost->of_node);
	if (rc)
		return rc;

	/* A positive return means "no reservation data in the tree". */
	rc = msi_bitmap_reserve_dt_hwirqs(&mpic->msi_bitmap);
	if (rc > 0) {
		if (mpic->flags & MPIC_U3_HT_IRQS)
			rc = mpic_msi_reserve_u3_hwirqs(mpic);

		if (rc) {
			msi_bitmap_free(&mpic->msi_bitmap);
			return rc;
		}
	}

	return 0;
}
gpl-2.0
Pafcholini/Beta_TW
sound/pci/rme96.c
1940
67958
/*
 * ALSA driver for RME Digi96, Digi96/8 and Digi96/8 PRO/PAD/PST audio
 * interfaces
 *
 * Copyright (c) 2000, 2001 Anders Torger <torger@ludd.luth.se>
 *
 * Thanks to Henk Hesselink <henk@anda.nl> for the analog volume control
 * code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>

#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>

#include <asm/io.h>

/* note, two last pcis should be equal, it is not a bug */

MODULE_AUTHOR("Anders Torger <torger@ludd.luth.se>");
MODULE_DESCRIPTION("RME Digi96, Digi96/8, Digi96/8 PRO, Digi96/8 PST, "
		   "Digi96/8 PAD");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{RME,Digi96},"
			"{RME,Digi96/8},"
			"{RME,Digi96/8 PRO},"
			"{RME,Digi96/8 PST},"
			"{RME,Digi96/8 PAD}}");

/* Standard ALSA per-card module parameters: one slot per possible card. */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for RME Digi96 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for RME Digi96 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable RME Digi96 soundcard.");

/*
 * Defines for RME Digi96 series, from internal RME reference documents
 * dated 12.01.00
 */

#define RME96_SPDIF_NCHANNELS 2

/* Playback and capture buffer size */
#define RME96_BUFFER_SIZE 0x10000

/* IO area size */
#define RME96_IO_SIZE 0x60000

/* IO area offsets */
#define RME96_IO_PLAY_BUFFER      0x0
#define RME96_IO_REC_BUFFER       0x10000
#define RME96_IO_CONTROL_REGISTER 0x20000
#define RME96_IO_ADDITIONAL_REG   0x20004
#define RME96_IO_CONFIRM_PLAY_IRQ 0x20008
#define RME96_IO_CONFIRM_REC_IRQ  0x2000C
#define RME96_IO_SET_PLAY_POS     0x40000
#define RME96_IO_RESET_PLAY_POS   0x4FFFC
#define RME96_IO_SET_REC_POS      0x50000
#define RME96_IO_RESET_REC_POS    0x5FFFC
/* NOTE(review): the read-back offsets below alias the control/additional
 * register window (0x20000/0x30000) — matches the values above, presumably
 * intentional hardware decoding; verify against the RME documents. */
#define RME96_IO_GET_PLAY_POS     0x20000
#define RME96_IO_GET_REC_POS      0x30000

/* Write control register bits */
#define RME96_WCR_START     (1 << 0)
#define RME96_WCR_START_2   (1 << 1)
#define RME96_WCR_GAIN_0    (1 << 2)
#define RME96_WCR_GAIN_1    (1 << 3)
#define RME96_WCR_MODE24    (1 << 4)
#define RME96_WCR_MODE24_2  (1 << 5)
#define RME96_WCR_BM        (1 << 6)
#define RME96_WCR_BM_2      (1 << 7)
#define RME96_WCR_ADAT      (1 << 8)
#define RME96_WCR_FREQ_0    (1 << 9)
#define RME96_WCR_FREQ_1    (1 << 10)
#define RME96_WCR_DS        (1 << 11)
#define RME96_WCR_PRO       (1 << 12)
#define RME96_WCR_EMP       (1 << 13)
#define RME96_WCR_SEL       (1 << 14)
#define RME96_WCR_MASTER    (1 << 15)
#define RME96_WCR_PD        (1 << 16)
#define RME96_WCR_INP_0     (1 << 17)
#define RME96_WCR_INP_1     (1 << 18)
#define RME96_WCR_THRU_0    (1 << 19)
#define RME96_WCR_THRU_1    (1 << 20)
#define RME96_WCR_THRU_2    (1 << 21)
#define RME96_WCR_THRU_3    (1 << 22)
#define RME96_WCR_THRU_4    (1 << 23)
#define RME96_WCR_THRU_5    (1 << 24)
#define RME96_WCR_THRU_6    (1 << 25)
#define RME96_WCR_THRU_7    (1 << 26)
#define RME96_WCR_DOLBY     (1 << 27)
#define RME96_WCR_MONITOR_0 (1 << 28)
#define RME96_WCR_MONITOR_1 (1 << 29)
#define RME96_WCR_ISEL      (1 << 30)
#define RME96_WCR_IDIS      (1 << 31)

/* Bit positions of the two-bit fields above, for shift-and-mask access. */
#define RME96_WCR_BITPOS_GAIN_0    2
#define RME96_WCR_BITPOS_GAIN_1    3
#define RME96_WCR_BITPOS_FREQ_0    9
#define RME96_WCR_BITPOS_FREQ_1    10
#define RME96_WCR_BITPOS_INP_0     17
#define RME96_WCR_BITPOS_INP_1     18
#define RME96_WCR_BITPOS_MONITOR_0 28
#define RME96_WCR_BITPOS_MONITOR_1 29

/* Read control register bits */
#define RME96_RCR_AUDIO_ADDR_MASK 0xFFFF
#define RME96_RCR_IRQ_2    (1 << 16)
#define RME96_RCR_T_OUT    (1 << 17)
#define RME96_RCR_DEV_ID_0 (1 << 21)
#define RME96_RCR_DEV_ID_1 (1 << 22)
#define RME96_RCR_LOCK     (1 << 23)
#define RME96_RCR_VERF     (1 << 26)
#define RME96_RCR_F0       (1 << 27)
#define RME96_RCR_F1       (1 << 28)
#define RME96_RCR_F2       (1 << 29)
#define RME96_RCR_AUTOSYNC (1 << 30)
#define RME96_RCR_IRQ      (1 << 31)

#define RME96_RCR_BITPOS_F0 27
#define RME96_RCR_BITPOS_F1 28
#define RME96_RCR_BITPOS_F2 29

/* Additional register bits */
#define RME96_AR_WSEL      (1 << 0)
#define RME96_AR_ANALOG    (1 << 1)
#define RME96_AR_FREQPAD_0 (1 << 2)
#define RME96_AR_FREQPAD_1 (1 << 3)
#define RME96_AR_FREQPAD_2 (1 << 4)
#define RME96_AR_PD2       (1 << 5)
#define RME96_AR_DAC_EN    (1 << 6)
#define RME96_AR_CLATCH    (1 << 7)
#define RME96_AR_CCLK      (1 << 8)
#define RME96_AR_CDATA     (1 << 9)

#define RME96_AR_BITPOS_F0 2
#define RME96_AR_BITPOS_F1 3
#define RME96_AR_BITPOS_F2 4

/* Monitor tracks */
#define RME96_MONITOR_TRACKS_1_2 0
#define RME96_MONITOR_TRACKS_3_4 1
#define RME96_MONITOR_TRACKS_5_6 2
#define RME96_MONITOR_TRACKS_7_8 3

/* Attenuation */
#define RME96_ATTENUATION_0  0
#define RME96_ATTENUATION_6  1
#define RME96_ATTENUATION_12 2
#define RME96_ATTENUATION_18 3

/* Input types */
#define RME96_INPUT_OPTICAL  0
#define RME96_INPUT_COAXIAL  1
#define RME96_INPUT_INTERNAL 2
#define RME96_INPUT_XLR      3
#define RME96_INPUT_ANALOG   4

/* Clock modes */
#define RME96_CLOCKMODE_SLAVE     0
#define RME96_CLOCKMODE_MASTER    1
#define RME96_CLOCKMODE_WORDCLOCK 2

/* Block sizes in bytes */
#define RME96_SMALL_BLOCK_SIZE 2048
#define RME96_LARGE_BLOCK_SIZE 8192

/* Volume control */
#define RME96_AD1852_VOL_BITS 14 #define RME96_AD1855_VOL_BITS 10 struct rme96 { spinlock_t lock; int irq; unsigned long port; void __iomem *iobase; u32 wcreg; /* cached write control register value */ u32 wcreg_spdif; /* S/PDIF setup */ u32 wcreg_spdif_stream; /* S/PDIF setup (temporary) */ u32 rcreg; /* cached read control register value */ u32 areg; /* cached additional register value */ u16 vol[2]; /* cached volume of analog output */ u8 rev; /* card revision number */ struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; int playback_frlog; /* log2 of framesize */ int capture_frlog; size_t playback_periodsize; /* in bytes, zero if not used */ size_t capture_periodsize; /* in bytes, zero if not used */ struct snd_card *card; struct snd_pcm *spdif_pcm; struct snd_pcm *adat_pcm; struct pci_dev *pci; struct snd_kcontrol *spdif_ctl; }; static DEFINE_PCI_DEVICE_TABLE(snd_rme96_ids) = { { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96), 0, }, { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8), 0, }, { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO), 0, }, { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST), 0, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_rme96_ids); #define RME96_ISPLAYING(rme96) ((rme96)->wcreg & RME96_WCR_START) #define RME96_ISRECORDING(rme96) ((rme96)->wcreg & RME96_WCR_START_2) #define RME96_HAS_ANALOG_IN(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST) #define RME96_HAS_ANALOG_OUT(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO || \ (rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST) #define RME96_DAC_IS_1852(rme96) (RME96_HAS_ANALOG_OUT(rme96) && (rme96)->rev >= 4) #define RME96_DAC_IS_1855(rme96) (((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && (rme96)->rev < 4) || \ ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO && (rme96)->rev == 2)) #define RME96_185X_MAX_OUT(rme96) ((1 << (RME96_DAC_IS_1852(rme96) ? 
RME96_AD1852_VOL_BITS : RME96_AD1855_VOL_BITS)) - 1) static int snd_rme96_playback_prepare(struct snd_pcm_substream *substream); static int snd_rme96_capture_prepare(struct snd_pcm_substream *substream); static int snd_rme96_playback_trigger(struct snd_pcm_substream *substream, int cmd); static int snd_rme96_capture_trigger(struct snd_pcm_substream *substream, int cmd); static snd_pcm_uframes_t snd_rme96_playback_pointer(struct snd_pcm_substream *substream); static snd_pcm_uframes_t snd_rme96_capture_pointer(struct snd_pcm_substream *substream); static void snd_rme96_proc_init(struct rme96 *rme96); static int snd_rme96_create_switches(struct snd_card *card, struct rme96 *rme96); static int snd_rme96_getinputtype(struct rme96 *rme96); static inline unsigned int snd_rme96_playback_ptr(struct rme96 *rme96) { return (readl(rme96->iobase + RME96_IO_GET_PLAY_POS) & RME96_RCR_AUDIO_ADDR_MASK) >> rme96->playback_frlog; } static inline unsigned int snd_rme96_capture_ptr(struct rme96 *rme96) { return (readl(rme96->iobase + RME96_IO_GET_REC_POS) & RME96_RCR_AUDIO_ADDR_MASK) >> rme96->capture_frlog; } static int snd_rme96_playback_silence(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ snd_pcm_uframes_t pos, snd_pcm_uframes_t count) { struct rme96 *rme96 = snd_pcm_substream_chip(substream); count <<= rme96->playback_frlog; pos <<= rme96->playback_frlog; memset_io(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, 0, count); return 0; } static int snd_rme96_playback_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count) { struct rme96 *rme96 = snd_pcm_substream_chip(substream); count <<= rme96->playback_frlog; pos <<= rme96->playback_frlog; copy_from_user_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, src, count); return 0; } static int snd_rme96_capture_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */ 
snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count) { struct rme96 *rme96 = snd_pcm_substream_chip(substream); count <<= rme96->capture_frlog; pos <<= rme96->capture_frlog; copy_to_user_fromio(dst, rme96->iobase + RME96_IO_REC_BUFFER + pos, count); return 0; } /* * Digital output capabilities (S/PDIF) */ static struct snd_pcm_hardware snd_rme96_playback_spdif_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000), .rate_min = 32000, .rate_max = 96000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = RME96_BUFFER_SIZE, .period_bytes_min = RME96_SMALL_BLOCK_SIZE, .period_bytes_max = RME96_LARGE_BLOCK_SIZE, .periods_min = RME96_BUFFER_SIZE / RME96_LARGE_BLOCK_SIZE, .periods_max = RME96_BUFFER_SIZE / RME96_SMALL_BLOCK_SIZE, .fifo_size = 0, }; /* * Digital input capabilities (S/PDIF) */ static struct snd_pcm_hardware snd_rme96_capture_spdif_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000), .rate_min = 32000, .rate_max = 96000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = RME96_BUFFER_SIZE, .period_bytes_min = RME96_SMALL_BLOCK_SIZE, .period_bytes_max = RME96_LARGE_BLOCK_SIZE, .periods_min = RME96_BUFFER_SIZE / RME96_LARGE_BLOCK_SIZE, .periods_max = RME96_BUFFER_SIZE / RME96_SMALL_BLOCK_SIZE, .fifo_size = 0, }; /* * Digital output capabilities (ADAT) */ static struct snd_pcm_hardware snd_rme96_playback_adat_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | 
SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 44100, .rate_max = 48000, .channels_min = 8, .channels_max = 8, .buffer_bytes_max = RME96_BUFFER_SIZE, .period_bytes_min = RME96_SMALL_BLOCK_SIZE, .period_bytes_max = RME96_LARGE_BLOCK_SIZE, .periods_min = RME96_BUFFER_SIZE / RME96_LARGE_BLOCK_SIZE, .periods_max = RME96_BUFFER_SIZE / RME96_SMALL_BLOCK_SIZE, .fifo_size = 0, }; /* * Digital input capabilities (ADAT) */ static struct snd_pcm_hardware snd_rme96_capture_adat_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000), .rate_min = 44100, .rate_max = 48000, .channels_min = 8, .channels_max = 8, .buffer_bytes_max = RME96_BUFFER_SIZE, .period_bytes_min = RME96_SMALL_BLOCK_SIZE, .period_bytes_max = RME96_LARGE_BLOCK_SIZE, .periods_min = RME96_BUFFER_SIZE / RME96_LARGE_BLOCK_SIZE, .periods_max = RME96_BUFFER_SIZE / RME96_SMALL_BLOCK_SIZE, .fifo_size = 0, }; /* * The CDATA, CCLK and CLATCH bits can be used to write to the SPI interface * of the AD1852 or AD1852 D/A converter on the board. CDATA must be set up * on the falling edge of CCLK and be stable on the rising edge. The rising * edge of CLATCH after the last data bit clocks in the whole data word. * A fast processor could probably drive the SPI interface faster than the * DAC can handle (3MHz for the 1855, unknown for the 1852). The udelay(1) * limits the data rate to 500KHz and only causes a delay of 33 microsecs. * * NOTE: increased delay from 1 to 10, since there where problems setting * the volume. 
*/ static void snd_rme96_write_SPI(struct rme96 *rme96, u16 val) { int i; for (i = 0; i < 16; i++) { if (val & 0x8000) { rme96->areg |= RME96_AR_CDATA; } else { rme96->areg &= ~RME96_AR_CDATA; } rme96->areg &= ~(RME96_AR_CCLK | RME96_AR_CLATCH); writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); udelay(10); rme96->areg |= RME96_AR_CCLK; writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); udelay(10); val <<= 1; } rme96->areg &= ~(RME96_AR_CCLK | RME96_AR_CDATA); rme96->areg |= RME96_AR_CLATCH; writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); udelay(10); rme96->areg &= ~RME96_AR_CLATCH; writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); } static void snd_rme96_apply_dac_volume(struct rme96 *rme96) { if (RME96_DAC_IS_1852(rme96)) { snd_rme96_write_SPI(rme96, (rme96->vol[0] << 2) | 0x0); snd_rme96_write_SPI(rme96, (rme96->vol[1] << 2) | 0x2); } else if (RME96_DAC_IS_1855(rme96)) { snd_rme96_write_SPI(rme96, (rme96->vol[0] & 0x3FF) | 0x000); snd_rme96_write_SPI(rme96, (rme96->vol[1] & 0x3FF) | 0x400); } } static void snd_rme96_reset_dac(struct rme96 *rme96) { writel(rme96->wcreg | RME96_WCR_PD, rme96->iobase + RME96_IO_CONTROL_REGISTER); writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); } static int snd_rme96_getmontracks(struct rme96 *rme96) { return ((rme96->wcreg >> RME96_WCR_BITPOS_MONITOR_0) & 1) + (((rme96->wcreg >> RME96_WCR_BITPOS_MONITOR_1) & 1) << 1); } static int snd_rme96_setmontracks(struct rme96 *rme96, int montracks) { if (montracks & 1) { rme96->wcreg |= RME96_WCR_MONITOR_0; } else { rme96->wcreg &= ~RME96_WCR_MONITOR_0; } if (montracks & 2) { rme96->wcreg |= RME96_WCR_MONITOR_1; } else { rme96->wcreg &= ~RME96_WCR_MONITOR_1; } writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); return 0; } static int snd_rme96_getattenuation(struct rme96 *rme96) { return ((rme96->wcreg >> RME96_WCR_BITPOS_GAIN_0) & 1) + (((rme96->wcreg >> RME96_WCR_BITPOS_GAIN_1) & 1) << 1); } static int 
snd_rme96_setattenuation(struct rme96 *rme96, int attenuation) { switch (attenuation) { case 0: rme96->wcreg = (rme96->wcreg & ~RME96_WCR_GAIN_0) & ~RME96_WCR_GAIN_1; break; case 1: rme96->wcreg = (rme96->wcreg | RME96_WCR_GAIN_0) & ~RME96_WCR_GAIN_1; break; case 2: rme96->wcreg = (rme96->wcreg & ~RME96_WCR_GAIN_0) | RME96_WCR_GAIN_1; break; case 3: rme96->wcreg = (rme96->wcreg | RME96_WCR_GAIN_0) | RME96_WCR_GAIN_1; break; default: return -EINVAL; } writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); return 0; } static int snd_rme96_capture_getrate(struct rme96 *rme96, int *is_adat) { int n, rate; *is_adat = 0; if (rme96->areg & RME96_AR_ANALOG) { /* Analog input, overrides S/PDIF setting */ n = ((rme96->areg >> RME96_AR_BITPOS_F0) & 1) + (((rme96->areg >> RME96_AR_BITPOS_F1) & 1) << 1); switch (n) { case 1: rate = 32000; break; case 2: rate = 44100; break; case 3: rate = 48000; break; default: return -1; } return (rme96->areg & RME96_AR_BITPOS_F2) ? rate << 1 : rate; } rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER); if (rme96->rcreg & RME96_RCR_LOCK) { /* ADAT rate */ *is_adat = 1; if (rme96->rcreg & RME96_RCR_T_OUT) { return 48000; } return 44100; } if (rme96->rcreg & RME96_RCR_VERF) { return -1; } /* S/PDIF rate */ n = ((rme96->rcreg >> RME96_RCR_BITPOS_F0) & 1) + (((rme96->rcreg >> RME96_RCR_BITPOS_F1) & 1) << 1) + (((rme96->rcreg >> RME96_RCR_BITPOS_F2) & 1) << 2); switch (n) { case 0: if (rme96->rcreg & RME96_RCR_T_OUT) { return 64000; } return -1; case 3: return 96000; case 4: return 88200; case 5: return 48000; case 6: return 44100; case 7: return 32000; default: break; } return -1; } static int snd_rme96_playback_getrate(struct rme96 *rme96) { int rate, dummy; if (!(rme96->wcreg & RME96_WCR_MASTER) && snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG && (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0) { /* slave clock */ return rate; } rate = ((rme96->wcreg >> RME96_WCR_BITPOS_FREQ_0) & 1) + (((rme96->wcreg >> 
RME96_WCR_BITPOS_FREQ_1) & 1) << 1); switch (rate) { case 1: rate = 32000; break; case 2: rate = 44100; break; case 3: rate = 48000; break; default: return -1; } return (rme96->wcreg & RME96_WCR_DS) ? rate << 1 : rate; } static int snd_rme96_playback_setrate(struct rme96 *rme96, int rate) { int ds; ds = rme96->wcreg & RME96_WCR_DS; switch (rate) { case 32000: rme96->wcreg &= ~RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_0) & ~RME96_WCR_FREQ_1; break; case 44100: rme96->wcreg &= ~RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_1) & ~RME96_WCR_FREQ_0; break; case 48000: rme96->wcreg &= ~RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_0) | RME96_WCR_FREQ_1; break; case 64000: rme96->wcreg |= RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_0) & ~RME96_WCR_FREQ_1; break; case 88200: rme96->wcreg |= RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_1) & ~RME96_WCR_FREQ_0; break; case 96000: rme96->wcreg |= RME96_WCR_DS; rme96->wcreg = (rme96->wcreg | RME96_WCR_FREQ_0) | RME96_WCR_FREQ_1; break; default: return -EINVAL; } if ((!ds && rme96->wcreg & RME96_WCR_DS) || (ds && !(rme96->wcreg & RME96_WCR_DS))) { /* change to/from double-speed: reset the DAC (if available) */ snd_rme96_reset_dac(rme96); } else { writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); } return 0; } static int snd_rme96_capture_analog_setrate(struct rme96 *rme96, int rate) { switch (rate) { case 32000: rme96->areg = ((rme96->areg | RME96_AR_FREQPAD_0) & ~RME96_AR_FREQPAD_1) & ~RME96_AR_FREQPAD_2; break; case 44100: rme96->areg = ((rme96->areg & ~RME96_AR_FREQPAD_0) | RME96_AR_FREQPAD_1) & ~RME96_AR_FREQPAD_2; break; case 48000: rme96->areg = ((rme96->areg | RME96_AR_FREQPAD_0) | RME96_AR_FREQPAD_1) & ~RME96_AR_FREQPAD_2; break; case 64000: if (rme96->rev < 4) { return -EINVAL; } rme96->areg = ((rme96->areg | RME96_AR_FREQPAD_0) & ~RME96_AR_FREQPAD_1) | RME96_AR_FREQPAD_2; break; case 88200: if (rme96->rev < 4) { return -EINVAL; 
} rme96->areg = ((rme96->areg & ~RME96_AR_FREQPAD_0) | RME96_AR_FREQPAD_1) | RME96_AR_FREQPAD_2; break; case 96000: rme96->areg = ((rme96->areg | RME96_AR_FREQPAD_0) | RME96_AR_FREQPAD_1) | RME96_AR_FREQPAD_2; break; default: return -EINVAL; } writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); return 0; } static int snd_rme96_setclockmode(struct rme96 *rme96, int mode) { switch (mode) { case RME96_CLOCKMODE_SLAVE: /* AutoSync */ rme96->wcreg &= ~RME96_WCR_MASTER; rme96->areg &= ~RME96_AR_WSEL; break; case RME96_CLOCKMODE_MASTER: /* Internal */ rme96->wcreg |= RME96_WCR_MASTER; rme96->areg &= ~RME96_AR_WSEL; break; case RME96_CLOCKMODE_WORDCLOCK: /* Word clock is a master mode */ rme96->wcreg |= RME96_WCR_MASTER; rme96->areg |= RME96_AR_WSEL; break; default: return -EINVAL; } writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); return 0; } static int snd_rme96_getclockmode(struct rme96 *rme96) { if (rme96->areg & RME96_AR_WSEL) { return RME96_CLOCKMODE_WORDCLOCK; } return (rme96->wcreg & RME96_WCR_MASTER) ? 
RME96_CLOCKMODE_MASTER : RME96_CLOCKMODE_SLAVE;
}

/*
 * Select the capture input source.  XLR and analog inputs exist only
 * on certain card models/revisions; selecting the analog input also
 * clamps the ADC rate on pre-rev-4 hardware.
 */
static int
snd_rme96_setinputtype(struct rme96 *rme96,
               int type)
{
    int n;

    switch (type) {
    case RME96_INPUT_OPTICAL:
        rme96->wcreg = (rme96->wcreg & ~RME96_WCR_INP_0) &
            ~RME96_WCR_INP_1;
        break;
    case RME96_INPUT_COAXIAL:
        rme96->wcreg = (rme96->wcreg | RME96_WCR_INP_0) &
            ~RME96_WCR_INP_1;
        break;
    case RME96_INPUT_INTERNAL:
        rme96->wcreg = (rme96->wcreg & ~RME96_WCR_INP_0) |
            RME96_WCR_INP_1;
        break;
    case RME96_INPUT_XLR:
        if ((rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST &&
             rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PRO) ||
            (rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST &&
             rme96->rev > 4))
        {
            /* Only Digi96/8 PRO and Digi96/8 PAD supports XLR */
            return -EINVAL;
        }
        rme96->wcreg = (rme96->wcreg | RME96_WCR_INP_0) |
            RME96_WCR_INP_1;
        break;
    case RME96_INPUT_ANALOG:
        if (!RME96_HAS_ANALOG_IN(rme96)) {
            return -EINVAL;
        }
        rme96->areg |= RME96_AR_ANALOG;
        writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
        if (rme96->rev < 4) {
            /*
             * Revision less than 004 does not support 64 and
             * 88.2 kHz
             */
            if (snd_rme96_capture_getrate(rme96, &n) == 88200) {
                snd_rme96_capture_analog_setrate(rme96, 44100);
            }
            if (snd_rme96_capture_getrate(rme96, &n) == 64000) {
                snd_rme96_capture_analog_setrate(rme96, 32000);
            }
        }
        return 0;
    default:
        return -EINVAL;
    }
    /* leaving analog input: clear the analog-enable bit again */
    if (type != RME96_INPUT_ANALOG && RME96_HAS_ANALOG_IN(rme96)) {
        rme96->areg &= ~RME96_AR_ANALOG;
        writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
    }
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    return 0;
}

/* Decode the current input selection from the cached registers. */
static int
snd_rme96_getinputtype(struct rme96 *rme96)
{
    if (rme96->areg & RME96_AR_ANALOG) {
        return RME96_INPUT_ANALOG;
    }
    return ((rme96->wcreg >> RME96_WCR_BITPOS_INP_0) & 1) +
        (((rme96->wcreg >> RME96_WCR_BITPOS_INP_1) & 1) << 1);
}

/*
 * Cache log2(frame size in bytes) for the given channel count and the
 * current sample format; used to convert between frames and bytes.
 */
static void
snd_rme96_setframelog(struct rme96 *rme96,
              int n_channels,
              int is_playback)
{
    int frlog;

    if (n_channels == 2) {
        frlog = 1;
    } else {
        /* assume 8 channels */
        frlog = 3;
    }
    if (is_playback) {
frlog += (rme96->wcreg & RME96_WCR_MODE24) ? 2 : 1;
        rme96->playback_frlog = frlog;
    } else {
        frlog += (rme96->wcreg & RME96_WCR_MODE24_2) ? 2 : 1;
        rme96->capture_frlog = frlog;
    }
}

/* Select 16- or 24/32-bit playback sample format. */
static int
snd_rme96_playback_setformat(struct rme96 *rme96,
                 int format)
{
    switch (format) {
    case SNDRV_PCM_FORMAT_S16_LE:
        rme96->wcreg &= ~RME96_WCR_MODE24;
        break;
    case SNDRV_PCM_FORMAT_S32_LE:
        rme96->wcreg |= RME96_WCR_MODE24;
        break;
    default:
        return -EINVAL;
    }
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    return 0;
}

/* Select 16- or 24/32-bit capture sample format. */
static int
snd_rme96_capture_setformat(struct rme96 *rme96,
                int format)
{
    switch (format) {
    case SNDRV_PCM_FORMAT_S16_LE:
        rme96->wcreg &= ~RME96_WCR_MODE24_2;
        break;
    case SNDRV_PCM_FORMAT_S32_LE:
        rme96->wcreg |= RME96_WCR_MODE24_2;
        break;
    default:
        return -EINVAL;
    }
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    return 0;
}

/*
 * Select the hardware interrupt block size (only two period sizes are
 * supported) and re-enable period interrupts (clear IDIS).
 */
static void
snd_rme96_set_period_properties(struct rme96 *rme96,
                size_t period_bytes)
{
    switch (period_bytes) {
    case RME96_LARGE_BLOCK_SIZE:
        rme96->wcreg &= ~RME96_WCR_ISEL;
        break;
    case RME96_SMALL_BLOCK_SIZE:
        rme96->wcreg |= RME96_WCR_ISEL;
        break;
    default:
        snd_BUG();
        break;
    }
    rme96->wcreg &= ~RME96_WCR_IDIS;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}

/*
 * hw_params for playback: map the PCM buffer onto the card's on-board
 * memory window, validate/program rate and format, fix the period
 * size, and apply the per-stream S/PDIF status bits.
 */
static int
snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
                 struct snd_pcm_hw_params *params)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    int err, rate, dummy;

    /* the DMA buffer is the card's memory-mapped playback window */
    runtime->dma_area = (void __force *)(rme96->iobase +
                         RME96_IO_PLAY_BUFFER);
    runtime->dma_addr = rme96->port + RME96_IO_PLAY_BUFFER;
    runtime->dma_bytes = RME96_BUFFER_SIZE;
    spin_lock_irq(&rme96->lock);
    if (!(rme96->wcreg & RME96_WCR_MASTER) &&
        snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
        (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
    {
        /* slave clock */
        if ((int)params_rate(params) != rate) {
            spin_unlock_irq(&rme96->lock);
            return -EIO;
        }
    } else if ((err = snd_rme96_playback_setrate(rme96,
params_rate(params))) < 0) {
        spin_unlock_irq(&rme96->lock);
        return err;
    }
    if ((err = snd_rme96_playback_setformat(rme96,
            params_format(params))) < 0) {
        spin_unlock_irq(&rme96->lock);
        return err;
    }
    snd_rme96_setframelog(rme96, params_channels(params), 1);
    if (rme96->capture_periodsize != 0) {
        /* capture already fixed the period size; both must agree */
        if (params_period_size(params) << rme96->playback_frlog !=
            rme96->capture_periodsize)
        {
            spin_unlock_irq(&rme96->lock);
            return -EBUSY;
        }
    }
    rme96->playback_periodsize =
        params_period_size(params) << rme96->playback_frlog;
    snd_rme96_set_period_properties(rme96, rme96->playback_periodsize);
    /* S/PDIF setup */
    if ((rme96->wcreg & RME96_WCR_ADAT) == 0) {
        rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
        /* note: the |= assignment is performed inside the writel call */
        writel(rme96->wcreg |= rme96->wcreg_spdif_stream,
               rme96->iobase + RME96_IO_CONTROL_REGISTER);
    }
    spin_unlock_irq(&rme96->lock);
    return 0;
}

/*
 * hw_params for capture: map the capture window, program the format,
 * validate the analog or digital input rate, and fix the period size.
 */
static int
snd_rme96_capture_hw_params(struct snd_pcm_substream *substream,
                struct snd_pcm_hw_params *params)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    int err, isadat, rate;

    runtime->dma_area = (void __force *)(rme96->iobase +
                         RME96_IO_REC_BUFFER);
    runtime->dma_addr = rme96->port + RME96_IO_REC_BUFFER;
    runtime->dma_bytes = RME96_BUFFER_SIZE;
    spin_lock_irq(&rme96->lock);
    if ((err = snd_rme96_capture_setformat(rme96,
            params_format(params))) < 0) {
        spin_unlock_irq(&rme96->lock);
        return err;
    }
    if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
        if ((err = snd_rme96_capture_analog_setrate(rme96,
                params_rate(params))) < 0)
        {
            spin_unlock_irq(&rme96->lock);
            return err;
        }
    } else if ((rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0) {
        /* digital input: rate and S/PDIF-vs-ADAT must match the signal */
        if ((int)params_rate(params) != rate) {
            spin_unlock_irq(&rme96->lock);
            return -EIO;
        }
        if ((isadat && runtime->hw.channels_min == 2) ||
            (!isadat && runtime->hw.channels_min == 8))
        {
            spin_unlock_irq(&rme96->lock);
            return -EIO;
        }
    }
    snd_rme96_setframelog(rme96, params_channels(params), 0);
    if (rme96->playback_periodsize != 0) {
        if
(params_period_size(params) << rme96->capture_frlog !=
            rme96->playback_periodsize)
        {
            spin_unlock_irq(&rme96->lock);
            return -EBUSY;
        }
    }
    rme96->capture_periodsize =
        params_period_size(params) << rme96->capture_frlog;
    snd_rme96_set_period_properties(rme96, rme96->capture_periodsize);
    spin_unlock_irq(&rme96->lock);
    return 0;
}

/* Start playback; unless resuming from pause, rewind the play pointer. */
static void
snd_rme96_playback_start(struct rme96 *rme96,
             int from_pause)
{
    if (!from_pause) {
        writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
    }
    rme96->wcreg |= RME96_WCR_START;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}

/* Start capture; unless resuming from pause, rewind the record pointer. */
static void
snd_rme96_capture_start(struct rme96 *rme96,
            int from_pause)
{
    if (!from_pause) {
        writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
    }
    rme96->wcreg |= RME96_WCR_START_2;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}

static void
snd_rme96_playback_stop(struct rme96 *rme96)
{
    /*
     * Check if there is an unconfirmed IRQ, if so confirm it, or else
     * the hardware will not stop generating interrupts
     */
    rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
    if (rme96->rcreg & RME96_RCR_IRQ) {
        writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
    }
    rme96->wcreg &= ~RME96_WCR_START;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}

/* Stop capture, acknowledging any pending capture IRQ first. */
static void
snd_rme96_capture_stop(struct rme96 *rme96)
{
    rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
    if (rme96->rcreg & RME96_RCR_IRQ_2) {
        writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
    }
    rme96->wcreg &= ~RME96_WCR_START_2;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}

/*
 * Shared-IRQ handler: report elapsed periods for whichever direction
 * raised the interrupt, then acknowledge the per-direction IRQ bits.
 */
static irqreturn_t
snd_rme96_interrupt(int irq,
            void *dev_id)
{
    struct rme96 *rme96 = (struct rme96 *)dev_id;

    rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
    /* fastpath out, to ease interrupt sharing */
    if (!((rme96->rcreg & RME96_RCR_IRQ) ||
          (rme96->rcreg & RME96_RCR_IRQ_2)))
    {
        return IRQ_NONE;
    }

    if (rme96->rcreg & RME96_RCR_IRQ) {
        /* playback */
snd_pcm_period_elapsed(rme96->playback_substream);
        writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
    }
    if (rme96->rcreg & RME96_RCR_IRQ_2) {
        /* capture */
        snd_pcm_period_elapsed(rme96->capture_substream);
        writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
    }
    return IRQ_HANDLED;
}

/* the only two period sizes the hardware supports */
static unsigned int period_bytes[] = { RME96_SMALL_BLOCK_SIZE,
                       RME96_LARGE_BLOCK_SIZE };

static struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = {
    .count = ARRAY_SIZE(period_bytes),
    .list = period_bytes,
    .mask = 0
};

/*
 * Constrain the buffer size to the fixed on-card buffer and the period
 * size either to whatever the other direction already uses, or to the
 * two hardware-supported sizes.
 */
static void
rme96_set_buffer_size_constraint(struct rme96 *rme96,
                 struct snd_pcm_runtime *runtime)
{
    unsigned int size;

    snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                     RME96_BUFFER_SIZE, RME96_BUFFER_SIZE);
    if ((size = rme96->playback_periodsize) != 0 ||
        (size = rme96->capture_periodsize) != 0)
        snd_pcm_hw_constraint_minmax(runtime,
                         SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                         size, size);
    else
        snd_pcm_hw_constraint_list(runtime, 0,
                       SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                       &hw_constraints_period_bytes);
}

/* PCM open for S/PDIF playback; activates the S/PDIF status control. */
static int
snd_rme96_playback_spdif_open(struct snd_pcm_substream *substream)
{
    int rate, dummy;
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;

    spin_lock_irq(&rme96->lock);
    if (rme96->playback_substream != NULL) {
        spin_unlock_irq(&rme96->lock);
        return -EBUSY;
    }
    rme96->wcreg &= ~RME96_WCR_ADAT;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    rme96->playback_substream = substream;
    spin_unlock_irq(&rme96->lock);

    runtime->hw = snd_rme96_playback_spdif_info;
    if (!(rme96->wcreg & RME96_WCR_MASTER) &&
        snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
        (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
    {
        /* slave clock */
        runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
        runtime->hw.rate_min = rate;
        runtime->hw.rate_max = rate;
    }
    rme96_set_buffer_size_constraint(rme96, runtime);

    rme96->wcreg_spdif_stream = rme96->wcreg_spdif;
    rme96->spdif_ctl->vd[0].access &=
~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
    /* the per-stream S/PDIF control becomes active while stream is open */
    snd_ctl_notify(rme96->card, SNDRV_CTL_EVENT_MASK_VALUE |
               SNDRV_CTL_EVENT_MASK_INFO, &rme96->spdif_ctl->id);
    return 0;
}

/* PCM open for S/PDIF capture; refuses ADAT-format input signals. */
static int
snd_rme96_capture_spdif_open(struct snd_pcm_substream *substream)
{
    int isadat, rate;
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;

    runtime->hw = snd_rme96_capture_spdif_info;
    if (snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
        (rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0)
    {
        if (isadat) {
            return -EIO;
        }
        runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
        runtime->hw.rate_min = rate;
        runtime->hw.rate_max = rate;
    }

    spin_lock_irq(&rme96->lock);
    if (rme96->capture_substream != NULL) {
        spin_unlock_irq(&rme96->lock);
        return -EBUSY;
    }
    rme96->capture_substream = substream;
    spin_unlock_irq(&rme96->lock);

    rme96_set_buffer_size_constraint(rme96, runtime);
    return 0;
}

/* PCM open for ADAT playback. */
static int
snd_rme96_playback_adat_open(struct snd_pcm_substream *substream)
{
    int rate, dummy;
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;

    spin_lock_irq(&rme96->lock);
    if (rme96->playback_substream != NULL) {
        spin_unlock_irq(&rme96->lock);
        return -EBUSY;
    }
    rme96->wcreg |= RME96_WCR_ADAT;
    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    rme96->playback_substream = substream;
    spin_unlock_irq(&rme96->lock);

    runtime->hw = snd_rme96_playback_adat_info;
    if (!(rme96->wcreg & RME96_WCR_MASTER) &&
        snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
        (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
    {
        /* slave clock */
        runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
        runtime->hw.rate_min = rate;
        runtime->hw.rate_max = rate;
    }
    rme96_set_buffer_size_constraint(rme96, runtime);
    return 0;
}

/* PCM open for ADAT capture; requires an ADAT-format input signal. */
static int
snd_rme96_capture_adat_open(struct snd_pcm_substream *substream)
{
    int isadat, rate;
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
runtime->hw = snd_rme96_capture_adat_info;
    if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
        /*
         * makes no sense to use analog input.  Note that analog
         * expansion cards AEB4/8-I are RME96_INPUT_INTERNAL
         */
        return -EIO;
    }
    if ((rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0) {
        if (!isadat) {
            return -EIO;
        }
        runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
        runtime->hw.rate_min = rate;
        runtime->hw.rate_max = rate;
    }

    spin_lock_irq(&rme96->lock);
    if (rme96->capture_substream != NULL) {
        spin_unlock_irq(&rme96->lock);
        return -EBUSY;
    }
    rme96->capture_substream = substream;
    spin_unlock_irq(&rme96->lock);

    rme96_set_buffer_size_constraint(rme96, runtime);
    return 0;
}

/*
 * PCM close for playback: stop the stream and, if it was the S/PDIF
 * stream, deactivate the per-stream S/PDIF status control again.
 */
static int
snd_rme96_playback_close(struct snd_pcm_substream *substream)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    int spdif = 0;

    spin_lock_irq(&rme96->lock);
    if (RME96_ISPLAYING(rme96)) {
        snd_rme96_playback_stop(rme96);
    }
    rme96->playback_substream = NULL;
    rme96->playback_periodsize = 0;
    spdif = (rme96->wcreg & RME96_WCR_ADAT) == 0;
    spin_unlock_irq(&rme96->lock);
    if (spdif) {
        rme96->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
        snd_ctl_notify(rme96->card, SNDRV_CTL_EVENT_MASK_VALUE |
                   SNDRV_CTL_EVENT_MASK_INFO, &rme96->spdif_ctl->id);
    }
    return 0;
}

/* PCM close for capture: stop the stream and release the substream. */
static int
snd_rme96_capture_close(struct snd_pcm_substream *substream)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);

    spin_lock_irq(&rme96->lock);
    if (RME96_ISRECORDING(rme96)) {
        snd_rme96_capture_stop(rme96);
    }
    rme96->capture_substream = NULL;
    rme96->capture_periodsize = 0;
    spin_unlock_irq(&rme96->lock);
    return 0;
}

/* prepare: ensure playback is stopped and rewind the play pointer. */
static int
snd_rme96_playback_prepare(struct snd_pcm_substream *substream)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);

    spin_lock_irq(&rme96->lock);
    if (RME96_ISPLAYING(rme96)) {
        snd_rme96_playback_stop(rme96);
    }
    writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
    spin_unlock_irq(&rme96->lock);
    return 0;
}

/* prepare: ensure capture is stopped and rewind the record pointer. */
static int
snd_rme96_capture_prepare(struct snd_pcm_substream *substream)
{
    struct
rme96 *rme96 = snd_pcm_substream_chip(substream);

    spin_lock_irq(&rme96->lock);
    if (RME96_ISRECORDING(rme96)) {
        snd_rme96_capture_stop(rme96);
    }
    writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
    spin_unlock_irq(&rme96->lock);
    return 0;
}

/* PCM trigger callback for playback (start/stop/pause). */
static int
snd_rme96_playback_trigger(struct snd_pcm_substream *substream,
               int cmd)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);

    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
        if (!RME96_ISPLAYING(rme96)) {
            if (substream != rme96->playback_substream) {
                return -EBUSY;
            }
            snd_rme96_playback_start(rme96, 0);
        }
        break;
    case SNDRV_PCM_TRIGGER_STOP:
        if (RME96_ISPLAYING(rme96)) {
            if (substream != rme96->playback_substream) {
                return -EBUSY;
            }
            snd_rme96_playback_stop(rme96);
        }
        break;
    case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        if (RME96_ISPLAYING(rme96)) {
            snd_rme96_playback_stop(rme96);
        }
        break;
    case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        if (!RME96_ISPLAYING(rme96)) {
            snd_rme96_playback_start(rme96, 1);
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

/* PCM trigger callback for capture (start/stop/pause). */
static int
snd_rme96_capture_trigger(struct snd_pcm_substream *substream,
              int cmd)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);

    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
        if (!RME96_ISRECORDING(rme96)) {
            if (substream != rme96->capture_substream) {
                return -EBUSY;
            }
            snd_rme96_capture_start(rme96, 0);
        }
        break;
    case SNDRV_PCM_TRIGGER_STOP:
        if (RME96_ISRECORDING(rme96)) {
            if (substream != rme96->capture_substream) {
                return -EBUSY;
            }
            snd_rme96_capture_stop(rme96);
        }
        break;
    case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        if (RME96_ISRECORDING(rme96)) {
            snd_rme96_capture_stop(rme96);
        }
        break;
    case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        if (!RME96_ISRECORDING(rme96)) {
            snd_rme96_capture_start(rme96, 1);
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

/* current playback position in frames */
static snd_pcm_uframes_t
snd_rme96_playback_pointer(struct snd_pcm_substream *substream)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    return snd_rme96_playback_ptr(rme96);
}

/* current capture position in frames */
static snd_pcm_uframes_t
snd_rme96_capture_pointer(struct
snd_pcm_substream *substream)
{
    struct rme96 *rme96 = snd_pcm_substream_chip(substream);
    return snd_rme96_capture_ptr(rme96);
}

/* PCM callback tables for the four stream types */
static struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
    .open =     snd_rme96_playback_spdif_open,
    .close =    snd_rme96_playback_close,
    .ioctl =    snd_pcm_lib_ioctl,
    .hw_params = snd_rme96_playback_hw_params,
    .prepare =  snd_rme96_playback_prepare,
    .trigger =  snd_rme96_playback_trigger,
    .pointer =  snd_rme96_playback_pointer,
    .copy =     snd_rme96_playback_copy,
    .silence =  snd_rme96_playback_silence,
    .mmap =     snd_pcm_lib_mmap_iomem,
};

static struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
    .open =     snd_rme96_capture_spdif_open,
    .close =    snd_rme96_capture_close,
    .ioctl =    snd_pcm_lib_ioctl,
    .hw_params = snd_rme96_capture_hw_params,
    .prepare =  snd_rme96_capture_prepare,
    .trigger =  snd_rme96_capture_trigger,
    .pointer =  snd_rme96_capture_pointer,
    .copy =     snd_rme96_capture_copy,
    .mmap =     snd_pcm_lib_mmap_iomem,
};

static struct snd_pcm_ops snd_rme96_playback_adat_ops = {
    .open =     snd_rme96_playback_adat_open,
    .close =    snd_rme96_playback_close,
    .ioctl =    snd_pcm_lib_ioctl,
    .hw_params = snd_rme96_playback_hw_params,
    .prepare =  snd_rme96_playback_prepare,
    .trigger =  snd_rme96_playback_trigger,
    .pointer =  snd_rme96_playback_pointer,
    .copy =     snd_rme96_playback_copy,
    .silence =  snd_rme96_playback_silence,
    .mmap =     snd_pcm_lib_mmap_iomem,
};

static struct snd_pcm_ops snd_rme96_capture_adat_ops = {
    .open =     snd_rme96_capture_adat_open,
    .close =    snd_rme96_capture_close,
    .ioctl =    snd_pcm_lib_ioctl,
    .hw_params = snd_rme96_capture_hw_params,
    .prepare =  snd_rme96_capture_prepare,
    .trigger =  snd_rme96_capture_trigger,
    .pointer =  snd_rme96_capture_pointer,
    .copy =     snd_rme96_capture_copy,
    .mmap =     snd_pcm_lib_mmap_iomem,
};

/*
 * Release all card resources: stop streams, disable the DAC, free the
 * IRQ, unmap the register window and release the PCI region.
 */
static void
snd_rme96_free(void *private_data)
{
    struct rme96 *rme96 = (struct rme96 *)private_data;

    if (rme96 == NULL) {
        return;
    }
    if (rme96->irq >= 0) {
        snd_rme96_playback_stop(rme96);
        snd_rme96_capture_stop(rme96);
        rme96->areg &= ~RME96_AR_DAC_EN;
writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
        free_irq(rme96->irq, (void *)rme96);
        rme96->irq = -1;
    }
    if (rme96->iobase) {
        iounmap(rme96->iobase);
        rme96->iobase = NULL;
    }
    if (rme96->port) {
        pci_release_regions(rme96->pci);
        rme96->port = 0;
    }
    pci_disable_device(rme96->pci);
}

/* private_free for the S/PDIF PCM device */
static void
snd_rme96_free_spdif_pcm(struct snd_pcm *pcm)
{
    struct rme96 *rme96 = pcm->private_data;
    rme96->spdif_pcm = NULL;
}

/* private_free for the ADAT PCM device */
static void
snd_rme96_free_adat_pcm(struct snd_pcm *pcm)
{
    struct rme96 *rme96 = pcm->private_data;
    rme96->adat_pcm = NULL;
}

/*
 * Initialize the card: enable the PCI device, map its resources, grab
 * the (shared) IRQ, create the PCM devices and program register
 * defaults.  Cleanup on failure is handled by snd_rme96_free().
 */
static int
snd_rme96_create(struct rme96 *rme96)
{
    struct pci_dev *pci = rme96->pci;
    int err;

    rme96->irq = -1;
    spin_lock_init(&rme96->lock);

    if ((err = pci_enable_device(pci)) < 0)
        return err;

    if ((err = pci_request_regions(pci, "RME96")) < 0)
        return err;
    rme96->port = pci_resource_start(rme96->pci, 0);

    rme96->iobase = ioremap_nocache(rme96->port, RME96_IO_SIZE);
    if (!rme96->iobase) {
        snd_printk(KERN_ERR "unable to remap memory region 0x%lx-0x%lx\n",
               rme96->port, rme96->port + RME96_IO_SIZE - 1);
        return -ENOMEM;
    }

    if (request_irq(pci->irq, snd_rme96_interrupt, IRQF_SHARED,
            KBUILD_MODNAME, rme96)) {
        snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
        return -EBUSY;
    }
    rme96->irq = pci->irq;

    /* read the card's revision number */
    pci_read_config_byte(pci, 8, &rme96->rev);

    /* set up ALSA pcm device for S/PDIF */
    if ((err = snd_pcm_new(rme96->card, "Digi96 IEC958", 0,
                   1, 1, &rme96->spdif_pcm)) < 0)
    {
        return err;
    }
    rme96->spdif_pcm->private_data = rme96;
    rme96->spdif_pcm->private_free = snd_rme96_free_spdif_pcm;
    strcpy(rme96->spdif_pcm->name, "Digi96 IEC958");
    snd_pcm_set_ops(rme96->spdif_pcm, SNDRV_PCM_STREAM_PLAYBACK,
            &snd_rme96_playback_spdif_ops);
    snd_pcm_set_ops(rme96->spdif_pcm, SNDRV_PCM_STREAM_CAPTURE,
            &snd_rme96_capture_spdif_ops);

    rme96->spdif_pcm->info_flags = 0;

    /* set up ALSA pcm device for ADAT */
    if (pci->device == PCI_DEVICE_ID_RME_DIGI96) {
        /* ADAT is not available on the base model */
        rme96->adat_pcm = NULL;
    }
else {
        if ((err = snd_pcm_new(rme96->card, "Digi96 ADAT", 1,
                       1, 1, &rme96->adat_pcm)) < 0)
        {
            return err;
        }
        rme96->adat_pcm->private_data = rme96;
        rme96->adat_pcm->private_free = snd_rme96_free_adat_pcm;
        strcpy(rme96->adat_pcm->name, "Digi96 ADAT");
        snd_pcm_set_ops(rme96->adat_pcm, SNDRV_PCM_STREAM_PLAYBACK,
                &snd_rme96_playback_adat_ops);
        snd_pcm_set_ops(rme96->adat_pcm, SNDRV_PCM_STREAM_CAPTURE,
                &snd_rme96_capture_adat_ops);

        rme96->adat_pcm->info_flags = 0;
    }

    rme96->playback_periodsize = 0;
    rme96->capture_periodsize = 0;

    /* make sure playback/capture is stopped, if by some reason active */
    snd_rme96_playback_stop(rme96);
    snd_rme96_capture_stop(rme96);

    /* set default values in registers */
    rme96->wcreg =
        RME96_WCR_FREQ_1 | /* set 44.1 kHz playback */
        RME96_WCR_SEL |    /* normal playback */
        RME96_WCR_MASTER | /* set to master clock mode */
        RME96_WCR_INP_0;   /* set coaxial input */

    rme96->areg = RME96_AR_FREQPAD_1; /* set 44.1 kHz analog capture */

    writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);

    /* reset the ADC (pulse the power-down bit) */
    writel(rme96->areg | RME96_AR_PD2,
           rme96->iobase + RME96_IO_ADDITIONAL_REG);
    writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);

    /* reset and enable the DAC (order is important).
*/ snd_rme96_reset_dac(rme96); rme96->areg |= RME96_AR_DAC_EN; writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG); /* reset playback and record buffer pointers */ writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS); writel(0, rme96->iobase + RME96_IO_RESET_REC_POS); /* reset volume */ rme96->vol[0] = rme96->vol[1] = 0; if (RME96_HAS_ANALOG_OUT(rme96)) { snd_rme96_apply_dac_volume(rme96); } /* init switch interface */ if ((err = snd_rme96_create_switches(rme96->card, rme96)) < 0) { return err; } /* init proc interface */ snd_rme96_proc_init(rme96); return 0; } /* * proc interface */ static void snd_rme96_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int n; struct rme96 *rme96 = entry->private_data; rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER); snd_iprintf(buffer, rme96->card->longname); snd_iprintf(buffer, " (index #%d)\n", rme96->card->number + 1); snd_iprintf(buffer, "\nGeneral settings\n"); if (rme96->wcreg & RME96_WCR_IDIS) { snd_iprintf(buffer, " period size: N/A (interrupts " "disabled)\n"); } else if (rme96->wcreg & RME96_WCR_ISEL) { snd_iprintf(buffer, " period size: 2048 bytes\n"); } else { snd_iprintf(buffer, " period size: 8192 bytes\n"); } snd_iprintf(buffer, "\nInput settings\n"); switch (snd_rme96_getinputtype(rme96)) { case RME96_INPUT_OPTICAL: snd_iprintf(buffer, " input: optical"); break; case RME96_INPUT_COAXIAL: snd_iprintf(buffer, " input: coaxial"); break; case RME96_INPUT_INTERNAL: snd_iprintf(buffer, " input: internal"); break; case RME96_INPUT_XLR: snd_iprintf(buffer, " input: XLR"); break; case RME96_INPUT_ANALOG: snd_iprintf(buffer, " input: analog"); break; } if (snd_rme96_capture_getrate(rme96, &n) < 0) { snd_iprintf(buffer, "\n sample rate: no valid signal\n"); } else { if (n) { snd_iprintf(buffer, " (8 channels)\n"); } else { snd_iprintf(buffer, " (2 channels)\n"); } snd_iprintf(buffer, " sample rate: %d Hz\n", snd_rme96_capture_getrate(rme96, &n)); } if (rme96->wcreg & 
RME96_WCR_MODE24_2) {
        snd_iprintf(buffer, " sample format: 24 bit\n");
    } else {
        snd_iprintf(buffer, " sample format: 16 bit\n");
    }

    snd_iprintf(buffer, "\nOutput settings\n");
    if (rme96->wcreg & RME96_WCR_SEL) {
        snd_iprintf(buffer, " output signal: normal playback\n");
    } else {
        snd_iprintf(buffer, " output signal: same as input\n");
    }
    snd_iprintf(buffer, " sample rate: %d Hz\n",
            snd_rme96_playback_getrate(rme96));
    if (rme96->wcreg & RME96_WCR_MODE24) {
        snd_iprintf(buffer, " sample format: 24 bit\n");
    } else {
        snd_iprintf(buffer, " sample format: 16 bit\n");
    }
    /* clock source priority: word clock, internal master, autosync */
    if (rme96->areg & RME96_AR_WSEL) {
        snd_iprintf(buffer, " sample clock source: word clock\n");
    } else if (rme96->wcreg & RME96_WCR_MASTER) {
        snd_iprintf(buffer, " sample clock source: internal\n");
    } else if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
        snd_iprintf(buffer, " sample clock source: autosync (internal anyway due to analog input setting)\n");
    } else if (snd_rme96_capture_getrate(rme96, &n) < 0) {
        snd_iprintf(buffer, " sample clock source: autosync (internal anyway due to no valid signal)\n");
    } else {
        snd_iprintf(buffer, " sample clock source: autosync\n");
    }
    if (rme96->wcreg & RME96_WCR_PRO) {
        snd_iprintf(buffer, " format: AES/EBU (professional)\n");
    } else {
        snd_iprintf(buffer, " format: IEC958 (consumer)\n");
    }
    if (rme96->wcreg & RME96_WCR_EMP) {
        snd_iprintf(buffer, " emphasis: on\n");
    } else {
        snd_iprintf(buffer, " emphasis: off\n");
    }
    if (rme96->wcreg & RME96_WCR_DOLBY) {
        snd_iprintf(buffer, " non-audio (dolby): on\n");
    } else {
        snd_iprintf(buffer, " non-audio (dolby): off\n");
    }
    if (RME96_HAS_ANALOG_IN(rme96)) {
        snd_iprintf(buffer, "\nAnalog output settings\n");
        switch (snd_rme96_getmontracks(rme96)) {
        case RME96_MONITOR_TRACKS_1_2:
            snd_iprintf(buffer, " monitored ADAT tracks: 1+2\n");
            break;
        case RME96_MONITOR_TRACKS_3_4:
            snd_iprintf(buffer, " monitored ADAT tracks: 3+4\n");
            break;
        case RME96_MONITOR_TRACKS_5_6:
            snd_iprintf(buffer, " monitored ADAT tracks: 5+6\n");
            break;
        case
RME96_MONITOR_TRACKS_7_8:
            snd_iprintf(buffer, " monitored ADAT tracks: 7+8\n");
            break;
        }
        switch (snd_rme96_getattenuation(rme96)) {
        case RME96_ATTENUATION_0:
            snd_iprintf(buffer, " attenuation: 0 dB\n");
            break;
        case RME96_ATTENUATION_6:
            snd_iprintf(buffer, " attenuation: -6 dB\n");
            break;
        case RME96_ATTENUATION_12:
            snd_iprintf(buffer, " attenuation: -12 dB\n");
            break;
        case RME96_ATTENUATION_18:
            snd_iprintf(buffer, " attenuation: -18 dB\n");
            break;
        }
        snd_iprintf(buffer, " volume left: %u\n", rme96->vol[0]);
        snd_iprintf(buffer, " volume right: %u\n", rme96->vol[1]);
    }
}

/* register the /proc entry for this card */
static void
snd_rme96_proc_init(struct rme96 *rme96)
{
    struct snd_info_entry *entry;

    if (! snd_card_proc_new(rme96->card, "rme96", &entry))
        snd_info_set_text_ops(entry, rme96, snd_rme96_proc_read);
}

/*
 * control interface
 */

#define snd_rme96_info_loopback_control  snd_ctl_boolean_mono_info

/* "loopback" is the inverse of the WCR_SEL (normal playback) bit */
static int
snd_rme96_get_loopback_control(struct snd_kcontrol *kcontrol,
                   struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);

    spin_lock_irq(&rme96->lock);
    ucontrol->value.integer.value[0] =
        rme96->wcreg & RME96_WCR_SEL ? 0 : 1;
    spin_unlock_irq(&rme96->lock);
    return 0;
}

static int
snd_rme96_put_loopback_control(struct snd_kcontrol *kcontrol,
                   struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    unsigned int val;
    int change;

    val = ucontrol->value.integer.value[0] ?
0 : RME96_WCR_SEL;
    spin_lock_irq(&rme96->lock);
    val = (rme96->wcreg & ~RME96_WCR_SEL) | val;
    change = val != rme96->wcreg;
    rme96->wcreg = val;
    writel(val, rme96->iobase + RME96_IO_CONTROL_REGISTER);
    spin_unlock_irq(&rme96->lock);
    return change;
}

/*
 * Enumerated "input type" control: the visible items depend on the
 * card model (and, for the PAD/PST model, on the hardware revision).
 */
static int
snd_rme96_info_inputtype_control(struct snd_kcontrol *kcontrol,
                 struct snd_ctl_elem_info *uinfo)
{
    static char *_texts[5] = { "Optical", "Coaxial", "Internal", "XLR", "Analog" };
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    char *texts[5] = { _texts[0], _texts[1], _texts[2], _texts[3], _texts[4] };

    uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
    uinfo->count = 1;
    switch (rme96->pci->device) {
    case PCI_DEVICE_ID_RME_DIGI96:
    case PCI_DEVICE_ID_RME_DIGI96_8:
        uinfo->value.enumerated.items = 3;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
        uinfo->value.enumerated.items = 4;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
        if (rme96->rev > 4) {
            /* PST */
            uinfo->value.enumerated.items = 4;
            texts[3] = _texts[4]; /* Analog instead of XLR */
        } else {
            /* PAD */
            uinfo->value.enumerated.items = 5;
        }
        break;
    default:
        snd_BUG();
        break;
    }
    if (uinfo->value.enumerated.item > uinfo->value.enumerated.items - 1) {
        uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
    }
    strcpy(uinfo->value.enumerated.name,
           texts[uinfo->value.enumerated.item]);
    return 0;
}

static int
snd_rme96_get_inputtype_control(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    unsigned int items = 3;

    spin_lock_irq(&rme96->lock);
    ucontrol->value.enumerated.item[0] = snd_rme96_getinputtype(rme96);

    switch (rme96->pci->device) {
    case PCI_DEVICE_ID_RME_DIGI96:
    case PCI_DEVICE_ID_RME_DIGI96_8:
        items = 3;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
        items = 4;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
        if (rme96->rev > 4) {
            /* for handling PST case, (INPUT_ANALOG is moved to INPUT_XLR */
            if (ucontrol->value.enumerated.item[0] == RME96_INPUT_ANALOG) {
ucontrol->value.enumerated.item[0] = RME96_INPUT_XLR;
            }
            items = 4;
        } else {
            items = 5;
        }
        break;
    default:
        snd_BUG();
        break;
    }
    if (ucontrol->value.enumerated.item[0] >= items) {
        ucontrol->value.enumerated.item[0] = items - 1;
    }
    spin_unlock_irq(&rme96->lock);
    return 0;
}

static int
snd_rme96_put_inputtype_control(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    unsigned int val;
    int change, items = 3;

    switch (rme96->pci->device) {
    case PCI_DEVICE_ID_RME_DIGI96:
    case PCI_DEVICE_ID_RME_DIGI96_8:
        items = 3;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
        items = 4;
        break;
    case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
        if (rme96->rev > 4) {
            items = 4;
        } else {
            items = 5;
        }
        break;
    default:
        snd_BUG();
        break;
    }
    val = ucontrol->value.enumerated.item[0] % items;

    /* special case for PST */
    if (rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST &&
        rme96->rev > 4) {
        if (val == RME96_INPUT_XLR) {
            val = RME96_INPUT_ANALOG;
        }
    }

    spin_lock_irq(&rme96->lock);
    change = (int)val != snd_rme96_getinputtype(rme96);
    snd_rme96_setinputtype(rme96, val);
    spin_unlock_irq(&rme96->lock);
    return change;
}

/* Enumerated "clock mode" control: AutoSync / Internal / Word. */
static int
snd_rme96_info_clockmode_control(struct snd_kcontrol *kcontrol,
                 struct snd_ctl_elem_info *uinfo)
{
    static char *texts[3] = { "AutoSync", "Internal", "Word" };

    uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
    uinfo->count = 1;
    uinfo->value.enumerated.items = 3;
    if (uinfo->value.enumerated.item > 2) {
        uinfo->value.enumerated.item = 2;
    }
    strcpy(uinfo->value.enumerated.name,
           texts[uinfo->value.enumerated.item]);
    return 0;
}

static int
snd_rme96_get_clockmode_control(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);

    spin_lock_irq(&rme96->lock);
    ucontrol->value.enumerated.item[0] = snd_rme96_getclockmode(rme96);
    spin_unlock_irq(&rme96->lock);
    return 0;
}

static int
snd_rme96_put_clockmode_control(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value
*ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    unsigned int val;
    int change;

    val = ucontrol->value.enumerated.item[0] % 3;
    spin_lock_irq(&rme96->lock);
    change = (int)val != snd_rme96_getclockmode(rme96);
    snd_rme96_setclockmode(rme96, val);
    spin_unlock_irq(&rme96->lock);
    return change;
}

/* Enumerated monitor-output attenuation control (0..-18 dB). */
static int
snd_rme96_info_attenuation_control(struct snd_kcontrol *kcontrol,
                   struct snd_ctl_elem_info *uinfo)
{
    static char *texts[4] = { "0 dB", "-6 dB", "-12 dB", "-18 dB" };

    uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
    uinfo->count = 1;
    uinfo->value.enumerated.items = 4;
    if (uinfo->value.enumerated.item > 3) {
        uinfo->value.enumerated.item = 3;
    }
    strcpy(uinfo->value.enumerated.name,
           texts[uinfo->value.enumerated.item]);
    return 0;
}

static int
snd_rme96_get_attenuation_control(struct snd_kcontrol *kcontrol,
                  struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);

    spin_lock_irq(&rme96->lock);
    ucontrol->value.enumerated.item[0] = snd_rme96_getattenuation(rme96);
    spin_unlock_irq(&rme96->lock);
    return 0;
}

static int
snd_rme96_put_attenuation_control(struct snd_kcontrol *kcontrol,
                  struct snd_ctl_elem_value *ucontrol)
{
    struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
    unsigned int val;
    int change;

    val = ucontrol->value.enumerated.item[0] % 4;
    spin_lock_irq(&rme96->lock);
    change = (int)val != snd_rme96_getattenuation(rme96);
    snd_rme96_setattenuation(rme96, val);
    spin_unlock_irq(&rme96->lock);
    return change;
}

/* Enumerated control selecting which ADAT track pair is monitored. */
static int
snd_rme96_info_montracks_control(struct snd_kcontrol *kcontrol,
                 struct snd_ctl_elem_info *uinfo)
{
    static char *texts[4] = { "1+2", "3+4", "5+6", "7+8" };

    uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
    uinfo->count = 1;
    uinfo->value.enumerated.items = 4;
    if (uinfo->value.enumerated.item > 3) {
        uinfo->value.enumerated.item = 3;
    }
    strcpy(uinfo->value.enumerated.name,
           texts[uinfo->value.enumerated.item]);
    return 0;
}

static int
snd_rme96_get_montracks_control(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value
*ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); spin_lock_irq(&rme96->lock); ucontrol->value.enumerated.item[0] = snd_rme96_getmontracks(rme96); spin_unlock_irq(&rme96->lock); return 0; } static int snd_rme96_put_montracks_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); unsigned int val; int change; val = ucontrol->value.enumerated.item[0] % 4; spin_lock_irq(&rme96->lock); change = (int)val != snd_rme96_getmontracks(rme96); snd_rme96_setmontracks(rme96, val); spin_unlock_irq(&rme96->lock); return change; } static u32 snd_rme96_convert_from_aes(struct snd_aes_iec958 *aes) { u32 val = 0; val |= (aes->status[0] & IEC958_AES0_PROFESSIONAL) ? RME96_WCR_PRO : 0; val |= (aes->status[0] & IEC958_AES0_NONAUDIO) ? RME96_WCR_DOLBY : 0; if (val & RME96_WCR_PRO) val |= (aes->status[0] & IEC958_AES0_PRO_EMPHASIS_5015) ? RME96_WCR_EMP : 0; else val |= (aes->status[0] & IEC958_AES0_CON_EMPHASIS_5015) ? RME96_WCR_EMP : 0; return val; } static void snd_rme96_convert_to_aes(struct snd_aes_iec958 *aes, u32 val) { aes->status[0] = ((val & RME96_WCR_PRO) ? IEC958_AES0_PROFESSIONAL : 0) | ((val & RME96_WCR_DOLBY) ? IEC958_AES0_NONAUDIO : 0); if (val & RME96_WCR_PRO) aes->status[0] |= (val & RME96_WCR_EMP) ? IEC958_AES0_PRO_EMPHASIS_5015 : 0; else aes->status[0] |= (val & RME96_WCR_EMP) ? 
IEC958_AES0_CON_EMPHASIS_5015 : 0; } static int snd_rme96_control_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_rme96_control_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); snd_rme96_convert_to_aes(&ucontrol->value.iec958, rme96->wcreg_spdif); return 0; } static int snd_rme96_control_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); int change; u32 val; val = snd_rme96_convert_from_aes(&ucontrol->value.iec958); spin_lock_irq(&rme96->lock); change = val != rme96->wcreg_spdif; rme96->wcreg_spdif = val; spin_unlock_irq(&rme96->lock); return change; } static int snd_rme96_control_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_rme96_control_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); snd_rme96_convert_to_aes(&ucontrol->value.iec958, rme96->wcreg_spdif_stream); return 0; } static int snd_rme96_control_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); int change; u32 val; val = snd_rme96_convert_from_aes(&ucontrol->value.iec958); spin_lock_irq(&rme96->lock); change = val != rme96->wcreg_spdif_stream; rme96->wcreg_spdif_stream = val; rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP); rme96->wcreg |= val; writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER); spin_unlock_irq(&rme96->lock); return change; } static int snd_rme96_control_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count 
= 1; return 0; } static int snd_rme96_control_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.iec958.status[0] = kcontrol->private_value; return 0; } static int snd_rme96_dac_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = RME96_185X_MAX_OUT(rme96); return 0; } static int snd_rme96_dac_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *u) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); spin_lock_irq(&rme96->lock); u->value.integer.value[0] = rme96->vol[0]; u->value.integer.value[1] = rme96->vol[1]; spin_unlock_irq(&rme96->lock); return 0; } static int snd_rme96_dac_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *u) { struct rme96 *rme96 = snd_kcontrol_chip(kcontrol); int change = 0; unsigned int vol, maxvol; if (!RME96_HAS_ANALOG_OUT(rme96)) return -EINVAL; maxvol = RME96_185X_MAX_OUT(rme96); spin_lock_irq(&rme96->lock); vol = u->value.integer.value[0]; if (vol != rme96->vol[0] && vol <= maxvol) { rme96->vol[0] = vol; change = 1; } vol = u->value.integer.value[1]; if (vol != rme96->vol[1] && vol <= maxvol) { rme96->vol[1] = vol; change = 1; } if (change) snd_rme96_apply_dac_volume(rme96); spin_unlock_irq(&rme96->lock); return change; } static struct snd_kcontrol_new snd_rme96_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), .info = snd_rme96_control_spdif_info, .get = snd_rme96_control_spdif_get, .put = snd_rme96_control_spdif_put }, { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM), .info = snd_rme96_control_spdif_stream_info, .get = snd_rme96_control_spdif_stream_get, .put = 
snd_rme96_control_spdif_stream_put }, { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), .info = snd_rme96_control_spdif_mask_info, .get = snd_rme96_control_spdif_mask_get, .private_value = IEC958_AES0_NONAUDIO | IEC958_AES0_PROFESSIONAL | IEC958_AES0_CON_EMPHASIS }, { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), .info = snd_rme96_control_spdif_mask_info, .get = snd_rme96_control_spdif_mask_get, .private_value = IEC958_AES0_NONAUDIO | IEC958_AES0_PROFESSIONAL | IEC958_AES0_PRO_EMPHASIS }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Connector", .info = snd_rme96_info_inputtype_control, .get = snd_rme96_get_inputtype_control, .put = snd_rme96_put_inputtype_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Loopback Input", .info = snd_rme96_info_loopback_control, .get = snd_rme96_get_loopback_control, .put = snd_rme96_put_loopback_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Sample Clock Source", .info = snd_rme96_info_clockmode_control, .get = snd_rme96_get_clockmode_control, .put = snd_rme96_put_clockmode_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitor Tracks", .info = snd_rme96_info_montracks_control, .get = snd_rme96_get_montracks_control, .put = snd_rme96_put_montracks_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Attenuation", .info = snd_rme96_info_attenuation_control, .get = snd_rme96_get_attenuation_control, .put = snd_rme96_put_attenuation_control }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DAC Playback Volume", .info = snd_rme96_dac_volume_info, .get = snd_rme96_dac_volume_get, .put = snd_rme96_dac_volume_put } }; static int snd_rme96_create_switches(struct snd_card *card, struct rme96 *rme96) { int idx, err; struct snd_kcontrol *kctl; for (idx = 0; idx < 7; idx++) { if ((err = snd_ctl_add(card, kctl = 
snd_ctl_new1(&snd_rme96_controls[idx], rme96))) < 0) return err; if (idx == 1) /* IEC958 (S/PDIF) Stream */ rme96->spdif_ctl = kctl; } if (RME96_HAS_ANALOG_OUT(rme96)) { for (idx = 7; idx < 10; idx++) if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_rme96_controls[idx], rme96))) < 0) return err; } return 0; } /* * Card initialisation */ static void snd_rme96_card_free(struct snd_card *card) { snd_rme96_free(card->private_data); } static int snd_rme96_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct rme96 *rme96; struct snd_card *card; int err; u8 val; if (dev >= SNDRV_CARDS) { return -ENODEV; } if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct rme96), &card); if (err < 0) return err; card->private_free = snd_rme96_card_free; rme96 = card->private_data; rme96->card = card; rme96->pci = pci; snd_card_set_dev(card, &pci->dev); if ((err = snd_rme96_create(rme96)) < 0) { snd_card_free(card); return err; } strcpy(card->driver, "Digi96"); switch (rme96->pci->device) { case PCI_DEVICE_ID_RME_DIGI96: strcpy(card->shortname, "RME Digi96"); break; case PCI_DEVICE_ID_RME_DIGI96_8: strcpy(card->shortname, "RME Digi96/8"); break; case PCI_DEVICE_ID_RME_DIGI96_8_PRO: strcpy(card->shortname, "RME Digi96/8 PRO"); break; case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST: pci_read_config_byte(rme96->pci, 8, &val); if (val < 5) { strcpy(card->shortname, "RME Digi96/8 PAD"); } else { strcpy(card->shortname, "RME Digi96/8 PST"); } break; } sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, rme96->port, rme96->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void snd_rme96_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver rme96_driver = { .name = KBUILD_MODNAME, .id_table = snd_rme96_ids, .probe = snd_rme96_probe, .remove = 
snd_rme96_remove, }; module_pci_driver(rme96_driver);
gpl-2.0