repo_name
string
path
string
copies
string
size
string
content
string
license
string
Team-Blackout/Blackout_Ville_plus
net/caif/cffrml.c
5240
4609
/* * CAIF Framing Layer. * * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/crc-ccitt.h> #include <linux/netdevice.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cffrml.h> #define container_obj(layr) container_of(layr, struct cffrml, layer) struct cffrml { struct cflayer layer; bool dofcs; /* !< FCS active */ int __percpu *pcpu_refcnt; }; static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); static u32 cffrml_rcv_error; static u32 cffrml_rcv_checsum_error; struct cflayer *cffrml_create(u16 phyid, bool use_fcs) { struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC); if (!this) return NULL; this->pcpu_refcnt = alloc_percpu(int); if (this->pcpu_refcnt == NULL) { kfree(this); return NULL; } caif_assert(offsetof(struct cffrml, layer) == 0); this->layer.receive = cffrml_receive; this->layer.transmit = cffrml_transmit; this->layer.ctrlcmd = cffrml_ctrlcmd; snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); this->dofcs = use_fcs; this->layer.id = phyid; return (struct cflayer *) this; } void cffrml_free(struct cflayer *layer) { struct cffrml *this = container_obj(layer); free_percpu(this->pcpu_refcnt); kfree(layer); } void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) { this->up = up; } void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn) { this->dn = dn; } static u16 cffrml_checksum(u16 chks, void *buf, u16 len) { /* FIXME: FCS should be moved to glue in order to use OS-Specific * solutions */ return crc_ccitt(chks, buf, len); } static int cffrml_receive(struct 
cflayer *layr, struct cfpkt *pkt) { u16 tmp; u16 len; u16 hdrchks; u16 pktchks; struct cffrml *this; this = container_obj(layr); cfpkt_extr_head(pkt, &tmp, 2); len = le16_to_cpu(tmp); /* Subtract for FCS on length if FCS is not used. */ if (!this->dofcs) len -= 2; if (cfpkt_setlen(pkt, len) < 0) { ++cffrml_rcv_error; pr_err("Framing length error (%d)\n", len); cfpkt_destroy(pkt); return -EPROTO; } /* * Don't do extract if FCS is false, rather do setlen - then we don't * get a cache-miss. */ if (this->dofcs) { cfpkt_extr_trail(pkt, &tmp, 2); hdrchks = le16_to_cpu(tmp); pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); if (pktchks != hdrchks) { cfpkt_add_trail(pkt, &tmp, 2); ++cffrml_rcv_error; ++cffrml_rcv_checsum_error; pr_info("Frame checksum error (0x%x != 0x%x)\n", hdrchks, pktchks); return -EILSEQ; } } if (cfpkt_erroneous(pkt)) { ++cffrml_rcv_error; pr_err("Packet is erroneous!\n"); cfpkt_destroy(pkt); return -EPROTO; } if (layr->up == NULL) { pr_err("Layr up is missing!\n"); cfpkt_destroy(pkt); return -EINVAL; } return layr->up->receive(layr->up, pkt); } static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) { u16 chks; u16 len; __le16 data; struct cffrml *this = container_obj(layr); if (this->dofcs) { chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); data = cpu_to_le16(chks); cfpkt_add_trail(pkt, &data, 2); } else { cfpkt_pad_trail(pkt, 2); } len = cfpkt_getlen(pkt); data = cpu_to_le16(len); cfpkt_add_head(pkt, &data, 2); cfpkt_info(pkt)->hdr_len += 2; if (cfpkt_erroneous(pkt)) { pr_err("Packet is erroneous!\n"); cfpkt_destroy(pkt); return -EPROTO; } if (layr->dn == NULL) { cfpkt_destroy(pkt); return -ENODEV; } return layr->dn->transmit(layr->dn, pkt); } static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { if (layr->up && layr->up->ctrlcmd) layr->up->ctrlcmd(layr->up, ctrl, layr->id); } void cffrml_put(struct cflayer *layr) { struct cffrml *this = container_obj(layr); if (layr != NULL && this->pcpu_refcnt 
!= NULL) this_cpu_dec(*this->pcpu_refcnt); } void cffrml_hold(struct cflayer *layr) { struct cffrml *this = container_obj(layr); if (layr != NULL && this->pcpu_refcnt != NULL) this_cpu_inc(*this->pcpu_refcnt); } int cffrml_refcnt_read(struct cflayer *layr) { int i, refcnt = 0; struct cffrml *this = container_obj(layr); for_each_possible_cpu(i) refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); return refcnt; }
gpl-2.0
alexandru-g/kernel_htc_m8_gpe
drivers/net/ethernet/cisco/enic/enic_dev.c
5240
5924
/* * Copyright 2011 Cisco Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pci.h> #include <linux/etherdevice.h> #include "vnic_dev.h" #include "vnic_vic.h" #include "enic_res.h" #include "enic.h" #include "enic_dev.h" int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_fw_info(enic->vdev, fw_info); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_stats_dump(enic->vdev, vstats); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_add_station_addr(struct enic *enic) { int err; if (!is_valid_ether_addr(enic->netdev->dev_addr)) return -EADDRNOTAVAIL; spin_lock(&enic->devcmd_lock); err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_del_station_addr(struct enic *enic) { int err; if (!is_valid_ether_addr(enic->netdev->dev_addr)) return -EADDRNOTAVAIL; spin_lock(&enic->devcmd_lock); err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, int broadcast, int promisc, int allmulti) { int err; 
spin_lock(&enic->devcmd_lock); err = vnic_dev_packet_filter(enic->vdev, directed, multicast, broadcast, promisc, allmulti); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_add_addr(struct enic *enic, u8 *addr) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_add_addr(enic->vdev, addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_del_addr(struct enic *enic, u8 *addr) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_del_addr(enic->vdev, addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_notify_unset(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_notify_unset(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_hang_notify(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_hang_notify(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable_wait(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_disable(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_disable(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_intr_coal_timer_info(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_intr_coal_timer_info(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_vnic_dev_deinit(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_deinit(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_init_prov2(enic->vdev, (u8 *)vp, 
vic_provinfo_size(vp)); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_deinit_done(struct enic *enic, int *status) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_deinit_done(enic->vdev, status); spin_unlock(&enic->devcmd_lock); return err; } /* rtnl lock is held */ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; spin_lock(&enic->devcmd_lock); err = enic_add_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); return err; } /* rtnl lock is held */ int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; spin_lock(&enic->devcmd_lock); err = enic_del_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable2(struct enic *enic, int active) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable2(enic->vdev, active); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable2_done(struct enic *enic, int *status) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable2_done(enic->vdev, status); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_status_to_errno(int devcmd_status) { switch (devcmd_status) { case ERR_SUCCESS: return 0; case ERR_EINVAL: return -EINVAL; case ERR_EFAULT: return -EFAULT; case ERR_EPERM: return -EPERM; case ERR_EBUSY: return -EBUSY; case ERR_ECMDUNKNOWN: case ERR_ENOTSUPPORTED: return -EOPNOTSUPP; case ERR_EBADSTATE: return -EINVAL; case ERR_ENOMEM: return -ENOMEM; case ERR_ETIMEDOUT: return -ETIMEDOUT; case ERR_ELINKDOWN: return -ENETDOWN; case ERR_EINPROGRESS: return -EINPROGRESS; case ERR_EMAXRES: default: return (devcmd_status < 0) ? devcmd_status : -1; } }
gpl-2.0
elektroschmock/android_kernel_lge_hammerhead
drivers/scsi/a2091.c
5240
6690
#include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zorro.h> #include <linux/module.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include "scsi.h" #include "wd33c93.h" #include "a2091.h" struct a2091_hostdata { struct WD33C93_hostdata wh; struct a2091_scsiregs *regs; }; static irqreturn_t a2091_intr(int irq, void *data) { struct Scsi_Host *instance = data; struct a2091_hostdata *hdata = shost_priv(instance); unsigned int status = hdata->regs->ISTR; unsigned long flags; if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) return IRQ_NONE; spin_lock_irqsave(instance->host_lock, flags); wd33c93_intr(instance); spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); /* don't allow DMA if the physical address is bad */ if (addr & A2091_XFER_MASK) { wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL); /* can't allocate memory; use PIO */ if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } /* get the physical address of the bounce buffer */ addr = virt_to_bus(wh->dma_bounce_buffer); /* the bounce buffer may not be in the first 16M of physmem */ if (addr & A2091_XFER_MASK) { /* we could use chipmem... 
maybe later */ kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } /* setup dma direction */ if (!dir_in) cntr |= CNTR_DDIR; /* remember direction */ wh->dma_dir = dir_in; regs->CNTR = cntr; /* setup DMA *physical* address */ regs->ACR = addr; if (dir_in) { /* invalidate any cache */ cache_clear(addr, cmd->SCp.this_residual); } else { /* push any dirty cache */ cache_push(addr, cmd->SCp.this_residual); } /* start DMA */ regs->ST_DMA = 1; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; if (!wh->dma_dir) cntr |= CNTR_DDIR; /* disable SCSI interrupts */ regs->CNTR = cntr; /* flush if we were reading */ if (wh->dma_dir) { regs->FLUSH = 1; while (!(regs->ISTR & ISTR_FE_FLG)) ; } /* clear a possible interrupt */ regs->CINT = 1; /* stop DMA */ regs->SP_DMA = 1; /* restore the CONTROL bits (minus the direction flag) */ regs->CNTR = CNTR_PDMD | CNTR_INTEN; /* copy from a bounce buffer, if necessary */ if (status && wh->dma_bounce_buffer) { if (wh->dma_dir) memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; } } static int a2091_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; /* FIXME perform bus-specific reset */ /* FIXME 2: kill this function, and let midlayer fall back to the same action, calling wd33c93_host_reset() */ spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); spin_unlock_irq(instance->host_lock); return SUCCESS; } static struct scsi_host_template a2091_scsi_template = { 
.module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", .proc_info = wd33c93_proc_info, .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a2091_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; static int __devinit a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *instance; int error; struct a2091_scsiregs *regs; wd33c93_regs wdregs; struct a2091_hostdata *hdata; if (!request_mem_region(z->resource.start, 256, "wd33c93")) return -EBUSY; instance = scsi_host_alloc(&a2091_scsi_template, sizeof(struct a2091_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); regs->DAWR = DAWR_A2091; wdregs.SASR = &regs->SASR; wdregs.SCMD = &regs->SCMD; hdata = shost_priv(instance); hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", instance); if (error) goto fail_irq; regs->CNTR = CNTR_PDMD | CNTR_INTEN; error = scsi_add_host(instance, NULL); if (error) goto fail_host; zorro_set_drvdata(z, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_alloc: release_mem_region(z->resource.start, 256); return error; } static void __devexit a2091_remove(struct zorro_dev *z) { struct Scsi_Host *instance = zorro_get_drvdata(z); struct a2091_hostdata *hdata = shost_priv(instance); hdata->regs->CNTR = 0; scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); scsi_host_put(instance); release_mem_region(z->resource.start, 256); } static 
struct zorro_device_id a2091_zorro_tbl[] __devinitdata = { { ZORRO_PROD_CBM_A590_A2091_1 }, { ZORRO_PROD_CBM_A590_A2091_2 }, { 0 } }; MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); static struct zorro_driver a2091_driver = { .name = "a2091", .id_table = a2091_zorro_tbl, .probe = a2091_probe, .remove = __devexit_p(a2091_remove), }; static int __init a2091_init(void) { return zorro_register_driver(&a2091_driver); } module_init(a2091_init); static void __exit a2091_exit(void) { zorro_unregister_driver(&a2091_driver); } module_exit(a2091_exit); MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); MODULE_LICENSE("GPL");
gpl-2.0
sai9615/MY-kernel-for-grand-I9082
drivers/input/mouse/pc110pad.c
13176
4680
/*
 * Copyright (c) 2000-2001 Vojtech Pavlik
 *
 * Based on the work of:
 *	Alan Cox	Robin O'Leary
 */

/*
 * IBM PC110 touchpad driver for Linux
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/input.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/irq.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("IBM PC110 touchpad driver");
MODULE_LICENSE("GPL");

/* Values written to the pad's control port (io + 2) to disable/enable it. */
#define PC110PAD_OFF	0x30
#define PC110PAD_ON	0x38

/* Fixed resources of the PC110 pad hardware; not probed, just assumed. */
static int pc110pad_irq = 10;
static int pc110pad_io = 0x15e0;

static struct input_dev *pc110pad_dev;
/* The pad reports in 3-byte packets; bytes accumulate here across IRQs. */
static int pc110pad_data[3];
static int pc110pad_count;

/*
 * IRQ handler: read one data byte, acknowledge it by toggling bit 0 of
 * the handshake port (with short settle delays), and once three bytes
 * have accumulated, decode and report a complete touch event.
 */
static irqreturn_t pc110pad_interrupt(int irq, void *ptr)
{
	int value     = inb_p(pc110pad_io);
	int handshake = inb_p(pc110pad_io + 2);

	/* Pulse the handshake bit to acknowledge the byte. */
	outb(handshake |  1, pc110pad_io + 2);
	udelay(2);
	outb(handshake & ~1, pc110pad_io + 2);
	udelay(2);
	/* Read of port 0x64 — presumably clears the keyboard-controller
	 * status; TODO confirm against PC110 hardware docs. */
	inb_p(0x64);

	pc110pad_data[pc110pad_count++] = value;

	/* Wait until a full 3-byte packet has arrived. */
	if (pc110pad_count < 3)
		return IRQ_HANDLED;

	/* Byte 0 carries the touch bit and the high bits of X and Y;
	 * bytes 1 and 2 carry the low 7/8 bits of X and Y respectively. */
	input_report_key(pc110pad_dev, BTN_TOUCH,
		pc110pad_data[0] & 0x01);
	input_report_abs(pc110pad_dev, ABS_X,
		pc110pad_data[1] | ((pc110pad_data[0] << 3) & 0x80) |
		((pc110pad_data[0] << 1) & 0x100));
	input_report_abs(pc110pad_dev, ABS_Y,
		pc110pad_data[2] | ((pc110pad_data[0] << 4) & 0x80));
	input_sync(pc110pad_dev);

	pc110pad_count = 0;
	return IRQ_HANDLED;
}

/* Turn the pad off when the last user closes the device. */
static void pc110pad_close(struct input_dev *dev)
{
	outb(PC110PAD_OFF, pc110pad_io + 2);
}

/*
 * Turn the pad on when the device is first opened.  The three dummy
 * interrupt calls drain any stale bytes so the packet state machine
 * starts aligned; the counter is reset afterwards for the same reason.
 */
static int pc110pad_open(struct input_dev *dev)
{
	pc110pad_interrupt(0, NULL);
	pc110pad_interrupt(0, NULL);
	pc110pad_interrupt(0, NULL);
	outb(PC110PAD_ON, pc110pad_io + 2);
	pc110pad_count = 0;

	return 0;
}

/*
 * We try to avoid enabling the hardware if it's not
 * there, but we don't know how to test. But we do know
 * that the PC110 is not a PCI system. So if we find any
 * PCI devices in the machine, we don't have a PC110.
 */
static int __init pc110pad_init(void)
{
	int err;

	/* A PC110 has no PCI bus; any PCI device means wrong machine. */
	if (!no_pci_devices())
		return -ENODEV;

	if (!request_region(pc110pad_io, 4, "pc110pad")) {
		printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n",
				pc110pad_io, pc110pad_io + 4);
		return -EBUSY;
	}

	/* Keep the pad quiet until an open() enables it. */
	outb(PC110PAD_OFF, pc110pad_io + 2);

	if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", NULL)) {
		printk(KERN_ERR "pc110pad: Unable to get irq %d.\n", pc110pad_irq);
		err = -EBUSY;
		goto err_release_region;
	}

	pc110pad_dev = input_allocate_device();
	if (!pc110pad_dev) {
		printk(KERN_ERR "pc110pad: Not enough memory.\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pc110pad_dev->name = "IBM PC110 TouchPad";
	pc110pad_dev->phys = "isa15e0/input0";
	pc110pad_dev->id.bustype = BUS_ISA;
	pc110pad_dev->id.vendor = 0x0003;
	pc110pad_dev->id.product = 0x0001;
	pc110pad_dev->id.version = 0x0100;

	pc110pad_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	pc110pad_dev->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y);
	pc110pad_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	/* Ranges match the 9-bit X / 8-bit Y values assembled in the IRQ
	 * handler above. */
	input_abs_set_max(pc110pad_dev, ABS_X, 0x1ff);
	input_abs_set_max(pc110pad_dev, ABS_Y, 0x0ff);

	pc110pad_dev->open = pc110pad_open;
	pc110pad_dev->close = pc110pad_close;

	err = input_register_device(pc110pad_dev);
	if (err)
		goto err_free_dev;

	return 0;

 err_free_dev:
	input_free_device(pc110pad_dev);
 err_free_irq:
	free_irq(pc110pad_irq, NULL);
 err_release_region:
	release_region(pc110pad_io, 4);
	return err;
}

static void __exit pc110pad_exit(void)
{
	/* Silence the hardware before tearing down the IRQ and device. */
	outb(PC110PAD_OFF, pc110pad_io + 2);
	free_irq(pc110pad_irq, NULL);
	input_unregister_device(pc110pad_dev);
	release_region(pc110pad_io, 4);
}

module_init(pc110pad_init);
module_exit(pc110pad_exit);
gpl-2.0
levex/qr-linux
mm/memory_hotplug.c
121
52610
/* * linux/mm/memory_hotplug.c * * Copyright (C) */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> #include <linux/pagemap.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/migrate.h> #include <linux/page-isolation.h> #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/mm_inline.h> #include <linux/firmware-map.h> #include <linux/stop_machine.h> #include <linux/hugetlb.h> #include <linux/memblock.h> #include <linux/bootmem.h> #include <asm/tlbflush.h> #include "internal.h" /* * online_page_callback contains pointer to current page onlining function. * Initially it is generic_online_page(). If it is required it could be * changed by calling set_online_page_callback() for callback registration * and restore_online_page_callback() for generic callback restore. */ static void generic_online_page(struct page *page); static online_page_callback_t online_page_callback = generic_online_page; static DEFINE_MUTEX(online_page_callback_lock); /* The same as the cpu_hotplug lock, but for memory hotplug. */ static struct { struct task_struct *active_writer; struct mutex lock; /* Synchronizes accesses to refcount, */ /* * Also blocks the new readers during * an ongoing mem hotplug operation. 
*/ int refcount; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif } mem_hotplug = { .active_writer = NULL, .lock = __MUTEX_INITIALIZER(mem_hotplug.lock), .refcount = 0, #ifdef CONFIG_DEBUG_LOCK_ALLOC .dep_map = {.name = "mem_hotplug.lock" }, #endif }; /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */ #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map) #define memhp_lock_acquire() lock_map_acquire(&mem_hotplug.dep_map) #define memhp_lock_release() lock_map_release(&mem_hotplug.dep_map) void get_online_mems(void) { might_sleep(); if (mem_hotplug.active_writer == current) return; memhp_lock_acquire_read(); mutex_lock(&mem_hotplug.lock); mem_hotplug.refcount++; mutex_unlock(&mem_hotplug.lock); } void put_online_mems(void) { if (mem_hotplug.active_writer == current) return; mutex_lock(&mem_hotplug.lock); if (WARN_ON(!mem_hotplug.refcount)) mem_hotplug.refcount++; /* try to fix things up */ if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer)) wake_up_process(mem_hotplug.active_writer); mutex_unlock(&mem_hotplug.lock); memhp_lock_release(); } static void mem_hotplug_begin(void) { mem_hotplug.active_writer = current; memhp_lock_acquire(); for (;;) { mutex_lock(&mem_hotplug.lock); if (likely(!mem_hotplug.refcount)) break; __set_current_state(TASK_UNINTERRUPTIBLE); mutex_unlock(&mem_hotplug.lock); schedule(); } } static void mem_hotplug_done(void) { mem_hotplug.active_writer = NULL; mutex_unlock(&mem_hotplug.lock); memhp_lock_release(); } /* add this memory to iomem resource */ static struct resource *register_memory_resource(u64 start, u64 size) { struct resource *res; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(!res); res->name = "System RAM"; res->start = start; res->end = start + size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&iomem_resource, res) < 0) { pr_debug("System RAM resource %pR cannot be added\n", res); kfree(res); res = NULL; } 
return res; } static void release_memory_resource(struct resource *res) { if (!res) return; release_resource(res); kfree(res); return; } #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE void get_page_bootmem(unsigned long info, struct page *page, unsigned long type) { page->lru.next = (struct list_head *) type; SetPagePrivate(page); set_page_private(page, info); atomic_inc(&page->_count); } void put_page_bootmem(struct page *page) { unsigned long type; type = (unsigned long) page->lru.next; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE); if (atomic_dec_return(&page->_count) == 1) { ClearPagePrivate(page); set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); free_reserved_page(page); } } #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE #ifndef CONFIG_SPARSEMEM_VMEMMAP static void register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long *usemap, mapsize, section_nr, i; struct mem_section *ms; struct page *page, *memmap; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); /* Get section's memmap address */ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); /* * Get page for the memmap's phys address * XXX: need more consideration for sparse_vmemmap... 
*/ page = virt_to_page(memmap); mapsize = sizeof(struct page) * PAGES_PER_SECTION; mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; /* remember memmap's page */ for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, SECTION_INFO); usemap = __nr_to_section(section_nr)->pageblock_flags; page = virt_to_page(usemap); mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #else /* CONFIG_SPARSEMEM_VMEMMAP */ static void register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long *usemap, mapsize, section_nr, i; struct mem_section *ms; struct page *page, *memmap; if (!pfn_valid(start_pfn)) return; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); usemap = __nr_to_section(section_nr)->pageblock_flags; page = virt_to_page(usemap); mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ void register_page_bootmem_info_node(struct pglist_data *pgdat) { unsigned long i, pfn, end_pfn, nr_pages; int node = pgdat->node_id; struct page *page; struct zone *zone; nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; page = virt_to_page(pgdat); for (i = 0; i < nr_pages; i++, page++) get_page_bootmem(node, page, NODE_INFO); zone = &pgdat->node_zones[0]; for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { if (zone_is_initialized(zone)) { nr_pages = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; page = virt_to_page(zone->wait_table); for (i = 0; i < nr_pages; i++, page++) get_page_bootmem(node, page, NODE_INFO); } } pfn = pgdat->node_start_pfn; end_pfn = pgdat_end_pfn(pgdat); /* register section info */ 
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { /* * Some platforms can assign the same pfn to multiple nodes - on * node0 as well as nodeN. To avoid registering a pfn against * multiple nodes we check that this pfn does not already * reside in some other nodes. */ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) register_page_bootmem_info_section(pfn); } } #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { unsigned long old_zone_end_pfn; zone_span_writelock(zone); old_zone_end_pfn = zone_end_pfn(zone); if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) zone->zone_start_pfn = start_pfn; zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - zone->zone_start_pfn; zone_span_writeunlock(zone); } static void resize_zone(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { zone_span_writelock(zone); if (end_pfn - start_pfn) { zone->zone_start_pfn = start_pfn; zone->spanned_pages = end_pfn - start_pfn; } else { /* * make it consist as free_area_init_core(), * if spanned_pages = 0, then keep start_pfn = 0 */ zone->zone_start_pfn = 0; zone->spanned_pages = 0; } zone_span_writeunlock(zone); } static void fix_zone_id(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { enum zone_type zid = zone_idx(zone); int nid = zone->zone_pgdat->node_id; unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn++) set_page_links(pfn_to_page(pfn), zid, nid, pfn); } /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */ static int __ref ensure_zone_is_initialized(struct zone *zone, unsigned long start_pfn, unsigned long num_pages) { if (!zone_is_initialized(zone)) return init_currently_empty_zone(zone, start_pfn, num_pages, MEMMAP_HOTPLUG); return 0; } static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2, unsigned long start_pfn, unsigned long 
end_pfn) { int ret; unsigned long flags; unsigned long z1_start_pfn; ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn); if (ret) return ret; pgdat_resize_lock(z1->zone_pgdat, &flags); /* can't move pfns which are higher than @z2 */ if (end_pfn > zone_end_pfn(z2)) goto out_fail; /* the move out part must be at the left most of @z2 */ if (start_pfn > z2->zone_start_pfn) goto out_fail; /* must included/overlap */ if (end_pfn <= z2->zone_start_pfn) goto out_fail; /* use start_pfn for z1's start_pfn if z1 is empty */ if (!zone_is_empty(z1)) z1_start_pfn = z1->zone_start_pfn; else z1_start_pfn = start_pfn; resize_zone(z1, z1_start_pfn, end_pfn); resize_zone(z2, end_pfn, zone_end_pfn(z2)); pgdat_resize_unlock(z1->zone_pgdat, &flags); fix_zone_id(z1, start_pfn, end_pfn); return 0; out_fail: pgdat_resize_unlock(z1->zone_pgdat, &flags); return -1; } static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2, unsigned long start_pfn, unsigned long end_pfn) { int ret; unsigned long flags; unsigned long z2_end_pfn; ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn); if (ret) return ret; pgdat_resize_lock(z1->zone_pgdat, &flags); /* can't move pfns which are lower than @z1 */ if (z1->zone_start_pfn > start_pfn) goto out_fail; /* the move out part mast at the right most of @z1 */ if (zone_end_pfn(z1) > end_pfn) goto out_fail; /* must included/overlap */ if (start_pfn >= zone_end_pfn(z1)) goto out_fail; /* use end_pfn for z2's end_pfn if z2 is empty */ if (!zone_is_empty(z2)) z2_end_pfn = zone_end_pfn(z2); else z2_end_pfn = end_pfn; resize_zone(z1, z1->zone_start_pfn, start_pfn); resize_zone(z2, start_pfn, z2_end_pfn); pgdat_resize_unlock(z1->zone_pgdat, &flags); fix_zone_id(z2, start_pfn, end_pfn); return 0; out_fail: pgdat_resize_unlock(z1->zone_pgdat, &flags); return -1; } static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, unsigned long end_pfn) { unsigned long old_pgdat_end_pfn = 
		pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

/* Grow @zone (and its pgdat) by one hot-added section and init its memmap. */
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

/* Hot-add one sparse section at @phys_start_pfn and register it in sysfs. */
static int __meminit __add_section(int nid, struct zone *zone,
					unsigned long phys_start_pfn)
{
	int ret;

	/* a valid first pfn means the section is already present */
	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* during initialize mem_map, align hot-added range to section */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * EEXIST is finally dealt with by ioresource collision
		 * check.
see add_memory() => register_memory_resource() * Warning will be printed if there is collision. */ if (err && (err != -EEXIST)) break; err = 0; } return err; } EXPORT_SYMBOL_GPL(__add_pages); #ifdef CONFIG_MEMORY_HOTREMOVE /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ static int find_smallest_section_pfn(int nid, struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { struct mem_section *ms; for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { ms = __pfn_to_section(start_pfn); if (unlikely(!valid_section(ms))) continue; if (unlikely(pfn_to_nid(start_pfn) != nid)) continue; if (zone && zone != page_zone(pfn_to_page(start_pfn))) continue; return start_pfn; } return 0; } /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ static int find_biggest_section_pfn(int nid, struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { struct mem_section *ms; unsigned long pfn; /* pfn is the end pfn of a memory section. */ pfn = end_pfn - 1; for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { ms = __pfn_to_section(pfn); if (unlikely(!valid_section(ms))) continue; if (unlikely(pfn_to_nid(pfn) != nid)) continue; if (zone && zone != page_zone(pfn_to_page(pfn))) continue; return pfn; } return 0; } static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { unsigned long zone_start_pfn = zone->zone_start_pfn; unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ unsigned long zone_end_pfn = z; unsigned long pfn; struct mem_section *ms; int nid = zone_to_nid(zone); zone_span_writelock(zone); if (zone_start_pfn == start_pfn) { /* * If the section is smallest section in the zone, it need * shrink zone->zone_start_pfn and zone->zone_spanned_pages. * In this case, we find second smallest valid mem_section * for shrinking zone. 
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is biggest section in the zone, it need
		 * shrink zone->spanned_pages.
		 * In this case, we find second biggest valid mem_section for
		 * shrinking zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * The section is not biggest or smallest mem_section in the zone, it
	 * only creates a hole in the zone. So in this case, we need not
	 * change the zone. But perhaps, the zone has only hole data. Thus
	 * it check the zone has only hole or not.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is current section, it continues the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

/* Shrink @pgdat's node span after section [start_pfn, end_pfn) is removed. */
static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is smallest section in the pgdat, it need
		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, we find second smallest valid mem_section
		 * for shrinking zone.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is biggest section in the pgdat, it need
		 * shrink pgdat->node_spanned_pages.
		 * In this case, we find second biggest valid mem_section for
		 * shrinking zone.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is not biggest or smallest mem_section in the pgdat,
	 * it only creates a hole in the pgdat. So in this case, we need not
	 * change the pgdat.
	 * But perhaps, the pgdat has only hole data. Thus it check the pgdat
	 * has only hole or not.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is current section, it continues the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

/* Shrink the zone and pgdat spans for one removed section at @start_pfn. */
static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

/* Unregister and tear down one memory section; -EINVAL if not valid. */
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);
	sparse_remove_one_section(zone, ms);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjust properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	resource_size_t start, size;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	/* best effort: the resource may have been split by a previous add */
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
				&start, &endres, ret);
	}

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* Install @callback as the per-page online hook; fails if one is installed. */
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

/* Restore the default online hook; fails unless @callback is the current one. */
int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback =
			generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

/* Hook point for balloon drivers etc.; default implementation is a no-op. */
void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

/* Account one newly onlined page in the managed-page counters. */
void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

/* Release one reserved page to the buddy allocator. */
void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

/* Default online_page_callback: account and free one page. */
static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

/* walk_system_ram_range() callback: online @nr_pages pages, count via @arg. */
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	/* offlined memory is fully reserved; skip ranges already onlined */
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * if the memory to be online is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before online, we will
	 * need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	/* without HIGHMEM the "high" state tracks the "normal" state */
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * if the node doesn't have memory before online, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

/* Apply the node-state transitions computed by the check above. */
static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

/* Online nr_pages pages starting at @pfn according to @online_type. */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	mem_hotplug_begin();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	ret = -EINVAL;
	if ((zone_idx(zone) > ZONE_NORMAL ||
	    online_type == MMOP_ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone))
		goto out;

	/* a kernel request for a MOVABLE range pulls it into the zone below */
	if (online_type == MMOP_ONLINE_KERNEL &&
	    zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
			goto out;
	}
	/* a movable request just below ZONE_MOVABLE pushes it up */
	if (online_type == MMOP_ONLINE_MOVABLE &&
	    zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
			goto out;
	}

	/* Previous code may changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = pfn_to_nid(pfn);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}
	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		/* undo the speculative zonelist setup above */
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			    << PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
out:
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* Zero the present-page counts of every zone on @pgdat. */
static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node
	 * we allocated has no zone fallback lists. For avoiding
	 * to access not-initialized zonelist, build here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which will cause
	 * /sys/device/system/node/nodeX/meminfo has wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}

/* Undo hotadd_new_pgdat(): detach and free the node data. */
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}


/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t	*pgdat;
	int	ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

	/* an empty zonelist means the node was never built; build it now */
	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}

out:
	mem_hotplug_done();
	return ret;
}

/* Reject hotplug ranges that are not section-aligned or empty. */
static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* Memory range must be aligned with section */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
				(unsigned long long)start,
				(unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}

/*
 * If movable zone has already been setup, newly added memory should be check.
 * If its address is higher than movable zone, it should be added as movable.
 * Without this check, movable zone may overlap with other zone.
 */
static int should_add_memory_movable(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;

	if (zone_is_empty(movable_zone))
		return 0;

	if (movable_zone->zone_start_pfn <= start_pfn)
		return 1;

	return 0;
}

/* Pick the zone for a hot-added range: ZONE_MOVABLE or the arch default. */
int zone_for_memory(int nid, u64 start, u64 size, int zone_default)
{
	if (should_add_memory_movable(nid, start, size))
		return ZONE_MOVABLE;

	return zone_default;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}
	mem_hotplug_begin();

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If sysfs file of new node can't create, cpu on the node
		 * can't be hot-added. There is no rollback way now.
		 * So, check by BUG_ON() to catch it reluctantly..
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy contraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm all pages in a range [start, end) is belongs to the same zone.
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
 * and hugepages). We scan pfn because it's much easier than scanning over
 * linked list. This function returns the pfn of the first found movable
 * page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (PageHuge(page)) {
				if (is_hugepage_active(page))
					return pfn;
				else
					/* skip the whole inactive hugepage */
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
/* Isolate and migrate movable pages in [start_pfn, end_pfn) off this range. */
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page, "failed to remove from LRU");
#endif
			put_page(page);
			/* Because we don't have big zone->lock. we should
			   check this again here. */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target should be improooooved!!
		 * migrate_pages returns # of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

/* Pull every isolated System RAM page in the range out of the buddy lists. */
static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check all pages in range, recorded as memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

/* Return the number of isolated pages in the range, or <0 on error. */
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/* more normal memory remains than we are removing: always OK */
	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

/* Parse the "movable_node" early boot parameter. */
static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_MOVABLE_NODE
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at very early time. We
	 * cannot prevent this anyway. So on NUMA system, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers, one node could have double-digit
	 * gigabytes memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try the best to keep
	 * the kernel away from hotpluggable memory.
	 */
	memblock_set_bottom_up(true);
	movable_node_enabled = true;
#else
	pr_warn("movable_node option not supported\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offline is in a zone of 0...zone_last,
	 * and it is the last present memory, 0...zone_last will
	 * become empty after offline , thus we can determine we will
	 * need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	/* present_pages continues accumulating from the loop above */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	/* without HIGHMEM the "high" state tracks the "normal" state */
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_HIGH_MEMORY] will be changed
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

/* Apply the node-state transitions computed by the check above. */
static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

/* Offline pageblock-aligned range [start_pfn, end_pfn) within @timeout. */
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	unsigned long flags;
	struct zone *zone;
	struct memory_notify arg;

	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier...and readable.
	   we assume this for now.
	.*/
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	mem_hotplug_begin();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages(zone);
	}

	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) { /* We have movable pages */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zone's lru pagevec, this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages(zone);
	/*
	 * dissolve free hugepages in the memory block before doing offlining
	 * actually in order to make hugetlbfs's object counting consistent.
	 */
	dissolve_free_huge_pages(start_pfn, end_pfn);
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point.
*/ offline_isolated_pages(start_pfn, end_pfn); /* reset pagetype flags and makes migrate type to be MOVABLE */ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); /* removal success */ adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); zone->present_pages -= offlined_pages; pgdat_resize_lock(zone->zone_pgdat, &flags); zone->zone_pgdat->node_present_pages -= offlined_pages; pgdat_resize_unlock(zone->zone_pgdat, &flags); init_per_zone_wmark_min(); if (!populated_zone(zone)) { zone_pcp_reset(zone); mutex_lock(&zonelists_mutex); build_all_zonelists(NULL, NULL); mutex_unlock(&zonelists_mutex); } else zone_pcp_update(zone); node_states_clear_node(node, &arg); if (arg.status_change_nid >= 0) kswapd_stop(node); vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); mem_hotplug_done(); return 0; failed_removal: printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n", (unsigned long long) start_pfn << PAGE_SHIFT, ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_OFFLINE, &arg); /* pushback to free area */ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); out: mem_hotplug_done(); return ret; } int offline_pages(unsigned long start_pfn, unsigned long nr_pages) { return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ); } #endif /* CONFIG_MEMORY_HOTREMOVE */ /** * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) * @start_pfn: start pfn of the memory range * @end_pfn: end pfn of the memory range * @arg: argument passed to func * @func: callback for each memory section walked * * This function walks through all present mem sections in range * [start_pfn, end_pfn) and call func on each mem section. * * Returns the return value of func. 
*/ int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)) { struct memory_block *mem = NULL; struct mem_section *section; unsigned long pfn, section_nr; int ret; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { section_nr = pfn_to_section_nr(pfn); if (!present_section_nr(section_nr)) continue; section = __nr_to_section(section_nr); /* same memblock? */ if (mem) if ((section_nr >= mem->start_section_nr) && (section_nr <= mem->end_section_nr)) continue; mem = find_memory_block_hinted(section, mem); if (!mem) continue; ret = func(mem, arg); if (ret) { kobject_put(&mem->dev.kobj); return ret; } } if (mem) kobject_put(&mem->dev.kobj); return 0; } #ifdef CONFIG_MEMORY_HOTREMOVE static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) { int ret = !is_memblock_offlined(mem); if (unlikely(ret)) { phys_addr_t beginpa, endpa; beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; pr_warn("removing memory fails, because memory " "[%pa-%pa] is onlined\n", &beginpa, &endpa); } return ret; } static int check_cpu_on_node(pg_data_t *pgdat) { int cpu; for_each_present_cpu(cpu) { if (cpu_to_node(cpu) == pgdat->node_id) /* * the cpu on this node isn't removed, and we can't * offline this node. */ return -EBUSY; } return 0; } static void unmap_cpu_on_node(pg_data_t *pgdat) { #ifdef CONFIG_ACPI_NUMA int cpu; for_each_possible_cpu(cpu) if (cpu_to_node(cpu) == pgdat->node_id) numa_clear_node(cpu); #endif } static int check_and_unmap_cpu_on_node(pg_data_t *pgdat) { int ret; ret = check_cpu_on_node(pgdat); if (ret) return ret; /* * the node will be offlined when we come here, so we can clear * the cpu_to_node() now. */ unmap_cpu_on_node(pgdat); return 0; } /** * try_offline_node * * Offline a node if all memory sections and cpus of the node are removed. 
* * NOTE: The caller must call lock_device_hotplug() to serialize hotplug * and online/offline operations before this call. */ void try_offline_node(int nid) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long start_pfn = pgdat->node_start_pfn; unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; unsigned long pfn; int i; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { unsigned long section_nr = pfn_to_section_nr(pfn); if (!present_section_nr(section_nr)) continue; if (pfn_to_nid(pfn) != nid) continue; /* * some memory sections of this node are not removed, and we * can't offline node now. */ return; } if (check_and_unmap_cpu_on_node(pgdat)) return; /* * all memory/cpu of this node are removed, we can offline this * node now. */ node_set_offline(nid); unregister_one_node(nid); /* free waittable in each zone */ for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; /* * wait_table may be allocated from boot memory, * here only free if it's allocated by vmalloc. */ if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } /* * Since there is no way to guarentee the address of pgdat/zone is not * on stack of any kernel threads or used by other kernel objects * without reference counting or other symchronizing method, do not * reset node_data and free pgdat here. Just reset it to 0 and reuse * the memory when the node is online again. */ memset(pgdat, 0, sizeof(*pgdat)); } EXPORT_SYMBOL(try_offline_node); /** * remove_memory * * NOTE: The caller must call lock_device_hotplug() to serialize hotplug * and online/offline operations before this call, as required by * try_offline_node(). */ void __ref remove_memory(int nid, u64 start, u64 size) { int ret; BUG_ON(check_hotplug_memory_range(start, size)); mem_hotplug_begin(); /* * All memory blocks must be offlined before removing memory. Check * whether all memory blocks in question are offline and trigger a BUG() * if this is not the case. 
*/ ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL, check_memblock_offlined_cb); if (ret) BUG(); /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); arch_remove_memory(start, size); try_offline_node(nid); mem_hotplug_done(); } EXPORT_SYMBOL_GPL(remove_memory); #endif /* CONFIG_MEMORY_HOTREMOVE */
gpl-2.0
weritos666/BOOST_KERNEL
arch/arm/mach-msm/clock-pcom.c
377
5137
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/err.h> #include <mach/clk.h> #include <mach/socinfo.h> #include <mach/proc_comm.h> #include "clock.h" #include "clock-pcom.h" /* * glue for the proc_comm interface */ static int pc_clk_enable(struct clk *clk) { int rc; int id = to_pcom_clk(clk)->id; /* Ignore clocks that are always on */ if (id == P_EBI1_CLK || id == P_EBI1_FIXED_CLK) return 0; rc = msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static void pc_clk_disable(struct clk *clk) { int id = to_pcom_clk(clk)->id; /* Ignore clocks that are always on */ if (id == P_EBI1_CLK || id == P_EBI1_FIXED_CLK) return; msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL); } int pc_clk_reset(unsigned id, enum clk_reset_action action) { int rc; if (action == CLK_RESET_ASSERT) rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL); else rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static int pc_reset(struct clk *clk, enum clk_reset_action action) { int id = to_pcom_clk(clk)->id; return pc_clk_reset(id, action); } static int _pc_clk_set_rate(struct clk *clk, unsigned long rate) { /* The rate _might_ be rounded off to the nearest KHz value by the * remote function. So a return value of 0 doesn't necessarily mean * that the exact rate was set successfully. 
*/ unsigned r = rate; int id = to_pcom_clk(clk)->id; int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &r); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static int _pc_clk_set_min_rate(struct clk *clk, unsigned long rate) { int rc; int id = to_pcom_clk(clk)->id; bool ignore_error = (cpu_is_msm7x27() && id == P_EBI1_CLK && rate >= INT_MAX); unsigned r = rate; rc = msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &r); if (rc < 0) return rc; else if (ignore_error) return 0; else return (int)id < 0 ? -EINVAL : 0; } static int pc_clk_set_rate(struct clk *clk, unsigned long rate) { if (clk->flags & CLKFLAG_MIN) return _pc_clk_set_min_rate(clk, rate); else return _pc_clk_set_rate(clk, rate); } static int pc_clk_set_max_rate(struct clk *clk, unsigned long rate) { int id = to_pcom_clk(clk)->id; unsigned r = rate; int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &r); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static int pc_clk_set_flags(struct clk *clk, unsigned flags) { int id = to_pcom_clk(clk)->id; int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static int pc_clk_set_ext_config(struct clk *clk, unsigned long config) { int id = to_pcom_clk(clk)->id; unsigned c = config; int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_EXT_CONFIG, &id, &c); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } static unsigned long pc_clk_get_rate(struct clk *clk) { int id = to_pcom_clk(clk)->id; if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL)) return 0; else return id; } static int pc_clk_is_enabled(struct clk *clk) { int id = to_pcom_clk(clk)->id; if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL)) return 0; else return id; } static long pc_clk_round_rate(struct clk *clk, unsigned long rate) { /* Not really supported; pc_clk_set_rate() does rounding on it's own. 
*/ return rate; } static bool pc_clk_is_local(struct clk *clk) { return false; } static enum handoff pc_clk_handoff(struct clk *clk) { /* * Handoff clock state only since querying and caching the rate here * would incur more overhead than it would ever save. */ if (pc_clk_is_enabled(clk)) return HANDOFF_ENABLED_CLK; return HANDOFF_DISABLED_CLK; } struct clk_ops clk_ops_pcom = { .enable = pc_clk_enable, .disable = pc_clk_disable, .reset = pc_reset, .set_rate = pc_clk_set_rate, .set_max_rate = pc_clk_set_max_rate, .set_flags = pc_clk_set_flags, .get_rate = pc_clk_get_rate, .is_enabled = pc_clk_is_enabled, .round_rate = pc_clk_round_rate, .is_local = pc_clk_is_local, .handoff = pc_clk_handoff, }; struct clk_ops clk_ops_pcom_ext_config = { .enable = pc_clk_enable, .disable = pc_clk_disable, .reset = pc_reset, .set_rate = pc_clk_set_ext_config, .set_max_rate = pc_clk_set_max_rate, .set_flags = pc_clk_set_flags, .get_rate = pc_clk_get_rate, .is_enabled = pc_clk_is_enabled, .round_rate = pc_clk_round_rate, .is_local = pc_clk_is_local, .handoff = pc_clk_handoff, };
gpl-2.0
invisiblek/android_kernel_samsung_msm8960
drivers/staging/prima/CORE/MAC/src/pe/lim/limProcessDeauthFrame.c
377
18673
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * * Airgo Networks, Inc proprietary. All rights reserved. * This file limProcessDeauthFrame.cc contains the code * for processing Deauthentication Frame. 
* Author: Chandra Modumudi * Date: 03/24/02 * History:- * Date Modified by Modification Information * -------------------------------------------------------------------- * */ #include "palTypes.h" #include "aniGlobal.h" #include "utilsApi.h" #include "limTypes.h" #include "limUtils.h" #include "limAssocUtils.h" #include "limSecurityUtils.h" #include "limSerDesUtils.h" #include "schApi.h" #include "limSendMessages.h" /** * limProcessDeauthFrame * *FUNCTION: * This function is called by limProcessMessageQueue() upon * Deauthentication frame reception. * *LOGIC: * *ASSUMPTIONS: * *NOTE: * * @param pMac - Pointer to Global MAC structure * @param *pRxPacketInfo - A pointer to Buffer descriptor + associated PDUs * @return None */ void limProcessDeauthFrame(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo, tpPESession psessionEntry) { tANI_U8 *pBody; tANI_U16 aid, reasonCode; tpSirMacMgmtHdr pHdr; tLimMlmAssocCnf mlmAssocCnf; tLimMlmDeauthInd mlmDeauthInd; tpDphHashNode pStaDs; tpPESession pRoamSessionEntry=NULL; tANI_U8 roamSessionId; pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo); pBody = WDA_GET_RX_MPDU_DATA(pRxPacketInfo); if ((eLIM_STA_ROLE == psessionEntry->limSystemRole) && (eLIM_SME_WT_DEAUTH_STATE == psessionEntry->limSmeState)) { MTRACE(macTrace(pMac, TRACE_CODE_INFO_LOG, 0, eLOG_PROC_DEAUTH_FRAME_SCENARIO)); return; } if (limIsGroupAddr(pHdr->sa)) { // Received Deauth frame from a BC/MC address // Log error and ignore it PELOG1(limLog(pMac, LOG1, FL("received Deauth frame from a BC/MC address"));) return; } if (limIsGroupAddr(pHdr->da) && !limIsAddrBC(pHdr->da)) { // Received Deauth frame for a MC address // Log error and ignore it PELOG1(limLog(pMac, LOG1, FL("received Deauth frame for a MC address"));) return; } // Get reasonCode from Deauthentication frame body reasonCode = sirReadU16(pBody); PELOGE(limLog(pMac, LOGE, FL("received Deauth frame (mlm state = %s) with reason code %d from "), limMlmStateStr(psessionEntry->limMlmState), reasonCode); 
limPrintMacAddr(pMac, pHdr->sa, LOGE);) if (limCheckDisassocDeauthAckPending(pMac, (tANI_U8*)pHdr->sa)) { PELOGW(limLog(pMac, LOGW, FL("Ignore the Deauth received, while waiting for ack of disassoc/deauth"));) limCleanUpDisassocDeauthReq(pMac,(tANI_U8*)pHdr->sa, 1); return; } if ( (psessionEntry->limSystemRole == eLIM_AP_ROLE )||(psessionEntry->limSystemRole == eLIM_BT_AMP_AP_ROLE) ) { switch (reasonCode) { case eSIR_MAC_UNSPEC_FAILURE_REASON: case eSIR_MAC_DEAUTH_LEAVING_BSS_REASON: // Valid reasonCode in received Deauthentication frame break; default: // Invalid reasonCode in received Deauthentication frame // Log error and ignore the frame PELOG1(limLog(pMac, LOG1, FL("received Deauth frame with invalid reasonCode %d from "), reasonCode); limPrintMacAddr(pMac, pHdr->sa, LOG1);) break; } } else if (psessionEntry->limSystemRole == eLIM_STA_ROLE ||psessionEntry->limSystemRole == eLIM_BT_AMP_STA_ROLE) { switch (reasonCode) { case eSIR_MAC_UNSPEC_FAILURE_REASON: case eSIR_MAC_PREV_AUTH_NOT_VALID_REASON: case eSIR_MAC_DEAUTH_LEAVING_BSS_REASON: case eSIR_MAC_CLASS2_FRAME_FROM_NON_AUTH_STA_REASON: case eSIR_MAC_CLASS3_FRAME_FROM_NON_ASSOC_STA_REASON: case eSIR_MAC_STA_NOT_PRE_AUTHENTICATED_REASON: // Valid reasonCode in received Deauth frame break; default: // Invalid reasonCode in received Deauth frame // Log error and ignore the frame PELOG1(limLog(pMac, LOG1, FL("received Deauth frame with invalid reasonCode %d from "), reasonCode); limPrintMacAddr(pMac, pHdr->sa, LOG1);) break; } } else { // Received Deauth frame in either IBSS // or un-known role. 
Log and ignore it limLog(pMac, LOG1, FL("received Deauth frame with reasonCode %d in role %d from "), reasonCode, psessionEntry->limSystemRole); limPrintMacAddr(pMac, pHdr->sa, LOG1); return; } /** If we are in the middle of ReAssoc, a few things could happen: * - STA is reassociating to current AP, and receives deauth from: * a) current AP * b) other AP * - STA is reassociating to a new AP, and receives deauth from: * c) current AP * d) reassoc AP * e) other AP * * The logic is: * 1) If rcv deauth from an AP other than the one we're trying to * reassociate with, then drop the deauth frame (case b, c, e) * 2) If rcv deauth from the "new" reassoc AP (case d), then restore * context with previous AP and send SME_REASSOC_RSP failure. * 3) If rcv deauth from the reassoc AP, which is also the same * AP we're currently associated with (case a), then proceed * with normal deauth processing. */ if ( psessionEntry->limReAssocbssId!=NULL ) { pRoamSessionEntry = peFindSessionByBssid(pMac, psessionEntry->limReAssocbssId, &roamSessionId); } if (limIsReassocInProgress(pMac,psessionEntry) || limIsReassocInProgress(pMac,pRoamSessionEntry)) { if (!IS_REASSOC_BSSID(pMac,pHdr->sa,psessionEntry)) { PELOGE(limLog(pMac, LOGE, FL("Rcv Deauth from unknown/different AP while ReAssoc. Ignore "));) limPrintMacAddr(pMac, pHdr->sa, LOGE); limPrintMacAddr(pMac, psessionEntry->limReAssocbssId, LOGE); return; } /** Received deauth from the new AP to which we tried to ReAssociate. * Drop ReAssoc and Restore the Previous context( current connected AP). 
*/ if (!IS_CURRENT_BSSID(pMac, pHdr->sa,psessionEntry)) { PELOGE(limLog(pMac, LOGE, FL("received DeAuth from the New AP to which ReAssoc is sent "));) limPrintMacAddr(pMac, pHdr->sa, LOGE); limPrintMacAddr(pMac, psessionEntry->bssId, LOGE); limRestorePreReassocState(pMac, eSIR_SME_REASSOC_REFUSED, reasonCode,psessionEntry); return; } } /* If received DeAuth from AP other than the one we're trying to join with * nor associated with, then ignore deauth and delete Pre-auth entry. */ if(psessionEntry->limSystemRole != eLIM_AP_ROLE ){ if (!IS_CURRENT_BSSID(pMac, pHdr->bssId, psessionEntry)) { PELOGE(limLog(pMac, LOGE, FL("received DeAuth from an AP other than we're trying to join. Ignore. "));) if (limSearchPreAuthList(pMac, pHdr->sa)) { PELOG1(limLog(pMac, LOG1, FL("Preauth entry exist. Deleting... "));) limDeletePreAuthNode(pMac, pHdr->sa); } return; } } pStaDs = dphLookupHashEntry(pMac, pHdr->sa, &aid, &psessionEntry->dph.dphHashTable); // Check for pre-assoc states switch (psessionEntry->limSystemRole) { case eLIM_STA_ROLE: case eLIM_BT_AMP_STA_ROLE: switch (psessionEntry->limMlmState) { case eLIM_MLM_WT_AUTH_FRAME2_STATE: /** * AP sent Deauth frame while waiting * for Auth frame2. Report Auth failure * to SME. */ // Log error PELOG1(limLog(pMac, LOG1, FL("received Deauth frame with failure code %d from "), reasonCode); limPrintMacAddr(pMac, pHdr->sa, LOG1);) limRestoreFromAuthState(pMac, eSIR_SME_DEAUTH_WHILE_JOIN, reasonCode,psessionEntry); return; case eLIM_MLM_AUTHENTICATED_STATE: /// Issue Deauth Indication to SME. 
palCopyMemory( pMac->hHdd, (tANI_U8 *) &mlmDeauthInd.peerMacAddr, pHdr->sa, sizeof(tSirMacAddr)); mlmDeauthInd.reasonCode = reasonCode; psessionEntry->limMlmState = eLIM_MLM_IDLE_STATE; MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, psessionEntry->peSessionId, psessionEntry->limMlmState)); limPostSmeMessage(pMac, LIM_MLM_DEAUTH_IND, (tANI_U32 *) &mlmDeauthInd); return; case eLIM_MLM_WT_ASSOC_RSP_STATE: /** * AP may have 'aged-out' our Pre-auth * context. Delete local pre-auth context * if any and issue ASSOC_CNF to SME. */ if (limSearchPreAuthList(pMac, pHdr->sa)) limDeletePreAuthNode(pMac, pHdr->sa); if (psessionEntry->pLimMlmJoinReq) { palFreeMemory( pMac->hHdd, psessionEntry->pLimMlmJoinReq); psessionEntry->pLimMlmJoinReq = NULL; } mlmAssocCnf.resultCode = eSIR_SME_DEAUTH_WHILE_JOIN; mlmAssocCnf.protStatusCode = reasonCode; /* PE session Id*/ mlmAssocCnf.sessionId = psessionEntry->peSessionId; psessionEntry->limMlmState = psessionEntry->limPrevMlmState; MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, psessionEntry->peSessionId, psessionEntry->limMlmState)); // Deactive Association response timeout limDeactivateAndChangeTimer( pMac, eLIM_ASSOC_FAIL_TIMER); limPostSmeMessage( pMac, LIM_MLM_ASSOC_CNF, (tANI_U32 *) &mlmAssocCnf); return; case eLIM_MLM_WT_ADD_STA_RSP_STATE: psessionEntry->fDeauthReceived = true; PELOGW(limLog(pMac, LOGW, FL("Received Deauth frame with Reason Code %d from Peer"), reasonCode); limPrintMacAddr(pMac, pHdr->sa, LOGW);) return ; case eLIM_MLM_IDLE_STATE: case eLIM_MLM_LINK_ESTABLISHED_STATE: #ifdef FEATURE_WLAN_TDLS if ((NULL != pStaDs) && (STA_ENTRY_TDLS_PEER == pStaDs->staType)) { PELOGE(limLog(pMac, LOGE, FL("received Deauth frame with reason code %d from Tdls peer"), reasonCode); limPrintMacAddr(pMac, pHdr->sa, LOGE);) limSendSmeTDLSDelStaInd(pMac, pStaDs, psessionEntry, reasonCode); return; } else { limDeleteTDLSPeers(pMac, psessionEntry); #endif /** * This could be Deauthentication frame from * a BSS with which pre-authentication was * 
performed. Delete Pre-auth entry if found. */ if (limSearchPreAuthList(pMac, pHdr->sa)) limDeletePreAuthNode(pMac, pHdr->sa); #ifdef FEATURE_WLAN_TDLS } #endif break; case eLIM_MLM_WT_REASSOC_RSP_STATE: break; case eLIM_MLM_WT_FT_REASSOC_RSP_STATE: PELOGE(limLog(pMac, LOGE, FL("received Deauth frame in FT state %X with reasonCode=%d from "), psessionEntry->limMlmState, reasonCode);) limPrintMacAddr(pMac, pHdr->sa, LOGE); break; default: PELOG1(limLog(pMac, LOG1, FL("received Deauth frame in state %X with reasonCode=%d from "), psessionEntry->limMlmState, reasonCode);) limPrintMacAddr(pMac, pHdr->sa, LOG1); return; } break; case eLIM_STA_IN_IBSS_ROLE: break; case eLIM_AP_ROLE: break; default: // eLIM_AP_ROLE or eLIM_BT_AMP_AP_ROLE return; } // end switch (pMac->lim.gLimSystemRole) /** * Extract 'associated' context for STA, if any. * This is maintained by DPH and created by LIM. */ if (NULL == pStaDs) return; if ((pStaDs->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_STA_RSP_STATE) || (pStaDs->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_BSS_RSP_STATE)) { /** * Already in the process of deleting context for the peer * and received Deauthentication frame. Log and Ignore. */ PELOG1(limLog(pMac, LOG1, FL("received Deauth frame from peer that is in state %X, addr "), pStaDs->mlmStaContext.mlmState); limPrintMacAddr(pMac, pHdr->sa, LOG1);) return; } pStaDs->mlmStaContext.disassocReason = (tSirMacReasonCodes)reasonCode; pStaDs->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DEAUTH; /// Issue Deauth Indication to SME. palCopyMemory( pMac->hHdd, (tANI_U8 *) &mlmDeauthInd.peerMacAddr, pStaDs->staAddr, sizeof(tSirMacAddr)); mlmDeauthInd.reasonCode = (tANI_U8) pStaDs->mlmStaContext.disassocReason; mlmDeauthInd.deauthTrigger = eLIM_PEER_ENTITY_DEAUTH; /* * If we're in the middle of ReAssoc and received deauth from * the ReAssoc AP, then notify SME by sending REASSOC_RSP with * failure result code. 
SME will post the disconnect to the * supplicant and the latter would start a fresh assoc. */ if (limIsReassocInProgress(pMac,psessionEntry)) { /** * AP may have 'aged-out' our Pre-auth * context. Delete local pre-auth context * if any and issue REASSOC_CNF to SME. */ if (limSearchPreAuthList(pMac, pHdr->sa)) limDeletePreAuthNode(pMac, pHdr->sa); if (psessionEntry->limAssocResponseData) { palFreeMemory(pMac->hHdd, psessionEntry->limAssocResponseData); psessionEntry->limAssocResponseData = NULL; } PELOGE(limLog(pMac, LOGE, FL("Rcv Deauth from ReAssoc AP. Issue REASSOC_CNF. "));) /* * TODO: Instead of overloading eSIR_SME_FT_REASSOC_TIMEOUT_FAILURE * it would have been good to define/use a different failure type. * Using eSIR_SME_FT_REASSOC_FAILURE does not seem to clean-up * properly and we end up seeing "transmit queue timeout". */ limPostReassocFailure(pMac, eSIR_SME_FT_REASSOC_TIMEOUT_FAILURE, eSIR_MAC_UNSPEC_FAILURE_STATUS, psessionEntry); return; } /// Deauthentication from peer MAC entity limPostSmeMessage(pMac, LIM_MLM_DEAUTH_IND, (tANI_U32 *) &mlmDeauthInd); // send eWNI_SME_DEAUTH_IND to SME limSendSmeDeauthInd(pMac, pStaDs, psessionEntry); return; } /*** end limProcessDeauthFrame() ***/
gpl-2.0
tcp209/kernel_samsung_epic4gtouch
drivers/ide/hpt366.c
889
43072
/* * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. * * Thanks to HighPoint Technologies for their assistance, and hardware. * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his * donation of an ABit BP6 mainboard, processor, and memory acellerated * development and support. * * * HighPoint has its own drivers (open source except for the RAID part) * available from http://www.highpoint-tech.com/BIOS%20+%20Driver/. * This may be useful to anyone wanting to work on this driver, however do not * trust them too much since the code tends to become less and less meaningful * as the time passes... :-/ * * Note that final HPT370 support was done by force extraction of GPL. * * - add function for getting/setting power status of drive * - the HPT370's state machine can get confused. reset it before each dma * xfer to prevent that from happening. * - reset state engine whenever we get an error. * - check for busmaster state at end of dma. * - use new highpoint timings. * - detect bus speed using highpoint register. * - use pll if we don't have a clock table. added a 66MHz table that's * just 2x the 33MHz table. * - removed turnaround. NOTE: we never want to switch between pll and * pci clocks as the chip can glitch in those cases. the highpoint * approved workaround slows everything down too much to be useful. in * addition, we would have to serialize access to each chip. * Adrian Sun <a.sun@sun.com> * * add drive timings for 66MHz PCI bus, * fix ATA Cable signal detection, fix incorrect /proc info * add /proc display for per-drive PIO/DMA/UDMA mode and * per-channel ATA-33/66 Cable detect. 
* Duncan Laurie <void@sun.com> * * fixup /proc output for multiple controllers * Tim Hockin <thockin@sun.com> * * On hpt366: * Reset the hpt366 on error, reset on dma * Fix disabling Fast Interrupt hpt366. * Mike Waychison <crlf@sun.com> * * Added support for 372N clocking and clock switching. The 372N needs * different clocks on read/write. This requires overloading rw_disk and * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for * keeping me sane. * Alan Cox <alan@lxorguk.ukuu.org.uk> * * - fix the clock turnaround code: it was writing to the wrong ports when * called for the secondary channel, caching the current clock mode per- * channel caused the cached register value to get out of sync with the * actual one, the channels weren't serialized, the turnaround shouldn't * be done on 66 MHz PCI bus * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used * does not allow for this speed anyway * - avoid touching disabled channels (e.g. HPT371/N are single channel chips, * their primary channel is kind of virtual, it isn't tied to any pins) * - fix/remove bad/unused timing tables and use one set of tables for the whole * HPT37x chip family; save space by introducing the separate transfer mode * table in which the mode lookup is done * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives * the wrong PCI frequency since DPLL has already been calibrated by BIOS; * read it only from the function 0 of HPT374 chips * - fix the hotswap code: it caused RESET- to glitch when tristating the bus, * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead * - pass to init_chipset() handlers a copy of the IDE PCI device structure as * they tamper with its fields * - pass to the init_setup handlers a copy of the ide_pci_device_t structure * since they may tamper with its fields * - prefix the driver startup messages with the real chip name * - claim the extra 240 bytes of I/O space for all chips * - 
optimize the UltraDMA filtering and the drive list lookup code * - use pci_get_slot() to get to the function 1 of HPT36x/374 * - cache offset of the channel's misc. control registers (MCRs) being used * throughout the driver * - only touch the relevant MCR when detecting the cable type on HPT374's * function 1 * - rename all the register related variables consistently * - move all the interrupt twiddling code from the speedproc handlers into * init_hwif_hpt366(), also grouping all the DMA related code together there * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings * when setting an UltraDMA mode * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select * the best possible one * - clean up DMA timeout handling for HPT370 * - switch to using the enumeration type to differ between the numerous chip * variants, matching PCI device/revision ID with the chip type early, at the * init_setup stage * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies, * stop duplicating it for each channel by storing the pointer in the pci_dev * structure: first, at the init_setup stage, point it to a static "template" * with only the chip type and its specific base DPLL frequency, the highest * UltraDMA mode, and the chip settings table pointer filled, then, at the * init_chipset stage, allocate per-chip instance and fill it with the rest * of the necessary information * - get rid of the constant thresholds in the HPT37x PCI clock detection code, * switch to calculating PCI clock frequency based on the chip's base DPLL * frequency * - switch to using the DPLL clock and enable UltraATA/133 mode by default on * anything newer than HPT370/A (except HPT374 that is not capable of this * mode according to the manual) * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(), * also fixing the interchanged 25/40 MHz PCI clock cases 
for HPT36x chips; * unify HPT36x/37x timing setup code and the speedproc handlers by joining * the register setting lists into the table indexed by the clock selected * - set the correct hwif->ultra_mask for each individual chip * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards * - stop resetting HPT370's state machine before each DMA transfer as that has * caused more harm than good * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com> */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ide.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #define DRV_NAME "hpt366" /* various tuning parameters */ #undef HPT_RESET_STATE_ENGINE #undef HPT_DELAY_INTERRUPT static const char *bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", "IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", NULL }; static const char *bad_ata66_4[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", "IBM-DTLA-307030", "IBM-DTLA-307020", "IBM-DTLA-307015", "IBM-DTLA-305040", "IBM-DTLA-305030", "IBM-DTLA-305020", "IC35L010AVER07-0", "IC35L020AVER07-0", "IC35L030AVER07-0", "IC35L040AVER07-0", "IC35L060AVER07-0", "WDC AC310200R", "MAXTOR STM3320620A", NULL }; static const char *bad_ata66_3[] = { "WDC AC310200R", NULL }; static const char *bad_ata33[] = { "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 
90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; static u8 xfer_speeds[] = { XFER_UDMA_6, XFER_UDMA_5, XFER_UDMA_4, XFER_UDMA_3, XFER_UDMA_2, XFER_UDMA_1, XFER_UDMA_0, XFER_MW_DMA_2, XFER_MW_DMA_1, XFER_MW_DMA_0, XFER_PIO_4, XFER_PIO_3, XFER_PIO_2, XFER_PIO_1, XFER_PIO_0 }; /* Key for bus clock timings * 36x 37x * bits bits * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. * cycles = value + 1 * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer. * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock. * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and * MW DMA xfer. * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for * task file register access. * 28 28 UDMA enable. * 29 29 DMA enable. * 30 30 PIO MST enable. If set, the chip is in bus master mode during * PIO xfer. * 31 31 FIFO enable. 
*/ static u32 forty_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x900fd943, /* XFER_UDMA_5 */ 0x900fd943, /* XFER_UDMA_4 */ 0x900fd943, /* XFER_UDMA_3 */ 0x900ad943, /* XFER_UDMA_2 */ 0x900bd943, /* XFER_UDMA_1 */ 0x9008d943, /* XFER_UDMA_0 */ 0x9008d943, /* XFER_MW_DMA_2 */ 0xa008d943, /* XFER_MW_DMA_1 */ 0xa010d955, /* XFER_MW_DMA_0 */ 0xa010d9fc, /* XFER_PIO_4 */ 0xc008d963, /* XFER_PIO_3 */ 0xc010d974, /* XFER_PIO_2 */ 0xc010d997, /* XFER_PIO_1 */ 0xc010d9c7, /* XFER_PIO_0 */ 0xc018d9d9 }; static u32 thirty_three_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x90c9a731, /* XFER_UDMA_5 */ 0x90c9a731, /* XFER_UDMA_4 */ 0x90c9a731, /* XFER_UDMA_3 */ 0x90cfa731, /* XFER_UDMA_2 */ 0x90caa731, /* XFER_UDMA_1 */ 0x90cba731, /* XFER_UDMA_0 */ 0x90c8a731, /* XFER_MW_DMA_2 */ 0xa0c8a731, /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */ /* XFER_MW_DMA_0 */ 0xa0c8a797, /* XFER_PIO_4 */ 0xc0c8a731, /* XFER_PIO_3 */ 0xc0c8a742, /* XFER_PIO_2 */ 0xc0d0a753, /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */ /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */ }; static u32 twenty_five_base_hpt36x[] = { /* XFER_UDMA_6 */ 0x90c98521, /* XFER_UDMA_5 */ 0x90c98521, /* XFER_UDMA_4 */ 0x90c98521, /* XFER_UDMA_3 */ 0x90cf8521, /* XFER_UDMA_2 */ 0x90cf8521, /* XFER_UDMA_1 */ 0x90cb8521, /* XFER_UDMA_0 */ 0x90cb8521, /* XFER_MW_DMA_2 */ 0xa0ca8521, /* XFER_MW_DMA_1 */ 0xa0ca8532, /* XFER_MW_DMA_0 */ 0xa0ca8575, /* XFER_PIO_4 */ 0xc0ca8521, /* XFER_PIO_3 */ 0xc0ca8532, /* XFER_PIO_2 */ 0xc0ca8542, /* XFER_PIO_1 */ 0xc0d08572, /* XFER_PIO_0 */ 0xc0d08585 }; /* * The following are the new timing tables with PIO mode data/taskfile transfer * overclocking fixed... */ /* This table is taken from the HPT370 data manual rev. 1.02 */ static u32 thirty_three_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x16455031, /* 0x16655031 ?? 
*/ /* XFER_UDMA_5 */ 0x16455031, /* XFER_UDMA_4 */ 0x16455031, /* XFER_UDMA_3 */ 0x166d5031, /* XFER_UDMA_2 */ 0x16495031, /* XFER_UDMA_1 */ 0x164d5033, /* XFER_UDMA_0 */ 0x16515097, /* XFER_MW_DMA_2 */ 0x26515031, /* XFER_MW_DMA_1 */ 0x26515033, /* XFER_MW_DMA_0 */ 0x26515097, /* XFER_PIO_4 */ 0x06515021, /* XFER_PIO_3 */ 0x06515022, /* XFER_PIO_2 */ 0x06515033, /* XFER_PIO_1 */ 0x06915065, /* XFER_PIO_0 */ 0x06d1508a }; static u32 fifty_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x1a861842, /* XFER_UDMA_5 */ 0x1a861842, /* XFER_UDMA_4 */ 0x1aae1842, /* XFER_UDMA_3 */ 0x1a8e1842, /* XFER_UDMA_2 */ 0x1a0e1842, /* XFER_UDMA_1 */ 0x1a161854, /* XFER_UDMA_0 */ 0x1a1a18ea, /* XFER_MW_DMA_2 */ 0x2a821842, /* XFER_MW_DMA_1 */ 0x2a821854, /* XFER_MW_DMA_0 */ 0x2a8218ea, /* XFER_PIO_4 */ 0x0a821842, /* XFER_PIO_3 */ 0x0a821843, /* XFER_PIO_2 */ 0x0a821855, /* XFER_PIO_1 */ 0x0ac218a8, /* XFER_PIO_0 */ 0x0b02190c }; static u32 sixty_six_base_hpt37x[] = { /* XFER_UDMA_6 */ 0x1c86fe62, /* XFER_UDMA_5 */ 0x1caefe62, /* 0x1c8afe62 */ /* XFER_UDMA_4 */ 0x1c8afe62, /* XFER_UDMA_3 */ 0x1c8efe62, /* XFER_UDMA_2 */ 0x1c92fe62, /* XFER_UDMA_1 */ 0x1c9afe62, /* XFER_UDMA_0 */ 0x1c82fe62, /* XFER_MW_DMA_2 */ 0x2c82fe62, /* XFER_MW_DMA_1 */ 0x2c82fe66, /* XFER_MW_DMA_0 */ 0x2c82ff2e, /* XFER_PIO_4 */ 0x0c82fe62, /* XFER_PIO_3 */ 0x0c82fe84, /* XFER_PIO_2 */ 0x0c82fea6, /* XFER_PIO_1 */ 0x0d02ff26, /* XFER_PIO_0 */ 0x0d42ff7f }; #define HPT371_ALLOW_ATA133_6 1 #define HPT302_ALLOW_ATA133_6 1 #define HPT372_ALLOW_ATA133_6 1 #define HPT370_ALLOW_ATA100_5 0 #define HPT366_ALLOW_ATA66_4 1 #define HPT366_ALLOW_ATA66_3 1 /* Supported ATA clock frequencies */ enum ata_clock { ATA_CLOCK_25MHZ, ATA_CLOCK_33MHZ, ATA_CLOCK_40MHZ, ATA_CLOCK_50MHZ, ATA_CLOCK_66MHZ, NUM_ATA_CLOCKS }; struct hpt_timings { u32 pio_mask; u32 dma_mask; u32 ultra_mask; u32 *clock_table[NUM_ATA_CLOCKS]; }; /* * Hold all the HighPoint chip information in one place. 
*/ struct hpt_info { char *chip_name; /* Chip name */ u8 chip_type; /* Chip type */ u8 udma_mask; /* Allowed UltraDMA modes mask. */ u8 dpll_clk; /* DPLL clock in MHz */ u8 pci_clk; /* PCI clock in MHz */ struct hpt_timings *timings; /* Chipset timing data */ u8 clock; /* ATA clock selected */ }; /* Supported HighPoint chips */ enum { HPT36x, HPT370, HPT370A, HPT374, HPT372, HPT372A, HPT302, HPT371, HPT372N, HPT302N, HPT371N }; static struct hpt_timings hpt36x_timings = { .pio_mask = 0xc1f8ffff, .dma_mask = 0x303800ff, .ultra_mask = 0x30070000, .clock_table = { [ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x, [ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x, [ATA_CLOCK_40MHZ] = forty_base_hpt36x, [ATA_CLOCK_50MHZ] = NULL, [ATA_CLOCK_66MHZ] = NULL } }; static struct hpt_timings hpt37x_timings = { .pio_mask = 0xcfc3ffff, .dma_mask = 0x31c001ff, .ultra_mask = 0x303c0000, .clock_table = { [ATA_CLOCK_25MHZ] = NULL, [ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x, [ATA_CLOCK_40MHZ] = NULL, [ATA_CLOCK_50MHZ] = fifty_base_hpt37x, [ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x } }; static const struct hpt_info hpt36x __devinitdata = { .chip_name = "HPT36x", .chip_type = HPT36x, .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, .dpll_clk = 0, /* no DPLL */ .timings = &hpt36x_timings }; static const struct hpt_info hpt370 __devinitdata = { .chip_name = "HPT370", .chip_type = HPT370, .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt370a __devinitdata = { .chip_name = "HPT370A", .chip_type = HPT370A, .udma_mask = HPT370_ALLOW_ATA100_5 ? 
ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt374 __devinitdata = { .chip_name = "HPT374", .chip_type = HPT374, .udma_mask = ATA_UDMA5, .dpll_clk = 48, .timings = &hpt37x_timings }; static const struct hpt_info hpt372 __devinitdata = { .chip_name = "HPT372", .chip_type = HPT372, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 55, .timings = &hpt37x_timings }; static const struct hpt_info hpt372a __devinitdata = { .chip_name = "HPT372A", .chip_type = HPT372A, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt302 __devinitdata = { .chip_name = "HPT302", .chip_type = HPT302, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt371 __devinitdata = { .chip_name = "HPT371", .chip_type = HPT371, .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, .timings = &hpt37x_timings }; static const struct hpt_info hpt372n __devinitdata = { .chip_name = "HPT372N", .chip_type = HPT372N, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static const struct hpt_info hpt302n __devinitdata = { .chip_name = "HPT302N", .chip_type = HPT302N, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static const struct hpt_info hpt371n __devinitdata = { .chip_name = "HPT371N", .chip_type = HPT371N, .udma_mask = HPT371_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, .timings = &hpt37x_timings }; static int check_in_drive_list(ide_drive_t *drive, const char **list) { char *m = (char *)&drive->id[ATA_ID_PROD]; while (*list) if (!strcmp(*list++, m)) return 1; return 0; } static struct hpt_info *hpt3xx_get_info(struct device *dev) { struct ide_host *host = dev_get_drvdata(dev); struct hpt_info *info = (struct hpt_info *)host->host_priv; return dev == host->dev[1] ? info + 1 : info; } /* * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... */ static u8 hpt3xx_udma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 mask = hwif->ultra_mask; switch (info->chip_type) { case HPT36x: if (!HPT366_ALLOW_ATA66_4 || check_in_drive_list(drive, bad_ata66_4)) mask = ATA_UDMA3; if (!HPT366_ALLOW_ATA66_3 || check_in_drive_list(drive, bad_ata66_3)) mask = ATA_UDMA2; break; case HPT370: if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) mask = ATA_UDMA4; break; case HPT370A: if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) return ATA_UDMA4; case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) mask &= ~0x0e; /* Fall thru */ default: return mask; } return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask; } static u8 hpt3xx_mdma_filter(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct hpt_info *info = hpt3xx_get_info(hwif->dev); switch (info->chip_type) { case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) return 0x00; /* Fall thru */ default: return 0x07; } } static u32 get_speed_setting(u8 speed, struct hpt_info *info) { int i; /* * Lookup the transfer mode table to get the index into * the timing table. * * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used. 
*/ for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++) if (xfer_speeds[i] == speed) break; return info->timings->clock_table[info->clock][i]; } static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); struct hpt_timings *t = info->timings; u8 itr_addr = 0x40 + (drive->dn * 4); u32 old_itr = 0; const u8 speed = drive->dma_mode; u32 new_itr = get_speed_setting(speed, info); u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask : (speed < XFER_UDMA_0 ? t->dma_mask : t->ultra_mask); pci_read_config_dword(dev, itr_addr, &old_itr); new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask); /* * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well) * to avoid problems handling I/O errors later */ new_itr &= ~0xc0000000; pci_write_config_dword(dev, itr_addr, new_itr); } static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { drive->dma_mode = drive->pio_mode; hpt3xx_set_mode(hwif, drive); } static void hpt3xx_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) return; if (info->chip_type >= HPT370) { u8 scr1 = 0; pci_read_config_byte(dev, 0x5a, &scr1); if (((scr1 & 0x10) >> 4) != mask) { if (mask) scr1 |= 0x10; else scr1 &= ~0x10; pci_write_config_byte(dev, 0x5a, scr1); } } else if (mask) disable_irq(hwif->irq); else enable_irq(hwif->irq); } /* * This is specific to the HPT366 UDMA chipset * by HighPoint|Triones Technologies, Inc. 
*/ static void hpt366_dma_lost_irq(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u8 mcr1 = 0, mcr3 = 0, scr1 = 0; pci_read_config_byte(dev, 0x50, &mcr1); pci_read_config_byte(dev, 0x52, &mcr3); pci_read_config_byte(dev, 0x5a, &scr1); printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n", drive->name, __func__, mcr1, mcr3, scr1); if (scr1 & 0x10) pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); ide_dma_lost_irq(drive); } static void hpt370_clear_engine(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); pci_write_config_byte(dev, hwif->select_data, 0x37); udelay(10); } static void hpt370_irq_timeout(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u16 bfifo = 0; u8 dma_cmd; pci_read_config_word(dev, hwif->select_data + 2, &bfifo); printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); /* get DMA command mode */ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); /* stop DMA */ outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); hpt370_clear_engine(drive); } static void hpt370_dma_start(ide_drive_t *drive) { #ifdef HPT_RESET_STATE_ENGINE hpt370_clear_engine(drive); #endif ide_dma_start(drive); } static int hpt370_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); if (dma_stat & ATA_DMA_ACTIVE) { /* wait a little */ udelay(20); dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); if (dma_stat & ATA_DMA_ACTIVE) hpt370_irq_timeout(drive); } return ide_dma_end(drive); } /* returns 1 if DMA IRQ issued, 0 otherwise */ static int hpt374_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u16 bfifo = 0; u8 dma_stat; pci_read_config_word(dev, hwif->select_data + 2, &bfifo); if (bfifo & 0x1FF) { // printk("%s: %d bytes in FIFO\n", drive->name, bfifo); return 0; } dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* 
return 1 if INTR asserted */ if (dma_stat & ATA_DMA_INTR) return 1; return 0; } static int hpt374_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 mcr = 0, mcr_addr = hwif->select_data; u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01; pci_read_config_byte(dev, 0x6a, &bwsr); pci_read_config_byte(dev, mcr_addr, &mcr); if (bwsr & mask) pci_write_config_byte(dev, mcr_addr, mcr | 0x30); return ide_dma_end(drive); } /** * hpt3xxn_set_clock - perform clock switching dance * @hwif: hwif to switch * @mode: clocking mode (0x21 for write, 0x23 otherwise) * * Switch the DPLL clock on the HPT3xxN devices. This is a right mess. */ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode) { unsigned long base = hwif->extra_base; u8 scr2 = inb(base + 0x6b); if ((scr2 & 0x7f) == mode) return; /* Tristate the bus */ outb(0x80, base + 0x63); outb(0x80, base + 0x67); /* Switch clock and reset channels */ outb(mode, base + 0x6b); outb(0xc0, base + 0x69); /* * Reset the state machines. * NOTE: avoid accidentally enabling the disabled channels. */ outb(inb(base + 0x60) | 0x32, base + 0x60); outb(inb(base + 0x64) | 0x32, base + 0x64); /* Complete reset */ outb(0x00, base + 0x69); /* Reconnect channels to bus */ outb(0x00, base + 0x63); outb(0x00, base + 0x67); } /** * hpt3xxn_rw_disk - prepare for I/O * @drive: drive for command * @rq: block request structure * * This is called when a disk I/O is issued to HPT3xxN. * We need it because of the clock switching. */ static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq) { hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x23 : 0x21); } /** * hpt37x_calibrate_dpll - calibrate the DPLL * @dev: PCI device * * Perform a calibration cycle on the DPLL. 
* Returns 1 if this succeeds */ static int hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high) { u32 dpll = (f_high << 16) | f_low | 0x100; u8 scr2; int i; pci_write_config_dword(dev, 0x5c, dpll); /* Wait for oscillator ready */ for(i = 0; i < 0x5000; ++i) { udelay(50); pci_read_config_byte(dev, 0x5b, &scr2); if (scr2 & 0x80) break; } /* See if it stays ready (we'll just bail out if it's not yet) */ for(i = 0; i < 0x1000; ++i) { pci_read_config_byte(dev, 0x5b, &scr2); /* DPLL destabilized? */ if(!(scr2 & 0x80)) return 0; } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword (dev, 0x5c, &dpll); pci_write_config_dword(dev, 0x5c, (dpll & ~0x100)); return 1; } static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr) { struct ide_host *host = pci_get_drvdata(dev); struct hpt_info *info = host->host_priv + (&dev->dev == host->dev[1]); u8 chip_type = info->chip_type; u8 new_mcr, old_mcr = 0; /* * Disable the "fast interrupt" prediction. Don't hold off * on interrupts. 
(== 0x01 despite what the docs say) */ pci_read_config_byte(dev, mcr_addr + 1, &old_mcr); if (chip_type >= HPT374) new_mcr = old_mcr & ~0x07; else if (chip_type >= HPT370) { new_mcr = old_mcr; new_mcr &= ~0x02; #ifdef HPT_DELAY_INTERRUPT new_mcr &= ~0x01; #else new_mcr |= 0x01; #endif } else /* HPT366 and HPT368 */ new_mcr = old_mcr & ~0x80; if (new_mcr != old_mcr) pci_write_config_byte(dev, mcr_addr + 1, new_mcr); } static int init_chipset_hpt366(struct pci_dev *dev) { unsigned long io_base = pci_resource_start(dev, 4); struct hpt_info *info = hpt3xx_get_info(&dev->dev); const char *name = DRV_NAME; u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */ u8 chip_type; enum ata_clock clock; chip_type = info->chip_type; pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); /* * First, try to estimate the PCI clock frequency... */ if (chip_type >= HPT370) { u8 scr1 = 0; u16 f_cnt = 0; u32 temp = 0; /* Interrupt force enable. */ pci_read_config_byte(dev, 0x5a, &scr1); if (scr1 & 0x10) pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); /* * HighPoint does this for HPT372A. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT372A) outb(0x0e, io_base + 0x9c); /* * Default to PCI clock. Make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. */ pci_write_config_byte(dev, 0x5b, 0x23); /* * We'll have to read f_CNT value in order to determine * the PCI clock frequency according to the following ratio: * * f_CNT = Fpci * 192 / Fdpll * * First try reading the register in which the HighPoint BIOS * saves f_CNT value before reprogramming the DPLL from its * default setting (which differs for the various chips). 
* * NOTE: This register is only accessible via I/O space; * HPT374 BIOS only saves it for the function 0, so we have to * always read it from there -- no need to check the result of * pci_get_slot() for the function 0 as the whole device has * been already "pinned" (via function 1) in init_setup_hpt374() */ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { struct pci_dev *dev1 = pci_get_slot(dev->bus, dev->devfn - 1); unsigned long io_base = pci_resource_start(dev1, 4); temp = inl(io_base + 0x90); pci_dev_put(dev1); } else temp = inl(io_base + 0x90); /* * In case the signature check fails, we'll have to * resort to reading the f_CNT register itself in hopes * that nobody has touched the DPLL yet... */ if ((temp & 0xFFFFF000) != 0xABCDE000) { int i; printk(KERN_WARNING "%s %s: no clock data saved by " "BIOS\n", name, pci_name(dev)); /* Calculate the average value of f_CNT. */ for (temp = i = 0; i < 128; i++) { pci_read_config_word(dev, 0x78, &f_cnt); temp += f_cnt & 0x1ff; mdelay(1); } f_cnt = temp / 128; } else f_cnt = temp & 0x1ff; dpll_clk = info->dpll_clk; pci_clk = (f_cnt * dpll_clk) / 192; /* Clamp PCI clock to bands. */ if (pci_clk < 40) pci_clk = 33; else if(pci_clk < 45) pci_clk = 40; else if(pci_clk < 55) pci_clk = 50; else pci_clk = 66; printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, " "assuming %d MHz PCI\n", name, pci_name(dev), dpll_clk, f_cnt, pci_clk); } else { u32 itr1 = 0; pci_read_config_dword(dev, 0x40, &itr1); /* Detect PCI clock by looking at cmd_high_time. */ switch((itr1 >> 8) & 0x07) { case 0x09: pci_clk = 40; break; case 0x05: pci_clk = 25; break; case 0x07: default: pci_clk = 33; break; } } /* Let's assume we'll use PCI clock for the ATA clock... 
*/ switch (pci_clk) { case 25: clock = ATA_CLOCK_25MHZ; break; case 33: default: clock = ATA_CLOCK_33MHZ; break; case 40: clock = ATA_CLOCK_40MHZ; break; case 50: clock = ATA_CLOCK_50MHZ; break; case 66: clock = ATA_CLOCK_66MHZ; break; } /* * Only try the DPLL if we don't have a table for the PCI clock that * we are running at for HPT370/A, always use it for anything newer... * * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI. * We also don't like using the DPLL because this causes glitches * on PRST-/SRST- when the state engine gets reset... */ if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) { u16 f_low, delta = pci_clk < 50 ? 2 : 4; int adjust; /* * Select 66 MHz DPLL clock only if UltraATA/133 mode is * supported/enabled, use 50 MHz DPLL clock otherwise... */ if (info->udma_mask == ATA_UDMA6) { dpll_clk = 66; clock = ATA_CLOCK_66MHZ; } else if (dpll_clk) { /* HPT36x chips don't have DPLL */ dpll_clk = 50; clock = ATA_CLOCK_50MHZ; } if (info->timings->clock_table[clock] == NULL) { printk(KERN_ERR "%s %s: unknown bus timing!\n", name, pci_name(dev)); return -EIO; } /* Select the DPLL clock. */ pci_write_config_byte(dev, 0x5b, 0x21); /* * Adjust the DPLL based upon PCI clock, enable it, * and wait for stabilization... */ f_low = (pci_clk * 48) / dpll_clk; for (adjust = 0; adjust < 8; adjust++) { if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta)) break; /* * See if it'll settle at a fractionally different clock */ if (adjust & 1) f_low -= adjust >> 1; else f_low += adjust >> 1; } if (adjust == 8) { printk(KERN_ERR "%s %s: DPLL did not stabilize!\n", name, pci_name(dev)); return -EIO; } printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n", name, pci_name(dev), dpll_clk); } else { /* Mark the fact that we're not using the DPLL. */ dpll_clk = 0; printk(KERN_INFO "%s %s: using %d MHz PCI clock\n", name, pci_name(dev), pci_clk); } /* Store the clock frequencies. 
*/ info->dpll_clk = dpll_clk; info->pci_clk = pci_clk; info->clock = clock; if (chip_type >= HPT370) { u8 mcr1, mcr4; /* * Reset the state engines. * NOTE: Avoid accidentally enabling the disabled channels. */ pci_read_config_byte (dev, 0x50, &mcr1); pci_read_config_byte (dev, 0x54, &mcr4); pci_write_config_byte(dev, 0x50, (mcr1 | 0x32)); pci_write_config_byte(dev, 0x54, (mcr4 | 0x32)); udelay(100); } /* * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in * the MISC. register to stretch the UltraDMA Tss timing. * NOTE: This register is only writeable via I/O space. */ if (chip_type == HPT371N && clock == ATA_CLOCK_66MHZ) outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c); hpt3xx_disable_fast_irq(dev, 0x50); hpt3xx_disable_fast_irq(dev, 0x54); return 0; } static u8 hpt3xx_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02; /* * The HPT37x uses the CBLID pins as outputs for MA15/MA16 * address lines to access an external EEPROM. To read valid * cable detect state the pins must be enabled as inputs. 
*/ if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) { /* * HPT374 PCI function 1 * - set bit 15 of reg 0x52 to enable TCBLID as input * - set bit 15 of reg 0x56 to enable FCBLID as input */ u8 mcr_addr = hwif->select_data + 2; u16 mcr; pci_read_config_word(dev, mcr_addr, &mcr); pci_write_config_word(dev, mcr_addr, (mcr | 0x8000)); /* now read cable id register */ pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_word(dev, mcr_addr, mcr); } else if (chip_type >= HPT370) { /* * HPT370/372 and 374 pcifn 0 * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs */ u8 scr2 = 0; pci_read_config_byte(dev, 0x5b, &scr2); pci_write_config_byte(dev, 0x5b, (scr2 & ~1)); /* now read cable id register */ pci_read_config_byte(dev, 0x5a, &scr1); pci_write_config_byte(dev, 0x5b, scr2); } else pci_read_config_byte(dev, 0x5a, &scr1); return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; } static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) { struct hpt_info *info = hpt3xx_get_info(hwif->dev); u8 chip_type = info->chip_type; /* Cache the channel's MISC. control registers' offset */ hwif->select_data = hwif->channel ? 0x54 : 0x50; /* * HPT3xxN chips have some complications: * * - on 33 MHz PCI we must clock switch * - on 66 MHz PCI we must NOT use the PCI clock */ if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) { /* * Clock is shared between the channels, * so we'll have to serialize them... 
:-( */ hwif->host->host_flags |= IDE_HFLAG_SERIALIZE; hwif->rw_disk = &hpt3xxn_rw_disk; } } static int __devinit init_dma_hpt366(ide_hwif_t *hwif, const struct ide_port_info *d) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long flags, base = ide_pci_dma_base(hwif, d); u8 dma_old, dma_new, masterdma = 0, slavedma = 0; if (base == 0) return -1; hwif->dma_base = base; if (ide_pci_check_simplex(hwif, d) < 0) return -1; if (ide_pci_set_master(dev, d->name) < 0) return -1; dma_old = inb(base + 2); local_irq_save(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma); if (masterdma & 0x30) dma_new |= 0x20; if ( slavedma & 0x30) dma_new |= 0x40; if (dma_new != dma_old) outb(dma_new, base + 2); local_irq_restore(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); hwif->extra_base = base + (hwif->channel ? 8 : 16); if (ide_allocate_dma_engine(hwif)) return -1; return 0; } static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2) { if (dev2->irq != dev->irq) { /* FIXME: we need a core pci_set_interrupt() */ dev2->irq = dev->irq; printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt " "fixed\n", pci_name(dev2)); } } static void __devinit hpt371_init(struct pci_dev *dev) { u8 mcr1 = 0; /* * HPT371 chips physically have only one channel, the secondary one, * but the primary channel registers do exist! Go figure... * So, we manually disable the non-existing channel here * (if the BIOS hasn't done this already). */ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x04) pci_write_config_byte(dev, 0x50, mcr1 & ~0x04); } static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) { u8 mcr1 = 0, pin1 = 0, pin2 = 0; /* * Now we'll have to force both channels enabled if * at least one of them has been enabled by BIOS... 
*/ pci_read_config_byte(dev, 0x50, &mcr1); if (mcr1 & 0x30) pci_write_config_byte(dev, 0x50, mcr1 | 0x30); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1); pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2); if (pin1 != pin2 && dev->irq == dev2->irq) { printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, " "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2); return 1; } return 0; } #define IDE_HFLAGS_HPT3XX \ (IDE_HFLAG_NO_ATAPI_DMA | \ IDE_HFLAG_OFF_BOARD) static const struct ide_port_ops hpt3xx_port_ops = { .set_pio_mode = hpt3xx_set_pio_mode, .set_dma_mode = hpt3xx_set_mode, .maskproc = hpt3xx_maskproc, .mdma_filter = hpt3xx_mdma_filter, .udma_filter = hpt3xx_udma_filter, .cable_detect = hpt3xx_cable_detect, }; static const struct ide_dma_ops hpt37x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = hpt374_dma_end, .dma_test_irq = hpt374_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt370_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = hpt370_dma_start, .dma_end = hpt370_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_clear = hpt370_irq_timeout, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_dma_ops hpt36x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = hpt366_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_port_info hpt366_chipsets[] __devinitdata = { { /* 0: HPT36x */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, /* * HPT36x chips have 
one channel per function and have * both channel enable bits located differently and visible * to both functions -- really stupid design decision... :-( * Bit 4 is for the primary channel, bit 5 for the secondary. */ .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt36x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }, { /* 1: HPT3xx */ .name = DRV_NAME, .init_chipset = init_chipset_hpt366, .init_hwif = init_hwif_hpt366, .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .port_ops = &hpt3xx_port_ops, .dma_ops = &hpt37x_dma_ops, .host_flags = IDE_HFLAGS_HPT3XX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, } }; /** * hpt366_init_one - called when an HPT366 is found * @dev: the hpt366 device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. */ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct hpt_info *info = NULL; struct hpt_info *dyn_info; struct pci_dev *dev2 = NULL; struct ide_port_info d; u8 idx = id->driver_data; u8 rev = dev->revision; int ret; if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1)) return -ENODEV; switch (idx) { case 0: if (rev < 3) info = &hpt36x; else { switch (min_t(u8, rev, 6)) { case 3: info = &hpt370; break; case 4: info = &hpt370a; break; case 5: info = &hpt372; break; case 6: info = &hpt372n; break; } idx++; } break; case 1: info = (rev > 1) ? &hpt372n : &hpt372a; break; case 2: info = (rev > 1) ? &hpt302n : &hpt302; break; case 3: hpt371_init(dev); info = (rev > 1) ? 
&hpt371n : &hpt371; break; case 4: info = &hpt374; break; case 5: info = &hpt372n; break; } printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name); d = hpt366_chipsets[min_t(u8, idx, 1)]; d.udma_mask = info->udma_mask; /* fixup ->dma_ops for HPT370/HPT370A */ if (info == &hpt370 || info == &hpt370a) d.dma_ops = &hpt370_dma_ops; if (info == &hpt36x || info == &hpt374) dev2 = pci_get_slot(dev->bus, dev->devfn + 1); dyn_info = kzalloc(sizeof(*dyn_info) * (dev2 ? 2 : 1), GFP_KERNEL); if (dyn_info == NULL) { printk(KERN_ERR "%s %s: out of memory!\n", d.name, pci_name(dev)); pci_dev_put(dev2); return -ENOMEM; } /* * Copy everything from a static "template" structure * to just allocated per-chip hpt_info structure. */ memcpy(dyn_info, info, sizeof(*dyn_info)); if (dev2) { memcpy(dyn_info + 1, info, sizeof(*dyn_info)); if (info == &hpt374) hpt374_init(dev, dev2); else { if (hpt36x_init(dev, dev2)) d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE; } ret = ide_pci_init_two(dev, dev2, &d, dyn_info); if (ret < 0) { pci_dev_put(dev2); kfree(dyn_info); } return ret; } ret = ide_pci_init_one(dev, &d, dyn_info); if (ret < 0) kfree(dyn_info); return ret; } static void __devexit hpt366_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); struct ide_info *info = host->host_priv; struct pci_dev *dev2 = host->dev[1] ? 
to_pci_dev(host->dev[1]) : NULL; ide_pci_remove(dev); pci_dev_put(dev2); kfree(info); } static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), 3 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), 4 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), 5 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl); static struct pci_driver hpt366_pci_driver = { .name = "HPT366_IDE", .id_table = hpt366_pci_tbl, .probe = hpt366_init_one, .remove = __devexit_p(hpt366_remove), .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init hpt366_ide_init(void) { return ide_pci_register_driver(&hpt366_pci_driver); } static void __exit hpt366_ide_exit(void) { pci_unregister_driver(&hpt366_pci_driver); } module_init(hpt366_ide_init); module_exit(hpt366_ide_exit); MODULE_AUTHOR("Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
tommytarts/QuantumKernelS3
drivers/xen/swiotlb-xen.c
1657
15866
/* * Copyright 2010 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> * * This code provides a IOMMU for Xen PV guests with PCI passthrough. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by * the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * PV guests under Xen are running in an non-contiguous memory architecture. * * When PCI pass-through is utilized, this necessitates an IOMMU for * translating bus (DMA) to virtual and vice-versa and also providing a * mechanism to have contiguous pages for device drivers operations (say DMA * operations). * * Specifically, under Xen the Linux idea of pages is an illusion. It * assumes that pages start at zero and go up to the available memory. To * help with that, the Linux Xen MMU provides a lookup mechanism to * translate the page frame numbers (PFN) to machine frame numbers (MFN) * and vice-versa. The MFN are the "real" frame numbers. Furthermore * memory is not contiguous. Xen hypervisor stitches memory for guests * from different pools, which means there is no guarantee that PFN==MFN * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are * allocated in descending order (high to low), meaning the guest might * never get any MFN's under the 4GB mark. * */ #include <linux/bootmem.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <xen/swiotlb-xen.h> #include <xen/page.h> #include <xen/xen-ops.h> #include <xen/hvc-console.h> /* * Used to do a quick range check in swiotlb_tbl_unmap_single and * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this * API. 
*/ static char *xen_io_tlb_start, *xen_io_tlb_end; static unsigned long xen_io_tlb_nslabs; /* * Quick lookup value of the bus address of the IOTLB. */ u64 start_dma_addr; static dma_addr_t xen_phys_to_bus(phys_addr_t paddr) { return phys_to_machine(XPADDR(paddr)).maddr; } static phys_addr_t xen_bus_to_phys(dma_addr_t baddr) { return machine_to_phys(XMADDR(baddr)).paddr; } static dma_addr_t xen_virt_to_bus(void *address) { return xen_phys_to_bus(virt_to_phys(address)); } static int check_pages_physically_contiguous(unsigned long pfn, unsigned int offset, size_t length) { unsigned long next_mfn; int i; int nr_pages; next_mfn = pfn_to_mfn(pfn); nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT; for (i = 1; i < nr_pages; i++) { if (pfn_to_mfn(++pfn) != ++next_mfn) return 0; } return 1; } static int range_straddles_page_boundary(phys_addr_t p, size_t size) { unsigned long pfn = PFN_DOWN(p); unsigned int offset = p & ~PAGE_MASK; if (offset + size <= PAGE_SIZE) return 0; if (check_pages_physically_contiguous(pfn, offset, size)) return 0; return 1; } static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) { unsigned long mfn = PFN_DOWN(dma_addr); unsigned long pfn = mfn_to_local_pfn(mfn); phys_addr_t paddr; /* If the address is outside our domain, it CAN * have the same virtual address as another address * in our domain. Therefore _only_ check address within our domain. 
*/ if (pfn_valid(pfn)) { paddr = PFN_PHYS(pfn); return paddr >= virt_to_phys(xen_io_tlb_start) && paddr < virt_to_phys(xen_io_tlb_end); } return 0; } static int max_dma_bits = 32; static int xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) { int i, rc; int dma_bits; dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; i = 0; do { int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); do { rc = xen_create_contiguous_region( (unsigned long)buf + (i << IO_TLB_SHIFT), get_order(slabs << IO_TLB_SHIFT), dma_bits); } while (rc && dma_bits++ < max_dma_bits); if (rc) return rc; i += slabs; } while (i < nslabs); return 0; } void __init xen_swiotlb_init(int verbose) { unsigned long bytes; int rc = -ENOMEM; unsigned long nr_tbl; char *m = NULL; unsigned int repeat = 3; nr_tbl = swiotlb_nr_tbl(); if (nr_tbl) xen_io_tlb_nslabs = nr_tbl; else { xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); } retry: bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; /* * Get IO TLB memory from any location. */ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes)); if (!xen_io_tlb_start) { m = "Cannot allocate Xen-SWIOTLB buffer!\n"; goto error; } xen_io_tlb_end = xen_io_tlb_start + bytes; /* * And replace that memory with pages under 4GB. 
*/ rc = xen_swiotlb_fixup(xen_io_tlb_start, bytes, xen_io_tlb_nslabs); if (rc) { free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes)); m = "Failed to get contiguous memory for DMA from Xen!\n"\ "You either: don't have the permissions, do not have"\ " enough free memory under 4GB, or the hypervisor memory"\ "is too fragmented!"; goto error; } start_dma_addr = xen_virt_to_bus(xen_io_tlb_start); swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose); return; error: if (repeat--) { xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */ (xen_io_tlb_nslabs >> 1)); printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n", (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20); goto retry; } xen_raw_printk("%s (rc:%d)", m, rc); panic("%s (rc:%d)", m, rc); } void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { void *ret; int order = get_order(size); u64 dma_mask = DMA_BIT_MASK(32); unsigned long vstart; phys_addr_t phys; dma_addr_t dev_addr; /* * Ignore region specifiers - the kernel's ideas of * pseudo-phys memory layout has nothing to do with the * machine physical layout. We can't allocate highmem * because we can't return a pointer to it. 
*/ flags &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) return ret; vstart = __get_free_pages(flags, order); ret = (void *)vstart; if (!ret) return ret; if (hwdev && hwdev->coherent_dma_mask) dma_mask = dma_alloc_coherent_mask(hwdev, flags); phys = virt_to_phys(ret); dev_addr = xen_phys_to_bus(phys); if (((dev_addr + size - 1 <= dma_mask)) && !range_straddles_page_boundary(phys, size)) *dma_handle = dev_addr; else { if (xen_create_contiguous_region(vstart, order, fls64(dma_mask)) != 0) { free_pages(vstart, order); return NULL; } *dma_handle = virt_to_machine(ret).maddr; } memset(ret, 0, size); return ret; } EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr, struct dma_attrs *attrs) { int order = get_order(size); phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); if (dma_release_from_coherent(hwdev, order, vaddr)) return; if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; phys = virt_to_phys(vaddr); if (((dev_addr + size - 1 > dma_mask)) || range_straddles_page_boundary(phys, size)) xen_destroy_contiguous_region((unsigned long)vaddr, order); free_pages((unsigned long)vaddr, order); } EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); /* * Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. * * Once the device is given the dma address, the device owns this memory until * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed. 
*/ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dev_addr = xen_phys_to_bus(phys); void *map; BUG_ON(dir == DMA_NONE); /* * If the address happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce * buffering it. */ if (dma_capable(dev, dev_addr, size) && !range_straddles_page_boundary(phys, size) && !swiotlb_force) return dev_addr; /* * Oh well, have to allocate and map a bounce buffer. */ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir); if (!map) return DMA_ERROR_CODE; dev_addr = xen_virt_to_bus(map); /* * Ensure that the address returned is DMA'ble */ if (!dma_capable(dev, dev_addr, size)) { swiotlb_tbl_unmap_single(dev, map, size, dir); dev_addr = 0; } return dev_addr; } EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must * match what was provided for in a previous xen_swiotlb_map_page call. All * other usages are undefined. * * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { phys_addr_t paddr = xen_bus_to_phys(dev_addr); BUG_ON(dir == DMA_NONE); /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(dev_addr)) { swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir); return; } if (dir != DMA_FROM_DEVICE) return; /* * phys_to_virt doesn't work with hihgmem page but we could * call dma_mark_clean() with hihgmem page here. However, we * are fine since dma_mark_clean() is null on POWERPC. We can * make dma_mark_clean() take a physical address if necessary. 
*/ dma_mark_clean(phys_to_virt(paddr), size); } void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { xen_unmap_single(hwdev, dev_addr, size, dir); } EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page); /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. * * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer * using the cpu, yet do not wish to teardown the dma mapping, you must * call this function before doing so. At the next point you give the dma * address back to the card, you must first perform a * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer */ static void xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, enum dma_sync_target target) { phys_addr_t paddr = xen_bus_to_phys(dev_addr); BUG_ON(dir == DMA_NONE); /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(dev_addr)) { swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir, target); return; } if (dir != DMA_FROM_DEVICE) return; dma_mark_clean(phys_to_virt(paddr), size); } void xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu); void xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above xen_swiotlb_map_page * interface. Here the scatter gather list elements are each tagged with the * appropriate dma address and length. 
They are obtained via * sg_dma_{address,length}(SG). * * NOTE: An implementation may be able to use a smaller number of * DMA address/length pairs than there are SG table elements. * (for example via virtual mapping capabilities) * The routine returns the number of addr/length pairs actually * used, at most nents. * * Device ownership issues as mentioned above for xen_swiotlb_map_page are the * same here. */ int xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { phys_addr_t paddr = sg_phys(sg); dma_addr_t dev_addr = xen_phys_to_bus(paddr); if (swiotlb_force || !dma_capable(hwdev, dev_addr, sg->length) || range_straddles_page_boundary(paddr, sg->length)) { void *map = swiotlb_tbl_map_single(hwdev, start_dma_addr, sg_phys(sg), sg->length, dir); if (!map) { /* Don't panic here, we expect map_sg users to do proper error handling. */ xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, attrs); sgl[0].dma_length = 0; return DMA_ERROR_CODE; } sg->dma_address = xen_virt_to_bus(map); } else sg->dma_address = dev_addr; sg->dma_length = sg->length; } return nelems; } EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); int xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); } EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg); /* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_page() above. 
*/ void xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir); } EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs); void xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); } EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg); /* * Make physical memory consistent for a set of streaming mode DMA translations * after a transfer. * * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules * and usage. */ static void xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, enum dma_sync_target target) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nelems, i) xen_swiotlb_sync_single(hwdev, sg->dma_address, sg->dma_length, dir, target); } void xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu); void xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device); int xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return !dma_addr; } EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error); /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during bus mastering, then you would pass 0x00ffffff as the mask to * this function. 
*/ int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; } EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
gpl-2.0
bl4ckic3/linux
arch/microblaze/kernel/prom.c
1913
3152
/* * Procedures for creating, accessing and interpreting the device tree. * * Paul Mackerras August 1996. * Copyright (C) 1996-2005 Paul Mackerras. * * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. * {engebret|bergner}@us.ibm.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/threads.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/stringify.h> #include <linux/delay.h> #include <linux/initrd.h> #include <linux/bitops.h> #include <linux/kexec.h> #include <linux/debugfs.h> #include <linux/irq.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <asm/prom.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/irq.h> #include <linux/io.h> #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #ifdef CONFIG_EARLY_PRINTK static const char *stdout; static int __init early_init_dt_scan_chosen_serial(unsigned long node, const char *uname, int depth, void *data) { int l; const char *p; pr_debug("%s: depth: %d, uname: %s\n", __func__, depth, uname); if (depth == 1 && (strcmp(uname, "chosen") == 0 || strcmp(uname, "chosen@0") == 0)) { p = of_get_flat_dt_prop(node, "linux,stdout-path", &l); if (p != NULL && l > 0) stdout = p; /* store pointer to stdout-path */ } if (stdout && strstr(stdout, uname)) { p = of_get_flat_dt_prop(node, "compatible", &l); pr_debug("Compatible string: %s\n", p); if ((strncmp(p, "xlnx,xps-uart16550", 18) == 0) || (strncmp(p, "xlnx,axi-uart16550", 18) == 0)) { unsigned int addr; *(u32 *)data = UART16550; addr = *(u32 *)of_get_flat_dt_prop(node, "reg", &l); addr += *(u32 
*)of_get_flat_dt_prop(node, "reg-offset", &l); /* clear register offset */ return be32_to_cpu(addr) & ~3; } if ((strncmp(p, "xlnx,xps-uartlite", 17) == 0) || (strncmp(p, "xlnx,opb-uartlite", 17) == 0) || (strncmp(p, "xlnx,axi-uartlite", 17) == 0) || (strncmp(p, "xlnx,mdm", 8) == 0)) { const unsigned int *addrp; *(u32 *)data = UARTLITE; addrp = of_get_flat_dt_prop(node, "reg", &l); return be32_to_cpup(addrp); /* return address */ } } return 0; } /* this function is looking for early console - Microblaze specific */ int __init of_early_console(void *version) { return of_scan_flat_dt(early_init_dt_scan_chosen_serial, version); } #endif void __init early_init_devtree(void *params) { pr_debug(" -> early_init_devtree(%p)\n", params); early_init_dt_scan(params); if (!strlen(boot_command_line)) strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); memblock_allow_resize(); pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size()); pr_debug(" <- early_init_devtree()\n"); }
gpl-2.0
zarboz/thisisatest
drivers/staging/brcm80211/util/bcmutils.c
2425
17566
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <linux/printk.h> #include <bcmdefs.h> #include <stdarg.h> #include <bcmutils.h> #include <bcmnvram.h> #include <bcmdevs.h> #include <proto/802.11.h> MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); struct sk_buff *bcm_pkt_buf_get_skb(uint len) { struct sk_buff *skb; skb = dev_alloc_skb(len); if (skb) { skb_put(skb, len); skb->priority = 0; } return skb; } EXPORT_SYMBOL(bcm_pkt_buf_get_skb); /* Free the driver packet. 
Free the tag if present */ void bcm_pkt_buf_free_skb(struct sk_buff *skb) { struct sk_buff *nskb; int nest = 0; /* perversion: we use skb->next to chain multi-skb packets */ while (skb) { nskb = skb->next; skb->next = NULL; if (skb->destructor) /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if * destructor exists */ dev_kfree_skb_any(skb); else /* can free immediately (even in_irq()) if destructor * does not exist */ dev_kfree_skb(skb); nest++; skb = nskb; } } EXPORT_SYMBOL(bcm_pkt_buf_free_skb); /* copy a buffer into a pkt buffer chain */ uint bcm_pktfrombuf(struct sk_buff *p, uint offset, int len, unsigned char *buf) { uint n, ret = 0; /* skip 'offset' bytes */ for (; p && offset; p = p->next) { if (offset < (uint) (p->len)) break; offset -= p->len; } if (!p) return 0; /* copy the data */ for (; p && len; p = p->next) { n = min((uint) (p->len) - offset, (uint) len); memcpy(p->data + offset, buf, n); buf += n; len -= n; ret += n; offset = 0; } return ret; } EXPORT_SYMBOL(bcm_pktfrombuf); /* return total length of buffer chain */ uint bcm_pkttotlen(struct sk_buff *p) { uint total; total = 0; for (; p; p = p->next) total += p->len; return total; } EXPORT_SYMBOL(bcm_pkttotlen); /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence */ struct sk_buff *bcm_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p) { struct pktq_prec *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec]; if (q->head) q->tail->prev = p; else q->head = p; q->tail = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } EXPORT_SYMBOL(bcm_pktq_penq); struct sk_buff *bcm_pktq_penq_head(struct pktq *pq, int prec, struct sk_buff *p) { struct pktq_prec *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec]; if (q->head == NULL) q->tail = p; p->prev = q->head; q->head = p; q->len++; pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } 
EXPORT_SYMBOL(bcm_pktq_penq_head); struct sk_buff *bcm_pktq_pdeq(struct pktq *pq, int prec) { struct pktq_prec *q; struct sk_buff *p; q = &pq->q[prec]; p = q->head; if (p == NULL) return NULL; q->head = p->prev; if (q->head == NULL) q->tail = NULL; q->len--; pq->len--; p->prev = NULL; return p; } EXPORT_SYMBOL(bcm_pktq_pdeq); struct sk_buff *bcm_pktq_pdeq_tail(struct pktq *pq, int prec) { struct pktq_prec *q; struct sk_buff *p, *prev; q = &pq->q[prec]; p = q->head; if (p == NULL) return NULL; for (prev = NULL; p != q->tail; p = p->prev) prev = p; if (prev) prev->prev = NULL; else q->head = NULL; q->tail = prev; q->len--; pq->len--; return p; } EXPORT_SYMBOL(bcm_pktq_pdeq_tail); void bcm_pktq_pflush(struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, void *arg) { struct pktq_prec *q; struct sk_buff *p, *prev = NULL; q = &pq->q[prec]; p = q->head; while (p) { if (fn == NULL || (*fn) (p, arg)) { bool head = (p == q->head); if (head) q->head = p->prev; else prev->prev = p->prev; p->prev = NULL; bcm_pkt_buf_free_skb(p); q->len--; pq->len--; p = (head ? 
q->head : prev->prev); } else { prev = p; p = p->prev; } } if (q->head == NULL) { q->tail = NULL; } } EXPORT_SYMBOL(bcm_pktq_pflush); void bcm_pktq_flush(struct pktq *pq, bool dir, ifpkt_cb_t fn, void *arg) { int prec; for (prec = 0; prec < pq->num_prec; prec++) bcm_pktq_pflush(pq, prec, dir, fn, arg); } EXPORT_SYMBOL(bcm_pktq_flush); void bcm_pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; /* pq is variable size; only zero out what's requested */ memset(pq, 0, offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); pq->num_prec = (u16) num_prec; pq->max = (u16) max_len; for (prec = 0; prec < num_prec; prec++) pq->q[prec].max = pq->max; } EXPORT_SYMBOL(bcm_pktq_init); struct sk_buff *bcm_pktq_peek_tail(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; if (prec_out) *prec_out = prec; return pq->q[prec].tail; } EXPORT_SYMBOL(bcm_pktq_peek_tail); /* Return sum of lengths of a specific set of precedences */ int bcm_pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].len; return len; } EXPORT_SYMBOL(bcm_pktq_mlen); /* Priority dequeue from a specific set of precedences */ struct sk_buff *bcm_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct pktq_prec *q; struct sk_buff *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) if (prec-- == 0) return NULL; q = &pq->q[prec]; p = q->head; if (p == NULL) return NULL; q->head = p->prev; if (q->head == NULL) q->tail = NULL; q->len--; if (prec_out) *prec_out = prec; pq->len--; p->prev = NULL; return p; } EXPORT_SYMBOL(bcm_pktq_mdeq); /* parse a xx:xx:xx:xx:xx:xx format ethernet address */ int bcm_ether_atoe(char *p, u8 *ea) { int i = 0; for (;;) { ea[i++] 
= (char)simple_strtoul(p, &p, 16); if (!*p++ || i == 6) break; } return i == 6; } EXPORT_SYMBOL(bcm_ether_atoe); #if defined(BCMDBG) /* pretty hex print a pkt buffer chain */ void bcm_prpkt(const char *msg, struct sk_buff *p0) { struct sk_buff *p; if (msg && (msg[0] != '\0')) printk(KERN_DEBUG "%s:\n", msg); for (p = p0; p; p = p->next) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len); } EXPORT_SYMBOL(bcm_prpkt); #endif /* defined(BCMDBG) */ /* iovar table lookup */ const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) { const bcm_iovar_t *vi; const char *lookup_name; /* skip any ':' delimited option prefixes */ lookup_name = strrchr(name, ':'); if (lookup_name != NULL) lookup_name++; else lookup_name = name; for (vi = table; vi->name; vi++) { if (!strcmp(vi->name, lookup_name)) return vi; } /* ran to end of table */ return NULL; /* var name not found */ } EXPORT_SYMBOL(bcm_iovar_lookup); int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) { int bcmerror = 0; /* length check on io buf */ switch (vi->type) { case IOVT_BOOL: case IOVT_INT8: case IOVT_INT16: case IOVT_INT32: case IOVT_UINT8: case IOVT_UINT16: case IOVT_UINT32: /* all integers are s32 sized args at the ioctl interface */ if (len < (int)sizeof(int)) { bcmerror = -EOVERFLOW; } break; case IOVT_BUFFER: /* buffer must meet minimum length requirement */ if (len < vi->minlen) { bcmerror = -EOVERFLOW; } break; case IOVT_VOID: if (!set) { /* Cannot return nil... 
*/ bcmerror = -ENOTSUPP; } else if (len) { /* Set is an action w/o parameters */ bcmerror = -ENOBUFS; } break; default: /* unknown type for length check in iovar info */ bcmerror = -ENOTSUPP; } return bcmerror; } EXPORT_SYMBOL(bcm_iovar_lencheck); /******************************************************************************* * crc8 * * Computes a crc8 over the input data using the polynomial: * * x^8 + x^7 +x^6 + x^4 + x^2 + 1 * * The caller provides the initial value (either CRC8_INIT_VALUE * or the previous returned value) to allow for processing of * discontiguous blocks of data. When generating the CRC the * caller is responsible for complementing the final return value * and inserting it into the byte stream. When checking, a final * return value of CRC8_GOOD_VALUE indicates a valid CRC. * * Reference: Dallas Semiconductor Application Note 27 * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt * * **************************************************************************** */ static const u8 crc8_table[256] = { 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, 
0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F }; u8 bcm_crc8(u8 *pdata, /* pointer to array of data to process */ uint nbytes, /* number of input data bytes to process */ u8 crc /* either CRC8_INIT_VALUE or previous return value */ ) { /* loop over the buffer data */ while (nbytes-- > 0) crc = crc8_table[(crc ^ *pdata++) & 0xff]; return crc; } EXPORT_SYMBOL(bcm_crc8); /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag */ bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key) { bcm_tlv_t *elt; int totlen; elt = (bcm_tlv_t *) buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (len + 2))) return elt; elt = (bcm_tlv_t *) ((u8 *) elt + (len + 2)); totlen -= (len + 2); } return NULL; } EXPORT_SYMBOL(bcm_parse_tlvs); #if defined(BCMDBG) int bcm_format_flags(const bcm_bit_desc_t *bd, u32 flags, char *buf, int len) { int i; char *p = buf; char hexstr[16]; int slen = 0, nlen = 0; u32 bit; const char *name; if (len < 2 || !buf) return 0; buf[0] = '\0'; for (i = 0; flags != 0; i++) { bit = bd[i].bit; name = bd[i].name; if (bit == 0 && flags != 0) { /* print any unnamed bits */ snprintf(hexstr, 16, "0x%X", 
flags); name = hexstr; flags = 0; /* exit loop */ } else if ((flags & bit) == 0) continue; flags &= ~bit; nlen = strlen(name); slen += nlen; /* count btwn flag space */ if (flags != 0) slen += 1; /* need NULL char as well */ if (len <= slen) break; /* copy NULL char but don't count it */ strncpy(p, name, nlen + 1); p += nlen; /* copy btwn flag space and NULL char */ if (flags != 0) p += snprintf(p, 2, " "); len -= slen; } /* indicate the str was too short */ if (flags != 0) { if (len < 2) p -= 2 - len; /* overwrite last char */ p += snprintf(p, 2, ">"); } return (int)(p - buf); } EXPORT_SYMBOL(bcm_format_flags); /* print bytes formatted as hex to a string. return the resulting string length */ int bcm_format_hex(char *str, const void *bytes, int len) { int i; char *p = str; const u8 *src = (const u8 *)bytes; for (i = 0; i < len; i++) { p += snprintf(p, 3, "%02X", *src); src++; } return (int)(p - str); } EXPORT_SYMBOL(bcm_format_hex); #endif /* defined(BCMDBG) */ char *bcm_chipname(uint chipid, char *buf, uint len) { const char *fmt; fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; snprintf(buf, len, fmt, chipid); return buf; } EXPORT_SYMBOL(bcm_chipname); uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) { uint len; len = strlen(name) + 1; if ((len + datalen) > buflen) return 0; strncpy(buf, name, buflen); /* append data onto the end of the name string */ memcpy(&buf[len], data, datalen); len += datalen; return len; } EXPORT_SYMBOL(bcm_mkiovar); /* Quarter dBm units to mW * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 * Table is offset so the last entry is largest mW value that fits in * a u16. */ #define QDBM_OFFSET 153 /* Offset for first entry */ #define QDBM_TABLE_LEN 40 /* Table size */ /* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. 
* Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 */ #define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ /* Largest mW value that will round down to the last table entry, * QDBM_OFFSET + QDBM_TABLE_LEN-1. * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. */ #define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { /* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ /* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, /* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, /* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, /* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, /* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 }; u16 bcm_qdbm_to_mw(u8 qdbm) { uint factor = 1; int idx = qdbm - QDBM_OFFSET; if (idx >= QDBM_TABLE_LEN) { /* clamp to max u16 mW value */ return 0xFFFF; } /* scale the qdBm index up to the range of the table 0-40 * where an offset of 40 qdBm equals a factor of 10 mW. */ while (idx < 0) { idx += 40; factor *= 10; } /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. 
*/ return (nqdBm_to_mW_map[idx] + factor / 2) / factor; } EXPORT_SYMBOL(bcm_qdbm_to_mw); u8 bcm_mw_to_qdbm(u16 mw) { u8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] - nqdBm_to_mW_map[qdbm]) / 2; if (mw_uint < boundary) break; } qdbm += (u8) offset; return qdbm; } EXPORT_SYMBOL(bcm_mw_to_qdbm); uint bcm_bitcount(u8 *bitmap, uint length) { uint bitcount = 0, i; u8 tmp; for (i = 0; i < length; i++) { tmp = bitmap[i]; while (tmp) { bitcount++; tmp &= (tmp - 1); } } return bitcount; } EXPORT_SYMBOL(bcm_bitcount); /* Initialization of bcmstrbuf structure */ void bcm_binit(struct bcmstrbuf *b, char *buf, uint size) { b->origsize = b->size = size; b->origbuf = b->buf = buf; } EXPORT_SYMBOL(bcm_binit); /* Buffer sprintf wrapper to guard against buffer overflow */ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vsnprintf(b->buf, b->size, fmt, ap); /* Non Ansi C99 compliant returns -1, * Ansi compliant return r >= b->size, * bcmstdlib returns 0, handle all */ if ((r == -1) || (r >= (int)b->size) || (r == 0)) { b->size = 0; } else { b->size -= r; b->buf += r; } va_end(ap); return r; } EXPORT_SYMBOL(bcm_bprintf);
gpl-2.0
rfbsoft/polievanie_kernel
arch/metag/mm/cache.c
2425
13474
/* * arch/metag/mm/cache.c * * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies. * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. * * Cache control code */ #include <linux/export.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/core_reg.h> #include <asm/global_lock.h> #include <asm/metag_isa.h> #include <asm/metag_mem.h> #include <asm/metag_regs.h> #define DEFAULT_CACHE_WAYS_LOG2 2 /* * Size of a set in the caches. Initialised for default 16K stride, adjusted * according to values passed through TBI global heap segment via LDLK (on ATP) * or config registers (on HTP/MTP) */ static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2 - DEFAULT_CACHE_WAYS_LOG2; static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2 - DEFAULT_CACHE_WAYS_LOG2; /* * The number of sets in the caches. Initialised for HTP/ATP, adjusted * according to NOMMU setting in config registers */ static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2; static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2; #ifndef CONFIG_METAG_META12 /** * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache */ static volatile u32 lnkget_testdata[16] __initdata __aligned(64); #define LNKGET_CONSTANT 0xdeadbeef static void __init metag_lnkget_probe(void) { int temp; long flags; /* * It's conceivable the user has configured a globally coherent cache * shared with non-Linux hardware threads, so use LOCK2 to prevent them * from executing and causing cache eviction during the test. 
*/ __global_lock2(flags); /* read a value to bring it into the cache */ (void)lnkget_testdata[0]; lnkget_testdata[0] = 0; /* lnkget/lnkset it to modify it */ asm volatile( "1: LNKGETD %0, [%1]\n" " LNKSETD [%1], %2\n" " DEFR %0, TXSTAT\n" " ANDT %0, %0, #HI(0x3f000000)\n" " CMPT %0, #HI(0x02000000)\n" " BNZ 1b\n" : "=&d" (temp) : "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT) : "cc"); /* re-read it to see if the cached value changed */ temp = lnkget_testdata[0]; __global_unlock2(flags); /* flush the cache line to fix any incoherency */ __builtin_dcache_flush((void *)&lnkget_testdata[0]); #if defined(CONFIG_METAG_LNKGET_AROUND_CACHE) /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */ if (temp == LNKGET_CONSTANT) pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n"); #elif defined(CONFIG_METAG_ATOMICITY_LNKGET) /* * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary * because the kernel is configured to use LNKGET/SET for atomicity */ WARN(temp != LNKGET_CONSTANT, "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" "Expect kernel failure as it's used for atomicity primitives\n"); #elif defined(CONFIG_SMP) /* * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the * gateway page won't flush and userland could break. */ WARN(temp != LNKGET_CONSTANT, "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" "Expect userland failure as it's used for user gateway page\n"); #else /* * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it * doesn't actually matter as it doesn't have any effect on !SMP && * !ATOMICITY_LNKGET. */ if (temp != LNKGET_CONSTANT) pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"); #endif } #endif /* !CONFIG_METAG_META12 */ /** * metag_cache_probe() - Probe L1 cache configuration. * * Probe the L1 cache configuration to aid the L1 physical cache flushing * functions. 
*/ void __init metag_cache_probe(void) { #ifndef CONFIG_METAG_META12 int coreid = metag_in32(METAC_CORE_ID); int config = metag_in32(METAC_CORE_CONFIG2); int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS; if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 || cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) { icache_sets_log2 = 1; dcache_sets_log2 = 1; } /* For normal size caches, the smallest size is 4Kb. For small caches, the smallest size is 64b */ icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT) ? 6 : 12; icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS) >> METAC_CORE_C2ICSZ_S; icache_set_shift -= icache_sets_log2; dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT) ? 6 : 12; dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS) >> METAC_CORECFG2_DCSZ_S; dcache_set_shift -= dcache_sets_log2; metag_lnkget_probe(); #else /* Extract cache sizes from global heap segment */ unsigned long val, u; int width, shift, addend; PTBISEG seg; seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL, TBID_SEGSCOPE_GLOBAL, TBID_SEGTYPE_HEAP)); if (seg != NULL) { val = seg->Data[1]; /* Work out width of I-cache size bit-field */ u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS) >> METAG_TBI_ICACHE_SIZE_S; width = 0; while (u & 1) { width++; u >>= 1; } /* Extract sign-extended size addend value */ shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width); addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS) << shift) >> (shift + METAG_TBI_ICACHE_SIZE_S); /* Now calculate I-cache set size */ icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2 - DEFAULT_CACHE_WAYS_LOG2) + addend; /* Similarly for D-cache */ u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS) >> METAG_TBI_DCACHE_SIZE_S; width = 0; while (u & 1) { width++; u >>= 1; } shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width); addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS) << shift) >> (shift + METAG_TBI_DCACHE_SIZE_S); dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2 - DEFAULT_CACHE_WAYS_LOG2) + addend; } #endif } static void 
metag_phys_data_cache_flush(const void *start) { unsigned long flush0, flush1, flush2, flush3; int loops, step; int thread; int part, offset; int set_shift; /* Use a sequence of writes to flush the cache region requested */ thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S; /* Cache is broken into sets which lie in contiguous RAMs */ set_shift = dcache_set_shift; /* Move to the base of the physical cache flush region */ flush0 = LINSYSCFLUSH_DCACHE_LINE; step = 64; /* Get partition data for this thread */ part = metag_in32(SYSC_DCPART0 + (SYSC_xCPARTn_STRIDE * thread)); if ((int)start < 0) /* Access Global vs Local partition */ part >>= SYSC_xCPARTG_AND_S - SYSC_xCPARTL_AND_S; /* Extract offset and move SetOff */ offset = (part & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S; flush0 += (offset << (set_shift - 4)); /* Shrink size */ part = (part & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S; loops = ((part + 1) << (set_shift - 4)); /* Reduce loops by step of cache line size */ loops /= step; flush1 = flush0 + (1 << set_shift); flush2 = flush0 + (2 << set_shift); flush3 = flush0 + (3 << set_shift); if (dcache_sets_log2 == 1) { flush2 = flush1; flush3 = flush1 + step; flush1 = flush0 + step; step <<= 1; loops >>= 1; } /* Clear loops ways in cache */ while (loops-- != 0) { /* Clear the ways. */ #if 0 /* * GCC doesn't generate very good code for this so we * provide inline assembly instead. 
*/ metag_out8(0, flush0); metag_out8(0, flush1); metag_out8(0, flush2); metag_out8(0, flush3); flush0 += step; flush1 += step; flush2 += step; flush3 += step; #else asm volatile ( "SETB\t[%0+%4++],%5\n" "SETB\t[%1+%4++],%5\n" "SETB\t[%2+%4++],%5\n" "SETB\t[%3+%4++],%5\n" : "+e" (flush0), "+e" (flush1), "+e" (flush2), "+e" (flush3) : "e" (step), "a" (0)); #endif } } void metag_data_cache_flush_all(const void *start) { if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0) /* No need to flush the data cache it's not actually enabled */ return; metag_phys_data_cache_flush(start); } void metag_data_cache_flush(const void *start, int bytes) { unsigned long flush0; int loops, step; if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0) /* No need to flush the data cache it's not actually enabled */ return; if (bytes >= 4096) { metag_phys_data_cache_flush(start); return; } /* Use linear cache flush mechanism on META IP */ flush0 = (int)start; loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes + (DCACHE_LINE_BYTES - 1); loops >>= DCACHE_LINE_S; #define PRIM_FLUSH(addr, offset) do { \ int __addr = ((int) (addr)) + ((offset) * 64); \ __builtin_dcache_flush((void *)(__addr)); \ } while (0) #define LOOP_INC (4*64) do { /* By default stop */ step = 0; switch (loops) { /* Drop Thru Cases! 
*/ default: PRIM_FLUSH(flush0, 3); loops -= 4; step = 1; case 3: PRIM_FLUSH(flush0, 2); case 2: PRIM_FLUSH(flush0, 1); case 1: PRIM_FLUSH(flush0, 0); flush0 += LOOP_INC; case 0: break; } } while (step); } EXPORT_SYMBOL(metag_data_cache_flush); static void metag_phys_code_cache_flush(const void *start, int bytes) { unsigned long flush0, flush1, flush2, flush3, end_set; int loops, step; int thread; int set_shift, set_size; int part, offset; /* Use a sequence of writes to flush the cache region requested */ thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S; set_shift = icache_set_shift; /* Move to the base of the physical cache flush region */ flush0 = LINSYSCFLUSH_ICACHE_LINE; step = 64; /* Get partition code for this thread */ part = metag_in32(SYSC_ICPART0 + (SYSC_xCPARTn_STRIDE * thread)); if ((int)start < 0) /* Access Global vs Local partition */ part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S; /* Extract offset and move SetOff */ offset = (part & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S; flush0 += (offset << (set_shift - 4)); /* Shrink size */ part = (part & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S; loops = ((part + 1) << (set_shift - 4)); /* Where does the Set end? */ end_set = flush0 + loops; set_size = loops; #ifdef CONFIG_METAG_META12 if ((bytes < 4096) && (bytes < loops)) { /* Unreachable on HTP/MTP */ /* Only target the sets that could be relavent */ flush0 += (loops - step) & ((int) start); loops = (((int) start) & (step-1)) + bytes + step - 1; } #endif /* Reduce loops by step of cache line size */ loops /= step; flush1 = flush0 + (1<<set_shift); flush2 = flush0 + (2<<set_shift); flush3 = flush0 + (3<<set_shift); if (icache_sets_log2 == 1) { flush2 = flush1; flush3 = flush1 + step; flush1 = flush0 + step; #if 0 /* flush0 will stop one line early in this case * (flush1 will do the final line). 
* However we don't correct end_set here at the moment * because it will never wrap on HTP/MTP */ end_set -= step; #endif step <<= 1; loops >>= 1; } /* Clear loops ways in cache */ while (loops-- != 0) { #if 0 /* * GCC doesn't generate very good code for this so we * provide inline assembly instead. */ /* Clear the ways */ metag_out8(0, flush0); metag_out8(0, flush1); metag_out8(0, flush2); metag_out8(0, flush3); flush0 += step; flush1 += step; flush2 += step; flush3 += step; #else asm volatile ( "SETB\t[%0+%4++],%5\n" "SETB\t[%1+%4++],%5\n" "SETB\t[%2+%4++],%5\n" "SETB\t[%3+%4++],%5\n" : "+e" (flush0), "+e" (flush1), "+e" (flush2), "+e" (flush3) : "e" (step), "a" (0)); #endif if (flush0 == end_set) { /* Wrap within Set 0 */ flush0 -= set_size; flush1 -= set_size; flush2 -= set_size; flush3 -= set_size; } } } void metag_code_cache_flush_all(const void *start) { if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0) /* No need to flush the code cache it's not actually enabled */ return; metag_phys_code_cache_flush(start, 4096); } EXPORT_SYMBOL(metag_code_cache_flush_all); void metag_code_cache_flush(const void *start, int bytes) { #ifndef CONFIG_METAG_META12 void *flush; int loops, step; #endif /* !CONFIG_METAG_META12 */ if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0) /* No need to flush the code cache it's not actually enabled */ return; #ifdef CONFIG_METAG_META12 /* CACHEWD isn't available on Meta1, so always do full cache flush */ metag_phys_code_cache_flush(start, bytes); #else /* CONFIG_METAG_META12 */ /* If large size do full physical cache flush */ if (bytes >= 4096) { metag_phys_code_cache_flush(start, bytes); return; } /* Use linear cache flush mechanism on META IP */ flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1)); loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes + (ICACHE_LINE_BYTES-1); loops >>= ICACHE_LINE_S; #define PRIM_IFLUSH(addr, offset) \ __builtin_meta2_cachewd(((addr) + ((offset) * 64)), 
CACHEW_ICACHE_BIT) #define LOOP_INC (4*64) do { /* By default stop */ step = 0; switch (loops) { /* Drop Thru Cases! */ default: PRIM_IFLUSH(flush, 3); loops -= 4; step = 1; case 3: PRIM_IFLUSH(flush, 2); case 2: PRIM_IFLUSH(flush, 1); case 1: PRIM_IFLUSH(flush, 0); flush += LOOP_INC; case 0: break; } } while (step); #endif /* !CONFIG_METAG_META12 */ } EXPORT_SYMBOL(metag_code_cache_flush);
gpl-2.0
omega-roms/I9300_Stock_Kernel_JB_4.3
drivers/uwb/scan.c
2937
4126
/* * Ultra Wide Band * Scanning management * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * * FIXME: docs * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal * with each other. Currently seems that START_BEACON while * SCAN_ONLY will cancel the scan, so we need to update the * state here. Clarification request sent by email on * 10/05/2005. * 10/28/2005 No clear answer heard--maybe we'll hack the API * so that when we start beaconing, if the HC is * scanning in a mode not compatible with beaconing * we just fail. */ #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include "uwb-internal.h" /** * Start/stop scanning in a radio controller * * @rc: UWB Radio Controller * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12] * @type: Type of scanning to do. * @bpst_offset: value at which to start scanning (if type == * UWB_SCAN_ONLY_STARTTIME) * @returns: 0 if ok, < 0 errno code on error * * We put the command on kmalloc'ed memory as some arches cannot do * USB from the stack. The reply event is copied from an stage buffer, * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. 
*/ int uwb_rc_scan(struct uwb_rc *rc, unsigned channel, enum uwb_scan_type type, unsigned bpst_offset) { int result; struct uwb_rc_cmd_scan *cmd; struct uwb_rc_evt_confirm reply; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_kzalloc; mutex_lock(&rc->uwb_dev.mutex); cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN); cmd->bChannelNumber = channel; cmd->bScanState = type; cmd->wStartTime = cpu_to_le16(bpst_offset); reply.rceb.bEventType = UWB_RC_CET_GENERAL; reply.rceb.wEvent = UWB_RC_CMD_SCAN; result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd), &reply.rceb, sizeof(reply)); if (result < 0) goto error_cmd; if (reply.bResultCode != UWB_RC_RES_SUCCESS) { dev_err(&rc->uwb_dev.dev, "SCAN: command execution failed: %s (%d)\n", uwb_rc_strerror(reply.bResultCode), reply.bResultCode); result = -EIO; goto error_cmd; } rc->scanning = channel; rc->scan_type = type; error_cmd: mutex_unlock(&rc->uwb_dev.mutex); kfree(cmd); error_kzalloc: return result; } /* * Print scanning state */ static ssize_t uwb_rc_scan_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; ssize_t result; mutex_lock(&rc->uwb_dev.mutex); result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type); mutex_unlock(&rc->uwb_dev.mutex); return result; } /* * */ static ssize_t uwb_rc_scan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; unsigned channel; unsigned type; unsigned bpst_offset = 0; ssize_t result = -EINVAL; result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset); if (result >= 2 && type < UWB_SCAN_TOP) result = uwb_rc_scan(rc, channel, type, bpst_offset); return result < 0 ? 
result : size; } /** Radio Control sysfs interface (declaration) */ DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
gpl-2.0
C457/android_kernel_samsung_corsica
drivers/video/omap/hwa742.c
3193
27786
/* * Epson HWA742 LCD controller driver * * Copyright (C) 2004-2005 Nokia Corporation * Authors: Juha Yrjölä <juha.yrjola@nokia.com> * Imre Deak <imre.deak@nokia.com> * YUV support: Jussi Laako <jussi.laako@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <plat/dma.h> #include <plat/hwa742.h> #include "omapfb.h" #define HWA742_REV_CODE_REG 0x0 #define HWA742_CONFIG_REG 0x2 #define HWA742_PLL_DIV_REG 0x4 #define HWA742_PLL_0_REG 0x6 #define HWA742_PLL_1_REG 0x8 #define HWA742_PLL_2_REG 0xa #define HWA742_PLL_3_REG 0xc #define HWA742_PLL_4_REG 0xe #define HWA742_CLK_SRC_REG 0x12 #define HWA742_PANEL_TYPE_REG 0x14 #define HWA742_H_DISP_REG 0x16 #define HWA742_H_NDP_REG 0x18 #define HWA742_V_DISP_1_REG 0x1a #define HWA742_V_DISP_2_REG 0x1c #define HWA742_V_NDP_REG 0x1e #define HWA742_HS_W_REG 0x20 #define HWA742_HP_S_REG 0x22 #define HWA742_VS_W_REG 0x24 #define HWA742_VP_S_REG 0x26 #define HWA742_PCLK_POL_REG 0x28 #define HWA742_INPUT_MODE_REG 0x2a #define HWA742_TRANSL_MODE_REG1 0x2e #define HWA742_DISP_MODE_REG 0x34 #define HWA742_WINDOW_TYPE 0x36 #define HWA742_WINDOW_X_START_0 0x38 #define HWA742_WINDOW_X_START_1 0x3a #define HWA742_WINDOW_Y_START_0 0x3c 
#define HWA742_WINDOW_Y_START_1 0x3e #define HWA742_WINDOW_X_END_0 0x40 #define HWA742_WINDOW_X_END_1 0x42 #define HWA742_WINDOW_Y_END_0 0x44 #define HWA742_WINDOW_Y_END_1 0x46 #define HWA742_MEMORY_WRITE_LSB 0x48 #define HWA742_MEMORY_WRITE_MSB 0x49 #define HWA742_MEMORY_READ_0 0x4a #define HWA742_MEMORY_READ_1 0x4c #define HWA742_MEMORY_READ_2 0x4e #define HWA742_POWER_SAVE 0x56 #define HWA742_NDP_CTRL 0x58 #define HWA742_AUTO_UPDATE_TIME (HZ / 20) /* Reserve 4 request slots for requests in irq context */ #define REQ_POOL_SIZE 24 #define IRQ_REQ_POOL_SIZE 4 #define REQ_FROM_IRQ_POOL 0x01 #define REQ_COMPLETE 0 #define REQ_PENDING 1 struct update_param { int x, y, width, height; int color_mode; int flags; }; struct hwa742_request { struct list_head entry; unsigned int flags; int (*handler)(struct hwa742_request *req); void (*complete)(void *data); void *complete_data; union { struct update_param update; struct completion *sync; } par; }; struct { enum omapfb_update_mode update_mode; enum omapfb_update_mode update_mode_before_suspend; struct timer_list auto_update_timer; int stop_auto_update; struct omapfb_update_window auto_update_window; unsigned te_connected:1; unsigned vsync_only:1; struct hwa742_request req_pool[REQ_POOL_SIZE]; struct list_head pending_req_list; struct list_head free_req_list; struct semaphore req_sema; spinlock_t req_lock; struct extif_timings reg_timings, lut_timings; int prev_color_mode; int prev_flags; int window_type; u32 max_transmit_size; u32 extif_clk_period; unsigned long pix_tx_time; unsigned long line_upd_time; struct omapfb_device *fbdev; struct lcd_ctrl_extif *extif; const struct lcd_ctrl *int_ctrl; struct clk *sys_ck; } hwa742; struct lcd_ctrl hwa742_ctrl; static u8 hwa742_read_reg(u8 reg) { u8 data; hwa742.extif->set_bits_per_cycle(8); hwa742.extif->write_command(&reg, 1); hwa742.extif->read_data(&data, 1); return data; } static void hwa742_write_reg(u8 reg, u8 data) { hwa742.extif->set_bits_per_cycle(8); 
hwa742.extif->write_command(&reg, 1); hwa742.extif->write_data(&data, 1); } static void set_window_regs(int x_start, int y_start, int x_end, int y_end) { u8 tmp[8]; u8 cmd; x_end--; y_end--; tmp[0] = x_start; tmp[1] = x_start >> 8; tmp[2] = y_start; tmp[3] = y_start >> 8; tmp[4] = x_end; tmp[5] = x_end >> 8; tmp[6] = y_end; tmp[7] = y_end >> 8; hwa742.extif->set_bits_per_cycle(8); cmd = HWA742_WINDOW_X_START_0; hwa742.extif->write_command(&cmd, 1); hwa742.extif->write_data(tmp, 8); } static void set_format_regs(int conv, int transl, int flags) { if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) { hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01); #ifdef VERBOSE dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n"); #endif } else { hwa742.window_type = (hwa742.window_type & 0xfc); #ifdef VERBOSE dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n"); #endif } hwa742_write_reg(HWA742_INPUT_MODE_REG, conv); hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl); hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type); } static void enable_tearsync(int y, int width, int height, int screen_height, int force_vsync) { u8 b; b = hwa742_read_reg(HWA742_NDP_CTRL); b |= 1 << 2; hwa742_write_reg(HWA742_NDP_CTRL, b); if (likely(hwa742.vsync_only || force_vsync)) { hwa742.extif->enable_tearsync(1, 0); return; } if (width * hwa742.pix_tx_time < hwa742.line_upd_time) { hwa742.extif->enable_tearsync(1, 0); return; } if ((width * hwa742.pix_tx_time / 1000) * height < (y + height) * (hwa742.line_upd_time / 1000)) { hwa742.extif->enable_tearsync(1, 0); return; } hwa742.extif->enable_tearsync(1, y + 1); } static void disable_tearsync(void) { u8 b; hwa742.extif->enable_tearsync(0, 0); b = hwa742_read_reg(HWA742_NDP_CTRL); b &= ~(1 << 2); hwa742_write_reg(HWA742_NDP_CTRL, b); } static inline struct hwa742_request *alloc_req(void) { unsigned long flags; struct hwa742_request *req; int req_flags = 0; if (!in_interrupt()) down(&hwa742.req_sema); else req_flags = 
REQ_FROM_IRQ_POOL; spin_lock_irqsave(&hwa742.req_lock, flags); BUG_ON(list_empty(&hwa742.free_req_list)); req = list_entry(hwa742.free_req_list.next, struct hwa742_request, entry); list_del(&req->entry); spin_unlock_irqrestore(&hwa742.req_lock, flags); INIT_LIST_HEAD(&req->entry); req->flags = req_flags; return req; } static inline void free_req(struct hwa742_request *req) { unsigned long flags; spin_lock_irqsave(&hwa742.req_lock, flags); list_move(&req->entry, &hwa742.free_req_list); if (!(req->flags & REQ_FROM_IRQ_POOL)) up(&hwa742.req_sema); spin_unlock_irqrestore(&hwa742.req_lock, flags); } static void process_pending_requests(void) { unsigned long flags; spin_lock_irqsave(&hwa742.req_lock, flags); while (!list_empty(&hwa742.pending_req_list)) { struct hwa742_request *req; void (*complete)(void *); void *complete_data; req = list_entry(hwa742.pending_req_list.next, struct hwa742_request, entry); spin_unlock_irqrestore(&hwa742.req_lock, flags); if (req->handler(req) == REQ_PENDING) return; complete = req->complete; complete_data = req->complete_data; free_req(req); if (complete) complete(complete_data); spin_lock_irqsave(&hwa742.req_lock, flags); } spin_unlock_irqrestore(&hwa742.req_lock, flags); } static void submit_req_list(struct list_head *head) { unsigned long flags; int process = 1; spin_lock_irqsave(&hwa742.req_lock, flags); if (likely(!list_empty(&hwa742.pending_req_list))) process = 0; list_splice_init(head, hwa742.pending_req_list.prev); spin_unlock_irqrestore(&hwa742.req_lock, flags); if (process) process_pending_requests(); } static void request_complete(void *data) { struct hwa742_request *req = (struct hwa742_request *)data; void (*complete)(void *); void *complete_data; complete = req->complete; complete_data = req->complete_data; free_req(req); if (complete) complete(complete_data); process_pending_requests(); } static int send_frame_handler(struct hwa742_request *req) { struct update_param *par = &req->par.update; int x = par->x; int y = par->y; 
int w = par->width; int h = par->height; int bpp; int conv, transl; unsigned long offset; int color_mode = par->color_mode; int flags = par->flags; int scr_width = hwa742.fbdev->panel->x_res; int scr_height = hwa742.fbdev->panel->y_res; #ifdef VERBOSE dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d " "color_mode %d flags %d\n", x, y, w, h, scr_width, color_mode, flags); #endif switch (color_mode) { case OMAPFB_COLOR_YUV422: bpp = 16; conv = 0x08; transl = 0x25; break; case OMAPFB_COLOR_YUV420: bpp = 12; conv = 0x09; transl = 0x25; break; case OMAPFB_COLOR_RGB565: bpp = 16; conv = 0x01; transl = 0x05; break; default: return -EINVAL; } if (hwa742.prev_flags != flags || hwa742.prev_color_mode != color_mode) { set_format_regs(conv, transl, flags); hwa742.prev_color_mode = color_mode; hwa742.prev_flags = flags; } flags = req->par.update.flags; if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC) enable_tearsync(y, scr_width, h, scr_height, flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC); else disable_tearsync(); set_window_regs(x, y, x + w, y + h); offset = (scr_width * y + x) * bpp / 8; hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX, OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h, color_mode); hwa742.extif->set_bits_per_cycle(16); hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1); hwa742.extif->transfer_area(w, h, request_complete, req); return REQ_PENDING; } static void send_frame_complete(void *data) { hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0); } #define ADD_PREQ(_x, _y, _w, _h) do { \ req = alloc_req(); \ req->handler = send_frame_handler; \ req->complete = send_frame_complete; \ req->par.update.x = _x; \ req->par.update.y = _y; \ req->par.update.width = _w; \ req->par.update.height = _h; \ req->par.update.color_mode = color_mode;\ req->par.update.flags = flags; \ list_add_tail(&req->entry, req_head); \ } while(0) static void create_req_list(struct omapfb_update_window *win, struct list_head *req_head) { struct hwa742_request *req; int x = win->x; int y = 
win->y; int width = win->width; int height = win->height; int color_mode; int flags; flags = win->format & ~OMAPFB_FORMAT_MASK; color_mode = win->format & OMAPFB_FORMAT_MASK; if (x & 1) { ADD_PREQ(x, y, 1, height); width--; x++; flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } if (width & ~1) { unsigned int xspan = width & ~1; unsigned int ystart = y; unsigned int yspan = height; if (xspan * height * 2 > hwa742.max_transmit_size) { yspan = hwa742.max_transmit_size / (xspan * 2); ADD_PREQ(x, ystart, xspan, yspan); ystart += yspan; yspan = height - yspan; flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } ADD_PREQ(x, ystart, xspan, yspan); x += xspan; width -= xspan; flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC; } if (width) ADD_PREQ(x, y, 1, height); } static void auto_update_complete(void *data) { if (!hwa742.stop_auto_update) mod_timer(&hwa742.auto_update_timer, jiffies + HWA742_AUTO_UPDATE_TIME); } static void hwa742_update_window_auto(unsigned long arg) { LIST_HEAD(req_list); struct hwa742_request *last; create_req_list(&hwa742.auto_update_window, &req_list); last = list_entry(req_list.prev, struct hwa742_request, entry); last->complete = auto_update_complete; last->complete_data = NULL; submit_req_list(&req_list); } int hwa742_update_window_async(struct fb_info *fbi, struct omapfb_update_window *win, void (*complete_callback)(void *arg), void *complete_callback_data) { LIST_HEAD(req_list); struct hwa742_request *last; int r = 0; if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) { dev_dbg(hwa742.fbdev->dev, "invalid update mode\n"); r = -EINVAL; goto out; } if (unlikely(win->format & ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE | OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) { dev_dbg(hwa742.fbdev->dev, "invalid window flag\n"); r = -EINVAL; goto out; } create_req_list(win, &req_list); last = list_entry(req_list.prev, struct hwa742_request, entry); last->complete = complete_callback; last->complete_data = (void *)complete_callback_data; submit_req_list(&req_list); out: return r; } 
EXPORT_SYMBOL(hwa742_update_window_async); static int hwa742_setup_plane(int plane, int channel_out, unsigned long offset, int screen_width, int pos_x, int pos_y, int width, int height, int color_mode) { if (plane != OMAPFB_PLANE_GFX || channel_out != OMAPFB_CHANNEL_OUT_LCD) return -EINVAL; return 0; } static int hwa742_enable_plane(int plane, int enable) { if (plane != 0) return -EINVAL; hwa742.int_ctrl->enable_plane(plane, enable); return 0; } static int sync_handler(struct hwa742_request *req) { complete(req->par.sync); return REQ_COMPLETE; } static void hwa742_sync(void) { LIST_HEAD(req_list); struct hwa742_request *req; struct completion comp; req = alloc_req(); req->handler = sync_handler; req->complete = NULL; init_completion(&comp); req->par.sync = &comp; list_add(&req->entry, &req_list); submit_req_list(&req_list); wait_for_completion(&comp); } static void hwa742_bind_client(struct omapfb_notifier_block *nb) { dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode); if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) { omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY); } } static int hwa742_set_update_mode(enum omapfb_update_mode mode) { if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_UPDATE_DISABLED) return -EINVAL; if (mode == hwa742.update_mode) return 0; dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n", mode == OMAPFB_UPDATE_DISABLED ? "disabled" : (mode == OMAPFB_AUTO_UPDATE ? 
"auto" : "manual")); switch (hwa742.update_mode) { case OMAPFB_MANUAL_UPDATE: omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED); break; case OMAPFB_AUTO_UPDATE: hwa742.stop_auto_update = 1; del_timer_sync(&hwa742.auto_update_timer); break; case OMAPFB_UPDATE_DISABLED: break; } hwa742.update_mode = mode; hwa742_sync(); hwa742.stop_auto_update = 0; switch (mode) { case OMAPFB_MANUAL_UPDATE: omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY); break; case OMAPFB_AUTO_UPDATE: hwa742_update_window_auto(0); break; case OMAPFB_UPDATE_DISABLED: break; } return 0; } static enum omapfb_update_mode hwa742_get_update_mode(void) { return hwa742.update_mode; } static unsigned long round_to_extif_ticks(unsigned long ps, int div) { int bus_tick = hwa742.extif_clk_period * div; return (ps + bus_tick - 1) / bus_tick * bus_tick; } static int calc_reg_timing(unsigned long sysclk, int div) { struct extif_timings *t; unsigned long systim; /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns, * AccessTime 2 ns + 12.2 ns (regs), * WEOffTime = WEOnTime + 1 ns, * REOffTime = REOnTime + 16 ns (regs), * CSOffTime = REOffTime + 1 ns * ReadCycle = 2ns + 2*SYSCLK (regs), * WriteCycle = 2*SYSCLK + 2 ns, * CSPulseWidth = 10 ns */ systim = 1000000000 / (sysclk / 1000); dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps" "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div); t = &hwa742.reg_timings; memset(t, 0, sizeof(*t)); t->clk_div = div; t->cs_on_time = 0; t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div); t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div); t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div); t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div); t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div); if (t->we_cycle_time < t->we_off_time) t->we_cycle_time 
= t->we_off_time; t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div); if (t->re_cycle_time < t->re_off_time) t->re_cycle_time = t->re_off_time; t->cs_pulse_width = 0; dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n", t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n", t->we_on_time, t->we_off_time, t->re_cycle_time, t->we_cycle_time); dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n", t->access_time, t->cs_pulse_width); return hwa742.extif->convert_timings(t); } static int calc_lut_timing(unsigned long sysclk, int div) { struct extif_timings *t; unsigned long systim; /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns, * AccessTime 2 ns + 4 * SYSCLK + 26 (lut), * WEOffTime = WEOnTime + 1 ns, * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut), * CSOffTime = REOffTime + 1 ns * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut), * WriteCycle = 2*SYSCLK + 2 ns, * CSPulseWidth = 10 ns */ systim = 1000000000 / (sysclk / 1000); dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps" "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div); t = &hwa742.lut_timings; memset(t, 0, sizeof(*t)); t->clk_div = div; t->cs_on_time = 0; t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div); t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim + 26000, div); t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div); t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim + 26000, div); t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div); t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div); if (t->we_cycle_time < t->we_off_time) t->we_cycle_time = t->we_off_time; t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div); if (t->re_cycle_time < t->re_off_time) t->re_cycle_time = t->re_off_time; t->cs_pulse_width = 0; 
dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n", t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n", t->we_on_time, t->we_off_time, t->re_cycle_time, t->we_cycle_time); dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n", t->access_time, t->cs_pulse_width); return hwa742.extif->convert_timings(t); } static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div) { int max_clk_div; int div; hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div); for (div = 1; div < max_clk_div; div++) { if (calc_reg_timing(sysclk, div) == 0) break; } if (div >= max_clk_div) goto err; *extif_mem_div = div; for (div = 1; div < max_clk_div; div++) { if (calc_lut_timing(sysclk, div) == 0) break; } if (div >= max_clk_div) goto err; return 0; err: dev_err(hwa742.fbdev->dev, "can't setup timings\n"); return -1; } static void calc_hwa742_clk_rates(unsigned long ext_clk, unsigned long *sys_clk, unsigned long *pix_clk) { int pix_clk_src; int sys_div = 0, sys_mul = 0; int pix_div; pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG); pix_div = ((pix_clk_src >> 3) & 0x1f) + 1; if ((pix_clk_src & (0x3 << 1)) == 0) { /* Source is the PLL */ sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1; sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1; *sys_clk = ext_clk * sys_mul / sys_div; } else /* else source is ext clk, or oscillator */ *sys_clk = ext_clk; *pix_clk = *sys_clk / pix_div; /* HZ */ dev_dbg(hwa742.fbdev->dev, "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n", ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul); dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n", *sys_clk, *pix_clk); } static int setup_tearsync(unsigned long pix_clk, int extif_div) { int hdisp, vdisp; int hndp, vndp; int hsw, vsw; int hs, vs; int hs_pol_inv, vs_pol_inv; int use_hsvs, use_ndp; u8 b; hsw = hwa742_read_reg(HWA742_HS_W_REG); vsw = 
hwa742_read_reg(HWA742_VS_W_REG); hs_pol_inv = !(hsw & 0x80); vs_pol_inv = !(vsw & 0x80); hsw = hsw & 0x7f; vsw = vsw & 0x3f; hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8; vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) + ((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8); hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f; vndp = hwa742_read_reg(HWA742_V_NDP_REG); /* time to transfer one pixel (16bpp) in ps */ hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time; if (hwa742.extif->get_max_tx_rate != NULL) { /* * The external interface might have a rate limitation, * if so, we have to maximize our transfer rate. */ unsigned long min_tx_time; unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate(); dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n", max_tx_rate); min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */ if (hwa742.pix_tx_time < min_tx_time) hwa742.pix_tx_time = min_tx_time; } /* time to update one line in ps */ hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000); hwa742.line_upd_time *= 1000; if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time) /* * transfer speed too low, we might have to use both * HS and VS */ use_hsvs = 1; else /* decent transfer speed, we'll always use only VS */ use_hsvs = 0; if (use_hsvs && (hs_pol_inv || vs_pol_inv)) { /* * HS or'ed with VS doesn't work, use the active high * TE signal based on HNDP / VNDP */ use_ndp = 1; hs_pol_inv = 0; vs_pol_inv = 0; hs = hndp; vs = vndp; } else { /* * Use HS or'ed with VS as a TE signal if both are needed * or VNDP if only vsync is needed. */ use_ndp = 0; hs = hsw; vs = vsw; if (!use_hsvs) { hs_pol_inv = 0; vs_pol_inv = 0; } } hs = hs * 1000000 / (pix_clk / 1000); /* ps */ hs *= 1000; vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */ vs *= 1000; if (vs <= hs) return -EDOM; /* set VS to 120% of HS to minimize VS detection time */ vs = hs * 12 / 10; /* minimize HS too */ hs = 10000; b = hwa742_read_reg(HWA742_NDP_CTRL); b &= ~0x3; b |= use_hsvs ? 
1 : 0; b |= (use_ndp && use_hsvs) ? 0 : 2; hwa742_write_reg(HWA742_NDP_CTRL, b); hwa742.vsync_only = !use_hsvs; dev_dbg(hwa742.fbdev->dev, "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n", pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time); dev_dbg(hwa742.fbdev->dev, "hs %d ps vs %d ps mode %d vsync_only %d\n", hs, vs, (b & 0x3), !use_hsvs); return hwa742.extif->setup_tearsync(1, hs, vs, hs_pol_inv, vs_pol_inv, extif_div); } static void hwa742_get_caps(int plane, struct omapfb_caps *caps) { hwa742.int_ctrl->get_caps(plane, caps); caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE | OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE; if (hwa742.te_connected) caps->ctrl |= OMAPFB_CAPS_TEARSYNC; caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) | (1 << OMAPFB_COLOR_YUV420); } static void hwa742_suspend(void) { hwa742.update_mode_before_suspend = hwa742.update_mode; hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); /* Enable sleep mode */ hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1); clk_disable(hwa742.sys_ck); } static void hwa742_resume(void) { clk_enable(hwa742.sys_ck); /* Disable sleep mode */ hwa742_write_reg(HWA742_POWER_SAVE, 0); while (1) { /* Loop until PLL output is stabilized */ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7)) break; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(5)); } hwa742_set_update_mode(hwa742.update_mode_before_suspend); } static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, struct omapfb_mem_desc *req_vram) { int r = 0, i; u8 rev, conf; unsigned long ext_clk; unsigned long sys_clk, pix_clk; int extif_mem_div; struct omapfb_platform_data *omapfb_conf; struct hwa742_platform_data *ctrl_conf; BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl); hwa742.fbdev = fbdev; hwa742.extif = fbdev->ext_if; hwa742.int_ctrl = fbdev->int_ctrl; omapfb_conf = fbdev->dev->platform_data; ctrl_conf = omapfb_conf->ctrl_platform_data; if (ctrl_conf == NULL) { dev_err(fbdev->dev, "HWA742: missing platform data\n"); r = -ENOENT; goto err1; } 
hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck"); spin_lock_init(&hwa742.req_lock); if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0) goto err1; if ((r = hwa742.extif->init(fbdev)) < 0) goto err2; ext_clk = clk_get_rate(hwa742.sys_ck); if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0) goto err3; hwa742.extif->set_timings(&hwa742.reg_timings); clk_enable(hwa742.sys_ck); calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk); if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0) goto err4; hwa742.extif->set_timings(&hwa742.reg_timings); rev = hwa742_read_reg(HWA742_REV_CODE_REG); if ((rev & 0xfc) != 0x80) { dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev); r = -ENODEV; goto err4; } if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) { dev_err(fbdev->dev, "HWA742: controller not initialized by the bootloader\n"); r = -ENODEV; goto err4; } if (ctrl_conf->te_connected) { if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) { dev_err(hwa742.fbdev->dev, "HWA742: can't setup tearing synchronization\n"); goto err4; } hwa742.te_connected = 1; } hwa742.max_transmit_size = hwa742.extif->max_transmit_size; hwa742.update_mode = OMAPFB_UPDATE_DISABLED; hwa742.auto_update_window.x = 0; hwa742.auto_update_window.y = 0; hwa742.auto_update_window.width = fbdev->panel->x_res; hwa742.auto_update_window.height = fbdev->panel->y_res; hwa742.auto_update_window.format = 0; init_timer(&hwa742.auto_update_timer); hwa742.auto_update_timer.function = hwa742_update_window_auto; hwa742.auto_update_timer.data = 0; hwa742.prev_color_mode = -1; hwa742.prev_flags = 0; hwa742.fbdev = fbdev; INIT_LIST_HEAD(&hwa742.free_req_list); INIT_LIST_HEAD(&hwa742.pending_req_list); for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++) list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list); BUG_ON(i <= IRQ_REQ_POOL_SIZE); sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE); conf = hwa742_read_reg(HWA742_CONFIG_REG); dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d " "initialized 
(CNF pins %x)\n", rev & 0x03, conf & 0x07); return 0; err4: clk_disable(hwa742.sys_ck); err3: hwa742.extif->cleanup(); err2: hwa742.int_ctrl->cleanup(); err1: return r; } static void hwa742_cleanup(void) { hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); hwa742.extif->cleanup(); hwa742.int_ctrl->cleanup(); clk_disable(hwa742.sys_ck); } struct lcd_ctrl hwa742_ctrl = { .name = "hwa742", .init = hwa742_init, .cleanup = hwa742_cleanup, .bind_client = hwa742_bind_client, .get_caps = hwa742_get_caps, .set_update_mode = hwa742_set_update_mode, .get_update_mode = hwa742_get_update_mode, .setup_plane = hwa742_setup_plane, .enable_plane = hwa742_enable_plane, .update_window = hwa742_update_window_async, .sync = hwa742_sync, .suspend = hwa742_suspend, .resume = hwa742_resume, };
gpl-2.0
Team-Exhibit/android_kernel_samsung_u8500
drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
3193
7378
/*===================================================== * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. * * * This file is part of Express Card USB Driver * * $Id: *==================================================== */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/firmware.h> #include "ft1000_usb.h" #include <linux/kthread.h> MODULE_DESCRIPTION("FT1000 EXPRESS CARD DRIVER"); MODULE_LICENSE("Dual MPL/GPL"); MODULE_SUPPORTED_DEVICE("QFT FT1000 Express Cards"); void *pFileStart; size_t FileLength; #define VENDOR_ID 0x1291 /* Qualcomm vendor id */ #define PRODUCT_ID 0x11 /* fake product id */ /* table of devices that work with this driver */ static struct usb_device_id id_table[] = { {USB_DEVICE(VENDOR_ID, PRODUCT_ID)}, {}, }; MODULE_DEVICE_TABLE(usb, id_table); static bool gPollingfailed = FALSE; int ft1000_poll_thread(void *arg) { int ret = STATUS_SUCCESS; while (!kthread_should_stop()) { msleep(10); if (!gPollingfailed) { ret = ft1000_poll(arg); if (ret != STATUS_SUCCESS) { DEBUG("ft1000_poll_thread: polling failed\n"); gPollingfailed = TRUE; } } } return STATUS_SUCCESS; } static int ft1000_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_device *dev; unsigned numaltsetting; int i, ret = 0, size; struct ft1000_device *ft1000dev; struct ft1000_info *pft1000info = NULL; const struct firmware *dsp_fw; ft1000dev = kmalloc(sizeof(struct ft1000_device), GFP_KERNEL); if (!ft1000dev) { printk(KERN_ERR "out of memory allocating device structure\n"); return 0; } memset(ft1000dev, 0, sizeof(*ft1000dev)); dev = interface_to_usbdev(interface); DEBUG("ft1000_probe: usb device descriptor info:\n"); DEBUG("ft1000_probe: number of configuration is %d\n", dev->descriptor.bNumConfigurations); ft1000dev->dev = dev; ft1000dev->status = 
0; ft1000dev->net = NULL; ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); ft1000dev->rx_urb = usb_alloc_urb(0, GFP_ATOMIC); DEBUG("ft1000_probe is called\n"); numaltsetting = interface->num_altsetting; DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting); iface_desc = interface->cur_altsetting; DEBUG("ft1000_probe: number of endpoints is %d\n", iface_desc->desc.bNumEndpoints); DEBUG("ft1000_probe: descriptor type is %d\n", iface_desc->desc.bDescriptorType); DEBUG("ft1000_probe: interface number is %d\n", iface_desc->desc.bInterfaceNumber); DEBUG("ft1000_probe: alternatesetting is %d\n", iface_desc->desc.bAlternateSetting); DEBUG("ft1000_probe: interface class is %d\n", iface_desc->desc.bInterfaceClass); DEBUG("ft1000_probe: control endpoint info:\n"); DEBUG("ft1000_probe: descriptor0 type -- %d\n", iface_desc->endpoint[0].desc.bmAttributes); DEBUG("ft1000_probe: descriptor1 type -- %d\n", iface_desc->endpoint[1].desc.bmAttributes); DEBUG("ft1000_probe: descriptor2 type -- %d\n", iface_desc->endpoint[2].desc.bmAttributes); for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { endpoint = (struct usb_endpoint_descriptor *)&iface_desc-> endpoint[i].desc; DEBUG("endpoint %d\n", i); DEBUG("bEndpointAddress=%x, bmAttributes=%x\n", endpoint->bEndpointAddress, endpoint->bmAttributes); if ((endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { ft1000dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; DEBUG("ft1000_probe: in: %d\n", endpoint->bEndpointAddress); } if (!(endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { ft1000dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; DEBUG("ft1000_probe: out: %d\n", endpoint->bEndpointAddress); } } DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr, ft1000dev->bulk_out_endpointAddr); ret = request_firmware(&dsp_fw, "ft3000.img", 
&dev->dev); if (ret < 0) { printk(KERN_ERR "Error request_firmware().\n"); goto err_fw; } size = max_t(uint, dsp_fw->size, 4096); pFileStart = kmalloc(size, GFP_KERNEL); if (!pFileStart) { release_firmware(dsp_fw); ret = -ENOMEM; goto err_fw; } memcpy(pFileStart, dsp_fw->data, dsp_fw->size); FileLength = dsp_fw->size; release_firmware(dsp_fw); DEBUG("ft1000_probe: start downloading dsp image...\n"); ret = init_ft1000_netdev(ft1000dev); if (ret) goto err_load; pft1000info = netdev_priv(ft1000dev->net); DEBUG("In probe: pft1000info=%p\n", pft1000info); ret = dsp_reload(ft1000dev); if (ret) { printk(KERN_ERR "Problem with DSP image loading\n"); goto err_load; } gPollingfailed = FALSE; pft1000info->pPollThread = kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll"); if (IS_ERR(pft1000info->pPollThread)) { ret = PTR_ERR(pft1000info->pPollThread); goto err_load; } msleep(500); while (!pft1000info->CardReady) { if (gPollingfailed) { ret = -EIO; goto err_thread; } msleep(100); DEBUG("ft1000_probe::Waiting for Card Ready\n"); } DEBUG("ft1000_probe::Card Ready!!!! 
Registering network device\n"); ret = reg_ft1000_netdev(ft1000dev, interface); if (ret) goto err_thread; ret = ft1000_init_proc(ft1000dev->net); if (ret) goto err_proc; pft1000info->NetDevRegDone = 1; return 0; err_proc: unregister_netdev(ft1000dev->net); free_netdev(ft1000dev->net); err_thread: kthread_stop(pft1000info->pPollThread); err_load: kfree(pFileStart); err_fw: kfree(ft1000dev); return ret; } static void ft1000_disconnect(struct usb_interface *interface) { struct ft1000_info *pft1000info; DEBUG("ft1000_disconnect is called\n"); pft1000info = (struct ft1000_info *) usb_get_intfdata(interface); DEBUG("In disconnect pft1000info=%p\n", pft1000info); if (pft1000info) { ft1000_cleanup_proc(pft1000info); if (pft1000info->pPollThread) kthread_stop(pft1000info->pPollThread); DEBUG("ft1000_disconnect: threads are terminated\n"); if (pft1000info->pFt1000Dev->net) { DEBUG("ft1000_disconnect: destroy char driver\n"); ft1000_destroy_dev(pft1000info->pFt1000Dev->net); unregister_netdev(pft1000info->pFt1000Dev->net); DEBUG ("ft1000_disconnect: network device unregisterd\n"); free_netdev(pft1000info->pFt1000Dev->net); } usb_free_urb(pft1000info->pFt1000Dev->rx_urb); usb_free_urb(pft1000info->pFt1000Dev->tx_urb); DEBUG("ft1000_disconnect: urb freed\n"); kfree(pft1000info->pFt1000Dev); } kfree(pFileStart); return; } static struct usb_driver ft1000_usb_driver = { .name = "ft1000usb", .probe = ft1000_probe, .disconnect = ft1000_disconnect, .id_table = id_table, }; static int __init usb_ft1000_init(void) { int ret = 0; DEBUG("Initialize and register the driver\n"); ret = usb_register(&ft1000_usb_driver); if (ret) err("usb_register failed. Error number %d", ret); return ret; } static void __exit usb_ft1000_exit(void) { DEBUG("Deregister the driver\n"); usb_deregister(&ft1000_usb_driver); } module_init(usb_ft1000_init); module_exit(usb_ft1000_exit);
gpl-2.0
GuneetAtwal/kernel_n9005
drivers/media/rc/ir-rc6-decoder.c
4729
7241
/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol * * Copyright (C) 2010 by David Härdeman <david@hardeman.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "rc-core-priv.h" #include <linux/module.h> /* * This decoder currently supports: * RC6-0-16 (standard toggle bit in header) * RC6-6A-20 (no toggle bit) * RC6-6A-24 (no toggle bit) * RC6-6A-32 (MCE version with toggle bit in body) */ #define RC6_UNIT 444444 /* nanosecs */ #define RC6_HEADER_NBITS 4 /* not including toggle bit */ #define RC6_0_NBITS 16 #define RC6_6A_32_NBITS 32 #define RC6_6A_NBITS 128 /* Variable 8..128 */ #define RC6_PREFIX_PULSE (6 * RC6_UNIT) #define RC6_PREFIX_SPACE (2 * RC6_UNIT) #define RC6_BIT_START (1 * RC6_UNIT) #define RC6_BIT_END (1 * RC6_UNIT) #define RC6_TOGGLE_START (2 * RC6_UNIT) #define RC6_TOGGLE_END (2 * RC6_UNIT) #define RC6_SUFFIX_SPACE (6 * RC6_UNIT) #define RC6_MODE_MASK 0x07 /* for the header bits */ #define RC6_STARTBIT_MASK 0x08 /* for the header bits */ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */ #define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */ #ifndef CHAR_BIT #define CHAR_BIT 8 /* Normally in <limits.h> */ #endif enum rc6_mode { RC6_MODE_0, RC6_MODE_6A, RC6_MODE_UNKNOWN, }; enum rc6_state { STATE_INACTIVE, STATE_PREFIX_SPACE, STATE_HEADER_BIT_START, STATE_HEADER_BIT_END, STATE_TOGGLE_START, STATE_TOGGLE_END, STATE_BODY_BIT_START, STATE_BODY_BIT_END, STATE_FINISHED, }; static enum rc6_mode rc6_mode(struct rc6_dec *data) { switch (data->header & RC6_MODE_MASK) { 
case 0: return RC6_MODE_0; case 6: if (!data->toggle) return RC6_MODE_6A; /* fall through */ default: return RC6_MODE_UNKNOWN; } } /** * ir_rc6_decode() - Decode one RC6 pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct rc6_dec *data = &dev->raw->rc6; u32 scancode; u8 toggle; if (!(dev->raw->enabled_protocols & RC_TYPE_RC6)) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) goto out; again: IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) return 0; switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; /* Note: larger margin on first pulse since each RC6_UNIT is quite short and some hardware takes some time to adjust to the signal */ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT)) break; data->state = STATE_PREFIX_SPACE; data->count = 0; return 0; case STATE_PREFIX_SPACE: if (ev.pulse) break; if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2)) break; data->state = STATE_HEADER_BIT_START; data->header = 0; return 0; case STATE_HEADER_BIT_START: if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) break; data->header <<= 1; if (ev.pulse) data->header |= 1; data->count++; data->state = STATE_HEADER_BIT_END; return 0; case STATE_HEADER_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == RC6_HEADER_NBITS) data->state = STATE_TOGGLE_START; else data->state = STATE_HEADER_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_TOGGLE_START: if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2)) break; data->toggle = ev.pulse; data->state = 
STATE_TOGGLE_END; return 0; case STATE_TOGGLE_END: if (!is_transition(&ev, &dev->raw->prev_ev) || !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2)) break; if (!(data->header & RC6_STARTBIT_MASK)) { IR_dprintk(1, "RC6 invalid start bit\n"); break; } data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_TOGGLE_END); data->count = 0; data->body = 0; switch (rc6_mode(data)) { case RC6_MODE_0: data->wanted_bits = RC6_0_NBITS; break; case RC6_MODE_6A: data->wanted_bits = RC6_6A_NBITS; break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } goto again; case STATE_BODY_BIT_START: if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) { /* Discard LSB's that won't fit in data->body */ if (data->count++ < CHAR_BIT * sizeof data->body) { data->body <<= 1; if (ev.pulse) data->body |= 1; } data->state = STATE_BODY_BIT_END; return 0; } else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse && geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) { data->state = STATE_FINISHED; goto again; } break; case STATE_BODY_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == data->wanted_bits) data->state = STATE_FINISHED; else data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_FINISHED: if (ev.pulse) break; switch (rc6_mode(data)) { case RC6_MODE_0: scancode = data->body; toggle = data->toggle; IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n", scancode, toggle); break; case RC6_MODE_6A: if (data->count > CHAR_BIT * sizeof data->body) { IR_dprintk(1, "RC6 too many (%u) data bits\n", data->count); goto out; } scancode = data->body; if (data->count == RC6_6A_32_NBITS && (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { /* MCE RC */ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 
1 : 0; scancode &= ~RC6_6A_MCE_TOGGLE_MASK; } else { toggle = 0; } IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n", scancode, toggle); break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } rc_keydown(dev, scancode, toggle); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler rc6_handler = { .protocols = RC_TYPE_RC6, .decode = ir_rc6_decode, }; static int __init ir_rc6_decode_init(void) { ir_raw_handler_register(&rc6_handler); printk(KERN_INFO "IR RC6 protocol handler initialized\n"); return 0; } static void __exit ir_rc6_decode_exit(void) { ir_raw_handler_unregister(&rc6_handler); } module_init(ir_rc6_decode_init); module_exit(ir_rc6_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Härdeman <david@hardeman.nu>"); MODULE_DESCRIPTION("RC6 IR protocol decoder");
gpl-2.0
cybojenix/android_kernel_nvidia_kalamata
drivers/media/rc/ir-rc6-decoder.c
4729
7241
/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol * * Copyright (C) 2010 by David Härdeman <david@hardeman.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "rc-core-priv.h" #include <linux/module.h> /* * This decoder currently supports: * RC6-0-16 (standard toggle bit in header) * RC6-6A-20 (no toggle bit) * RC6-6A-24 (no toggle bit) * RC6-6A-32 (MCE version with toggle bit in body) */ #define RC6_UNIT 444444 /* nanosecs */ #define RC6_HEADER_NBITS 4 /* not including toggle bit */ #define RC6_0_NBITS 16 #define RC6_6A_32_NBITS 32 #define RC6_6A_NBITS 128 /* Variable 8..128 */ #define RC6_PREFIX_PULSE (6 * RC6_UNIT) #define RC6_PREFIX_SPACE (2 * RC6_UNIT) #define RC6_BIT_START (1 * RC6_UNIT) #define RC6_BIT_END (1 * RC6_UNIT) #define RC6_TOGGLE_START (2 * RC6_UNIT) #define RC6_TOGGLE_END (2 * RC6_UNIT) #define RC6_SUFFIX_SPACE (6 * RC6_UNIT) #define RC6_MODE_MASK 0x07 /* for the header bits */ #define RC6_STARTBIT_MASK 0x08 /* for the header bits */ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */ #define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */ #ifndef CHAR_BIT #define CHAR_BIT 8 /* Normally in <limits.h> */ #endif enum rc6_mode { RC6_MODE_0, RC6_MODE_6A, RC6_MODE_UNKNOWN, }; enum rc6_state { STATE_INACTIVE, STATE_PREFIX_SPACE, STATE_HEADER_BIT_START, STATE_HEADER_BIT_END, STATE_TOGGLE_START, STATE_TOGGLE_END, STATE_BODY_BIT_START, STATE_BODY_BIT_END, STATE_FINISHED, }; static enum rc6_mode rc6_mode(struct rc6_dec *data) { switch (data->header & RC6_MODE_MASK) { 
case 0: return RC6_MODE_0; case 6: if (!data->toggle) return RC6_MODE_6A; /* fall through */ default: return RC6_MODE_UNKNOWN; } } /** * ir_rc6_decode() - Decode one RC6 pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct rc6_dec *data = &dev->raw->rc6; u32 scancode; u8 toggle; if (!(dev->raw->enabled_protocols & RC_TYPE_RC6)) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) goto out; again: IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) return 0; switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; /* Note: larger margin on first pulse since each RC6_UNIT is quite short and some hardware takes some time to adjust to the signal */ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT)) break; data->state = STATE_PREFIX_SPACE; data->count = 0; return 0; case STATE_PREFIX_SPACE: if (ev.pulse) break; if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2)) break; data->state = STATE_HEADER_BIT_START; data->header = 0; return 0; case STATE_HEADER_BIT_START: if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) break; data->header <<= 1; if (ev.pulse) data->header |= 1; data->count++; data->state = STATE_HEADER_BIT_END; return 0; case STATE_HEADER_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == RC6_HEADER_NBITS) data->state = STATE_TOGGLE_START; else data->state = STATE_HEADER_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_TOGGLE_START: if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2)) break; data->toggle = ev.pulse; data->state = 
STATE_TOGGLE_END; return 0; case STATE_TOGGLE_END: if (!is_transition(&ev, &dev->raw->prev_ev) || !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2)) break; if (!(data->header & RC6_STARTBIT_MASK)) { IR_dprintk(1, "RC6 invalid start bit\n"); break; } data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_TOGGLE_END); data->count = 0; data->body = 0; switch (rc6_mode(data)) { case RC6_MODE_0: data->wanted_bits = RC6_0_NBITS; break; case RC6_MODE_6A: data->wanted_bits = RC6_6A_NBITS; break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } goto again; case STATE_BODY_BIT_START: if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) { /* Discard LSB's that won't fit in data->body */ if (data->count++ < CHAR_BIT * sizeof data->body) { data->body <<= 1; if (ev.pulse) data->body |= 1; } data->state = STATE_BODY_BIT_END; return 0; } else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse && geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) { data->state = STATE_FINISHED; goto again; } break; case STATE_BODY_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == data->wanted_bits) data->state = STATE_FINISHED; else data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_FINISHED: if (ev.pulse) break; switch (rc6_mode(data)) { case RC6_MODE_0: scancode = data->body; toggle = data->toggle; IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n", scancode, toggle); break; case RC6_MODE_6A: if (data->count > CHAR_BIT * sizeof data->body) { IR_dprintk(1, "RC6 too many (%u) data bits\n", data->count); goto out; } scancode = data->body; if (data->count == RC6_6A_32_NBITS && (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { /* MCE RC */ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 
1 : 0; scancode &= ~RC6_6A_MCE_TOGGLE_MASK; } else { toggle = 0; } IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n", scancode, toggle); break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } rc_keydown(dev, scancode, toggle); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler rc6_handler = { .protocols = RC_TYPE_RC6, .decode = ir_rc6_decode, }; static int __init ir_rc6_decode_init(void) { ir_raw_handler_register(&rc6_handler); printk(KERN_INFO "IR RC6 protocol handler initialized\n"); return 0; } static void __exit ir_rc6_decode_exit(void) { ir_raw_handler_unregister(&rc6_handler); } module_init(ir_rc6_decode_init); module_exit(ir_rc6_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Härdeman <david@hardeman.nu>"); MODULE_DESCRIPTION("RC6 IR protocol decoder");
gpl-2.0
ysat0/linux-ysato
arch/um/drivers/pcap_kern.c
4729
2588
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL. */ #include <linux/init.h> #include <linux/netdevice.h> #include <net_kern.h> #include "pcap_user.h" struct pcap_init { char *host_if; int promisc; int optimize; char *filter; }; void pcap_init(struct net_device *dev, void *data) { struct uml_net_private *pri; struct pcap_data *ppri; struct pcap_init *init = data; pri = netdev_priv(dev); ppri = (struct pcap_data *) pri->user; ppri->host_if = init->host_if; ppri->promisc = init->promisc; ppri->optimize = init->optimize; ppri->filter = init->filter; printk("pcap backend, host interface %s\n", ppri->host_if); } static int pcap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return pcap_user_read(fd, skb_mac_header(skb), skb->dev->mtu + ETH_HEADER_OTHER, (struct pcap_data *) &lp->user); } static int pcap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return -EPERM; } static const struct net_kern_info pcap_kern_info = { .init = pcap_init, .protocol = eth_protocol, .read = pcap_read, .write = pcap_write, }; int pcap_setup(char *str, char **mac_out, void *data) { struct pcap_init *init = data; char *remain, *host_if = NULL, *options[2] = { NULL, NULL }; int i; *init = ((struct pcap_init) { .host_if = "eth0", .promisc = 1, .optimize = 0, .filter = NULL }); remain = split_if_spec(str, &host_if, &init->filter, &options[0], &options[1], mac_out, NULL); if (remain != NULL) { printk(KERN_ERR "pcap_setup - Extra garbage on " "specification : '%s'\n", remain); return 0; } if (host_if != NULL) init->host_if = host_if; for (i = 0; i < ARRAY_SIZE(options); i++) { if (options[i] == NULL) continue; if (!strcmp(options[i], "promisc")) init->promisc = 1; else if (!strcmp(options[i], "nopromisc")) init->promisc = 0; else if (!strcmp(options[i], "optimize")) init->optimize = 1; else if (!strcmp(options[i], "nooptimize")) init->optimize = 0; else { printk(KERN_ERR "pcap_setup : bad option - '%s'\n", 
options[i]); return 0; } } return 1; } static struct transport pcap_transport = { .list = LIST_HEAD_INIT(pcap_transport.list), .name = "pcap", .setup = pcap_setup, .user = &pcap_user_info, .kern = &pcap_kern_info, .private_size = sizeof(struct pcap_data), .setup_size = sizeof(struct pcap_init), }; static int register_pcap(void) { register_transport(&pcap_transport); return 0; } late_initcall(register_pcap);
gpl-2.0
SM-G920P/G92XP-R4_COI9
arch/um/drivers/umcast_kern.c
4729
4711
/* * user-mode-linux networking multicast transport * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org> * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * * based on the existing uml-networking code, which is * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and * James Leu (jleu@mindspring.net). * Copyright (C) 2001 by various other people who didn't put their name here. * * Licensed under the GPL. */ #include <linux/init.h> #include <linux/netdevice.h> #include "umcast.h" #include <net_kern.h> struct umcast_init { char *addr; int lport; int rport; int ttl; bool unicast; }; static void umcast_init(struct net_device *dev, void *data) { struct uml_net_private *pri; struct umcast_data *dpri; struct umcast_init *init = data; pri = netdev_priv(dev); dpri = (struct umcast_data *) pri->user; dpri->addr = init->addr; dpri->lport = init->lport; dpri->rport = init->rport; dpri->unicast = init->unicast; dpri->ttl = init->ttl; dpri->dev = dev; if (dpri->unicast) { printk(KERN_INFO "ucast backend address: %s:%u listen port: " "%u\n", dpri->addr, dpri->rport, dpri->lport); } else { printk(KERN_INFO "mcast backend multicast address: %s:%u, " "TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl); } } static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return net_recvfrom(fd, skb_mac_header(skb), skb->dev->mtu + ETH_HEADER_OTHER); } static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return umcast_user_write(fd, skb->data, skb->len, (struct umcast_data *) &lp->user); } static const struct net_kern_info umcast_kern_info = { .init = umcast_init, .protocol = eth_protocol, .read = umcast_read, .write = umcast_write, }; static int mcast_setup(char *str, char **mac_out, void *data) { struct umcast_init *init = data; char *port_str = NULL, *ttl_str = NULL, *remain; char *last; *init = ((struct umcast_init) { .addr = "239.192.168.1", .lport = 1102, .ttl = 1 }); remain = split_if_spec(str, mac_out, 
&init->addr, &port_str, &ttl_str, NULL); if (remain != NULL) { printk(KERN_ERR "mcast_setup - Extra garbage on " "specification : '%s'\n", remain); return 0; } if (port_str != NULL) { init->lport = simple_strtoul(port_str, &last, 10); if ((*last != '\0') || (last == port_str)) { printk(KERN_ERR "mcast_setup - Bad port : '%s'\n", port_str); return 0; } } if (ttl_str != NULL) { init->ttl = simple_strtoul(ttl_str, &last, 10); if ((*last != '\0') || (last == ttl_str)) { printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n", ttl_str); return 0; } } init->unicast = false; init->rport = init->lport; printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr, init->lport, init->ttl); return 1; } static int ucast_setup(char *str, char **mac_out, void *data) { struct umcast_init *init = data; char *lport_str = NULL, *rport_str = NULL, *remain; char *last; *init = ((struct umcast_init) { .addr = "", .lport = 1102, .rport = 1102 }); remain = split_if_spec(str, mac_out, &init->addr, &lport_str, &rport_str, NULL); if (remain != NULL) { printk(KERN_ERR "ucast_setup - Extra garbage on " "specification : '%s'\n", remain); return 0; } if (lport_str != NULL) { init->lport = simple_strtoul(lport_str, &last, 10); if ((*last != '\0') || (last == lport_str)) { printk(KERN_ERR "ucast_setup - Bad listen port : " "'%s'\n", lport_str); return 0; } } if (rport_str != NULL) { init->rport = simple_strtoul(rport_str, &last, 10); if ((*last != '\0') || (last == rport_str)) { printk(KERN_ERR "ucast_setup - Bad remote port : " "'%s'\n", rport_str); return 0; } } init->unicast = true; printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n", init->lport, init->addr, init->rport); return 1; } static struct transport mcast_transport = { .list = LIST_HEAD_INIT(mcast_transport.list), .name = "mcast", .setup = mcast_setup, .user = &umcast_user_info, .kern = &umcast_kern_info, .private_size = sizeof(struct umcast_data), .setup_size = sizeof(struct umcast_init), }; static struct transport 
ucast_transport = { .list = LIST_HEAD_INIT(ucast_transport.list), .name = "ucast", .setup = ucast_setup, .user = &umcast_user_info, .kern = &umcast_kern_info, .private_size = sizeof(struct umcast_data), .setup_size = sizeof(struct umcast_init), }; static int register_umcast(void) { register_transport(&mcast_transport); register_transport(&ucast_transport); return 0; } late_initcall(register_umcast);
gpl-2.0
AD5GB/android_kernel_google_msm
drivers/isdn/hisax/avma1_cs.c
4985
4146
/* * PCMCIA client driver for AVM A1 / Fritz!PCMCIA * * Author Carsten Paeth * Copyright 1998-2001 by Carsten Paeth <calle@calle.in-berlin.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA cards"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int isdnprot = 2; module_param(isdnprot, int, 0); /*====================================================================*/ static int avma1cs_config(struct pcmcia_device *link) __devinit; static void avma1cs_release(struct pcmcia_device *link); static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit; static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); /* General socket configuration */ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; p_dev->config_index = 1; p_dev->config_regs = PRESENT_OPTION; return avma1cs_config(p_dev); } /* avma1cs_attach */ static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); avma1cs_release(link); kfree(link->priv); } /* avma1cs_detach */ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->end = 16; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 5; return pcmcia_request_io(p_dev); } static int __devinit avma1cs_config(struct pcmcia_device *link) { int i = -1; char devname[128]; IsdnCard_t icard; 
int busy = 0; dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); devname[0] = 0; if (link->prod_id[1]) strlcpy(devname, link->prod_id[1], sizeof(devname)); if (pcmcia_loop_config(link, avma1cs_configcheck, NULL)) return -ENODEV; do { /* * allocate an interrupt line */ if (!link->irq) { /* undo */ pcmcia_disable_device(link); break; } /* * configure the PCMCIA socket */ i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; } } while (0); /* If any step failed, release any partially configured state */ if (i != 0) { avma1cs_release(link); return -ENODEV; } icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = isdnprot; icard.typ = ISDN_CTYPE_A1_PCMCIA; i = hisax_init_pcmcia(link, &busy, &icard); if (i < 0) { printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 " "PCMCIA %d at i/o %#x\n", i, (unsigned int) link->resource[0]->start); avma1cs_release(link); return -ENODEV; } link->priv = (void *) (unsigned long) i; return 0; } /* avma1cs_config */ static void avma1cs_release(struct pcmcia_device *link) { unsigned long minor = (unsigned long) link->priv; dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); /* now unregister function with hisax */ HiSax_closecard(minor); pcmcia_disable_device(link); } /* avma1cs_release */ static const struct pcmcia_device_id avma1cs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids); static struct pcmcia_driver avma1cs_driver = { .owner = THIS_MODULE, .name = "avma1_cs", .probe = avma1cs_probe, .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; static int __init init_avma1_cs(void) { return pcmcia_register_driver(&avma1cs_driver); } static void __exit exit_avma1_cs(void) { pcmcia_unregister_driver(&avma1cs_driver); } module_init(init_avma1_cs); module_exit(exit_avma1_cs);
gpl-2.0
ReaperXL2/Overkill_v4_extended
drivers/input/keyboard/max7359_keypad.c
4985
8232
/* * max7359_keypad.c - MAX7359 Key Switch Controller Driver * * Copyright (C) 2009 Samsung Electronics * Kim Kyuwon <q1.kim@samsung.com> * * Based on pxa27x_keypad.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/5456 */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #define MAX7359_MAX_KEY_ROWS 8 #define MAX7359_MAX_KEY_COLS 8 #define MAX7359_MAX_KEY_NUM (MAX7359_MAX_KEY_ROWS * MAX7359_MAX_KEY_COLS) #define MAX7359_ROW_SHIFT 3 /* * MAX7359 registers */ #define MAX7359_REG_KEYFIFO 0x00 #define MAX7359_REG_CONFIG 0x01 #define MAX7359_REG_DEBOUNCE 0x02 #define MAX7359_REG_INTERRUPT 0x03 #define MAX7359_REG_PORTS 0x04 #define MAX7359_REG_KEYREP 0x05 #define MAX7359_REG_SLEEP 0x06 /* * Configuration register bits */ #define MAX7359_CFG_SLEEP (1 << 7) #define MAX7359_CFG_INTERRUPT (1 << 5) #define MAX7359_CFG_KEY_RELEASE (1 << 3) #define MAX7359_CFG_WAKEUP (1 << 1) #define MAX7359_CFG_TIMEOUT (1 << 0) /* * Autosleep register values (ms) */ #define MAX7359_AUTOSLEEP_8192 0x01 #define MAX7359_AUTOSLEEP_4096 0x02 #define MAX7359_AUTOSLEEP_2048 0x03 #define MAX7359_AUTOSLEEP_1024 0x04 #define MAX7359_AUTOSLEEP_512 0x05 #define MAX7359_AUTOSLEEP_256 0x06 struct max7359_keypad { /* matrix key code map */ unsigned short keycodes[MAX7359_MAX_KEY_NUM]; struct input_dev *input_dev; struct i2c_client *client; }; static int max7359_write_reg(struct i2c_client *client, u8 reg, u8 val) { int ret = i2c_smbus_write_byte_data(client, reg, val); if (ret < 0) dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, reg, val, ret); return ret; } static int max7359_read_reg(struct i2c_client *client, int reg) { int ret = 
i2c_smbus_read_byte_data(client, reg); if (ret < 0) dev_err(&client->dev, "%s: reg 0x%x, err %d\n", __func__, reg, ret); return ret; } static void max7359_build_keycode(struct max7359_keypad *keypad, const struct matrix_keymap_data *keymap_data) { struct input_dev *input_dev = keypad->input_dev; int i; for (i = 0; i < keymap_data->keymap_size; i++) { unsigned int key = keymap_data->keymap[i]; unsigned int row = KEY_ROW(key); unsigned int col = KEY_COL(key); unsigned int scancode = MATRIX_SCAN_CODE(row, col, MAX7359_ROW_SHIFT); unsigned short keycode = KEY_VAL(key); keypad->keycodes[scancode] = keycode; __set_bit(keycode, input_dev->keybit); } __clear_bit(KEY_RESERVED, input_dev->keybit); } /* runs in an IRQ thread -- can (and will!) sleep */ static irqreturn_t max7359_interrupt(int irq, void *dev_id) { struct max7359_keypad *keypad = dev_id; struct input_dev *input_dev = keypad->input_dev; int val, row, col, release, code; val = max7359_read_reg(keypad->client, MAX7359_REG_KEYFIFO); row = val & 0x7; col = (val >> 3) & 0x7; release = val & 0x40; code = MATRIX_SCAN_CODE(row, col, MAX7359_ROW_SHIFT); dev_dbg(&keypad->client->dev, "key[%d:%d] %s\n", row, col, release ? "release" : "press"); input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, keypad->keycodes[code], !release); input_sync(input_dev); return IRQ_HANDLED; } /* * Let MAX7359 fall into a deep sleep: * If no keys are pressed, enter sleep mode for 8192 ms. And if any * key is pressed, the MAX7359 returns to normal operating mode. */ static inline void max7359_fall_deepsleep(struct i2c_client *client) { max7359_write_reg(client, MAX7359_REG_SLEEP, MAX7359_AUTOSLEEP_8192); } /* * Let MAX7359 take a catnap: * Autosleep just for 256 ms. 
*/ static inline void max7359_take_catnap(struct i2c_client *client) { max7359_write_reg(client, MAX7359_REG_SLEEP, MAX7359_AUTOSLEEP_256); } static int max7359_open(struct input_dev *dev) { struct max7359_keypad *keypad = input_get_drvdata(dev); max7359_take_catnap(keypad->client); return 0; } static void max7359_close(struct input_dev *dev) { struct max7359_keypad *keypad = input_get_drvdata(dev); max7359_fall_deepsleep(keypad->client); } static void max7359_initialize(struct i2c_client *client) { max7359_write_reg(client, MAX7359_REG_CONFIG, MAX7359_CFG_INTERRUPT | /* Irq clears after host read */ MAX7359_CFG_KEY_RELEASE | /* Key release enable */ MAX7359_CFG_WAKEUP); /* Key press wakeup enable */ /* Full key-scan functionality */ max7359_write_reg(client, MAX7359_REG_DEBOUNCE, 0x1F); /* nINT asserts every debounce cycles */ max7359_write_reg(client, MAX7359_REG_INTERRUPT, 0x01); max7359_fall_deepsleep(client); } static int __devinit max7359_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct matrix_keymap_data *keymap_data = client->dev.platform_data; struct max7359_keypad *keypad; struct input_dev *input_dev; int ret; int error; if (!client->irq) { dev_err(&client->dev, "The irq number should not be zero\n"); return -EINVAL; } /* Detect MAX7359: The initial Keys FIFO value is '0x3F' */ ret = max7359_read_reg(client, MAX7359_REG_KEYFIFO); if (ret < 0) { dev_err(&client->dev, "failed to detect device\n"); return -ENODEV; } dev_dbg(&client->dev, "keys FIFO is 0x%02x\n", ret); keypad = kzalloc(sizeof(struct max7359_keypad), GFP_KERNEL); input_dev = input_allocate_device(); if (!keypad || !input_dev) { dev_err(&client->dev, "failed to allocate memory\n"); error = -ENOMEM; goto failed_free_mem; } keypad->client = client; keypad->input_dev = input_dev; input_dev->name = client->name; input_dev->id.bustype = BUS_I2C; input_dev->open = max7359_open; input_dev->close = max7359_close; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = 
BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->keycodesize = sizeof(keypad->keycodes[0]); input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes); input_dev->keycode = keypad->keycodes; input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_drvdata(input_dev, keypad); max7359_build_keycode(keypad, keymap_data); error = request_threaded_irq(client->irq, NULL, max7359_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, keypad); if (error) { dev_err(&client->dev, "failed to register interrupt\n"); goto failed_free_mem; } /* Register the input device */ error = input_register_device(input_dev); if (error) { dev_err(&client->dev, "failed to register input device\n"); goto failed_free_irq; } /* Initialize MAX7359 */ max7359_initialize(client); i2c_set_clientdata(client, keypad); device_init_wakeup(&client->dev, 1); return 0; failed_free_irq: free_irq(client->irq, keypad); failed_free_mem: input_free_device(input_dev); kfree(keypad); return error; } static int __devexit max7359_remove(struct i2c_client *client) { struct max7359_keypad *keypad = i2c_get_clientdata(client); free_irq(client->irq, keypad); input_unregister_device(keypad->input_dev); kfree(keypad); return 0; } #ifdef CONFIG_PM static int max7359_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); max7359_fall_deepsleep(client); if (device_may_wakeup(&client->dev)) enable_irq_wake(client->irq); return 0; } static int max7359_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); if (device_may_wakeup(&client->dev)) disable_irq_wake(client->irq); /* Restore the default setting */ max7359_take_catnap(client); return 0; } #endif static SIMPLE_DEV_PM_OPS(max7359_pm, max7359_suspend, max7359_resume); static const struct i2c_device_id max7359_ids[] = { { "max7359", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max7359_ids); static struct i2c_driver max7359_i2c_driver = { .driver = { .name = "max7359", .pm = &max7359_pm, }, .probe = max7359_probe, .remove = 
__devexit_p(max7359_remove), .id_table = max7359_ids, }; module_i2c_driver(max7359_i2c_driver); MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>"); MODULE_DESCRIPTION("MAX7359 Key Switch Controller Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
mastero9017/Blu_Spark
fs/ecryptfs/dentry.c
7801
3081
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok * Copyright (C) 2001-2003 Stony Brook University * Copyright (C) 2004-2006 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/dcache.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include "ecryptfs_kernel.h" /** * ecryptfs_d_revalidate - revalidate an ecryptfs dentry * @dentry: The ecryptfs dentry * @nd: The associated nameidata * * Called when the VFS needs to revalidate a dentry. This * is called whenever a name lookup finds a dentry in the * dcache. Most filesystems leave this as NULL, because all their * dentries in the dcache are valid. * * Returns 1 if valid, 0 otherwise. 
* */ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *lower_dentry; struct vfsmount *lower_mnt; struct dentry *dentry_save = NULL; struct vfsmount *vfsmount_save = NULL; int rc = 1; if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) goto out; if (nd) { dentry_save = nd->path.dentry; vfsmount_save = nd->path.mnt; nd->path.dentry = lower_dentry; nd->path.mnt = lower_mnt; } rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); if (nd) { nd->path.dentry = dentry_save; nd->path.mnt = vfsmount_save; } if (dentry->d_inode) { struct inode *lower_inode = ecryptfs_inode_to_lower(dentry->d_inode); fsstack_copy_attr_all(dentry->d_inode, lower_inode); } out: return rc; } struct kmem_cache *ecryptfs_dentry_info_cache; /** * ecryptfs_d_release * @dentry: The ecryptfs dentry * * Called when a dentry is really deallocated. */ static void ecryptfs_d_release(struct dentry *dentry) { if (ecryptfs_dentry_to_private(dentry)) { if (ecryptfs_dentry_to_lower(dentry)) { dput(ecryptfs_dentry_to_lower(dentry)); mntput(ecryptfs_dentry_to_lower_mnt(dentry)); } kmem_cache_free(ecryptfs_dentry_info_cache, ecryptfs_dentry_to_private(dentry)); } return; } const struct dentry_operations ecryptfs_dops = { .d_revalidate = ecryptfs_d_revalidate, .d_release = ecryptfs_d_release, };
gpl-2.0
omnirom/android_kernel_samsung_n1
sound/oss/dmasound/dmasound_core.c
8313
45405
/* * linux/sound/oss/dmasound/dmasound_core.c * * * OSS/Free compatible Atari TT/Falcon and Amiga DMA sound driver for * Linux/m68k * Extended to support Power Macintosh for Linux/ppc by Paul Mackerras * * (c) 1995 by Michael Schlueter & Michael Marte * * Michael Schlueter (michael@duck.syd.de) did the basic structure of the VFS * interface and the u-law to signed byte conversion. * * Michael Marte (marte@informatik.uni-muenchen.de) did the sound queue, * /dev/mixer, /dev/sndstat and complemented the VFS interface. He would like * to thank: * - Michael Schlueter for initial ideas and documentation on the MFP and * the DMA sound hardware. * - Therapy? for their CD 'Troublegum' which really made me rock. * * /dev/sndstat is based on code by Hannu Savolainen, the author of the * VoxWare family of drivers. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * History: * * 1995/8/25 First release * * 1995/9/02 Roman Hodek: * - Fixed atari_stram_alloc() call, the timer * programming and several race conditions * 1995/9/14 Roman Hodek: * - After some discussion with Michael Schlueter, * revised the interrupt disabling * - Slightly speeded up U8->S8 translation by using * long operations where possible * - Added 4:3 interpolation for /dev/audio * * 1995/9/20 Torsten Scherer: * - Fixed a bug in sq_write and changed /dev/audio * converting to play at 12517Hz instead of 6258Hz. * * 1995/9/23 Torsten Scherer: * - Changed sq_interrupt() and sq_play() to pre-program * the DMA for another frame while there's still one * running. This allows the IRQ response to be * arbitrarily delayed and playing will still continue. * * 1995/10/14 Guenther Kelleter, Torsten Scherer: * - Better support for Falcon audio (the Falcon doesn't * raise an IRQ at the end of a frame, but at the * beginning instead!). 
uses 'if (codec_dma)' in lots * of places to simply switch between Falcon and TT * code. * * 1995/11/06 Torsten Scherer: * - Started introducing a hardware abstraction scheme * (may perhaps also serve for Amigas?) * - Can now play samples at almost all frequencies by * means of a more generalized expand routine * - Takes a good deal of care to cut data only at * sample sizes * - Buffer size is now a kernel runtime option * - Implemented fsync() & several minor improvements * Guenther Kelleter: * - Useful hints and bug fixes * - Cross-checked it for Falcons * * 1996/3/9 Geert Uytterhoeven: * - Support added for Amiga, A-law, 16-bit little * endian. * - Unification to drivers/sound/dmasound.c. * * 1996/4/6 Martin Mitchell: * - Updated to 1.3 kernel. * * 1996/6/13 Topi Kanerva: * - Fixed things that were broken (mainly the amiga * 14-bit routines) * - /dev/sndstat shows now the real hardware frequency * - The lowpass filter is disabled by default now * * 1996/9/25 Geert Uytterhoeven: * - Modularization * * 1998/6/10 Andreas Schwab: * - Converted to use sound_core * * 1999/12/28 Richard Zidlicky: * - Added support for Q40 * * 2000/2/27 Geert Uytterhoeven: * - Clean up and split the code into 4 parts: * o dmasound_core: machine-independent code * o dmasound_atari: Atari TT and Falcon support * o dmasound_awacs: Apple PowerMac support * o dmasound_paula: Amiga support * * 2000/3/25 Geert Uytterhoeven: * - Integration of dmasound_q40 * - Small clean ups * * 2001/01/26 [1.0] Iain Sandoe * - make /dev/sndstat show revision & edition info. * - since dmasound.mach.sq_setup() can fail on pmac * its type has been changed to int and the returns * are checked. * [1.1] - stop missing translations from being called. * 2001/02/08 [1.2] - remove unused translation tables & move machine- * specific tables to low-level. * - return correct info. for SNDCTL_DSP_GETFMTS. * [1.3] - implement SNDCTL_DSP_GETCAPS fully. * [1.4] - make /dev/sndstat text length usage deterministic. 
* - make /dev/sndstat call to low-level * dmasound.mach.state_info() pass max space to ll driver. * - tidy startup banners and output info. * [1.5] - tidy up a little (removed some unused #defines in * dmasound.h) * - fix up HAS_RECORD conditionalisation. * - add record code in places it is missing... * - change buf-sizes to bytes to allow < 1kb for pmac * if user param entry is < 256 the value is taken to * be in kb > 256 is taken to be in bytes. * - make default buff/frag params conditional on * machine to allow smaller values for pmac. * - made the ioctls, read & write comply with the OSS * rules on setting params. * - added parsing of _setup() params for record. * 2001/04/04 [1.6] - fix bug where sample rates higher than maximum were * being reported as OK. * - fix open() to return -EBUSY as per OSS doc. when * audio is in use - this is independent of O_NOBLOCK. * - fix bug where SNDCTL_DSP_POST was blocking. */ /* Record capability notes 30/01/2001: * At present these observations apply only to pmac LL driver (the only one * that can do record, at present). However, if other LL drivers for machines * with record are added they may apply. * * The fragment parameters for the record and play channels are separate. * However, if the driver is opened O_RDWR there is no way (in the current OSS * API) to specify their values independently for the record and playback * channels. Since the only common factor between the input & output is the * sample rate (on pmac) it should be possible to open /dev/dspX O_WRONLY and * /dev/dspY O_RDONLY. The input & output channels could then have different * characteristics (other than the first that sets sample rate claiming the * right to set it for ever). As it stands, the format, channels, number of * bits & sample rate are assumed to be common. 
In the future perhaps these * should be the responsibility of the LL driver - and then if a card really * does not share items between record & playback they can be specified * separately. */ /* Thread-safeness of shared_resources notes: 31/01/2001 * If the user opens O_RDWR and then splits record & play between two threads * both of which inherit the fd - and then starts changing things from both * - we will have difficulty telling. * * It's bad application coding - but ... * TODO: think about how to sort this out... without bogging everything down in * semaphores. * * Similarly, the OSS spec says "all changes to parameters must be between * open() and the first read() or write(). - and a bit later on (by * implication) "between SNDCTL_DSP_RESET and the first read() or write() after * it". If the app is multi-threaded and this rule is broken between threads * we will have trouble spotting it - and the fault will be rather obscure :-( * * We will try and put out at least a kmsg if we see it happen... but I think * it will be quite hard to trap it with an -EXXX return... because we can't * see the fault until after the damage is done. 
*/

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sound.h>
#include <linux/init.h>
#include <linux/soundcard.h>
#include <linux/poll.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "dmasound.h"

#define DMASOUND_CORE_REVISION 1
#define DMASOUND_CORE_EDITION 6

/*
 *  Declarations
 */

/* Serialises the open()/release()/ioctl() paths of all three device nodes. */
static DEFINE_MUTEX(dmasound_core_mutex);

int dmasound_catchRadius = 0;
module_param(dmasound_catchRadius, int, 0);

/* Default play-queue geometry; may be overridden via module parameters. */
static unsigned int numWriteBufs = DEFAULT_N_BUFFERS;
module_param(numWriteBufs, int, 0);
static unsigned int writeBufSize = DEFAULT_BUFF_SIZE ;	/* in bytes */
module_param(writeBufSize, int, 0);

MODULE_LICENSE("GPL");

#ifdef MODULE
/* Device unit numbers, remembered so dmasound_deinit() can unregister. */
static int sq_unit = -1;
static int mixer_unit = -1;
static int state_unit = -1;
static int irq_installed;
#endif /* MODULE */

/* control over who can modify resources shared between play/record */
static fmode_t shared_resource_owner;
static int shared_resources_initialised;

/*
 *  Mid level stuff
 */

struct sound_settings dmasound = {
	.lock = __SPIN_LOCK_UNLOCKED(dmasound.lock)
};

/* Ask the low-level driver to stop output. */
static inline void sound_silence(void)
{
	dmasound.mach.silence(); /* _MUST_ stop DMA */
}

/* Forward a sample-format request to the low-level driver. */
static inline int sound_set_format(int format)
{
	return dmasound.mach.setFormat(format);
}

/*
 * Set the soft (user-visible) sample rate.  A negative argument is a
 * query and returns the current rate unchanged.
 */
static int sound_set_speed(int speed)
{
	if (speed < 0)
		return dmasound.soft.speed;

	/* trap out-of-range speed settings.
	   at present we allow (arbitrarily) low rates - using soft
	   up-conversion - but we can't allow > max because there is no
	   soft down-conversion.
	*/
	if (dmasound.mach.max_dsp_speed &&
	    (speed > dmasound.mach.max_dsp_speed))
		speed = dmasound.mach.max_dsp_speed ;

	dmasound.soft.speed = speed;

	if (dmasound.minDev == SND_DEV_DSP)
		dmasound.dsp.speed = dmasound.soft.speed;

	return dmasound.soft.speed;
}

/*
 * Set mono/stereo.  A negative argument is a query; any non-zero value
 * selects stereo.  Returns the resulting setting (0 or 1).
 */
static int sound_set_stereo(int stereo)
{
	if (stereo < 0)
		return dmasound.soft.stereo;

	stereo = !!stereo;    /* should be 0 or 1 now */

	dmasound.soft.stereo = stereo;
	if (dmasound.minDev == SND_DEV_DSP)
		dmasound.dsp.stereo = stereo;

	return stereo;
}

/*
 * Copy sound data from user space, translating it from the current soft
 * format to the hardware format using the matching ct_* routine from the
 * low-level driver's translation table.  Returns the number of user bytes
 * consumed, or 0 when no translation exists for the format.
 */
static ssize_t sound_copy_translate(TRANS *trans, const u_char __user *userPtr,
				    size_t userCount, u_char frame[],
				    ssize_t *frameUsed, ssize_t frameLeft)
{
	ssize_t (*ct_func)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);

	switch (dmasound.soft.format) {
	case AFMT_MU_LAW:
		ct_func = trans->ct_ulaw;
		break;
	case AFMT_A_LAW:
		ct_func = trans->ct_alaw;
		break;
	case AFMT_S8:
		ct_func = trans->ct_s8;
		break;
	case AFMT_U8:
		ct_func = trans->ct_u8;
		break;
	case AFMT_S16_BE:
		ct_func = trans->ct_s16be;
		break;
	case AFMT_U16_BE:
		ct_func = trans->ct_u16be;
		break;
	case AFMT_S16_LE:
		ct_func = trans->ct_s16le;
		break;
	case AFMT_U16_LE:
		ct_func = trans->ct_u16le;
		break;
	default:
		return 0;
	}
	/* if the user has requested a non-existent translation don't try
	   to call it but just return 0 bytes moved
	*/
	if (ct_func)
		return ct_func(userPtr, userCount, frame, frameUsed, frameLeft);
	return 0;
}

/*
 *  /dev/mixer abstraction
 */

static struct {
	int busy;		/* device node currently open? */
	int modify_counter;	/* bumped on every write-type mixer ioctl */
} mixer;

/* Open /dev/mixer: just pin the low-level driver module. */
static int mixer_open(struct inode *inode, struct file *file)
{
	mutex_lock(&dmasound_core_mutex);
	if (!try_module_get(dmasound.mach.owner)) {
		mutex_unlock(&dmasound_core_mutex);
		return -ENODEV;
	}
	mixer.busy = 1;
	mutex_unlock(&dmasound_core_mutex);
	return 0;
}

static int mixer_release(struct inode *inode, struct file *file)
{
	mutex_lock(&dmasound_core_mutex);
	mixer.busy = 0;
	module_put(dmasound.mach.owner);
	mutex_unlock(&dmasound_core_mutex);
	return 0;
}

/* Handle the generic mixer ioctls; anything unrecognised is passed to
   the low-level driver's mixer_ioctl hook (if any). */
static int mixer_ioctl(struct file *file, u_int cmd, u_long arg)
{
	if (_SIOC_DIR(cmd) & _SIOC_WRITE)
		mixer.modify_counter++;
	switch (cmd) {
	case OSS_GETVERSION:
		return IOCTL_OUT(arg, SOUND_VERSION);
	case SOUND_MIXER_INFO:
	    {
		mixer_info info;

		memset(&info, 0, sizeof(info));
		strlcpy(info.id, dmasound.mach.name2, sizeof(info.id));
		strlcpy(info.name, dmasound.mach.name2, sizeof(info.name));
		info.modify_counter = mixer.modify_counter;
		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	    }
	}
	if (dmasound.mach.mixer_ioctl)
		return dmasound.mach.mixer_ioctl(cmd, arg);
	return -EINVAL;
}

/* Locked wrapper around mixer_ioctl(). */
static long mixer_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&dmasound_core_mutex);
	ret = mixer_ioctl(file, cmd, arg);
	mutex_unlock(&dmasound_core_mutex);

	return ret;
}

static const struct file_operations mixer_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= mixer_unlocked_ioctl,
	.open		= mixer_open,
	.release	= mixer_release,
};

/* Register /dev/mixer and let the low-level driver initialise it. */
static void mixer_init(void)
{
#ifndef MODULE
	int mixer_unit;
#endif
	mixer_unit = register_sound_mixer(&mixer_fops, -1);
	if (mixer_unit < 0)
		return;

	mixer.busy = 0;
	dmasound.treble = 0;
	dmasound.bass = 0;
	if (dmasound.mach.mixer_init)
		dmasound.mach.mixer_init();
}

/*
 *  Sound queue stuff, the heart of the driver
 */

struct sound_queue dmasound_write_sq;

static void sq_reset_output(void) ;

/* Allocate 'num' DMA buffers of 'size' bytes each for a queue.  Returns
   0 or -ENOMEM; on failure nothing is left allocated.  A queue that
   already has buffers is left untouched.
   NOTE(review): num * sizeof(char *) is not overflow-checked; safe only
   because num comes from small module parameters - confirm if callers change. */
static int sq_allocate_buffers(struct sound_queue *sq, int num, int size)
{
	int i;

	if (sq->buffers)
		return 0;
	sq->numBufs = num;
	sq->bufSize = size;
	sq->buffers = kmalloc (num * sizeof(char *), GFP_KERNEL);
	if (!sq->buffers)
		return -ENOMEM;
	for (i = 0; i < num; i++) {
		sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL);
		if (!sq->buffers[i]) {
			/* unwind: free what we managed to allocate */
			while (i--)
				dmasound.mach.dma_free(sq->buffers[i], size);
			kfree(sq->buffers);
			sq->buffers = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

/* Free all DMA buffers of a queue (no-op if none are allocated). */
static void sq_release_buffers(struct sound_queue *sq)
{
	int i;

	if (sq->buffers) {
		for (i = 0; i < sq->numBufs; i++)
			dmasound.mach.dma_free(sq->buffers[i], sq->bufSize);
		kfree(sq->buffers);
		sq->buffers = NULL;
	}
}

/* Compute and lock in the queue's fragment geometry (from the user's
   SETFRAGMENT request or from the defaults), then call the low-level
   driver's per-queue setup hook.  Fails with -EINVAL if the queue is
   already locked. */
static int sq_setup(struct sound_queue *sq)
{
	int (*setup_func)(void) = NULL;
	int hard_frame ;

	if (sq->locked) { /* are we already set? - and not changeable */
#ifdef DEBUG_DMASOUND
printk("dmasound_core: tried to sq_setup a locked queue\n") ;
#endif
		return -EINVAL ;
	}
	sq->locked = 1 ; /* don't think we have a race prob. here _check_ */

	/* make sure that the parameters are set up
	   This should have been done already...
	*/
	dmasound.mach.init();

	/* OK.  If the user has set fragment parameters explicitly, then we
	   should leave them alone... as long as they are valid.
	   Invalid user fragment params can occur if we allow the whole buffer
	   to be used when the user requests the fragments sizes (with no soft
	   x-lation) and then the user subsequently sets a soft x-lation that
	   requires increased internal buffering.

	   Otherwise (if the user did not set them) OSS says that we should
	   select frag params on the basis of 0.5 s output & 0.1 s input
	   latency. (TODO.  For now we will copy in the defaults.)
	*/

	if (sq->user_frags <= 0) {
		/* no explicit request: use the whole allocation as-is */
		sq->max_count = sq->numBufs ;
		sq->max_active = sq->numBufs ;
		sq->block_size = sq->bufSize;
		/* set up the user info */
		sq->user_frags = sq->numBufs ;
		sq->user_frag_size = sq->bufSize ;
		/* scale hardware bytes to user (soft-format) bytes */
		sq->user_frag_size *=
			(dmasound.soft.size * (dmasound.soft.stereo+1) ) ;
		sq->user_frag_size /=
			(dmasound.hard.size * (dmasound.hard.stereo+1) ) ;
	} else {
		/* work out requested block size */
		sq->block_size = sq->user_frag_size ;
		sq->block_size *=
			(dmasound.hard.size * (dmasound.hard.stereo+1) ) ;
		sq->block_size /=
			(dmasound.soft.size * (dmasound.soft.stereo+1) ) ;
		/* the user wants to write frag-size chunks */
		sq->block_size *= dmasound.hard.speed ;
		sq->block_size /= dmasound.soft.speed ;
		/* this only works for size values which are powers of 2 */
		hard_frame =
			(dmasound.hard.size * (dmasound.hard.stereo+1))/8 ;
		sq->block_size += (hard_frame - 1) ;
		sq->block_size &= ~(hard_frame - 1) ; /* make sure we are aligned */
		/* let's just check for obvious mistakes */
		if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) {
#ifdef DEBUG_DMASOUND
printk("dmasound_core: invalid frag size (user set %d)\n", sq->user_frag_size) ;
#endif
			sq->block_size = sq->bufSize ;
		}
		if ( sq->user_frags <= sq->numBufs ) {
			sq->max_count = sq->user_frags ;
			/* if user has set max_active - then use it */
			sq->max_active = (sq->max_active <= sq->max_count) ?
				sq->max_active : sq->max_count ;
		} else {
#ifdef DEBUG_DMASOUND
printk("dmasound_core: invalid frag count (user set %d)\n", sq->user_frags) ;
#endif
			sq->max_count =
			sq->max_active = sq->numBufs ;
		}
	}
	/* reset the queue indices */
	sq->front = sq->count = sq->rear_size = 0;
	sq->syncing = 0;
	sq->active = 0;

	if (sq == &write_sq) {
		sq->rear = -1;
		setup_func = dmasound.mach.write_sq_setup;
	}
	if (setup_func)
		return setup_func();

	return 0 ;
}

/* Kick the low-level driver into (re)starting playback. */
static inline void sq_play(void)
{
	dmasound.mach.play();
}

/* Write audio data to the play queue, translating it to the hardware
   format on the way.  May block waiting for a free fragment unless the
   file was opened O_NONBLOCK. */
static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
			loff_t *ppos)
{
	ssize_t uWritten = 0;
	u_char *dest;
	ssize_t uUsed = 0, bUsed, bLeft;
	unsigned long flags ;

	/* ++TeSche: Is something like this necessary?
	 * Hey, that's an honest question! Or does any other part of the
	 * filesystem already checks this situation? I really don't know.
	 */
	if (uLeft == 0)
		return 0;

	/* implement any changes we have made to the soft/hard params.
	   this is not satisfactory really, all we have done up to now is to
	   say what we would like - there hasn't been any real checking of
	   capability
	*/

	if (shared_resources_initialised == 0) {
		dmasound.mach.init() ;
		shared_resources_initialised = 1 ;
	}

	/* set up the sq if it is not already done.  This may seem a dumb place
	   to do it - but it is what OSS requires.  It means that write() can
	   return memory allocation errors.  To avoid this possibility use the
	   GETBLKSIZE or GETOSPACE ioctls (after you've fiddled with all the
	   params you want to change) - these ioctls also force the setup.
	*/

	if (write_sq.locked == 0) {
		if ((uWritten = sq_setup(&write_sq)) < 0)
			return uWritten ;
		uWritten = 0 ;
	}

	/* FIXME: I think that this may be the wrong behaviour when we get
	   strapped for time and the cpu is close to being (or actually)
	   behind in sending data. - because we've lost the time that the N
	   samples, already in the buffer, would have given us to get here
	   with the next lot from the user.
	*/
	/* The interrupt doesn't start to play the last, incomplete frame.
	 * Thus we can append to it without disabling the interrupts! (Note
	 * also that write_sq.rear isn't affected by the interrupt.)
	 */

	/* as of 1.6 this behaviour changes if SNDCTL_DSP_POST has been issued:
	   this will mimic the behaviour of syncing and allow the sq_play() to
	   queue a partial fragment.  Since sq_play() may/will be called from
	   the IRQ handler - at least on Pmac we have to deal with it.
	   The strategy - possibly not optimum - is to kill _POST status if we
	   get here.  This seems, at least, reasonable - in the sense that POST
	   is supposed to indicate that we might not write before the queue is
	   drained - and if we get here in time then it does not apply.
	*/

	spin_lock_irqsave(&dmasound.lock, flags);
	write_sq.syncing &= ~2 ; /* take out POST status */
	spin_unlock_irqrestore(&dmasound.lock, flags);

	/* first try to top up the last, partially-filled fragment */
	if (write_sq.count > 0 &&
	    (bLeft = write_sq.block_size-write_sq.rear_size) > 0) {
		dest = write_sq.buffers[write_sq.rear];
		bUsed = write_sq.rear_size;
		uUsed = sound_copy_translate(dmasound.trans_write, src, uLeft,
					     dest, &bUsed, bLeft);
		if (uUsed <= 0)
			return uUsed;
		src += uUsed;
		uWritten += uUsed;
		uLeft = (uUsed <= uLeft) ? (uLeft - uUsed) : 0 ; /* paranoia */
		write_sq.rear_size = bUsed;
	}

	while (uLeft) {
		/* wait for a free fragment (or bail out if non-blocking) */
		while (write_sq.count >= write_sq.max_active) {
			sq_play();
			if (write_sq.non_blocking)
				return uWritten > 0 ? uWritten : -EAGAIN;
			SLEEP(write_sq.action_queue);
			if (signal_pending(current))
				return uWritten > 0 ? uWritten : -EINTR;
		}

		/* Here, we can avoid disabling the interrupt by first
		 * copying and translating the data, and then updating
		 * the write_sq variables. Until this is done, the interrupt
		 * won't see the new frame and we can work on it
		 * undisturbed.
		 */

		dest = write_sq.buffers[(write_sq.rear+1) % write_sq.max_count];
		bUsed = 0;
		bLeft = write_sq.block_size;
		uUsed = sound_copy_translate(dmasound.trans_write, src, uLeft,
					     dest, &bUsed, bLeft);
		if (uUsed <= 0)
			break;
		src += uUsed;
		uWritten += uUsed;
		uLeft = (uUsed <= uLeft) ?
			(uLeft - uUsed) : 0 ; /* paranoia */
		if (bUsed) {
			/* publish the new fragment to the interrupt handler */
			write_sq.rear = (write_sq.rear+1) % write_sq.max_count;
			write_sq.rear_size = bUsed;
			write_sq.count++;
		}
	} /* uUsed may have been 0 */

	sq_play();

	return uUsed < 0? uUsed: uWritten;
}

/* Poll for writability of the play queue; forces queue setup on the
   first call if that has not happened yet.
   NOTE(review): returning the negative retVal from sq_setup() out of a
   function whose return type is an unsigned event mask looks dubious -
   confirm intended behaviour. */
static unsigned int sq_poll(struct file *file, struct poll_table_struct *wait)
{
	unsigned int mask = 0;
	int retVal;

	if (write_sq.locked == 0) {
		if ((retVal = sq_setup(&write_sq)) < 0)
			return retVal;
		return 0;
	}
	if (file->f_mode & FMODE_WRITE )
		poll_wait(file, &write_sq.action_queue, wait);
	if (file->f_mode & FMODE_WRITE)
		if (write_sq.count < write_sq.max_active ||
		    write_sq.block_size - write_sq.rear_size > 0)
			mask |= POLLOUT | POLLWRNORM;
	return mask;

}

/* Initialise a queue's wait queues and mark it not busy. */
static inline void sq_init_waitqueue(struct sound_queue *sq)
{
	init_waitqueue_head(&sq->action_queue);
	init_waitqueue_head(&sq->open_queue);
	init_waitqueue_head(&sq->sync_queue);
	sq->busy = 0;
}

#if 0 /* blocking open() */
static inline void sq_wake_up(struct sound_queue *sq, struct file *file,
			      fmode_t mode)
{
	if (file->f_mode & mode) {
		sq->busy = 0; /* CHECK: IS THIS OK??? */
		WAKE_UP(sq->open_queue);
	}
}
#endif

/* Common open path for a queue: mark it busy and allocate its DMA
   buffers.  Returns -EBUSY if it is already open for this mode. */
static int sq_open2(struct sound_queue *sq, struct file *file, fmode_t mode,
		    int numbufs, int bufsize)
{
	int rc = 0;

	if (file->f_mode & mode) {
		if (sq->busy) {
#if 0 /* blocking open() */
			rc = -EBUSY;
			if (file->f_flags & O_NONBLOCK)
				return rc;
			rc = -EINTR;
			while (sq->busy) {
				SLEEP(sq->open_queue);
				if (signal_pending(current))
					return rc;
			}
			rc = 0;
#else
			/* OSS manual says we will return EBUSY regardless
			   of O_NOBLOCK.
			*/
			return -EBUSY ;
#endif
		}

		sq->busy = 1; /* Let's play spot-the-race-condition */

		/* allocate the default number & size of buffers.
		   (i.e. specified in _setup() or as module params)
		   can't be changed at the moment - but _could_ be perhaps
		   in the setfragments ioctl.
		*/
		if (( rc = sq_allocate_buffers(sq, numbufs, bufsize))) {
#if 0 /* blocking open() */
			sq_wake_up(sq, file, mode);
#else
			sq->busy = 0 ;
#endif
			return rc;
		}

		sq->non_blocking = file->f_flags & O_NONBLOCK;
	}
	return rc;
}

#define write_sq_init_waitqueue()	sq_init_waitqueue(&write_sq)
#if 0 /* blocking open() */
#define write_sq_wake_up(file)		sq_wake_up(&write_sq, file, FMODE_WRITE)
#endif
#define write_sq_release_buffers()	sq_release_buffers(&write_sq)
#define write_sq_open(file)	\
	sq_open2(&write_sq, file, FMODE_WRITE, numWriteBufs, writeBufSize )

/* Open /dev/dsp or /dev/audio.  Output only: opens with FMODE_READ
   fail with -ENXIO. */
static int sq_open(struct inode *inode, struct file *file)
{
	int rc;

	mutex_lock(&dmasound_core_mutex);
	if (!try_module_get(dmasound.mach.owner)) {
		mutex_unlock(&dmasound_core_mutex);
		return -ENODEV;
	}

	rc = write_sq_open(file); /* checks the f_mode */
	if (rc)
		goto out;
	if (file->f_mode & FMODE_READ) {
		/* TODO: if O_RDWR, release any resources grabbed by write part */
		rc = -ENXIO ; /* I think this is what is required by open(2) */
		goto out;
	}

	if (dmasound.mach.sq_open)
		dmasound.mach.sq_open(file->f_mode);

	/* CHECK whether this is sensible - in the case that dsp0 could be
	   opened O_RDONLY and dsp1 could be opened O_WRONLY
	*/

	dmasound.minDev = iminor(inode) & 0x0f;

	/* OK. - we should make some attempt at consistency.  At least the
	   H'ware options should be set with a valid mode.  We will make it
	   that the LL driver must supply defaults for hard & soft params.
	*/

	if (shared_resource_owner == 0) {
		/* you can make this AFMT_U8/mono/8K if you want to mimic old
		   OSS behaviour - while we still have soft translations ;-) */
		dmasound.soft = dmasound.mach.default_soft ;
		dmasound.dsp = dmasound.mach.default_soft ;
		dmasound.hard = dmasound.mach.default_hard ;
	}

#ifndef DMASOUND_STRICT_OSS_COMPLIANCE
	/* none of the current LL drivers can actually do this "native" at the
	   moment.  OSS does not really require us to supply /dev/audio if we
	   can't do it.
	*/
	if (dmasound.minDev == SND_DEV_AUDIO) {
		/* /dev/audio defaults to 8 kHz mono mu-law */
		sound_set_speed(8000);
		sound_set_stereo(0);
		sound_set_format(AFMT_MU_LAW);
	}
#endif
	mutex_unlock(&dmasound_core_mutex);
	return 0;
 out:
	module_put(dmasound.mach.owner);
	mutex_unlock(&dmasound_core_mutex);
	return rc;
}

/* Stop DMA and empty the play queue; also unlocks the fragment
   parameters so they may be changed again. */
static void sq_reset_output(void)
{
	sound_silence(); /* this _must_ stop DMA, we might be about to lose
			    the buffers */
	write_sq.active = 0;
	write_sq.count = 0;
	write_sq.rear_size = 0;
	/* write_sq.front = (write_sq.rear+1) % write_sq.max_count;*/
	write_sq.front = 0 ;
	write_sq.rear = -1 ; /* same as for set-up */

	/* OK - we can unlock the parameters and fragment settings */
	write_sq.locked = 0 ;
	write_sq.user_frags = 0 ;
	write_sq.user_frag_size = 0 ;
}

/* Full reset: stop output and forget the shared-parameter init state. */
static void sq_reset(void)
{
	sq_reset_output() ;

	/* we could consider resetting the shared_resources_owner here... but
	   I think it is probably still rather non-obvious to application
	   writer
	*/

	/* we release everything else though */
	shared_resources_initialised = 0 ;
}

/* Drain the play queue, sleeping (with a crude iteration-count timeout)
   until the hardware has finished; resets output on signal or timeout. */
static int sq_fsync(struct file *filp, struct dentry *dentry)
{
	int rc = 0;
	int timeout = 5;

	write_sq.syncing |= 1;
	sq_play();	/* there may be an incomplete frame waiting */

	while (write_sq.active) {
		SLEEP(write_sq.sync_queue);
		if (signal_pending(current)) {
			/* While waiting for audio output to drain, an
			 * interrupt occurred.  Stop audio output immediately
			 * and clear the queue.
			 */
			sq_reset_output();
			rc = -EINTR;
			break;
		}
		if (!--timeout) {
			printk(KERN_WARNING "dmasound: Timeout draining output\n");
			sq_reset_output();
			rc = -EIO;
			break;
		}
	}

	/* flag no sync regardless of whether we had a DSP_POST or not */
	write_sq.syncing = 0 ;
	return rc;
}

/* Release /dev/dsp: drain pending output, free the DMA buffers and, if
   we owned the shared parameters, give them up. */
static int sq_release(struct inode *inode, struct file *file)
{
	int rc = 0;

	mutex_lock(&dmasound_core_mutex);

	if (file->f_mode & FMODE_WRITE) {
		if (write_sq.busy)
			rc = sq_fsync(file, file->f_path.dentry);

		sq_reset_output() ; /* make sure dma is stopped and all is quiet */
		write_sq_release_buffers();
		write_sq.busy = 0;
	}

	if (file->f_mode & shared_resource_owner) { /* it's us that has them */
		shared_resource_owner = 0 ;
		shared_resources_initialised = 0 ;
		dmasound.hard = dmasound.mach.default_hard ;
	}

	module_put(dmasound.mach.owner);

#if 0 /* blocking open() */
	/* Wake up a process waiting for the queue being released.
	 * Note: There may be several processes waiting for a call
	 * to open() returning. */

	/* Iain: hmm I don't understand this next comment ... */
	/* There is probably a DOS attack here. They change the mode flag. */
	/* XXX add check here,*/
	read_sq_wake_up(file); /* checks f_mode */
	write_sq_wake_up(file); /* checks f_mode */
#endif /* blocking open() */

	mutex_unlock(&dmasound_core_mutex);

	return rc;
}

/* here we see if we have a right to modify format, channels, size and so on
   if no-one else has claimed it already then we do...

   TODO: We might change this to mask O_RDWR such that only one or the
   other channel is the owner - if we have problems.
*/
static int shared_resources_are_mine(fmode_t md)
{
	if (shared_resource_owner)
		return (shared_resource_owner & md) != 0;
	else {
		/* unclaimed: first caller becomes the owner */
		shared_resource_owner = md ;
		return 1 ;
	}
}

/* if either queue is locked we must deny the right to change shared params
*/
static int queues_are_quiescent(void)
{
	if (write_sq.locked)
		return 0 ;
	return 1 ;
}

/* check and set a queue's fragments per user's wishes...
   we will check against the pre-defined literals and the actual sizes.
   This is a bit fraught - because soft translations can mess with our
   buffer requirements *after* this call - OSS says "call setfrags first"
*/
/* It is possible to replace all the -EINVAL returns with an override that
   just puts the allowable value in.  This may be what many OSS apps require
*/
static int set_queue_frags(struct sound_queue *sq, int bufs, int size)
{
	if (sq->locked) {
#ifdef DEBUG_DMASOUND
printk("dmasound_core: tried to set_queue_frags on a locked queue\n") ;
#endif
		return -EINVAL ;
	}

	/* 'size' arrives as a power-of-two exponent, per the OSS encoding */
	if ((size < MIN_FRAG_SIZE) || (size > MAX_FRAG_SIZE))
		return -EINVAL ;
	size = (1<<size) ; /* now in bytes */
	if (size > sq->bufSize)
		return -EINVAL ; /* this might still not work */

	if (bufs <= 0)
		return -EINVAL ;
	if (bufs > sq->numBufs) /* the user is allowed say "don't care" with 0x7fff */
		bufs = sq->numBufs ;

	/* there is, currently, no way to specify max_active separately
	   from max_count.  This could be a LL driver issue - I guess
	   if there is a requirement for these values to be different then
	   we will have to pass that info. up to this level.
	*/
	sq->user_frags =
	sq->max_active = bufs ;
	sq->user_frag_size = size ;

	return 0 ;
}

/* The OSS ioctl handler for /dev/dsp; anything it does not recognise
   falls through to mixer_ioctl(). */
static int sq_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int val, result;
	u_long fmt;
	int data;
	int size, nbufs;
	audio_buf_info info;

	switch (cmd) {
	case SNDCTL_DSP_RESET:
		sq_reset();
		return 0;
		break ;
	case SNDCTL_DSP_GETFMTS:
		fmt = dmasound.mach.hardware_afmts ; /* this is what OSS says.. */
		return IOCTL_OUT(arg, fmt);
		break ;
	case SNDCTL_DSP_GETBLKSIZE:
		/* this should tell the caller about bytes that the app can
		   read/write - the app doesn't care about our internal
		   buffers.  We force sq_setup() here as per OSS 1.1 (which
		   should compute the values necessary).
		   Since there is no mechanism to specify read/write
		   separately, for fds opened O_RDWR, the write_sq values
		   will, arbitrarily, overwrite the read_sq ones.
		*/
		size = 0 ;
		if (file->f_mode & FMODE_WRITE) {
			if ( !write_sq.locked )
				sq_setup(&write_sq) ;
			size = write_sq.user_frag_size ;
		}
		return IOCTL_OUT(arg, size);
		break ;
	case SNDCTL_DSP_POST:
		/* all we are going to do is to tell the LL that any
		   partial frags can be queued for output.
		   The LL will have to clear this flag when last output
		   is queued.
		*/
		write_sq.syncing |= 0x2 ;
		sq_play() ;
		return 0 ;
	case SNDCTL_DSP_SYNC:
		/* This call, effectively, has the same behaviour as
		   SNDCTL_DSP_RESET except that it waits for output to finish
		   before resetting everything - read, however, is killed
		   immediately.
		*/
		result = 0 ;
		if (file->f_mode & FMODE_WRITE) {
			result = sq_fsync(file, file->f_path.dentry);
			sq_reset_output() ;
		}
		/* if we are the shared resource owner then release them */
		if (file->f_mode & shared_resource_owner)
			shared_resources_initialised = 0 ;
		return result ;
		break ;
	case SOUND_PCM_READ_RATE:
		return IOCTL_OUT(arg, dmasound.soft.speed);
	case SNDCTL_DSP_SPEED:
		/* changing this on the fly will have weird effects on the
		   sound.  Where there are rate conversions implemented in
		   soft form - it will cause the _ctx_xxx() functions to be
		   substituted.  However, there doesn't appear to be any
		   reason to dis-allow it from a driver pov.
		*/
		if (shared_resources_are_mine(file->f_mode)) {
			IOCTL_IN(arg, data);
			data = sound_set_speed(data) ;
			shared_resources_initialised = 0 ;
			return IOCTL_OUT(arg, data);
		} else
			return -EINVAL ;
		break ;
	/* OSS says these next 4 actions are undefined when the device is
	   busy/active - we will just return -EINVAL.
	   To be allowed to change one - (a) you have to own the right
	   (b) the queue(s) must be quiescent
	*/
	case SNDCTL_DSP_STEREO:
		if (shared_resources_are_mine(file->f_mode) &&
		    queues_are_quiescent()) {
			IOCTL_IN(arg, data);
			shared_resources_initialised = 0 ;
			return IOCTL_OUT(arg, sound_set_stereo(data));
		} else
			return -EINVAL ;
		break ;
	case SOUND_PCM_WRITE_CHANNELS:
		if (shared_resources_are_mine(file->f_mode) &&
		    queues_are_quiescent()) {
			IOCTL_IN(arg, data);
			/* the user might ask for 20 channels, we will return 1 or 2 */
			shared_resources_initialised = 0 ;
			return IOCTL_OUT(arg, sound_set_stereo(data-1)+1);
		} else
			return -EINVAL ;
		break ;
	case SNDCTL_DSP_SETFMT:
		if (shared_resources_are_mine(file->f_mode) &&
		    queues_are_quiescent()) {
			int format;
			IOCTL_IN(arg, data);
			shared_resources_initialised = 0 ;
			format = sound_set_format(data);
			result = IOCTL_OUT(arg, format);
			if (result < 0)
				return result;
			/* AFMT_QUERY is a pure query and may legitimately
			   differ from the resulting format */
			if (format != data && data != AFMT_QUERY)
				return -EINVAL;
			return 0;
		} else
			return -EINVAL ;
	case SNDCTL_DSP_SUBDIVIDE:
		return -EINVAL ;
	case SNDCTL_DSP_SETFRAGMENT:
		/* we can do this independently for the two queues - with the
		   proviso that for fds opened O_RDWR we cannot separate the
		   actions and both queues will be set per the last call.
		   NOTE: this does *NOT* actually set the queue up - merely
		   registers our intentions.
		*/
		IOCTL_IN(arg, data);
		result = 0 ;
		nbufs = (data >> 16) & 0x7fff ; /* 0x7fff is 'use maximum' */
		size = data & 0xffff;
		if (file->f_mode & FMODE_WRITE) {
			result = set_queue_frags(&write_sq, nbufs, size) ;
			if (result)
				return result ;
		}
		/* NOTE: this return value is irrelevant - OSS specifically
		   says that the value is 'random' and that the user _must_
		   check the actual frags values using SNDCTL_DSP_GETBLKSIZE
		   or similar */
		return IOCTL_OUT(arg, data);
		break ;
	case SNDCTL_DSP_GETOSPACE:
		/* report the free space in the play queue */
		if (file->f_mode & FMODE_WRITE) {
			if ( !write_sq.locked )
				sq_setup(&write_sq) ;
			info.fragments = write_sq.max_active - write_sq.count;
			info.fragstotal = write_sq.max_active;
			info.fragsize = write_sq.user_frag_size;
			info.bytes = info.fragments * info.fragsize;
			if (copy_to_user((void __user *)arg, &info,
					 sizeof(info)))
				return -EFAULT;
			return 0;
		} else
			return -EINVAL ;
		break ;
	case SNDCTL_DSP_GETCAPS:
		val = dmasound.mach.capabilities & 0xffffff00;
		return IOCTL_OUT(arg,val);

	default:
		return mixer_ioctl(file, cmd, arg);
	}
	return -EINVAL;
}

/* Locked wrapper around sq_ioctl(). */
static long sq_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&dmasound_core_mutex);
	ret = sq_ioctl(file, cmd, arg);
	mutex_unlock(&dmasound_core_mutex);

	return ret;
}

static const struct file_operations sq_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= sq_write,
	.poll		= sq_poll,
	.unlocked_ioctl	= sq_unlocked_ioctl,
	.open		= sq_open,
	.release	= sq_release,
};

/* Register the dsp device and initialise the write queue. */
static int sq_init(void)
{
	const struct file_operations *fops = &sq_fops;
#ifndef MODULE
	int sq_unit;
#endif

	sq_unit = register_sound_dsp(fops, -1);
	if (sq_unit < 0) {
		printk(KERN_ERR "dmasound_core: couldn't register fops\n") ;
		return sq_unit ;
	}

	write_sq_init_waitqueue();

	/* These parameters will be restored for every clean open()
	 * in the case of multiple open()s (e.g. dsp0 & dsp1) they
	 * will be set so long as the shared resources have no owner.
	 */

	if (shared_resource_owner == 0) {
		dmasound.soft = dmasound.mach.default_soft ;
		dmasound.hard = dmasound.mach.default_hard ;
		dmasound.dsp = dmasound.mach.default_soft ;
		shared_resources_initialised = 0 ;
	}
	return 0 ;
}

/*
 *  /dev/sndstat
 */

/* we allow more space for record-enabled because there are extra output
   lines.  the number here must include the amount we are prepared to give
   to the low-level driver.
*/
#define STAT_BUFF_LEN 768

/* this is how much space we will allow the low-level driver to use in the
   stat buffer.  Currently, 2 * (80 character line + <NL>).  We do not
   police this (it is up to the ll driver to be honest).
*/
#define LOW_LEVEL_STAT_ALLOC 162

static struct {
	int busy;			/* node open? */
	char buf[STAT_BUFF_LEN];	/* state.buf should not overflow! */
	int len, ptr;			/* rendered length / read position */
} state;

/* publish this function for use by low-level code, if required */
static char *get_afmt_string(int afmt)
{
	switch(afmt) {
	case AFMT_MU_LAW:
		return "mu-law";
		break;
	case AFMT_A_LAW:
		return "A-law";
		break;
	case AFMT_U8:
		return "unsigned 8 bit";
		break;
	case AFMT_S8:
		return "signed 8 bit";
		break;
	case AFMT_S16_BE:
		return "signed 16 bit BE";
		break;
	case AFMT_U16_BE:
		return "unsigned 16 bit BE";
		break;
	case AFMT_S16_LE:
		return "signed 16 bit LE";
		break;
	case AFMT_U16_LE:
		return "unsigned 16 bit LE";
		break;
	case 0:
		return "format not set" ;
		break ;
	default:
		break ;
	}
	return "ERROR: Unsupported AFMT_XXXX code" ;
}

/* Open /dev/sndstat: render the whole status report into state.buf, to
   be handed out piecemeal by subsequent read()s. */
static int state_open(struct inode *inode, struct file *file)
{
	char *buffer = state.buf;
	int len = 0;
	int ret;

	mutex_lock(&dmasound_core_mutex);
	ret = -EBUSY;
	if (state.busy)
		goto out;

	ret = -ENODEV;
	if (!try_module_get(dmasound.mach.owner))
		goto out;

	state.ptr = 0;
	state.busy = 1;

	len += sprintf(buffer+len, "%sDMA sound driver rev %03d :\n",
		       dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
		       ((dmasound.mach.version>>8) & 0x0f));
	len += sprintf(buffer+len,
		       "Core driver edition %02d.%02d : %s driver edition %02d.%02d\n",
		       DMASOUND_CORE_REVISION, DMASOUND_CORE_EDITION,
		       dmasound.mach.name2,
		       (dmasound.mach.version >> 8),
		       (dmasound.mach.version & 0xff)) ;

	/* call the low-level module to fill in any stat info. that it has
	   if present.  Maximum buffer usage is specified.
	*/

	if (dmasound.mach.state_info)
		len += dmasound.mach.state_info(buffer+len,
			(size_t) LOW_LEVEL_STAT_ALLOC) ;

	/* make usage of the state buffer as deterministic as poss.
	   exceptional conditions could cause overrun - and this is flagged
	   as a kernel error.
	*/

	/* formats and settings */

	len += sprintf(buffer+len,"\t\t === Formats & settings ===\n") ;
	len += sprintf(buffer+len,"Parameter %20s%20s\n","soft","hard") ;
	len += sprintf(buffer+len,"Format :%20s%20s\n",
		       get_afmt_string(dmasound.soft.format),
		       get_afmt_string(dmasound.hard.format));

	len += sprintf(buffer+len,"Samp Rate:%14d s/sec%14d s/sec\n",
		       dmasound.soft.speed, dmasound.hard.speed);

	len += sprintf(buffer+len,"Channels :%20s%20s\n",
		       dmasound.soft.stereo ? "stereo" : "mono",
		       dmasound.hard.stereo ? "stereo" : "mono" );

	/* sound queue status */

	len += sprintf(buffer+len,"\t\t === Sound Queue status ===\n");
	len += sprintf(buffer+len,"Allocated:%8s%6s\n","Buffers","Size") ;
	len += sprintf(buffer+len,"%9s:%8d%6d\n", "write",
		       write_sq.numBufs, write_sq.bufSize) ;
	len += sprintf(buffer+len,
		       "Current : MaxFrg FragSiz MaxAct Frnt Rear "
		       "Cnt RrSize A B S L xruns\n") ;
	len += sprintf(buffer+len,"%9s:%7d%8d%7d%5d%5d%4d%7d%2d%2d%2d%2d%7d\n",
		       "write", write_sq.max_count, write_sq.block_size,
		       write_sq.max_active, write_sq.front, write_sq.rear,
		       write_sq.count, write_sq.rear_size, write_sq.active,
		       write_sq.busy, write_sq.syncing, write_sq.locked,
		       write_sq.xruns) ;
#ifdef DEBUG_DMASOUND
printk("dmasound: stat buffer used %d bytes\n", len) ;
#endif

	if (len >= STAT_BUFF_LEN)
		printk(KERN_ERR "dmasound_core: stat buffer overflowed!\n");

	state.len = len;
	ret = 0;
out:
	mutex_unlock(&dmasound_core_mutex);
	return ret;
}

static int state_release(struct inode *inode, struct file *file)
{
	mutex_lock(&dmasound_core_mutex);
	state.busy = 0;
module_put(dmasound.mach.owner); mutex_unlock(&dmasound_core_mutex); return 0; } static ssize_t state_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int n = state.len - state.ptr; if (n > count) n = count; if (n <= 0) return 0; if (copy_to_user(buf, &state.buf[state.ptr], n)) return -EFAULT; state.ptr += n; return n; } static const struct file_operations state_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = state_read, .open = state_open, .release = state_release, }; static int state_init(void) { #ifndef MODULE int state_unit; #endif state_unit = register_sound_special(&state_fops, SND_DEV_STATUS); if (state_unit < 0) return state_unit ; state.busy = 0; return 0 ; } /* * Config & Setup * * This function is called by _one_ chipset-specific driver */ int dmasound_init(void) { int res ; #ifdef MODULE if (irq_installed) return -EBUSY; #endif /* Set up sound queue, /dev/audio and /dev/dsp. */ /* Set default settings. */ if ((res = sq_init()) < 0) return res ; /* Set up /dev/sndstat. */ if ((res = state_init()) < 0) return res ; /* Set up /dev/mixer. 
*/ mixer_init(); if (!dmasound.mach.irqinit()) { printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n"); return -ENODEV; } #ifdef MODULE irq_installed = 1; #endif printk(KERN_INFO "%s DMA sound driver rev %03d installed\n", dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) + ((dmasound.mach.version>>8) & 0x0f)); printk(KERN_INFO "Core driver edition %02d.%02d : %s driver edition %02d.%02d\n", DMASOUND_CORE_REVISION, DMASOUND_CORE_EDITION, dmasound.mach.name2, (dmasound.mach.version >> 8), (dmasound.mach.version & 0xff)) ; printk(KERN_INFO "Write will use %4d fragments of %7d bytes as default\n", numWriteBufs, writeBufSize) ; return 0; } #ifdef MODULE void dmasound_deinit(void) { if (irq_installed) { sound_silence(); dmasound.mach.irqcleanup(); irq_installed = 0; } write_sq_release_buffers(); if (mixer_unit >= 0) unregister_sound_mixer(mixer_unit); if (state_unit >= 0) unregister_sound_special(state_unit); if (sq_unit >= 0) unregister_sound_dsp(sq_unit); } #else /* !MODULE */ static int dmasound_setup(char *str) { int ints[6], size; str = get_options(str, ARRAY_SIZE(ints), ints); /* check the bootstrap parameter for "dmasound=" */ /* FIXME: other than in the most naive of cases there is no sense in these * buffers being other than powers of two. This is not checked yet. 
*/ switch (ints[0]) { case 3: if ((ints[3] < 0) || (ints[3] > MAX_CATCH_RADIUS)) printk("dmasound_setup: invalid catch radius, using default = %d\n", catchRadius); else catchRadius = ints[3]; /* fall through */ case 2: if (ints[1] < MIN_BUFFERS) printk("dmasound_setup: invalid number of buffers, using default = %d\n", numWriteBufs); else numWriteBufs = ints[1]; /* fall through */ case 1: if ((size = ints[2]) < 256) /* check for small buffer specs */ size <<= 10 ; if (size < MIN_BUFSIZE || size > MAX_BUFSIZE) printk("dmasound_setup: invalid write buffer size, using default = %d\n", writeBufSize); else writeBufSize = size; case 0: break; default: printk("dmasound_setup: invalid number of arguments\n"); return 0; } return 1; } __setup("dmasound=", dmasound_setup); #endif /* !MODULE */ /* * Conversion tables */ #ifdef HAS_8BIT_TABLES /* 8 bit mu-law */ char dmasound_ulaw2dma8[] = { -126, -122, -118, -114, -110, -106, -102, -98, -94, -90, -86, -82, -78, -74, -70, -66, -63, -61, -59, -57, -55, -53, -51, -49, -47, -45, -43, -41, -39, -37, -35, -33, -31, -30, -29, -28, -27, -26, -25, -24, -23, -22, -21, -20, -19, -18, -17, -16, -16, -15, -15, -14, -14, -13, -13, -12, -12, -11, -11, -10, -10, -9, -9, -8, -8, -8, -7, -7, -7, -7, -6, -6, -6, -6, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 125, 121, 117, 113, 109, 105, 101, 97, 93, 89, 85, 81, 77, 73, 69, 65, 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* 8 bit A-law */ char dmasound_alaw2dma8[] = { -22, -21, -24, -23, -18, -17, 
-20, -19, -30, -29, -32, -31, -26, -25, -28, -27, -11, -11, -12, -12, -9, -9, -10, -10, -15, -15, -16, -16, -13, -13, -14, -14, -86, -82, -94, -90, -70, -66, -78, -74, -118, -114, -126, -122, -102, -98, -110, -106, -43, -41, -47, -45, -35, -33, -39, -37, -59, -57, -63, -61, -51, -49, -55, -53, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -6, -6, -6, -6, -5, -5, -5, -5, -8, -8, -8, -8, -7, -7, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3, -4, -4, -4, -4, -4, -4, -4, -4, 21, 20, 23, 22, 17, 16, 19, 18, 29, 28, 31, 30, 25, 24, 27, 26, 10, 10, 11, 11, 8, 8, 9, 9, 14, 14, 15, 15, 12, 12, 13, 13, 86, 82, 94, 90, 70, 66, 78, 74, 118, 114, 126, 122, 102, 98, 110, 106, 43, 41, 47, 45, 35, 33, 39, 37, 59, 57, 63, 61, 51, 49, 55, 53, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 4, 4, 4, 4, 7, 7, 7, 7, 6, 6, 6, 6, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 }; #endif /* HAS_8BIT_TABLES */ /* * Visible symbols for modules */ EXPORT_SYMBOL(dmasound); EXPORT_SYMBOL(dmasound_init); #ifdef MODULE EXPORT_SYMBOL(dmasound_deinit); #endif EXPORT_SYMBOL(dmasound_write_sq); EXPORT_SYMBOL(dmasound_catchRadius); #ifdef HAS_8BIT_TABLES EXPORT_SYMBOL(dmasound_ulaw2dma8); EXPORT_SYMBOL(dmasound_alaw2dma8); #endif
gpl-2.0
ms705/linux
arch/blackfin/mach-bf538/ext-gpio.c
9337
4282
/* * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/err.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/portmux.h> #define DEFINE_REG(reg, off) \ static inline u16 read_##reg(void __iomem *port) \ { return bfin_read16(port + off); } \ static inline void write_##reg(void __iomem *port, u16 v) \ { bfin_write16(port + off, v); } DEFINE_REG(PORTIO, 0x00) DEFINE_REG(PORTIO_CLEAR, 0x10) DEFINE_REG(PORTIO_SET, 0x20) DEFINE_REG(PORTIO_DIR, 0x40) DEFINE_REG(PORTIO_INEN, 0x50) static void __iomem *gpio_chip_to_mmr(struct gpio_chip *chip) { switch (chip->base) { default: /* not really needed, but keeps gcc happy */ case GPIO_PC0: return (void __iomem *)PORTCIO; case GPIO_PD0: return (void __iomem *)PORTDIO; case GPIO_PE0: return (void __iomem *)PORTEIO; } } static int bf538_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); return !!(read_PORTIO(port) & (1u << gpio)); } static void bf538_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); if (value) write_PORTIO_SET(port, (1u << gpio)); else write_PORTIO_CLEAR(port, (1u << gpio)); } static int bf538_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_DIR(port, read_PORTIO_DIR(port) & ~(1u << gpio)); write_PORTIO_INEN(port, read_PORTIO_INEN(port) | (1u << gpio)); return 0; } static int bf538_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_INEN(port, read_PORTIO_INEN(port) & ~(1u << gpio)); bf538_gpio_set_value(port, gpio, value); write_PORTIO_DIR(port, read_PORTIO_DIR(port) | (1u << gpio)); return 0; } static int bf538_gpio_request(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_request(chip->base + 
gpio, chip->label); } static void bf538_gpio_free(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_free(chip->base + gpio); } /* We don't set the irq fields as these banks cannot generate interrupts */ static struct gpio_chip bf538_portc_chip = { .label = "GPIO-PC", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PC0, .ngpio = GPIO_PC9 - GPIO_PC0 + 1, }; static struct gpio_chip bf538_portd_chip = { .label = "GPIO-PD", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PD0, .ngpio = GPIO_PD13 - GPIO_PD0 + 1, }; static struct gpio_chip bf538_porte_chip = { .label = "GPIO-PE", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PE0, .ngpio = GPIO_PE15 - GPIO_PE0 + 1, }; static int __init bf538_extgpio_setup(void) { return gpiochip_add(&bf538_portc_chip) | gpiochip_add(&bf538_portd_chip) | gpiochip_add(&bf538_porte_chip); } arch_initcall(bf538_extgpio_setup); #ifdef CONFIG_PM static struct { u16 data, dir, inen; } gpio_bank_saved[3]; static void __iomem * const port_bases[3] = { (void *)PORTCIO, (void *)PORTDIO, (void *)PORTEIO, }; void bfin_special_gpio_pm_hibernate_suspend(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { gpio_bank_saved[i].data = read_PORTIO(port_bases[i]); gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]); gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]); } } void bfin_special_gpio_pm_hibernate_restore(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { write_PORTIO_INEN(port_bases[i], 
gpio_bank_saved[i].inen); write_PORTIO_SET(port_bases[i], gpio_bank_saved[i].data & gpio_bank_saved[i].dir); write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir); } } #endif
gpl-2.0
k2wl/evolution_i9082
arch/powerpc/boot/addnote.c
10361
5143
/*
 * Program to hack in a PT_NOTE program header entry in an ELF file.
 * This is needed for OF on RS/6000s to load an image correctly.
 * Note that OF needs a program header entry for the note, not an
 * ELF section.
 *
 * Copyright 2000 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Usage: addnote zImage
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

/* CHRP note section */
static const char arch[] = "PowerPC";

#define N_DESCR	6
unsigned int descr[N_DESCR] = {
	0xffffffff,		/* real-mode = true */
	0x02000000,		/* real-base, i.e. where we expect OF to be */
	0xffffffff,		/* real-size */
	0xffffffff,		/* virt-base */
	0xffffffff,		/* virt-size */
	0x4000,			/* load-base */
};

/* RPA note section */
static const char rpaname[] = "IBM,RPA-Client-Config";

/*
 * Note: setting ignore_my_client_config *should* mean that OF ignores
 * all the other fields, but there is a firmware bug which means that
 * it looks at the splpar field at least.  So these values need to be
 * reasonable.
 */
#define N_RPA_DESCR	8
unsigned int rpanote[N_RPA_DESCR] = {
	0,			/* lparaffinity */
	64,			/* min_rmo_size */
	0,			/* min_rmo_percent */
	40,			/* max_pft_size */
	1,			/* splpar */
	-1,			/* min_load */
	0,			/* new_mem_def */
	1,			/* ignore_my_client_config */
};

/* Round a length up to a 4-byte boundary (ELF note padding). */
#define ROUNDUP(len)	(((len) + 3) & ~3)

/* Working copy of the start of the image; all patching happens here. */
unsigned char buf[512];

/* Big-endian accessors into buf[], matching ELFDATA2MSB layout. */
#define GET_16BE(off)	((buf[off] << 8) + (buf[(off)+1]))
#define GET_32BE(off)	((GET_16BE(off) << 16) + GET_16BE((off)+2))

#define PUT_16BE(off, v)	(buf[off] = ((v) >> 8) & 0xff, \
				 buf[(off) + 1] = (v) & 0xff)
#define PUT_32BE(off, v)	(PUT_16BE((off), (v) >> 16), \
				 PUT_16BE((off) + 2, (v)))

/* Structure of an ELF file */
#define E_IDENT		0	/* ELF header */
#define E_PHOFF		28
#define E_PHENTSIZE	42
#define E_PHNUM		44
#define E_HSIZE		52	/* size of ELF header */

#define EI_MAGIC	0	/* offsets in E_IDENT area */
#define EI_CLASS	4
#define EI_DATA		5

#define PH_TYPE		0	/* ELF program header */
#define PH_OFFSET	4
#define PH_FILESZ	16
#define PH_HSIZE	32	/* size of program header */

#define PT_NOTE		4	/* Program header type = note */

#define ELFCLASS32	1
#define ELFDATA2MSB	2

unsigned char elf_magic[4] = { 0x7f, 'E', 'L', 'F' };

int main(int ac, char **av)
{
	int fd, n, i;
	int ph, ps, np;
	int nnote, nnote2, ns;

	if (ac != 2) {
		fprintf(stderr, "Usage: %s elf-file\n", av[0]);
		exit(1);
	}
	fd = open(av[1], O_RDWR);
	if (fd < 0) {
		perror(av[1]);
		exit(1);
	}

	/*
	 * Size of each note: 12-byte header (namesz, descsz, type) plus
	 * the NUL-terminated name padded to 4 bytes, plus the descriptor.
	 */
	nnote = 12 + ROUNDUP(strlen(arch) + 1) + sizeof(descr);
	nnote2 = 12 + ROUNDUP(strlen(rpaname) + 1) + sizeof(rpanote);

	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		exit(1);
	}

	if (n < E_HSIZE || memcmp(&buf[E_IDENT+EI_MAGIC], elf_magic, 4) != 0)
		goto notelf;

	/* Only big-endian 32-bit images are supported by this tool. */
	if (buf[E_IDENT+EI_CLASS] != ELFCLASS32
	    || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) {
		fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n",
			av[1]);
		exit(1);
	}

	ph = GET_32BE(E_PHOFF);
	ps = GET_16BE(E_PHENTSIZE);
	np = GET_16BE(E_PHNUM);
	if (ph < E_HSIZE || ps < PH_HSIZE || np < 1)
		goto notelf;
	/*
	 * Everything we touch (two new program headers plus both note
	 * bodies) must lie inside the bytes we actually read into buf.
	 */
	if (ph + (np + 2) * ps + nnote + nnote2 > n)
		goto nospace;

	/* Bail out (successfully) if a note entry already exists. */
	for (i = 0; i < np; ++i) {
		if (GET_32BE(ph + PH_TYPE) == PT_NOTE) {
			fprintf(stderr, "%s already has a note entry\n",
				av[1]);
			exit(0);
		}
		ph += ps;
	}

	/* XXX check that the area we want to use is all zeroes */
	for (i = 0; i < 2 * ps + nnote + nnote2; ++i)
		if (buf[ph + i] != 0)
			goto nospace;

	/* fill in the program header entry */
	ns = ph + 2 * ps;
	PUT_32BE(ph + PH_TYPE, PT_NOTE);
	PUT_32BE(ph + PH_OFFSET, ns);
	PUT_32BE(ph + PH_FILESZ, nnote);

	/* fill in the note area we point to */
	/* XXX we should probably make this a proper section */
	PUT_32BE(ns, strlen(arch) + 1);		/* namesz */
	PUT_32BE(ns + 4, N_DESCR * 4);		/* descsz */
	PUT_32BE(ns + 8, 0x1275);		/* type: Open Firmware */
	strcpy((char *) &buf[ns + 12], arch);
	ns += 12 + strlen(arch) + 1;
	for (i = 0; i < N_DESCR; ++i, ns += 4)
		PUT_32BE(ns, descr[i]);

	/* fill in the second program header entry and the RPA note area */
	ph += ps;
	PUT_32BE(ph + PH_TYPE, PT_NOTE);
	PUT_32BE(ph + PH_OFFSET, ns);
	PUT_32BE(ph + PH_FILESZ, nnote2);

	/* fill in the note area we point to */
	PUT_32BE(ns, strlen(rpaname) + 1);
	PUT_32BE(ns + 4, sizeof(rpanote));
	PUT_32BE(ns + 8, 0x12759999);		/* type: RPA client config */
	strcpy((char *) &buf[ns + 12], rpaname);
	ns += 12 + ROUNDUP(strlen(rpaname) + 1);
	for (i = 0; i < N_RPA_DESCR; ++i, ns += 4)
		PUT_32BE(ns, rpanote[i]);

	/* Update the number of program headers */
	PUT_16BE(E_PHNUM, np + 2);

	/* write back */
	lseek(fd, (long) 0, SEEK_SET);
	i = write(fd, buf, n);
	if (i < 0) {
		perror("write");
		exit(1);
	}
	if (i < n) {
		fprintf(stderr, "%s: write truncated\n", av[1]);
		exit(1);
	}

	exit(0);

 notelf:
	fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]);
	exit(1);

 nospace:
	fprintf(stderr, "sorry, I can't find space in %s to put the note\n",
		av[1]);
	exit(1);
}
gpl-2.0
PatrikKT/android_kernel_huawei_y536a1
arch/arm/mach-msm/lpm_levels.c
122
27982
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/hrtimer.h> #include <linux/ktime.h> #include <linux/tick.h> #include <linux/suspend.h> #include <linux/pm_qos.h> #include <linux/of_platform.h> #include <mach/mpm.h> #include <mach/cpuidle.h> #include <mach/event_timer.h> #include "pm.h" #include "rpm-notifier.h" #include "spm.h" #include "idle.h" #define SCLK_HZ (32768) enum { MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1), }; struct power_params { uint32_t latency_us; uint32_t ss_power; uint32_t energy_overhead; uint32_t time_overhead_us; uint32_t target_residency_us; }; struct lpm_cpu_level { const char *name; enum msm_pm_sleep_mode mode; struct power_params pwr; bool use_bc_timer; bool sync; }; struct lpm_system_level { const char *name; uint32_t l2_mode; struct power_params pwr; enum msm_pm_sleep_mode min_cpu_mode; int num_cpu_votes; bool notify_rpm; bool available; bool sync; }; struct lpm_system_state { struct lpm_cpu_level *cpu_level; int num_cpu_levels; struct lpm_system_level *system_level; int num_system_levels; enum msm_pm_sleep_mode sync_cpu_mode; int last_entered_cluster_index; bool allow_synched_levels; bool no_l2_saw; struct spinlock sync_lock; int num_cores_in_sync; }; static struct lpm_system_state sys_state; static bool 
suspend_in_progress; static int64_t suspend_time; struct lpm_lookup_table { uint32_t modes; const char *mode_name; }; static void lpm_system_level_update(void); static void setup_broadcast_timer(void *arg); static int lpm_cpu_callback(struct notifier_block *cpu_nb, unsigned long action, void *hcpu); static struct notifier_block __refdata lpm_cpu_nblk = { .notifier_call = lpm_cpu_callback, }; static uint32_t allowed_l2_mode; static uint32_t sysfs_dbg_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE; static uint32_t default_l2_mode; static ssize_t lpm_levels_attr_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t lpm_levels_attr_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); static int lpm_lvl_dbg_msk; module_param_named( debug_mask, lpm_lvl_dbg_msk, int, S_IRUGO | S_IWUSR | S_IWGRP ); static bool menu_select; module_param_named( menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP ); static int msm_pm_sleep_time_override; module_param_named(sleep_time_override, msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP); static int num_powered_cores; static struct hrtimer lpm_hrtimer; static struct kobj_attribute lpm_l2_kattr = __ATTR(l2, S_IRUGO|S_IWUSR,\ lpm_levels_attr_show, lpm_levels_attr_store); static struct attribute *lpm_levels_attr[] = { &lpm_l2_kattr.attr, NULL, }; static struct attribute_group lpm_levels_attr_grp = { .attrs = lpm_levels_attr, }; /* SYSFS */ static ssize_t lpm_levels_attr_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct kernel_param kp; int rc; kp.arg = &sysfs_dbg_l2_mode; rc = param_get_uint(buf, &kp); if (rc > 0) { strlcat(buf, "\n", PAGE_SIZE); rc++; } return rc; } static ssize_t lpm_levels_attr_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct kernel_param kp; unsigned int temp; int rc; kp.arg = &temp; rc = param_set_uint(buf, &kp); if (rc) return rc; sysfs_dbg_l2_mode = temp; 
lpm_system_level_update(); return count; } static int msm_pm_get_sleep_mode_value(const char *mode_name) { struct lpm_lookup_table pm_sm_lookup[] = { {MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, "wfi"}, {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, "standalone_pc"}, {MSM_PM_SLEEP_MODE_POWER_COLLAPSE, "pc"}, {MSM_PM_SLEEP_MODE_RETENTION, "retention"}, }; int i; int ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) { if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) { ret = pm_sm_lookup[i].modes; break; } } return ret; } static int lpm_set_l2_mode(struct lpm_system_state *system_state, int sleep_mode) { int lpm = sleep_mode; int rc = 0; if (system_state->no_l2_saw) goto bail_set_l2_mode; msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON); switch (sleep_mode) { case MSM_SPM_L2_MODE_POWER_COLLAPSE: msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF); break; case MSM_SPM_L2_MODE_GDHS: msm_pm_set_l2_flush_flag(MSM_SCM_L2_GDHS); break; case MSM_SPM_L2_MODE_PC_NO_RPM: msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF); break; case MSM_SPM_L2_MODE_RETENTION: case MSM_SPM_L2_MODE_DISABLED: break; default: lpm = MSM_SPM_L2_MODE_DISABLED; break; } rc = msm_spm_l2_set_low_power_mode(lpm, true); if (rc) { if (rc == -ENXIO) WARN_ON_ONCE(1); else pr_err("%s: Failed to set L2 low power mode %d, ERR %d", __func__, lpm, rc); } bail_set_l2_mode: return rc; } static void lpm_system_level_update(void) { int i; struct lpm_system_level *l = NULL; uint32_t max_l2_mode; static DEFINE_MUTEX(lpm_lock); mutex_lock(&lpm_lock); if (num_powered_cores == 1) allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE; else if (sys_state.allow_synched_levels) allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE; else allowed_l2_mode = default_l2_mode; max_l2_mode = min(allowed_l2_mode, sysfs_dbg_l2_mode); for (i = 0; i < sys_state.num_system_levels; i++) { l = &sys_state.system_level[i]; l->available = !(l->l2_mode > max_l2_mode); } mutex_unlock(&lpm_lock); } static int lpm_system_mode_select( struct lpm_system_state *system_state, 
uint32_t sleep_us, bool from_idle) { int best_level = -1; int i; uint32_t best_level_pwr = ~0UL; uint32_t pwr; uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); if (!system_state->system_level) return -EINVAL; for (i = 0; i < system_state->num_system_levels; i++) { struct lpm_system_level *system_level = &system_state->system_level[i]; struct power_params *pwr_param = &system_level->pwr; if (!system_level->available) continue; if (system_level->sync && system_level->num_cpu_votes != num_powered_cores) continue; if (latency_us < pwr_param->latency_us && from_idle) continue; if (sleep_us < pwr_param->time_overhead_us) continue; /* * After the suspend prepare notifications its possible * for the CPU to enter a system sleep mode. But MPM would have * already requested a XO clock based on the wakeup irqs. To * prevent suspend votes from being overriden by idle irqs, MPM * doesn't send an updated MPM vote after suspend_prepare * callback. * To ensure that XO sleep vote isn't used if and when the * device enters idle PC after suspend prepare callback, * disallow any low power modes that notifies RPM after suspend * prepare function is called */ if (suspend_in_progress && system_level->notify_rpm && from_idle) continue; if ((sleep_us >> 10) > pwr_param->time_overhead_us) { pwr = pwr_param->ss_power; } else { pwr = pwr_param->ss_power; pwr -= (pwr_param->time_overhead_us * pwr_param->ss_power) / sleep_us; pwr += pwr_param->energy_overhead / sleep_us; } if (best_level_pwr >= pwr) { best_level = i; best_level_pwr = pwr; } } return best_level; } static void lpm_system_prepare(struct lpm_system_state *system_state, int index, bool from_idle) { struct lpm_system_level *lvl; struct clock_event_device *bc = tick_get_broadcast_device()->evtdev; uint32_t sclk; int64_t us = (~0ULL); int dbg_mask; int ret; const struct cpumask *nextcpu; spin_lock(&system_state->sync_lock); if (index < 0 || num_powered_cores != system_state->num_cores_in_sync) { 
spin_unlock(&system_state->sync_lock); return; } if (from_idle) { dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_IDLE_LIMITS; us = ktime_to_us(ktime_sub(bc->next_event, ktime_get())); nextcpu = bc->cpumask; } else { dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_SUSPEND_LIMITS; nextcpu = cpumask_of(smp_processor_id()); } lvl = &system_state->system_level[index]; ret = lpm_set_l2_mode(system_state, lvl->l2_mode); if (ret && ret != -ENXIO) { pr_warn("%s(): Cannot set L2 Mode %d, ret:%d\n", __func__, lvl->l2_mode, ret); goto bail_system_sleep; } if (lvl->notify_rpm) { ret = msm_rpm_enter_sleep(dbg_mask, nextcpu); if (ret) { pr_err("rpm_enter_sleep() failed with rc = %d\n", ret); goto bail_system_sleep; } if (!from_idle) us = USEC_PER_SEC * msm_pm_sleep_time_override; do_div(us, USEC_PER_SEC/SCLK_HZ); sclk = (uint32_t)us; msm_mpm_enter_sleep(sclk, from_idle, nextcpu); } system_state->last_entered_cluster_index = index; spin_unlock(&system_state->sync_lock); return; bail_system_sleep: if (default_l2_mode != system_state->system_level[index].l2_mode) lpm_set_l2_mode(system_state, default_l2_mode); spin_unlock(&system_state->sync_lock); } static void lpm_system_unprepare(struct lpm_system_state *system_state, int cpu_index, bool from_idle) { int index, i; struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index]; bool first_core_up; if (cpu_level->mode < system_state->sync_cpu_mode) return; spin_lock(&system_state->sync_lock); first_core_up = (system_state->num_cores_in_sync == num_powered_cores); system_state->num_cores_in_sync--; if (!system_state->system_level) goto unlock_and_return; index = system_state->last_entered_cluster_index; for (i = 0; i < system_state->num_system_levels; i++) { struct lpm_system_level *system_lvl = &system_state->system_level[i]; if (cpu_level->mode >= system_lvl->min_cpu_mode) system_lvl->num_cpu_votes--; } if (!first_core_up || index < 0) goto unlock_and_return; if (default_l2_mode != system_state->system_level[index].l2_mode) 
lpm_set_l2_mode(system_state, default_l2_mode); if (system_state->system_level[index].notify_rpm) { msm_rpm_exit_sleep(); msm_mpm_exit_sleep(from_idle); } unlock_and_return: system_state->last_entered_cluster_index = -1; spin_unlock(&system_state->sync_lock); } s32 msm_cpuidle_get_deep_idle_latency(void) { int i; struct lpm_cpu_level *level = sys_state.cpu_level; if (!level) return 0; for (i = 0; i < sys_state.num_cpu_levels; i++, level++) { if (level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) break; } if (i == sys_state.num_cpu_levels) return 0; else return level->pwr.latency_us; } static int lpm_cpu_callback(struct notifier_block *cpu_nb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: ++num_powered_cores; lpm_system_level_update(); break; case CPU_DEAD: case CPU_UP_CANCELED: num_powered_cores = num_online_cpus(); lpm_system_level_update(); break; case CPU_ONLINE: smp_call_function_single((unsigned long)hcpu, setup_broadcast_timer, (void *)true, 1); break; default: break; } return NOTIFY_OK; } static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h) { return HRTIMER_NORESTART; } static void msm_pm_set_timer(uint32_t modified_time_us) { u64 modified_time_ns = modified_time_us * NSEC_PER_USEC; ktime_t modified_ktime = ns_to_ktime(modified_time_ns); lpm_hrtimer.function = lpm_hrtimer_cb; hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED); } static noinline int lpm_cpu_power_select(struct cpuidle_device *dev, int *index) { int best_level = -1; uint32_t best_level_pwr = ~0UL; uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); uint32_t sleep_us = (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length())); uint32_t modified_time_us = 0; uint32_t next_event_us = 0; uint32_t power; int i; if (!sys_state.cpu_level) return -EINVAL; if (!dev->cpu) next_event_us = (uint32_t)(ktime_to_us(get_next_event_time())); for (i = 0; i < sys_state.num_cpu_levels; i++) { struct lpm_cpu_level *level = 
&sys_state.cpu_level[i]; struct power_params *pwr = &level->pwr; uint32_t next_wakeup_us = sleep_us; enum msm_pm_sleep_mode mode = level->mode; bool allow; if (level->sync && num_online_cpus() > 1 && !sys_state.allow_synched_levels) continue; allow = msm_cpu_pm_check_mode(dev->cpu, mode, true); if (!allow) continue; if (latency_us < pwr->latency_us) continue; if (next_event_us) if (next_event_us < pwr->latency_us) continue; if (((next_event_us - pwr->latency_us) < sleep_us) || (next_event_us < sleep_us)) { next_wakeup_us = next_event_us - pwr->latency_us; } if (next_wakeup_us <= pwr->time_overhead_us) continue; if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == mode) || (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)) if (!dev->cpu && msm_rpm_waiting_for_ack()) break; if ((next_wakeup_us >> 10) > pwr->latency_us) { power = pwr->ss_power; } else { power = pwr->ss_power; power -= (pwr->latency_us * pwr->ss_power) / next_wakeup_us; power += pwr->energy_overhead / next_wakeup_us; } if (best_level_pwr >= power) { best_level = i; best_level_pwr = power; if (next_event_us < sleep_us && (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)) modified_time_us = next_event_us - pwr->latency_us; else modified_time_us = 0; } } if (modified_time_us && !dev->cpu) msm_pm_set_timer(modified_time_us); return best_level; } static int lpm_get_l2_cache_value(const char *l2_str) { int i; struct lpm_lookup_table l2_mode_lookup[] = { {MSM_SPM_L2_MODE_POWER_COLLAPSE, "l2_cache_pc"}, {MSM_SPM_L2_MODE_PC_NO_RPM, "l2_cache_pc_no_rpm"}, {MSM_SPM_L2_MODE_GDHS, "l2_cache_gdhs"}, {MSM_SPM_L2_MODE_RETENTION, "l2_cache_retention"}, {MSM_SPM_L2_MODE_DISABLED, "l2_cache_active"} }; for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++) if (!strcmp(l2_str, l2_mode_lookup[i].mode_name)) return l2_mode_lookup[i].modes; return -EINVAL; } static int lpm_levels_sysfs_add(void) { struct kobject *module_kobj = NULL; struct kobject *low_power_kobj = NULL; int rc = 0; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); 
if (!module_kobj) { pr_err("%s: cannot find kobject for module %s\n", __func__, KBUILD_MODNAME); rc = -ENOENT; goto resource_sysfs_add_exit; } low_power_kobj = kobject_create_and_add( "enable_low_power", module_kobj); if (!low_power_kobj) { pr_err("%s: cannot create kobject\n", __func__); rc = -ENOMEM; goto resource_sysfs_add_exit; } rc = sysfs_create_group(low_power_kobj, &lpm_levels_attr_grp); resource_sysfs_add_exit: if (rc) { if (low_power_kobj) { sysfs_remove_group(low_power_kobj, &lpm_levels_attr_grp); kobject_del(low_power_kobj); } } return rc; } static int lpm_cpu_menu_select(struct cpuidle_device *dev, int *index) { int j; for (; *index >= 0; (*index)--) { int mode = 0; bool allow = false; allow = msm_cpu_pm_check_mode(dev->cpu, mode, true); if (!allow) continue; for (j = sys_state.num_cpu_levels; j >= 0; j--) { struct lpm_cpu_level *l = &sys_state.cpu_level[j]; if (mode == l->mode) return j; } } return -EPERM; } static inline void lpm_cpu_prepare(struct lpm_system_state *system_state, int cpu_index, bool from_idle) { struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index]; unsigned int cpu = smp_processor_id(); /* Use broadcast timer for aggregating sleep mode within a cluster. 
* A broadcast timer could be used because of harware restriction or * to ensure that we BC timer is used incase a cpu mode could trigger * a cluster level sleep */ if (from_idle && (cpu_level->use_bc_timer || (cpu_level->mode >= system_state->sync_cpu_mode))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); } static inline void lpm_cpu_unprepare(struct lpm_system_state *system_state, int cpu_index, bool from_idle) { struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index]; unsigned int cpu = smp_processor_id(); if (from_idle && (cpu_level->use_bc_timer || (cpu_level->mode >= system_state->sync_cpu_mode))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); } static int lpm_system_select(struct lpm_system_state *system_state, int cpu_index, bool from_idle) { uint64_t us = (~0ULL); struct clock_event_device *ed; struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index]; int i; bool last_core_down; if (cpu_level->mode < system_state->sync_cpu_mode) return -EINVAL; spin_lock(&system_state->sync_lock); last_core_down = (++system_state->num_cores_in_sync == num_powered_cores); if (!system_state->system_level) { spin_unlock(&system_state->sync_lock); return -EINVAL; } for (i = 0; i < system_state->num_system_levels; i++) { struct lpm_system_level *system_lvl = &system_state->system_level[i]; if (cpu_level->mode >= system_lvl->min_cpu_mode) system_lvl->num_cpu_votes++; } spin_unlock(&system_state->sync_lock); if (!last_core_down) return -EBUSY; ed = tick_get_broadcast_device()->evtdev; if (!ed) return -EINVAL; if (from_idle) us = ktime_to_us(ktime_sub(ed->next_event, ktime_get())); else us = (~0ULL); return lpm_system_mode_select(system_state, (uint32_t)(us), from_idle); } static void lpm_enter_low_power(struct lpm_system_state *system_state, int cpu_index, bool from_idle) { int idx; struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index]; cpu_level = &system_state->cpu_level[cpu_index]; lpm_cpu_prepare(system_state, 
cpu_index, from_idle); idx = lpm_system_select(system_state, cpu_index, from_idle); lpm_system_prepare(system_state, idx, from_idle); msm_cpu_pm_enter_sleep(cpu_level->mode, from_idle); lpm_system_unprepare(system_state, cpu_index, from_idle); lpm_cpu_unprepare(system_state, cpu_index, from_idle); } static int lpm_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int64_t time = ktime_to_ns(ktime_get()); int idx; idx = menu_select ? lpm_cpu_menu_select(dev, &index) : lpm_cpu_power_select(dev, &index); if (idx < 0) { local_irq_enable(); return -EPERM; } lpm_enter_low_power(&sys_state, idx, true); time = ktime_to_ns(ktime_get()) - time; do_div(time, 1000); dev->last_residency = (int)time; local_irq_enable(); return idx; } static int lpm_suspend_enter(suspend_state_t state) { int i; for (i = sys_state.num_cpu_levels - 1; i >= 0; i--) { bool allow = msm_cpu_pm_check_mode(smp_processor_id(), sys_state.cpu_level[i].mode, false); if (allow) break; } if (i < 0) return -EINVAL; lpm_enter_low_power(&sys_state, i, false); return 0; } static int lpm_suspend_prepare(void) { struct timespec ts; getnstimeofday(&ts); suspend_time = timespec_to_ns(&ts); suspend_in_progress = true; msm_mpm_suspend_prepare(); return 0; } static void lpm_suspend_wake(void) { struct timespec ts; getnstimeofday(&ts); suspend_time = timespec_to_ns(&ts) - suspend_time; msm_pm_add_stat(MSM_PM_STAT_SUSPEND, suspend_time); msm_mpm_suspend_wake(); suspend_in_progress = false; } static struct platform_device lpm_dev = { .name = "msm_pm", .id = -1, }; static const struct platform_suspend_ops lpm_suspend_ops = { .enter = lpm_suspend_enter, .valid = suspend_valid_only_mem, .prepare_late = lpm_suspend_prepare, .wake = lpm_suspend_wake, }; static void setup_broadcast_timer(void *arg) { unsigned long reason = (unsigned long)arg; int cpu = smp_processor_id(); reason = reason ? 
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; clockevents_notify(reason, &cpu); } static struct cpuidle_driver msm_cpuidle_driver = { .name = "msm_idle", .owner = THIS_MODULE, }; static void lpm_cpuidle_init(void) { int i = 0; int state_count = 0; if (!sys_state.cpu_level) return; BUG_ON(sys_state.num_cpu_levels > CPUIDLE_STATE_MAX); for (i = 0; i < sys_state.num_cpu_levels; i++) { struct cpuidle_state *st = &msm_cpuidle_driver.states[i]; struct lpm_cpu_level *cpu_level = &sys_state.cpu_level[i]; snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); st->flags = 0; st->exit_latency = cpu_level->pwr.latency_us; st->power_usage = cpu_level->pwr.ss_power; st->target_residency = 0; st->enter = lpm_cpuidle_enter; state_count++; } msm_cpuidle_driver.state_count = state_count; msm_cpuidle_driver.safe_state_index = 0; if (cpuidle_register(&msm_cpuidle_driver, NULL)) pr_err("%s(): Failed to register CPUIDLE device\n", __func__); } static int lpm_parse_power_params(struct device_node *node, struct power_params *pwr) { char *key; int ret; key = "qcom,latency-us"; ret = of_property_read_u32(node, key, &pwr->latency_us); if (ret) goto fail; key = "qcom,ss-power"; ret = of_property_read_u32(node, key, &pwr->ss_power); if (ret) goto fail; key = "qcom,energy-overhead"; ret = of_property_read_u32(node, key, &pwr->energy_overhead); if (ret) goto fail; key = "qcom,time-overhead"; ret = of_property_read_u32(node, key, &pwr->time_overhead_us); if (ret) goto fail; fail: if (ret) pr_err("%s(): Error reading %s\n", __func__, key); return ret; } static int lpm_cpu_probe(struct platform_device *pdev) { struct lpm_cpu_level *level = NULL, *l; struct device_node *node = NULL; int num_levels = 0; char *key; int ret; for_each_child_of_node(pdev->dev.of_node, node) num_levels++; level = kzalloc(num_levels * sizeof(struct lpm_cpu_level), GFP_KERNEL); if (!level) return -ENOMEM; l = &level[0]; 
for_each_child_of_node(pdev->dev.of_node, node) { key = "qcom,mode"; ret = of_property_read_string(node, key, &l->name); if (ret) { pr_err("%s(): Cannot read cpu mode%s\n", __func__, key); goto fail; } l->mode = msm_pm_get_sleep_mode_value(l->name); if (l->mode < 0) { pr_err("%s():Cannot parse cpu mode:%s\n", __func__, l->name); goto fail; } if (l->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) l->sync = true; key = "qcom,use-broadcast-timer"; l->use_bc_timer = of_property_read_bool(node, key); ret = lpm_parse_power_params(node, &l->pwr); if (ret) { pr_err("%s(): cannot Parse power params\n", __func__); goto fail; } l++; } sys_state.cpu_level = level; sys_state.num_cpu_levels = num_levels; return ret; fail: kfree(level); return ret; } static int lpm_system_probe(struct platform_device *pdev) { struct lpm_system_level *level = NULL, *l; int num_levels = 0; struct device_node *node; char *key; int ret; for_each_child_of_node(pdev->dev.of_node, node) num_levels++; level = kzalloc(num_levels * sizeof(struct lpm_system_level), GFP_KERNEL); if (!level) return -ENOMEM; l = &level[0]; for_each_child_of_node(pdev->dev.of_node, node) { key = "qcom,l2"; ret = of_property_read_string(node, key, &l->name); if (ret) { pr_err("%s(): Failed to read L2 mode\n", __func__); goto fail; } l->l2_mode = lpm_get_l2_cache_value(l->name); if (l->l2_mode < 0) { pr_err("%s(): Failed to read l2 cache mode\n", __func__); goto fail; } key = "qcom,send-rpm-sleep-set"; l->notify_rpm = of_property_read_bool(node, key); if (l->l2_mode >= MSM_SPM_L2_MODE_GDHS) l->sync = true; ret = lpm_parse_power_params(node, &l->pwr); if (ret) { pr_err("%s(): Failed to parse power params\n", __func__); goto fail; } key = "qcom,sync-cpus"; l->sync = of_property_read_bool(node, key); if (l->sync) { const char *name; key = "qcom,min-cpu-mode"; ret = of_property_read_string(node, key, &name); if (ret) { pr_err("%s(): Required key %snot found\n", __func__, name); goto fail; } l->min_cpu_mode = 
msm_pm_get_sleep_mode_value(name); if (l->min_cpu_mode < 0) { pr_err("%s(): Cannot parse cpu mode:%s\n", __func__, name); goto fail; } if (l->min_cpu_mode < sys_state.sync_cpu_mode) sys_state.sync_cpu_mode = l->min_cpu_mode; } l++; } sys_state.system_level = level; sys_state.num_system_levels = num_levels; sys_state.last_entered_cluster_index = -1; return ret; fail: kfree(level); return ret; } static int lpm_probe(struct platform_device *pdev) { struct device_node *node = NULL; char *key = NULL; int ret; node = pdev->dev.of_node; key = "qcom,allow-synced-levels"; sys_state.allow_synched_levels = of_property_read_bool(node, key); key = "qcom,no-l2-saw"; sys_state.no_l2_saw = of_property_read_bool(node, key); sys_state.sync_cpu_mode = MSM_PM_SLEEP_MODE_NR; spin_lock_init(&sys_state.sync_lock); sys_state.num_cores_in_sync = 0; ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); if (ret) goto fail; /* Do the following two steps only if L2 SAW is present */ num_powered_cores = num_online_cpus(); if (!sys_state.no_l2_saw) { int ret; const char *l2; key = "qcom,default-l2-state"; ret = of_property_read_string(node, key, &l2); if (ret) { pr_err("%s(): Failed to read default L2 mode\n", __func__); goto fail; } default_l2_mode = lpm_get_l2_cache_value(l2); if (default_l2_mode < 0) { pr_err("%s(): Unable to parse default L2 mode\n", __func__); goto fail; } if (lpm_levels_sysfs_add()) goto fail; msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON); } else { msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF); default_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE; } get_cpu(); on_each_cpu(setup_broadcast_timer, (void *)true, 1); put_cpu(); register_hotcpu_notifier(&lpm_cpu_nblk); lpm_system_level_update(); platform_device_register(&lpm_dev); suspend_set_ops(&lpm_suspend_ops); hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); lpm_cpuidle_init(); return 0; fail: pr_err("%s: Error in name %s key %s\n", __func__, node->full_name, key); return -EFAULT; } static struct 
of_device_id cpu_modes_mtch_tbl[] = { {.compatible = "qcom,cpu-modes"}, {}, }; static struct platform_driver cpu_modes_driver = { .probe = lpm_cpu_probe, .driver = { .name = "cpu-modes", .owner = THIS_MODULE, .of_match_table = cpu_modes_mtch_tbl, }, }; static struct of_device_id system_modes_mtch_tbl[] = { {.compatible = "qcom,system-modes"}, {}, }; static struct platform_driver system_modes_driver = { .probe = lpm_system_probe, .driver = { .name = "system-modes", .owner = THIS_MODULE, .of_match_table = system_modes_mtch_tbl, }, }; static struct of_device_id lpm_levels_match_table[] = { {.compatible = "qcom,lpm-levels"}, {}, }; static struct platform_driver lpm_levels_driver = { .probe = lpm_probe, .driver = { .name = "lpm-levels", .owner = THIS_MODULE, .of_match_table = lpm_levels_match_table, }, }; static int __init lpm_levels_module_init(void) { int rc; rc = platform_driver_register(&cpu_modes_driver); if (rc) { pr_err("Error registering %s\n", cpu_modes_driver.driver.name); goto fail; } rc = platform_driver_register(&system_modes_driver); if (rc) { platform_driver_unregister(&cpu_modes_driver); pr_err("Error registering %s\n", system_modes_driver.driver.name); goto fail; } rc = platform_driver_register(&lpm_levels_driver); if (rc) { platform_driver_unregister(&cpu_modes_driver); platform_driver_unregister(&system_modes_driver); pr_err("Error registering %s\n", lpm_levels_driver.driver.name); } fail: return rc; } late_initcall(lpm_levels_module_init);
gpl-2.0
ztotherad/nd7
arch/arm/mach-exynos/kona-input.c
122
11117
/* * arch/arm/mach-exynos/p4-input.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/i2c.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <plat/gpio-cfg.h> #include <plat/iic.h> #include <linux/regulator/consumer.h> static u32 hw_rev; #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301) #include <linux/synaptics_s7301.h> static bool have_tsp_ldo; static struct charger_callbacks *charger_callbacks; void synaptics_ts_charger_infom(bool en) { if (charger_callbacks && charger_callbacks->inform_charger) charger_callbacks->inform_charger(charger_callbacks, en); } static void synaptics_ts_register_callback(struct charger_callbacks *cb) { charger_callbacks = cb; printk(KERN_DEBUG "[TSP] %s\n", __func__); } static int synaptics_ts_set_power(bool en) { struct regulator *regulator; if (!have_tsp_ldo) return -1; printk(KERN_DEBUG "[TSP] %s(%d)\n", __func__, en); regulator = regulator_get(NULL, "tsp_3.3v"); if (IS_ERR(regulator)) return PTR_ERR(regulator); if (en) { s3c_gpio_cfgpin(GPIO_TSP_SDA_18V, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_TSP_SDA_18V, S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(GPIO_TSP_SCL_18V, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_TSP_SCL_18V, S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(GPIO_TSP_LDO_ON, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_LDO_ON, S3C_GPIO_PULL_NONE); if (regulator_is_enabled(regulator)) { printk(KERN_DEBUG "[TSP] regulator force disabled before enabling\n"); regulator_force_disable(regulator); msleep(100); } regulator_enable(regulator); gpio_set_value(GPIO_TSP_LDO_ON, 1); s3c_gpio_setpull(GPIO_TSP_INT, S3C_GPIO_PULL_NONE); s3c_gpio_cfgpin(GPIO_TSP_INT, S3C_GPIO_SFN(0xf)); } else { s3c_gpio_cfgpin(GPIO_TSP_SDA_18V, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_SDA_18V, 
S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_SDA_18V, 0); s3c_gpio_cfgpin(GPIO_TSP_SCL_18V, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_SCL_18V, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_SCL_18V, 0); s3c_gpio_cfgpin(GPIO_TSP_INT, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_INT, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_INT, 0); s3c_gpio_cfgpin(GPIO_TSP_LDO_ON, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_LDO_ON, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_LDO_ON, 0); if (regulator_is_enabled(regulator)) regulator_disable(regulator); } regulator_put(regulator); return 0; } static void synaptics_ts_reset(void) { printk(KERN_DEBUG "[TSP] %s\n", __func__); synaptics_ts_set_power(false); msleep(100); synaptics_ts_set_power(true); msleep(200); } #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYLED) static void synaptics_ts_led_control(int on_off) { printk(KERN_DEBUG "[TSP] %s [%d]\n", __func__, on_off); if (hw_rev < 1) return ; if (on_off == 1) gpio_direction_output(GPIO_TSP_2TOUCH_EN, 1); else gpio_direction_output(GPIO_TSP_2TOUCH_EN, 0); } #endif #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYS) static u8 synaptics_button_codes[] = {KEY_MENU, KEY_BACK}; static u8 synaptics_extend_button_codes[] = {KEY_DUMMY_1, KEY_MENU, KEY_DUMMY_2, KEY_BACK, KEY_DUMMY_3}; static struct synaptics_button_map synpatics_button_map = { .nbuttons = ARRAY_SIZE(synaptics_button_codes), .map = synaptics_button_codes, }; static struct synaptics_extend_button_map synptics_extend_button_map = { .nbuttons = ARRAY_SIZE(synaptics_extend_button_codes), .map = synaptics_extend_button_codes, .button_mask = BUTTON_0_MASK | BUTTON_2_MASK | BUTTON_4_MASK, }; #endif static struct synaptics_platform_data synaptics_ts_pdata = { .gpio_attn = GPIO_TSP_INT, .max_x = 799, .max_y = 1279, .max_pressure = 255, .max_width = 100, .x_line = 26, .y_line = 41, .swap_xy = false, .invert_x = false, .invert_y = false, #if defined(CONFIG_SEC_TOUCHSCREEN_SURFACE_TOUCH) .palm_threshold = 28, #endif .set_power = 
synaptics_ts_set_power, .hw_reset = synaptics_ts_reset, .register_cb = synaptics_ts_register_callback, #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYLED) .led_control = synaptics_ts_led_control, .led_event = false, #endif #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYS) .button_map = &synpatics_button_map, .extend_button_map = &synptics_extend_button_map, .support_extend_button = false, .enable_extend_button_event = false, #endif }; static struct i2c_board_info i2c_synaptics[] __initdata = { { I2C_BOARD_INFO(SYNAPTICS_TS_NAME, SYNAPTICS_TS_ADDR), .platform_data = &synaptics_ts_pdata, }, }; #elif defined(CONFIG_RMI4_I2C) #include <linux/interrupt.h> #include <linux/rmi4.h> #include <linux/input.h> #define TOUCH_ON 1 #define TOUCH_OFF 0 #define RMI4_DEFAULT_ATTN_GPIO GPIO_TSP_INT #define RMI4_DEFAULT_ATTN_NAME "TSP_INT" struct syna_gpio_data { u16 gpio_number; char *gpio_name; }; static bool have_tsp_ldo; static struct syna_gpio_data rmi4_default_gpio_data = { .gpio_number = RMI4_DEFAULT_ATTN_GPIO, .gpio_name = RMI4_DEFAULT_ATTN_NAME, }; #define SYNA_ADDR 0x20 static unsigned char SYNA_f1a_button_codes[] = {KEY_DUMMY_1, KEY_MENU, KEY_DUMMY_2, KEY_BACK, KEY_DUMMY_3}; static struct rmi_button_map SYNA_f1a_button_map = { .nbuttons = ARRAY_SIZE(SYNA_f1a_button_codes), .map = SYNA_f1a_button_codes, }; static int SYNA_ts_power(bool on_off) { struct regulator *regulator; if (!have_tsp_ldo) return -1; printk(KERN_DEBUG "[TSP] %s(%d)\n", __func__, on_off); regulator = regulator_get(NULL, "tsp_3.3v"); if (IS_ERR(regulator)) return PTR_ERR(regulator); if (on_off) { s3c_gpio_cfgpin(GPIO_TSP_SDA_18V, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_TSP_SDA_18V, S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(GPIO_TSP_SCL_18V, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_TSP_SCL_18V, S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(GPIO_TSP_LDO_ON, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_LDO_ON, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_LDO_ON, 1); regulator_enable(regulator); 
s3c_gpio_setpull(GPIO_TSP_INT, S3C_GPIO_PULL_NONE); s3c_gpio_cfgpin(GPIO_TSP_INT, S3C_GPIO_SFN(0xf)); } else { s3c_gpio_cfgpin(GPIO_TSP_SDA_18V, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_SDA_18V, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_SDA_18V, 0); s3c_gpio_cfgpin(GPIO_TSP_SCL_18V, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_SCL_18V, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_SCL_18V, 0); s3c_gpio_cfgpin(GPIO_TSP_INT, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_INT, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_INT, 0); s3c_gpio_cfgpin(GPIO_TSP_LDO_ON, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_LDO_ON, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_LDO_ON, 0); if (regulator_is_enabled(regulator)) regulator_disable(regulator); } regulator_put(regulator); msleep(200); return 0; } static int synaptics_touchpad_gpio_setup(void *gpio_data, bool configure) { return SYNA_ts_power(configure); } int SYNA_post_suspend(void *pm_data) { pr_info("%s: RMI4 callback.\n", __func__); return SYNA_ts_power(TOUCH_OFF); } int SYNA_pre_resume(void *pm_data) { pr_info("%s: RMI4 callback.\n", __func__); return SYNA_ts_power(TOUCH_ON); } static struct rmi_device_platform_data SYNA_platformdata = { .sensor_name = "s7301", .attn_gpio = RMI4_DEFAULT_ATTN_GPIO, .attn_polarity = RMI_ATTN_ACTIVE_LOW, .gpio_data = &rmi4_default_gpio_data, .gpio_config = synaptics_touchpad_gpio_setup, .f1a_button_map = &SYNA_f1a_button_map, // .reset_delay_ms = 200, #ifdef CONFIG_PM .post_suspend = SYNA_post_suspend, .pre_resume = SYNA_pre_resume, #endif #ifdef CONFIG_RMI4_FWLIB .firmware_name = "KONA-E036", #endif }; static struct i2c_board_info __initdata i2c_synaptics[] = { { I2C_BOARD_INFO("rmi_i2c", SYNA_ADDR), .platform_data = &SYNA_platformdata, }, }; #endif /* CONFIG_RMI4_I2C */ void __init kona_tsp_init(u32 system_rev) { int gpio = 0, irq = 0, err = 0; hw_rev = system_rev; printk(KERN_DEBUG "[TSP] %s rev : %u\n", __func__, hw_rev); gpio = GPIO_TSP_LDO_ON; gpio_request(gpio, "TSP_LDO_ON"); 
gpio_direction_output(gpio, 0); gpio_export(gpio, 0); have_tsp_ldo = true; gpio = GPIO_TSP_INT; gpio_request(gpio, "TSP_INT"); s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf)); s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP); s5p_register_gpio_interrupt(gpio); irq = gpio_to_irq(gpio); #ifdef CONFIG_S3C_DEV_I2C3 s3c_i2c3_set_platdata(NULL); i2c_synaptics[0].irq = irq; i2c_register_board_info(3, i2c_synaptics, ARRAY_SIZE(i2c_synaptics)); #endif /* CONFIG_S3C_DEV_I2C3 */ #if defined(CONFIG_MACH_KONA_EUR_OPEN) #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYS) /* rev01 touch button0 & button1 position change */ if (system_rev == 1) { synaptics_ts_pdata.button_map->map[0] = KEY_BACK; synaptics_ts_pdata.button_map->map[1] = KEY_MENU; } #endif #endif #if defined(CONFIG_TOUCHSCREEN_SYNAPTICS_S7301_KEYLED) if (system_rev > 0) { synaptics_ts_pdata.led_event = true; err = gpio_request(GPIO_TSP_2TOUCH_EN, "GPIO_TSP_2TOUCH_EN"); if (err) printk(KERN_DEBUG "%s gpio_request error\n", __func__); else { s3c_gpio_cfgpin(GPIO_TSP_2TOUCH_EN, S3C_GPIO_OUTPUT); s3c_gpio_setpull(GPIO_TSP_2TOUCH_EN, S3C_GPIO_PULL_NONE); gpio_set_value(GPIO_TSP_2TOUCH_EN, 0); } } /* * button changed 2button -> 5button * KONA 3G, WIFI: gpio >= 3 * KONA LTE : gpio >=2 */ #if defined(CONFIG_MACH_KONA_EUR_LTE) || \ defined(CONFIG_MACH_KONALTE_USA_ATT) if (system_rev >= 2) { #else if (system_rev >= 3) { #endif synaptics_ts_pdata.support_extend_button = true; synaptics_ts_pdata.enable_extend_button_event = true; } #endif } #if defined(CONFIG_KEYBOARD_GPIO) #include <mach/sec_debug.h> #include <linux/gpio_keys.h> #define GPIO_KEYS(_code, _gpio, _active_low, _iswake, _hook) \ { \ .code = _code, \ .gpio = _gpio, \ .active_low = _active_low, \ .type = EV_KEY, \ .wakeup = _iswake, \ .debounce_interval = 10, \ .isr_hook = _hook, \ .value = 1 \ } struct gpio_keys_button kona_buttons[] = { GPIO_KEYS(KEY_VOLUMEUP, GPIO_VOL_UP, 1, 1, sec_debug_check_crash_key), GPIO_KEYS(KEY_VOLUMEDOWN, GPIO_VOL_DOWN, 1, 1, sec_debug_check_crash_key), 
GPIO_KEYS(KEY_POWER, GPIO_nPOWER, 1, 1, sec_debug_check_crash_key), GPIO_KEYS(KEY_HOMEPAGE, GPIO_OK_KEY_ANDROID, 1, 1, sec_debug_check_crash_key), { .code = SW_FLIP, .gpio = GPIO_HALL_SENSOR_INT, .active_low = 0, .type = EV_SW, .wakeup = 1, .debounce_interval = 10, .value = 1, .isr_hook = sec_debug_check_crash_key, }, }; struct gpio_keys_platform_data kona_gpiokeys_platform_data = { kona_buttons, ARRAY_SIZE(kona_buttons), }; static struct platform_device kona_keypad = { .name = "gpio-keys", .dev = { .platform_data = &kona_gpiokeys_platform_data, }, }; #endif void __init kona_key_init(void) { #if defined(CONFIG_KEYBOARD_GPIO) platform_device_register(&kona_keypad); #endif }
gpl-2.0
hzc1126/dell_gallo_kernel_ics
arch/mn10300/kernel/signal.c
122
15032
/* MN10300 Signal handling * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/personality.h> #include <linux/suspend.h> #include <linux/tracehook.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/fpu.h> #include "sigframe.h" #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* * atomically swap in the new signal mask, and wait for a signal. */ asmlinkage long sys_sigsuspend(int history0, int history1, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_thread_flag(TIF_RESTORE_SIGMASK); return -ERESTARTNOHAND; } /* * set signal action syscall */ asmlinkage long sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (verify_area(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } /* * set alternate signal stack syscall */ asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss) { return do_sigaltstack(uss, uoss, __frame->sp); } /* * do a signal return; undo the signal stack. */ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long *_d0) { unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (is_using_fpu(current)) fpu_kill_state(current); #define COPY(x) err |= __get_user(regs->x, &sc->x) COPY(d1); COPY(d2); COPY(d3); COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(e0); COPY(e1); COPY(e2); COPY(e3); COPY(e4); COPY(e5); COPY(e6); COPY(e7); COPY(lar); COPY(lir); COPY(mdr); COPY(mdrq); COPY(mcvf); COPY(mcrl); COPY(mcrh); COPY(sp); COPY(pc); #undef COPY { unsigned int tmpflags; #ifndef CONFIG_MN10300_USING_JTAG #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \ EPSW_T | EPSW_nAR) #else #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \ EPSW_nAR) #endif err |= __get_user(tmpflags, &sc->epsw); regs->epsw = (regs->epsw & ~USER_EPSW) | (tmpflags & USER_EPSW); regs->orig_d0 = -1; /* disable syscall checks */ } { struct fpucontext *buf; err |= __get_user(buf, &sc->fpucontext); if (buf) { if (verify_area(VERIFY_READ, buf, sizeof(*buf))) goto badframe; err |= fpu_restore_sigcontext(buf); } } err |= __get_user(*_d0, &sc->d0); return err; badframe: return 1; } /* * standard signal return syscall */ asmlinkage long sys_sigreturn(void) { struct sigframe __user *frame = (struct sigframe __user *) __frame->sp; sigset_t set; 
long d0; if (verify_area(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask)) goto badframe; if (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(__frame, &frame->sc, &d0)) goto badframe; return d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * realtime signal return syscall */ asmlinkage long sys_rt_sigreturn(void) { struct rt_sigframe __user *frame = (struct rt_sigframe __user *) __frame->sp; sigset_t set; unsigned long d0; if (verify_area(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT) goto badframe; return d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * store the userspace context into a signal frame */ static int setup_sigcontext(struct sigcontext __user *sc, struct fpucontext *fpuctx, struct pt_regs *regs, unsigned long mask) { int tmp, err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->x) COPY(d0); COPY(d1); COPY(d2); COPY(d3); COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(e0); COPY(e1); COPY(e2); COPY(e3); COPY(e4); COPY(e5); COPY(e6); COPY(e7); COPY(lar); COPY(lir); COPY(mdr); COPY(mdrq); COPY(mcvf); COPY(mcrl); COPY(mcrh); COPY(sp); COPY(epsw); COPY(pc); #undef COPY tmp = fpu_setup_sigcontext(fpuctx); if (tmp < 0) err = 1; else err |= __put_user(tmp ? fpuctx : NULL, &sc->fpucontext); /* non-iBCS2 extensions.. 
*/ err |= __put_user(mask, &sc->oldmask); return err; } /* * determine which stack to use.. */ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* default to using normal stack */ sp = regs->sp; /* this is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *) ((sp - frame_size) & ~7UL); } /* * set up a normal signal frame */ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int rsig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap) rsig = current_thread_info()->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) < 0 || __put_user(&frame->sc, &frame->psc) < 0) goto give_sigsegv; if (setup_sigcontext(&frame->sc, &frame->fpuctx, regs, set->sig[0])) goto give_sigsegv; if (_NSIG_WORDS > 1) { if (__copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask))) goto give_sigsegv; } /* set up to return from userspace. 
If provided, use a stub already in * userspace */ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) goto give_sigsegv; } else { if (__put_user((void (*)(void))frame->retcode, &frame->pretcode)) goto give_sigsegv; /* this is mov $,d0; syscall 0 */ if (__put_user(0x2c, (char *)(frame->retcode + 0)) || __put_user(__NR_sigreturn, (char *)(frame->retcode + 1)) || __put_user(0x00, (char *)(frame->retcode + 2)) || __put_user(0xf0, (char *)(frame->retcode + 3)) || __put_user(0xe0, (char *)(frame->retcode + 4))) goto give_sigsegv; flush_icache_range((unsigned long) frame->retcode, (unsigned long) frame->retcode + 5); } /* set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->pc = (unsigned long) ka->sa.sa_handler; regs->d0 = sig; regs->d1 = (unsigned long) &frame->sc; /* the tracer may want to single-step inside the handler */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * set up a realtime signal frame */ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int rsig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap) rsig = current_thread_info()->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) || __put_user(&frame->info, &frame->pinfo) || __put_user(&frame->uc, &frame->puc) || copy_siginfo_to_user(&frame->info, info)) goto give_sigsegv; /* create the ucontext. 
*/ if (__put_user(0, &frame->uc.uc_flags) || __put_user(0, &frame->uc.uc_link) || __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) || __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) || __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size) || setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpuctx, regs, set->sig[0]) || __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) goto give_sigsegv; /* set up to return from userspace. If provided, use a stub already in * userspace */ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) goto give_sigsegv; } else { if (__put_user((void(*)(void))frame->retcode, &frame->pretcode) || /* This is mov $,d0; syscall 0 */ __put_user(0x2c, (char *)(frame->retcode + 0)) || __put_user(__NR_rt_sigreturn, (char *)(frame->retcode + 1)) || __put_user(0x00, (char *)(frame->retcode + 2)) || __put_user(0xf0, (char *)(frame->retcode + 3)) || __put_user(0xe0, (char *)(frame->retcode + 4))) goto give_sigsegv; flush_icache_range((u_long) frame->retcode, (u_long) frame->retcode + 5); } /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->pc = (unsigned long) ka->sa.sa_handler; regs->d0 = sig; regs->d1 = (long) &frame->info; /* the tracer may want to single-step inside the handler */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static inline void stepback(struct pt_regs *regs) { regs->pc -= 2; regs->orig_d0 = -1; } /* * handle the actual delivery of a signal to userspace */ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; /* Are we from a system call? */ if (regs->orig_d0 >= 0) { /* If so, check system call restarting.. 
*/ switch (regs->d0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->d0 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->d0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; stepback(regs); } } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (ret == 0) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } return ret; } /* * handle a potential signal */ static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; siginfo_t info; sigset_t *oldset; int signr; /* we want the common case to go fast, which is why we may in certain * cases get here from kernel mode */ if (!user_mode(regs)) return; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* a signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); tracehook_signal_handler(signr, &info, &ka, regs, test_thread_flag(TIF_SINGLESTEP)); } return; } /* did we come from a system call? 
*/ if (regs->orig_d0 >= 0) { /* restart the system call - no handlers present */ switch (regs->d0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; stepback(regs); break; case -ERESTART_RESTARTBLOCK: regs->d0 = __NR_restart_syscall; stepback(regs); break; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } /* * notification of userspace execution resumption * - triggered by current->work.notify_resume */ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) { /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) { #ifndef CONFIG_MN10300_USING_JTAG regs->epsw |= EPSW_T; clear_thread_flag(TIF_SINGLESTEP); #else BUG(); /* no h/w single-step if using JTAG unit */ #endif } /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(__frame); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
nics21212/android_kernel_samsung_msm8660-common
net/ipv4/ipmr.c
378
59095
/* * IP multicast routing support for mrouted 3.6/3.8 * * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk> * Linux Consultancy and Custom Driver Development * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Michael Chastain : Incorrect size of copying. * Alan Cox : Added the cache manager code * Alan Cox : Fixed the clone/copy bug and device race. * Mike McLagan : Routing by source * Malcolm Beattie : Buffer handling fixes. * Alexey Kuznetsov : Double buffer free and other fixes. * SVR Anand : Fixed several multicast bugs and problems. * Alexey Kuznetsov : Status, optimisations and more. * Brad Parker : Better behaviour on mrouted upcall * overflow. * Carlos Picoto : PIMv1 Support * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header * Relax this requirement to work with older peers. 
* */ #include <asm/system.h> #include <asm/uaccess.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mroute.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/route.h> #include <net/sock.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <linux/notifier.h> #include <linux/if_arp.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <net/ipip.h> #include <net/checksum.h> #include <net/netlink.h> #include <net/fib_rules.h> #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) #define CONFIG_IP_PIMSM 1 #endif struct mr_table { struct list_head list; #ifdef CONFIG_NET_NS struct net *net; #endif u32 id; struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct list_head mfc_cache_array[MFC_LINES]; struct vif_device vif_table[MAXVIFS]; int maxvif; atomic_t cache_resolve_queue_len; int mroute_do_assert; int mroute_do_pim; #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) int mroute_reg_vif_num; #endif }; struct ipmr_rule { struct fib_rule common; }; struct ipmr_result { struct mr_table *mrt; }; /* Big lock, protecting vif table, mrt cache and mroute socket state. * Note that the changes are semaphored via rtnl_lock. 
*/ static DEFINE_RWLOCK(mrt_lock); /* * Multicast router control variables */ #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); /* We return to original Alan's scheme. Hash table of resolved * entries is changed only in process context and protected * with weak lock mrt_lock. Queue of unresolved entries is protected * with strong spinlock mfc_unres_lock. * * In this case data path is free of exclusive locks at all. */ static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); static void ipmr_free_table(struct mr_table *mrt); static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); static void mroute_clean_tables(struct mr_table *mrt); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES #define ipmr_for_each_table(mrt, net) \ list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) static struct mr_table *ipmr_get_table(struct net *net, u32 id) { struct mr_table *mrt; ipmr_for_each_table(mrt, net) { if (mrt->id == id) return mrt; } return NULL; } static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { struct ipmr_result res; struct fib_lookup_arg arg = { .result = &res, }; int err; err = fib_rules_lookup(net->ipv4.mr_rules_ops, flowi4_to_flowi(flp4), 0, &arg); if (err < 0) return err; *mrt = res.mrt; return 0; } static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) { struct ipmr_result *res = arg->result; struct mr_table *mrt; switch (rule->action) { case FR_ACT_TO_TBL: break; case 
FR_ACT_UNREACHABLE: return -ENETUNREACH; case FR_ACT_PROHIBIT: return -EACCES; case FR_ACT_BLACKHOLE: default: return -EINVAL; } mrt = ipmr_get_table(rule->fr_net, rule->table); if (mrt == NULL) return -EAGAIN; res->mrt = mrt; return 0; } static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { return 1; } static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { FRA_GENERIC_POLICY, }; static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) { return 0; } static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) { return 1; } static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh) { frh->dst_len = 0; frh->src_len = 0; frh->tos = 0; return 0; } static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { .family = RTNL_FAMILY_IPMR, .rule_size = sizeof(struct ipmr_rule), .addr_size = sizeof(u32), .action = ipmr_rule_action, .match = ipmr_rule_match, .configure = ipmr_rule_configure, .compare = ipmr_rule_compare, .default_pref = fib_default_rule_pref, .fill = ipmr_rule_fill, .nlgroup = RTNLGRP_IPV4_RULE, .policy = ipmr_rule_policy, .owner = THIS_MODULE, }; static int __net_init ipmr_rules_init(struct net *net) { struct fib_rules_ops *ops; struct mr_table *mrt; int err; ops = fib_rules_register(&ipmr_rules_ops_template, net); if (IS_ERR(ops)) return PTR_ERR(ops); INIT_LIST_HEAD(&net->ipv4.mr_tables); mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) { err = -ENOMEM; goto err1; } err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); if (err < 0) goto err2; net->ipv4.mr_rules_ops = ops; return 0; err2: kfree(mrt); err1: fib_rules_unregister(ops); return err; } static void __net_exit ipmr_rules_exit(struct net *net) { struct mr_table *mrt, *next; list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); ipmr_free_table(mrt); } 
fib_rules_unregister(net->ipv4.mr_rules_ops); } #else #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; mrt; mrt = NULL) static struct mr_table *ipmr_get_table(struct net *net, u32 id) { return net->ipv4.mrt; } static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { *mrt = net->ipv4.mrt; return 0; } static int __net_init ipmr_rules_init(struct net *net) { net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); return net->ipv4.mrt ? 0 : -ENOMEM; } static void __net_exit ipmr_rules_exit(struct net *net) { ipmr_free_table(net->ipv4.mrt); } #endif static struct mr_table *ipmr_new_table(struct net *net, u32 id) { struct mr_table *mrt; unsigned int i; mrt = ipmr_get_table(net, id); if (mrt != NULL) return mrt; mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); if (mrt == NULL) return NULL; write_pnet(&mrt->net, net); mrt->id = id; /* Forwarding cache */ for (i = 0; i < MFC_LINES; i++) INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); INIT_LIST_HEAD(&mrt->mfc_unres_queue); setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, (unsigned long)mrt); #ifdef CONFIG_IP_PIMSM mrt->mroute_reg_vif_num = -1; #endif #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); #endif return mrt; } static void ipmr_free_table(struct mr_table *mrt) { del_timer_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt); kfree(mrt); } /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) { struct net *net = dev_net(dev); dev_close(dev); dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; struct ifreq ifr; struct ip_tunnel_parm p; memset(&p, 0, sizeof(p)); p.iph.daddr = v->vifc_rmt_addr.s_addr; p.iph.saddr = v->vifc_lcl_addr.s_addr; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPIP; sprintf(p.name, "dvmrp%d", v->vifc_vifi); ifr.ifr_ifru.ifru_data = (__force void __user 
*)&p; if (ops->ndo_do_ioctl) { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL); set_fs(oldfs); } } } static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) { struct net_device *dev; dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; int err; struct ifreq ifr; struct ip_tunnel_parm p; struct in_device *in_dev; memset(&p, 0, sizeof(p)); p.iph.daddr = v->vifc_rmt_addr.s_addr; p.iph.saddr = v->vifc_lcl_addr.s_addr; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPIP; sprintf(p.name, "dvmrp%d", v->vifc_vifi); ifr.ifr_ifru.ifru_data = (__force void __user *)&p; if (ops->ndo_do_ioctl) { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL); set_fs(oldfs); } else { err = -EOPNOTSUPP; } dev = NULL; if (err == 0 && (dev = __dev_get_by_name(net, p.name)) != NULL) { dev->flags |= IFF_MULTICAST; in_dev = __in_dev_get_rtnl(dev); if (in_dev == NULL) goto failure; ipv4_devconf_setall(in_dev); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; if (dev_open(dev)) goto failure; dev_hold(dev); } } return dev; failure: /* allow the register to be completed before unregistering. 
*/ rtnl_unlock(); rtnl_lock(); unregister_netdevice(dev); return NULL; } #ifdef CONFIG_IP_PIMSM static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); struct mr_table *mrt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, .flowi4_iif = skb->skb_iif, .flowi4_mark = skb->mark, }; int err; err = ipmr_fib_lookup(net, &fl4, &mrt); if (err < 0) { kfree_skb(skb); return err; } read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops reg_vif_netdev_ops = { .ndo_start_xmit = reg_vif_xmit, }; static void reg_vif_setup(struct net_device *dev) { dev->type = ARPHRD_PIMREG; dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = &reg_vif_netdev_ops, dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; } static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) { struct net_device *dev; struct in_device *in_dev; char name[IFNAMSIZ]; if (mrt->id == RT_TABLE_DEFAULT) sprintf(name, "pimreg"); else sprintf(name, "pimreg%u", mrt->id); dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; dev_net_set(dev, net); if (register_netdevice(dev)) { free_netdev(dev); return NULL; } dev->iflink = 0; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (!in_dev) { rcu_read_unlock(); goto failure; } ipv4_devconf_setall(in_dev); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; rcu_read_unlock(); if (dev_open(dev)) goto failure; dev_hold(dev); return dev; failure: /* allow the register to be completed before unregistering. 
*/ rtnl_unlock(); rtnl_lock(); unregister_netdevice(dev); return NULL; } #endif /* * Delete a VIF entry * @notify: Set to 1, if the caller is a notifier_call */ static int vif_delete(struct mr_table *mrt, int vifi, int notify, struct list_head *head) { struct vif_device *v; struct net_device *dev; struct in_device *in_dev; if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; v = &mrt->vif_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; v->dev = NULL; if (!dev) { write_unlock_bh(&mrt_lock); return -EADDRNOTAVAIL; } #ifdef CONFIG_IP_PIMSM if (vifi == mrt->mroute_reg_vif_num) mrt->mroute_reg_vif_num = -1; #endif if (vifi + 1 == mrt->maxvif) { int tmp; for (tmp = vifi - 1; tmp >= 0; tmp--) { if (VIF_EXISTS(mrt, tmp)) break; } mrt->maxvif = tmp+1; } write_unlock_bh(&mrt_lock); dev_set_allmulti(dev, -1); in_dev = __in_dev_get_rtnl(dev); if (in_dev) { IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; ip_rt_multicast_event(in_dev); } if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); dev_put(dev); return 0; } static void ipmr_cache_free_rcu(struct rcu_head *head) { struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); kmem_cache_free(mrt_cachep, c); } static inline void ipmr_cache_free(struct mfc_cache *c) { call_rcu(&c->rcu, ipmr_cache_free_rcu); } /* Destroy an unresolved cache entry, killing queued skbs * and reporting error to netlink readers. 
*/ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; struct nlmsgerr *e; atomic_dec(&mrt->cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); e = NLMSG_DATA(nlh); e->error = -ETIMEDOUT; memset(&e->msg, 0, sizeof(e->msg)); rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else { kfree_skb(skb); } } ipmr_cache_free(c); } /* Timer process for the unresolved queue. */ static void ipmr_expire_process(unsigned long arg) { struct mr_table *mrt = (struct mr_table *)arg; unsigned long now; unsigned long expires; struct mfc_cache *c, *next; if (!spin_trylock(&mfc_unres_lock)) { mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); return; } if (list_empty(&mrt->mfc_unres_queue)) goto out; now = jiffies; expires = 10*HZ; list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; continue; } list_del(&c->list); ipmr_destroy_unres(mrt, c); } if (!list_empty(&mrt->mfc_unres_queue)) mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); out: spin_unlock(&mfc_unres_lock); } /* Fill oifs list. It is called under write locked mrt_lock. 
*/ static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, unsigned char *ttls) { int vifi; cache->mfc_un.res.minvif = MAXVIFS; cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXVIFS); for (vifi = 0; vifi < mrt->maxvif; vifi++) { if (VIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) cache->mfc_un.res.minvif = vifi; if (cache->mfc_un.res.maxvif <= vifi) cache->mfc_un.res.maxvif = vifi + 1; } } } static int vif_add(struct net *net, struct mr_table *mrt, struct vifctl *vifc, int mrtsock) { int vifi = vifc->vifc_vifi; struct vif_device *v = &mrt->vif_table[vifi]; struct net_device *dev; struct in_device *in_dev; int err; /* Is vif busy ? */ if (VIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->vifc_flags) { #ifdef CONFIG_IP_PIMSM case VIFF_REGISTER: /* * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; dev = ipmr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); if (err) { unregister_netdevice(dev); dev_put(dev); return err; } break; #endif case VIFF_TUNNEL: dev = ipmr_new_tunnel(net, vifc); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); if (err) { ipmr_del_tunnel(dev, vifc); dev_put(dev); return err; } break; case VIFF_USE_IFINDEX: case 0: if (vifc->vifc_flags == VIFF_USE_IFINDEX) { dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); if (dev && __in_dev_get_rtnl(dev) == NULL) { dev_put(dev); return -EADDRNOTAVAIL; } } else { dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); } if (!dev) return -EADDRNOTAVAIL; err = dev_set_allmulti(dev, 1); if (err) { dev_put(dev); return err; } break; default: return -EINVAL; } in_dev = __in_dev_get_rtnl(dev); if (!in_dev) { dev_put(dev); return -EADDRNOTAVAIL; } IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; ip_rt_multicast_event(in_dev); /* Fill in the VIF structures */ 
v->rate_limit = vifc->vifc_rate_limit; v->local = vifc->vifc_lcl_addr.s_addr; v->remote = vifc->vifc_rmt_addr.s_addr; v->flags = vifc->vifc_flags; if (!mrtsock) v->flags |= VIFF_STATIC; v->threshold = vifc->vifc_threshold; v->bytes_in = 0; v->bytes_out = 0; v->pkt_in = 0; v->pkt_out = 0; v->link = dev->ifindex; if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) v->link = dev->iflink; /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; #ifdef CONFIG_IP_PIMSM if (v->flags & VIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; #endif if (vifi+1 > mrt->maxvif) mrt->maxvif = vifi+1; write_unlock_bh(&mrt_lock); return 0; } /* called with rcu_read_lock() */ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, __be32 mcastgrp) { int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) return c; } return NULL; } /* * Allocate a multicast cache entry */ static struct mfc_cache *ipmr_cache_alloc(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c) c->mfc_un.res.minvif = MAXVIFS; return c; } static struct mfc_cache *ipmr_cache_alloc_unres(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c) { skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10*HZ; } return c; } /* * A cache entry has gone into a resolved state from queued */ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, struct mfc_cache *uc, struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; /* Play the pending entries through our router */ while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { 
nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); e = NLMSG_DATA(nlh); e->error = -EMSGSIZE; memset(&e->msg, 0, sizeof(e->msg)); } rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else { ip_mr_forward(net, mrt, skb, c, 0); } } } /* * Bounce a cache query up to mrouted. We could use netlink for this but mrouted * expects the following bizarre scheme. * * Called under mrt_lock. */ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; const int ihl = ip_hdrlen(pkt); struct igmphdr *igmp; struct igmpmsg *msg; struct sock *mroute_sk; int ret; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); else #endif skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return -ENOBUFS; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) { /* Ugly, but we have no choice with this interface. * Duplicate old header, fix ihl, length etc. 
* And all this only to mangle msg->im_msgtype and * to set msg->im_mbz to "mbz" :-) */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); skb_reset_transport_header(skb); msg = (struct igmpmsg *)skb_network_header(skb); memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); } else #endif { /* Copy the IP header */ skb->network_header = skb->tail; skb_put(skb, ihl); skb_copy_to_linear_data(skb, pkt->data, ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ msg = (struct igmpmsg *)skb_network_header(skb); msg->im_vif = vifi; skb_dst_set(skb, dst_clone(skb_dst(pkt))); /* Add our header */ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); igmp->type = msg->im_msgtype = assert; igmp->code = 0; ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ skb->transport_header = skb->network_header; } rcu_read_lock(); mroute_sk = rcu_dereference(mrt->mroute_sk); if (mroute_sk == NULL) { rcu_read_unlock(); kfree_skb(skb); return -EINVAL; } /* Deliver to mrouted */ ret = sock_queue_rcv_skb(mroute_sk, skb); rcu_read_unlock(); if (ret < 0) { if (net_ratelimit()) printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); kfree_skb(skb); } return ret; } /* * Queue a packet for resolution. It gets locked cache entry! 
*/ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) { bool found = false; int err; struct mfc_cache *c; const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); list_for_each_entry(c, &mrt->mfc_unres_queue, list) { if (c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) { found = true; break; } } if (!found) { /* Create a new entry if allowable */ if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || (c = ipmr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); return -ENOBUFS; } /* Fill in the new cache entry */ c->mfc_parent = -1; c->mfc_origin = iph->saddr; c->mfc_mcastgrp = iph->daddr; /* Reflect first query at mrouted. */ err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker */ spin_unlock_bh(&mfc_unres_lock); ipmr_cache_free(c); kfree_skb(skb); return err; } atomic_inc(&mrt->cache_resolve_queue_len); list_add(&c->list, &mrt->mfc_unres_queue); if (atomic_read(&mrt->cache_resolve_queue_len) == 1) mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); } /* See if we can append the packet */ if (c->mfc_un.unres.unresolved.qlen > 3) { kfree_skb(skb); err = -ENOBUFS; } else { skb_queue_tail(&c->mfc_un.unres.unresolved, skb); err = 0; } spin_unlock_bh(&mfc_unres_lock); return err; } /* * MFC cache manipulation by user space mroute daemon */ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) { int line; struct mfc_cache *c, *next; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { list_del_rcu(&c->list); ipmr_cache_free(c); return 0; } } return -ENOENT; } static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, struct mfcctl *mfc, int mrtsock) { bool found = false; int line; struct 
mfc_cache *uc, *c; if (mfc->mfcc_parent >= MAXVIFS) return -ENFILE; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { found = true; break; } } if (found) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); return 0; } if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; c = ipmr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mfc_origin = mfc->mfcc_origin.s_addr; c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); /* * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. 
*/ found = false; spin_lock_bh(&mfc_unres_lock); list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { list_del(&uc->list); atomic_dec(&mrt->cache_resolve_queue_len); found = true; break; } } if (list_empty(&mrt->mfc_unres_queue)) del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (found) { ipmr_cache_resolve(net, mrt, uc, c); ipmr_cache_free(uc); } return 0; } /* * Close the multicast socket, and clear the vif tables etc */ static void mroute_clean_tables(struct mr_table *mrt) { int i; LIST_HEAD(list); struct mfc_cache *c, *next; /* Shut down all active vif entries */ for (i = 0; i < mrt->maxvif; i++) { if (!(mrt->vif_table[i].flags & VIFF_STATIC)) vif_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); /* Wipe the cache */ for (i = 0; i < MFC_LINES; i++) { list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { if (c->mfc_flags & MFC_STATIC) continue; list_del_rcu(&c->list); ipmr_cache_free(c); } } if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { list_del(&c->list); ipmr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } } /* called from ip_ra_control(), before an RCU grace period, * we dont need to call synchronize_rcu() here */ static void mrtsock_destruct(struct sock *sk) { struct net *net = sock_net(sk); struct mr_table *mrt; rtnl_lock(); ipmr_for_each_table(mrt, net) { if (sk == rtnl_dereference(mrt->mroute_sk)) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; rcu_assign_pointer(mrt->mroute_sk, NULL); mroute_clean_tables(mrt); } } rtnl_unlock(); } /* * Socket options and virtual interface manipulation. The whole * virtual interface system is a complete heap, but unfortunately * that's how BSD mrouted happens to think. Maybe one day with a proper * MOSPF/PIM router set up we can clean this up. 
*/ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) { int ret; struct vifctl vif; struct mfcctl mfc; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; if (optname != MRT_INIT) { if (sk != rcu_dereference_raw(mrt->mroute_sk) && !capable(CAP_NET_ADMIN)) return -EACCES; } switch (optname) { case MRT_INIT: if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_IGMP) return -EOPNOTSUPP; if (optlen != sizeof(int)) return -ENOPROTOOPT; rtnl_lock(); if (rtnl_dereference(mrt->mroute_sk)) { rtnl_unlock(); return -EADDRINUSE; } ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { rcu_assign_pointer(mrt->mroute_sk, sk); IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; } rtnl_unlock(); return ret; case MRT_DONE: if (sk != rcu_dereference_raw(mrt->mroute_sk)) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: case MRT_DEL_VIF: if (optlen != sizeof(vif)) return -EINVAL; if (copy_from_user(&vif, optval, sizeof(vif))) return -EFAULT; if (vif.vifc_vifi >= MAXVIFS) return -ENFILE; rtnl_lock(); if (optname == MRT_ADD_VIF) { ret = vif_add(net, mrt, &vif, sk == rtnl_dereference(mrt->mroute_sk)); } else { ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); } rtnl_unlock(); return ret; /* * Manipulate the forwarding caches. These live * in a sort of kernel/user symbiosis. */ case MRT_ADD_MFC: case MRT_DEL_MFC: if (optlen != sizeof(mfc)) return -EINVAL; if (copy_from_user(&mfc, optval, sizeof(mfc))) return -EFAULT; rtnl_lock(); if (optname == MRT_DEL_MFC) ret = ipmr_mfc_delete(mrt, &mfc); else ret = ipmr_mfc_add(net, mrt, &mfc, sk == rtnl_dereference(mrt->mroute_sk)); rtnl_unlock(); return ret; /* * Control PIM assert. */ case MRT_ASSERT: { int v; if (get_user(v, (int __user *)optval)) return -EFAULT; mrt->mroute_do_assert = (v) ? 
1 : 0; return 0; } #ifdef CONFIG_IP_PIMSM case MRT_PIM: { int v; if (get_user(v, (int __user *)optval)) return -EFAULT; v = (v) ? 1 : 0; rtnl_lock(); ret = 0; if (v != mrt->mroute_do_pim) { mrt->mroute_do_pim = v; mrt->mroute_do_assert = v; } rtnl_unlock(); return ret; } #endif #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES case MRT_TABLE: { u32 v; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(v, (u32 __user *)optval)) return -EFAULT; rtnl_lock(); ret = 0; if (sk == rtnl_dereference(mrt->mroute_sk)) { ret = -EBUSY; } else { if (!ipmr_new_table(net, v)) ret = -ENOMEM; raw_sk(sk)->ipmr_table = v; } rtnl_unlock(); return ret; } #endif /* * Spurious command, or MRT_VERSION which you cannot * set. */ default: return -ENOPROTOOPT; } } /* * Getsock opt support for the multicast routing system. */ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { int olr; int val; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM optname != MRT_PIM && #endif optname != MRT_ASSERT) return -ENOPROTOOPT; if (get_user(olr, optlen)) return -EFAULT; olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; if (put_user(olr, optlen)) return -EFAULT; if (optname == MRT_VERSION) val = 0x0305; #ifdef CONFIG_IP_PIMSM else if (optname == MRT_PIM) val = mrt->mroute_do_pim; #endif else val = mrt->mroute_do_assert; if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; } /* * The IP multicast ioctl support routines. */ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) { struct sioc_sg_req sr; struct sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); vif = &mrt->vif_table[vr.vifi]; if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; vr.obytes = vif->bytes_out; read_unlock(&mrt_lock); if (copy_to_user(arg, &vr, sizeof(vr))) return -EFAULT; return 0; } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; case SIOCGETSGCNT: if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; rcu_read_lock(); c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; sr.wrong_if = c->mfc_un.res.wrong_if; rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; default: return -ENOIOCTLCMD; } } #ifdef CONFIG_COMPAT struct compat_sioc_sg_req { struct in_addr src; struct in_addr grp; compat_ulong_t pktcnt; compat_ulong_t bytecnt; compat_ulong_t wrong_if; }; struct compat_sioc_vif_req { vifi_t vifi; /* Which iface */ compat_ulong_t icount; compat_ulong_t ocount; compat_ulong_t ibytes; compat_ulong_t obytes; }; int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { struct compat_sioc_sg_req sr; struct compat_sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); vif = &mrt->vif_table[vr.vifi]; if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; vr.obytes = vif->bytes_out; read_unlock(&mrt_lock); if (copy_to_user(arg, &vr, sizeof(vr))) return -EFAULT; return 0; } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; case SIOCGETSGCNT: if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; rcu_read_lock(); c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; sr.wrong_if = c->mfc_un.res.wrong_if; rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; default: return -ENOIOCTLCMD; } } #endif static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct net *net = dev_net(dev); struct mr_table *mrt; struct vif_device *v; int ct; LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; ipmr_for_each_table(mrt, net) { v = &mrt->vif_table[0]; for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) vif_delete(mrt, ct, 1, &list); } } unregister_netdevice_many(&list); return NOTIFY_DONE; } static struct notifier_block ip_mr_notifier = { .notifier_call = ipmr_device_event, }; /* * Encapsulate a packet by attaching a valid IPIP header to it. * This avoids tunnel drivers and other mess and gives us the speed so * important for multicast video. 
*/ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct iphdr *iph; const struct iphdr *old_iph = ip_hdr(skb); skb_push(skb, sizeof(struct iphdr)); skb->transport_header = skb->network_header; skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->tos = old_iph->tos; iph->ttl = old_iph->ttl; iph->frag_off = 0; iph->daddr = daddr; iph->saddr = saddr; iph->protocol = IPPROTO_IPIP; iph->ihl = 5; iph->tot_len = htons(skb->len); ip_select_ident(iph, skb_dst(skb), NULL); ip_send_check(iph); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); nf_reset(skb); } static inline int ipmr_forward_finish(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); if (unlikely(opt->optlen)) ip_forward_options(skb); return dst_output(skb); } /* * Processing handlers for ipmr_forward */ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, int vifi) { const struct iphdr *iph = ip_hdr(skb); struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *dev; struct rtable *rt; struct flowi4 fl4; int encap = 0; if (vif->dev == NULL) goto out_free; #ifdef CONFIG_IP_PIMSM if (vif->flags & VIFF_REGISTER) { vif->pkt_out++; vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); goto out_free; } #endif if (vif->flags & VIFF_TUNNEL) { rt = ip_route_output_ports(net, &fl4, NULL, vif->remote, vif->local, 0, 0, IPPROTO_IPIP, RT_TOS(iph->tos), vif->link); if (IS_ERR(rt)) goto out_free; encap = sizeof(struct iphdr); } else { rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, 0, 0, IPPROTO_IPIP, RT_TOS(iph->tos), vif->link); if (IS_ERR(rt)) goto out_free; } dev = rt->dst.dev; if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { /* Do not fragment multicasts. 
Alas, IPv4 does not * allow to send ICMP, so that packets will disappear * to blackhole. */ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); ip_rt_put(rt); goto out_free; } encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; if (skb_cow(skb, encap)) { ip_rt_put(rt); goto out_free; } vif->pkt_out++; vif->bytes_out += skb->len; skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); ip_decrease_ttl(ip_hdr(skb)); /* FIXME: forward and output firewalls used to be called here. * What do we do with netfilter? -- RR */ if (vif->flags & VIFF_TUNNEL) { ip_encap(skb, vif->local, vif->remote); /* FIXME: extra output firewall step used to be here. --RR */ vif->dev->stats.tx_packets++; vif->dev->stats.tx_bytes += skb->len; } IPCB(skb)->flags |= IPSKB_FORWARDED; /* * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally * not only before forwarding, but after forwarding on all output * interfaces. It is clear, if mrouter runs a multicasting * program, it should receive packets not depending to what interface * program is joined. * If we will not make it, the program will have to join on all * interfaces. On the other hand, multihoming host (or router, but * not mrouter) cannot join to more than one interface - it will * result in receiving multiple packets. */ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, ipmr_forward_finish); return; out_free: kfree_skb(skb); } static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) { int ct; for (ct = mrt->maxvif-1; ct >= 0; ct--) { if (mrt->vif_table[ct].dev == dev) break; } return ct; } /* "local" means that we should preserve one skb (for local delivery) */ static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local) { int psend = -1; int vif, ct; vif = cache->mfc_parent; cache->mfc_un.res.pkt++; cache->mfc_un.res.bytes += skb->len; /* * Wrong interface: drop packet and (maybe) send PIM assert. 
*/ if (mrt->vif_table[vif].dev != skb->dev) { int true_vifi; if (rt_is_output_route(skb_rtable(skb))) { /* It is our own packet, looped back. * Very complicated situation... * * The best workaround until routing daemons will be * fixed is not to redistribute packet, if it was * send through wrong interface. It means, that * multicast applications WILL NOT work for * (S,G), which have default multicast route pointing * to wrong oif. In any case, it is not a good * idea to use multicasting applications on router. */ goto dont_forward; } cache->mfc_un.res.wrong_if++; true_vifi = ipmr_find_vif(mrt, skb->dev); if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, * so that we cannot check that packet arrived on an oif. * It is bad, but otherwise we would need to move pretty * large chunk of pimd to kernel. Ough... --ANK */ (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); } goto dont_forward; } mrt->vif_table[vif].pkt_in++; mrt->vif_table[vif].bytes_in += skb->len; /* * Forward the frame */ for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) { if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) ipmr_queue_xmit(net, mrt, skb2, cache, psend); } psend = ct; } } if (psend != -1) { if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) ipmr_queue_xmit(net, mrt, skb2, cache, psend); } else { ipmr_queue_xmit(net, mrt, skb, cache, psend); return 0; } } dont_forward: if (!local) kfree_skb(skb); return 0; } static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4 = { .daddr = iph->daddr, .saddr = iph->saddr, 
.flowi4_tos = RT_TOS(iph->tos), .flowi4_oif = rt->rt_oif, .flowi4_iif = rt->rt_iif, .flowi4_mark = rt->rt_mark, }; struct mr_table *mrt; int err; err = ipmr_fib_lookup(net, &fl4, &mrt); if (err) return ERR_PTR(err); return mrt; } /* * Multicast packets for forwarding arrive here * Called with rcu_read_lock(); */ int ip_mr_input(struct sk_buff *skb) { struct mfc_cache *cache; struct net *net = dev_net(skb->dev); int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; struct mr_table *mrt; /* Packet is looped back after forward, it should not be * forwarded second time, but still can be delivered locally. */ if (IPCB(skb)->flags & IPSKB_FORWARDED) goto dont_forward; mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) { kfree_skb(skb); return PTR_ERR(mrt); } if (!local) { if (IPCB(skb)->opt.router_alert) { if (ip_call_ra_chain(skb)) return 0; } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { /* IGMPv1 (and broken IGMPv2 implementations sort of * Cisco IOS <= 11.2(8)) do not put router alert * option to IGMP packets destined to routable * groups. It is very bad, because it means * that we can forward NO IGMP messages. 
*/ struct sock *mroute_sk; mroute_sk = rcu_dereference(mrt->mroute_sk); if (mroute_sk) { nf_reset(skb); raw_rcv(mroute_sk, skb); return 0; } } } /* already under rcu_read_lock() */ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); /* * No usable cache entry */ if (cache == NULL) { int vif; if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ip_local_deliver(skb); if (skb2 == NULL) return -ENOBUFS; skb = skb2; } read_lock(&mrt_lock); vif = ipmr_find_vif(mrt, skb->dev); if (vif >= 0) { int err2 = ipmr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); return err2; } read_unlock(&mrt_lock); kfree_skb(skb); return -ENODEV; } read_lock(&mrt_lock); ip_mr_forward(net, mrt, skb, cache, local); read_unlock(&mrt_lock); if (local) return ip_local_deliver(skb); return 0; dont_forward: if (local) return ip_local_deliver(skb); kfree_skb(skb); return 0; } #ifdef CONFIG_IP_PIMSM /* called with rcu_read_lock() */ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, unsigned int pimlen) { struct net_device *reg_dev = NULL; struct iphdr *encap; encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* * Check that: * a. packet is really sent to a multicast group * b. packet is not a NULL-REGISTER * c. 
packet is not truncated */ if (!ipv4_is_multicast(encap->daddr) || encap->tot_len == 0 || ntohs(encap->tot_len) + pimlen > skb->len) return 1; read_lock(&mrt_lock); if (mrt->mroute_reg_vif_num >= 0) reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; read_unlock(&mrt_lock); if (reg_dev == NULL) return 1; skb->mac_header = skb->network_header; skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_HOST; skb_tunnel_rx(skb, reg_dev); netif_rx(skb); return NET_RX_SUCCESS; } #endif #ifdef CONFIG_IP_PIMSM_V1 /* * Handle IGMP messages of PIMv1 */ int pim_rcv_v1(struct sk_buff *skb) { struct igmphdr *pim; struct net *net = dev_net(skb->dev); struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = igmp_hdr(skb); mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) goto drop; if (!mrt->mroute_do_pim || pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } return 0; } #endif #ifdef CONFIG_IP_PIMSM_V2 static int pim_rcv(struct sk_buff *skb) { struct pimreghdr *pim; struct net *net = dev_net(skb->dev); struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = (struct pimreghdr *)skb_transport_header(skb); if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) || (pim->flags & PIM_NULL_REGISTER) || (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) goto drop; if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } return 0; } #endif static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; /* If cache is unresolved, don't try to parse IIF and OIF 
*/ if (c->mfc_parent >= MAXVIFS) return -ENOENT; if (VIF_EXISTS(mrt, c->mfc_parent)) RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } mp_head->rta_type = RTA_MULTIPATH; mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; rtm->rtm_type = RTN_MULTICAST; return 1; rtattr_failure: nlmsg_trim(skb, b); return -EMSGSIZE; } int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, struct rtmsg *rtm, int nowait) { struct mfc_cache *cache; struct mr_table *mrt; int err; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; rcu_read_lock(); cache = ipmr_cache_find(mrt, saddr, daddr); if (cache == NULL) { struct sk_buff *skb2; struct iphdr *iph; struct net_device *dev; int vif = -1; if (nowait) { rcu_read_unlock(); return -EAGAIN; } dev = skb->dev; read_lock(&mrt_lock); if (dev) vif = ipmr_find_vif(mrt, dev); if (vif < 0) { read_unlock(&mrt_lock); rcu_read_unlock(); return -ENODEV; } skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { read_unlock(&mrt_lock); rcu_read_unlock(); return -ENOMEM; } skb_push(skb2, sizeof(struct iphdr)); skb_reset_network_header(skb2); iph = ip_hdr(skb2); iph->ihl = sizeof(struct iphdr) >> 2; iph->saddr = saddr; iph->daddr = daddr; iph->version = 0; err = ipmr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); rcu_read_unlock(); return err; } read_lock(&mrt_lock); if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 
read_unlock(&mrt_lock); rcu_read_unlock(); return err; } static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, u32 pid, u32 seq, struct mfc_cache *c) { struct nlmsghdr *nlh; struct rtmsg *rtm; nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); if (nlh == NULL) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = RTNL_FAMILY_IPMR; rtm->rtm_dst_len = 32; rtm->rtm_src_len = 32; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; NLA_PUT_U32(skb, RTA_TABLE, mrt->id); rtm->rtm_type = RTN_MULTICAST; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct mr_table *mrt; struct mfc_cache *mfc; unsigned int t = 0, s_t; unsigned int h = 0, s_h; unsigned int e = 0, s_e; s_t = cb->args[0]; s_h = cb->args[1]; s_e = cb->args[2]; rcu_read_lock(); ipmr_for_each_table(mrt, net) { if (t < s_t) goto next_table; if (t > s_t) s_h = 0; for (h = s_h; h < MFC_LINES; h++) { list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { if (e < s_e) goto next_entry; if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, mfc) < 0) goto done; next_entry: e++; } e = s_e = 0; } s_h = 0; next_table: t++; } done: rcu_read_unlock(); cb->args[2] = e; cb->args[1] = h; cb->args[0] = t; return skb->len; } #ifdef CONFIG_PROC_FS /* * The /proc interfaces to multicast routing : * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif */ struct ipmr_vif_iter { struct seq_net_private p; struct mr_table *mrt; int ct; }; static struct vif_device *ipmr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { struct mr_table *mrt = iter->mrt; for 
(iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { if (!VIF_EXISTS(mrt, iter->ct)) continue; if (pos-- == 0) return &mrt->vif_table[iter->ct]; } return NULL; } static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return ERR_PTR(-ENOENT); iter->mrt = mrt; read_lock(&mrt_lock); return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt = iter->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_vif_seq_idx(net, iter, 0); while (++iter->ct < mrt->maxvif) { if (!VIF_EXISTS(mrt, iter->ct)) continue; return &mrt->vif_table[iter->ct]; } return NULL; } static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) __releases(mrt_lock) { read_unlock(&mrt_lock); } static int ipmr_vif_seq_show(struct seq_file *seq, void *v) { struct ipmr_vif_iter *iter = seq->private; struct mr_table *mrt = iter->mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); } else { const struct vif_device *vif = v; const char *name = vif->dev ? 
vif->dev->name : "none"; seq_printf(seq, "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", vif - mrt->vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags, vif->local, vif->remote); } return 0; } static const struct seq_operations ipmr_vif_seq_ops = { .start = ipmr_vif_seq_start, .next = ipmr_vif_seq_next, .stop = ipmr_vif_seq_stop, .show = ipmr_vif_seq_show, }; static int ipmr_vif_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ipmr_vif_seq_ops, sizeof(struct ipmr_vif_iter)); } static const struct file_operations ipmr_vif_fops = { .owner = THIS_MODULE, .open = ipmr_vif_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct ipmr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; int ct; }; static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { struct mr_table *mrt = it->mrt; struct mfc_cache *mfc; rcu_read_lock(); for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { it->cache = &mrt->mfc_cache_array[it->ct]; list_for_each_entry_rcu(mfc, it->cache, list) if (pos-- == 0) return mfc; } rcu_read_unlock(); spin_lock_bh(&mfc_unres_lock); it->cache = &mrt->mfc_unres_queue; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); it->cache = NULL; return NULL; } static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return ERR_PTR(-ENOENT); it->mrt = mrt; it->cache = NULL; it->ct = 0; return *pos ? 
ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt = it->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_mfc_seq_idx(net, seq->private, 0); if (mfc->list.next != it->cache) return list_entry(mfc->list.next, struct mfc_cache, list); if (it->cache == &mrt->mfc_unres_queue) goto end_of_list; BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); while (++it->ct < MFC_LINES) { it->cache = &mrt->mfc_cache_array[it->ct]; if (list_empty(it->cache)) continue; return list_first_entry(it->cache, struct mfc_cache, list); } /* exhausted cache_array, show unresolved */ rcu_read_unlock(); it->cache = &mrt->mfc_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); if (!list_empty(it->cache)) return list_first_entry(it->cache, struct mfc_cache, list); end_of_list: spin_unlock_bh(&mfc_unres_lock); it->cache = NULL; return NULL; } static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; struct mr_table *mrt = it->mrt; if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); else if (it->cache == &mrt->mfc_cache_array[it->ct]) rcu_read_unlock(); } static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Group Origin Iif Pkts Bytes Wrong Oifs\n"); } else { const struct mfc_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; const struct mr_table *mrt = it->mrt; seq_printf(seq, "%08X %08X %-3hd", (__force u32) mfc->mfc_mcastgrp, (__force u32) mfc->mfc_origin, mfc->mfc_parent); if (it->cache != &mrt->mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++) { if (VIF_EXISTS(mrt, n) && mfc->mfc_un.res.ttls[n] < 
255) seq_printf(seq, " %2d:%-3d", n, mfc->mfc_un.res.ttls[n]); } } else { /* unresolved mfc_caches don't contain * pkt, bytes and wrong_if values */ seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul); } seq_putc(seq, '\n'); } return 0; } static const struct seq_operations ipmr_mfc_seq_ops = { .start = ipmr_mfc_seq_start, .next = ipmr_mfc_seq_next, .stop = ipmr_mfc_seq_stop, .show = ipmr_mfc_seq_show, }; static int ipmr_mfc_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ipmr_mfc_seq_ops, sizeof(struct ipmr_mfc_iter)); } static const struct file_operations ipmr_mfc_fops = { .owner = THIS_MODULE, .open = ipmr_mfc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif #ifdef CONFIG_IP_PIMSM_V2 static const struct net_protocol pim_protocol = { .handler = pim_rcv, .netns_ok = 1, }; #endif /* * Setup for IP multicast routing */ static int __net_init ipmr_net_init(struct net *net) { int err; err = ipmr_rules_init(net); if (err < 0) goto fail; #ifdef CONFIG_PROC_FS err = -ENOMEM; if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops)) goto proc_vif_fail; if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops)) goto proc_cache_fail; #endif return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip_mr_vif"); proc_vif_fail: ipmr_rules_exit(net); #endif fail: return err; } static void __net_exit ipmr_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip_mr_cache"); proc_net_remove(net, "ip_mr_vif"); #endif ipmr_rules_exit(net); } static struct pernet_operations ipmr_net_ops = { .init = ipmr_net_init, .exit = ipmr_net_exit, }; int __init ip_mr_init(void) { int err; mrt_cachep = kmem_cache_create("ip_mrt_cache", sizeof(struct mfc_cache), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); if (!mrt_cachep) return -ENOMEM; err = register_pernet_subsys(&ipmr_net_ops); if (err) goto reg_pernet_fail; err = register_netdevice_notifier(&ip_mr_notifier); if (err) goto reg_notif_fail; 
#ifdef CONFIG_IP_PIMSM_V2 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n"); err = -EAGAIN; goto add_proto_fail; } #endif rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute, NULL); return 0; #ifdef CONFIG_IP_PIMSM_V2 add_proto_fail: unregister_netdevice_notifier(&ip_mr_notifier); #endif reg_notif_fail: unregister_pernet_subsys(&ipmr_net_ops); reg_pernet_fail: kmem_cache_destroy(mrt_cachep); return err; }
gpl-2.0
toastcfh/mecha-htc-2.6.32
fs/yaffs2/yaffs_nand.c
634
3273
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2007 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ const char *yaffs_nand_c_version = "$Id$"; #include "yaffs_nand.h" #include "yaffs_tagscompat.h" #include "yaffs_tagsvalidity.h" #include "yaffs_getblockinfo.h" int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *buffer, yaffs_ExtendedTags *tags) { int result; yaffs_ExtendedTags localTags; int realignedChunkInNAND = chunkInNAND - dev->chunkOffset; dev->nPageReads++; /* If there are no tags provided, use local tags to get prioritised gc working */ if (!tags) tags = &localTags; if (dev->readChunkWithTagsFromNAND) result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer, tags); else result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer, tags); if (tags && tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) { yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock); yaffs_HandleChunkError(dev, bi); } return result; } int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND, const __u8 *buffer, yaffs_ExtendedTags *tags) { dev->nPageWrites++; chunkInNAND -= dev->chunkOffset; if (tags) { tags->sequenceNumber = dev->sequenceNumber; tags->chunkUsed = 1; if (!yaffs_ValidateTags(tags)) { T(YAFFS_TRACE_ERROR, (TSTR("Writing uninitialised tags" TENDSTR))); YBUG(); } T(YAFFS_TRACE_WRITE, (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND, tags->objectId, tags->chunkId)); } else { T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR))); YBUG(); } if (dev->writeChunkWithTagsToNAND) return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer, tags); 
else return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev, chunkInNAND, buffer, tags); } int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo) { blockNo -= dev->blockOffset; if (dev->markNANDBlockBad) return dev->markNANDBlockBad(dev, blockNo); else return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo); } int yaffs_QueryInitialBlockState(yaffs_Device *dev, int blockNo, yaffs_BlockState *state, __u32 *sequenceNumber) { blockNo -= dev->blockOffset; if (dev->queryNANDBlock) return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber); else return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo, state, sequenceNumber); } int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev, int blockInNAND) { int result; blockInNAND -= dev->blockOffset; dev->nBlockErasures++; result = dev->eraseBlockInNAND(dev, blockInNAND); return result; } int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev) { return dev->initialiseNAND(dev); }
gpl-2.0
andreasgal/GT9100-kernel
drivers/isdn/mISDN/l1oip_core.c
890
39990
/* * l1oip.c low level driver for tunneling layer 1 over IP * * NOTE: It is not compatible with TDMoIP nor "ISDN over IP". * * Author Andreas Eversberg (jolly@eversberg.eu) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* module parameters: * type: Value 1 = BRI Value 2 = PRI Value 3 = BRI (multi channel frame, not supported yet) Value 4 = PRI (multi channel frame, not supported yet) A multi channel frame reduces overhead to a single frame for all b-channels, but increases delay. (NOTE: Multi channel frames are not implemented yet.) * codec: Value 0 = transparent (default) Value 1 = transfer ALAW Value 2 = transfer ULAW Value 3 = transfer generic 4 bit compression. * ulaw: 0 = we use a-Law (default) 1 = we use u-Law * limit: limitation of B-channels to control bandwidth (1...126) BRI: 1 or 2 PRI: 1-30, 31-126 (126, because dchannel ist not counted here) Also limited ressources are used for stack, resulting in less channels. It is possible to have more channels than 30 in PRI mode, this must be supported by the application. * ip: byte representation of remote ip address (127.0.0.1 -> 127,0,0,1) If not given or four 0, no remote address is set. For multiple interfaces, concat ip addresses. (127,0,0,1,127,0,0,1) * port: port number (local interface) If not given or 0, port 931 is used for fist instance, 932 for next... 
For multiple interfaces, different ports must be given. * remoteport: port number (remote interface) If not given or 0, remote port equals local port For multiple interfaces on equal sites, different ports must be given. * ondemand: 0 = fixed (always transmit packets, even when remote side timed out) 1 = on demand (only transmit packets, when remote side is detected) the default is 0 NOTE: ID must also be set for on demand. * id: optional value to identify frames. This value must be equal on both peers and should be random. If omitted or 0, no ID is transmitted. * debug: NOTE: only one debug value must be given for all cards enable debugging (see l1oip.h for debug options) Special mISDN controls: op = MISDN_CTRL_SETPEER* p1 = bytes 0-3 : remote IP address in network order (left element first) p2 = bytes 1-2 : remote port in network order (high byte first) optional: p2 = bytes 3-4 : local port in network order (high byte first) op = MISDN_CTRL_UNSETPEER* * Use l1oipctrl for comfortable setting or removing ip address. (Layer 1 Over IP CTRL) L1oIP-Protocol -------------- Frame Header: 7 6 5 4 3 2 1 0 +---------------+ |Ver|T|I|Coding | +---------------+ | ID byte 3 * | +---------------+ | ID byte 2 * | +---------------+ | ID byte 1 * | +---------------+ | ID byte 0 * | +---------------+ |M| Channel | +---------------+ | Length * | +---------------+ | Time Base MSB | +---------------+ | Time Base LSB | +---------------+ | Data.... | ... | | +---------------+ |M| Channel | +---------------+ | Length * | +---------------+ | Time Base MSB | +---------------+ | Time Base LSB | +---------------+ | Data.... | ... * Only included in some cases. - Ver = Version If version is missmatch, the frame must be ignored. - T = Type of interface Must be 0 for S0 or 1 for E1. - I = Id present If bit is set, four ID bytes are included in frame. - ID = Connection ID Additional ID to prevent Denial of Service attacs. Also it prevents hijacking connections with dynamic IP. 
The ID should be random and must not be 0. - Coding = Type of codec Must be 0 for no transcoding. Also for D-channel and other HDLC frames. 1 and 2 are reserved for explicitly use of a-LAW or u-LAW codec. 3 is used for generic table compressor. - M = More channels to come. If this flag is 1, the following byte contains the length of the channel data. After the data block, the next channel will be defined. The flag for the last channel block (or if only one channel is transmitted), must be 0 and no length is given. - Channel = Channel number 0 reserved 1-3 channel data for S0 (3 is D-channel) 1-31 channel data for E1 (16 is D-channel) 32-127 channel data for extended E1 (16 is D-channel) - The length is used if the M-flag is 1. It is used to find the next channel inside frame. NOTE: A value of 0 equals 256 bytes of data. -> For larger data blocks, a single frame must be used. -> For larger streams, a single frame or multiple blocks with same channel ID must be used. - Time Base = Timestamp of first sample in frame The "Time Base" is used to rearange packets and to detect packet loss. The 16 bits are sent in network order (MSB first) and count 1/8000 th of a second. This causes a wrap arround each 8,192 seconds. There is no requirement for the initial "Time Base", but 0 should be used for the first packet. In case of HDLC data, this timestamp counts the packet or byte number. Two Timers: After initialisation, a timer of 15 seconds is started. Whenever a packet is transmitted, the timer is reset to 15 seconds again. If the timer expires, an empty packet is transmitted. This keep the connection alive. When a valid packet is received, a timer 65 seconds is started. The interface become ACTIVE. If the timer expires, the interface becomes INACTIVE. Dynamic IP handling: To allow dynamic IP, the ID must be non 0. In this case, any packet with the correct port number and ID will be accepted. 
If the remote side changes its IP the new IP is used for all transmitted packets until it changes again. On Demand: If the ondemand parameter is given, the remote IP is set to 0 on timeout. This will stop keepalive traffic to remote. If the remote is online again, traffic will continue to the remote address. This is usefull for road warriors. This feature only works with ID set, otherwhise it is highly unsecure. Socket and Thread ----------------- The complete socket opening and closing is done by a thread. When the thread opened a socket, the hc->socket descriptor is set. Whenever a packet shall be sent to the socket, the hc->socket must be checked wheter not NULL. To prevent change in socket descriptor, the hc->socket_lock must be used. To change the socket, a recall of l1oip_socket_open() will safely kill the socket process and create a new one. */ #define L1OIP_VERSION 0 /* 0...3 */ #include <linux/module.h> #include <linux/delay.h> #include <linux/mISDNif.h> #include <linux/mISDNhw.h> #include <linux/mISDNdsp.h> #include <linux/init.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> #include <net/sock.h> #include "core.h" #include "l1oip.h" static const char *l1oip_revision = "2.00"; static int l1oip_cnt; static spinlock_t l1oip_lock; static struct list_head l1oip_ilist; #define MAX_CARDS 16 static u_int type[MAX_CARDS]; static u_int codec[MAX_CARDS]; static u_int ip[MAX_CARDS*4]; static u_int port[MAX_CARDS]; static u_int remoteport[MAX_CARDS]; static u_int ondemand[MAX_CARDS]; static u_int limit[MAX_CARDS]; static u_int id[MAX_CARDS]; static int debug; static int ulaw; MODULE_AUTHOR("Andreas Eversberg"); MODULE_LICENSE("GPL"); module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(remoteport, 
uint, NULL, S_IRUGO | S_IWUSR); module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR); module_param(ulaw, uint, S_IRUGO | S_IWUSR); module_param(debug, uint, S_IRUGO | S_IWUSR); /* * send a frame via socket, if open and restart timer */ static int l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, u16 timebase, u8 *buf, int len) { u8 *p; int multi = 0; u8 frame[len+32]; struct socket *socket = NULL; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n", __func__, len); p = frame; /* restart timer */ if ((int)(hc->keep_tl.expires-jiffies) < 5*HZ) { del_timer(&hc->keep_tl); hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ; add_timer(&hc->keep_tl); } else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: resetting timer\n", __func__); /* drop if we have no remote ip or port */ if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: dropping frame, because remote " "IP is not set.\n", __func__); return len; } /* assemble frame */ *p++ = (L1OIP_VERSION<<6) /* version and coding */ | (hc->pri ? 0x20 : 0x00) /* type */ | (hc->id ? 0x10 : 0x00) /* id */ | localcodec; if (hc->id) { *p++ = hc->id>>24; /* id */ *p++ = hc->id>>16; *p++ = hc->id>>8; *p++ = hc->id; } *p++ = (multi == 1) ? 
0x80 : 0x00 + channel; /* m-flag, channel */ if (multi == 1) *p++ = len; /* length */ *p++ = timebase>>8; /* time base */ *p++ = timebase; if (buf && len) { /* add data to frame */ if (localcodec == 1 && ulaw) l1oip_ulaw_to_alaw(buf, len, p); else if (localcodec == 2 && !ulaw) l1oip_alaw_to_ulaw(buf, len, p); else if (localcodec == 3) len = l1oip_law_to_4bit(buf, len, p, &hc->chan[channel].codecstate); else memcpy(p, buf, len); } len += p - frame; /* check for socket in safe condition */ spin_lock(&hc->socket_lock); if (!hc->socket) { spin_unlock(&hc->socket_lock); return 0; } /* seize socket */ socket = hc->socket; hc->socket = NULL; spin_unlock(&hc->socket_lock); /* send packet */ if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: sending packet to socket (len " "= %d)\n", __func__, len); hc->sendiov.iov_base = frame; hc->sendiov.iov_len = len; len = kernel_sendmsg(socket, &hc->sendmsg, &hc->sendiov, 1, len); /* give socket back */ hc->socket = socket; /* no locking required */ return len; } /* * receive channel data from socket */ static void l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase, u8 *buf, int len) { struct sk_buff *nskb; struct bchannel *bch; struct dchannel *dch; u8 *p; u32 rx_counter; if (len == 0) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received empty keepalive data, " "ignoring\n", __func__); return; } if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n", __func__, len); if (channel < 1 || channel > 127) { printk(KERN_WARNING "%s: packet error - channel %d out of " "range\n", __func__, channel); return; } dch = hc->chan[channel].dch; bch = hc->chan[channel].bch; if (!dch && !bch) { printk(KERN_WARNING "%s: packet error - channel %d not in " "stack\n", __func__, channel); return; } /* prepare message */ nskb = mI_alloc_skb((remotecodec == 3) ? 
(len<<1) : len, GFP_ATOMIC); if (!nskb) { printk(KERN_ERR "%s: No mem for skb.\n", __func__); return; } p = skb_put(nskb, (remotecodec == 3) ? (len<<1) : len); if (remotecodec == 1 && ulaw) l1oip_alaw_to_ulaw(buf, len, p); else if (remotecodec == 2 && !ulaw) l1oip_ulaw_to_alaw(buf, len, p); else if (remotecodec == 3) len = l1oip_4bit_to_law(buf, len, p); else memcpy(p, buf, len); /* send message up */ if (dch && len >= 2) { dch->rx_skb = nskb; recv_Dchannel(dch); } if (bch) { /* expand 16 bit sequence number to 32 bit sequence number */ rx_counter = hc->chan[channel].rx_counter; if (((s16)(timebase - rx_counter)) >= 0) { /* time has changed forward */ if (timebase >= (rx_counter & 0xffff)) rx_counter = (rx_counter & 0xffff0000) | timebase; else rx_counter = ((rx_counter & 0xffff0000)+0x10000) | timebase; } else { /* time has changed backwards */ if (timebase < (rx_counter & 0xffff)) rx_counter = (rx_counter & 0xffff0000) | timebase; else rx_counter = ((rx_counter & 0xffff0000)-0x10000) | timebase; } hc->chan[channel].rx_counter = rx_counter; #ifdef REORDER_DEBUG if (hc->chan[channel].disorder_flag) { struct sk_buff *skb; int cnt; skb = hc->chan[channel].disorder_skb; hc->chan[channel].disorder_skb = nskb; nskb = skb; cnt = hc->chan[channel].disorder_cnt; hc->chan[channel].disorder_cnt = rx_counter; rx_counter = cnt; } hc->chan[channel].disorder_flag ^= 1; if (nskb) #endif queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb); } } /* * parse frame and extract channel data */ static void l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len) { u32 packet_id; u8 channel; u8 remotecodec; u16 timebase; int m, mlen; int len_start = len; /* initial frame length */ struct dchannel *dch = hc->chan[hc->d_idx].dch; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: received frame, parsing... 
(%d)\n", __func__, len); /* check length */ if (len < 1+1+2) { printk(KERN_WARNING "%s: packet error - length %d below " "4 bytes\n", __func__, len); return; } /* check version */ if (((*buf)>>6) != L1OIP_VERSION) { printk(KERN_WARNING "%s: packet error - unknown version %d\n", __func__, buf[0]>>6); return; } /* check type */ if (((*buf)&0x20) && !hc->pri) { printk(KERN_WARNING "%s: packet error - received E1 packet " "on S0 interface\n", __func__); return; } if (!((*buf)&0x20) && hc->pri) { printk(KERN_WARNING "%s: packet error - received S0 packet " "on E1 interface\n", __func__); return; } /* get id flag */ packet_id = (*buf>>4)&1; /* check coding */ remotecodec = (*buf) & 0x0f; if (remotecodec > 3) { printk(KERN_WARNING "%s: packet error - remotecodec %d " "unsupported\n", __func__, remotecodec); return; } buf++; len--; /* check packet_id */ if (packet_id) { if (!hc->id) { printk(KERN_WARNING "%s: packet error - packet has id " "0x%x, but we have not\n", __func__, packet_id); return; } if (len < 4) { printk(KERN_WARNING "%s: packet error - packet too " "short for ID value\n", __func__); return; } packet_id = (*buf++) << 24; packet_id += (*buf++) << 16; packet_id += (*buf++) << 8; packet_id += (*buf++); len -= 4; if (packet_id != hc->id) { printk(KERN_WARNING "%s: packet error - ID mismatch, " "got 0x%x, we 0x%x\n", __func__, packet_id, hc->id); return; } } else { if (hc->id) { printk(KERN_WARNING "%s: packet error - packet has no " "ID, but we have\n", __func__); return; } } multiframe: if (len < 1) { printk(KERN_WARNING "%s: packet error - packet too short, " "channel expected at position %d.\n", __func__, len-len_start+1); return; } /* get channel and multiframe flag */ channel = *buf&0x7f; m = *buf >> 7; buf++; len--; /* check length on multiframe */ if (m) { if (len < 1) { printk(KERN_WARNING "%s: packet error - packet too " "short, length expected at position %d.\n", __func__, len_start-len-1); return; } mlen = *buf++; len--; if (mlen == 0) mlen = 256; if 
(len < mlen+3) { printk(KERN_WARNING "%s: packet error - length %d at " "position %d exceeds total length %d.\n", __func__, mlen, len_start-len-1, len_start); return; } if (len == mlen+3) { printk(KERN_WARNING "%s: packet error - length %d at " "position %d will not allow additional " "packet.\n", __func__, mlen, len_start-len+1); return; } } else mlen = len-2; /* single frame, substract timebase */ if (len < 2) { printk(KERN_WARNING "%s: packet error - packet too short, time " "base expected at position %d.\n", __func__, len-len_start+1); return; } /* get time base */ timebase = (*buf++) << 8; timebase |= (*buf++); len -= 2; /* if inactive, we send up a PH_ACTIVATE and activate */ if (!test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become active due to " "received packet\n", __func__); test_and_set_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } /* distribute packet */ l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen); buf += mlen; len -= mlen; /* multiframe */ if (m) goto multiframe; /* restart timer */ if ((int)(hc->timeout_tl.expires-jiffies) < 5*HZ || !hc->timeout_on) { hc->timeout_on = 1; del_timer(&hc->timeout_tl); hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ; add_timer(&hc->timeout_tl); } else /* only adjust timer */ hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ; /* if ip or source port changes */ if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr) || (hc->sin_remote.sin_port != sin->sin_port)) { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: remote address changes from " "0x%08x to 0x%08x (port %d to %d)\n", __func__, ntohl(hc->sin_remote.sin_addr.s_addr), ntohl(sin->sin_addr.s_addr), ntohs(hc->sin_remote.sin_port), ntohs(sin->sin_port)); hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr; hc->sin_remote.sin_port = sin->sin_port; } } /* * socket stuff */ static int 
l1oip_socket_thread(void *data) { struct l1oip *hc = (struct l1oip *)data; int ret = 0; struct msghdr msg; struct sockaddr_in sin_rx; unsigned char *recvbuf; size_t recvbuf_size = 1500; int recvlen; struct socket *socket = NULL; DECLARE_COMPLETION_ONSTACK(wait); /* allocate buffer memory */ recvbuf = kmalloc(recvbuf_size, GFP_KERNEL); if (!recvbuf) { printk(KERN_ERR "%s: Failed to alloc recvbuf.\n", __func__); ret = -ENOMEM; goto fail; } /* make daemon */ allow_signal(SIGTERM); /* create socket */ if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) { printk(KERN_ERR "%s: Failed to create socket.\n", __func__); ret = -EIO; goto fail; } /* set incoming address */ hc->sin_local.sin_family = AF_INET; hc->sin_local.sin_addr.s_addr = INADDR_ANY; hc->sin_local.sin_port = htons((unsigned short)hc->localport); /* set outgoing address */ hc->sin_remote.sin_family = AF_INET; hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip); hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport); /* bind to incomming port */ if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local, sizeof(hc->sin_local))) { printk(KERN_ERR "%s: Failed to bind socket to port %d.\n", __func__, hc->localport); ret = -EINVAL; goto fail; } /* check sk */ if (socket->sk == NULL) { printk(KERN_ERR "%s: socket->sk == NULL\n", __func__); ret = -EIO; goto fail; } /* build receive message */ msg.msg_name = &sin_rx; msg.msg_namelen = sizeof(sin_rx); msg.msg_control = NULL; msg.msg_controllen = 0; /* build send message */ hc->sendmsg.msg_name = &hc->sin_remote; hc->sendmsg.msg_namelen = sizeof(hc->sin_remote); hc->sendmsg.msg_control = NULL; hc->sendmsg.msg_controllen = 0; /* give away socket */ spin_lock(&hc->socket_lock); hc->socket = socket; spin_unlock(&hc->socket_lock); /* read loop */ if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket created and open\n", __func__); while (!signal_pending(current)) { struct kvec iov = { .iov_base = recvbuf, .iov_len = recvbuf_size, }; recvlen = 
kernel_recvmsg(socket, &msg, &iov, 1, recvbuf_size, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); } else { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_WARNING "%s: broken pipe on socket\n", __func__); } } /* get socket back, check first if in use, maybe by send function */ spin_lock(&hc->socket_lock); /* if hc->socket is NULL, it is in use until it is given back */ while (!hc->socket) { spin_unlock(&hc->socket_lock); schedule_timeout(HZ/10); spin_lock(&hc->socket_lock); } hc->socket = NULL; spin_unlock(&hc->socket_lock); if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread terminating\n", __func__); fail: /* free recvbuf */ kfree(recvbuf); /* close socket */ if (socket) sock_release(socket); /* if we got killed, signal completion */ complete(&hc->socket_complete); hc->socket_thread = NULL; /* show termination of thread */ if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread terminated\n", __func__); return ret; } static void l1oip_socket_close(struct l1oip *hc) { struct dchannel *dch = hc->chan[hc->d_idx].dch; /* kill thread */ if (hc->socket_thread) { if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread exists, " "killing...\n", __func__); send_sig(SIGTERM, hc->socket_thread, 0); wait_for_completion(&hc->socket_complete); } /* if active, we send up a PH_DEACTIVATE and deactivate */ if (test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become deactivated " "due to timeout\n", __func__); test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } } static int l1oip_socket_open(struct l1oip *hc) { /* in case of reopen, we need to close first */ l1oip_socket_close(hc); init_completion(&hc->socket_complete); /* create receive process */ hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s", hc->name); if (IS_ERR(hc->socket_thread)) { int err = 
PTR_ERR(hc->socket_thread); printk(KERN_ERR "%s: Failed (%d) to create socket process.\n", __func__, err); hc->socket_thread = NULL; sock_release(hc->socket); return err; } if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: socket thread created\n", __func__); return 0; } static void l1oip_send_bh(struct work_struct *work) { struct l1oip *hc = container_of(work, struct l1oip, workq); if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: keepalive timer expired, sending empty " "frame on dchannel\n", __func__); /* send an empty l1oip frame at D-channel */ l1oip_socket_send(hc, 0, hc->d_idx, 0, 0, NULL, 0); } /* * timer stuff */ static void l1oip_keepalive(void *data) { struct l1oip *hc = (struct l1oip *)data; schedule_work(&hc->workq); } static void l1oip_timeout(void *data) { struct l1oip *hc = (struct l1oip *)data; struct dchannel *dch = hc->chan[hc->d_idx].dch; if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: timeout timer expired, turn layer one " "down.\n", __func__); hc->timeout_on = 0; /* state that timer must be initialized next time */ /* if timeout, we send up a PH_DEACTIVATE and deactivate */ if (test_bit(FLG_ACTIVE, &dch->Flags)) { if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: interface become deactivated " "due to timeout\n", __func__); test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } /* if we have ondemand set, we remove ip address */ if (hc->ondemand) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: on demand causes ip address to " "be removed\n", __func__); hc->sin_remote.sin_addr.s_addr = 0; } } /* * message handling */ static int handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct l1oip *hc = dch->hw; struct mISDNhead *hh = mISDN_HEAD_P(skb); int ret = -EINVAL; int l, ll; 
unsigned char *p; switch (hh->prim) { case PH_DATA_REQ: if (skb->len < 1) { printk(KERN_WARNING "%s: skb too small\n", __func__); break; } if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) { printk(KERN_WARNING "%s: skb too large\n", __func__); break; } /* send frame */ p = skb->data; l = skb->len; while (l) { ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; l1oip_socket_send(hc, 0, dch->slot, 0, hc->chan[dch->slot].tx_counter++, p, ll); p += ll; l -= ll; } skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; case PH_ACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n" , __func__, dch->slot, hc->b_num+1); skb_trim(skb, 0); if (test_bit(FLG_ACTIVE, &dch->Flags)) queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); else queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; case PH_DEACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d " "(1..%d)\n", __func__, dch->slot, hc->b_num+1); skb_trim(skb, 0); if (test_bit(FLG_ACTIVE, &dch->Flags)) queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); else queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; } if (!ret) dev_kfree_skb(skb); return ret; } static int channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) { int ret = 0; struct l1oip *hc = dch->hw; switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER | MISDN_CTRL_GETPEER; break; case MISDN_CTRL_SETPEER: hc->remoteip = (u32)cq->p1; hc->remoteport = cq->p2 & 0xffff; hc->localport = cq->p2 >> 16; if (!hc->remoteport) hc->remoteport = hc->localport; if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: got new ip address from user " "space.\n", __func__); l1oip_socket_open(hc); break; case MISDN_CTRL_UNSETPEER: if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: removing ip address.\n", __func__); hc->remoteip = 0; l1oip_socket_open(hc); break; 
case MISDN_CTRL_GETPEER: if (debug & DEBUG_L1OIP_SOCKET) printk(KERN_DEBUG "%s: getting ip address.\n", __func__); cq->p1 = hc->remoteip; cq->p2 = hc->remoteport | (hc->localport << 16); break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) { if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__, dch->dev.id, __builtin_return_address(0)); if (rq->protocol == ISDN_P_NONE) return -EINVAL; if ((dch->dev.D.protocol != ISDN_P_NONE) && (dch->dev.D.protocol != rq->protocol)) { if (debug & DEBUG_HW_OPEN) printk(KERN_WARNING "%s: change protocol %x to %x\n", __func__, dch->dev.D.protocol, rq->protocol); } if (dch->dev.D.protocol != rq->protocol) dch->dev.D.protocol = rq->protocol; if (test_bit(FLG_ACTIVE, &dch->Flags)) { _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); } rq->ch = &dch->dev.D; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } static int open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) { struct bchannel *bch; int ch; if (!test_channelmap(rq->adr.channel, dch->dev.channelmap)) return -EINVAL; if (rq->protocol == ISDN_P_NONE) return -EINVAL; ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. 
*/ bch = hc->chan[ch].bch; if (!bch) { printk(KERN_ERR "%s:internal error ch %d has no bch\n", __func__, ch); return -EINVAL; } if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } static int l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct l1oip *hc = dch->hw; struct channel_req *rq; int err = 0; if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case OPEN_CHANNEL: rq = arg; switch (rq->protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: if (hc->pri) { err = -EINVAL; break; } err = open_dchannel(hc, dch, rq); break; case ISDN_P_TE_E1: case ISDN_P_NT_E1: if (!hc->pri) { err = -EINVAL; break; } err = open_dchannel(hc, dch, rq); break; default: err = open_bchannel(hc, dch, rq); } break; case CLOSE_CHANNEL: if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) close from %p\n", __func__, dch->dev.id, __builtin_return_address(0)); module_put(THIS_MODULE); break; case CONTROL_CHANNEL: err = channel_dctrl(dch, arg); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: unknown command %x\n", __func__, cmd); err = -EINVAL; } return err; } static int handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) { struct bchannel *bch = container_of(ch, struct bchannel, ch); struct l1oip *hc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); int l, ll, i; unsigned char *p; switch (hh->prim) { case PH_DATA_REQ: if (skb->len <= 0) { printk(KERN_WARNING "%s: skb too small\n", __func__); break; } if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) { printk(KERN_WARNING "%s: skb too large\n", __func__); break; } /* check for AIS / ulaw-silence */ 
p = skb->data; l = skb->len; for (i = 0; i < l; i++) { if (*p++ != 0xff) break; } if (i == l) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: got AIS, not sending, " "but counting\n", __func__); hc->chan[bch->slot].tx_counter += l; skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; } /* check for silence */ p = skb->data; l = skb->len; for (i = 0; i < l; i++) { if (*p++ != 0x2a) break; } if (i == l) { if (debug & DEBUG_L1OIP_MSG) printk(KERN_DEBUG "%s: got silence, not sending" ", but counting\n", __func__); hc->chan[bch->slot].tx_counter += l; skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; } /* send frame */ p = skb->data; l = skb->len; while (l) { ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; l1oip_socket_send(hc, hc->codec, bch->slot, 0, hc->chan[bch->slot].tx_counter, p, ll); hc->chan[bch->slot].tx_counter += ll; p += ll; l -= ll; } skb_trim(skb, 0); queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); return 0; case PH_ACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n" , __func__, bch->slot, hc->b_num+1); hc->chan[bch->slot].codecstate = 0; test_and_set_bit(FLG_ACTIVE, &bch->Flags); skb_trim(skb, 0); queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); return 0; case PH_DEACTIVATE_REQ: if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d " "(1..%d)\n", __func__, bch->slot, hc->b_num+1); test_and_clear_bit(FLG_ACTIVE, &bch->Flags); skb_trim(skb, 0); queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); return 0; } if (!ret) dev_kfree_skb(skb); return ret; } static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { int ret = 0; struct dsp_features *features = (struct dsp_features *)(*((u_long *)&cq->p1)); switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_HW_FEATURES_OP; break; case MISDN_CTRL_HW_FEATURES: /* fill features structure */ if (debug & DEBUG_L1OIP_MSG) 
printk(KERN_DEBUG "%s: HW_FEATURE request\n", __func__); /* create confirm */ features->unclocked = 1; features->unordered = 1; break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct bchannel *bch = container_of(ch, struct bchannel, ch); int err = -EINVAL; if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); test_and_clear_bit(FLG_ACTIVE, &bch->Flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); err = 0; break; case CONTROL_CHANNEL: err = channel_bctrl(bch, arg); break; default: printk(KERN_WARNING "%s: unknown prim(%x)\n", __func__, cmd); } return err; } /* * cleanup module and stack */ static void release_card(struct l1oip *hc) { int ch; if (timer_pending(&hc->keep_tl)) del_timer(&hc->keep_tl); if (timer_pending(&hc->timeout_tl)) del_timer(&hc->timeout_tl); if (hc->socket_thread) l1oip_socket_close(hc); if (hc->registered && hc->chan[hc->d_idx].dch) mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev); for (ch = 0; ch < 128; ch++) { if (hc->chan[ch].dch) { mISDN_freedchannel(hc->chan[ch].dch); kfree(hc->chan[ch].dch); } if (hc->chan[ch].bch) { mISDN_freebchannel(hc->chan[ch].bch); kfree(hc->chan[ch].bch); #ifdef REORDER_DEBUG if (hc->chan[ch].disorder_skb) dev_kfree_skb(hc->chan[ch].disorder_skb); #endif } } spin_lock(&l1oip_lock); list_del(&hc->list); spin_unlock(&l1oip_lock); kfree(hc); } static void l1oip_cleanup(void) { struct l1oip *hc, *next; list_for_each_entry_safe(hc, next, &l1oip_ilist, list) release_card(hc); l1oip_4bit_free(); } /* * module and stack init */ static int init_card(struct l1oip *hc, int pri, int bundle) { struct dchannel *dch; struct bchannel *bch; int ret; int i, ch; spin_lock_init(&hc->socket_lock); hc->idx = l1oip_cnt; hc->pri = pri; hc->d_idx = pri ? 
16 : 3; hc->b_num = pri ? 30 : 2; hc->bundle = bundle; if (hc->pri) sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1); else sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1); switch (codec[l1oip_cnt]) { case 0: /* as is */ case 1: /* alaw */ case 2: /* ulaw */ case 3: /* 4bit */ break; default: printk(KERN_ERR "Codec(%d) not supported.\n", codec[l1oip_cnt]); return -EINVAL; } hc->codec = codec[l1oip_cnt]; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using codec %d\n", __func__, hc->codec); if (id[l1oip_cnt] == 0) { printk(KERN_WARNING "Warning: No 'id' value given or " "0, this is highly unsecure. Please use 32 " "bit randmom number 0x...\n"); } hc->id = id[l1oip_cnt]; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id); hc->ondemand = ondemand[l1oip_cnt]; if (hc->ondemand && !hc->id) { printk(KERN_ERR "%s: ondemand option only allowed in " "conjunction with non 0 ID\n", __func__); return -EINVAL; } if (limit[l1oip_cnt]) hc->b_num = limit[l1oip_cnt]; if (!pri && hc->b_num > 2) { printk(KERN_ERR "Maximum limit for BRI interface is 2 " "channels.\n"); return -EINVAL; } if (pri && hc->b_num > 126) { printk(KERN_ERR "Maximum limit for PRI interface is 126 " "channels.\n"); return -EINVAL; } if (pri && hc->b_num > 30) { printk(KERN_WARNING "Maximum limit for BRI interface is 30 " "channels.\n"); printk(KERN_WARNING "Your selection of %d channels must be " "supported by application.\n", hc->limit); } hc->remoteip = ip[l1oip_cnt<<2] << 24 | ip[(l1oip_cnt<<2)+1] << 16 | ip[(l1oip_cnt<<2)+2] << 8 | ip[(l1oip_cnt<<2)+3]; hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT+l1oip_cnt); if (remoteport[l1oip_cnt]) hc->remoteport = remoteport[l1oip_cnt]; else hc->remoteport = hc->localport; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: using local port %d remote ip " "%d.%d.%d.%d port %d ondemand %d\n", __func__, hc->localport, hc->remoteip >> 24, (hc->remoteip >> 16) & 0xff, (hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff, 
hc->remoteport, hc->ondemand); dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL); if (!dch) return -ENOMEM; dch->debug = debug; mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL); dch->hw = hc; if (pri) dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1); else dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0); dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); dch->dev.D.send = handle_dmsg; dch->dev.D.ctrl = l1oip_dctrl; dch->dev.nrbchan = hc->b_num; dch->slot = hc->d_idx; hc->chan[hc->d_idx].dch = dch; i = 1; for (ch = 0; ch < dch->dev.nrbchan; ch++) { if (ch == 15) i++; bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL); if (!bch) { printk(KERN_ERR "%s: no memory for bchannel\n", __func__); return -ENOMEM; } bch->nr = i + ch; bch->slot = i + ch; bch->debug = debug; mISDN_initbchannel(bch, MAX_DATA_MEM); bch->hw = hc; bch->ch.send = handle_bmsg; bch->ch.ctrl = l1oip_bctrl; bch->ch.nr = i + ch; list_add(&bch->ch.list, &dch->dev.bchannels); hc->chan[i + ch].bch = bch; set_channelmap(bch->nr, dch->dev.channelmap); } /* TODO: create a parent device for this driver */ ret = mISDN_register_device(&dch->dev, NULL, hc->name); if (ret) return ret; hc->registered = 1; if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: Setting up network card(%d)\n", __func__, l1oip_cnt + 1); ret = l1oip_socket_open(hc); if (ret) return ret; hc->keep_tl.function = (void *)l1oip_keepalive; hc->keep_tl.data = (ulong)hc; init_timer(&hc->keep_tl); hc->keep_tl.expires = jiffies + 2*HZ; /* two seconds first time */ add_timer(&hc->keep_tl); hc->timeout_tl.function = (void *)l1oip_timeout; hc->timeout_tl.data = (ulong)hc; init_timer(&hc->timeout_tl); hc->timeout_on = 0; /* state that we have timer off */ return 0; } static int __init l1oip_init(void) { int pri, bundle; struct l1oip *hc; int ret; printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. 
%s\n", l1oip_revision); INIT_LIST_HEAD(&l1oip_ilist); spin_lock_init(&l1oip_lock); if (l1oip_4bit_alloc(ulaw)) return -ENOMEM; l1oip_cnt = 0; while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) { switch (type[l1oip_cnt] & 0xff) { case 1: pri = 0; bundle = 0; break; case 2: pri = 1; bundle = 0; break; case 3: pri = 0; bundle = 1; break; case 4: pri = 1; bundle = 1; break; default: printk(KERN_ERR "Card type(%d) not supported.\n", type[l1oip_cnt] & 0xff); l1oip_cleanup(); return -EINVAL; } if (debug & DEBUG_L1OIP_INIT) printk(KERN_DEBUG "%s: interface %d is %s with %s.\n", __func__, l1oip_cnt, pri ? "PRI" : "BRI", bundle ? "bundled IP packet for all B-channels" : "separate IP packets for every B-channel"); hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC); if (!hc) { printk(KERN_ERR "No kmem for L1-over-IP driver.\n"); l1oip_cleanup(); return -ENOMEM; } INIT_WORK(&hc->workq, (void *)l1oip_send_bh); spin_lock(&l1oip_lock); list_add_tail(&hc->list, &l1oip_ilist); spin_unlock(&l1oip_lock); ret = init_card(hc, pri, bundle); if (ret) { l1oip_cleanup(); return ret; } l1oip_cnt++; } printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt); return 0; } module_init(l1oip_init); module_exit(l1oip_cleanup);
gpl-2.0
ezterry/kernel-biff-testing
drivers/net/mlx4/sense.c
1146
4592
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/*
 * Port type auto-sensing: periodically ask the firmware which link type
 * (the code distinguishes MLX4_PORT_TYPE_ETH and MLX4_PORT_TYPE_IB) each
 * auto-configured port has come up with, and re-apply the port
 * configuration when it changes.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

/*
 * Issue the firmware SENSE_PORT command for @port and store the sensed
 * type in *@type.
 *
 * Returns 0 on success, the mlx4_cmd_imm() error if the command fails, or
 * -EINVAL if the firmware returns a value above 2 (out_param is expected
 * to encode an enum mlx4_port_type).
 */
static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
			   enum mlx4_port_type *type)
{
	u64 out_param;
	int err = 0;

	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err) {
		mlx4_err(dev, "Sense command failed for port: %d\n", port);
		return err;
	}

	/* Firmware must report one of the known port types (0..2). */
	if (out_param > 2) {
		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
		return -EINVAL;
	}

	*type = out_param;
	return 0;
}

/*
 * Fill stype[0..num_ports-1] with the type each port should use.
 *
 * A port is actually sensed only when sensing is enabled for it
 * (do_sense_port and sense_allowed) and it is configured as
 * MLX4_PORT_TYPE_AUTO; otherwise (or if the SENSE_PORT command fails) the
 * caller-supplied default for that port is used.  Note that stype[] and
 * defaults[] are 0-based while port numbers are 1-based.
 */
void mlx4_do_sense_ports(struct mlx4_dev *dev,
			 enum mlx4_port_type *stype,
			 enum mlx4_port_type *defaults)
{
	struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
	int err;
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		stype[i - 1] = 0;
		if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
		    dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
			err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
			if (err)
				stype[i - 1] = defaults[i - 1];
		} else
			stype[i - 1] = defaults[i - 1];
	}

	/*
	 * Adjust port configuration:
	 * If port 1 sensed nothing and port 2 is IB, set both as IB
	 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
	 */
	if (stype[0] == MLX4_PORT_TYPE_ETH) {
		for (i = 1; i < dev->caps.num_ports; i++)
			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
	}
	if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
		for (i = 0; i < dev->caps.num_ports - 1; i++)
			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
	}

	/*
	 * If sensed nothing, remain in current configuration.
	 */
	for (i = 0; i < dev->caps.num_ports; i++)
		stype[i] = stype[i] ? stype[i] : defaults[i];
}

/*
 * Delayed-work handler: sense all ports under port_mutex and apply the
 * result via mlx4_change_port_types() (skipped when the sensed combination
 * fails mlx4_check_port_params()).  Always re-queues itself so sensing
 * repeats every MLX4_SENSE_RANGE.
 */
static void mlx4_sense_port(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
						sense_poll);
	struct mlx4_dev *dev = sense->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	enum mlx4_port_type stype[MLX4_MAX_PORTS];

	mutex_lock(&priv->port_mutex);
	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);

	if (mlx4_check_port_params(dev, stype))
		goto sense_again;

	if (mlx4_change_port_types(dev, stype))
		mlx4_err(dev, "Failed to change port_types\n");

sense_again:
	mutex_unlock(&priv->port_mutex);
	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}

/*
 * Start periodic port sensing.  A no-op unless the device advertises the
 * dual-port/dual-protocol capability (MLX4_DEV_CAP_FLAG_DPDP).
 */
void mlx4_start_sense(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
		return;

	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}

/* Stop periodic sensing and wait for a running handler to finish. */
void mlx4_stop_sense(struct mlx4_dev *dev)
{
	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}

/*
 * One-time setup: record the back-pointer to the device, enable sensing on
 * every port, and initialize the (deferrable) polling work item.  Does not
 * start polling; see mlx4_start_sense().
 */
void mlx4_sense_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;
	int port;

	sense->dev = dev;
	for (port = 1; port <= dev->caps.num_ports; port++)
		sense->do_sense_port[port] = 1;

	INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
}
gpl-2.0
shivdasgujare/linux-omap
fs/jbd2/recovery.c
1402
22400
/* * linux/fs/jbd2/recovery.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1999-2000 Red Hat Software --- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * Journal recovery routines for the generic filesystem journaling code; * part of the ext2fs journaling system. */ #ifndef __KERNEL__ #include "jfs_user.h" #else #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/crc32.h> #include <linux/blkdev.h> #endif /* * Maintain information about the progress of the recovery job, so that * the different passes can carry information between them. */ struct recovery_info { tid_t start_transaction; tid_t end_transaction; int nr_replays; int nr_revokes; int nr_revoke_hits; }; enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass); static int scan_revoke_records(journal_t *, struct buffer_head *, tid_t, struct recovery_info *); #ifdef __KERNEL__ /* Release readahead buffers after use */ static void journal_brelse_array(struct buffer_head *b[], int n) { while (--n >= 0) brelse (b[n]); } /* * When reading from the journal, we are going through the block device * layer directly and so there is no readahead being done for us. We * need to implement any readahead ourselves if we want it to happen at * all. Recovery is basically one long sequential read, so make sure we * do the IO in reasonably large chunks. * * This is not so critical that we need to be enormously clever about * the readahead size, though. 128K is a purely arbitrary, good-enough * fixed value. 
*/ #define MAXBUF 8 static int do_readahead(journal_t *journal, unsigned int start) { int err; unsigned int max, nbufs, next; unsigned long long blocknr; struct buffer_head *bh; struct buffer_head * bufs[MAXBUF]; /* Do up to 128K of readahead */ max = start + (128 * 1024 / journal->j_blocksize); if (max > journal->j_maxlen) max = journal->j_maxlen; /* Do the readahead itself. We'll submit MAXBUF buffer_heads at * a time to the block device IO layer. */ nbufs = 0; for (next = start; next < max; next++) { err = jbd2_journal_bmap(journal, next, &blocknr); if (err) { printk(KERN_ERR "JBD2: bad block at offset %u\n", next); goto failed; } bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) { err = -ENOMEM; goto failed; } if (!buffer_uptodate(bh) && !buffer_locked(bh)) { bufs[nbufs++] = bh; if (nbufs == MAXBUF) { ll_rw_block(READ, nbufs, bufs); journal_brelse_array(bufs, nbufs); nbufs = 0; } } else brelse(bh); } if (nbufs) ll_rw_block(READ, nbufs, bufs); err = 0; failed: if (nbufs) journal_brelse_array(bufs, nbufs); return err; } #endif /* __KERNEL__ */ /* * Read a block from the journal */ static int jread(struct buffer_head **bhp, journal_t *journal, unsigned int offset) { int err; unsigned long long blocknr; struct buffer_head *bh; *bhp = NULL; if (offset >= journal->j_maxlen) { printk(KERN_ERR "JBD2: corrupted journal superblock\n"); return -EIO; } err = jbd2_journal_bmap(journal, offset, &blocknr); if (err) { printk(KERN_ERR "JBD2: bad block at offset %u\n", offset); return err; } bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) return -ENOMEM; if (!buffer_uptodate(bh)) { /* If this is a brand new buffer, start readahead. Otherwise, we assume we are already reading it. 
*/ if (!buffer_req(bh)) do_readahead(journal, offset); wait_on_buffer(bh); } if (!buffer_uptodate(bh)) { printk(KERN_ERR "JBD2: Failed to read block at offset %u\n", offset); brelse(bh); return -EIO; } *bhp = bh; return 0; } static int jbd2_descr_block_csum_verify(journal_t *j, void *buf) { struct jbd2_journal_block_tail *tail; __u32 provided, calculated; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize - sizeof(struct jbd2_journal_block_tail)); provided = tail->t_checksum; tail->t_checksum = 0; calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); tail->t_checksum = provided; provided = be32_to_cpu(provided); return provided == calculated; } /* * Count the number of in-use tags in a journal descriptor block. */ static int count_tags(journal_t *journal, struct buffer_head *bh) { char * tagp; journal_block_tag_t * tag; int nr = 0, size = journal->j_blocksize; int tag_bytes = journal_tag_bytes(journal); if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) size -= sizeof(struct jbd2_journal_block_tail); tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) <= size) { tag = (journal_block_tag_t *) tagp; nr++; tagp += tag_bytes; if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID))) tagp += 16; if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG)) break; } return nr; } /* Make sure we wrap around the log correctly! */ #define wrap(journal, var) \ do { \ if (var >= (journal)->j_last) \ var -= ((journal)->j_last - (journal)->j_first); \ } while (0) /** * jbd2_journal_recover - recovers a on-disk journal * @journal: the journal to recover * * The primary function for recovering the log contents when mounting a * journaled device. * * Recovery is done in three passes. In the first pass, we look for the * end of the log. In the second, we assemble the list of revoke * blocks. 
In the third and final pass, we replay any un-revoked blocks * in the log. */ int jbd2_journal_recover(journal_t *journal) { int err, err2; journal_superblock_t * sb; struct recovery_info info; memset(&info, 0, sizeof(info)); sb = journal->j_superblock; /* * The journal superblock's s_start field (the current log head) * is always zero if, and only if, the journal was cleanly * unmounted. */ if (!sb->s_start) { jbd_debug(1, "No recovery required, last transaction %d\n", be32_to_cpu(sb->s_sequence)); journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1; return 0; } err = do_one_pass(journal, &info, PASS_SCAN); if (!err) err = do_one_pass(journal, &info, PASS_REVOKE); if (!err) err = do_one_pass(journal, &info, PASS_REPLAY); jbd_debug(1, "JBD2: recovery, exit status %d, " "recovered transactions %u to %u\n", err, info.start_transaction, info.end_transaction); jbd_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n", info.nr_replays, info.nr_revoke_hits, info.nr_revokes); /* Restart the log at the next transaction ID, thus invalidating * any existing commit records in the log. */ journal->j_transaction_sequence = ++info.end_transaction; jbd2_journal_clear_revoke(journal); err2 = sync_blockdev(journal->j_fs_dev); if (!err) err = err2; /* Make sure all replayed data is on permanent storage */ if (journal->j_flags & JBD2_BARRIER) { err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); if (!err) err = err2; } return err; } /** * jbd2_journal_skip_recovery - Start journal and wipe exiting records * @journal: journal to startup * * Locate any valid recovery information from the journal and set up the * journal structures in memory to ignore it (presumably because the * caller has evidence that it is out of date). * This function does'nt appear to be exorted.. 
* * We perform one pass over the journal to allow us to tell the user how * much recovery information is being erased, and to let us initialise * the journal transaction sequence numbers to the next unused ID. */ int jbd2_journal_skip_recovery(journal_t *journal) { int err; struct recovery_info info; memset (&info, 0, sizeof(info)); err = do_one_pass(journal, &info, PASS_SCAN); if (err) { printk(KERN_ERR "JBD2: error %d scanning journal\n", err); ++journal->j_transaction_sequence; } else { #ifdef CONFIG_JBD2_DEBUG int dropped = info.end_transaction - be32_to_cpu(journal->j_superblock->s_sequence); jbd_debug(1, "JBD2: ignoring %d transaction%s from the journal.\n", dropped, (dropped == 1) ? "" : "s"); #endif journal->j_transaction_sequence = ++info.end_transaction; } journal->j_tail = 0; return err; } static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag) { unsigned long long block = be32_to_cpu(tag->t_blocknr); if (tag_bytes > JBD2_TAG_SIZE32) block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32; return block; } /* * calc_chksums calculates the checksums for the blocks described in the * descriptor block. */ static int calc_chksums(journal_t *journal, struct buffer_head *bh, unsigned long *next_log_block, __u32 *crc32_sum) { int i, num_blks, err; unsigned long io_block; struct buffer_head *obh; num_blks = count_tags(journal, bh); /* Calculate checksum of the descriptor block. 
*/ *crc32_sum = crc32_be(*crc32_sum, (void *)bh->b_data, bh->b_size); for (i = 0; i < num_blks; i++) { io_block = (*next_log_block)++; wrap(journal, *next_log_block); err = jread(&obh, journal, io_block); if (err) { printk(KERN_ERR "JBD2: IO error %d recovering block " "%lu in log\n", err, io_block); return 1; } else { *crc32_sum = crc32_be(*crc32_sum, (void *)obh->b_data, obh->b_size); } put_bh(obh); } return 0; } static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) { struct commit_header *h; __u32 provided, calculated; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; h = buf; provided = h->h_chksum[0]; h->h_chksum[0] = 0; calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); h->h_chksum[0] = provided; provided = be32_to_cpu(provided); return provided == calculated; } static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, void *buf, __u32 sequence) { __u32 provided, calculated; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; sequence = cpu_to_be32(sequence); calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, sizeof(sequence)); calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize); provided = be32_to_cpu(tag->t_checksum); return provided == cpu_to_be32(calculated); } static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { unsigned int first_commit_ID, next_commit_ID; unsigned long next_log_block; int err, success = 0; journal_superblock_t * sb; journal_header_t * tmp; struct buffer_head * bh; unsigned int sequence; int blocktype; int tag_bytes = journal_tag_bytes(journal); __u32 crc32_sum = ~0; /* Transactional Checksums */ int descr_csum_size = 0; /* * First thing is to establish what we expect to find in the log * (in terms of transaction IDs), and where (in terms of log * block offsets): query the superblock. 
*/ sb = journal->j_superblock; next_commit_ID = be32_to_cpu(sb->s_sequence); next_log_block = be32_to_cpu(sb->s_start); first_commit_ID = next_commit_ID; if (pass == PASS_SCAN) info->start_transaction = first_commit_ID; jbd_debug(1, "Starting recovery pass %d\n", pass); /* * Now we walk through the log, transaction by transaction, * making sure that each transaction has a commit block in the * expected place. Each complete transaction gets replayed back * into the main filesystem. */ while (1) { int flags; char * tagp; journal_block_tag_t * tag; struct buffer_head * obh; struct buffer_head * nbh; cond_resched(); /* If we already know where to stop the log traversal, * check right now that we haven't gone past the end of * the log. */ if (pass != PASS_SCAN) if (tid_geq(next_commit_ID, info->end_transaction)) break; jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n", next_commit_ID, next_log_block, journal->j_last); /* Skip over each chunk of the transaction looking * either the next descriptor block or the final commit * record. */ jbd_debug(3, "JBD2: checking block %ld\n", next_log_block); err = jread(&bh, journal, next_log_block); if (err) goto failed; next_log_block++; wrap(journal, next_log_block); /* What kind of buffer is it? * * If it is a descriptor block, check that it has the * expected sequence number. Otherwise, we're all done * here. */ tmp = (journal_header_t *)bh->b_data; if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) { brelse(bh); break; } blocktype = be32_to_cpu(tmp->h_blocktype); sequence = be32_to_cpu(tmp->h_sequence); jbd_debug(3, "Found magic %d, sequence %d\n", blocktype, sequence); if (sequence != next_commit_ID) { brelse(bh); break; } /* OK, we have a valid descriptor block which matches * all of the sequence number checks. What are we going * to do with it? That depends on the pass... 
*/ switch(blocktype) { case JBD2_DESCRIPTOR_BLOCK: /* Verify checksum first */ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) descr_csum_size = sizeof(struct jbd2_journal_block_tail); if (descr_csum_size > 0 && !jbd2_descr_block_csum_verify(journal, bh->b_data)) { err = -EIO; goto failed; } /* If it is a valid descriptor block, replay it * in pass REPLAY; if journal_checksums enabled, then * calculate checksums in PASS_SCAN, otherwise, * just skip over the blocks it describes. */ if (pass != PASS_REPLAY) { if (pass == PASS_SCAN && JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && !info->end_transaction) { if (calc_chksums(journal, bh, &next_log_block, &crc32_sum)) { put_bh(bh); break; } put_bh(bh); continue; } next_log_block += count_tags(journal, bh); wrap(journal, next_log_block); put_bh(bh); continue; } /* A descriptor block: we can now write all of * the data blocks. Yay, useful work is finally * getting done here! */ tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) <= journal->j_blocksize - descr_csum_size) { unsigned long io_block; tag = (journal_block_tag_t *) tagp; flags = be16_to_cpu(tag->t_flags); io_block = next_log_block++; wrap(journal, next_log_block); err = jread(&obh, journal, io_block); if (err) { /* Recover what we can, but * report failure at the end. */ success = err; printk(KERN_ERR "JBD2: IO error %d recovering " "block %ld in log\n", err, io_block); } else { unsigned long long blocknr; J_ASSERT(obh != NULL); blocknr = read_tag_block(tag_bytes, tag); /* If the block has been * revoked, then we're all done * here. 
*/ if (jbd2_journal_test_revoke (journal, blocknr, next_commit_ID)) { brelse(obh); ++info->nr_revoke_hits; goto skip_write; } /* Look for block corruption */ if (!jbd2_block_tag_csum_verify( journal, tag, obh->b_data, be32_to_cpu(tmp->h_sequence))) { brelse(obh); success = -EIO; printk(KERN_ERR "JBD: Invalid " "checksum recovering " "block %llu in log\n", blocknr); continue; } /* Find a buffer for the new * data being restored */ nbh = __getblk(journal->j_fs_dev, blocknr, journal->j_blocksize); if (nbh == NULL) { printk(KERN_ERR "JBD2: Out of memory " "during recovery.\n"); err = -ENOMEM; brelse(bh); brelse(obh); goto failed; } lock_buffer(nbh); memcpy(nbh->b_data, obh->b_data, journal->j_blocksize); if (flags & JBD2_FLAG_ESCAPE) { *((__be32 *)nbh->b_data) = cpu_to_be32(JBD2_MAGIC_NUMBER); } BUFFER_TRACE(nbh, "marking dirty"); set_buffer_uptodate(nbh); mark_buffer_dirty(nbh); BUFFER_TRACE(nbh, "marking uptodate"); ++info->nr_replays; /* ll_rw_block(WRITE, 1, &nbh); */ unlock_buffer(nbh); brelse(obh); brelse(nbh); } skip_write: tagp += tag_bytes; if (!(flags & JBD2_FLAG_SAME_UUID)) tagp += 16; if (flags & JBD2_FLAG_LAST_TAG) break; } brelse(bh); continue; case JBD2_COMMIT_BLOCK: /* How to differentiate between interrupted commit * and journal corruption ? * * {nth transaction} * Checksum Verification Failed * | * ____________________ * | | * async_commit sync_commit * | | * | GO TO NEXT "Journal Corruption" * | TRANSACTION * | * {(n+1)th transanction} * | * _______|______________ * | | * Commit block found Commit block not found * | | * "Journal Corruption" | * _____________|_________ * | | * nth trans corrupt OR nth trans * and (n+1)th interrupted interrupted * before commit block * could reach the disk. * (Cannot find the difference in above * mentioned conditions. Hence assume * "Interrupted Commit".) */ /* Found an expected commit block: if checksums * are present verify them in PASS_SCAN; else not * much to do other than move on to the next sequence * number. 
*/ if (pass == PASS_SCAN && JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) { int chksum_err, chksum_seen; struct commit_header *cbh = (struct commit_header *)bh->b_data; unsigned found_chksum = be32_to_cpu(cbh->h_chksum[0]); chksum_err = chksum_seen = 0; if (info->end_transaction) { journal->j_failed_commit = info->end_transaction; brelse(bh); break; } if (crc32_sum == found_chksum && cbh->h_chksum_type == JBD2_CRC32_CHKSUM && cbh->h_chksum_size == JBD2_CRC32_CHKSUM_SIZE) chksum_seen = 1; else if (!(cbh->h_chksum_type == 0 && cbh->h_chksum_size == 0 && found_chksum == 0 && !chksum_seen)) /* * If fs is mounted using an old kernel and then * kernel with journal_chksum is used then we * get a situation where the journal flag has * checksum flag set but checksums are not * present i.e chksum = 0, in the individual * commit blocks. * Hence to avoid checksum failures, in this * situation, this extra check is added. */ chksum_err = 1; if (chksum_err) { info->end_transaction = next_commit_ID; if (!JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){ journal->j_failed_commit = next_commit_ID; brelse(bh); break; } } crc32_sum = ~0; } if (pass == PASS_SCAN && !jbd2_commit_block_csum_verify(journal, bh->b_data)) { info->end_transaction = next_commit_ID; if (!JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { journal->j_failed_commit = next_commit_ID; brelse(bh); break; } } brelse(bh); next_commit_ID++; continue; case JBD2_REVOKE_BLOCK: /* If we aren't in the REVOKE pass, then we can * just skip over this block. */ if (pass != PASS_REVOKE) { brelse(bh); continue; } err = scan_revoke_records(journal, bh, next_commit_ID, info); brelse(bh); if (err) goto failed; continue; default: jbd_debug(3, "Unrecognised magic %d, end of scan.\n", blocktype); brelse(bh); goto done; } } done: /* * We broke out of the log scan loop: either we came to the * known end of the log or we found an unexpected block in the * log. 
If the latter happened, then we know that the "current" * transaction marks the end of the valid log. */ if (pass == PASS_SCAN) { if (!info->end_transaction) info->end_transaction = next_commit_ID; } else { /* It's really bad news if different passes end up at * different places (but possible due to IO errors). */ if (info->end_transaction != next_commit_ID) { printk(KERN_ERR "JBD2: recovery pass %d ended at " "transaction %u, expected %u\n", pass, next_commit_ID, info->end_transaction); if (!success) success = -EIO; } } return success; failed: return err; } static int jbd2_revoke_block_csum_verify(journal_t *j, void *buf) { struct jbd2_journal_revoke_tail *tail; __u32 provided, calculated; if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) return 1; tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize - sizeof(struct jbd2_journal_revoke_tail)); provided = tail->r_checksum; tail->r_checksum = 0; calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); tail->r_checksum = provided; provided = be32_to_cpu(provided); return provided == calculated; } /* Scan a revoke record, marking all blocks mentioned as revoked. 
*/ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, tid_t sequence, struct recovery_info *info) { jbd2_journal_revoke_header_t *header; int offset, max; int record_len = 4; header = (jbd2_journal_revoke_header_t *) bh->b_data; offset = sizeof(jbd2_journal_revoke_header_t); max = be32_to_cpu(header->r_count); if (!jbd2_revoke_block_csum_verify(journal, header)) return -EINVAL; if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) record_len = 8; while (offset + record_len <= max) { unsigned long long blocknr; int err; if (record_len == 4) blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset))); else blocknr = be64_to_cpu(* ((__be64 *) (bh->b_data+offset))); offset += record_len; err = jbd2_journal_set_revoke(journal, blocknr, sequence); if (err) return err; ++info->nr_revokes; } return 0; }
gpl-2.0
ProtouProject/android_kernel_msm
drivers/media/common/tuners/xc4000.c
3706
45349
/* * Driver for Xceive XC4000 "QAM/8VSB single chip tuner" * * Copyright (c) 2007 Xceive Corporation * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org> * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com> * Copyright (c) 2009 Davide Ferri <d.ferri@zero11.it> * Copyright (c) 2010 Istvan Varga <istvan_v@mailbox.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/videodev2.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <asm/unaligned.h> #include "dvb_frontend.h" #include "xc4000.h" #include "tuner-i2c.h" #include "tuner-xc2028-types.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off))."); static int no_poweroff; module_param(no_poweroff, int, 0644); MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, " "0 (default): use device-specific default mode)."); static int audio_std; module_param(audio_std, int, 0644); MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly " "needs to know what audio standard is needed for some video standards " "with audio A2 or NICAM. 
The valid settings are a sum of:\n" " 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n" " 2: use A2 instead of NICAM or BTSC\n" " 4: use SECAM/K3 instead of K1\n" " 8: use PAL-D/K audio for SECAM-D/K\n" "16: use FM radio input 1 instead of input 2\n" "32: use mono audio (the lower three bits are ignored)"); static char firmware_name[30]; module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0); MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the " "default firmware name."); static DEFINE_MUTEX(xc4000_list_mutex); static LIST_HEAD(hybrid_tuner_instance_list); #define dprintk(level, fmt, arg...) if (debug >= level) \ printk(KERN_INFO "%s: " fmt, "xc4000", ## arg) /* struct for storing firmware table */ struct firmware_description { unsigned int type; v4l2_std_id id; __u16 int_freq; unsigned char *ptr; unsigned int size; }; struct firmware_properties { unsigned int type; v4l2_std_id id; v4l2_std_id std_req; __u16 int_freq; unsigned int scode_table; int scode_nr; }; struct xc4000_priv { struct tuner_i2c_props i2c_props; struct list_head hybrid_tuner_instance_list; struct firmware_description *firm; int firm_size; u32 if_khz; u32 freq_hz; u32 bandwidth; u8 video_standard; u8 rf_mode; u8 default_pm; u8 dvb_amplitude; u8 set_smoothedcvbs; u8 ignore_i2c_write_errors; __u16 firm_version; struct firmware_properties cur_fw; __u16 hwmodel; __u16 hwvers; struct mutex lock; }; #define XC4000_AUDIO_STD_B 1 #define XC4000_AUDIO_STD_A2 2 #define XC4000_AUDIO_STD_K3 4 #define XC4000_AUDIO_STD_L 8 #define XC4000_AUDIO_STD_INPUT1 16 #define XC4000_AUDIO_STD_MONO 32 #define XC4000_DEFAULT_FIRMWARE "dvb-fe-xc4000-1.4.fw" /* Misc Defines */ #define MAX_TV_STANDARD 24 #define XC_MAX_I2C_WRITE_LENGTH 64 #define XC_POWERED_DOWN 0x80000000U /* Signal Types */ #define XC_RF_MODE_AIR 0 #define XC_RF_MODE_CABLE 1 /* Product id */ #define XC_PRODUCT_ID_FW_NOT_LOADED 0x2000 #define XC_PRODUCT_ID_XC4000 0x0FA0 #define XC_PRODUCT_ID_XC4100 0x1004 /* 
Registers (Write-only) */ #define XREG_INIT 0x00 #define XREG_VIDEO_MODE 0x01 #define XREG_AUDIO_MODE 0x02 #define XREG_RF_FREQ 0x03 #define XREG_D_CODE 0x04 #define XREG_DIRECTSITTING_MODE 0x05 #define XREG_SEEK_MODE 0x06 #define XREG_POWER_DOWN 0x08 #define XREG_SIGNALSOURCE 0x0A #define XREG_SMOOTHEDCVBS 0x0E #define XREG_AMPLITUDE 0x10 /* Registers (Read-only) */ #define XREG_ADC_ENV 0x00 #define XREG_QUALITY 0x01 #define XREG_FRAME_LINES 0x02 #define XREG_HSYNC_FREQ 0x03 #define XREG_LOCK 0x04 #define XREG_FREQ_ERROR 0x05 #define XREG_SNR 0x06 #define XREG_VERSION 0x07 #define XREG_PRODUCT_ID 0x08 #define XREG_SIGNAL_LEVEL 0x0A #define XREG_NOISE_LEVEL 0x0B /* Basic firmware description. This will remain with the driver for documentation purposes. This represents an I2C firmware file encoded as a string of unsigned char. Format is as follows: char[0 ]=len0_MSB -> len = len_MSB * 256 + len_LSB char[1 ]=len0_LSB -> length of first write transaction char[2 ]=data0 -> first byte to be sent char[3 ]=data1 char[4 ]=data2 char[ ]=... char[M ]=dataN -> last byte to be sent char[M+1]=len1_MSB -> len = len_MSB * 256 + len_LSB char[M+2]=len1_LSB -> length of second write transaction char[M+3]=data0 char[M+4]=data1 ... etc. The [len] value should be interpreted as follows: len= len_MSB _ len_LSB len=1111_1111_1111_1111 : End of I2C_SEQUENCE len=0000_0000_0000_0000 : Reset command: Do hardware reset len=0NNN_NNNN_NNNN_NNNN : Normal transaction: number of bytes = {1:32767) len=1WWW_WWWW_WWWW_WWWW : Wait command: wait for {1:32767} ms For the RESET and WAIT commands, the two following bytes will contain immediately the length of the following transaction. 
*/ struct XC_TV_STANDARD { const char *Name; u16 audio_mode; u16 video_mode; u16 int_freq; }; /* Tuner standards */ #define XC4000_MN_NTSC_PAL_BTSC 0 #define XC4000_MN_NTSC_PAL_A2 1 #define XC4000_MN_NTSC_PAL_EIAJ 2 #define XC4000_MN_NTSC_PAL_Mono 3 #define XC4000_BG_PAL_A2 4 #define XC4000_BG_PAL_NICAM 5 #define XC4000_BG_PAL_MONO 6 #define XC4000_I_PAL_NICAM 7 #define XC4000_I_PAL_NICAM_MONO 8 #define XC4000_DK_PAL_A2 9 #define XC4000_DK_PAL_NICAM 10 #define XC4000_DK_PAL_MONO 11 #define XC4000_DK_SECAM_A2DK1 12 #define XC4000_DK_SECAM_A2LDK3 13 #define XC4000_DK_SECAM_A2MONO 14 #define XC4000_DK_SECAM_NICAM 15 #define XC4000_L_SECAM_NICAM 16 #define XC4000_LC_SECAM_NICAM 17 #define XC4000_DTV6 18 #define XC4000_DTV8 19 #define XC4000_DTV7_8 20 #define XC4000_DTV7 21 #define XC4000_FM_Radio_INPUT2 22 #define XC4000_FM_Radio_INPUT1 23 static struct XC_TV_STANDARD xc4000_standard[MAX_TV_STANDARD] = { {"M/N-NTSC/PAL-BTSC", 0x0000, 0x80A0, 4500}, {"M/N-NTSC/PAL-A2", 0x0000, 0x80A0, 4600}, {"M/N-NTSC/PAL-EIAJ", 0x0040, 0x80A0, 4500}, {"M/N-NTSC/PAL-Mono", 0x0078, 0x80A0, 4500}, {"B/G-PAL-A2", 0x0000, 0x8159, 5640}, {"B/G-PAL-NICAM", 0x0004, 0x8159, 5740}, {"B/G-PAL-MONO", 0x0078, 0x8159, 5500}, {"I-PAL-NICAM", 0x0080, 0x8049, 6240}, {"I-PAL-NICAM-MONO", 0x0078, 0x8049, 6000}, {"D/K-PAL-A2", 0x0000, 0x8049, 6380}, {"D/K-PAL-NICAM", 0x0080, 0x8049, 6200}, {"D/K-PAL-MONO", 0x0078, 0x8049, 6500}, {"D/K-SECAM-A2 DK1", 0x0000, 0x8049, 6340}, {"D/K-SECAM-A2 L/DK3", 0x0000, 0x8049, 6000}, {"D/K-SECAM-A2 MONO", 0x0078, 0x8049, 6500}, {"D/K-SECAM-NICAM", 0x0080, 0x8049, 6200}, {"L-SECAM-NICAM", 0x8080, 0x0009, 6200}, {"L'-SECAM-NICAM", 0x8080, 0x4009, 6200}, {"DTV6", 0x00C0, 0x8002, 0}, {"DTV8", 0x00C0, 0x800B, 0}, {"DTV7/8", 0x00C0, 0x801B, 0}, {"DTV7", 0x00C0, 0x8007, 0}, {"FM Radio-INPUT2", 0x0008, 0x9800, 10700}, {"FM Radio-INPUT1", 0x0008, 0x9000, 10700} }; static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val); static int xc4000_tuner_reset(struct 
dvb_frontend *fe); static void xc_debug_dump(struct xc4000_priv *priv); static int xc_send_i2c_data(struct xc4000_priv *priv, u8 *buf, int len) { struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) { if (priv->ignore_i2c_write_errors == 0) { printk(KERN_ERR "xc4000: I2C write failed (len=%i)\n", len); if (len == 4) { printk(KERN_ERR "bytes %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); } return -EREMOTEIO; } } return 0; } static int xc4000_tuner_reset(struct dvb_frontend *fe) { struct xc4000_priv *priv = fe->tuner_priv; int ret; dprintk(1, "%s()\n", __func__); if (fe->callback) { ret = fe->callback(((fe->dvb) && (fe->dvb->priv)) ? fe->dvb->priv : priv->i2c_props.adap->algo_data, DVB_FRONTEND_COMPONENT_TUNER, XC4000_TUNER_RESET, 0); if (ret) { printk(KERN_ERR "xc4000: reset failed\n"); return -EREMOTEIO; } } else { printk(KERN_ERR "xc4000: no tuner reset callback function, " "fatal\n"); return -EINVAL; } return 0; } static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData) { u8 buf[4]; int result; buf[0] = (regAddr >> 8) & 0xFF; buf[1] = regAddr & 0xFF; buf[2] = (i2cData >> 8) & 0xFF; buf[3] = i2cData & 0xFF; result = xc_send_i2c_data(priv, buf, 4); return result; } static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence) { struct xc4000_priv *priv = fe->tuner_priv; int i, nbytes_to_send, result; unsigned int len, pos, index; u8 buf[XC_MAX_I2C_WRITE_LENGTH]; index = 0; while ((i2c_sequence[index] != 0xFF) || (i2c_sequence[index + 1] != 0xFF)) { len = i2c_sequence[index] * 256 + i2c_sequence[index+1]; if (len == 0x0000) { /* RESET command */ /* NOTE: this is ignored, as the reset callback was */ /* already called by check_firmware() */ index += 2; } else if (len & 0x8000) { /* WAIT command */ msleep(len & 0x7FFF); index += 2; } else { /* Send i2c data whilst ensuring individual transactions * do not exceed 
XC_MAX_I2C_WRITE_LENGTH bytes. */ index += 2; buf[0] = i2c_sequence[index]; buf[1] = i2c_sequence[index + 1]; pos = 2; while (pos < len) { if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2) nbytes_to_send = XC_MAX_I2C_WRITE_LENGTH; else nbytes_to_send = (len - pos + 2); for (i = 2; i < nbytes_to_send; i++) { buf[i] = i2c_sequence[index + pos + i - 2]; } result = xc_send_i2c_data(priv, buf, nbytes_to_send); if (result != 0) return result; pos += nbytes_to_send - 2; } index += len; } } return 0; } static int xc_set_tv_standard(struct xc4000_priv *priv, u16 video_mode, u16 audio_mode) { int ret; dprintk(1, "%s(0x%04x,0x%04x)\n", __func__, video_mode, audio_mode); dprintk(1, "%s() Standard = %s\n", __func__, xc4000_standard[priv->video_standard].Name); /* Don't complain when the request fails because of i2c stretching */ priv->ignore_i2c_write_errors = 1; ret = xc_write_reg(priv, XREG_VIDEO_MODE, video_mode); if (ret == 0) ret = xc_write_reg(priv, XREG_AUDIO_MODE, audio_mode); priv->ignore_i2c_write_errors = 0; return ret; } static int xc_set_signal_source(struct xc4000_priv *priv, u16 rf_mode) { dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode, rf_mode == XC_RF_MODE_AIR ? 
"ANTENNA" : "CABLE"); if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) { rf_mode = XC_RF_MODE_CABLE; printk(KERN_ERR "%s(), Invalid mode, defaulting to CABLE", __func__); } return xc_write_reg(priv, XREG_SIGNALSOURCE, rf_mode); } static const struct dvb_tuner_ops xc4000_tuner_ops; static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz) { u16 freq_code; dprintk(1, "%s(%u)\n", __func__, freq_hz); if ((freq_hz > xc4000_tuner_ops.info.frequency_max) || (freq_hz < xc4000_tuner_ops.info.frequency_min)) return -EINVAL; freq_code = (u16)(freq_hz / 15625); /* WAS: Starting in firmware version 1.1.44, Xceive recommends using the FINERFREQ for all normal tuning (the doc indicates reg 0x03 should only be used for fast scanning for channel lock) */ /* WAS: XREG_FINERFREQ */ return xc_write_reg(priv, XREG_RF_FREQ, freq_code); } static int xc_get_adc_envelope(struct xc4000_priv *priv, u16 *adc_envelope) { return xc4000_readreg(priv, XREG_ADC_ENV, adc_envelope); } static int xc_get_frequency_error(struct xc4000_priv *priv, u32 *freq_error_hz) { int result; u16 regData; u32 tmp; result = xc4000_readreg(priv, XREG_FREQ_ERROR, &regData); if (result != 0) return result; tmp = (u32)regData & 0xFFFFU; tmp = (tmp < 0x8000U ? 
tmp : 0x10000U - tmp); (*freq_error_hz) = tmp * 15625; return result; } static int xc_get_lock_status(struct xc4000_priv *priv, u16 *lock_status) { return xc4000_readreg(priv, XREG_LOCK, lock_status); } static int xc_get_version(struct xc4000_priv *priv, u8 *hw_majorversion, u8 *hw_minorversion, u8 *fw_majorversion, u8 *fw_minorversion) { u16 data; int result; result = xc4000_readreg(priv, XREG_VERSION, &data); if (result != 0) return result; (*hw_majorversion) = (data >> 12) & 0x0F; (*hw_minorversion) = (data >> 8) & 0x0F; (*fw_majorversion) = (data >> 4) & 0x0F; (*fw_minorversion) = data & 0x0F; return 0; } static int xc_get_hsync_freq(struct xc4000_priv *priv, u32 *hsync_freq_hz) { u16 regData; int result; result = xc4000_readreg(priv, XREG_HSYNC_FREQ, &regData); if (result != 0) return result; (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100; return result; } static int xc_get_frame_lines(struct xc4000_priv *priv, u16 *frame_lines) { return xc4000_readreg(priv, XREG_FRAME_LINES, frame_lines); } static int xc_get_quality(struct xc4000_priv *priv, u16 *quality) { return xc4000_readreg(priv, XREG_QUALITY, quality); } static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal) { return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal); } static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise) { return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise); } static u16 xc_wait_for_lock(struct xc4000_priv *priv) { u16 lock_state = 0; int watchdog_count = 40; while ((lock_state == 0) && (watchdog_count > 0)) { xc_get_lock_status(priv, &lock_state); if (lock_state != 1) { msleep(5); watchdog_count--; } } return lock_state; } static int xc_tune_channel(struct xc4000_priv *priv, u32 freq_hz) { int found = 1; int result; dprintk(1, "%s(%u)\n", __func__, freq_hz); /* Don't complain when the request fails because of i2c stretching */ priv->ignore_i2c_write_errors = 1; result = xc_set_rf_frequency(priv, freq_hz); priv->ignore_i2c_write_errors = 0; if (result != 
0) return 0; /* wait for lock only in analog TV mode */ if ((priv->cur_fw.type & (FM | DTV6 | DTV7 | DTV78 | DTV8)) == 0) { if (xc_wait_for_lock(priv) != 1) found = 0; } /* Wait for stats to stabilize. * Frame Lines needs two frame times after initial lock * before it is valid. */ msleep(debug ? 100 : 10); if (debug) xc_debug_dump(priv); return found; } static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val) { u8 buf[2] = { reg >> 8, reg & 0xff }; u8 bval[2] = { 0, 0 }; struct i2c_msg msg[2] = { { .addr = priv->i2c_props.addr, .flags = 0, .buf = &buf[0], .len = 2 }, { .addr = priv->i2c_props.addr, .flags = I2C_M_RD, .buf = &bval[0], .len = 2 }, }; if (i2c_transfer(priv->i2c_props.adap, msg, 2) != 2) { printk(KERN_ERR "xc4000: I2C read failed\n"); return -EREMOTEIO; } *val = (bval[0] << 8) | bval[1]; return 0; } #define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0) static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq) { if (type & BASE) printk(KERN_CONT "BASE "); if (type & INIT1) printk(KERN_CONT "INIT1 "); if (type & F8MHZ) printk(KERN_CONT "F8MHZ "); if (type & MTS) printk(KERN_CONT "MTS "); if (type & D2620) printk(KERN_CONT "D2620 "); if (type & D2633) printk(KERN_CONT "D2633 "); if (type & DTV6) printk(KERN_CONT "DTV6 "); if (type & QAM) printk(KERN_CONT "QAM "); if (type & DTV7) printk(KERN_CONT "DTV7 "); if (type & DTV78) printk(KERN_CONT "DTV78 "); if (type & DTV8) printk(KERN_CONT "DTV8 "); if (type & FM) printk(KERN_CONT "FM "); if (type & INPUT1) printk(KERN_CONT "INPUT1 "); if (type & LCD) printk(KERN_CONT "LCD "); if (type & NOGD) printk(KERN_CONT "NOGD "); if (type & MONO) printk(KERN_CONT "MONO "); if (type & ATSC) printk(KERN_CONT "ATSC "); if (type & IF) printk(KERN_CONT "IF "); if (type & LG60) printk(KERN_CONT "LG60 "); if (type & ATI638) printk(KERN_CONT "ATI638 "); if (type & OREN538) printk(KERN_CONT "OREN538 "); if (type & OREN36) printk(KERN_CONT "OREN36 "); if (type & TOYOTA388) printk(KERN_CONT 
"TOYOTA388 "); if (type & TOYOTA794) printk(KERN_CONT "TOYOTA794 "); if (type & DIBCOM52) printk(KERN_CONT "DIBCOM52 "); if (type & ZARLINK456) printk(KERN_CONT "ZARLINK456 "); if (type & CHINA) printk(KERN_CONT "CHINA "); if (type & F6MHZ) printk(KERN_CONT "F6MHZ "); if (type & INPUT2) printk(KERN_CONT "INPUT2 "); if (type & SCODE) printk(KERN_CONT "SCODE "); if (type & HAS_IF) printk(KERN_CONT "HAS_IF_%d ", int_freq); } static int seek_firmware(struct dvb_frontend *fe, unsigned int type, v4l2_std_id *id) { struct xc4000_priv *priv = fe->tuner_priv; int i, best_i = -1; unsigned int best_nr_diffs = 255U; if (!priv->firm) { printk(KERN_ERR "Error! firmware not loaded\n"); return -EINVAL; } if (((type & ~SCODE) == 0) && (*id == 0)) *id = V4L2_STD_PAL; /* Seek for generic video standard match */ for (i = 0; i < priv->firm_size; i++) { v4l2_std_id id_diff_mask = (priv->firm[i].id ^ (*id)) & (*id); unsigned int type_diff_mask = (priv->firm[i].type ^ type) & (BASE_TYPES | DTV_TYPES | LCD | NOGD | MONO | SCODE); unsigned int nr_diffs; if (type_diff_mask & (BASE | INIT1 | FM | DTV6 | DTV7 | DTV78 | DTV8 | SCODE)) continue; nr_diffs = hweight64(id_diff_mask) + hweight32(type_diff_mask); if (!nr_diffs) /* Supports all the requested standards */ goto found; if (nr_diffs < best_nr_diffs) { best_nr_diffs = nr_diffs; best_i = i; } } /* FIXME: Would make sense to seek for type "hint" match ? */ if (best_i < 0) { i = -ENOENT; goto ret; } if (best_nr_diffs > 0U) { printk(KERN_WARNING "Selecting best matching firmware (%u bits differ) for " "type=(%x), id %016llx:\n", best_nr_diffs, type, (unsigned long long)*id); i = best_i; } found: *id = priv->firm[i].id; ret: if (debug) { printk(KERN_DEBUG "%s firmware for type=", (i < 0) ? 
"Can't find" : "Found"); dump_firm_type(type); printk(KERN_DEBUG "(%x), id %016llx.\n", type, (unsigned long long)*id); } return i; } static int load_firmware(struct dvb_frontend *fe, unsigned int type, v4l2_std_id *id) { struct xc4000_priv *priv = fe->tuner_priv; int pos, rc; unsigned char *p; pos = seek_firmware(fe, type, id); if (pos < 0) return pos; p = priv->firm[pos].ptr; /* Don't complain when the request fails because of i2c stretching */ priv->ignore_i2c_write_errors = 1; rc = xc_load_i2c_sequence(fe, p); priv->ignore_i2c_write_errors = 0; return rc; } static int xc4000_fwupload(struct dvb_frontend *fe) { struct xc4000_priv *priv = fe->tuner_priv; const struct firmware *fw = NULL; const unsigned char *p, *endp; int rc = 0; int n, n_array; char name[33]; const char *fname; if (firmware_name[0] != '\0') fname = firmware_name; else fname = XC4000_DEFAULT_FIRMWARE; dprintk(1, "Reading firmware %s\n", fname); rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent); if (rc < 0) { if (rc == -ENOENT) printk(KERN_ERR "Error: firmware %s not found.\n", fname); else printk(KERN_ERR "Error %d while requesting firmware %s\n", rc, fname); return rc; } p = fw->data; endp = p + fw->size; if (fw->size < sizeof(name) - 1 + 2 + 2) { printk(KERN_ERR "Error: firmware file %s has invalid size!\n", fname); goto corrupt; } memcpy(name, p, sizeof(name) - 1); name[sizeof(name) - 1] = '\0'; p += sizeof(name) - 1; priv->firm_version = get_unaligned_le16(p); p += 2; n_array = get_unaligned_le16(p); p += 2; dprintk(1, "Loading %d firmware images from %s, type: %s, ver %d.%d\n", n_array, fname, name, priv->firm_version >> 8, priv->firm_version & 0xff); priv->firm = kcalloc(n_array, sizeof(*priv->firm), GFP_KERNEL); if (priv->firm == NULL) { printk(KERN_ERR "Not enough memory to load firmware file.\n"); rc = -ENOMEM; goto done; } priv->firm_size = n_array; n = -1; while (p < endp) { __u32 type, size; v4l2_std_id id; __u16 int_freq = 0; n++; if (n >= n_array) { printk(KERN_ERR 
"More firmware images in file than " "were expected!\n"); goto corrupt; } /* Checks if there's enough bytes to read */ if (endp - p < sizeof(type) + sizeof(id) + sizeof(size)) goto header; type = get_unaligned_le32(p); p += sizeof(type); id = get_unaligned_le64(p); p += sizeof(id); if (type & HAS_IF) { int_freq = get_unaligned_le16(p); p += sizeof(int_freq); if (endp - p < sizeof(size)) goto header; } size = get_unaligned_le32(p); p += sizeof(size); if (!size || size > endp - p) { printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n", type, (unsigned long long)id, (unsigned)(endp - p), size); goto corrupt; } priv->firm[n].ptr = kzalloc(size, GFP_KERNEL); if (priv->firm[n].ptr == NULL) { printk(KERN_ERR "Not enough memory to load firmware file.\n"); rc = -ENOMEM; goto done; } if (debug) { printk(KERN_DEBUG "Reading firmware type "); dump_firm_type_and_int_freq(type, int_freq); printk(KERN_DEBUG "(%x), id %llx, size=%d.\n", type, (unsigned long long)id, size); } memcpy(priv->firm[n].ptr, p, size); priv->firm[n].type = type; priv->firm[n].id = id; priv->firm[n].size = size; priv->firm[n].int_freq = int_freq; p += size; } if (n + 1 != priv->firm_size) { printk(KERN_ERR "Firmware file is incomplete!\n"); goto corrupt; } goto done; header: printk(KERN_ERR "Firmware header is incomplete!\n"); corrupt: rc = -EINVAL; printk(KERN_ERR "Error: firmware file is corrupted!\n"); done: release_firmware(fw); if (rc == 0) dprintk(1, "Firmware files loaded.\n"); return rc; } static int load_scode(struct dvb_frontend *fe, unsigned int type, v4l2_std_id *id, __u16 int_freq, int scode) { struct xc4000_priv *priv = fe->tuner_priv; int pos, rc; unsigned char *p; u8 scode_buf[13]; u8 indirect_mode[5]; dprintk(1, "%s called int_freq=%d\n", __func__, int_freq); if (!int_freq) { pos = seek_firmware(fe, type, id); if (pos < 0) return pos; } else { for (pos = 0; pos < priv->firm_size; pos++) { if ((priv->firm[pos].int_freq == int_freq) && (priv->firm[pos].type & 
HAS_IF)) break; } if (pos == priv->firm_size) return -ENOENT; } p = priv->firm[pos].ptr; if (priv->firm[pos].size != 12 * 16 || scode >= 16) return -EINVAL; p += 12 * scode; if (debug) { tuner_info("Loading SCODE for type="); dump_firm_type_and_int_freq(priv->firm[pos].type, priv->firm[pos].int_freq); printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type, (unsigned long long)*id); } scode_buf[0] = 0x00; memcpy(&scode_buf[1], p, 12); /* Enter direct-mode */ rc = xc_write_reg(priv, XREG_DIRECTSITTING_MODE, 0); if (rc < 0) { printk(KERN_ERR "failed to put device into direct mode!\n"); return -EIO; } rc = xc_send_i2c_data(priv, scode_buf, 13); if (rc != 0) { /* Even if the send failed, make sure we set back to indirect mode */ printk(KERN_ERR "Failed to set scode %d\n", rc); } /* Switch back to indirect-mode */ memset(indirect_mode, 0, sizeof(indirect_mode)); indirect_mode[4] = 0x88; xc_send_i2c_data(priv, indirect_mode, sizeof(indirect_mode)); msleep(10); return 0; } static int check_firmware(struct dvb_frontend *fe, unsigned int type, v4l2_std_id std, __u16 int_freq) { struct xc4000_priv *priv = fe->tuner_priv; struct firmware_properties new_fw; int rc = 0, is_retry = 0; u16 hwmodel; v4l2_std_id std0; u8 hw_major, hw_minor, fw_major, fw_minor; dprintk(1, "%s called\n", __func__); if (!priv->firm) { rc = xc4000_fwupload(fe); if (rc < 0) return rc; } retry: new_fw.type = type; new_fw.id = std; new_fw.std_req = std; new_fw.scode_table = SCODE; new_fw.scode_nr = 0; new_fw.int_freq = int_freq; dprintk(1, "checking firmware, user requested type="); if (debug) { dump_firm_type(new_fw.type); printk(KERN_CONT "(%x), id %016llx, ", new_fw.type, (unsigned long long)new_fw.std_req); if (!int_freq) printk(KERN_CONT "scode_tbl "); else printk(KERN_CONT "int_freq %d, ", new_fw.int_freq); printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr); } /* No need to reload base firmware if it matches */ if (priv->cur_fw.type & BASE) { dprintk(1, "BASE firmware not changed.\n"); goto 
skip_base; } /* Updating BASE - forget about all currently loaded firmware */ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); /* Reset is needed before loading firmware */ rc = xc4000_tuner_reset(fe); if (rc < 0) goto fail; /* BASE firmwares are all std0 */ std0 = 0; rc = load_firmware(fe, BASE, &std0); if (rc < 0) { printk(KERN_ERR "Error %d while loading base firmware\n", rc); goto fail; } /* Load INIT1, if needed */ dprintk(1, "Load init1 firmware, if exists\n"); rc = load_firmware(fe, BASE | INIT1, &std0); if (rc == -ENOENT) rc = load_firmware(fe, BASE | INIT1, &std0); if (rc < 0 && rc != -ENOENT) { tuner_err("Error %d while loading init1 firmware\n", rc); goto fail; } skip_base: /* * No need to reload standard specific firmware if base firmware * was not reloaded and requested video standards have not changed. */ if (priv->cur_fw.type == (BASE | new_fw.type) && priv->cur_fw.std_req == std) { dprintk(1, "Std-specific firmware already loaded.\n"); goto skip_std_specific; } /* Reloading std-specific firmware forces a SCODE update */ priv->cur_fw.scode_table = 0; /* Load the standard firmware */ rc = load_firmware(fe, new_fw.type, &new_fw.id); if (rc < 0) goto fail; skip_std_specific: if (priv->cur_fw.scode_table == new_fw.scode_table && priv->cur_fw.scode_nr == new_fw.scode_nr) { dprintk(1, "SCODE firmware already loaded.\n"); goto check_device; } /* Load SCODE firmware, if exists */ rc = load_scode(fe, new_fw.type | new_fw.scode_table, &new_fw.id, new_fw.int_freq, new_fw.scode_nr); if (rc != 0) dprintk(1, "load scode failed %d\n", rc); check_device: rc = xc4000_readreg(priv, XREG_PRODUCT_ID, &hwmodel); if (xc_get_version(priv, &hw_major, &hw_minor, &fw_major, &fw_minor) != 0) { printk(KERN_ERR "Unable to read tuner registers.\n"); goto fail; } dprintk(1, "Device is Xceive %d version %d.%d, " "firmware version %d.%d\n", hwmodel, hw_major, hw_minor, fw_major, fw_minor); /* Check firmware version against what we downloaded. 
*/ if (priv->firm_version != ((fw_major << 8) | fw_minor)) { printk(KERN_WARNING "Incorrect readback of firmware version %d.%d.\n", fw_major, fw_minor); goto fail; } /* Check that the tuner hardware model remains consistent over time. */ if (priv->hwmodel == 0 && (hwmodel == XC_PRODUCT_ID_XC4000 || hwmodel == XC_PRODUCT_ID_XC4100)) { priv->hwmodel = hwmodel; priv->hwvers = (hw_major << 8) | hw_minor; } else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel || priv->hwvers != ((hw_major << 8) | hw_minor)) { printk(KERN_WARNING "Read invalid device hardware information - tuner " "hung?\n"); goto fail; } memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw)); /* * By setting BASE in cur_fw.type only after successfully loading all * firmwares, we can: * 1. Identify that BASE firmware with type=0 has been loaded; * 2. Tell whether BASE firmware was just changed the next time through. */ priv->cur_fw.type |= BASE; return 0; fail: memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); if (!is_retry) { msleep(50); is_retry = 1; dprintk(1, "Retrying firmware load\n"); goto retry; } if (rc == -ENOENT) rc = -EINVAL; return rc; } static void xc_debug_dump(struct xc4000_priv *priv) { u16 adc_envelope; u32 freq_error_hz = 0; u16 lock_status; u32 hsync_freq_hz = 0; u16 frame_lines; u16 quality; u16 signal = 0; u16 noise = 0; u8 hw_majorversion = 0, hw_minorversion = 0; u8 fw_majorversion = 0, fw_minorversion = 0; xc_get_adc_envelope(priv, &adc_envelope); dprintk(1, "*** ADC envelope (0-1023) = %d\n", adc_envelope); xc_get_frequency_error(priv, &freq_error_hz); dprintk(1, "*** Frequency error = %d Hz\n", freq_error_hz); xc_get_lock_status(priv, &lock_status); dprintk(1, "*** Lock status (0-Wait, 1-Locked, 2-No-signal) = %d\n", lock_status); xc_get_version(priv, &hw_majorversion, &hw_minorversion, &fw_majorversion, &fw_minorversion); dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n", hw_majorversion, hw_minorversion, fw_majorversion, fw_minorversion); if (priv->video_standard < 
XC4000_DTV6) { xc_get_hsync_freq(priv, &hsync_freq_hz); dprintk(1, "*** Horizontal sync frequency = %d Hz\n", hsync_freq_hz); xc_get_frame_lines(priv, &frame_lines); dprintk(1, "*** Frame lines = %d\n", frame_lines); } xc_get_quality(priv, &quality); dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality); xc_get_signal_level(priv, &signal); dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal); xc_get_noise_level(priv, &noise); dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise); } static int xc4000_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 delsys = c->delivery_system; u32 bw = c->bandwidth_hz; struct xc4000_priv *priv = fe->tuner_priv; unsigned int type; int ret = -EREMOTEIO; dprintk(1, "%s() frequency=%d (Hz)\n", __func__, c->frequency); mutex_lock(&priv->lock); switch (delsys) { case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; priv->freq_hz = c->frequency - 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; priv->freq_hz = c->frequency - 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; case SYS_DVBT: case SYS_DVBT2: dprintk(1, "%s() OFDM\n", __func__); if (bw == 0) { if (c->frequency < 400000000) { priv->freq_hz = c->frequency - 2250000; } else { priv->freq_hz = c->frequency - 2750000; } priv->video_standard = XC4000_DTV7_8; type = DTV78; } else if (bw <= 6000000) { priv->video_standard = XC4000_DTV6; priv->freq_hz = c->frequency - 1750000; type = DTV6; } else if (bw <= 7000000) { priv->video_standard = XC4000_DTV7; priv->freq_hz = c->frequency - 2250000; type = DTV7; } else { priv->video_standard = XC4000_DTV8; priv->freq_hz = c->frequency - 2750000; type = DTV8; } priv->rf_mode = XC_RF_MODE_AIR; break; default: printk(KERN_ERR "xc4000 delivery system not supported!\n"); ret = -EINVAL; goto fail; 
} dprintk(1, "%s() frequency=%d (compensated)\n", __func__, priv->freq_hz); /* Make sure the correct firmware type is loaded */ if (check_firmware(fe, type, 0, priv->if_khz) != 0) goto fail; priv->bandwidth = c->bandwidth_hz; ret = xc_set_signal_source(priv, priv->rf_mode); if (ret != 0) { printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n", priv->rf_mode); goto fail; } else { u16 video_mode, audio_mode; video_mode = xc4000_standard[priv->video_standard].video_mode; audio_mode = xc4000_standard[priv->video_standard].audio_mode; if (type == DTV6 && priv->firm_version != 0x0102) video_mode |= 0x0001; ret = xc_set_tv_standard(priv, video_mode, audio_mode); if (ret != 0) { printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n"); /* DJH - do not return when it fails... */ /* goto fail; */ } } if (xc_write_reg(priv, XREG_D_CODE, 0) == 0) ret = 0; if (priv->dvb_amplitude != 0) { if (xc_write_reg(priv, XREG_AMPLITUDE, (priv->firm_version != 0x0102 || priv->dvb_amplitude != 134 ? priv->dvb_amplitude : 132)) != 0) ret = -EREMOTEIO; } if (priv->set_smoothedcvbs != 0) { if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0) ret = -EREMOTEIO; } if (ret != 0) { printk(KERN_ERR "xc4000: setting registers failed\n"); /* goto fail; */ } xc_tune_channel(priv, priv->freq_hz); ret = 0; fail: mutex_unlock(&priv->lock); return ret; } static int xc4000_set_analog_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct xc4000_priv *priv = fe->tuner_priv; unsigned int type = 0; int ret = -EREMOTEIO; if (params->mode == V4L2_TUNER_RADIO) { dprintk(1, "%s() frequency=%d (in units of 62.5Hz)\n", __func__, params->frequency); mutex_lock(&priv->lock); params->std = 0; priv->freq_hz = params->frequency * 125L / 2; if (audio_std & XC4000_AUDIO_STD_INPUT1) { priv->video_standard = XC4000_FM_Radio_INPUT1; type = FM | INPUT1; } else { priv->video_standard = XC4000_FM_Radio_INPUT2; type = FM | INPUT2; } goto tune_channel; } dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n", 
__func__, params->frequency); mutex_lock(&priv->lock); /* params->frequency is in units of 62.5khz */ priv->freq_hz = params->frequency * 62500; params->std &= V4L2_STD_ALL; /* if std is not defined, choose one */ if (!params->std) params->std = V4L2_STD_PAL_BG; if (audio_std & XC4000_AUDIO_STD_MONO) type = MONO; if (params->std & V4L2_STD_MN) { params->std = V4L2_STD_MN; if (audio_std & XC4000_AUDIO_STD_MONO) { priv->video_standard = XC4000_MN_NTSC_PAL_Mono; } else if (audio_std & XC4000_AUDIO_STD_A2) { params->std |= V4L2_STD_A2; priv->video_standard = XC4000_MN_NTSC_PAL_A2; } else { params->std |= V4L2_STD_BTSC; priv->video_standard = XC4000_MN_NTSC_PAL_BTSC; } goto tune_channel; } if (params->std & V4L2_STD_PAL_BG) { params->std = V4L2_STD_PAL_BG; if (audio_std & XC4000_AUDIO_STD_MONO) { priv->video_standard = XC4000_BG_PAL_MONO; } else if (!(audio_std & XC4000_AUDIO_STD_A2)) { if (!(audio_std & XC4000_AUDIO_STD_B)) { params->std |= V4L2_STD_NICAM_A; priv->video_standard = XC4000_BG_PAL_NICAM; } else { params->std |= V4L2_STD_NICAM_B; priv->video_standard = XC4000_BG_PAL_NICAM; } } else { if (!(audio_std & XC4000_AUDIO_STD_B)) { params->std |= V4L2_STD_A2_A; priv->video_standard = XC4000_BG_PAL_A2; } else { params->std |= V4L2_STD_A2_B; priv->video_standard = XC4000_BG_PAL_A2; } } goto tune_channel; } if (params->std & V4L2_STD_PAL_I) { /* default to NICAM audio standard */ params->std = V4L2_STD_PAL_I | V4L2_STD_NICAM; if (audio_std & XC4000_AUDIO_STD_MONO) priv->video_standard = XC4000_I_PAL_NICAM_MONO; else priv->video_standard = XC4000_I_PAL_NICAM; goto tune_channel; } if (params->std & V4L2_STD_PAL_DK) { params->std = V4L2_STD_PAL_DK; if (audio_std & XC4000_AUDIO_STD_MONO) { priv->video_standard = XC4000_DK_PAL_MONO; } else if (audio_std & XC4000_AUDIO_STD_A2) { params->std |= V4L2_STD_A2; priv->video_standard = XC4000_DK_PAL_A2; } else { params->std |= V4L2_STD_NICAM; priv->video_standard = XC4000_DK_PAL_NICAM; } goto tune_channel; } if (params->std & 
V4L2_STD_SECAM_DK) { /* default to A2 audio standard */ params->std = V4L2_STD_SECAM_DK | V4L2_STD_A2; if (audio_std & XC4000_AUDIO_STD_L) { type = 0; priv->video_standard = XC4000_DK_SECAM_NICAM; } else if (audio_std & XC4000_AUDIO_STD_MONO) { priv->video_standard = XC4000_DK_SECAM_A2MONO; } else if (audio_std & XC4000_AUDIO_STD_K3) { params->std |= V4L2_STD_SECAM_K3; priv->video_standard = XC4000_DK_SECAM_A2LDK3; } else { priv->video_standard = XC4000_DK_SECAM_A2DK1; } goto tune_channel; } if (params->std & V4L2_STD_SECAM_L) { /* default to NICAM audio standard */ type = 0; params->std = V4L2_STD_SECAM_L | V4L2_STD_NICAM; priv->video_standard = XC4000_L_SECAM_NICAM; goto tune_channel; } if (params->std & V4L2_STD_SECAM_LC) { /* default to NICAM audio standard */ type = 0; params->std = V4L2_STD_SECAM_LC | V4L2_STD_NICAM; priv->video_standard = XC4000_LC_SECAM_NICAM; goto tune_channel; } tune_channel: /* FIXME: it could be air. */ priv->rf_mode = XC_RF_MODE_CABLE; if (check_firmware(fe, type, params->std, xc4000_standard[priv->video_standard].int_freq) != 0) goto fail; ret = xc_set_signal_source(priv, priv->rf_mode); if (ret != 0) { printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n", priv->rf_mode); goto fail; } else { u16 video_mode, audio_mode; video_mode = xc4000_standard[priv->video_standard].video_mode; audio_mode = xc4000_standard[priv->video_standard].audio_mode; if (priv->video_standard < XC4000_BG_PAL_A2) { if (type & NOGD) video_mode &= 0xFF7F; } else if (priv->video_standard < XC4000_I_PAL_NICAM) { if (priv->firm_version == 0x0102) video_mode &= 0xFEFF; if (audio_std & XC4000_AUDIO_STD_B) video_mode |= 0x0080; } ret = xc_set_tv_standard(priv, video_mode, audio_mode); if (ret != 0) { printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n"); goto fail; } } if (xc_write_reg(priv, XREG_D_CODE, 0) == 0) ret = 0; if (xc_write_reg(priv, XREG_AMPLITUDE, 1) != 0) ret = -EREMOTEIO; if (priv->set_smoothedcvbs != 0) { if (xc_write_reg(priv, 
XREG_SMOOTHEDCVBS, 1) != 0) ret = -EREMOTEIO; } if (ret != 0) { printk(KERN_ERR "xc4000: setting registers failed\n"); goto fail; } xc_tune_channel(priv, priv->freq_hz); ret = 0; fail: mutex_unlock(&priv->lock); return ret; } static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength) { struct xc4000_priv *priv = fe->tuner_priv; u16 value = 0; int rc; mutex_lock(&priv->lock); rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value); mutex_unlock(&priv->lock); if (rc < 0) goto ret; /* Informations from real testing of DVB-T and radio part, coeficient for one dB is 0xff. */ tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value); /* all known digital modes */ if ((priv->video_standard == XC4000_DTV6) || (priv->video_standard == XC4000_DTV7) || (priv->video_standard == XC4000_DTV7_8) || (priv->video_standard == XC4000_DTV8)) goto digital; /* Analog mode has NOISE LEVEL important, signal depends only on gain of antenna and amplifiers, but it doesn't tell anything about real quality of reception. */ mutex_lock(&priv->lock); rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value); mutex_unlock(&priv->lock); tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value); /* highest noise level: 32dB */ if (value >= 0x2000) { value = 0; } else { value = ~value << 3; } goto ret; /* Digital mode has SIGNAL LEVEL important and real noise level is stored in demodulator registers. 
*/ digital: /* best signal: -50dB */ if (value <= 0x3200) { value = 0xffff; /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */ } else if (value >= 0x713A) { value = 0; } else { value = ~(value - 0x3200) << 2; } ret: *strength = value; return rc; } static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; *freq = priv->freq_hz; if (debug) { mutex_lock(&priv->lock); if ((priv->cur_fw.type & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) { u16 snr = 0; if (xc4000_readreg(priv, XREG_SNR, &snr) == 0) { mutex_unlock(&priv->lock); dprintk(1, "%s() freq = %u, SNR = %d\n", __func__, *freq, snr); return 0; } } mutex_unlock(&priv->lock); } dprintk(1, "%s()\n", __func__); return 0; } static int xc4000_get_bandwidth(struct dvb_frontend *fe, u32 *bw) { struct xc4000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); *bw = priv->bandwidth; return 0; } static int xc4000_get_status(struct dvb_frontend *fe, u32 *status) { struct xc4000_priv *priv = fe->tuner_priv; u16 lock_status = 0; mutex_lock(&priv->lock); if (priv->cur_fw.type & BASE) xc_get_lock_status(priv, &lock_status); *status = (lock_status == 1 ? 
TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO : 0); if (priv->cur_fw.type & (DTV6 | DTV7 | DTV78 | DTV8)) *status &= (~TUNER_STATUS_STEREO); mutex_unlock(&priv->lock); dprintk(2, "%s() lock_status = %d\n", __func__, lock_status); return 0; } static int xc4000_sleep(struct dvb_frontend *fe) { struct xc4000_priv *priv = fe->tuner_priv; int ret = 0; dprintk(1, "%s()\n", __func__); mutex_lock(&priv->lock); /* Avoid firmware reload on slow devices */ if ((no_poweroff == 2 || (no_poweroff == 0 && priv->default_pm != 0)) && (priv->cur_fw.type & BASE) != 0) { /* force reset and firmware reload */ priv->cur_fw.type = XC_POWERED_DOWN; if (xc_write_reg(priv, XREG_POWER_DOWN, 0) != 0) { printk(KERN_ERR "xc4000: %s() unable to shutdown tuner\n", __func__); ret = -EREMOTEIO; } msleep(20); } mutex_unlock(&priv->lock); return ret; } static int xc4000_init(struct dvb_frontend *fe) { dprintk(1, "%s()\n", __func__); return 0; } static int xc4000_release(struct dvb_frontend *fe) { struct xc4000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); mutex_lock(&xc4000_list_mutex); if (priv) hybrid_tuner_release_state(priv); mutex_unlock(&xc4000_list_mutex); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops xc4000_tuner_ops = { .info = { .name = "Xceive XC4000", .frequency_min = 1000000, .frequency_max = 1023000000, .frequency_step = 50000, }, .release = xc4000_release, .init = xc4000_init, .sleep = xc4000_sleep, .set_params = xc4000_set_params, .set_analog_params = xc4000_set_analog_params, .get_frequency = xc4000_get_frequency, .get_rf_strength = xc4000_get_signal, .get_bandwidth = xc4000_get_bandwidth, .get_status = xc4000_get_status }; struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct xc4000_config *cfg) { struct xc4000_priv *priv = NULL; int instance; u16 id = 0; dprintk(1, "%s(%d-%04x)\n", __func__, i2c ? i2c_adapter_id(i2c) : -1, cfg ? 
cfg->i2c_address : -1); mutex_lock(&xc4000_list_mutex); instance = hybrid_tuner_request_state(struct xc4000_priv, priv, hybrid_tuner_instance_list, i2c, cfg->i2c_address, "xc4000"); switch (instance) { case 0: goto fail; break; case 1: /* new tuner instance */ priv->bandwidth = 6000000; /* set default configuration */ priv->if_khz = 4560; priv->default_pm = 0; priv->dvb_amplitude = 134; priv->set_smoothedcvbs = 1; mutex_init(&priv->lock); fe->tuner_priv = priv; break; default: /* existing tuner instance */ fe->tuner_priv = priv; break; } if (cfg->if_khz != 0) { /* copy configuration if provided by the caller */ priv->if_khz = cfg->if_khz; priv->default_pm = cfg->default_pm; priv->dvb_amplitude = cfg->dvb_amplitude; priv->set_smoothedcvbs = cfg->set_smoothedcvbs; } /* Check if firmware has been loaded. It is possible that another instance of the driver has loaded the firmware. */ if (instance == 1) { if (xc4000_readreg(priv, XREG_PRODUCT_ID, &id) != 0) goto fail; } else { id = ((priv->cur_fw.type & BASE) != 0 ? 
priv->hwmodel : XC_PRODUCT_ID_FW_NOT_LOADED); } switch (id) { case XC_PRODUCT_ID_XC4000: case XC_PRODUCT_ID_XC4100: printk(KERN_INFO "xc4000: Successfully identified at address 0x%02x\n", cfg->i2c_address); printk(KERN_INFO "xc4000: Firmware has been loaded previously\n"); break; case XC_PRODUCT_ID_FW_NOT_LOADED: printk(KERN_INFO "xc4000: Successfully identified at address 0x%02x\n", cfg->i2c_address); printk(KERN_INFO "xc4000: Firmware has not been loaded previously\n"); break; default: printk(KERN_ERR "xc4000: Device not found at addr 0x%02x (0x%x)\n", cfg->i2c_address, id); goto fail; } mutex_unlock(&xc4000_list_mutex); memcpy(&fe->ops.tuner_ops, &xc4000_tuner_ops, sizeof(struct dvb_tuner_ops)); if (instance == 1) { int ret; mutex_lock(&priv->lock); ret = xc4000_fwupload(fe); mutex_unlock(&priv->lock); if (ret != 0) goto fail2; } return fe; fail: mutex_unlock(&xc4000_list_mutex); fail2: xc4000_release(fe); return NULL; } EXPORT_SYMBOL(xc4000_attach); MODULE_AUTHOR("Steven Toth, Davide Ferri"); MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver"); MODULE_LICENSE("GPL");
gpl-2.0
threader/Huawei_S7_kernel_2.6.35
arch/arm/mach-iop13xx/setup.c
3962
15096
/* * iop13xx platform Initialization * Copyright (c) 2005-2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/serial_8250.h> #include <linux/io.h> #ifdef CONFIG_MTD_PHYSMAP #include <linux/mtd/physmap.h> #endif #include <asm/mach/map.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/hardware/iop_adma.h> #define IOP13XX_UART_XTAL 33334000 #define IOP13XX_SETUP_DEBUG 0 #define PRINTK(x...) 
((void)(IOP13XX_SETUP_DEBUG && printk(x))) /* Standard IO mapping for all IOP13XX based systems */ static struct map_desc iop13xx_std_desc[] __initdata = { { /* mem mapped registers */ .virtual = IOP13XX_PMMR_VIRT_MEM_BASE, .pfn = __phys_to_pfn(IOP13XX_PMMR_PHYS_MEM_BASE), .length = IOP13XX_PMMR_SIZE, .type = MT_DEVICE, }, { /* PCIE IO space */ .virtual = IOP13XX_PCIE_LOWER_IO_VA, .pfn = __phys_to_pfn(IOP13XX_PCIE_LOWER_IO_PA), .length = IOP13XX_PCIX_IO_WINDOW_SIZE, .type = MT_DEVICE, }, { /* PCIX IO space */ .virtual = IOP13XX_PCIX_LOWER_IO_VA, .pfn = __phys_to_pfn(IOP13XX_PCIX_LOWER_IO_PA), .length = IOP13XX_PCIX_IO_WINDOW_SIZE, .type = MT_DEVICE, }, }; static struct resource iop13xx_uart0_resources[] = { [0] = { .start = IOP13XX_UART0_PHYS, .end = IOP13XX_UART0_PHYS + 0x3f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_UART0, .end = IRQ_IOP13XX_UART0, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_uart1_resources[] = { [0] = { .start = IOP13XX_UART1_PHYS, .end = IOP13XX_UART1_PHYS + 0x3f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_UART1, .end = IRQ_IOP13XX_UART1, .flags = IORESOURCE_IRQ } }; static struct plat_serial8250_port iop13xx_uart0_data[] = { { .membase = (char*)(IOP13XX_UART0_VIRT), .mapbase = (IOP13XX_UART0_PHYS), .irq = IRQ_IOP13XX_UART0, .uartclk = IOP13XX_UART_XTAL, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_SKIP_TEST, }, { }, }; static struct plat_serial8250_port iop13xx_uart1_data[] = { { .membase = (char*)(IOP13XX_UART1_VIRT), .mapbase = (IOP13XX_UART1_PHYS), .irq = IRQ_IOP13XX_UART1, .uartclk = IOP13XX_UART_XTAL, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_SKIP_TEST, }, { }, }; /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_uart0 = { .name = "serial8250", .id = 0, .dev.platform_data = iop13xx_uart0_data, .num_resources = 2, .resource = iop13xx_uart0_resources, }; static struct platform_device iop13xx_uart1 = { .name = "serial8250", .id = 0, 
.dev.platform_data = iop13xx_uart1_data, .num_resources = 2, .resource = iop13xx_uart1_resources }; static struct resource iop13xx_i2c_0_resources[] = { [0] = { .start = IOP13XX_I2C0_PHYS, .end = IOP13XX_I2C0_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_0, .end = IRQ_IOP13XX_I2C_0, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_i2c_1_resources[] = { [0] = { .start = IOP13XX_I2C1_PHYS, .end = IOP13XX_I2C1_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_1, .end = IRQ_IOP13XX_I2C_1, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_i2c_2_resources[] = { [0] = { .start = IOP13XX_I2C2_PHYS, .end = IOP13XX_I2C2_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_2, .end = IRQ_IOP13XX_I2C_2, .flags = IORESOURCE_IRQ } }; /* I2C controllers. The IOP13XX uses the same block as the IOP3xx, so * we just use the same device name. */ /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_i2c_0_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_0_resources }; static struct platform_device iop13xx_i2c_1_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_1_resources }; static struct platform_device iop13xx_i2c_2_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_2_resources }; #ifdef CONFIG_MTD_PHYSMAP /* PBI Flash Device */ static struct physmap_flash_data iq8134x_flash_data = { .width = 2, }; static struct resource iq8134x_flash_resource = { .start = IQ81340_FLASHBASE, .end = 0, .flags = IORESOURCE_MEM, }; static struct platform_device iq8134x_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &iq8134x_flash_data, }, .num_resources = 1, .resource = &iq8134x_flash_resource, }; static unsigned long iq8134x_probe_flash_size(void) { uint8_t __iomem *flash_addr = ioremap(IQ81340_FLASHBASE, PAGE_SIZE); int i; char 
query[3]; unsigned long size = 0; int width = iq8134x_flash_data.width; if (flash_addr) { /* send CFI 'query' command */ writew(0x98, flash_addr); /* check for CFI compliance */ for (i = 0; i < 3 * width; i += width) query[i / width] = readb(flash_addr + (0x10 * width) + i); /* read the size */ if (memcmp(query, "QRY", 3) == 0) size = 1 << readb(flash_addr + (0x27 * width)); /* send CFI 'read array' command */ writew(0xff, flash_addr); iounmap(flash_addr); } return size; } #endif /* ADMA Channels */ static struct resource iop13xx_adma_0_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(0), .end = IOP13XX_ADMA_UPPER_PA(0), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA0_EOT, .end = IRQ_IOP13XX_ADMA0_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA0_EOC, .end = IRQ_IOP13XX_ADMA0_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA0_ERR, .end = IRQ_IOP13XX_ADMA0_ERR, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_adma_1_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(1), .end = IOP13XX_ADMA_UPPER_PA(1), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA1_EOT, .end = IRQ_IOP13XX_ADMA1_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA1_EOC, .end = IRQ_IOP13XX_ADMA1_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA1_ERR, .end = IRQ_IOP13XX_ADMA1_ERR, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_adma_2_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(2), .end = IOP13XX_ADMA_UPPER_PA(2), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA2_EOT, .end = IRQ_IOP13XX_ADMA2_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA2_EOC, .end = IRQ_IOP13XX_ADMA2_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA2_ERR, .end = IRQ_IOP13XX_ADMA2_ERR, .flags = IORESOURCE_IRQ } }; static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); static struct iop_adma_platform_data iop13xx_adma_0_data = { .hw_id = 0, .pool_size = 
PAGE_SIZE, }; static struct iop_adma_platform_data iop13xx_adma_1_data = { .hw_id = 1, .pool_size = PAGE_SIZE, }; static struct iop_adma_platform_data iop13xx_adma_2_data = { .hw_id = 2, .pool_size = PAGE_SIZE, }; /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_adma_0_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_0_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_0_data, }, }; static struct platform_device iop13xx_adma_1_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_1_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_1_data, }, }; static struct platform_device iop13xx_adma_2_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_2_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_2_data, }, }; void __init iop13xx_map_io(void) { /* Initialize the Static Page Table maps */ iotable_init(iop13xx_std_desc, ARRAY_SIZE(iop13xx_std_desc)); } static int init_uart; static int init_i2c; static int init_adma; void __init iop13xx_platform_init(void) { int i; u32 uart_idx, i2c_idx, adma_idx, plat_idx; struct platform_device *iop13xx_devices[IQ81340_MAX_PLAT_DEVICES]; /* set the bases so we can read the device id */ iop13xx_set_atu_mmr_bases(); memset(iop13xx_devices, 0, sizeof(iop13xx_devices)); if (init_uart == IOP13XX_INIT_UART_DEFAULT) { switch (iop13xx_dev_id()) { /* enable both uarts on iop341 */ case 0x3380: case 0x3384: case 0x3388: case 0x338c: init_uart |= IOP13XX_INIT_UART_0; init_uart |= IOP13XX_INIT_UART_1; break; /* only enable uart 1 */ default: init_uart |= IOP13XX_INIT_UART_1; } } if (init_i2c == IOP13XX_INIT_I2C_DEFAULT) { switch (iop13xx_dev_id()) { /* 
enable all i2c units on iop341 and iop342 */ case 0x3380: case 0x3384: case 0x3388: case 0x338c: case 0x3382: case 0x3386: case 0x338a: case 0x338e: init_i2c |= IOP13XX_INIT_I2C_0; init_i2c |= IOP13XX_INIT_I2C_1; init_i2c |= IOP13XX_INIT_I2C_2; break; /* only enable i2c 1 and 2 */ default: init_i2c |= IOP13XX_INIT_I2C_1; init_i2c |= IOP13XX_INIT_I2C_2; } } if (init_adma == IOP13XX_INIT_ADMA_DEFAULT) { init_adma |= IOP13XX_INIT_ADMA_0; init_adma |= IOP13XX_INIT_ADMA_1; init_adma |= IOP13XX_INIT_ADMA_2; } plat_idx = 0; uart_idx = 0; i2c_idx = 0; /* uart 1 (if enabled) is ttyS0 */ if (init_uart & IOP13XX_INIT_UART_1) { PRINTK("Adding uart1 to platform device list\n"); iop13xx_uart1.id = uart_idx++; iop13xx_devices[plat_idx++] = &iop13xx_uart1; } if (init_uart & IOP13XX_INIT_UART_0) { PRINTK("Adding uart0 to platform device list\n"); iop13xx_uart0.id = uart_idx++; iop13xx_devices[plat_idx++] = &iop13xx_uart0; } for(i = 0; i < IQ81340_NUM_I2C; i++) { if ((init_i2c & (1 << i)) && IOP13XX_SETUP_DEBUG) printk("Adding i2c%d to platform device list\n", i); switch(init_i2c & (1 << i)) { case IOP13XX_INIT_I2C_0: iop13xx_i2c_0_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_0_controller; break; case IOP13XX_INIT_I2C_1: iop13xx_i2c_1_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_1_controller; break; case IOP13XX_INIT_I2C_2: iop13xx_i2c_2_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_2_controller; break; } } /* initialize adma channel ids and capabilities */ adma_idx = 0; for (i = 0; i < IQ81340_NUM_ADMA; i++) { struct iop_adma_platform_data *plat_data; if ((init_adma & (1 << i)) && IOP13XX_SETUP_DEBUG) printk(KERN_INFO "Adding adma%d to platform device list\n", i); switch (init_adma & (1 << i)) { case IOP13XX_INIT_ADMA_0: iop13xx_adma_0_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_0_channel; plat_data = &iop13xx_adma_0_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); 
dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: iop13xx_adma_1_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_1_channel; plat_data = &iop13xx_adma_1_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: iop13xx_adma_2_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_2_channel; plat_data = &iop13xx_adma_2_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; } } #ifdef CONFIG_MTD_PHYSMAP iq8134x_flash_resource.end = iq8134x_flash_resource.start + iq8134x_probe_flash_size() - 1; if (iq8134x_flash_resource.end > iq8134x_flash_resource.start) iop13xx_devices[plat_idx++] = &iq8134x_flash; else printk(KERN_ERR "%s: Failed to probe flash size\n", __func__); #endif platform_add_devices(iop13xx_devices, plat_idx); } static int __init iop13xx_init_uart_setup(char *str) { if (str) { while (*str != '\0') { switch(*str) { case '0': init_uart |= IOP13XX_INIT_UART_0; break; case '1': init_uart |= IOP13XX_INIT_UART_1; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_uart\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_uart = IOP13XX_INIT_UART_DEFAULT; } str++; } } return 1; } static int __init iop13xx_init_i2c_setup(char *str) { if (str) { while (*str != '\0') { switch(*str) { case '0': init_i2c |= IOP13XX_INIT_I2C_0; break; case '1': init_i2c |= 
IOP13XX_INIT_I2C_1; break; case '2': init_i2c |= IOP13XX_INIT_I2C_2; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_i2c\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_i2c = IOP13XX_INIT_I2C_DEFAULT; } str++; } } return 1; } static int __init iop13xx_init_adma_setup(char *str) { if (str) { while (*str != '\0') { switch (*str) { case '0': init_adma |= IOP13XX_INIT_ADMA_0; break; case '1': init_adma |= IOP13XX_INIT_ADMA_1; break; case '2': init_adma |= IOP13XX_INIT_ADMA_2; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_adma\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_adma = IOP13XX_INIT_ADMA_DEFAULT; } str++; } } return 1; } __setup("iop13xx_init_adma", iop13xx_init_adma_setup); __setup("iop13xx_init_uart", iop13xx_init_uart_setup); __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup);
gpl-2.0
NoelMacwan/SXDHuashan
arch/arm/mach-mxs/mach-apx4devkit.c
4730
7429
/* * Copyright (C) 2011-2012 * Lauri Hintsala, Bluegiga, <lauri.hintsala@bluegiga.com> * Veli-Pekka Peltola, Bluegiga, <veli-pekka.peltola@bluegiga.com> * * based on: mach-mx28evk.c * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/clk.h> #include <linux/i2c.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/micrel_phy.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/digctl.h> #include <mach/iomux-mx28.h> #include "devices-mx28.h" #define APX4DEVKIT_GPIO_USERLED MXS_GPIO_NR(3, 28) static const iomux_cfg_t apx4devkit_pads[] __initconst = { /* duart */ MX28_PAD_PWM0__DUART_RX | MXS_PAD_CTRL, MX28_PAD_PWM1__DUART_TX | MXS_PAD_CTRL, /* auart0 */ MX28_PAD_AUART0_RX__AUART0_RX | MXS_PAD_CTRL, MX28_PAD_AUART0_TX__AUART0_TX | MXS_PAD_CTRL, MX28_PAD_AUART0_CTS__AUART0_CTS | MXS_PAD_CTRL, MX28_PAD_AUART0_RTS__AUART0_RTS | MXS_PAD_CTRL, /* auart1 */ MX28_PAD_AUART1_RX__AUART1_RX | MXS_PAD_CTRL, MX28_PAD_AUART1_TX__AUART1_TX | MXS_PAD_CTRL, /* auart2 */ MX28_PAD_SSP2_SCK__AUART2_RX | MXS_PAD_CTRL, MX28_PAD_SSP2_MOSI__AUART2_TX | MXS_PAD_CTRL, /* auart3 */ MX28_PAD_SSP2_MISO__AUART3_RX | MXS_PAD_CTRL, MX28_PAD_SSP2_SS0__AUART3_TX | MXS_PAD_CTRL, #define MXS_PAD_FEC (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP) /* fec0 */ 
MX28_PAD_ENET0_MDC__ENET0_MDC | MXS_PAD_FEC, MX28_PAD_ENET0_MDIO__ENET0_MDIO | MXS_PAD_FEC, MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | MXS_PAD_FEC, MX28_PAD_ENET0_RXD0__ENET0_RXD0 | MXS_PAD_FEC, MX28_PAD_ENET0_RXD1__ENET0_RXD1 | MXS_PAD_FEC, MX28_PAD_ENET0_TX_EN__ENET0_TX_EN | MXS_PAD_FEC, MX28_PAD_ENET0_TXD0__ENET0_TXD0 | MXS_PAD_FEC, MX28_PAD_ENET0_TXD1__ENET0_TXD1 | MXS_PAD_FEC, MX28_PAD_ENET_CLK__CLKCTRL_ENET | MXS_PAD_FEC, /* i2c */ MX28_PAD_I2C0_SCL__I2C0_SCL, MX28_PAD_I2C0_SDA__I2C0_SDA, /* mmc0 */ MX28_PAD_SSP0_DATA0__SSP0_D0 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA1__SSP0_D1 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA2__SSP0_D2 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA3__SSP0_D3 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA4__SSP0_D4 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA5__SSP0_D5 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA6__SSP0_D6 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA7__SSP0_D7 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_CMD__SSP0_CMD | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), MX28_PAD_SSP0_SCK__SSP0_SCK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), /* led */ MX28_PAD_PWM3__GPIO_3_28 | MXS_PAD_CTRL, /* saif0 & saif1 */ MX28_PAD_SAIF0_MCLK__SAIF0_MCLK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), }; /* led */ static const struct gpio_led apx4devkit_leds[] __initconst = { { .name = "user-led", .default_trigger = "heartbeat", .gpio = 
APX4DEVKIT_GPIO_USERLED, }, }; static const struct gpio_led_platform_data apx4devkit_led_data __initconst = { .leds = apx4devkit_leds, .num_leds = ARRAY_SIZE(apx4devkit_leds), }; static const struct fec_platform_data mx28_fec_pdata __initconst = { .phy = PHY_INTERFACE_MODE_RMII, }; static const struct mxs_mmc_platform_data apx4devkit_mmc_pdata __initconst = { .wp_gpio = -EINVAL, .flags = SLOTF_4_BIT_CAPABLE, }; static const struct i2c_board_info apx4devkit_i2c_boardinfo[] __initconst = { { I2C_BOARD_INFO("sgtl5000", 0x0a) }, /* ASoC */ { I2C_BOARD_INFO("pcf8563", 0x51) }, /* RTC */ }; #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || \ defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) static struct regulator_consumer_supply apx4devkit_audio_consumer_supplies[] = { REGULATOR_SUPPLY("VDDA", "0-000a"), REGULATOR_SUPPLY("VDDIO", "0-000a"), }; static struct regulator_init_data apx4devkit_vdd_reg_init_data = { .constraints = { .name = "3V3", .always_on = 1, }, .consumer_supplies = apx4devkit_audio_consumer_supplies, .num_consumer_supplies = ARRAY_SIZE(apx4devkit_audio_consumer_supplies), }; static struct fixed_voltage_config apx4devkit_vdd_pdata = { .supply_name = "board-3V3", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 1, .init_data = &apx4devkit_vdd_reg_init_data, }; static struct platform_device apx4devkit_voltage_regulator = { .name = "reg-fixed-voltage", .id = -1, .num_resources = 0, .dev = { .platform_data = &apx4devkit_vdd_pdata, }, }; static void __init apx4devkit_add_regulators(void) { platform_device_register(&apx4devkit_voltage_regulator); } #else static void __init apx4devkit_add_regulators(void) {} #endif static const struct mxs_saif_platform_data apx4devkit_mxs_saif_pdata[] __initconst = { /* working on EXTMSTR0 mode (saif0 master, saif1 slave) */ { .master_mode = 1, .master_id = 0, }, { .master_mode = 0, .master_id = 0, }, }; static int apx4devkit_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MICREL_PHY_50MHZ_CLK; return 0; } static void 
__init apx4devkit_init(void) { mxs_iomux_setup_multiple_pads(apx4devkit_pads, ARRAY_SIZE(apx4devkit_pads)); mx28_add_duart(); mx28_add_auart0(); mx28_add_auart1(); mx28_add_auart2(); mx28_add_auart3(); /* * Register fixup for the Micrel KS8031 PHY clock * (shares same ID with KS8051) */ phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK, apx4devkit_phy_fixup); mx28_add_fec(0, &mx28_fec_pdata); mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata); gpio_led_register_device(0, &apx4devkit_led_data); mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); mx28_add_saif(0, &apx4devkit_mxs_saif_pdata[0]); mx28_add_saif(1, &apx4devkit_mxs_saif_pdata[1]); apx4devkit_add_regulators(); mx28_add_mxs_i2c(0); i2c_register_board_info(0, apx4devkit_i2c_boardinfo, ARRAY_SIZE(apx4devkit_i2c_boardinfo)); mxs_add_platform_device("mxs-sgtl5000", 0, NULL, 0, NULL, 0); } static void __init apx4devkit_timer_init(void) { mx28_clocks_init(); } static struct sys_timer apx4devkit_timer = { .init = apx4devkit_timer_init, }; MACHINE_START(APX4DEVKIT, "Bluegiga APX4 Development Kit") .map_io = mx28_map_io, .init_irq = mx28_init_irq, .timer = &apx4devkit_timer, .init_machine = apx4devkit_init, .restart = mxs_restart, MACHINE_END
gpl-2.0
kecinzer/kernel_opo_kecinzer
arch/arm/mach-at91/board-yl-9200.c
4730
19175
/* * linux/arch/arm/mach-at91/board-yl-9200.c * * Adapted from various board files in arch/arm/mach-at91 * * Modifications for YL-9200 platform: * Copyright (C) 2007 S. Birtles * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/mtd/physmap.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91rm9200_mc.h> #include <mach/at91_ramc.h> #include <mach/cpu.h> #include "generic.h" static void __init yl9200_init_early(void) { /* Set cpu type: PQFP */ at91rm9200_set_type(ARCH_REVISON_9200_PQFP); /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* Setup the LEDs D2=PB17 (timer), D3=PB16 (cpu) */ at91_init_leds(AT91_PIN_PB16, AT91_PIN_PB17); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART1 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART0 on ttyS2. (Rx & Tx only to JP3) */ at91_register_uart(AT91RM9200_ID_US0, 2, 0); /* USART3 on ttyS3. (Rx, Tx, RTS - RS485 interface) */ at91_register_uart(AT91RM9200_ID_US3, 3, ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * LEDs */ static struct gpio_led yl9200_leds[] = { { /* D2 */ .name = "led2", .gpio = AT91_PIN_PB17, .active_low = 1, .default_trigger = "timer", }, { /* D3 */ .name = "led3", .gpio = AT91_PIN_PB16, .active_low = 1, .default_trigger = "heartbeat", }, { /* D4 */ .name = "led4", .gpio = AT91_PIN_PB15, .active_low = 1, }, { /* D5 */ .name = "led5", .gpio = AT91_PIN_PB8, .active_low = 1, } }; /* * Ethernet */ static struct macb_platform_data __initdata yl9200_eth_data = { .phy_irq_pin = AT91_PIN_PB28, .is_rmii = 1, }; /* * USB Host */ static struct at91_usbh_data __initdata yl9200_usbh_data = { .ports = 1, /* PQFP version of AT91RM9200 */ .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device */ static struct at91_udc_data __initdata yl9200_udc_data = { .pullup_pin = AT91_PIN_PC4, .vbus_pin = AT91_PIN_PC5, .pullup_active_low = 1, /* Active Low due to PNP transistor (pg 7) */ }; /* * MMC */ static struct at91_mmc_data __initdata yl9200_mmc_data = { .det_pin = AT91_PIN_PB9, .wire4 = 1, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; /* * NAND Flash */ static struct mtd_partition __initdata yl9200_nand_partition[] = { { .name = "AT91 NAND partition 1, boot", .offset = 0, .size = SZ_256K }, { .name = "AT91 NAND partition 2, kernel", .offset = MTDPART_OFS_NXTBLK, .size = (2 * SZ_1M) - SZ_256K }, { .name = "AT91 NAND partition 3, filesystem", .offset = MTDPART_OFS_NXTBLK, .size = 14 * SZ_1M }, { .name = "AT91 NAND partition 4, storage", .offset = MTDPART_OFS_NXTBLK, .size = SZ_16M }, { .name 
= "AT91 NAND partition 5, ext-fs", .offset = MTDPART_OFS_NXTBLK, .size = SZ_32M } }; static struct atmel_nand_data __initdata yl9200_nand_data = { .ale = 6, .cle = 7, .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */ .enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */ .ecc_mode = NAND_ECC_SOFT, .parts = yl9200_nand_partition, .num_parts = ARRAY_SIZE(yl9200_nand_partition), }; /* * NOR Flash */ #define YL9200_FLASH_BASE AT91_CHIPSELECT_0 #define YL9200_FLASH_SIZE SZ_16M static struct mtd_partition yl9200_flash_partitions[] = { { .name = "Bootloader", .offset = 0, .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "Kernel", .offset = MTDPART_OFS_NXTBLK, .size = (2 * SZ_1M) - SZ_256K }, { .name = "Filesystem", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL } }; static struct physmap_flash_data yl9200_flash_data = { .width = 2, .parts = yl9200_flash_partitions, .nr_parts = ARRAY_SIZE(yl9200_flash_partitions), }; static struct resource yl9200_flash_resources[] = { { .start = YL9200_FLASH_BASE, .end = YL9200_FLASH_BASE + YL9200_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device yl9200_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &yl9200_flash_data, }, .resource = yl9200_flash_resources, .num_resources = ARRAY_SIZE(yl9200_flash_resources), }; /* * I2C (TWI) */ static struct i2c_board_info __initdata yl9200_i2c_devices[] = { { /* EEPROM */ I2C_BOARD_INFO("24c128", 0x50), } }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button yl9200_buttons[] = { { .gpio = AT91_PIN_PA24, .code = BTN_2, .desc = "SW2", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB1, .code = BTN_3, .desc = "SW3", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB2, .code = BTN_4, .desc = "SW4", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB6, .code = BTN_5, .desc = "SW5", .active_low = 1, .wakeup = 1, } }; static 
struct gpio_keys_platform_data yl9200_button_data = { .buttons = yl9200_buttons, .nbuttons = ARRAY_SIZE(yl9200_buttons), }; static struct platform_device yl9200_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &yl9200_button_data, } }; static void __init yl9200_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PA24, 1); /* SW2 */ at91_set_deglitch(AT91_PIN_PA24, 1); at91_set_gpio_input(AT91_PIN_PB1, 1); /* SW3 */ at91_set_deglitch(AT91_PIN_PB1, 1); at91_set_gpio_input(AT91_PIN_PB2, 1); /* SW4 */ at91_set_deglitch(AT91_PIN_PB2, 1); at91_set_gpio_input(AT91_PIN_PB6, 1); /* SW5 */ at91_set_deglitch(AT91_PIN_PB6, 1); /* Enable buttons (Sheet 5) */ at91_set_gpio_output(AT91_PIN_PB7, 1); platform_device_register(&yl9200_button_device); } #else static void __init yl9200_add_device_buttons(void) {} #endif /* * Touchscreen */ #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) static int ads7843_pendown_state(void) { return !at91_get_gpio_value(AT91_PIN_PB11); /* Touchscreen PENIRQ */ } static struct ads7846_platform_data ads_info = { .model = 7843, .x_min = 150, .x_max = 3830, .y_min = 190, .y_max = 3830, .vref_delay_usecs = 100, /* For a 8" touch-screen */ // .x_plate_ohms = 603, // .y_plate_ohms = 332, /* For a 10.4" touch-screen */ // .x_plate_ohms = 611, // .y_plate_ohms = 325, .x_plate_ohms = 576, .y_plate_ohms = 366, .pressure_max = 15000, /* generally nonsense on the 7843 */ .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7843_pendown_state, }; static void __init yl9200_add_device_ts(void) { at91_set_gpio_input(AT91_PIN_PB11, 1); /* Touchscreen interrupt pin */ at91_set_gpio_input(AT91_PIN_PB10, 1); /* Touchscreen BUSY signal - not used! 
*/ } #else static void __init yl9200_add_device_ts(void) {} #endif /* * SPI devices */ static struct spi_board_info yl9200_spi_devices[] = { #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) { /* Touchscreen */ .modalias = "ads7846", .chip_select = 0, .max_speed_hz = 5000 * 26, .platform_data = &ads_info, .irq = AT91_PIN_PB11, }, #endif { /* CAN */ .modalias = "mcp2510", .chip_select = 1, .max_speed_hz = 25000 * 26, .irq = AT91_PIN_PC0, } }; /* * LCD / VGA * * EPSON S1D13806 FB (discontinued chip) * EPSON S1D13506 FB */ #if defined(CONFIG_FB_S1D13XXX) || defined(CONFIG_FB_S1D13XXX_MODULE) #include <video/s1d13xxxfb.h> static void yl9200_init_video(void) { /* NWAIT Signal */ at91_set_A_periph(AT91_PIN_PC6, 0); /* Initialization of the Static Memory Controller for Chip Select 2 */ at91_ramc_write(0, AT91_SMC_CSR(2), AT91_SMC_DBW_16 /* 16 bit */ | AT91_SMC_WSEN | AT91_SMC_NWS_(0x4) /* wait states */ | AT91_SMC_TDF_(0x100) /* float time */ ); } static struct s1d13xxxfb_regval yl9200_s1dfb_initregs[] = { {S1DREG_MISC, 0x00}, /* Miscellaneous Register*/ {S1DREG_COM_DISP_MODE, 0x01}, /* Display Mode Register, LCD only*/ {S1DREG_GPIO_CNF0, 0x00}, /* General IO Pins Configuration Register*/ {S1DREG_GPIO_CTL0, 0x00}, /* General IO Pins Control Register*/ {S1DREG_CLK_CNF, 0x11}, /* Memory Clock Configuration Register*/ {S1DREG_LCD_CLK_CNF, 0x10}, /* LCD Pixel Clock Configuration Register*/ {S1DREG_CRT_CLK_CNF, 0x12}, /* CRT/TV Pixel Clock Configuration Register*/ {S1DREG_MPLUG_CLK_CNF, 0x01}, /* MediaPlug Clock Configuration Register*/ {S1DREG_CPU2MEM_WST_SEL, 0x02}, /* CPU To Memory Wait State Select Register*/ {S1DREG_MEM_CNF, 0x00}, /* Memory Configuration Register*/ {S1DREG_SDRAM_REF_RATE, 0x04}, /* DRAM Refresh Rate Register, MCLK source*/ {S1DREG_SDRAM_TC0, 0x12}, /* DRAM Timings Control Register 0*/ {S1DREG_SDRAM_TC1, 0x02}, /* DRAM Timings Control Register 1*/ {S1DREG_PANEL_TYPE, 0x25}, /* Panel Type Register*/ {S1DREG_MOD_RATE, 
0x00}, /* MOD Rate Register*/ {S1DREG_LCD_DISP_HWIDTH, 0x4F}, /* LCD Horizontal Display Width Register*/ {S1DREG_LCD_NDISP_HPER, 0x13}, /* LCD Horizontal Non-Display Period Register*/ {S1DREG_TFT_FPLINE_START, 0x01}, /* TFT FPLINE Start Position Register*/ {S1DREG_TFT_FPLINE_PWIDTH, 0x0c}, /* TFT FPLINE Pulse Width Register*/ {S1DREG_LCD_DISP_VHEIGHT0, 0xDF}, /* LCD Vertical Display Height Register 0*/ {S1DREG_LCD_DISP_VHEIGHT1, 0x01}, /* LCD Vertical Display Height Register 1*/ {S1DREG_LCD_NDISP_VPER, 0x2c}, /* LCD Vertical Non-Display Period Register*/ {S1DREG_TFT_FPFRAME_START, 0x0a}, /* TFT FPFRAME Start Position Register*/ {S1DREG_TFT_FPFRAME_PWIDTH, 0x02}, /* TFT FPFRAME Pulse Width Register*/ {S1DREG_LCD_DISP_MODE, 0x05}, /* LCD Display Mode Register*/ {S1DREG_LCD_MISC, 0x01}, /* LCD Miscellaneous Register*/ {S1DREG_LCD_DISP_START0, 0x00}, /* LCD Display Start Address Register 0*/ {S1DREG_LCD_DISP_START1, 0x00}, /* LCD Display Start Address Register 1*/ {S1DREG_LCD_DISP_START2, 0x00}, /* LCD Display Start Address Register 2*/ {S1DREG_LCD_MEM_OFF0, 0x80}, /* LCD Memory Address Offset Register 0*/ {S1DREG_LCD_MEM_OFF1, 0x02}, /* LCD Memory Address Offset Register 1*/ {S1DREG_LCD_PIX_PAN, 0x03}, /* LCD Pixel Panning Register*/ {S1DREG_LCD_DISP_FIFO_HTC, 0x00}, /* LCD Display FIFO High Threshold Control Register*/ {S1DREG_LCD_DISP_FIFO_LTC, 0x00}, /* LCD Display FIFO Low Threshold Control Register*/ {S1DREG_CRT_DISP_HWIDTH, 0x4F}, /* CRT/TV Horizontal Display Width Register*/ {S1DREG_CRT_NDISP_HPER, 0x13}, /* CRT/TV Horizontal Non-Display Period Register*/ {S1DREG_CRT_HRTC_START, 0x01}, /* CRT/TV HRTC Start Position Register*/ {S1DREG_CRT_HRTC_PWIDTH, 0x0B}, /* CRT/TV HRTC Pulse Width Register*/ {S1DREG_CRT_DISP_VHEIGHT0, 0xDF}, /* CRT/TV Vertical Display Height Register 0*/ {S1DREG_CRT_DISP_VHEIGHT1, 0x01}, /* CRT/TV Vertical Display Height Register 1*/ {S1DREG_CRT_NDISP_VPER, 0x2B}, /* CRT/TV Vertical Non-Display Period Register*/ {S1DREG_CRT_VRTC_START, 
0x09}, /* CRT/TV VRTC Start Position Register*/ {S1DREG_CRT_VRTC_PWIDTH, 0x01}, /* CRT/TV VRTC Pulse Width Register*/ {S1DREG_TV_OUT_CTL, 0x18}, /* TV Output Control Register */ {S1DREG_CRT_DISP_MODE, 0x05}, /* CRT/TV Display Mode Register, 16BPP*/ {S1DREG_CRT_DISP_START0, 0x00}, /* CRT/TV Display Start Address Register 0*/ {S1DREG_CRT_DISP_START1, 0x00}, /* CRT/TV Display Start Address Register 1*/ {S1DREG_CRT_DISP_START2, 0x00}, /* CRT/TV Display Start Address Register 2*/ {S1DREG_CRT_MEM_OFF0, 0x80}, /* CRT/TV Memory Address Offset Register 0*/ {S1DREG_CRT_MEM_OFF1, 0x02}, /* CRT/TV Memory Address Offset Register 1*/ {S1DREG_CRT_PIX_PAN, 0x00}, /* CRT/TV Pixel Panning Register*/ {S1DREG_CRT_DISP_FIFO_HTC, 0x00}, /* CRT/TV Display FIFO High Threshold Control Register*/ {S1DREG_CRT_DISP_FIFO_LTC, 0x00}, /* CRT/TV Display FIFO Low Threshold Control Register*/ {S1DREG_LCD_CUR_CTL, 0x00}, /* LCD Ink/Cursor Control Register*/ {S1DREG_LCD_CUR_START, 0x01}, /* LCD Ink/Cursor Start Address Register*/ {S1DREG_LCD_CUR_XPOS0, 0x00}, /* LCD Cursor X Position Register 0*/ {S1DREG_LCD_CUR_XPOS1, 0x00}, /* LCD Cursor X Position Register 1*/ {S1DREG_LCD_CUR_YPOS0, 0x00}, /* LCD Cursor Y Position Register 0*/ {S1DREG_LCD_CUR_YPOS1, 0x00}, /* LCD Cursor Y Position Register 1*/ {S1DREG_LCD_CUR_BCTL0, 0x00}, /* LCD Ink/Cursor Blue Color 0 Register*/ {S1DREG_LCD_CUR_GCTL0, 0x00}, /* LCD Ink/Cursor Green Color 0 Register*/ {S1DREG_LCD_CUR_RCTL0, 0x00}, /* LCD Ink/Cursor Red Color 0 Register*/ {S1DREG_LCD_CUR_BCTL1, 0x1F}, /* LCD Ink/Cursor Blue Color 1 Register*/ {S1DREG_LCD_CUR_GCTL1, 0x3F}, /* LCD Ink/Cursor Green Color 1 Register*/ {S1DREG_LCD_CUR_RCTL1, 0x1F}, /* LCD Ink/Cursor Red Color 1 Register*/ {S1DREG_LCD_CUR_FIFO_HTC, 0x00}, /* LCD Ink/Cursor FIFO Threshold Register*/ {S1DREG_CRT_CUR_CTL, 0x00}, /* CRT/TV Ink/Cursor Control Register*/ {S1DREG_CRT_CUR_START, 0x01}, /* CRT/TV Ink/Cursor Start Address Register*/ {S1DREG_CRT_CUR_XPOS0, 0x00}, /* CRT/TV Cursor X Position 
Register 0*/ {S1DREG_CRT_CUR_XPOS1, 0x00}, /* CRT/TV Cursor X Position Register 1*/ {S1DREG_CRT_CUR_YPOS0, 0x00}, /* CRT/TV Cursor Y Position Register 0*/ {S1DREG_CRT_CUR_YPOS1, 0x00}, /* CRT/TV Cursor Y Position Register 1*/ {S1DREG_CRT_CUR_BCTL0, 0x00}, /* CRT/TV Ink/Cursor Blue Color 0 Register*/ {S1DREG_CRT_CUR_GCTL0, 0x00}, /* CRT/TV Ink/Cursor Green Color 0 Register*/ {S1DREG_CRT_CUR_RCTL0, 0x00}, /* CRT/TV Ink/Cursor Red Color 0 Register*/ {S1DREG_CRT_CUR_BCTL1, 0x1F}, /* CRT/TV Ink/Cursor Blue Color 1 Register*/ {S1DREG_CRT_CUR_GCTL1, 0x3F}, /* CRT/TV Ink/Cursor Green Color 1 Register*/ {S1DREG_CRT_CUR_RCTL1, 0x1F}, /* CRT/TV Ink/Cursor Red Color 1 Register*/ {S1DREG_CRT_CUR_FIFO_HTC, 0x00}, /* CRT/TV Ink/Cursor FIFO Threshold Register*/ {S1DREG_BBLT_CTL0, 0x00}, /* BitBlt Control Register 0*/ {S1DREG_BBLT_CTL1, 0x01}, /* BitBlt Control Register 1*/ {S1DREG_BBLT_CC_EXP, 0x00}, /* BitBlt ROP Code/Color Expansion Register*/ {S1DREG_BBLT_OP, 0x00}, /* BitBlt Operation Register*/ {S1DREG_BBLT_SRC_START0, 0x00}, /* BitBlt Source Start Address Register 0*/ {S1DREG_BBLT_SRC_START1, 0x00}, /* BitBlt Source Start Address Register 1*/ {S1DREG_BBLT_SRC_START2, 0x00}, /* BitBlt Source Start Address Register 2*/ {S1DREG_BBLT_DST_START0, 0x00}, /* BitBlt Destination Start Address Register 0*/ {S1DREG_BBLT_DST_START1, 0x00}, /* BitBlt Destination Start Address Register 1*/ {S1DREG_BBLT_DST_START2, 0x00}, /* BitBlt Destination Start Address Register 2*/ {S1DREG_BBLT_MEM_OFF0, 0x00}, /* BitBlt Memory Address Offset Register 0*/ {S1DREG_BBLT_MEM_OFF1, 0x00}, /* BitBlt Memory Address Offset Register 1*/ {S1DREG_BBLT_WIDTH0, 0x00}, /* BitBlt Width Register 0*/ {S1DREG_BBLT_WIDTH1, 0x00}, /* BitBlt Width Register 1*/ {S1DREG_BBLT_HEIGHT0, 0x00}, /* BitBlt Height Register 0*/ {S1DREG_BBLT_HEIGHT1, 0x00}, /* BitBlt Height Register 1*/ {S1DREG_BBLT_BGC0, 0x00}, /* BitBlt Background Color Register 0*/ {S1DREG_BBLT_BGC1, 0x00}, /* BitBlt Background Color Register 1*/ 
{S1DREG_BBLT_FGC0, 0x00}, /* BitBlt Foreground Color Register 0*/ {S1DREG_BBLT_FGC1, 0x00}, /* BitBlt Foreground Color Register 1*/ {S1DREG_LKUP_MODE, 0x00}, /* Look-Up Table Mode Register*/ {S1DREG_LKUP_ADDR, 0x00}, /* Look-Up Table Address Register*/ {S1DREG_PS_CNF, 0x00}, /* Power Save Configuration Register*/ {S1DREG_PS_STATUS, 0x00}, /* Power Save Status Register*/ {S1DREG_CPU2MEM_WDOGT, 0x00}, /* CPU-to-Memory Access Watchdog Timer Register*/ {S1DREG_COM_DISP_MODE, 0x01}, /* Display Mode Register, LCD only*/ }; static struct s1d13xxxfb_pdata yl9200_s1dfb_pdata = { .initregs = yl9200_s1dfb_initregs, .initregssize = ARRAY_SIZE(yl9200_s1dfb_initregs), .platform_init_video = yl9200_init_video, }; #define YL9200_FB_REG_BASE AT91_CHIPSELECT_7 #define YL9200_FB_VMEM_BASE YL9200_FB_REG_BASE + SZ_2M #define YL9200_FB_VMEM_SIZE SZ_2M static struct resource yl9200_s1dfb_resource[] = { [0] = { /* video mem */ .name = "s1d13xxxfb memory", .start = YL9200_FB_VMEM_BASE, .end = YL9200_FB_VMEM_BASE + YL9200_FB_VMEM_SIZE -1, .flags = IORESOURCE_MEM, }, [1] = { /* video registers */ .name = "s1d13xxxfb registers", .start = YL9200_FB_REG_BASE, .end = YL9200_FB_REG_BASE + SZ_512 -1, .flags = IORESOURCE_MEM, }, }; static u64 s1dfb_dmamask = DMA_BIT_MASK(32); static struct platform_device yl9200_s1dfb_device = { .name = "s1d13806fb", .id = -1, .dev = { .dma_mask = &s1dfb_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &yl9200_s1dfb_pdata, }, .resource = yl9200_s1dfb_resource, .num_resources = ARRAY_SIZE(yl9200_s1dfb_resource), }; void __init yl9200_add_device_video(void) { platform_device_register(&yl9200_s1dfb_device); } #else void __init yl9200_add_device_video(void) {} #endif static void __init yl9200_board_init(void) { /* Serial */ at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&yl9200_eth_data); /* USB Host */ at91_add_device_usbh(&yl9200_usbh_data); /* USB Device */ at91_add_device_udc(&yl9200_udc_data); /* I2C */ 
at91_add_device_i2c(yl9200_i2c_devices, ARRAY_SIZE(yl9200_i2c_devices)); /* MMC */ at91_add_device_mmc(0, &yl9200_mmc_data); /* NAND */ at91_add_device_nand(&yl9200_nand_data); /* NOR Flash */ platform_device_register(&yl9200_flash); #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) /* SPI */ at91_add_device_spi(yl9200_spi_devices, ARRAY_SIZE(yl9200_spi_devices)); /* Touchscreen */ yl9200_add_device_ts(); #endif /* LEDs. */ at91_gpio_leds(yl9200_leds, ARRAY_SIZE(yl9200_leds)); /* Push Buttons */ yl9200_add_device_buttons(); /* VGA */ yl9200_add_device_video(); } MACHINE_START(YL9200, "uCdragon YL-9200") /* Maintainer: S.Birtles */ .timer = &at91rm9200_timer, .map_io = at91_map_io, .init_early = yl9200_init_early, .init_irq = at91_init_irq_default, .init_machine = yl9200_board_init, MACHINE_END
gpl-2.0
vanyasvl/android_kernel_samsung_picassoeur
arch/arm/mach-at91/board-cpu9krea.c
4730
8792
/* * linux/arch/arm/mach-at91/board-cpu9krea.c * * Copyright (C) 2005 SAN People * Copyright (C) 2006 Atmel * Copyright (C) 2009 Eric Benard - eric@eukrea.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/mtd/physmap.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include <mach/at91sam9260_matrix.h> #include <mach/at91_matrix.h> #include "sam9_smc.h" #include "generic.h" static void __init cpu9krea_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* DGBU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART2 on ttyS3. 
(Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART3 on ttyS4. (Rx, Tx) */ at91_register_uart(AT91SAM9260_ID_US3, 4, 0); /* USART4 on ttyS5. (Rx, Tx) */ at91_register_uart(AT91SAM9260_ID_US4, 5, 0); /* USART5 on ttyS6. (Rx, Tx) */ at91_register_uart(AT91SAM9260_ID_US5, 6, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB Host port */ static struct at91_usbh_data __initdata cpu9krea_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device port */ static struct at91_udc_data __initdata cpu9krea_udc_data = { .vbus_pin = AT91_PIN_PC8, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; /* * MACB Ethernet device */ static struct macb_platform_data __initdata cpu9krea_macb_data = { .phy_irq_pin = -EINVAL, .is_rmii = 1, }; /* * NAND flash */ static struct atmel_nand_data __initdata cpu9krea_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, .det_pin = -EINVAL, .ecc_mode = NAND_ECC_SOFT, }; #ifdef CONFIG_MACH_CPU9260 static struct sam9_smc_config __initdata cpu9krea_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; #else static struct sam9_smc_config __initdata cpu9krea_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 2, .ncs_write_setup = 0, .nwe_setup = 2, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 7, .write_cycle = 7, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 3, }; #endif static void __init cpu9krea_add_device_nand(void) { sam9_smc_configure(0, 3, &cpu9krea_nand_smc_config); 
at91_add_device_nand(&cpu9krea_nand_data); } /* * NOR flash */ static struct physmap_flash_data cpuat9260_nor_data = { .width = 2, }; #define NOR_BASE AT91_CHIPSELECT_0 #define NOR_SIZE SZ_64M static struct resource nor_flash_resources[] = { { .start = NOR_BASE, .end = NOR_BASE + NOR_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device cpu9krea_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &cpuat9260_nor_data, }, .resource = nor_flash_resources, .num_resources = ARRAY_SIZE(nor_flash_resources), }; #ifdef CONFIG_MACH_CPU9260 static struct sam9_smc_config __initdata cpu9krea_nor_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 10, .nrd_pulse = 10, .ncs_write_pulse = 6, .nwe_pulse = 6, .read_cycle = 12, .write_cycle = 8, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_16, .tdf_cycles = 2, }; #else static struct sam9_smc_config __initdata cpu9krea_nor_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 13, .nrd_pulse = 13, .ncs_write_pulse = 8, .nwe_pulse = 8, .read_cycle = 15, .write_cycle = 10, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_16, .tdf_cycles = 2, }; #endif static __init void cpu9krea_add_device_nor(void) { unsigned long csa; csa = at91_matrix_read(AT91_MATRIX_EBICSA); at91_matrix_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_VDDIOMSEL_3_3V); /* configure chip-select 0 (NOR) */ sam9_smc_configure(0, 0, &cpu9krea_nor_smc_config); platform_device_register(&cpu9krea_nor_flash); } /* * LEDs */ static struct gpio_led cpu9krea_leds[] = { { /* LED1 */ .name = "LED1", .gpio = AT91_PIN_PC11, .active_low = 1, .default_trigger = "timer", }, { /* LED2 */ .name = "LED2", .gpio = AT91_PIN_PC12, .active_low = 1, .default_trigger = "heartbeat", }, { /* LED3 */ .name = "LED3", .gpio = 
AT91_PIN_PC7, .active_low = 1, .default_trigger = "none", }, { /* LED4 */ .name = "LED4", .gpio = AT91_PIN_PC9, .active_low = 1, .default_trigger = "none", } }; static struct i2c_board_info __initdata cpu9krea_i2c_devices[] = { { I2C_BOARD_INFO("rtc-ds1307", 0x68), .type = "ds1339", }, }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button cpu9krea_buttons[] = { { .gpio = AT91_PIN_PC3, .code = BTN_0, .desc = "BP1", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB20, .code = BTN_1, .desc = "BP2", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data cpu9krea_button_data = { .buttons = cpu9krea_buttons, .nbuttons = ARRAY_SIZE(cpu9krea_buttons), }; static struct platform_device cpu9krea_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &cpu9krea_button_data, } }; static void __init cpu9krea_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PC3, 1); /* BP1 */ at91_set_deglitch(AT91_PIN_PC3, 1); at91_set_gpio_input(AT91_PIN_PB20, 1); /* BP2 */ at91_set_deglitch(AT91_PIN_PB20, 1); platform_device_register(&cpu9krea_button_device); } #else static void __init cpu9krea_add_device_buttons(void) { } #endif /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata cpu9krea_mmc_data = { .slot_b = 0, .wire4 = 1, .det_pin = AT91_PIN_PA29, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; static void __init cpu9krea_board_init(void) { /* NOR */ cpu9krea_add_device_nor(); /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&cpu9krea_usbh_data); /* USB Device */ at91_add_device_udc(&cpu9krea_udc_data); /* NAND */ cpu9krea_add_device_nand(); /* Ethernet */ at91_add_device_eth(&cpu9krea_macb_data); /* MMC */ at91_add_device_mmc(0, &cpu9krea_mmc_data); /* I2C */ at91_add_device_i2c(cpu9krea_i2c_devices, ARRAY_SIZE(cpu9krea_i2c_devices)); /* LEDs */ at91_gpio_leds(cpu9krea_leds, ARRAY_SIZE(cpu9krea_leds)); /* Push Buttons 
*/ cpu9krea_add_device_buttons(); } #ifdef CONFIG_MACH_CPU9260 MACHINE_START(CPUAT9260, "Eukrea CPU9260") #else MACHINE_START(CPUAT9G20, "Eukrea CPU9G20") #endif /* Maintainer: Eric Benard - EUKREA Electromatique */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = cpu9krea_init_early, .init_irq = at91_init_irq_default, .init_machine = cpu9krea_board_init, MACHINE_END
gpl-2.0
intervigilium/android_kernel_samsung_klte
arch/arm/mach-ixp4xx/goramo_mlr.c
4730
12484
/* * Goramo MultiLink router platform code * Copyright (C) 2006-2009 Krzysztof Halasa <khc@pm.waw.pl> */ #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/i2c-gpio.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/serial_8250.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/pci.h> #define SLOT_ETHA 0x0B /* IDSEL = AD21 */ #define SLOT_ETHB 0x0C /* IDSEL = AD20 */ #define SLOT_MPCI 0x0D /* IDSEL = AD19 */ #define SLOT_NEC 0x0E /* IDSEL = AD18 */ /* GPIO lines */ #define GPIO_SCL 0 #define GPIO_SDA 1 #define GPIO_STR 2 #define GPIO_IRQ_NEC 3 #define GPIO_IRQ_ETHA 4 #define GPIO_IRQ_ETHB 5 #define GPIO_HSS0_DCD_N 6 #define GPIO_HSS1_DCD_N 7 #define GPIO_UART0_DCD 8 #define GPIO_UART1_DCD 9 #define GPIO_HSS0_CTS_N 10 #define GPIO_HSS1_CTS_N 11 #define GPIO_IRQ_MPCI 12 #define GPIO_HSS1_RTS_N 13 #define GPIO_HSS0_RTS_N 14 /* GPIO15 is not connected */ /* Control outputs from 74HC4094 */ #define CONTROL_HSS0_CLK_INT 0 #define CONTROL_HSS1_CLK_INT 1 #define CONTROL_HSS0_DTR_N 2 #define CONTROL_HSS1_DTR_N 3 #define CONTROL_EXT 4 #define CONTROL_AUTO_RESET 5 #define CONTROL_PCI_RESET_N 6 #define CONTROL_EEPROM_WC_N 7 /* offsets from start of flash ROM = 0x50000000 */ #define CFG_ETH0_ADDRESS 0x40 /* 6 bytes */ #define CFG_ETH1_ADDRESS 0x46 /* 6 bytes */ #define CFG_REV 0x4C /* u32 */ #define CFG_SDRAM_SIZE 0x50 /* u32 */ #define CFG_SDRAM_CONF 0x54 /* u32 */ #define CFG_SDRAM_MODE 0x58 /* u32 */ #define CFG_SDRAM_REFRESH 0x5C /* u32 */ #define CFG_HW_BITS 0x60 /* u32 */ #define CFG_HW_USB_PORTS 0x00000007 /* 0 = no NEC chip, 1-5 = ports # */ #define CFG_HW_HAS_PCI_SLOT 0x00000008 #define CFG_HW_HAS_ETH0 0x00000010 #define CFG_HW_HAS_ETH1 0x00000020 #define CFG_HW_HAS_HSS0 0x00000040 #define CFG_HW_HAS_HSS1 0x00000080 #define CFG_HW_HAS_UART0 0x00000100 #define CFG_HW_HAS_UART1 0x00000200 #define CFG_HW_HAS_EEPROM 0x00000400 #define 
FLASH_CMD_READ_ARRAY 0xFF #define FLASH_CMD_READ_ID 0x90 #define FLASH_SER_OFF 0x102 /* 0x81 in 16-bit mode */ static u32 hw_bits = 0xFFFFFFFD; /* assume all hardware present */; static u8 control_value; static void set_scl(u8 value) { gpio_line_set(GPIO_SCL, !!value); udelay(3); } static void set_sda(u8 value) { gpio_line_set(GPIO_SDA, !!value); udelay(3); } static void set_str(u8 value) { gpio_line_set(GPIO_STR, !!value); udelay(3); } static inline void set_control(int line, int value) { if (value) control_value |= (1 << line); else control_value &= ~(1 << line); } static void output_control(void) { int i; gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); for (i = 0; i < 8; i++) { set_scl(0); set_sda(control_value & (0x80 >> i)); /* MSB first */ set_scl(1); /* active edge */ } set_str(1); set_str(0); set_scl(0); set_sda(1); /* Be ready for START */ set_scl(1); } static void (*set_carrier_cb_tab[2])(void *pdev, int carrier); static int hss_set_clock(int port, unsigned int clock_type) { int ctrl_int = port ? CONTROL_HSS1_CLK_INT : CONTROL_HSS0_CLK_INT; switch (clock_type) { case CLOCK_DEFAULT: case CLOCK_EXT: set_control(ctrl_int, 0); output_control(); return CLOCK_EXT; case CLOCK_INT: set_control(ctrl_int, 1); output_control(); return CLOCK_INT; default: return -EINVAL; } } static irqreturn_t hss_dcd_irq(int irq, void *pdev) { int i, port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N)); gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb_tab[port](pdev, !i); return IRQ_HANDLED; } static int hss_open(int port, void *pdev, void (*set_carrier_cb)(void *pdev, int carrier)) { int i, irq; if (!port) irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N); else irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N); gpio_line_get(port ? 
GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb(pdev, !i); set_carrier_cb_tab[!!port] = set_carrier_cb; if ((i = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", pdev)) != 0) { printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n", irq, i); return i; } set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0); return 0; } static void hss_close(int port, void *pdev) { free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) : IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev); set_carrier_cb_tab[!!port] = NULL; /* catch bugs */ set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1); } /* Flash memory */ static struct flash_platform_data flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device device_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &flash_data }, .num_resources = 1, .resource = &flash_resource, }; /* I^2C interface */ static struct i2c_gpio_platform_data i2c_data = { .sda_pin = GPIO_SDA, .scl_pin = GPIO_SCL, }; static struct platform_device device_i2c = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_data }, }; /* IXP425 2 UART ports */ static struct resource uart_resources[] = { { .start = IXP4XX_UART1_BASE_PHYS, .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, } }; static struct plat_serial8250_port uart_data[] = { { .mapbase = IXP4XX_UART1_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART2_BASE_VIRT + 
REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device device_uarts = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev.platform_data = uart_data, .num_resources = 2, .resource = uart_resources, }; /* Built-in 10/100 Ethernet MAC interfaces */ static struct eth_plat_info eth_plat[] = { { .phy = 0, .rxq = 3, .txreadyq = 32, }, { .phy = 1, .rxq = 4, .txreadyq = 33, } }; static struct platform_device device_eth_tab[] = { { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEB, .dev.platform_data = eth_plat, }, { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEC, .dev.platform_data = eth_plat + 1, } }; /* IXP425 2 synchronous serial ports */ static struct hss_plat_info hss_plat[] = { { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 34, }, { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 35, } }; static struct platform_device device_hss_tab[] = { { .name = "ixp4xx_hss", .id = 0, .dev.platform_data = hss_plat, }, { .name = "ixp4xx_hss", .id = 1, .dev.platform_data = hss_plat + 1, } }; static struct platform_device *device_tab[6] __initdata = { &device_flash, /* index 0 */ }; static inline u8 __init flash_readb(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readb(flash + addr); #else return __raw_readb(flash + (addr ^ 3)); #endif } static inline u16 __init flash_readw(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readw(flash + addr); #else return __raw_readw(flash + (addr ^ 2)); #endif } static void __init gmlr_init(void) { u8 __iomem *flash; int i, devices = 1; /* flash */ ixp4xx_sys_init(); if ((flash = ioremap(IXP4XX_EXP_BUS_BASE_PHYS, 0x80)) == NULL) printk(KERN_ERR "goramo-mlr: unable to access system" " configuration data\n"); else { system_rev = __raw_readl(flash + CFG_REV); hw_bits = __raw_readl(flash + CFG_HW_BITS); for (i = 0; i < ETH_ALEN; i++) { 
eth_plat[0].hwaddr[i] = flash_readb(flash, CFG_ETH0_ADDRESS + i); eth_plat[1].hwaddr[i] = flash_readb(flash, CFG_ETH1_ADDRESS + i); } __raw_writew(FLASH_CMD_READ_ID, flash); system_serial_high = flash_readw(flash, FLASH_SER_OFF); system_serial_high <<= 16; system_serial_high |= flash_readw(flash, FLASH_SER_OFF + 2); system_serial_low = flash_readw(flash, FLASH_SER_OFF + 4); system_serial_low <<= 16; system_serial_low |= flash_readw(flash, FLASH_SER_OFF + 6); __raw_writew(FLASH_CMD_READ_ARRAY, flash); iounmap(flash); } switch (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) { case CFG_HW_HAS_UART0: memset(&uart_data[1], 0, sizeof(uart_data[1])); device_uarts.num_resources = 1; break; case CFG_HW_HAS_UART1: device_uarts.dev.platform_data = &uart_data[1]; device_uarts.resource = &uart_resources[1]; device_uarts.num_resources = 1; break; } if (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) device_tab[devices++] = &device_uarts; /* max index 1 */ if (hw_bits & CFG_HW_HAS_ETH0) device_tab[devices++] = &device_eth_tab[0]; /* max index 2 */ if (hw_bits & CFG_HW_HAS_ETH1) device_tab[devices++] = &device_eth_tab[1]; /* max index 3 */ if (hw_bits & CFG_HW_HAS_HSS0) device_tab[devices++] = &device_hss_tab[0]; /* max index 4 */ if (hw_bits & CFG_HW_HAS_HSS1) device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */ if (hw_bits & CFG_HW_HAS_EEPROM) device_tab[devices++] = &device_i2c; /* max index 6 */ gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_STR, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); set_control(CONTROL_HSS0_DTR_N, 1); set_control(CONTROL_HSS1_DTR_N, 1); 
set_control(CONTROL_EEPROM_WC_N, 1); set_control(CONTROL_PCI_RESET_N, 1); output_control(); msleep(1); /* Wait for PCI devices to initialize */ flash_resource.start = IXP4XX_EXP_BUS_BASE(0); flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; platform_add_devices(device_tab, devices); } #ifdef CONFIG_PCI static void __init gmlr_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static void __init gmlr_pci_postinit(void) { if ((hw_bits & CFG_HW_USB_PORTS) >= 2 && (hw_bits & CFG_HW_USB_PORTS) < 5) { /* need to adjust number of USB ports on NEC chip */ u32 value, addr = BIT(32 - SLOT_NEC) | 0xE0; if (!ixp4xx_pci_read(addr, NP_CMD_CONFIGREAD, &value)) { value &= ~7; value |= (hw_bits & CFG_HW_USB_PORTS); ixp4xx_pci_write(addr, NP_CMD_CONFIGWRITE, value); } } } static int __init gmlr_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch(slot) { case SLOT_ETHA: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA); case SLOT_ETHB: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB); case SLOT_NEC: return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC); default: return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI); } } static struct hw_pci gmlr_hw_pci __initdata = { .nr_controllers = 1, .preinit = gmlr_pci_preinit, .postinit = gmlr_pci_postinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = gmlr_map_irq, }; static int __init gmlr_pci_init(void) { if (machine_is_goramo_mlr() && (hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_PCI_SLOT))) pci_common_init(&gmlr_hw_pci); return 0; } subsys_initcall(gmlr_pci_init); #endif /* CONFIG_PCI */ MACHINE_START(GORAMO_MLR, "MultiLink") /* Maintainer: Krzysztof Halasa */ .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .timer = 
&ixp4xx_timer, .atag_offset = 0x100, .init_machine = gmlr_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END
gpl-2.0
ali-filth/android_kernel_samsung_msm8226
arch/arm/mach-at91/board-rm9200dk.c
4730
5799
/* * linux/arch/arm/mach-at91/board-rm9200dk.c * * Copyright (C) 2005 SAN People * * Epson S1D framebuffer glue code is: * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/mtd/physmap.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91rm9200_mc.h> #include <mach/at91_ramc.h> #include "generic.h" static void __init dk_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* Setup the LEDs */ at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART1 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static struct macb_platform_data __initdata dk_eth_data = { .phy_irq_pin = AT91_PIN_PC4, .is_rmii = 1, }; static struct at91_usbh_data __initdata dk_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_udc_data __initdata dk_udc_data = { .vbus_pin = AT91_PIN_PD4, .pullup_pin = AT91_PIN_PD5, }; static struct at91_cf_data __initdata dk_cf_data = { .irq_pin = -EINVAL, .det_pin = AT91_PIN_PB0, .vcc_pin = -EINVAL, .rst_pin = AT91_PIN_PC5, }; #ifndef CONFIG_MTD_AT91_DATAFLASH_CARD static struct at91_mmc_data __initdata dk_mmc_data = { .slot_b = 0, .wire4 = 1, .det_pin = -EINVAL, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; #endif static struct spi_board_info dk_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, }, { /* UR6HCPS2-SP40 PS2-to-SPI adapter */ .modalias = "ur6hcps2", .chip_select = 1, .max_speed_hz = 250 * 1000, }, { /* TLV1504 ADC, 4 channels, 10 bits; one is a temp sensor */ .modalias = "tlv1504", .chip_select = 2, .max_speed_hz = 20 * 1000 * 1000, }, #ifdef CONFIG_MTD_AT91_DATAFLASH_CARD { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 3, .max_speed_hz = 15 * 1000 * 1000, } #endif }; static struct i2c_board_info __initdata dk_i2c_devices[] = { { I2C_BOARD_INFO("ics1523", 0x26), }, { I2C_BOARD_INFO("x9429", 0x28), }, { I2C_BOARD_INFO("24c1024", 0x50), } }; static struct mtd_partition __initdata dk_nand_partition[] = { { .name = "NAND Partition 1", .offset = 0, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata dk_nand_data = { .ale = 22, .cle = 21, .det_pin = AT91_PIN_PB1, .rdy_pin = AT91_PIN_PC2, .enable_pin = -EINVAL, .ecc_mode = 
NAND_ECC_SOFT, .on_flash_bbt = 1, .parts = dk_nand_partition, .num_parts = ARRAY_SIZE(dk_nand_partition), }; #define DK_FLASH_BASE AT91_CHIPSELECT_0 #define DK_FLASH_SIZE SZ_2M static struct physmap_flash_data dk_flash_data = { .width = 2, }; static struct resource dk_flash_resource = { .start = DK_FLASH_BASE, .end = DK_FLASH_BASE + DK_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, }; static struct platform_device dk_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &dk_flash_data, }, .resource = &dk_flash_resource, .num_resources = 1, }; static struct gpio_led dk_leds[] = { { .name = "led0", .gpio = AT91_PIN_PB2, .active_low = 1, .default_trigger = "heartbeat", } }; static void __init dk_board_init(void) { /* Serial */ at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&dk_eth_data); /* USB Host */ at91_add_device_usbh(&dk_usbh_data); /* USB Device */ at91_add_device_udc(&dk_udc_data); at91_set_multi_drive(dk_udc_data.pullup_pin, 1); /* pullup_pin is connected to reset */ /* Compact Flash */ at91_add_device_cf(&dk_cf_data); /* I2C */ at91_add_device_i2c(dk_i2c_devices, ARRAY_SIZE(dk_i2c_devices)); /* SPI */ at91_add_device_spi(dk_spi_devices, ARRAY_SIZE(dk_spi_devices)); #ifdef CONFIG_MTD_AT91_DATAFLASH_CARD /* DataFlash card */ at91_set_gpio_output(AT91_PIN_PB7, 0); #else /* MMC */ at91_set_gpio_output(AT91_PIN_PB7, 1); /* this MMC card slot can optionally use SPI signaling (CS3). */ at91_add_device_mmc(0, &dk_mmc_data); #endif /* NAND */ at91_add_device_nand(&dk_nand_data); /* NOR Flash */ platform_device_register(&dk_flash); /* LEDs */ at91_gpio_leds(dk_leds, ARRAY_SIZE(dk_leds)); /* VGA */ // dk_add_device_video(); } MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK") /* Maintainer: SAN People/Atmel */ .timer = &at91rm9200_timer, .map_io = at91_map_io, .init_early = dk_init_early, .init_irq = at91_init_irq_default, .init_machine = dk_board_init, MACHINE_END
gpl-2.0
tyler6389/android_kernel_samsung_v700
fs/ext4/hash.c
4986
4448
/* * linux/fs/ext4/hash.c * * Copyright (C) 2002 by Theodore Ts'o * * This file is released under the GPL v2. * * This file may be redistributed under the terms of the GNU Public * License. */ #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/cryptohash.h> #include "ext4.h" #define DELTA 0x9E3779B9 static void TEA_transform(__u32 buf[4], __u32 const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while (--n); buf[0] += b0; buf[1] += b1; } /* The old legacy hash */ static __u32 dx_hack_hash_unsigned(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const unsigned char *ucp = (const unsigned char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static __u32 dx_hack_hash_signed(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const signed char *scp = (const signed char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const signed char *scp = (const signed char *) msg; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) scp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const unsigned char *ucp = (const unsigned char *) msg; pad = (__u32)len | ((__u32)len << 
8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) ucp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } /* * Returns the hash of a filename. If len is 0 and name is NULL, then * this function can be used to test whether or not a hash version is * supported. * * The seed is an 4 longword (32 bits) "secret" which can be used to * uniquify a hash. If the seed is all zero's, then some default seed * may be used. * * A particular hash version specifies whether or not the seed is * represented, and whether or not the returned hash is 32 bits or 64 * bits. 32 bit hashes will return 0 for the minor hash. */ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo) { __u32 hash; __u32 minor_hash = 0; const char *p; int i; __u32 in[8], buf[4]; void (*str2hashbuf)(const char *, int, __u32 *, int) = str2hashbuf_signed; /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; buf[2] = 0x98badcfe; buf[3] = 0x10325476; /* Check to see if the seed is all zero's */ if (hinfo->seed) { for (i = 0; i < 4; i++) { if (hinfo->seed[i]) break; } if (i < 4) memcpy(buf, hinfo->seed, sizeof(buf)); } switch (hinfo->hash_version) { case DX_HASH_LEGACY_UNSIGNED: hash = dx_hack_hash_unsigned(name, len); break; case DX_HASH_LEGACY: hash = dx_hack_hash_signed(name, len); break; case DX_HASH_HALF_MD4_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_HALF_MD4: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 8); half_md4_transform(buf, in); len -= 32; p += 32; } minor_hash = buf[2]; hash = buf[1]; break; case DX_HASH_TEA_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_TEA: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 4); TEA_transform(buf, in); len -= 16; p += 16; } hash = buf[0]; minor_hash = buf[1]; break; default: hinfo->hash 
= 0; return -1; } hash = hash & ~1; if (hash == (EXT4_HTREE_EOF << 1)) hash = (EXT4_HTREE_EOF-1) << 1; hinfo->hash = hash; hinfo->minor_hash = minor_hash; return 0; }
gpl-2.0
Tegra4/android_kernel_hp_maya
sound/soc/kirkwood/kirkwood-t5325.c
4986
3299
/* * kirkwood-t5325.c * * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/soc.h> #include <mach/kirkwood.h> #include <plat/audio.h> #include <asm/mach-types.h> #include "../codecs/alc5623.h" static int t5325_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; unsigned int freq; freq = params_rate(params) * 256; return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN); } static struct snd_soc_ops t5325_ops = { .hw_params = t5325_hw_params, }; static const struct snd_soc_dapm_widget t5325_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_SPK("Speaker", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), }; static const struct snd_soc_dapm_route t5325_route[] = { { "Headphone Jack", NULL, "HPL" }, { "Headphone Jack", NULL, "HPR" }, {"Speaker", NULL, "SPKOUT"}, {"Speaker", NULL, "SPKOUTN"}, { "MIC1", NULL, "Mic Jack" }, { "MIC2", NULL, "Mic Jack" }, }; static int t5325_dai_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_enable_pin(dapm, "Mic Jack"); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Speaker"); return 0; } static struct snd_soc_dai_link t5325_dai[] = { { .name = "ALC5621", .stream_name = "ALC5621 HiFi", .cpu_dai_name = "kirkwood-i2s", .platform_name = "kirkwood-pcm-audio", .codec_dai_name = "alc5621-hifi", .codec_name = "alc562x-codec.0-001a", .dai_fmt = 
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS, .ops = &t5325_ops, .init = t5325_dai_init, }, }; static struct snd_soc_card t5325 = { .name = "t5325", .owner = THIS_MODULE, .dai_link = t5325_dai, .num_links = ARRAY_SIZE(t5325_dai), .dapm_widgets = t5325_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(t5325_dapm_widgets), .dapm_routes = t5325_route, .num_dapm_routes = ARRAY_SIZE(t5325_route), }; static int __devinit t5325_probe(struct platform_device *pdev) { struct snd_soc_card *card = &t5325; int ret; card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } static int __devexit t5325_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } static struct platform_driver t5325_driver = { .driver = { .name = "t5325-audio", .owner = THIS_MODULE, }, .probe = t5325_probe, .remove = __devexit_p(t5325_remove), }; module_platform_driver(t5325_driver); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("ALSA SoC t5325 audio client"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:t5325-audio");
gpl-2.0
XePeleato/android_ALE-L21_kernel
arch/hexagon/mm/cache.c
5754
2735
/* * Cache management functions for Hexagon * * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/hexagon_vm.h> #define spanlines(start, end) \ (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1) void flush_dcache_range(unsigned long start, unsigned long end) { unsigned long lines = spanlines(start, end-1); unsigned long i, flags; start &= ~(LINESIZE - 1); local_irq_save(flags); for (i = 0; i < lines; i++) { __asm__ __volatile__ ( " dccleaninva(%0); " : : "r" (start) ); start += LINESIZE; } local_irq_restore(flags); } void flush_icache_range(unsigned long start, unsigned long end) { unsigned long lines = spanlines(start, end-1); unsigned long i, flags; start &= ~(LINESIZE - 1); local_irq_save(flags); for (i = 0; i < lines; i++) { __asm__ __volatile__ ( " dccleana(%0); " " icinva(%0); " : : "r" (start) ); start += LINESIZE; } __asm__ __volatile__ ( "isync" ); local_irq_restore(flags); } void hexagon_clean_dcache_range(unsigned long start, unsigned long end) { unsigned long lines = spanlines(start, end-1); unsigned long i, flags; start &= ~(LINESIZE - 1); local_irq_save(flags); for (i = 0; i < lines; i++) { __asm__ __volatile__ ( " dccleana(%0); " : : "r" (start) ); start += LINESIZE; } local_irq_restore(flags); } void 
hexagon_inv_dcache_range(unsigned long start, unsigned long end) { unsigned long lines = spanlines(start, end-1); unsigned long i, flags; start &= ~(LINESIZE - 1); local_irq_save(flags); for (i = 0; i < lines; i++) { __asm__ __volatile__ ( " dcinva(%0); " : : "r" (start) ); start += LINESIZE; } local_irq_restore(flags); } /* * This is just really brutal and shouldn't be used anyways, * especially on V2. Left here just in case. */ void flush_cache_all_hexagon(void) { unsigned long flags; local_irq_save(flags); __vmcache_ickill(); __vmcache_dckill(); __vmcache_l2kill(); local_irq_restore(flags); mb(); }
gpl-2.0
brymaster5000/m7-GPE-L
drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
9850
5416
/* * Generic library functions for the MSF (Media and Switch Fabric) unit * found on the Intel IXP2400 network processor. * * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> * Dedicated to Marija Kulikova. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of the * License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <mach/hardware.h> #include <mach/ixp2000-regs.h> #include <asm/delay.h> #include <asm/io.h> #include "ixp2400-msf.h" /* * This is the Intel recommended PLL init procedure as described on * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual. */ static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp) { int rx_dual_clock; int tx_dual_clock; u32 value; /* * If the RX mode is not 1x32, we have to enable both RX PLLs * (#0 and #1.) The same thing for the TX direction. */ rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK); tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK); /* * Read initial value. */ value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL); /* * Put PLLs in powerdown and bypass mode. */ value |= 0x0000f0f0; ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Set single or dual clock mode bits. */ value &= ~0x03000000; value |= (rx_dual_clock << 24) | (tx_dual_clock << 25); /* * Set multipliers. */ value &= ~0x00ff0000; value |= mp->rxclk01_multiplier << 16; value |= mp->rxclk23_multiplier << 18; value |= mp->txclk01_multiplier << 20; value |= mp->txclk23_multiplier << 22; /* * And write value. */ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Disable PLL bypass mode. */ value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15); ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Turn on PLLs. 
*/ value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7); ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Wait for PLLs to lock. There are lock status bits, but IXP2400 * erratum #65 says that these lock bits should not be relied upon * as they might not accurately reflect the true state of the PLLs. */ udelay(100); } /* * Needed according to p480 of Programmer's Reference Manual. */ static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp) { int size_bits; int i; /* * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer * corruption) in the Intel-recommended way: do not add the RBUF * elements susceptible to corruption to the freelist. */ size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK; if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) { for (i = 1; i < 128; i++) { if (i == 9 || i == 18 || i == 27) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) { for (i = 1; i < 64; i++) { if (i == 4 || i == 9 || i == 13) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) { for (i = 1; i < 32; i++) { if (i == 2 || i == 4 || i == 6) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } } static u32 ixp2400_msf_valid_channels(u32 reg) { u32 channels; channels = 0; switch (reg & IXP2400_RX_MODE_WIDTH_MASK) { case IXP2400_RX_MODE_1x32: channels = 0x1; if (reg & IXP2400_RX_MODE_MPHY && !(reg & IXP2400_RX_MODE_MPHY_32)) channels = 0xf; break; case IXP2400_RX_MODE_2x16: channels = 0x5; break; case IXP2400_RX_MODE_4x8: channels = 0xf; break; case IXP2400_RX_MODE_1x16_2x8: channels = 0xd; break; } return channels; } static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp) { u32 value; value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff; value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28; ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value); } static void 
ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp) { u32 value; value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff; value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28; ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value); } void ixp2400_msf_init(struct ixp2400_msf_parameters *mp) { u32 value; int i; /* * Init the RX/TX PLLs based on the passed parameter block. */ ixp2400_pll_init(mp); /* * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF. */ value = ixp2000_reg_read(IXP2000_RESET0); ixp2000_reg_write(IXP2000_RESET0, value | 0x80); ixp2000_reg_write(IXP2000_RESET0, value & ~0x80); /* * Initialise the RX section. */ ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i, mp->rx_channel_mode[i]); } ixp2400_msf_free_rbuf_entries(mp); ixp2400_msf_enable_rx(mp); /* * Initialise the TX section. */ ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i, mp->tx_channel_mode[i]); } ixp2400_msf_enable_tx(mp); }
gpl-2.0
StarKissed/android_kernel_htc_mecha
kernel/trace/trace_workqueue.c
891
7555
/* * Workqueue statistical tracer. * * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> * */ #include <trace/events/workqueue.h> #include <linux/list.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/kref.h> #include "trace_stat.h" #include "trace.h" /* A cpu workqueue thread */ struct cpu_workqueue_stats { struct list_head list; struct kref kref; int cpu; pid_t pid; /* Can be inserted from interrupt or user context, need to be atomic */ atomic_t inserted; /* * Don't need to be atomic, works are serialized in a single workqueue thread * on a single CPU. */ unsigned int executed; }; /* List of workqueue threads on one cpu */ struct workqueue_global_stats { struct list_head list; spinlock_t lock; }; /* Don't need a global lock because allocated before the workqueues, and * never freed. */ static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); #define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu)) static void cpu_workqueue_stat_free(struct kref *kref) { kfree(container_of(kref, struct cpu_workqueue_stats, kref)); } /* Insertion of a work */ static void probe_workqueue_insertion(void *ignore, struct task_struct *wq_thread, struct work_struct *work) { int cpu = cpumask_first(&wq_thread->cpus_allowed); struct cpu_workqueue_stats *node; unsigned long flags; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { if (node->pid == wq_thread->pid) { atomic_inc(&node->inserted); goto found; } } pr_debug("trace_workqueue: entry not found\n"); found: spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); } /* Execution of a work */ static void probe_workqueue_execution(void *ignore, struct task_struct *wq_thread, struct work_struct *work) { int cpu = cpumask_first(&wq_thread->cpus_allowed); struct cpu_workqueue_stats *node; unsigned long flags; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); list_for_each_entry(node, 
&workqueue_cpu_stat(cpu)->list, list) { if (node->pid == wq_thread->pid) { node->executed++; goto found; } } pr_debug("trace_workqueue: entry not found\n"); found: spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); } /* Creation of a cpu workqueue thread */ static void probe_workqueue_creation(void *ignore, struct task_struct *wq_thread, int cpu) { struct cpu_workqueue_stats *cws; unsigned long flags; WARN_ON(cpu < 0); /* Workqueues are sometimes created in atomic context */ cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); if (!cws) { pr_warning("trace_workqueue: not enough memory\n"); return; } INIT_LIST_HEAD(&cws->list); kref_init(&cws->kref); cws->cpu = cpu; cws->pid = wq_thread->pid; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); } /* Destruction of a cpu workqueue thread */ static void probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread) { /* Workqueue only execute on one cpu */ int cpu = cpumask_first(&wq_thread->cpus_allowed); struct cpu_workqueue_stats *node, *next; unsigned long flags; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, list) { if (node->pid == wq_thread->pid) { list_del(&node->list); kref_put(&node->kref, cpu_workqueue_stat_free); goto found; } } pr_debug("trace_workqueue: don't find workqueue to destroy\n"); found: spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); } static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) { unsigned long flags; struct cpu_workqueue_stats *ret = NULL; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); if (!list_empty(&workqueue_cpu_stat(cpu)->list)) { ret = list_entry(workqueue_cpu_stat(cpu)->list.next, struct cpu_workqueue_stats, list); kref_get(&ret->kref); } spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); 
return ret; } static void *workqueue_stat_start(struct tracer_stat *trace) { int cpu; void *ret = NULL; for_each_possible_cpu(cpu) { ret = workqueue_stat_start_cpu(cpu); if (ret) return ret; } return NULL; } static void *workqueue_stat_next(void *prev, int idx) { struct cpu_workqueue_stats *prev_cws = prev; struct cpu_workqueue_stats *ret; int cpu = prev_cws->cpu; unsigned long flags; spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); do { cpu = cpumask_next(cpu, cpu_possible_mask); if (cpu >= nr_cpu_ids) return NULL; } while (!(ret = workqueue_stat_start_cpu(cpu))); return ret; } else { ret = list_entry(prev_cws->list.next, struct cpu_workqueue_stats, list); kref_get(&ret->kref); } spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); return ret; } static int workqueue_stat_show(struct seq_file *s, void *p) { struct cpu_workqueue_stats *cws = p; struct pid *pid; struct task_struct *tsk; pid = find_get_pid(cws->pid); if (pid) { tsk = get_pid_task(pid, PIDTYPE_PID); if (tsk) { seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, atomic_read(&cws->inserted), cws->executed, tsk->comm); put_task_struct(tsk); } put_pid(pid); } return 0; } static void workqueue_stat_release(void *stat) { struct cpu_workqueue_stats *node = stat; kref_put(&node->kref, cpu_workqueue_stat_free); } static int workqueue_stat_headers(struct seq_file *s) { seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); seq_printf(s, "# | | | |\n"); return 0; } struct tracer_stat workqueue_stats __read_mostly = { .name = "workqueues", .stat_start = workqueue_stat_start, .stat_next = workqueue_stat_next, .stat_show = workqueue_stat_show, .stat_release = workqueue_stat_release, .stat_headers = workqueue_stat_headers }; int __init stat_workqueue_init(void) { if (register_stat_tracer(&workqueue_stats)) { pr_warning("Unable to register workqueue stat tracer\n"); return 1; } 
return 0; } fs_initcall(stat_workqueue_init); /* * Workqueues are created very early, just after pre-smp initcalls. * So we must register our tracepoints at this stage. */ int __init trace_workqueue_early_init(void) { int ret, cpu; ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); if (ret) goto out; ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL); if (ret) goto no_insertion; ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL); if (ret) goto no_execution; ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL); if (ret) goto no_creation; for_each_possible_cpu(cpu) { spin_lock_init(&workqueue_cpu_stat(cpu)->lock); INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); } return 0; no_creation: unregister_trace_workqueue_creation(probe_workqueue_creation, NULL); no_execution: unregister_trace_workqueue_execution(probe_workqueue_execution, NULL); no_insertion: unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL); out: pr_warning("trace_workqueue: unable to trace workqueues\n"); return 1; } early_initcall(trace_workqueue_early_init);
gpl-2.0
yajnab/android_kernel_sony_taoshan
drivers/iommu/iommu.c
1147
9673
/* * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/device.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/types.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/iommu.h> #include <linux/scatterlist.h> static ssize_t show_iommu_group(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int groupid; if (iommu_device_group(dev, &groupid)) return 0; return sprintf(buf, "%u", groupid); } static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL); static int add_iommu_group(struct device *dev, void *data) { unsigned int groupid; if (iommu_device_group(dev, &groupid) == 0) return device_create_file(dev, &dev_attr_iommu_group); return 0; } static int remove_iommu_group(struct device *dev) { unsigned int groupid; if (iommu_device_group(dev, &groupid) == 0) device_remove_file(dev, &dev_attr_iommu_group); return 0; } static int iommu_device_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; if (action == BUS_NOTIFY_ADD_DEVICE) return add_iommu_group(dev, NULL); else if (action == BUS_NOTIFY_DEL_DEVICE) return remove_iommu_group(dev); return 0; } static struct notifier_block iommu_device_nb = { 
/*
 * NOTE(review): this chunk begins inside a 'struct notifier_block'
 * initializer whose opening brace lies outside the visible region;
 * the two lines below close that definition and are kept verbatim.
 */
	.notifier_call = iommu_device_notifier,
};

/* Hook the device notifier into @bus and group devices already present. */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	/* only one IOMMU driver may claim a bus */
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

/* True iff an IOMMU driver has registered ops for @bus. */
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler)
{
	BUG_ON(!domain);

	domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

/*
 * Allocate a domain and initialize it through the bus's iommu_ops.
 * Returns NULL on any failure (no ops, no memory, or domain_init error).
 */
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus, int flags)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain, flags);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

/* Destroy a domain; domain_destroy is optional in the ops table. */
void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/* Attach @dev to @domain; -ENODEV when the driver lacks attach_dev. */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

/* Detach @dev from @domain; silently a no-op when unsupported. */
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

/* Translate an IOVA to a physical address; 0 when the hook is absent. */
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

/* Query a domain capability; 0 (not supported) when the hook is absent. */
int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

/*
 * Map the physically contiguous range [paddr, paddr + size) at @iova,
 * splitting the request into the largest page sizes that the hardware
 * supports and that the iova/paddr alignment allows.  On a driver
 * failure the partially built mapping is rolled back via iommu_unmap().
 */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

/*
 * Unmap up to @size bytes starting at @iova, iterating until the driver
 * reports an unmapped hole.  Returns the number of bytes actually unmapped.
 *
 * NOTE(review): returning -ENODEV/-EINVAL through a size_t produces a huge
 * positive value at the caller; mainline later reworked this contract —
 * verify callers here check for "> size" style errors before relying on it.
 */
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
			iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
			(unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
				(unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

/*
 * Map a scatterlist at @iova (vendor extension to the core API);
 * @iova must be page aligned.
 */
int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
		    struct scatterlist *sg, unsigned int len, int prot)
{
	if (unlikely(domain->ops->map_range == NULL))
		return -ENODEV;

	BUG_ON(iova & (~PAGE_MASK));

	return domain->ops->map_range(domain, iova, sg, len, prot);
}
EXPORT_SYMBOL_GPL(iommu_map_range);

/* Unmap a range previously mapped with iommu_map_range(). */
int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
		      unsigned int len)
{
	if (unlikely(domain->ops->unmap_range == NULL))
		return -ENODEV;

	BUG_ON(iova & (~PAGE_MASK));

	return domain->ops->unmap_range(domain, iova, len);
}
EXPORT_SYMBOL_GPL(iommu_unmap_range);

/* Page-table base address (vendor extension); 0 when unsupported. */
phys_addr_t iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	if (unlikely(domain->ops->get_pt_base_addr == NULL))
		return 0;

	return domain->ops->get_pt_base_addr(domain);
}
EXPORT_SYMBOL_GPL(iommu_get_pt_base_addr);

/* Report the IOMMU group id of @dev, if its bus driver implements it. */
int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);
gpl-2.0
samno1607/Xperia-Z-Source-Differences-JB
arch/arm/mach-msm/rpc_server_dog_keepalive.c
1659
2059
/* arch/arm/mach-msm/rpc_server_dog_keepalive.c * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2009, Code Aurora Forum. All rights reserved. * Author: Iliyan Malchev <ibm@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <mach/msm_rpcrouter.h> /* dog_keepalive server definitions */ #define DOG_KEEPALIVE_PROG 0x30000015 #if CONFIG_MSM_AMSS_VERSION==6210 #define DOG_KEEPALIVE_VERS 0 #define RPC_DOG_KEEPALIVE_BEACON 1 #elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225) #define DOG_KEEPALIVE_VERS 0x731fa727 #define RPC_DOG_KEEPALIVE_BEACON 2 #else #error "Unsupported AMSS version" #endif #define DOG_KEEPALIVE_VERS_COMP 0x00010001 #define RPC_DOG_KEEPALIVE_NULL 0 /* TODO: Remove server registration with _VERS when modem is upated with _COMP*/ static int handle_rpc_call(struct msm_rpc_server *server, struct rpc_request_hdr *req, unsigned len) { switch (req->procedure) { case RPC_DOG_KEEPALIVE_NULL: return 0; case RPC_DOG_KEEPALIVE_BEACON: return 0; default: return -ENODEV; } } static struct msm_rpc_server rpc_server[] = { { .prog = DOG_KEEPALIVE_PROG, .vers = DOG_KEEPALIVE_VERS, .rpc_call = handle_rpc_call, }, { .prog = DOG_KEEPALIVE_PROG, .vers = DOG_KEEPALIVE_VERS_COMP, .rpc_call = handle_rpc_call, }, }; static int __init rpc_server_init(void) { /* Dual server registration to support backwards compatibility vers */ int ret; ret = msm_rpc_create_server(&rpc_server[1]); if (ret < 0) return ret; return msm_rpc_create_server(&rpc_server[0]); } module_init(rpc_server_init);
gpl-2.0
TeamExodus/kernel_yu_msm8916
drivers/hwmon/asb100.c
2683
28571
/*
 * asb100.c - Part of lm_sensors, Linux kernel modules for hardware
 * monitoring
 *
 * Copyright (C) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
 *
 * (derived from w83781d.c)
 *
 * Copyright (C) 1998 - 2003 Frodo Looijaard <frodol@dds.nl>,
 * Philip Edelbrock <phil@netroedge.com>, and
 * Mark Studebaker <mdsxyz123@yahoo.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This driver supports the hardware sensor chips: Asus ASB100 and
 * ASB100-A "BACH".
 *
 * ASB100-A supports pwm1, while plain ASB100 does not. There is no known
 * way for the driver to tell which one is there.
 *
 * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
 * asb100 7 3 1 4 0x31 0x0694 yes no
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include "lm75.h"

/* I2C addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };

static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients,
	"List of subclient addresses: {bus, clientaddr, subclientaddr1, subclientaddr2}");

/* Voltage IN registers 0-6 */
#define ASB100_REG_IN(nr)	(0x20 + (nr))
#define ASB100_REG_IN_MAX(nr)	(0x2b + (nr * 2))
#define ASB100_REG_IN_MIN(nr)	(0x2c + (nr * 2))

/* FAN IN registers 1-3 */
#define ASB100_REG_FAN(nr)	(0x28 + (nr))
#define ASB100_REG_FAN_MIN(nr)	(0x3b + (nr))

/* TEMPERATURE registers 1-4 */
static const u16 asb100_reg_temp[]	= {0, 0x27, 0x150, 0x250, 0x17};
static const u16 asb100_reg_temp_max[]	= {0, 0x39, 0x155, 0x255, 0x18};
static const u16 asb100_reg_temp_hyst[]	= {0, 0x3a, 0x153, 0x253, 0x19};

#define ASB100_REG_TEMP(nr)	(asb100_reg_temp[nr])
#define ASB100_REG_TEMP_MAX(nr)	(asb100_reg_temp_max[nr])
#define ASB100_REG_TEMP_HYST(nr) (asb100_reg_temp_hyst[nr])

#define ASB100_REG_TEMP2_CONFIG	0x0152
#define ASB100_REG_TEMP3_CONFIG	0x0252

#define ASB100_REG_CONFIG	0x40
#define ASB100_REG_ALARM1	0x41
#define ASB100_REG_ALARM2	0x42
#define ASB100_REG_SMIM1	0x43
#define ASB100_REG_SMIM2	0x44
#define ASB100_REG_VID_FANDIV	0x47
#define ASB100_REG_I2C_ADDR	0x48
#define ASB100_REG_CHIPID	0x49
#define ASB100_REG_I2C_SUBADDR	0x4a
#define ASB100_REG_PIN		0x4b
#define ASB100_REG_IRQ		0x4c
#define ASB100_REG_BANK		0x4e
#define ASB100_REG_CHIPMAN	0x4f

#define ASB100_REG_WCHIPID	0x58

/* bit 7 -> enable, bits 0-3 -> duty cycle */
#define ASB100_REG_PWM1		0x59

/*
 * CONVERSIONS
 * Rounding and limit checking is only done on the TO_REG variants.
 */

/* These constants are a guess, consistent w/ w83781d */
#define ASB100_IN_MIN		0
#define ASB100_IN_MAX		4080

/*
 * IN: 1/1000 V (0V to 4.08V)
 * REG: 16mV/bit
 */
static u8 IN_TO_REG(unsigned val)
{
	unsigned nval = clamp_val(val, ASB100_IN_MIN, ASB100_IN_MAX);
	return (nval + 8) / 16;
}

static unsigned IN_FROM_REG(u8 reg)
{
	return reg * 16;
}

/* rpm == -1 means "disabled" (reg 0); rpm == 0 means "no limit" (reg 255) */
static u8 FAN_TO_REG(long rpm, int div)
{
	if (rpm == -1)
		return 0;
	if (rpm == 0)
		return 255;
	rpm = clamp_val(rpm, 1, 1000000);
	return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

static int FAN_FROM_REG(u8 val, int div)
{
	return val == 0 ? -1 : val == 255 ? 0 : 1350000 / (val * div);
}

/* These constants are a guess, consistent w/ w83781d */
#define ASB100_TEMP_MIN		-128000
#define ASB100_TEMP_MAX		127000

/*
 * TEMP: 0.001C/bit (-128C to +127C)
 * REG: 1C/bit, two's complement
 */
static u8 TEMP_TO_REG(long temp)
{
	int ntemp = clamp_val(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
	ntemp += (ntemp < 0 ? -500 : 500);
	return (u8)(ntemp / 1000);
}

static int TEMP_FROM_REG(u8 reg)
{
	return (s8)reg * 1000;
}

/*
 * PWM: 0 - 255 per sensors documentation
 * REG: (6.25% duty cycle per bit)
 */
static u8 ASB100_PWM_TO_REG(int pwm)
{
	pwm = clamp_val(pwm, 0, 255);
	return (u8)(pwm / 16);
}

static int ASB100_PWM_FROM_REG(u8 reg)
{
	return reg * 16;
}

#define DIV_FROM_REG(val) (1 << (val))

/*
 * FAN DIV: 1, 2, 4, or 8 (defaults to 2)
 * REG: 0, 1, 2, or 3 (respectively) (defaults to 1)
 */
static u8 DIV_TO_REG(long val)
{
	return val == 8 ? 3 : val == 4 ? 2 : val == 1 ? 0 : 1;
}

/*
 * For each registered client, we need to keep some data in memory. That
 * data is pointed to by client->data. The structure itself is
 * dynamically allocated, at the same time the client itself is allocated.
 */
struct asb100_data {
	struct device *hwmon_dev;
	struct mutex lock;

	struct mutex update_lock;
	unsigned long last_updated;	/* In jiffies */

	/* array of 2 pointers to subclients */
	struct i2c_client *lm75[2];

	char valid;		/* !=0 if following fields are valid */
	u8 in[7];		/* Register value */
	u8 in_max[7];		/* Register value */
	u8 in_min[7];		/* Register value */
	u8 fan[3];		/* Register value */
	u8 fan_min[3];		/* Register value */
	u16 temp[4];		/* Register value (0 and 3 are u8 only) */
	u16 temp_max[4];	/* Register value (0 and 3 are u8 only) */
	u16 temp_hyst[4];	/* Register value (0 and 3 are u8 only) */
	u8 fan_div[3];		/* Register encoding, right justified */
	u8 pwm;			/* Register encoding */
	u8 vid;			/* Register encoding, combined */
	u32 alarms;		/* Register encoding, combined */
	u8 vrm;
};

static int asb100_read_value(struct i2c_client *client, u16 reg);
static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
static int asb100_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int asb100_detect(struct i2c_client *client,
			 struct i2c_board_info *info);
static int asb100_remove(struct i2c_client *client);
static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);

static const struct i2c_device_id asb100_id[] = {
	{ "asb100", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, asb100_id);

static struct i2c_driver asb100_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "asb100",
	},
	.probe		= asb100_probe,
	.remove		= asb100_remove,
	.id_table	= asb100_id,
	.detect		= asb100_detect,
	.address_list	= normal_i2c,
};

/* 7 Voltages */
#define show_in_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
		char *buf) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct asb100_data *data = asb100_update_device(dev); \
	return sprintf(buf, "%d\n", IN_FROM_REG(data->reg[nr])); \
}

show_in_reg(in)
show_in_reg(in_min)
show_in_reg(in_max)

#define set_in_reg(REG, reg) \
static ssize_t set_in_##reg(struct device *dev, struct device_attribute *attr, \
		const char *buf, size_t count) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct i2c_client *client = to_i2c_client(dev); \
	struct asb100_data *data = i2c_get_clientdata(client); \
	unsigned long val; \
	int err = kstrtoul(buf, 10, &val); \
	if (err) \
		return err; \
	mutex_lock(&data->update_lock); \
	data->in_##reg[nr] = IN_TO_REG(val); \
	asb100_write_value(client, ASB100_REG_IN_##REG(nr), \
		data->in_##reg[nr]); \
	mutex_unlock(&data->update_lock); \
	return count; \
}

set_in_reg(MIN, min)
set_in_reg(MAX, max)

#define sysfs_in(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
		show_in, NULL, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
		show_in_min, set_in_min, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
		show_in_max, set_in_max, offset)

sysfs_in(0);
sysfs_in(1);
sysfs_in(2);
sysfs_in(3);
sysfs_in(4);
sysfs_in(5);
sysfs_in(6);

/* 3 Fans */
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
		DIV_FROM_REG(data->fan_div[nr])));
}

static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
		DIV_FROM_REG(data->fan_div[nr])));
}

static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}

static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct asb100_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	asb100_write_value(client, ASB100_REG_FAN_MIN(nr), data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor. This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct asb100_data *data = i2c_get_clientdata(client);
	unsigned long min;
	int reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	min = FAN_FROM_REG(data->fan_min[nr],
			DIV_FROM_REG(data->fan_div[nr]));
	data->fan_div[nr] = DIV_TO_REG(val);

	/* each fan's divisor bits live in a different register/position */
	switch (nr) {
	case 0:	/* fan 1 */
		reg = asb100_read_value(client, ASB100_REG_VID_FANDIV);
		reg = (reg & 0xcf) | (data->fan_div[0] << 4);
		asb100_write_value(client, ASB100_REG_VID_FANDIV, reg);
		break;

	case 1:	/* fan 2 */
		reg = asb100_read_value(client, ASB100_REG_VID_FANDIV);
		reg = (reg & 0x3f) | (data->fan_div[1] << 6);
		asb100_write_value(client, ASB100_REG_VID_FANDIV, reg);
		break;

	case 2:	/* fan 3 */
		reg = asb100_read_value(client, ASB100_REG_PIN);
		reg = (reg & 0x3f) | (data->fan_div[2] << 6);
		asb100_write_value(client, ASB100_REG_PIN, reg);
		break;
	}

	data->fan_min[nr] =
		FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
	asb100_write_value(client, ASB100_REG_FAN_MIN(nr), data->fan_min[nr]);

	mutex_unlock(&data->update_lock);

	return count;
}

#define sysfs_fan(offset) \
static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
		show_fan, NULL, offset - 1); \
static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
		show_fan_min, set_fan_min, offset - 1); \
static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
		show_fan_div, set_fan_div, offset - 1)

sysfs_fan(1);
sysfs_fan(2);
sysfs_fan(3);

/* 4 Temp. Sensors */
/* temp1/temp4 are on-chip 8-bit; temp2/temp3 route to LM75 subclients */
static int sprintf_temp_from_reg(u16 reg, char *buf, int nr)
{
	int ret = 0;

	switch (nr) {
	case 1: case 2:
		ret = sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(reg));
		break;
	case 0: case 3: default:
		ret = sprintf(buf, "%d\n", TEMP_FROM_REG(reg));
		break;
	}
	return ret;
}

#define show_temp_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
		char *buf) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct asb100_data *data = asb100_update_device(dev); \
	return sprintf_temp_from_reg(data->reg[nr], buf, nr); \
}

show_temp_reg(temp);
show_temp_reg(temp_max);
show_temp_reg(temp_hyst);

#define set_temp_reg(REG, reg) \
static ssize_t set_##reg(struct device *dev, struct device_attribute *attr, \
		const char *buf, size_t count) \
{ \
	int nr = to_sensor_dev_attr(attr)->index; \
	struct i2c_client *client = to_i2c_client(dev); \
	struct asb100_data *data = i2c_get_clientdata(client); \
	long val; \
	int err = kstrtol(buf, 10, &val); \
	if (err) \
		return err; \
	mutex_lock(&data->update_lock); \
	switch (nr) { \
	case 1: case 2: \
		data->reg[nr] = LM75_TEMP_TO_REG(val); \
		break; \
	case 0: case 3: default: \
		data->reg[nr] = TEMP_TO_REG(val); \
		break; \
	} \
	asb100_write_value(client, ASB100_REG_TEMP_##REG(nr+1), \
			data->reg[nr]); \
	mutex_unlock(&data->update_lock); \
	return count; \
}

set_temp_reg(MAX, temp_max);
set_temp_reg(HYST, temp_hyst);

#define sysfs_temp(num) \
static SENSOR_DEVICE_ATTR(temp##num##_input, S_IRUGO, \
		show_temp, NULL, num - 1); \
static SENSOR_DEVICE_ATTR(temp##num##_max, S_IRUGO | S_IWUSR, \
		show_temp_max, set_temp_max, num - 1); \
static SENSOR_DEVICE_ATTR(temp##num##_max_hyst, S_IRUGO | S_IWUSR, \
		show_temp_hyst, set_temp_hyst, num - 1)

sysfs_temp(1);
sysfs_temp(2);
sysfs_temp(3);
sysfs_temp(4);

/* VID */
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}

static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);

/* VRM */
static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct asb100_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->vrm);
}

static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct asb100_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	data->vrm = val;
	return count;
}

/* Alarms */
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);

static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%u\n", data->alarms);
}

static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);

static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int bitnr = to_sensor_dev_attr(attr)->index;
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}

static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);

/* 1 PWM */
static ssize_t show_pwm1(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", ASB100_PWM_FROM_REG(data->pwm & 0x0f));
}

static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct asb100_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->pwm &= 0x80; /* keep the enable bit */
	data->pwm |= (0x0f & ASB100_PWM_TO_REG(val));
	asb100_write_value(client, ASB100_REG_PWM1, data->pwm);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_pwm_enable1(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct asb100_data *data = asb100_update_device(dev);
	return sprintf(buf, "%d\n", (data->pwm & 0x80) ? 1 : 0);
}

static ssize_t set_pwm_enable1(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct asb100_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->pwm &= 0x0f; /* keep the duty cycle bits */
	data->pwm |= (val ? 0x80 : 0x00);
	asb100_write_value(client, ASB100_REG_PWM1, data->pwm);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm1, set_pwm1);
static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
		show_pwm_enable1, set_pwm_enable1);

static struct attribute *asb100_attributes[] = {
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_min.dev_attr.attr,
	&sensor_dev_attr_in0_max.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_min.dev_attr.attr,
	&sensor_dev_attr_in1_max.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_min.dev_attr.attr,
	&sensor_dev_attr_in2_max.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in3_min.dev_attr.attr,
	&sensor_dev_attr_in3_max.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,
	&sensor_dev_attr_in4_min.dev_attr.attr,
	&sensor_dev_attr_in4_max.dev_attr.attr,
	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in5_min.dev_attr.attr,
	&sensor_dev_attr_in5_max.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in6_min.dev_attr.attr,
	&sensor_dev_attr_in6_max.dev_attr.attr,

	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_div.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan2_min.dev_attr.attr,
	&sensor_dev_attr_fan2_div.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan3_min.dev_attr.attr,
	&sensor_dev_attr_fan3_div.dev_attr.attr,

	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp4_input.dev_attr.attr,
	&sensor_dev_attr_temp4_max.dev_attr.attr,
	&sensor_dev_attr_temp4_max_hyst.dev_attr.attr,

	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in1_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_alarm.dev_attr.attr,

	&dev_attr_cpu0_vid.attr,
	&dev_attr_vrm.attr,
	&dev_attr_alarms.attr,
	&dev_attr_pwm1.attr,
	&dev_attr_pwm1_enable.attr,

	NULL
};

static const struct attribute_group asb100_group = {
	.attrs = asb100_attributes,
};

/* Locate and register the two LM75-compatible temperature subclients. */
static int asb100_detect_subclients(struct i2c_client *client)
{
	int i, id, err;
	int address = client->addr;
	unsigned short sc_addr[2];
	struct asb100_data *data = i2c_get_clientdata(client);
	struct i2c_adapter *adapter = client->adapter;

	id = i2c_adapter_id(adapter);

	if (force_subclients[0] == id && force_subclients[1] == address) {
		for (i = 2; i <= 3; i++) {
			if (force_subclients[i] < 0x48 ||
			    force_subclients[i] > 0x4f) {
				dev_err(&client->dev,
					"invalid subclient address %d; must be 0x48-0x4f\n",
					force_subclients[i]);
				err = -ENODEV;
				goto ERROR_SC_2;
			}
		}
		asb100_write_value(client, ASB100_REG_I2C_SUBADDR,
					(force_subclients[2] & 0x07) |
					((force_subclients[3] & 0x07) << 4));
		sc_addr[0] = force_subclients[2];
		sc_addr[1] = force_subclients[3];
	} else {
		int val = asb100_read_value(client, ASB100_REG_I2C_SUBADDR);
		sc_addr[0] = 0x48 + (val & 0x07);
		sc_addr[1] = 0x48 + ((val >> 4) & 0x07);
	}

	if (sc_addr[0] == sc_addr[1]) {
		dev_err(&client->dev,
			"duplicate addresses 0x%x for subclients\n",
			sc_addr[0]);
		err = -ENODEV;
		goto ERROR_SC_2;
	}

	data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]);
	if (!data->lm75[0]) {
		dev_err(&client->dev,
			"subclient %d registration at address 0x%x failed.\n",
			1, sc_addr[0]);
		err = -ENOMEM;
		goto ERROR_SC_2;
	}

	data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]);
	if (!data->lm75[1]) {
		dev_err(&client->dev,
			"subclient %d registration at address 0x%x failed.\n",
			2, sc_addr[1]);
		err = -ENOMEM;
		goto ERROR_SC_3;
	}

	return 0;

/* Undo inits in case of errors */
ERROR_SC_3:
	i2c_unregister_device(data->lm75[0]);
ERROR_SC_2:
	return err;
}

/* Return 0 if detection is successful, -ENODEV otherwise */
static int asb100_detect(struct i2c_client *client,
			 struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int val1, val2;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		pr_debug("detect failed, smbus byte data not supported!\n");
		return -ENODEV;
	}

	val1 = i2c_smbus_read_byte_data(client, ASB100_REG_BANK);
	val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN);

	/* If we're in bank 0 */
	if ((!(val1 & 0x07)) &&
			/* Check for ASB100 ID (low byte) */
			(((!(val1 & 0x80)) && (val2 != 0x94)) ||
			/* Check for ASB100 ID (high byte ) */
			((val1 & 0x80) && (val2 != 0x06)))) {
		pr_debug("detect failed, bad chip id 0x%02x!\n", val2);
		return -ENODEV;
	}

	/* Put it now into bank 0 and Vendor ID High Byte */
	i2c_smbus_write_byte_data(client, ASB100_REG_BANK,
		(i2c_smbus_read_byte_data(client, ASB100_REG_BANK) & 0x78)
		| 0x80);

	/* Determine the chip type. */
	val1 = i2c_smbus_read_byte_data(client, ASB100_REG_WCHIPID);
	val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN);

	if (val1 != 0x31 || val2 != 0x06)
		return -ENODEV;

	strlcpy(info->type, "asb100", I2C_NAME_SIZE);

	return 0;
}

/* Bind the driver: allocate state, attach subclients, init, expose sysfs. */
static int asb100_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int err;
	struct asb100_data *data;

	data = devm_kzalloc(&client->dev, sizeof(struct asb100_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	mutex_init(&data->lock);
	mutex_init(&data->update_lock);

	/* Attach secondary lm75 clients */
	err = asb100_detect_subclients(client);
	if (err)
		return err;

	/* Initialize the chip */
	asb100_init_client(client);

	/* A few vars need to be filled upon startup */
	data->fan_min[0] = asb100_read_value(client, ASB100_REG_FAN_MIN(0));
	data->fan_min[1] = asb100_read_value(client, ASB100_REG_FAN_MIN(1));
	data->fan_min[2] = asb100_read_value(client, ASB100_REG_FAN_MIN(2));

	/* Register sysfs hooks */
	err = sysfs_create_group(&client->dev.kobj, &asb100_group);
	if (err)
		goto ERROR3;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto ERROR4;
	}

	return 0;

ERROR4:
	sysfs_remove_group(&client->dev.kobj, &asb100_group);
ERROR3:
	i2c_unregister_device(data->lm75[1]);
	i2c_unregister_device(data->lm75[0]);
	return err;
}

/* Tear down in the reverse order of probe. */
static int asb100_remove(struct i2c_client *client)
{
	struct asb100_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &asb100_group);

	i2c_unregister_device(data->lm75[1]);
	i2c_unregister_device(data->lm75[0]);

	return 0;
}

/*
 * The SMBus locks itself, usually, but nothing may access the chip between
 * bank switches.
 */
static int asb100_read_value(struct i2c_client *client, u16 reg)
{
	struct asb100_data *data = i2c_get_clientdata(client);
	struct i2c_client *cl;
	int res, bank;

	mutex_lock(&data->lock);

	/* high nibble of reg selects the bank; banks 1/2 are LM75 subclients */
	bank = (reg >> 8) & 0x0f;
	if (bank > 2)
		/* switch banks */
		i2c_smbus_write_byte_data(client, ASB100_REG_BANK, bank);

	if (bank == 0 || bank > 2) {
		res = i2c_smbus_read_byte_data(client, reg & 0xff);
	} else {
		/* switch to subclient */
		cl = data->lm75[bank - 1];

		/* convert from ISA to LM75 I2C addresses */
		switch (reg & 0xff) {
		case 0x50: /* TEMP */
			res = i2c_smbus_read_word_swapped(cl, 0);
			break;
		case 0x52: /* CONFIG */
			res = i2c_smbus_read_byte_data(cl, 1);
			break;
		case 0x53: /* HYST */
			res = i2c_smbus_read_word_swapped(cl, 2);
			break;
		case 0x55: /* MAX */
		default:
			res = i2c_smbus_read_word_swapped(cl, 3);
			break;
		}
	}

	if (bank > 2)
		i2c_smbus_write_byte_data(client, ASB100_REG_BANK, 0);

	mutex_unlock(&data->lock);

	return res;
}

/* Mirror of asb100_read_value(): bank-switched write, LM75 redirection. */
static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value)
{
	struct asb100_data *data = i2c_get_clientdata(client);
	struct i2c_client *cl;
	int bank;

	mutex_lock(&data->lock);

	bank = (reg >> 8) & 0x0f;
	if (bank > 2)
		/* switch banks */
		i2c_smbus_write_byte_data(client, ASB100_REG_BANK, bank);

	if (bank == 0 || bank > 2) {
		i2c_smbus_write_byte_data(client, reg & 0xff,
					  value & 0xff);
	} else {
		/* switch to subclient */
		cl = data->lm75[bank - 1];

		/* convert from ISA to LM75 I2C addresses */
		switch (reg & 0xff) {
		case 0x52: /* CONFIG */
			i2c_smbus_write_byte_data(cl, 1, value & 0xff);
			break;
		case 0x53: /* HYST */
			i2c_smbus_write_word_swapped(cl, 2, value);
			break;
		case 0x55: /* MAX */
			i2c_smbus_write_word_swapped(cl, 3, value);
			break;
		}
	}

	if (bank > 2)
		i2c_smbus_write_byte_data(client, ASB100_REG_BANK, 0);

	mutex_unlock(&data->lock);
}

/* One-time chip setup: pick the VRM and enable monitoring. */
static void asb100_init_client(struct i2c_client *client)
{
	struct asb100_data *data = i2c_get_clientdata(client);

	data->vrm = vid_which_vrm();

	/* Start monitoring */
	asb100_write_value(client, ASB100_REG_CONFIG,
		(asb100_read_value(client, ASB100_REG_CONFIG) & 0xf7) | 0x01);
}

/* Refresh the register cache, at most once per 1.5 s (HZ + HZ/2). */
static struct asb100_data *asb100_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct asb100_data *data = i2c_get_clientdata(client);
	int i;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
		|| !data->valid) {

		dev_dbg(&client->dev, "starting device update...\n");

		/* 7 voltage inputs */
		for (i = 0; i < 7; i++) {
			data->in[i] = asb100_read_value(client,
				ASB100_REG_IN(i));
			data->in_min[i] = asb100_read_value(client,
				ASB100_REG_IN_MIN(i));
			data->in_max[i] = asb100_read_value(client,
				ASB100_REG_IN_MAX(i));
		}

		/* 3 fan inputs */
		for (i = 0; i < 3; i++) {
			data->fan[i] = asb100_read_value(client,
					ASB100_REG_FAN(i));
			data->fan_min[i] = asb100_read_value(client,
					ASB100_REG_FAN_MIN(i));
		}

		/* 4 temperature inputs */
		for (i = 1; i <= 4; i++) {
			data->temp[i-1] = asb100_read_value(client,
					ASB100_REG_TEMP(i));
			data->temp_max[i-1] = asb100_read_value(client,
					ASB100_REG_TEMP_MAX(i));
			data->temp_hyst[i-1] = asb100_read_value(client,
					ASB100_REG_TEMP_HYST(i));
		}

		/* VID and fan divisors */
		i = asb100_read_value(client, ASB100_REG_VID_FANDIV);
		data->vid = i & 0x0f;
		data->vid |= (asb100_read_value(client,
				ASB100_REG_CHIPID) & 0x01) << 4;
		data->fan_div[0] = (i >> 4) & 0x03;
		data->fan_div[1] = (i >> 6) & 0x03;
		data->fan_div[2] = (asb100_read_value(client,
				ASB100_REG_PIN) >> 6) & 0x03;

		/* PWM */
		data->pwm = asb100_read_value(client, ASB100_REG_PWM1);

		/* alarms */
		data->alarms = asb100_read_value(client, ASB100_REG_ALARM1) +
			(asb100_read_value(client, ASB100_REG_ALARM2) << 8);

		data->last_updated = jiffies;
		data->valid = 1;

		dev_dbg(&client->dev, "... device update complete\n");
	}

	mutex_unlock(&data->update_lock);

	return data;
}

module_i2c_driver(asb100_driver);

MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("ASB100 Bach driver");
MODULE_LICENSE("GPL");
gpl-2.0
mrimp/SM-G928T_Kernel
arch/powerpc/platforms/pseries/eeh_dev.c
2683
3441
/*
 * The file intends to implement dynamic creation of EEH device, which will
 * be bound with OF node and PCI device simultaneously. The EEH devices would
 * be fundamental information for EEH core components to work properly. Besides,
 * we have to support multiple situations where dynamic creation of EEH device
 * is required:
 *
 * 1) Before PCI enumeration starts, we need create EEH devices according to the
 *    PCI sensitive OF nodes.
 * 2) When PCI enumeration is done, we need do the binding between PCI device and
 *    the associated EEH device.
 * 3) DR (Dynamic Reconfiguration) would create PCI sensitive OF node. EEH device
 *    will be created while PCI sensitive OF node is detected from DR.
 * 4) PCI hotplug needs redoing the binding between PCI device and EEH device. If
 *    PHB is newly inserted, we also need create EEH devices accordingly.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

/**
 * eeh_dev_init - Create EEH device according to OF node
 * @dn: device node
 * @data: PHB
 *
 * It will create EEH device according to the given OF node. The function
 * might be called by PCI enumeration, DR, PHB hotplug.
 *
 * Always returns NULL so that traverse_pci_devices() keeps walking
 * the whole device tree; allocation failure is only logged.
 */
void *eeh_dev_init(struct device_node *dn, void *data)
{
	struct pci_controller *phb = data;
	struct eeh_dev *edev;

	/* Allocate EEH device */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev) {
		pr_warning("%s: out of memory\n", __func__);
		return NULL;
	}

	/* Associate EEH device with OF node */
	PCI_DN(dn)->edev = edev;
	edev->dn  = dn;
	edev->phb = phb;
	INIT_LIST_HEAD(&edev->list);

	return NULL;
}

/**
 * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB
 * @phb: PHB
 *
 * Scan the PHB OF node and its child association, then create the
 * EEH devices accordingly
 */
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
	struct device_node *dn = phb->dn;

	/* EEH PE for PHB */
	eeh_phb_pe_create(phb);

	/* EEH device for PHB */
	eeh_dev_init(dn, phb);

	/* EEH devices for children OF nodes */
	traverse_pci_devices(dn, eeh_dev_init, phb);
}

/**
 * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
 *
 * Scan all the existing PHBs and create EEH devices for their OF
 * nodes and their children OF nodes
 */
static int __init eeh_dev_phb_init(void)
{
	struct pci_controller *phb, *tmp;

	list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
		eeh_dev_phb_init_dynamic(phb);

	pr_info("EEH: devices created\n");

	return 0;
}

core_initcall(eeh_dev_phb_init);
gpl-2.0
TeamWin/android_kernel_samsung_msm8660-common
fs/gfs2/sys.c
2939
16480
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <asm/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/genhd.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

/*
 * Generic sysfs attribute for a gfs2 superblock: show/store take the
 * gfs2_sbd directly instead of a kobject.
 */
struct gfs2_attr {
	struct attribute attr;
	ssize_t (*show)(struct gfs2_sbd *, char *);
	ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

/* kobject->gfs2_sbd adapter for the show path */
static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}

/* kobject->gfs2_sbd adapter for the store path; silently accepts
 * writes to attributes without a store hook */
static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
	.show  = gfs2_attr_show,
	.store = gfs2_attr_store,
};

static struct kset *gfs2_kset;

/* device major:minor of the mounted block device */
static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

/* a UUID of all-zero bytes means "no UUID recorded" */
static int gfs2_uuid_valid(const u8 *uuid)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (uuid[i])
			return 1;
	}
	return 0;
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;
	buf[0] = '\0';
	if (!gfs2_uuid_valid(uuid))
		return 0;
	return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
}

/* current freeze nesting count, read under sd_freeze_lock */
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int count;

	mutex_lock(&sdp->sd_freeze_lock);
	count = sdp->sd_freeze_count;
	mutex_unlock(&sdp->sd_freeze_lock);

	return snprintf(buf, PAGE_SIZE, "%u\n", count);
}

/* write 1 to freeze the fs, 0 to unfreeze; anything else is -EINVAL */
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	ssize_t ret = len;
	int error = 0;
	int n = simple_strtol(buf, NULL, 0);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	switch (n) {
	case 0:
		gfs2_unfreeze_fs(sdp);
		break;
	case 1:
		error = gfs2_freeze_fs(sdp);
		break;
	default:
		ret = -EINVAL;
	}

	if (error)
		fs_warn(sdp, "freeze %d error %d", n, error);

	return ret;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

/* write 1 to force-withdraw this node from the cluster */
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_lm_withdraw(sdp,
		"GFS2: fsid=%s: withdrawing from cluster at user's request\n",
		sdp->sd_fsname);
	return len;
}

/* write 1 to force a statfs sync */
static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_statfs_sync(sdp->sd_vfs, 0);
	return len;
}

/* write 1 to force a quota sync */
static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
				size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_quota_sync(sdp->sd_vfs, 0, 1);
	return len;
}

/* refresh the in-core quota for the given user id */
static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
					size_t len)
{
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	id = simple_strtoul(buf, NULL, 0);

	error = gfs2_quota_refresh(sdp, 1, id);
	return error ? error : len;
}

/* refresh the in-core quota for the given group id */
static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
					 size_t len)
{
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	id = simple_strtoul(buf, NULL, 0);

	error = gfs2_quota_refresh(sdp, 0, id);
	return error ? error : len;
}

/*
 * Debug interface: request demotion of a glock, input format
 * "<type>:<number> <mode>".
 */
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct gfs2_glock *gl;
	const struct gfs2_glock_operations *glops;
	unsigned int glmode;
	unsigned int gltype;
	unsigned long long glnum;
	char mode[16];
	int rv;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
		    mode);
	if (rv != 3)
		return -EINVAL;

	/* NOTE(review): "EX" maps to LM_ST_UNLOCKED (i.e. demote fully),
	 * not to an exclusive state — looks intentional since you cannot
	 * demote *to* EX, but confirm against glock demote semantics */
	if (strcmp(mode, "EX") == 0)
		glmode = LM_ST_UNLOCKED;
	else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
		glmode = LM_ST_DEFERRED;
	else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
		glmode = LM_ST_SHARED;
	else
		return -EINVAL;

	if (gltype > LM_TYPE_JOURNAL)
		return -EINVAL;
	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK)
		glops = &gfs2_trans_glops;
	else
		glops = gfs2_glops_list[gltype];
	if (glops == NULL)
		return -EINVAL;
	/* log only the first use of this debug interface */
	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
		fs_info(sdp, "demote interface used\n");
	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
	if (rv)
		return rv;
	gfs2_glock_cb(gl, glmode);
	gfs2_glock_put(gl);
	return len;
}


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id,                  0444, id_show,       NULL);
GFS2_ATTR(fsname,              0444, fsname_show,   NULL);
GFS2_ATTR(uuid,                0444, uuid_show,     NULL);
GFS2_ATTR(freeze,              0644, freeze_show,   freeze_store);
GFS2_ATTR(withdraw,            0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync,         0200, NULL,          statfs_sync_store);
GFS2_ATTR(quota_sync,          0200, NULL,          quota_sync_store);
GFS2_ATTR(quota_refresh_user,  0200, NULL,          quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL,          quota_refresh_group_store);
GFS2_ATTR(demote_rq,           0200, NULL,	    demote_rq_store);

static struct attribute *gfs2_attrs[] = {
	&gfs2_attr_id.attr,
	&gfs2_attr_fsname.attr,
	&gfs2_attr_uuid.attr,
	&gfs2_attr_freeze.attr,
	&gfs2_attr_withdraw.attr,
	&gfs2_attr_statfs_sync.attr,
	&gfs2_attr_quota_sync.attr,
	&gfs2_attr_quota_refresh_user.attr,
	&gfs2_attr_quota_refresh_group.attr,
	&gfs2_attr_demote_rq.attr,
	NULL,
};

static struct kobj_type gfs2_ktype = {
	.default_attrs = gfs2_attrs,
	.sysfs_ops     = &gfs2_attr_ops,
};


/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
	return sprintf(buf, "%s\n", ops->lm_proto_name);
}

/* 1 while the lock module is blocking new lock requests */
static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

/* write 1 to block locks, 0 to unblock (and thaw waiting glocks) */
static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret = len;
	int val;

	val = simple_strtol(buf, NULL, 0);

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->ls_flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_flags);
		smp_mb__after_clear_bit();
		gfs2_glock_thaw(sdp);
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first);
}

/*
 * Set whether this node is the first mounter.  Only valid before the
 * journal id is assigned (SDF_NOJOURNALID still set) and not for
 * spectator mounts.
 */
static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned first;
	int rv;

	rv = sscanf(buf, "%u", &first);
	if (rv != 1 || first > 1)
		return -EINVAL;
	/* wait until mount has reached the point where locking is set up */
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = -EINVAL;
	if (sdp->sd_args.ar_spectator)
		goto out;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	sdp->sd_lockstruct.ls_first = first;
	rv = 0;
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first_done);
}

/* request recovery of journal <jid>; our own journal is refused (-EBUSY) */
static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned jid;
	struct gfs2_jdesc *jd;
	int rv;

	rv = sscanf(buf, "%u", &jid);
	if (rv != 1)
		return -EINVAL;

	rv = -ESHUTDOWN;
	spin_lock(&sdp->sd_jindex_spin);
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags))
		goto out;
	rv = -EBUSY;
	if (sdp->sd_jdesc->jd_jid == jid)
		goto out;
	rv = -ENOENT;
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid != jid)
			continue;
		rv = gfs2_recover_journal(jd, false);
		break;
	}
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

/*
 * Assign our journal id (normally done by the cluster control daemon).
 * Clears SDF_NOJOURNALID and wakes the mount path waiting on it.
 */
static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int jid;
	int rv;

	rv = sscanf(buf, "%d", &jid);
	if (rv != 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EINVAL;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = 0;
	/* spectator mounts only accept a negative jid */
	if (sdp->sd_args.ar_spectator && jid > 0)
		rv = jid = -EINVAL;
	sdp->sd_lockstruct.ls_jid = jid;
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name,		0444, proto_name_show,		NULL);
GDLM_ATTR(block,		0644, block_show,		block_store);
GDLM_ATTR(withdraw,		0644, withdraw_show,		withdraw_store);
GDLM_ATTR(jid,			0644, jid_show,			jid_store);
GDLM_ATTR(first,		0644, lkfirst_show,		lkfirst_store);
GDLM_ATTR(first_done,		0444, first_done_show,		NULL);
GDLM_ATTR(recover,		0600, NULL,			recover_store);
GDLM_ATTR(recover_done,		0444, recover_done_show,	NULL);
GDLM_ATTR(recover_status,	0444, recover_status_show,	NULL);

static struct attribute *lock_module_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}

/* input is "numerator denominator"; denominator must be non-zero */
static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x, y;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	gt->gt_quota_scale_num = x;
	gt->gt_quota_scale_den = y;
	spin_unlock(&gt->gt_spin);
	return len;
}

/* common store helper for all simple tune fields */
static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	x = simple_strtoul(buf, NULL, 0);

	if (check_zero && !x)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
	&tune_attr_quota_warn_period.attr,
	&tune_attr_quota_quantum.attr,
	&tune_attr_max_readahead.attr,
	&tune_attr_complain_secs.attr,
	&tune_attr_statfs_slow.attr,
	&tune_attr_quota_simul_sync.attr,
	&tune_attr_statfs_quantum.attr,
	&tune_attr_quota_scale.attr,
	&tune_attr_new_files_jdata.attr,
	NULL,
};

static struct attribute_group tune_group = {
	.name = "tune",
	.attrs = tune_attrs,
};

static struct attribute_group lock_module_group = {
	.name = "lock_module",
	.attrs = lock_module_attrs,
};

/*
 * Register this superblock under /sys/fs/gfs2/<table>/ with the tune
 * and lock_module groups plus a "device" symlink, then announce it
 * via a KOBJ_ADD uevent carrying RDONLY/SPECTATOR state.
 */
int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };

	sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

	sdp->sd_kobj.kset = gfs2_kset;
	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
				     "%s", sdp->sd_table_name);
	if (error)
		goto fail;

	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
	if (error)
		goto fail_tune;

	error = sysfs_create_link(&sdp->sd_kobj,
				  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
				  "device");
	if (error)
		goto fail_lock_module;

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
	return 0;

fail_lock_module:
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
	kobject_put(&sdp->sd_kobj);
fail:
	/* NOTE(review): message lacks a trailing "\n" — confirm whether
	 * fs_err appends one; upstream later added it here */
	fs_err(sdp, "error %d adding sysfs files", error);
	return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
	sysfs_remove_link(&sdp->sd_kobj, "device");
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
	kobject_put(&sdp->sd_kobj);
}

/* supply LOCKTABLE/LOCKPROTO/JOURNALID/UUID env vars for gfs2 uevents */
static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
	if (gfs2_uuid_valid(uuid))
		add_uevent_var(env, "UUID=%pUB", uuid);
	return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
	.uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
	if (!gfs2_kset)
		return -ENOMEM;
	return 0;
}

void gfs2_sys_uninit(void)
{
	kset_unregister(gfs2_kset);
}
gpl-2.0
hiikezoe/android_kernel_nec_n06e
arch/arm/mach-at91/board-cpuat91.c
4731
4826
/*
 * linux/arch/arm/mach-at91/board-cpuat91.c
 *
 * Copyright (C) 2009 Eric Benard - eric@eukrea.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/plat-ram.h>

#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include <mach/cpu.h>

#include "generic.h"

/* single board LED on PC0, active low, heartbeat-blinking by default */
static struct gpio_led cpuat91_leds[] = {
	{
		.name			= "led1",
		.default_trigger	= "heartbeat",
		.active_low		= 1,
		.gpio			= AT91_PIN_PC0,
	},
};

/*
 * Early init: select the PQFP package variant, start the clocks from
 * the 18.432 MHz crystal, and register the five serial ports (DBGU +
 * USART0..3) with their modem-control lines.
 */
static void __init cpuat91_init_early(void)
{
	/* Set cpu type: PQFP */
	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);

	/* Initialize processor: 18.432 MHz crystal */
	at91_initialize(18432000);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) */
	at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS |
				ATMEL_UART_RTS);

	/* USART1 on ttyS2. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
	at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS |
				ATMEL_UART_RTS | ATMEL_UART_DTR |
				ATMEL_UART_DSR | ATMEL_UART_DCD |
				ATMEL_UART_RI);

	/* USART2 on ttyS3 (Rx, Tx) */
	at91_register_uart(AT91RM9200_ID_US2, 3, 0);

	/* USART3 on ttyS4 (Rx, Tx, CTS, RTS) */
	at91_register_uart(AT91RM9200_ID_US3, 4, ATMEL_UART_CTS |
				ATMEL_UART_RTS);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}

/* RMII ethernet, no PHY interrupt wired */
static struct macb_platform_data __initdata cpuat91_eth_data = {
	.phy_irq_pin	= -EINVAL,
	.is_rmii	= 1,
};

/* one USB host port, no VBUS/overcurrent GPIOs */
static struct at91_usbh_data __initdata cpuat91_usbh_data = {
	.ports		= 1,
	.vbus_pin	= {-EINVAL, -EINVAL},
	.overcurrent_pin = {-EINVAL, -EINVAL},
};

static struct at91_udc_data __initdata cpuat91_udc_data = {
	.vbus_pin	= AT91_PIN_PC15,
	.pullup_pin	= AT91_PIN_PC14,
};

/* 4-wire MMC with card-detect on PC2; no write-protect or vcc GPIO */
static struct at91_mmc_data __initdata cpuat91_mmc_data = {
	.det_pin	= AT91_PIN_PC2,
	.wire4		= 1,
	.wp_pin		= -EINVAL,
	.vcc_pin	= -EINVAL,
};

/* 16 MiB of 16-bit NOR flash on chip-select 0 */
static struct physmap_flash_data cpuat91_flash_data = {
	.width		= 2,
};

static struct resource cpuat91_flash_resource = {
	.start		= AT91_CHIPSELECT_0,
	.end		= AT91_CHIPSELECT_0 + SZ_16M - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device cpuat91_norflash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev	= {
		.platform_data	= &cpuat91_flash_data,
	},
	.resource	= &cpuat91_flash_resource,
	.num_resources	= 1,
};

#ifdef CONFIG_MTD_PLATRAM
/* expose the on-chip SRAM as an mtd-ram device */
struct platdata_mtd_ram at91_sram_pdata = {
	.mapname	= "SRAM",
	.bankwidth	= 2,
};

static struct resource at91_sram_resource[] = {
	[0] = {
		.start = AT91RM9200_SRAM_BASE,
		.end   = AT91RM9200_SRAM_BASE + AT91RM9200_SRAM_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device at91_sram = {
	.name		= "mtd-ram",
	.id		= 0,
	.resource	= at91_sram_resource,
	.num_resources	= ARRAY_SIZE(at91_sram_resource),
	.dev	= {
		.platform_data = &at91_sram_pdata,
	},
};
#endif /* MTD_PLATRAM */

static struct platform_device *platform_devices[] __initdata = {
	&cpuat91_norflash,
#ifdef CONFIG_MTD_PLATRAM
	&at91_sram,
#endif /* CONFIG_MTD_PLATRAM */
};

/* register all on-board peripherals with the at91 device helpers */
static void __init cpuat91_board_init(void)
{
	/* Serial */
	at91_add_device_serial();
	/* LEDs. */
	at91_gpio_leds(cpuat91_leds, ARRAY_SIZE(cpuat91_leds));
	/* Ethernet */
	at91_add_device_eth(&cpuat91_eth_data);
	/* USB Host */
	at91_add_device_usbh(&cpuat91_usbh_data);
	/* USB Device */
	at91_add_device_udc(&cpuat91_udc_data);
	/* MMC */
	at91_add_device_mmc(0, &cpuat91_mmc_data);
	/* I2C */
	at91_add_device_i2c(NULL, 0);
	/* Platform devices */
	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
}

MACHINE_START(CPUAT91, "Eukrea")
	/* Maintainer: Eric Benard - EUKREA Electromatique */
	.timer		= &at91rm9200_timer,
	.map_io		= at91_map_io,
	.init_early	= cpuat91_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= cpuat91_board_init,
MACHINE_END
gpl-2.0
SerenityS/android_kernel_lge_msm8974
sound/soc/tegra/tegra_i2s.c
4731
11309
/*
 * tegra_i2s.c - Tegra I2S driver
 *
 * Author: Stephen Warren <swarren@nvidia.com>
 * Copyright (C) 2010 - NVIDIA, Inc.
 *
 * Based on code copyright/by:
 *
 * Copyright (c) 2009-2010, NVIDIA Corporation.
 * Scott Peterson <speterson@nvidia.com>
 *
 * Copyright (C) 2010 Google, Inc.
 * Iliyan Malchev <malchev@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <mach/iomap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "tegra_i2s.h"

#define DRV_NAME "tegra-i2s"

/* MMIO register accessors */
static inline void tegra_i2s_write(struct tegra_i2s *i2s, u32 reg, u32 val)
{
	__raw_writel(val, i2s->regs + reg);
}

static inline u32 tegra_i2s_read(struct tegra_i2s *i2s, u32 reg)
{
	return __raw_readl(i2s->regs + reg);
}

#ifdef CONFIG_DEBUG_FS
/* dump all I2S registers; clock is gated on around the reads */
static int tegra_i2s_show(struct seq_file *s, void *unused)
{
#define REG(r) { r, #r }
	static const struct {
		int offset;
		const char *name;
	} regs[] = {
		REG(TEGRA_I2S_CTRL),
		REG(TEGRA_I2S_STATUS),
		REG(TEGRA_I2S_TIMING),
		REG(TEGRA_I2S_FIFO_SCR),
		REG(TEGRA_I2S_PCM_CTRL),
		REG(TEGRA_I2S_NW_CTRL),
		REG(TEGRA_I2S_TDM_CTRL),
		REG(TEGRA_I2S_TDM_TX_RX_CTRL),
	};
#undef REG

	struct tegra_i2s *i2s = s->private;
	int i;

	clk_enable(i2s->clk_i2s);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		u32 val = tegra_i2s_read(i2s, regs[i].offset);
		seq_printf(s, "%s = %08x\n", regs[i].name, val);
	}

	clk_disable(i2s->clk_i2s);

	return 0;
}

static int tegra_i2s_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_i2s_show, inode->i_private);
}

static const struct file_operations tegra_i2s_debug_fops = {
	.open    = tegra_i2s_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static void tegra_i2s_debug_add(struct tegra_i2s *i2s)
{
	i2s->debug = debugfs_create_file(i2s->dai.name, S_IRUGO,
					 snd_soc_debugfs_root, i2s,
					 &tegra_i2s_debug_fops);
}

static void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
{
	if (i2s->debug)
		debugfs_remove(i2s->debug);
}
#else
static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s)
{
}

static inline void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
{
}
#endif

/*
 * Configure DAI format bits in the cached CTRL value; only normal
 * bit/frame clock polarity is supported.
 */
static int tegra_i2s_set_fmt(struct snd_soc_dai *dai,
				unsigned int fmt)
{
	struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	default:
		return -EINVAL;
	}

	i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_MASTER_ENABLE;
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_MASTER_ENABLE;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	default:
		return -EINVAL;
	}

	i2s->reg_ctrl &= ~(TEGRA_I2S_CTRL_BIT_FORMAT_MASK |
				TEGRA_I2S_CTRL_LRCK_MASK);
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_DSP;
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_R_LOW;
		break;
	case SND_SOC_DAIFMT_I2S:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_I2S;
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_RJM;
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_FORMAT_LJM;
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_LRCK_L_LOW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Program sample size, bit clock rate and FIFO thresholds for the
 * requested hw params.  The clock is enabled only around the register
 * writes if no stream currently holds a clock reference.
 */
static int tegra_i2s_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct device *dev = substream->pcm->card->dev;
	struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	u32 reg;
	int ret, sample_size, srate, i2sclock, bitcnt;

	i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_BIT_SIZE_MASK;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_16;
		sample_size = 16;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_24;
		sample_size = 24;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		i2s->reg_ctrl |= TEGRA_I2S_CTRL_BIT_SIZE_32;
		sample_size = 32;
		break;
	default:
		return -EINVAL;
	}

	srate = params_rate(params);

	/* Final "* 2" required by Tegra hardware */
	i2sclock = srate * params_channels(params) * sample_size * 2;

	ret = clk_set_rate(i2s->clk_i2s, i2sclock);
	if (ret) {
		dev_err(dev, "Can't set I2S clock rate: %d\n", ret);
		return ret;
	}

	bitcnt = (i2sclock / (2 * srate)) - 1;
	if (bitcnt < 0 || bitcnt > TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
		return -EINVAL;
	reg = bitcnt << TEGRA_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;

	/* enable non-symmetric mode when the clock doesn't divide evenly */
	if (i2sclock % (2 * srate))
		reg |= TEGRA_I2S_TIMING_NON_SYM_ENABLE;

	if (!i2s->clk_refs)
		clk_enable(i2s->clk_i2s);

	tegra_i2s_write(i2s, TEGRA_I2S_TIMING, reg);

	tegra_i2s_write(i2s, TEGRA_I2S_FIFO_SCR,
		TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
		TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);

	if (!i2s->clk_refs)
		clk_disable(i2s->clk_i2s);

	return 0;
}

/* FIFO1 = playback, FIFO2 = capture */
static void tegra_i2s_start_playback(struct tegra_i2s *i2s)
{
	i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO1_ENABLE;
	tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
}

static void tegra_i2s_stop_playback(struct tegra_i2s *i2s)
{
	i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO1_ENABLE;
	tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
}

static void tegra_i2s_start_capture(struct tegra_i2s *i2s)
{
	i2s->reg_ctrl |= TEGRA_I2S_CTRL_FIFO2_ENABLE;
	tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
}

static void tegra_i2s_stop_capture(struct tegra_i2s *i2s)
{
	i2s->reg_ctrl &= ~TEGRA_I2S_CTRL_FIFO2_ENABLE;
	tegra_i2s_write(i2s, TEGRA_I2S_CTRL, i2s->reg_ctrl);
}

/*
 * Start/stop the appropriate FIFO; clk_refs counts active streams so
 * the I2S clock stays on while either direction is running.
 */
static int tegra_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
				struct snd_soc_dai *dai)
{
	struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (!i2s->clk_refs)
			clk_enable(i2s->clk_i2s);
		i2s->clk_refs++;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			tegra_i2s_start_playback(i2s);
		else
			tegra_i2s_start_capture(i2s);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			tegra_i2s_stop_playback(i2s);
		else
			tegra_i2s_stop_capture(i2s);
		i2s->clk_refs--;
		if (!i2s->clk_refs)
			clk_disable(i2s->clk_i2s);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* hook the per-direction DMA parameters up to the DAI */
static int tegra_i2s_probe(struct snd_soc_dai *dai)
{
	struct tegra_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	dai->capture_dma_data = &i2s->capture_dma_data;
	dai->playback_dma_data = &i2s->playback_dma_data;

	return 0;
}

static const struct snd_soc_dai_ops tegra_i2s_dai_ops = {
	.set_fmt	= tegra_i2s_set_fmt,
	.hw_params	= tegra_i2s_hw_params,
	.trigger	= tegra_i2s_trigger,
};

static const struct snd_soc_dai_driver tegra_i2s_dai_template = {
	.probe = tegra_i2s_probe,
	.playback = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_96000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_96000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.ops = &tegra_i2s_dai_ops,
	.symmetric_rates = 1,
};

/*
 * Bind the driver to a device: grab clock, MMIO region and DMA request
 * (from a platform resource, falling back to the DT
 * "nvidia,dma-request-selector" property), then register the DAI.
 */
static __devinit int tegra_i2s_platform_probe(struct platform_device *pdev)
{
	struct tegra_i2s *i2s;
	struct resource *mem, *memregion, *dmareq;
	u32 of_dma[2];
	u32 dma_ch;
	int ret;

	i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra_i2s), GFP_KERNEL);
	if (!i2s) {
		dev_err(&pdev->dev, "Can't allocate tegra_i2s\n");
		ret = -ENOMEM;
		goto err;
	}
	dev_set_drvdata(&pdev->dev, i2s);

	i2s->dai = tegra_i2s_dai_template;
	i2s->dai.name = dev_name(&pdev->dev);

	i2s->clk_i2s = clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2s->clk_i2s)) {
		dev_err(&pdev->dev, "Can't retrieve i2s clock\n");
		ret = PTR_ERR(i2s->clk_i2s);
		goto err;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "No memory resource\n");
		ret = -ENODEV;
		goto err_clk_put;
	}

	dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmareq) {
		if (of_property_read_u32_array(pdev->dev.of_node,
					"nvidia,dma-request-selector",
					of_dma, 2) < 0) {
			dev_err(&pdev->dev, "No DMA resource\n");
			ret = -ENODEV;
			goto err_clk_put;
		}
		dma_ch = of_dma[1];
	} else {
		dma_ch = dmareq->start;
	}

	memregion = devm_request_mem_region(&pdev->dev, mem->start,
					    resource_size(mem), DRV_NAME);
	if (!memregion) {
		dev_err(&pdev->dev, "Memory region already claimed\n");
		ret = -EBUSY;
		goto err_clk_put;
	}

	i2s->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!i2s->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_clk_put;
	}

	i2s->capture_dma_data.addr = mem->start + TEGRA_I2S_FIFO2;
	i2s->capture_dma_data.wrap = 4;
	i2s->capture_dma_data.width = 32;
	i2s->capture_dma_data.req_sel = dma_ch;

	i2s->playback_dma_data.addr = mem->start + TEGRA_I2S_FIFO1;
	i2s->playback_dma_data.wrap = 4;
	i2s->playback_dma_data.width = 32;
	i2s->playback_dma_data.req_sel = dma_ch;

	i2s->reg_ctrl = TEGRA_I2S_CTRL_FIFO_FORMAT_PACKED;

	ret = snd_soc_register_dai(&pdev->dev, &i2s->dai);
	if (ret) {
		dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
		/* BUGFIX: do not clobber the real error with -ENOMEM;
		 * propagate what snd_soc_register_dai() returned */
		goto err_clk_put;
	}

	tegra_i2s_debug_add(i2s);

	return 0;

err_clk_put:
	clk_put(i2s->clk_i2s);
err:
	return ret;
}

static int __devexit tegra_i2s_platform_remove(struct platform_device *pdev)
{
	struct tegra_i2s *i2s = dev_get_drvdata(&pdev->dev);

	snd_soc_unregister_dai(&pdev->dev);

	tegra_i2s_debug_remove(i2s);

	clk_put(i2s->clk_i2s);

	return 0;
}

static const struct of_device_id tegra_i2s_of_match[] __devinitconst = {
	{ .compatible = "nvidia,tegra20-i2s", },
	{},
};

static struct platform_driver tegra_i2s_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = tegra_i2s_of_match,
	},
	.probe = tegra_i2s_platform_probe,
	.remove = __devexit_p(tegra_i2s_platform_remove),
};
module_platform_driver(tegra_i2s_driver);

MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra I2S ASoC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DEVICE_TABLE(of, tegra_i2s_of_match);
gpl-2.0
omnirom/android_kernel_oppo_find5
arch/arm/mach-pxa/mioa701.c
4731
19985
/* * Handles the Mitac Mio A701 Board * * Copyright (C) 2008 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/gpio_keys.h> #include <linux/pwm_backlight.h> #include <linux/rtc.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pda_power.h> #include <linux/power_supply.h> #include <linux/wm97xx.h> #include <linux/mtd/physmap.h> #include <linux/usb/gpio_vbus.h> #include <linux/regulator/max1586.h> #include <linux/slab.h> #include <linux/i2c/pxa-i2c.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa27x.h> #include <mach/regs-rtc.h> #include <plat/pxa27x_keypad.h> #include <mach/pxafb.h> #include <mach/mmc.h> #include <mach/udc.h> #include <mach/pxa27x-udc.h> #include <mach/camera.h> #include <mach/audio.h> #include <mach/smemc.h> #include <media/soc_camera.h> #include <mach/mioa701.h> #include "generic.h" #include "devices.h" static unsigned long mioa701_pin_config[] = { /* Mio global */ MIO_CFG_OUT(GPIO9_CHARGE_EN, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO18_POWEROFF, AF0, DRIVE_LOW), MFP_CFG_OUT(GPIO3, AF0, DRIVE_HIGH), MFP_CFG_OUT(GPIO4, AF0, 
DRIVE_HIGH), MIO_CFG_IN(GPIO80_MAYBE_CHARGE_VDROP, AF0), /* Backlight PWM 0 */ GPIO16_PWM0_OUT, /* MMC */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, MIO_CFG_IN(GPIO78_SDIO_RO, AF0), MIO_CFG_IN(GPIO15_SDIO_INSERT, AF0), MIO_CFG_OUT(GPIO91_SDIO_EN, AF0, DRIVE_LOW), /* USB */ MIO_CFG_IN(GPIO13_nUSB_DETECT, AF0), MIO_CFG_OUT(GPIO22_USB_ENABLE, AF0, DRIVE_LOW), /* LCD */ GPIOxx_LCD_TFT_16BPP, /* QCI */ GPIO12_CIF_DD_7, GPIO17_CIF_DD_6, GPIO50_CIF_DD_3, GPIO51_CIF_DD_2, GPIO52_CIF_DD_4, GPIO53_CIF_MCLK, GPIO54_CIF_PCLK, GPIO55_CIF_DD_1, GPIO81_CIF_DD_0, GPIO82_CIF_DD_5, GPIO84_CIF_FV, GPIO85_CIF_LV, /* Bluetooth */ MIO_CFG_IN(GPIO14_BT_nACTIVITY, AF0), GPIO44_BTUART_CTS, GPIO42_BTUART_RXD, GPIO45_BTUART_RTS, GPIO43_BTUART_TXD, MIO_CFG_OUT(GPIO83_BT_ON, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO77_BT_UNKNOWN1, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO86_BT_MAYBE_nRESET, AF0, DRIVE_HIGH), /* GPS */ MIO_CFG_OUT(GPIO23_GPS_UNKNOWN1, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO26_GPS_ON, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO27_GPS_RESET, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO106_GPS_UNKNOWN2, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO107_GPS_UNKNOWN3, AF0, DRIVE_LOW), GPIO46_STUART_RXD, GPIO47_STUART_TXD, /* GSM */ MIO_CFG_OUT(GPIO24_GSM_MOD_RESET_CMD, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO88_GSM_nMOD_ON_CMD, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO90_GSM_nMOD_OFF_CMD, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO114_GSM_nMOD_DTE_UART_STATE, AF0, DRIVE_HIGH), MIO_CFG_IN(GPIO25_GSM_MOD_ON_STATE, AF0), MIO_CFG_IN(GPIO113_GSM_EVENT, AF0) | WAKEUP_ON_EDGE_BOTH, GPIO34_FFUART_RXD, GPIO35_FFUART_CTS, GPIO36_FFUART_DCD, GPIO37_FFUART_DSR, GPIO39_FFUART_TXD, GPIO40_FFUART_DTR, GPIO41_FFUART_RTS, /* Sound */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, GPIO89_AC97_SYSCLK, MIO_CFG_IN(GPIO12_HPJACK_INSERT, AF0), /* Leds */ MIO_CFG_OUT(GPIO10_LED_nCharging, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO97_LED_nBlue, AF0, DRIVE_HIGH), 
MIO_CFG_OUT(GPIO98_LED_nOrange, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO82_LED_nVibra, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO115_LED_nKeyboard, AF0, DRIVE_HIGH), /* Keyboard */ MIO_CFG_IN(GPIO0_KEY_POWER, AF0) | WAKEUP_ON_EDGE_BOTH, MIO_CFG_IN(GPIO93_KEY_VOLUME_UP, AF0), MIO_CFG_IN(GPIO94_KEY_VOLUME_DOWN, AF0), GPIO100_KP_MKIN_0, GPIO101_KP_MKIN_1, GPIO102_KP_MKIN_2, GPIO103_KP_MKOUT_0, GPIO104_KP_MKOUT_1, GPIO105_KP_MKOUT_2, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* Unknown */ MFP_CFG_IN(GPIO20, AF0), MFP_CFG_IN(GPIO21, AF0), MFP_CFG_IN(GPIO33, AF0), MFP_CFG_OUT(GPIO49, AF0, DRIVE_HIGH), MFP_CFG_OUT(GPIO57, AF0, DRIVE_HIGH), MFP_CFG_IN(GPIO96, AF0), MFP_CFG_OUT(GPIO116, AF0, DRIVE_HIGH), }; /* LCD Screen and Backlight */ static struct platform_pwm_backlight_data mioa701_backlight_data = { .pwm_id = 0, .max_brightness = 100, .dft_brightness = 50, .pwm_period_ns = 4000 * 1024, /* Fl = 250kHz */ }; /* * LTM0305A776C LCD panel timings * * see: * - the LTM0305A776C datasheet, * - and the PXA27x Programmers' manual */ static struct pxafb_mode_info mioa701_ltm0305a776c = { .pixclock = 220000, /* CLK=4.545 MHz */ .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 4, .vsync_len = 2, .left_margin = 6, .right_margin = 4, .upper_margin = 5, .lower_margin = 3, }; static void mioa701_lcd_power(int on, struct fb_var_screeninfo *si) { gpio_set_value(GPIO87_LCD_POWER, on); } static struct pxafb_mach_info mioa701_pxafb_info = { .modes = &mioa701_ltm0305a776c, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = mioa701_lcd_power, }; /* * Keyboard configuration */ static unsigned int mioa701_matrix_keys[] = { KEY(0, 0, KEY_UP), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_MEDIA), KEY(1, 0, KEY_DOWN), KEY(1, 1, KEY_ENTER), KEY(1, 2, KEY_CONNECT), /* GPS key */ KEY(2, 0, KEY_LEFT), KEY(2, 1, KEY_PHONE), /* Phone Green key */ KEY(2, 2, KEY_CAMERA) /* Camera key */ }; static struct pxa27x_keypad_platform_data mioa701_keypad_info = { .matrix_key_rows = 3, 
.matrix_key_cols = 3, .matrix_key_map = mioa701_matrix_keys, .matrix_key_map_size = ARRAY_SIZE(mioa701_matrix_keys), }; /* * GPIO Key Configuration */ #define MIO_KEY(key, _gpio, _desc, _wakeup) \ { .code = (key), .gpio = (_gpio), .active_low = 0, \ .desc = (_desc), .type = EV_KEY, .wakeup = (_wakeup) } static struct gpio_keys_button mioa701_button_table[] = { MIO_KEY(KEY_EXIT, GPIO0_KEY_POWER, "Power button", 1), MIO_KEY(KEY_VOLUMEUP, GPIO93_KEY_VOLUME_UP, "Volume up", 0), MIO_KEY(KEY_VOLUMEDOWN, GPIO94_KEY_VOLUME_DOWN, "Volume down", 0), MIO_KEY(KEY_HP, GPIO12_HPJACK_INSERT, "HP jack detect", 0) }; static struct gpio_keys_platform_data mioa701_gpio_keys_data = { .buttons = mioa701_button_table, .nbuttons = ARRAY_SIZE(mioa701_button_table), }; /* * Leds and vibrator */ #define ONE_LED(_gpio, _name) \ { .gpio = (_gpio), .name = (_name), .active_low = true } static struct gpio_led gpio_leds[] = { ONE_LED(GPIO10_LED_nCharging, "mioa701:charging"), ONE_LED(GPIO97_LED_nBlue, "mioa701:blue"), ONE_LED(GPIO98_LED_nOrange, "mioa701:orange"), ONE_LED(GPIO82_LED_nVibra, "mioa701:vibra"), ONE_LED(GPIO115_LED_nKeyboard, "mioa701:keyboard") }; static struct gpio_led_platform_data gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; /* * GSM Sagem XS200 chip * * GSM handling was purged from kernel. For history, this is the way to go : * - init : GPIO24_GSM_MOD_RESET_CMD = 0, GPIO114_GSM_nMOD_DTE_UART_STATE = 1 * GPIO88_GSM_nMOD_ON_CMD = 1, GPIO90_GSM_nMOD_OFF_CMD = 1 * - reset : GPIO24_GSM_MOD_RESET_CMD = 1, msleep(100), * GPIO24_GSM_MOD_RESET_CMD = 0 * - turn on : GPIO88_GSM_nMOD_ON_CMD = 0, msleep(1000), * GPIO88_GSM_nMOD_ON_CMD = 1 * - turn off : GPIO90_GSM_nMOD_OFF_CMD = 0, msleep(1000), * GPIO90_GSM_nMOD_OFF_CMD = 1 */ static int is_gsm_on(void) { int is_on; is_on = !!gpio_get_value(GPIO25_GSM_MOD_ON_STATE); return is_on; } irqreturn_t gsm_on_irq(int irq, void *p) { printk(KERN_DEBUG "Mioa701: GSM status changed to %s\n", is_gsm_on() ? 
"on" : "off"); return IRQ_HANDLED; } static struct gpio gsm_gpios[] = { { GPIO25_GSM_MOD_ON_STATE, GPIOF_IN, "GSM state" }, { GPIO113_GSM_EVENT, GPIOF_IN, "GSM event" }, }; static int __init gsm_init(void) { int rc; rc = gpio_request_array(ARRAY_AND_SIZE(gsm_gpios)); if (rc) goto err_gpio; rc = request_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), gsm_on_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "GSM XS200 Power Irq", NULL); if (rc) goto err_irq; gpio_set_wake(GPIO113_GSM_EVENT, 1); return 0; err_irq: printk(KERN_ERR "Mioa701: Can't request GSM_ON irq\n"); gpio_free_array(ARRAY_AND_SIZE(gsm_gpios)); err_gpio: printk(KERN_ERR "Mioa701: gsm not available\n"); return rc; } static void gsm_exit(void) { free_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), NULL); gpio_free_array(ARRAY_AND_SIZE(gsm_gpios)); } /* * Bluetooth BRF6150 chip * * BT handling was purged from kernel. For history, this is the way to go : * - turn on : GPIO83_BT_ON = 1 * - turn off : GPIO83_BT_ON = 0 */ /* * GPS Sirf Star III chip * * GPS handling was purged from kernel. For history, this is the way to go : * - init : GPIO23_GPS_UNKNOWN1 = 1, GPIO26_GPS_ON = 0, GPIO27_GPS_RESET = 0 * GPIO106_GPS_UNKNOWN2 = 0, GPIO107_GPS_UNKNOWN3 = 0 * - turn on : GPIO27_GPS_RESET = 1, GPIO26_GPS_ON = 1 * - turn off : GPIO26_GPS_ON = 0, GPIO27_GPS_RESET = 0 */ /* * USB UDC */ static int is_usb_connected(void) { return !gpio_get_value(GPIO13_nUSB_DETECT); } static struct pxa2xx_udc_mach_info mioa701_udc_info = { .udc_is_connected = is_usb_connected, .gpio_pullup = GPIO22_USB_ENABLE, }; struct gpio_vbus_mach_info gpio_vbus_data = { .gpio_vbus = GPIO13_nUSB_DETECT, .gpio_vbus_inverted = 1, .gpio_pullup = -1, }; /* * SDIO/MMC Card controller */ /** * The card detect interrupt isn't debounced so we delay it by 250ms * to give the card a chance to fully insert/eject. 
*/ static struct pxamci_platform_data mioa701_mci_info = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .gpio_card_detect = GPIO15_SDIO_INSERT, .gpio_card_ro = GPIO78_SDIO_RO, .gpio_power = GPIO91_SDIO_EN, }; /* FlashRAM */ static struct resource docg3_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_8K - 1, .flags = IORESOURCE_MEM, }; static struct platform_device docg3 = { .name = "docg3", .id = -1, .resource = &docg3_resource, .num_resources = 1, .dev = { .platform_data = NULL, }, }; /* * Suspend/Resume bootstrap management * * MIO A701 reboot sequence is highly ROM dependent. From the one dissassembled, * this sequence is as follows : * - disables interrupts * - initialize SDRAM (self refresh RAM into active RAM) * - initialize GPIOs (depends on value at 0xa020b020) * - initialize coprossessors * - if edge detect on PWR_SCL(GPIO3), then proceed to cold start * - or if value at 0xa020b000 not equal to 0x0f0f0f0f, proceed to cold start * - else do a resume, ie. jump to addr 0xa0100000 */ #define RESUME_ENABLE_ADDR 0xa020b000 #define RESUME_ENABLE_VAL 0x0f0f0f0f #define RESUME_BT_ADDR 0xa020b020 #define RESUME_UNKNOWN_ADDR 0xa020b024 #define RESUME_VECTOR_ADDR 0xa0100000 #define BOOTSTRAP_WORDS mioa701_bootstrap_lg/4 static u32 *save_buffer; static void install_bootstrap(void) { int i; u32 *rom_bootstrap = phys_to_virt(RESUME_VECTOR_ADDR); u32 *src = &mioa701_bootstrap; for (i = 0; i < BOOTSTRAP_WORDS; i++) rom_bootstrap[i] = src[i]; } static int mioa701_sys_suspend(void) { int i = 0, is_bt_on; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); u32 *mem_resume_enabler = phys_to_virt(RESUME_ENABLE_ADDR); u32 *mem_resume_bt = phys_to_virt(RESUME_BT_ADDR); u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR); /* Devices prepare suspend */ is_bt_on = !!gpio_get_value(GPIO83_BT_ON); pxa2xx_mfp_set_lpm(GPIO83_BT_ON, is_bt_on ? 
MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW); for (i = 0; i < BOOTSTRAP_WORDS; i++) save_buffer[i] = mem_resume_vector[i]; save_buffer[i++] = *mem_resume_enabler; save_buffer[i++] = *mem_resume_bt; save_buffer[i++] = *mem_resume_unknown; *mem_resume_enabler = RESUME_ENABLE_VAL; *mem_resume_bt = is_bt_on; install_bootstrap(); return 0; } static void mioa701_sys_resume(void) { int i = 0; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); u32 *mem_resume_enabler = phys_to_virt(RESUME_ENABLE_ADDR); u32 *mem_resume_bt = phys_to_virt(RESUME_BT_ADDR); u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR); for (i = 0; i < BOOTSTRAP_WORDS; i++) mem_resume_vector[i] = save_buffer[i]; *mem_resume_enabler = save_buffer[i++]; *mem_resume_bt = save_buffer[i++]; *mem_resume_unknown = save_buffer[i++]; } static struct syscore_ops mioa701_syscore_ops = { .suspend = mioa701_sys_suspend, .resume = mioa701_sys_resume, }; static int __init bootstrap_init(void) { int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); register_syscore_ops(&mioa701_syscore_ops); save_buffer = kmalloc(save_size, GFP_KERNEL); if (!save_buffer) return -ENOMEM; printk(KERN_INFO "MioA701: allocated %d bytes for bootstrap\n", save_size); return 0; } static void bootstrap_exit(void) { kfree(save_buffer); unregister_syscore_ops(&mioa701_syscore_ops); printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" "resume !!!\n"); } /* * Power Supply */ static char *supplicants[] = { "mioa701_battery" }; static int is_ac_connected(void) { return gpio_get_value(GPIO96_AC_DETECT); } static void mioa701_set_charge(int flags) { gpio_set_value(GPIO9_CHARGE_EN, (flags == PDA_POWER_CHARGE_USB)); } static struct pda_power_pdata power_pdata = { .is_ac_online = is_ac_connected, .is_usb_online = is_usb_connected, .set_charge = mioa701_set_charge, .supplied_to = supplicants, .num_supplicants = ARRAY_SIZE(supplicants), }; static struct resource power_resources[] = { [0] = { .name = "ac", .start = 
PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT), .end = PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, }, [1] = { .name = "usb", .start = PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT), .end = PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device power_dev = { .name = "pda-power", .id = -1, .resource = power_resources, .num_resources = ARRAY_SIZE(power_resources), .dev = { .platform_data = &power_pdata, }, }; static struct wm97xx_batt_pdata mioa701_battery_data = { .batt_aux = WM97XX_AUX_ID1, .temp_aux = -1, .charge_gpio = -1, .min_voltage = 0xc00, .max_voltage = 0xfc0, .batt_tech = POWER_SUPPLY_TECHNOLOGY_LION, .batt_div = 1, .batt_mult = 1, .batt_name = "mioa701_battery", }; static struct wm97xx_pdata mioa701_wm97xx_pdata = { .batt_pdata = &mioa701_battery_data, }; /* * Voltage regulation */ static struct regulator_consumer_supply max1586_consumers[] = { { .supply = "vcc_core", } }; static struct regulator_init_data max1586_v3_info = { .constraints = { .name = "vcc_core range", .min_uV = 1000000, .max_uV = 1705000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .num_consumer_supplies = ARRAY_SIZE(max1586_consumers), .consumer_supplies = max1586_consumers, }; static struct max1586_subdev_data max1586_subdevs[] = { { .name = "vcc_core", .id = MAX1586_V3, .platform_data = &max1586_v3_info }, }; static struct max1586_platform_data max1586_info = { .subdevs = max1586_subdevs, .num_subdevs = ARRAY_SIZE(max1586_subdevs), .v3_gain = MAX1586_GAIN_NO_R24, /* 700..1475 mV */ }; /* * Camera interface */ struct pxacamera_platform_data mioa701_pxacamera_platform_data = { .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN, .mclk_10khz = 5000, }; static struct i2c_board_info __initdata mioa701_pi2c_devices[] = { { I2C_BOARD_INFO("max1586", 0x14), .platform_data = &max1586_info, }, }; /* 
Board I2C devices. */ static struct i2c_board_info mioa701_i2c_devices[] = { { I2C_BOARD_INFO("mt9m111", 0x5d), }, }; static struct soc_camera_link iclink = { .bus_id = 0, /* Match id in pxa27x_device_camera in device.c */ .board_info = &mioa701_i2c_devices[0], .i2c_adapter_id = 0, }; struct i2c_pxa_platform_data i2c_pdata = { .fast_mode = 1, }; static pxa2xx_audio_ops_t mioa701_ac97_info = { .reset_gpio = 95, .codec_pdata = { &mioa701_wm97xx_pdata, }, }; /* * Mio global */ /* Devices */ #define MIO_PARENT_DEV(var, strname, tparent, pdata) \ static struct platform_device var = { \ .name = strname, \ .id = -1, \ .dev = { \ .platform_data = pdata, \ .parent = tparent, \ }, \ }; #define MIO_SIMPLE_DEV(var, strname, pdata) \ MIO_PARENT_DEV(var, strname, NULL, pdata) MIO_SIMPLE_DEV(mioa701_gpio_keys, "gpio-keys", &mioa701_gpio_keys_data) MIO_PARENT_DEV(mioa701_backlight, "pwm-backlight", &pxa27x_device_pwm0.dev, &mioa701_backlight_data); MIO_SIMPLE_DEV(mioa701_led, "leds-gpio", &gpio_led_info) MIO_SIMPLE_DEV(pxa2xx_pcm, "pxa2xx-pcm", NULL) MIO_SIMPLE_DEV(mioa701_sound, "mioa701-wm9713", NULL) MIO_SIMPLE_DEV(mioa701_board, "mioa701-board", NULL) MIO_SIMPLE_DEV(gpio_vbus, "gpio-vbus", &gpio_vbus_data); MIO_SIMPLE_DEV(mioa701_camera, "soc-camera-pdrv",&iclink); static struct platform_device *devices[] __initdata = { &mioa701_gpio_keys, &mioa701_backlight, &mioa701_led, &pxa2xx_pcm, &mioa701_sound, &power_dev, &docg3, &gpio_vbus, &mioa701_camera, &mioa701_board, }; static void mioa701_machine_exit(void); static void mioa701_poweroff(void) { mioa701_machine_exit(); pxa_restart('s', NULL); } static void mioa701_restart(char c, const char *cmd) { mioa701_machine_exit(); pxa_restart('s', cmd); } static struct gpio global_gpios[] = { { GPIO9_CHARGE_EN, GPIOF_OUT_INIT_HIGH, "Charger enable" }, { GPIO18_POWEROFF, GPIOF_OUT_INIT_LOW, "Power Off" }, { GPIO87_LCD_POWER, GPIOF_OUT_INIT_LOW, "LCD Power" }, }; static void __init mioa701_machine_init(void) { int rc; PSLR = 0xff100000; /* 
SYSDEL=125ms, PWRDEL=125ms, PSLR_SL_ROD=1 */ PCFR = PCFR_DC_EN | PCFR_GPR_EN | PCFR_OPDE; RTTR = 32768 - 1; /* Reset crazy WinCE value */ UP2OCR = UP2OCR_HXOE; /* * Set up the flash memory : DiskOnChip G3 on first static memory bank */ __raw_writel(0x7ff02dd8, MSC0); __raw_writel(0x0001c391, MCMEM0); __raw_writel(0x0001c391, MCATT0); __raw_writel(0x0001c391, MCIO0); pxa2xx_mfp_config(ARRAY_AND_SIZE(mioa701_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); rc = gpio_request_array(ARRAY_AND_SIZE(global_gpios)); if (rc) pr_err("MioA701: Failed to request GPIOs: %d", rc); bootstrap_init(); pxa_set_fb_info(NULL, &mioa701_pxafb_info); pxa_set_mci_info(&mioa701_mci_info); pxa_set_keypad_info(&mioa701_keypad_info); pxa_set_udc_info(&mioa701_udc_info); pxa_set_ac97_info(&mioa701_ac97_info); pm_power_off = mioa701_poweroff; platform_add_devices(devices, ARRAY_SIZE(devices)); gsm_init(); i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices)); pxa_set_i2c_info(&i2c_pdata); pxa27x_set_i2c_power_info(NULL); pxa_set_camera_info(&mioa701_pxacamera_platform_data); } static void mioa701_machine_exit(void) { bootstrap_exit(); gsm_exit(); } MACHINE_START(MIOA701, "MIO A701") .atag_offset = 0x100, .restart_mode = 's', .map_io = &pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = &pxa27x_init_irq, .handle_irq = &pxa27x_handle_irq, .init_machine = mioa701_machine_init, .timer = &pxa_timer, .restart = mioa701_restart, MACHINE_END
gpl-2.0
engine95/navel-855
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
4987
62846
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include "qlge.h" /* Read a NIC register from the alternate function. */ static u32 ql_read_other_func_reg(struct ql_adapter *qdev, u32 reg) { u32 register_to_read; u32 reg_val; unsigned int status = 0; register_to_read = MPI_NIC_REG_BLOCK | MPI_NIC_READ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) | reg; status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); if (status != 0) return 0xffffffff; return reg_val; } /* Write a NIC register from the alternate function. */ static int ql_write_other_func_reg(struct ql_adapter *qdev, u32 reg, u32 reg_val) { u32 register_to_read; int status = 0; register_to_read = MPI_NIC_REG_BLOCK | MPI_NIC_READ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) | reg; status = ql_write_mpi_reg(qdev, register_to_read, reg_val); return status; } static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) { u32 temp; int count = 10; while (count) { temp = ql_read_other_func_reg(qdev, reg); /* check for errors */ if (temp & err_bit) return -1; else if (temp & bit) return 0; mdelay(10); count--; } return -1; } static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data) { int status; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* set up for reg read */ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* get the data */ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); exit: return status; } /* Read out the SERDES registers */ static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) { int status; /* wait for reg to come ready */ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* set up for reg 
read */ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* get the data */ *data = ql_read32(qdev, XG_SERDES_DATA); exit: return status; } static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, u32 *direct_ptr, u32 *indirect_ptr, unsigned int direct_valid, unsigned int indirect_valid) { unsigned int status; status = 1; if (direct_valid) status = ql_read_serdes_reg(qdev, addr, direct_ptr); /* Dead fill any failures or invalids. */ if (status) *direct_ptr = 0xDEADBEEF; status = 1; if (indirect_valid) status = ql_read_other_func_serdes_reg( qdev, addr, indirect_ptr); /* Dead fill any failures or invalids. */ if (status) *indirect_ptr = 0xDEADBEEF; } static int ql_get_serdes_regs(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) { int status; unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; unsigned int xaui_indirect_valid, i; u32 *direct_ptr, temp; u32 *indirect_ptr; xfi_direct_valid = xfi_indirect_valid = 0; xaui_direct_valid = xaui_indirect_valid = 1; /* The XAUI needs to be read out per port */ if (qdev->func & 1) { /* We are NIC 2 */ status = ql_read_other_func_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_indirect_valid = 0; status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_direct_valid = 0; } else { /* We are NIC 1 */ status = ql_read_other_func_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_indirect_valid = 0; status = ql_read_serdes_reg(qdev, 
XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_direct_valid = 0; } /* * XFI register is shared so only need to read one * functions and then check the bits. */ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); if (status) temp = 0; if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == XG_SERDES_ADDR_XFI1_PWR_UP) { /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ xfi_indirect_valid = 1; else xfi_direct_valid = 1; } if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == XG_SERDES_ADDR_XFI2_PWR_UP) { /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ xfi_direct_valid = 1; else xfi_indirect_valid = 1; } /* Get XAUI_AN register block. */ if (qdev->func & 1) { /* Function 2 is direct */ direct_ptr = mpi_coredump->serdes2_xaui_an; indirect_ptr = mpi_coredump->serdes_xaui_an; } else { /* Function 1 is direct */ direct_ptr = mpi_coredump->serdes_xaui_an; indirect_ptr = mpi_coredump->serdes2_xaui_an; } for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_HSS_PCS register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xaui_hss_pcs; indirect_ptr = mpi_coredump->serdes_xaui_hss_pcs; } else { direct_ptr = mpi_coredump->serdes_xaui_hss_pcs; indirect_ptr = mpi_coredump->serdes2_xaui_hss_pcs; } for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_XFI_AN register block. 
*/ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_an; indirect_ptr = mpi_coredump->serdes_xfi_an; } else { direct_ptr = mpi_coredump->serdes_xfi_an; indirect_ptr = mpi_coredump->serdes2_xfi_an; } for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_TRAIN register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_train; indirect_ptr = mpi_coredump->serdes_xfi_train; } else { direct_ptr = mpi_coredump->serdes_xfi_train; indirect_ptr = mpi_coredump->serdes2_xfi_train; } for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PCS register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_pcs; indirect_ptr = mpi_coredump->serdes_xfi_hss_pcs; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_pcs; indirect_ptr = mpi_coredump->serdes2_xfi_hss_pcs; } for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_TX register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_tx; indirect_ptr = mpi_coredump->serdes_xfi_hss_tx; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_tx; indirect_ptr = mpi_coredump->serdes2_xfi_hss_tx; } for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_RX register block. 
*/ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_rx; indirect_ptr = mpi_coredump->serdes_xfi_hss_rx; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_rx; indirect_ptr = mpi_coredump->serdes2_xfi_hss_rx; } for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PLL register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_pll; indirect_ptr = mpi_coredump->serdes_xfi_hss_pll; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_pll; indirect_ptr = mpi_coredump->serdes2_xfi_hss_pll; } for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); return 0; } static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) { int status = 0; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; /* set up for reg read */ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; /* get the data */ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); exit: return status; } /* Read the 400 xgmac control/statistics registers * skipping unused locations. */ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, unsigned int other_function) { int status = 0; int i; for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { /* We're reading 400 xgmac registers, but we filter out * serveral locations that are non-responsive to reads. 
*/ if ((i == 0x00000114) || (i == 0x00000118) || (i == 0x0000013c) || (i == 0x00000140) || (i > 0x00000150 && i < 0x000001fc) || (i > 0x00000278 && i < 0x000002a0) || (i > 0x000002c0 && i < 0x000002cf) || (i > 0x000002dc && i < 0x000002f0) || (i > 0x000003c8 && i < 0x00000400) || (i > 0x00000400 && i < 0x00000410) || (i > 0x00000410 && i < 0x00000420) || (i > 0x00000420 && i < 0x00000430) || (i > 0x00000430 && i < 0x00000440) || (i > 0x00000440 && i < 0x00000450) || (i > 0x00000450 && i < 0x00000500) || (i > 0x0000054c && i < 0x00000568) || (i > 0x000005c8 && i < 0x00000600)) { if (other_function) status = ql_read_other_func_xgmac_reg(qdev, i, buf); else status = ql_read_xgmac_reg(qdev, i, buf); if (status) *buf = 0xdeadbeef; break; } } return status; } static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) { int status = 0; int i; for (i = 0; i < 8; i++, buf++) { ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); *buf = ql_read32(qdev, NIC_ETS); } for (i = 0; i < 2; i++, buf++) { ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); *buf = ql_read32(qdev, CNA_ETS); } return status; } static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) { int i; for (i = 0; i < qdev->rx_ring_count; i++, buf++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); *buf = ql_read32(qdev, INTR_EN); } } static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) { int i, status; u32 value[3]; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; for (i = 0; i < 16; i++) { status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index register\n"); goto err; } *buf++ = value[0]; /* lower MAC address */ *buf++ = value[1]; /* upper MAC address */ *buf++ = value[2]; /* output */ } for (i = 0; i < 32; i++) { status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index 
register\n"); goto err; } *buf++ = value[0]; /* lower Mcast address */ *buf++ = value[1]; /* upper Mcast address */ } err: ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); return status; } static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) { int status; u32 value, i; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; for (i = 0; i < 16; i++) { status = ql_get_routing_reg(qdev, i, &value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of routing index register\n"); goto err; } else { *buf++ = value; } } err: ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Read the MPI Processor shadow registers */ static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) { u32 i; int status; for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { status = ql_write_mpi_reg(qdev, RISC_124, (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); if (status) goto end; status = ql_read_mpi_reg(qdev, RISC_127, buf); if (status) goto end; } end: return status; } /* Read the MPI Processor core registers */ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, u32 offset, u32 count) { int i, status = 0; for (i = 0; i < count; i++, buf++) { status = ql_read_mpi_reg(qdev, offset + i, buf); if (status) return status; } return status; } /* Read the ASIC probe dump */ static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, u32 valid, u32 *buf) { u32 module, mux_sel, probe, lo_val, hi_val; for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { if (!((valid >> module) & 1)) continue; for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { probe = clock | PRB_MX_ADDR_ARE | mux_sel | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); ql_write32(qdev, PRB_MX_ADDR, probe); lo_val = ql_read32(qdev, PRB_MX_DATA); if (mux_sel == 0) { *buf = probe; buf++; } probe |= PRB_MX_ADDR_UP; ql_write32(qdev, PRB_MX_ADDR, probe); hi_val = ql_read32(qdev, PRB_MX_DATA); *buf = lo_val; buf++; *buf = hi_val; buf++; } } return buf; } static int 
ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) { /* First we have to enable the probe mux */ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, PRB_MX_ADDR_VALID_SYS_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, PRB_MX_ADDR_VALID_PCI_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, PRB_MX_ADDR_VALID_XGM_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, PRB_MX_ADDR_VALID_FC_MOD, buf); return 0; } /* Read out the routing index registers */ static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) { int status; u32 type, index, index_max; u32 result_index; u32 result_data; u32 val; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; for (type = 0; type < 4; type++) { if (type < 2) index_max = 8; else index_max = 16; for (index = 0; index < index_max; index++) { val = RT_IDX_RS | (type << RT_IDX_TYPE_SHIFT) | (index << RT_IDX_IDX_SHIFT); ql_write32(qdev, RT_IDX, val); result_index = 0; while ((result_index & RT_IDX_MR) == 0) result_index = ql_read32(qdev, RT_IDX); result_data = ql_read32(qdev, RT_DATA); *buf = type; buf++; *buf = index; buf++; *buf = result_index; buf++; *buf = result_data; buf++; } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Read out the MAC protocol registers */ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) { u32 result_index, result_data; u32 type; u32 index; u32 offset; u32 val; u32 initial_val = MAC_ADDR_RS; u32 max_index; u32 max_offset; for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { switch (type) { case 0: /* CAM */ initial_val |= MAC_ADDR_ADR; max_index = MAC_ADDR_MAX_CAM_ENTRIES; max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 1: /* Multicast MAC Address */ max_index = MAC_ADDR_MAX_CAM_WCOUNT; max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 2: /* VLAN filter mask */ case 3: /* MC filter mask */ max_index = MAC_ADDR_MAX_CAM_WCOUNT; 
max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 4: /* FC MAC addresses */ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; break; case 5: /* Mgmt MAC addresses */ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; break; case 6: /* Mgmt VLAN addresses */ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; break; case 7: /* Mgmt IPv4 address */ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; break; case 8: /* Mgmt IPv6 address */ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; break; case 9: /* Mgmt TCP/UDP Dest port */ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; break; default: pr_err("Bad type!!! 0x%08x\n", type); max_index = 0; max_offset = 0; break; } for (index = 0; index < max_index; index++) { for (offset = 0; offset < max_offset; offset++) { val = initial_val | (type << MAC_ADDR_TYPE_SHIFT) | (index << MAC_ADDR_IDX_SHIFT) | (offset); ql_write32(qdev, MAC_ADDR_IDX, val); result_index = 0; while ((result_index & MAC_ADDR_MR) == 0) { result_index = ql_read32(qdev, MAC_ADDR_IDX); } result_data = ql_read32(qdev, MAC_ADDR_DATA); *buf = result_index; buf++; *buf = result_data; buf++; } } } } static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) { u32 func_num, reg, reg_val; int status; for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) { reg = MPI_NIC_REG_BLOCK | (func_num << MPI_NIC_FUNCTION_SHIFT) | (SEM / 4); status = ql_read_mpi_reg(qdev, reg, &reg_val); *buf = reg_val; /* if the read failed then dead fill the element. 
*/ if (!status) *buf = 0xdeadbeef; buf++; } } /* Create a coredump segment header */ static void ql_build_coredump_seg_header( struct mpi_coredump_segment_header *seg_hdr, u32 seg_number, u32 seg_size, u8 *desc) { memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); seg_hdr->cookie = MPI_COREDUMP_COOKIE; seg_hdr->segNum = seg_number; seg_hdr->segSize = seg_size; memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); } /* * This function should be called when a coredump / probedump * is to be extracted from the HBA. It is assumed there is a * qdev structure that contains the base address of the register * space for this function as well as a coredump structure that * will contain the dump. */ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) { int status; int i; if (!mpi_coredump) { netif_err(qdev, drv, qdev->ndev, "No memory available\n"); return -ENOMEM; } /* Try to get the spinlock, but dont worry if * it isn't available. If the firmware died it * might be holding the sem. */ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); status = ql_pause_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC pause. 
Status = 0x%.08x\n", status); goto err; } /* Insert the global header */ memset(&(mpi_coredump->mpi_global_header), 0, sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_mpi_coredump); memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* Get generic NIC reg dump */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, NIC2_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); /* Get XGMac registers. (Segment 18, Rev C. step 21) */ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, NIC1_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, NIC2_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); if (qdev->func & 1) { /* Odd means our function is NIC 2 */ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic2_regs[i] = ql_read32(qdev, i * sizeof(u32)); for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic_regs[i] = ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); } else { /* Even means our function is NIC 1 */ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic2_regs[i] = ql_read_other_func_reg(qdev, (i * sizeof(u32)) 
/ 4); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); } /* Rev C. Step 20a */ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, XAUI_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_an), "XAUI AN Registers"); /* Rev C. Step 20b */ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, XAUI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_hss_pcs), "XAUI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_an), "XFI AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, XFI_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_train), "XFI TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, XFI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pcs), "XFI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, XFI_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_tx), "XFI HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, XFI_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_rx), "XFI HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, XFI_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pll), "XFI HSS PLL Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, XAUI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xaui_an), "XAUI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, XAUI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + 
sizeof(mpi_coredump->serdes2_xaui_hss_pcs), "XAUI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, XFI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_an), "XFI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, XFI2_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_train), "XFI2 TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, XFI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), "XFI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, XFI2_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_tx), "XFI2 HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, XFI2_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_rx), "XFI2 HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, XFI2_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pll), "XFI2 HSS PLL Registers"); status = ql_get_serdes_regs(qdev, mpi_coredump); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", status); goto err; } ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, CORE_SEG_NUM, sizeof(mpi_coredump->core_regs_seg_hdr) + sizeof(mpi_coredump->mpi_core_regs) + sizeof(mpi_coredump->mpi_core_sh_regs), "Core Registers"); /* Get the MPI Core Registers */ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); if (status) goto err; /* Get the 16 MPI shadow registers */ status = ql_get_mpi_shadow_regs(qdev, &mpi_coredump->mpi_core_sh_regs[0]); if (status) goto err; /* Get the Test Logic Registers */ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, TEST_LOGIC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->test_logic_regs), "Test Logic Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], TEST_REGS_ADDR, TEST_REGS_CNT); if (status) goto err; /* Get the RMII Registers */ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, RMII_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->rmii_regs), "RMII Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], RMII_REGS_ADDR, RMII_REGS_CNT); if (status) goto err; /* Get the FCMAC1 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, FCMAC1_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac1_regs), "FCMAC1 Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); if (status) goto err; /* Get the FCMAC2 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, FCMAC2_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac2_regs), "FCMAC2 Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); if (status) goto err; /* Get the FC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, FC1_MBOX_SEG_NUM, 
sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc1_mbx_regs), "FC1 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); if (status) goto err; /* Get the IDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, IDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ide_regs), "IDE Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], IDE_REGS_ADDR, IDE_REGS_CNT); if (status) goto err; /* Get the NIC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, NIC1_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic1_mbx_regs), "NIC1 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); if (status) goto err; /* Get the SMBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, SMBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->smbus_regs), "SMBus Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], SMBUS_REGS_ADDR, SMBUS_REGS_CNT); if (status) goto err; /* Get the FC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, FC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc2_mbx_regs), "FC2 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); if (status) goto err; /* Get the NIC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, NIC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_mbx_regs), "NIC2 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); if (status) goto err; /* Get the I2C Registers */ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, I2C_SEG_NUM, sizeof(struct 
mpi_coredump_segment_header) + sizeof(mpi_coredump->i2c_regs), "I2C Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], I2C_REGS_ADDR, I2C_REGS_CNT); if (status) goto err; /* Get the MEMC Registers */ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, MEMC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_regs), "MEMC Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], MEMC_REGS_ADDR, MEMC_REGS_CNT); if (status) goto err; /* Get the PBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, PBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->pbus_regs), "PBUS Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], PBUS_REGS_ADDR, PBUS_REGS_CNT); if (status) goto err; /* Get the MDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, MDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mde_regs), "MDE Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], MDE_REGS_ADDR, MDE_REGS_CNT); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; mpi_coredump->misc_nic_info.function = qdev->func; /* Segment 31 */ /* Get indexed register values. 
*/ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, &mpi_coredump->nic_routing_words[0]); if (status) goto err; /* Segment 34 (Rev C. step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, PROBE_DUMP_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->probe_dump), "Probe Dump"); ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, ROUTING_INDEX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->routing_regs), "Routing Regs"); status = ql_get_routing_index_registers(qdev, &mpi_coredump->routing_regs[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, MAC_PROTOCOL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mac_prot_regs), "MAC Prot Regs"); ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); /* Get the semaphore registers for all 5 functions */ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, SEM_REGS_SEG_NUM, 
sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->sem_regs), "Sem Registers"); ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); /* Prevent the mpi restarting while we dump the memory.*/ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); /* clear the pause */ status = ql_unpause_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC unpause. Status = 0x%.08x\n", status); goto err; } /* Reset the RISC so we can dump RAM */ status = ql_hard_reset_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC reset. Status = 0x%.08x\n", status); goto err; } ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, WCS_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->code_ram), "WCS RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], CODE_RAM_ADDR, CODE_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of CODE RAM. Status = 0x%.08x\n", status); goto err; } /* Insert the segment header */ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, MEMC_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_ram), "MEMC RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], MEMC_RAM_ADDR, MEMC_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n", status); goto err; } err: ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ return status; } static void ql_get_core_dump(struct ql_adapter *qdev) { if (!ql_own_firmware(qdev)) { netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); return; } if (!netif_running(qdev->ndev)) { netif_err(qdev, ifup, qdev->ndev, "Force Coredump can only be done from interface that is up\n"); return; } ql_queue_fw_error(qdev); } void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump) { int i, status; memset(&(mpi_coredump->mpi_global_header), 0, sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_reg_dump); memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* segment 16 */ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; mpi_coredump->misc_nic_info.function = qdev->func; /* Segment 16, Rev C. Step 18 */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC Registers"); /* Get generic reg dump */ for (i = 0; i < 64; i++) mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); /* Segment 31 */ /* Get indexed register values. 
*/ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); if (status) return; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, &mpi_coredump->nic_routing_words[0]); if (status) return; /* Segment 34 (Rev C. step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); if (status) return; } void ql_get_dump(struct ql_adapter *qdev, void *buff) { /* * If the dump has already been taken and is stored * in our internal buffer and if force dump is set then * just start the spool to dump it to the log file * and also, take a snapshot of the general regs to * to the user's buffer or else take complete dump * to the user's buffer if force is not set. 
*/ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { if (!ql_core_dump(qdev, buff)) ql_soft_reset_mpi_risc(qdev); else netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); } else { ql_gen_reg_dump(qdev, buff); ql_get_core_dump(qdev); } } /* Coredump to messages log file using separate worker thread */ void ql_mpi_core_to_log(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_core_to_log.work); u32 *tmp, count; int i; count = sizeof(struct ql_mpi_coredump) / sizeof(u32); tmp = (u32 *)qdev->mpi_coredump; netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, "Core is dumping to log file!\n"); for (i = 0; i < count; i += 8) { pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " "%.08x %.08x %.08x\n", i, tmp[i + 0], tmp[i + 1], tmp[i + 2], tmp[i + 3], tmp[i + 4], tmp[i + 5], tmp[i + 6], tmp[i + 7]); msleep(5); } } #ifdef QL_REG_DUMP static void ql_dump_intr_states(struct ql_adapter *qdev) { int i; u32 value; for (i = 0; i < qdev->intr_count; i++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); value = ql_read32(qdev, INTR_EN); pr_err("%s: Interrupt %d is %s\n", qdev->ndev->name, i, (value & INTR_EN_EN ? 
"enabled" : "disabled")); } } #define DUMP_XGMAC(qdev, reg) \ do { \ u32 data; \ ql_read_xgmac_reg(qdev, reg, &data); \ pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ } while (0) void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) { if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { pr_err("%s: Couldn't get xgmac sem\n", __func__); return; } DUMP_XGMAC(qdev, PAUSE_SRC_LO); DUMP_XGMAC(qdev, PAUSE_SRC_HI); DUMP_XGMAC(qdev, GLOBAL_CFG); DUMP_XGMAC(qdev, TX_CFG); DUMP_XGMAC(qdev, RX_CFG); DUMP_XGMAC(qdev, FLOW_CTL); DUMP_XGMAC(qdev, PAUSE_OPCODE); DUMP_XGMAC(qdev, PAUSE_TIMER); DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); DUMP_XGMAC(qdev, MAC_TX_PARAMS); DUMP_XGMAC(qdev, MAC_RX_PARAMS); DUMP_XGMAC(qdev, MAC_SYS_INT); DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); DUMP_XGMAC(qdev, MAC_MGMT_INT); DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); DUMP_XGMAC(qdev, EXT_ARB_MODE); ql_sem_unlock(qdev, qdev->xg_sem_mask); } static void ql_dump_ets_regs(struct ql_adapter *qdev) { } static void ql_dump_cam_entries(struct ql_adapter *qdev) { int i; u32 value[3]; i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (i) return; for (i = 0; i < 4; i++) { if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", qdev->ndev->name, i, value[1], value[0], value[2]); } } for (i = 0; i < 32; i++) { if (ql_get_mac_addr_reg (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", qdev->ndev->name, i, value[1], value[0]); } } ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } void ql_dump_routing_entries(struct ql_adapter *qdev) { int i; u32 value; i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (i) return; for (i = 0; i < 16; i++) { value = 0; if 
(ql_get_routing_reg(qdev, i, &value)) { pr_err("%s: Failed read of routing index register\n", __func__); return; } else { if (value) pr_err("%s: Routing Mask %d = 0x%.08x\n", qdev->ndev->name, i, value); } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } #define DUMP_REG(qdev, reg) \ pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) void ql_dump_regs(struct ql_adapter *qdev) { pr_err("reg dump for function #%d\n", qdev->func); DUMP_REG(qdev, SYS); DUMP_REG(qdev, RST_FO); DUMP_REG(qdev, FSC); DUMP_REG(qdev, CSR); DUMP_REG(qdev, ICB_RID); DUMP_REG(qdev, ICB_L); DUMP_REG(qdev, ICB_H); DUMP_REG(qdev, CFG); DUMP_REG(qdev, BIOS_ADDR); DUMP_REG(qdev, STS); DUMP_REG(qdev, INTR_EN); DUMP_REG(qdev, INTR_MASK); DUMP_REG(qdev, ISR1); DUMP_REG(qdev, ISR2); DUMP_REG(qdev, ISR3); DUMP_REG(qdev, ISR4); DUMP_REG(qdev, REV_ID); DUMP_REG(qdev, FRC_ECC_ERR); DUMP_REG(qdev, ERR_STS); DUMP_REG(qdev, RAM_DBG_ADDR); DUMP_REG(qdev, RAM_DBG_DATA); DUMP_REG(qdev, ECC_ERR_CNT); DUMP_REG(qdev, SEM); DUMP_REG(qdev, GPIO_1); DUMP_REG(qdev, GPIO_2); DUMP_REG(qdev, GPIO_3); DUMP_REG(qdev, XGMAC_ADDR); DUMP_REG(qdev, XGMAC_DATA); DUMP_REG(qdev, NIC_ETS); DUMP_REG(qdev, CNA_ETS); DUMP_REG(qdev, FLASH_ADDR); DUMP_REG(qdev, FLASH_DATA); DUMP_REG(qdev, CQ_STOP); DUMP_REG(qdev, PAGE_TBL_RID); DUMP_REG(qdev, WQ_PAGE_TBL_LO); DUMP_REG(qdev, WQ_PAGE_TBL_HI); DUMP_REG(qdev, CQ_PAGE_TBL_LO); DUMP_REG(qdev, CQ_PAGE_TBL_HI); DUMP_REG(qdev, COS_DFLT_CQ1); DUMP_REG(qdev, COS_DFLT_CQ2); DUMP_REG(qdev, SPLT_HDR); DUMP_REG(qdev, FC_PAUSE_THRES); DUMP_REG(qdev, NIC_PAUSE_THRES); DUMP_REG(qdev, FC_ETHERTYPE); DUMP_REG(qdev, FC_RCV_CFG); DUMP_REG(qdev, NIC_RCV_CFG); DUMP_REG(qdev, FC_COS_TAGS); DUMP_REG(qdev, NIC_COS_TAGS); DUMP_REG(qdev, MGMT_RCV_CFG); DUMP_REG(qdev, XG_SERDES_ADDR); DUMP_REG(qdev, XG_SERDES_DATA); DUMP_REG(qdev, PRB_MX_ADDR); DUMP_REG(qdev, PRB_MX_DATA); ql_dump_intr_states(qdev); ql_dump_xgmac_control_regs(qdev); ql_dump_ets_regs(qdev); ql_dump_cam_entries(qdev); ql_dump_routing_entries(qdev); } #endif 
/*
 * Debug dump helpers for the qlge adapter driver.  Each section below is
 * compiled in only when its corresponding QL_*_DUMP macro is defined, so
 * none of this code is built in a normal production configuration.  All
 * output goes to the kernel log at error level via pr_err().
 */

#ifdef QL_STAT_DUMP

/* Print one member of qdev->nic_stats, labelled with its field name. */
#define DUMP_STAT(qdev, stat) \
	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)

/* Dump every MAC statistics counter cached in qdev->nic_stats. */
void ql_dump_stat(struct ql_adapter *qdev)
{
	pr_err("%s: Enter\n", __func__);
	DUMP_STAT(qdev, tx_pkts);
	DUMP_STAT(qdev, tx_bytes);
	DUMP_STAT(qdev, tx_mcast_pkts);
	DUMP_STAT(qdev, tx_bcast_pkts);
	DUMP_STAT(qdev, tx_ucast_pkts);
	DUMP_STAT(qdev, tx_ctl_pkts);
	DUMP_STAT(qdev, tx_pause_pkts);
	DUMP_STAT(qdev, tx_64_pkt);
	DUMP_STAT(qdev, tx_65_to_127_pkt);
	DUMP_STAT(qdev, tx_128_to_255_pkt);
	DUMP_STAT(qdev, tx_256_511_pkt);
	DUMP_STAT(qdev, tx_512_to_1023_pkt);
	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
	DUMP_STAT(qdev, tx_1519_to_max_pkt);
	DUMP_STAT(qdev, tx_undersize_pkt);
	DUMP_STAT(qdev, tx_oversize_pkt);
	DUMP_STAT(qdev, rx_bytes);
	DUMP_STAT(qdev, rx_bytes_ok);
	DUMP_STAT(qdev, rx_pkts);
	DUMP_STAT(qdev, rx_pkts_ok);
	DUMP_STAT(qdev, rx_bcast_pkts);
	DUMP_STAT(qdev, rx_mcast_pkts);
	DUMP_STAT(qdev, rx_ucast_pkts);
	DUMP_STAT(qdev, rx_undersize_pkts);
	DUMP_STAT(qdev, rx_oversize_pkts);
	DUMP_STAT(qdev, rx_jabber_pkts);
	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
	DUMP_STAT(qdev, rx_drop_events);
	DUMP_STAT(qdev, rx_fcerr_pkts);
	DUMP_STAT(qdev, rx_align_err);
	DUMP_STAT(qdev, rx_symbol_err);
	DUMP_STAT(qdev, rx_mac_err);
	DUMP_STAT(qdev, rx_ctl_pkts);
	DUMP_STAT(qdev, rx_pause_pkts);
	DUMP_STAT(qdev, rx_64_pkts);
	DUMP_STAT(qdev, rx_65_to_127_pkts);
	DUMP_STAT(qdev, rx_128_255_pkts);
	DUMP_STAT(qdev, rx_256_511_pkts);
	DUMP_STAT(qdev, rx_512_to_1023_pkts);
	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
	DUMP_STAT(qdev, rx_1519_to_max_pkts);
	DUMP_STAT(qdev, rx_len_err_pkts);
}; /* NOTE(review): stray ';' after the function body — harmless but
    * non-standard C; kept byte-identical here. */
#endif

#ifdef QL_DEV_DUMP

/* Print a scalar qdev field using the supplied printf conversion string. */
#define DUMP_QDEV_FIELD(qdev, type, field) \
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)

/* Print a DMA-address field, widened to u64 since dma_addr_t width varies. */
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)

/* Print one member of an array-of-struct field inside qdev. */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n", \
	       #array, index, #field, qdev->array[index].field);

/* Dump the driver-side software state held in the ql_adapter structure. */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	/* The MSI-X vector table only exists when MSI-X was set up. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	/* NOTE(review): intr_count is printed a second time here. */
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
}
#endif

#ifdef QL_CB_DUMP
/* Dump a work-queue (TX) init control block; fields are little-endian. */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	pr_err("Dumping wqicb stuff...\n");
	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
	pr_err("wqicb->cq_id_rss = %d\n", le16_to_cpu(wqicb->cq_id_rss));
	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	pr_err("wqicb->wq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->addr));
	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}

/* Dump the driver-side state of one TX ring; a NULL ring is tolerated. */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	pr_err("===================== Dumping tx_ring %d ===============\n",
	       tx_ring->wq_id);
	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
	pr_err("tx_ring->base_dma = 0x%llx\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	/* The shadow register may not be mapped yet, hence the NULL guard. */
	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
	       tx_ring->cnsmr_idx_sh_reg, tx_ring->cnsmr_idx_sh_reg
	       ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
	pr_err("tx_ring->q = %p\n", tx_ring->q);
	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}

/* Dump the RSS init control block: enable flags, CQ map and hash keys. */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");
	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): the first flag is tested against base_cq, the rest
	 * against flags — presumably RSS_L4K lives in the base_cq byte;
	 * confirm against the ricb layout in the driver header. */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n",
		       i, le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n",
		       i, le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n",
		       i, le32_to_cpu(ricb->ipv4_hash_key[i]));
}

/* Dump a completion-queue init control block (CQ plus its LBQ/SBQ rings). */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");
	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n", le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n", le16_to_cpu(cqicb->irq_delay));
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n", le16_to_cpu(cqicb->lbq_len));
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n", le16_to_cpu(cqicb->sbq_len));
}

/* Dump the driver-side state of one RX ring; a NULL ring is tolerated. */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
	       rx_ring->cq_id,
	       rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* The shadow register may not be mapped yet, hence the NULL guard. */
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
	/* Large-buffer queue (LBQ) state. */
	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
	pr_err("rx_ring->lbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	pr_err("rx_ring->lbq_base_indirect = %p\n",
	       rx_ring->lbq_base_indirect);
	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
	       rx_ring->lbq_prod_idx_db_reg);
	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
	/* Small-buffer queue (SBQ) state. */
	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
	pr_err("rx_ring->sbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	pr_err("rx_ring->sbq_base_indirect = %p\n",
	       rx_ring->sbq_base_indirect);
	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq_prod_idx_db_reg);
	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}

/*
 * Read a control block of @size bytes back from the chip via ql_write_cfg()
 * and dump it with the decoder selected by @bit (WQICB/CQICB/RICB).
 * Uses GFP_ATOMIC since this can run in non-sleepable debug contexts.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *ptr;
	pr_err("%s: Enter\n", __func__);
	ptr = kmalloc(size, GFP_ATOMIC);
	if (ptr == NULL)
		return;
	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
		pr_err("%s: Failed to upload control block!\n", __func__);
		goto fail_it;
	}
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb((struct wqicb *)ptr);
		break;
	case CFG_DCQ:
		ql_dump_cqicb((struct cqicb *)ptr);
		break;
	case CFG_DR:
		ql_dump_ricb((struct ricb *)ptr);
		break;
	default:
		pr_err("%s: Invalid bit value = %x\n", __func__, bit);
		break;
	}
fail_it:
	kfree(ptr);
}
#endif

#ifdef QL_OB_DUMP
/*
 * Dump three consecutive TX buffer descriptors starting at @tbd.
 * NOTE(review): the C/E flag tests use the raw little-endian tbd->len,
 * while the length is byte-swapped after masking — presumably fine on LE
 * hosts only; confirm intent on big-endian.
 */
void ql_dump_tx_desc(struct tx_buf_desc *tbd)
{
	pr_err("tbd->addr = 0x%llx\n",
	       le64_to_cpu((u64) tbd->addr));
	pr_err("tbd->len = %d\n",
	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
	pr_err("tbd->flags = %s %s\n",
	       tbd->len & TX_DESC_C ? "C" : ".",
	       tbd->len & TX_DESC_E ? "E" : ".");
	tbd++;
	pr_err("tbd->addr = 0x%llx\n",
	       le64_to_cpu((u64) tbd->addr));
	pr_err("tbd->len = %d\n",
	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
	pr_err("tbd->flags = %s %s\n",
	       tbd->len & TX_DESC_C ? "C" : ".",
	       tbd->len & TX_DESC_E ? "E" : ".");
	tbd++;
	pr_err("tbd->addr = 0x%llx\n",
	       le64_to_cpu((u64) tbd->addr));
	pr_err("tbd->len = %d\n",
	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
	pr_err("tbd->flags = %s %s\n",
	       tbd->len & TX_DESC_C ? "C" : ".",
	       tbd->len & TX_DESC_E ? "E" : ".");
}

/*
 * Dump an outbound MAC request IOCB.  The same buffer is reinterpreted as
 * a TSO request to decode the flag bytes, which overlap in both layouts.
 */
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
{
	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
	struct tx_buf_desc *tbd;
	u16 frame_len;
	pr_err("%s\n", __func__);
	pr_err("opcode = %s\n",
	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
	pr_err("flags1 = %s %s %s %s %s\n",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
	pr_err("flags2 = %s %s %s\n",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
	pr_err("flags3 = %s %s %s\n",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
	pr_err("tid = %x\n", ob_mac_iocb->tid);
	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
	pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
		pr_err("frame_len = %d\n",
		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
		pr_err("mss = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->mss));
		pr_err("prot_hdr_len = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
		pr_err("hdr_offset = 0x%.04x\n",
		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
	} else {
		pr_err("frame_len = %d\n",
		       le16_to_cpu(ob_mac_iocb->frame_len));
		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
	}
	/* NOTE(review): frame_len is computed above but never used —
	 * looks like a leftover from a length-bounded descriptor walk. */
	tbd = &ob_mac_iocb->tbd[0];
	ql_dump_tx_desc(tbd);
}

/* Dump an outbound MAC completion (response) IOCB. */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = %d\n", ob_mac_rsp->opcode);
	pr_err("flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	pr_err("tid = %x\n", ob_mac_rsp->tid);
}
#endif

#ifdef QL_IB_DUMP
/* Dump an inbound MAC completion IOCB: flags, errors, RSS and VLAN info. */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
	/* Multicast match type is a multi-bit field, decoded by equality. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
	/* Error type is also a multi-bit field, decoded by equality. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
	pr_err("data_len = %d\n", le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss = %x\n", le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id = %x\n", le16_to_cpu(ib_mac_rsp->vlan_id));
	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
	/* Split-header fields are only valid when HV is set. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length = %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
#endif

#ifdef QL_ALL_DUMP
/*
 * Dump everything: registers, adapter state, then every TX and RX ring
 * together with its hardware control block.  The QL_DUMP_* wrappers are
 * no-ops unless their respective QL_*_DUMP sections are compiled in.
 */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;
	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
#endif
gpl-2.0
ansebovi/SmartDeviL_XMD
drivers/pinctrl/pinctrl-mmp2.c
4987
39622
/* * linux/drivers/pinctrl/pinmux-mmp2.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. * * Copyright (C) 2011, Marvell Technology Group Ltd. * * Author: Haojian Zhuang <haojian.zhuang@marvell.com> * */ #include <linux/device.h> #include <linux/module.h> #include <linux/io.h> #include <linux/platform_device.h> #include "pinctrl-pxa3xx.h" #define MMP2_DS_MASK 0x1800 #define MMP2_DS_SHIFT 11 #define MMP2_SLEEP_MASK 0x38 #define MMP2_SLEEP_SELECT (1 << 9) #define MMP2_SLEEP_DATA (1 << 8) #define MMP2_SLEEP_DIR (1 << 7) #define MFPR_MMP2(a, r, f0, f1, f2, f3, f4, f5, f6, f7) \ { \ .name = #a, \ .pin = a, \ .mfpr = r, \ .func = { \ MMP2_MUX_##f0, \ MMP2_MUX_##f1, \ MMP2_MUX_##f2, \ MMP2_MUX_##f3, \ MMP2_MUX_##f4, \ MMP2_MUX_##f5, \ MMP2_MUX_##f6, \ MMP2_MUX_##f7, \ }, \ } #define GRP_MMP2(a, m, p) \ { .name = a, .mux = MMP2_MUX_##m, .pins = p, .npins = ARRAY_SIZE(p), } /* 174 pins */ enum mmp2_pin_list { /* 0~168: GPIO0~GPIO168 */ TWSI4_SCL = 169, TWSI4_SDA, /* 170 */ G_CLKREQ, VCXO_REQ, VCXO_OUT, }; enum mmp2_mux { /* PXA3xx_MUX_GPIO = 0 (predefined in pinctrl-pxa3xx.h) */ MMP2_MUX_GPIO = 0, MMP2_MUX_G_CLKREQ, MMP2_MUX_VCXO_REQ, MMP2_MUX_VCXO_OUT, MMP2_MUX_KP_MK, MMP2_MUX_KP_DK, MMP2_MUX_CCIC1, MMP2_MUX_CCIC2, MMP2_MUX_SPI, MMP2_MUX_SSPA2, MMP2_MUX_ROT, MMP2_MUX_I2S, MMP2_MUX_TB, MMP2_MUX_CAM2, MMP2_MUX_HDMI, MMP2_MUX_TWSI2, MMP2_MUX_TWSI3, MMP2_MUX_TWSI4, MMP2_MUX_TWSI5, MMP2_MUX_TWSI6, MMP2_MUX_UART1, MMP2_MUX_UART2, MMP2_MUX_UART3, MMP2_MUX_UART4, MMP2_MUX_SSP1_RX, MMP2_MUX_SSP1_FRM, MMP2_MUX_SSP1_TXRX, MMP2_MUX_SSP2_RX, MMP2_MUX_SSP2_FRM, MMP2_MUX_SSP1, MMP2_MUX_SSP2, MMP2_MUX_SSP3, MMP2_MUX_SSP4, MMP2_MUX_MMC1, MMP2_MUX_MMC2, MMP2_MUX_MMC3, MMP2_MUX_MMC4, MMP2_MUX_ULPI, MMP2_MUX_AC, MMP2_MUX_CA, MMP2_MUX_PWM, MMP2_MUX_USIM, MMP2_MUX_TIPU, MMP2_MUX_PLL, MMP2_MUX_NAND, MMP2_MUX_FSIC, MMP2_MUX_SLEEP_IND, MMP2_MUX_EXT_DMA, 
MMP2_MUX_ONE_WIRE, MMP2_MUX_LCD, MMP2_MUX_SMC, MMP2_MUX_SMC_INT, MMP2_MUX_MSP, MMP2_MUX_G_CLKOUT, MMP2_MUX_32K_CLKOUT, MMP2_MUX_PRI_JTAG, MMP2_MUX_AAS_JTAG, MMP2_MUX_AAS_GPIO, MMP2_MUX_AAS_SPI, MMP2_MUX_AAS_TWSI, MMP2_MUX_AAS_DEU_EX, MMP2_MUX_NONE = 0xffff, }; static struct pinctrl_pin_desc mmp2_pads[] = { /* * The name indicates function 0 of this pin. * After reset, function 0 is the default function of pin. */ PINCTRL_PIN(GPIO0, "GPIO0"), PINCTRL_PIN(GPIO1, "GPIO1"), PINCTRL_PIN(GPIO2, "GPIO2"), PINCTRL_PIN(GPIO3, "GPIO3"), PINCTRL_PIN(GPIO4, "GPIO4"), PINCTRL_PIN(GPIO5, "GPIO5"), PINCTRL_PIN(GPIO6, "GPIO6"), PINCTRL_PIN(GPIO7, "GPIO7"), PINCTRL_PIN(GPIO8, "GPIO8"), PINCTRL_PIN(GPIO9, "GPIO9"), PINCTRL_PIN(GPIO10, "GPIO10"), PINCTRL_PIN(GPIO11, "GPIO11"), PINCTRL_PIN(GPIO12, "GPIO12"), PINCTRL_PIN(GPIO13, "GPIO13"), PINCTRL_PIN(GPIO14, "GPIO14"), PINCTRL_PIN(GPIO15, "GPIO15"), PINCTRL_PIN(GPIO16, "GPIO16"), PINCTRL_PIN(GPIO17, "GPIO17"), PINCTRL_PIN(GPIO18, "GPIO18"), PINCTRL_PIN(GPIO19, "GPIO19"), PINCTRL_PIN(GPIO20, "GPIO20"), PINCTRL_PIN(GPIO21, "GPIO21"), PINCTRL_PIN(GPIO22, "GPIO22"), PINCTRL_PIN(GPIO23, "GPIO23"), PINCTRL_PIN(GPIO24, "GPIO24"), PINCTRL_PIN(GPIO25, "GPIO25"), PINCTRL_PIN(GPIO26, "GPIO26"), PINCTRL_PIN(GPIO27, "GPIO27"), PINCTRL_PIN(GPIO28, "GPIO28"), PINCTRL_PIN(GPIO29, "GPIO29"), PINCTRL_PIN(GPIO30, "GPIO30"), PINCTRL_PIN(GPIO31, "GPIO31"), PINCTRL_PIN(GPIO32, "GPIO32"), PINCTRL_PIN(GPIO33, "GPIO33"), PINCTRL_PIN(GPIO34, "GPIO34"), PINCTRL_PIN(GPIO35, "GPIO35"), PINCTRL_PIN(GPIO36, "GPIO36"), PINCTRL_PIN(GPIO37, "GPIO37"), PINCTRL_PIN(GPIO38, "GPIO38"), PINCTRL_PIN(GPIO39, "GPIO39"), PINCTRL_PIN(GPIO40, "GPIO40"), PINCTRL_PIN(GPIO41, "GPIO41"), PINCTRL_PIN(GPIO42, "GPIO42"), PINCTRL_PIN(GPIO43, "GPIO43"), PINCTRL_PIN(GPIO44, "GPIO44"), PINCTRL_PIN(GPIO45, "GPIO45"), PINCTRL_PIN(GPIO46, "GPIO46"), PINCTRL_PIN(GPIO47, "GPIO47"), PINCTRL_PIN(GPIO48, "GPIO48"), PINCTRL_PIN(GPIO49, "GPIO49"), PINCTRL_PIN(GPIO50, "GPIO50"), PINCTRL_PIN(GPIO51, 
"GPIO51"), PINCTRL_PIN(GPIO52, "GPIO52"), PINCTRL_PIN(GPIO53, "GPIO53"), PINCTRL_PIN(GPIO54, "GPIO54"), PINCTRL_PIN(GPIO55, "GPIO55"), PINCTRL_PIN(GPIO56, "GPIO56"), PINCTRL_PIN(GPIO57, "GPIO57"), PINCTRL_PIN(GPIO58, "GPIO58"), PINCTRL_PIN(GPIO59, "GPIO59"), PINCTRL_PIN(GPIO60, "GPIO60"), PINCTRL_PIN(GPIO61, "GPIO61"), PINCTRL_PIN(GPIO62, "GPIO62"), PINCTRL_PIN(GPIO63, "GPIO63"), PINCTRL_PIN(GPIO64, "GPIO64"), PINCTRL_PIN(GPIO65, "GPIO65"), PINCTRL_PIN(GPIO66, "GPIO66"), PINCTRL_PIN(GPIO67, "GPIO67"), PINCTRL_PIN(GPIO68, "GPIO68"), PINCTRL_PIN(GPIO69, "GPIO69"), PINCTRL_PIN(GPIO70, "GPIO70"), PINCTRL_PIN(GPIO71, "GPIO71"), PINCTRL_PIN(GPIO72, "GPIO72"), PINCTRL_PIN(GPIO73, "GPIO73"), PINCTRL_PIN(GPIO74, "GPIO74"), PINCTRL_PIN(GPIO75, "GPIO75"), PINCTRL_PIN(GPIO76, "GPIO76"), PINCTRL_PIN(GPIO77, "GPIO77"), PINCTRL_PIN(GPIO78, "GPIO78"), PINCTRL_PIN(GPIO79, "GPIO79"), PINCTRL_PIN(GPIO80, "GPIO80"), PINCTRL_PIN(GPIO81, "GPIO81"), PINCTRL_PIN(GPIO82, "GPIO82"), PINCTRL_PIN(GPIO83, "GPIO83"), PINCTRL_PIN(GPIO84, "GPIO84"), PINCTRL_PIN(GPIO85, "GPIO85"), PINCTRL_PIN(GPIO86, "GPIO86"), PINCTRL_PIN(GPIO87, "GPIO87"), PINCTRL_PIN(GPIO88, "GPIO88"), PINCTRL_PIN(GPIO89, "GPIO89"), PINCTRL_PIN(GPIO90, "GPIO90"), PINCTRL_PIN(GPIO91, "GPIO91"), PINCTRL_PIN(GPIO92, "GPIO92"), PINCTRL_PIN(GPIO93, "GPIO93"), PINCTRL_PIN(GPIO94, "GPIO94"), PINCTRL_PIN(GPIO95, "GPIO95"), PINCTRL_PIN(GPIO96, "GPIO96"), PINCTRL_PIN(GPIO97, "GPIO97"), PINCTRL_PIN(GPIO98, "GPIO98"), PINCTRL_PIN(GPIO99, "GPIO99"), PINCTRL_PIN(GPIO100, "GPIO100"), PINCTRL_PIN(GPIO101, "GPIO101"), PINCTRL_PIN(GPIO102, "GPIO102"), PINCTRL_PIN(GPIO103, "GPIO103"), PINCTRL_PIN(GPIO104, "GPIO104"), PINCTRL_PIN(GPIO105, "GPIO105"), PINCTRL_PIN(GPIO106, "GPIO106"), PINCTRL_PIN(GPIO107, "GPIO107"), PINCTRL_PIN(GPIO108, "GPIO108"), PINCTRL_PIN(GPIO109, "GPIO109"), PINCTRL_PIN(GPIO110, "GPIO110"), PINCTRL_PIN(GPIO111, "GPIO111"), PINCTRL_PIN(GPIO112, "GPIO112"), PINCTRL_PIN(GPIO113, "GPIO113"), PINCTRL_PIN(GPIO114, "GPIO114"), 
PINCTRL_PIN(GPIO115, "GPIO115"), PINCTRL_PIN(GPIO116, "GPIO116"), PINCTRL_PIN(GPIO117, "GPIO117"), PINCTRL_PIN(GPIO118, "GPIO118"), PINCTRL_PIN(GPIO119, "GPIO119"), PINCTRL_PIN(GPIO120, "GPIO120"), PINCTRL_PIN(GPIO121, "GPIO121"), PINCTRL_PIN(GPIO122, "GPIO122"), PINCTRL_PIN(GPIO123, "GPIO123"), PINCTRL_PIN(GPIO124, "GPIO124"), PINCTRL_PIN(GPIO125, "GPIO125"), PINCTRL_PIN(GPIO126, "GPIO126"), PINCTRL_PIN(GPIO127, "GPIO127"), PINCTRL_PIN(GPIO128, "GPIO128"), PINCTRL_PIN(GPIO129, "GPIO129"), PINCTRL_PIN(GPIO130, "GPIO130"), PINCTRL_PIN(GPIO131, "GPIO131"), PINCTRL_PIN(GPIO132, "GPIO132"), PINCTRL_PIN(GPIO133, "GPIO133"), PINCTRL_PIN(GPIO134, "GPIO134"), PINCTRL_PIN(GPIO135, "GPIO135"), PINCTRL_PIN(GPIO136, "GPIO136"), PINCTRL_PIN(GPIO137, "GPIO137"), PINCTRL_PIN(GPIO138, "GPIO138"), PINCTRL_PIN(GPIO139, "GPIO139"), PINCTRL_PIN(GPIO140, "GPIO140"), PINCTRL_PIN(GPIO141, "GPIO141"), PINCTRL_PIN(GPIO142, "GPIO142"), PINCTRL_PIN(GPIO143, "GPIO143"), PINCTRL_PIN(GPIO144, "GPIO144"), PINCTRL_PIN(GPIO145, "GPIO145"), PINCTRL_PIN(GPIO146, "GPIO146"), PINCTRL_PIN(GPIO147, "GPIO147"), PINCTRL_PIN(GPIO148, "GPIO148"), PINCTRL_PIN(GPIO149, "GPIO149"), PINCTRL_PIN(GPIO150, "GPIO150"), PINCTRL_PIN(GPIO151, "GPIO151"), PINCTRL_PIN(GPIO152, "GPIO152"), PINCTRL_PIN(GPIO153, "GPIO153"), PINCTRL_PIN(GPIO154, "GPIO154"), PINCTRL_PIN(GPIO155, "GPIO155"), PINCTRL_PIN(GPIO156, "GPIO156"), PINCTRL_PIN(GPIO157, "GPIO157"), PINCTRL_PIN(GPIO158, "GPIO158"), PINCTRL_PIN(GPIO159, "GPIO159"), PINCTRL_PIN(GPIO160, "GPIO160"), PINCTRL_PIN(GPIO161, "GPIO161"), PINCTRL_PIN(GPIO162, "GPIO162"), PINCTRL_PIN(GPIO163, "GPIO163"), PINCTRL_PIN(GPIO164, "GPIO164"), PINCTRL_PIN(GPIO165, "GPIO165"), PINCTRL_PIN(GPIO166, "GPIO166"), PINCTRL_PIN(GPIO167, "GPIO167"), PINCTRL_PIN(GPIO168, "GPIO168"), PINCTRL_PIN(TWSI4_SCL, "TWSI4_SCL"), PINCTRL_PIN(TWSI4_SDA, "TWSI4_SDA"), PINCTRL_PIN(G_CLKREQ, "G_CLKREQ"), PINCTRL_PIN(VCXO_REQ, "VCXO_REQ"), PINCTRL_PIN(VCXO_OUT, "VCXO_OUT"), }; struct pxa3xx_mfp_pin mmp2_mfp[] = 
{ /* pin offs f0 f1 f2 f3 f4 f5 f6 f7 */ MFPR_MMP2(GPIO0, 0x054, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO1, 0x058, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO2, 0x05C, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO3, 0x060, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO4, 0x064, GPIO, KP_MK, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO5, 0x068, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO6, 0x06C, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO7, 0x070, GPIO, KP_MK, NONE, SPI, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO8, 0x074, GPIO, KP_MK, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO9, 0x078, GPIO, KP_MK, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO10, 0x07C, GPIO, KP_MK, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO11, 0x080, GPIO, KP_MK, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO12, 0x084, GPIO, KP_MK, NONE, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO13, 0x088, GPIO, KP_MK, NONE, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO14, 0x08C, GPIO, KP_MK, NONE, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO15, 0x090, GPIO, KP_MK, KP_DK, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO16, 0x094, GPIO, KP_DK, ROT, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO17, 0x098, GPIO, KP_DK, ROT, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO18, 0x09C, GPIO, KP_DK, ROT, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO19, 0x0A0, GPIO, KP_DK, ROT, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO20, 0x0A4, GPIO, KP_DK, TB, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO21, 0x0A8, GPIO, KP_DK, TB, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO22, 0x0AC, GPIO, KP_DK, TB, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO23, 0x0B0, GPIO, KP_DK, TB, CCIC1, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO24, 0x0B4, GPIO, I2S, VCXO_OUT, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO25, 0x0B8, GPIO, I2S, HDMI, SSPA2, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO26, 
0x0BC, GPIO, I2S, HDMI, SSPA2, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO27, 0x0C0, GPIO, I2S, HDMI, SSPA2, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO28, 0x0C4, GPIO, I2S, NONE, SSPA2, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO29, 0x0C8, GPIO, UART1, KP_MK, NONE, NONE, NONE, AAS_SPI, NONE), MFPR_MMP2(GPIO30, 0x0CC, GPIO, UART1, KP_MK, NONE, NONE, NONE, AAS_SPI, NONE), MFPR_MMP2(GPIO31, 0x0D0, GPIO, UART1, KP_MK, NONE, NONE, NONE, AAS_SPI, NONE), MFPR_MMP2(GPIO32, 0x0D4, GPIO, UART1, KP_MK, NONE, NONE, NONE, AAS_SPI, NONE), MFPR_MMP2(GPIO33, 0x0D8, GPIO, SSPA2, I2S, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO34, 0x0DC, GPIO, SSPA2, I2S, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO35, 0x0E0, GPIO, SSPA2, I2S, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO36, 0x0E4, GPIO, SSPA2, I2S, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO37, 0x0E8, GPIO, MMC2, SSP1, TWSI2, UART2, UART3, AAS_SPI, AAS_TWSI), MFPR_MMP2(GPIO38, 0x0EC, GPIO, MMC2, SSP1, TWSI2, UART2, UART3, AAS_SPI, AAS_TWSI), MFPR_MMP2(GPIO39, 0x0F0, GPIO, MMC2, SSP1, TWSI2, UART2, UART3, AAS_SPI, AAS_TWSI), MFPR_MMP2(GPIO40, 0x0F4, GPIO, MMC2, SSP1, TWSI2, UART2, UART3, AAS_SPI, AAS_TWSI), MFPR_MMP2(GPIO41, 0x0F8, GPIO, MMC2, TWSI5, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO42, 0x0FC, GPIO, MMC2, TWSI5, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO43, 0x100, GPIO, TWSI2, UART4, SSP1, UART2, UART3, NONE, AAS_TWSI), MFPR_MMP2(GPIO44, 0x104, GPIO, TWSI2, UART4, SSP1, UART2, UART3, NONE, AAS_TWSI), MFPR_MMP2(GPIO45, 0x108, GPIO, UART1, UART4, SSP1, UART2, UART3, NONE, NONE), MFPR_MMP2(GPIO46, 0x10C, GPIO, UART1, UART4, SSP1, UART2, UART3, NONE, NONE), MFPR_MMP2(GPIO47, 0x110, GPIO, UART2, SSP2, TWSI6, CAM2, AAS_SPI, AAS_GPIO, NONE), MFPR_MMP2(GPIO48, 0x114, GPIO, UART2, SSP2, TWSI6, CAM2, AAS_SPI, AAS_GPIO, NONE), MFPR_MMP2(GPIO49, 0x118, GPIO, UART2, SSP2, PWM, CCIC2, AAS_SPI, NONE, NONE), MFPR_MMP2(GPIO50, 0x11C, GPIO, UART2, SSP2, PWM, CCIC2, AAS_SPI, NONE, NONE), MFPR_MMP2(GPIO51, 0x120, GPIO, UART3, ROT, AAS_GPIO, 
PWM, NONE, NONE, NONE), MFPR_MMP2(GPIO52, 0x124, GPIO, UART3, ROT, AAS_GPIO, PWM, NONE, NONE, NONE), MFPR_MMP2(GPIO53, 0x128, GPIO, UART3, TWSI2, VCXO_REQ, NONE, PWM, NONE, AAS_TWSI), MFPR_MMP2(GPIO54, 0x12C, GPIO, UART3, TWSI2, VCXO_OUT, HDMI, PWM, NONE, AAS_TWSI), MFPR_MMP2(GPIO55, 0x130, GPIO, SSP2, SSP1, UART2, ROT, TWSI2, SSP3, AAS_TWSI), MFPR_MMP2(GPIO56, 0x134, GPIO, SSP2, SSP1, UART2, ROT, TWSI2, KP_DK, AAS_TWSI), MFPR_MMP2(GPIO57, 0x138, GPIO, SSP2_RX, SSP1_TXRX, SSP2_FRM, SSP1_RX, VCXO_REQ, KP_DK, NONE), MFPR_MMP2(GPIO58, 0x13C, GPIO, SSP2, SSP1_RX, SSP1_FRM, SSP1_TXRX, VCXO_REQ, KP_DK, NONE), MFPR_MMP2(GPIO59, 0x280, GPIO, CCIC1, ULPI, MMC3, CCIC2, UART3, UART4, NONE), MFPR_MMP2(GPIO60, 0x284, GPIO, CCIC1, ULPI, MMC3, CCIC2, UART3, UART4, NONE), MFPR_MMP2(GPIO61, 0x288, GPIO, CCIC1, ULPI, MMC3, CCIC2, UART3, HDMI, NONE), MFPR_MMP2(GPIO62, 0x28C, GPIO, CCIC1, ULPI, MMC3, CCIC2, UART3, NONE, NONE), MFPR_MMP2(GPIO63, 0x290, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, UART4, NONE), MFPR_MMP2(GPIO64, 0x294, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, UART4, NONE), MFPR_MMP2(GPIO65, 0x298, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, UART4, NONE), MFPR_MMP2(GPIO66, 0x29C, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, UART4, NONE), MFPR_MMP2(GPIO67, 0x2A0, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, NONE, NONE), MFPR_MMP2(GPIO68, 0x2A4, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, LCD, NONE), MFPR_MMP2(GPIO69, 0x2A8, GPIO, CCIC1, ULPI, MMC3, CCIC2, NONE, LCD, NONE), MFPR_MMP2(GPIO70, 0x2AC, GPIO, CCIC1, ULPI, MMC3, CCIC2, MSP, LCD, NONE), MFPR_MMP2(GPIO71, 0x2B0, GPIO, TWSI3, NONE, PWM, NONE, NONE, LCD, AAS_TWSI), MFPR_MMP2(GPIO72, 0x2B4, GPIO, TWSI3, HDMI, PWM, NONE, NONE, LCD, AAS_TWSI), MFPR_MMP2(GPIO73, 0x2B8, GPIO, VCXO_REQ, 32K_CLKOUT, PWM, VCXO_OUT, NONE, LCD, NONE), MFPR_MMP2(GPIO74, 0x170, GPIO, LCD, SMC, MMC4, SSP3, UART2, UART4, TIPU), MFPR_MMP2(GPIO75, 0x174, GPIO, LCD, SMC, MMC4, SSP3, UART2, UART4, TIPU), MFPR_MMP2(GPIO76, 0x178, GPIO, LCD, SMC, MMC4, SSP3, UART2, UART4, TIPU), 
MFPR_MMP2(GPIO77, 0x17C, GPIO, LCD, SMC, MMC4, SSP3, UART2, UART4, TIPU), MFPR_MMP2(GPIO78, 0x180, GPIO, LCD, HDMI, MMC4, NONE, SSP4, AAS_SPI, TIPU), MFPR_MMP2(GPIO79, 0x184, GPIO, LCD, AAS_GPIO, MMC4, NONE, SSP4, AAS_SPI, TIPU), MFPR_MMP2(GPIO80, 0x188, GPIO, LCD, AAS_GPIO, MMC4, NONE, SSP4, AAS_SPI, TIPU), MFPR_MMP2(GPIO81, 0x18C, GPIO, LCD, AAS_GPIO, MMC4, NONE, SSP4, AAS_SPI, TIPU), MFPR_MMP2(GPIO82, 0x190, GPIO, LCD, NONE, MMC4, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO83, 0x194, GPIO, LCD, NONE, MMC4, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO84, 0x198, GPIO, LCD, SMC, MMC2, NONE, TWSI5, AAS_TWSI, TIPU), MFPR_MMP2(GPIO85, 0x19C, GPIO, LCD, SMC, MMC2, NONE, TWSI5, AAS_TWSI, TIPU), MFPR_MMP2(GPIO86, 0x1A0, GPIO, LCD, SMC, MMC2, NONE, TWSI6, CCIC2, TIPU), MFPR_MMP2(GPIO87, 0x1A4, GPIO, LCD, SMC, MMC2, NONE, TWSI6, CCIC2, TIPU), MFPR_MMP2(GPIO88, 0x1A8, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO89, 0x1AC, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO90, 0x1B0, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO91, 0x1B4, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO92, 0x1B8, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO93, 0x1BC, GPIO, LCD, AAS_GPIO, MMC2, NONE, NONE, CCIC2, TIPU), MFPR_MMP2(GPIO94, 0x1C0, GPIO, LCD, AAS_GPIO, SPI, NONE, AAS_SPI, CCIC2, TIPU), MFPR_MMP2(GPIO95, 0x1C4, GPIO, LCD, TWSI3, SPI, AAS_DEU_EX, AAS_SPI, CCIC2, TIPU), MFPR_MMP2(GPIO96, 0x1C8, GPIO, LCD, TWSI3, SPI, AAS_DEU_EX, AAS_SPI, NONE, TIPU), MFPR_MMP2(GPIO97, 0x1CC, GPIO, LCD, TWSI6, SPI, AAS_DEU_EX, AAS_SPI, NONE, TIPU), MFPR_MMP2(GPIO98, 0x1D0, GPIO, LCD, TWSI6, SPI, ONE_WIRE, NONE, NONE, TIPU), MFPR_MMP2(GPIO99, 0x1D4, GPIO, LCD, SMC, SPI, TWSI5, NONE, NONE, TIPU), MFPR_MMP2(GPIO100, 0x1D8, GPIO, LCD, SMC, SPI, TWSI5, NONE, NONE, TIPU), MFPR_MMP2(GPIO101, 0x1DC, GPIO, LCD, SMC, SPI, NONE, NONE, NONE, TIPU), MFPR_MMP2(GPIO102, 0x000, USIM, GPIO, FSIC, KP_DK, LCD, NONE, NONE, 
NONE), MFPR_MMP2(GPIO103, 0x004, USIM, GPIO, FSIC, KP_DK, LCD, NONE, NONE, NONE), MFPR_MMP2(GPIO104, 0x1FC, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO105, 0x1F8, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO106, 0x1F4, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO107, 0x1F0, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO108, 0x21C, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO109, 0x218, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO110, 0x214, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO111, 0x200, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO112, 0x244, NAND, GPIO, MMC3, SMC, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO113, 0x25C, SMC, GPIO, EXT_DMA, MMC3, SMC, HDMI, NONE, NONE), MFPR_MMP2(GPIO114, 0x164, G_CLKOUT, 32K_CLKOUT, HDMI, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO115, 0x260, GPIO, NONE, AC, UART4, UART3, SSP1, NONE, NONE), MFPR_MMP2(GPIO116, 0x264, GPIO, NONE, AC, UART4, UART3, SSP1, NONE, NONE), MFPR_MMP2(GPIO117, 0x268, GPIO, NONE, AC, UART4, UART3, SSP1, NONE, NONE), MFPR_MMP2(GPIO118, 0x26C, GPIO, NONE, AC, UART4, UART3, SSP1, NONE, NONE), MFPR_MMP2(GPIO119, 0x270, GPIO, NONE, CA, SSP3, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO120, 0x274, GPIO, NONE, CA, SSP3, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO121, 0x278, GPIO, NONE, CA, SSP3, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO122, 0x27C, GPIO, NONE, CA, SSP3, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO123, 0x148, GPIO, SLEEP_IND, ONE_WIRE, 32K_CLKOUT, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO124, 0x00C, GPIO, MMC1, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO125, 0x010, GPIO, MMC1, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO126, 0x014, GPIO, MMC1, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO127, 0x018, GPIO, NONE, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO128, 0x01C, GPIO, NONE, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO129, 0x020, 
GPIO, MMC1, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO130, 0x024, GPIO, MMC1, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO131, 0x028, GPIO, MMC1, NONE, MSP, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO132, 0x02C, GPIO, MMC1, PRI_JTAG, MSP, SSP3, AAS_JTAG, NONE, NONE), MFPR_MMP2(GPIO133, 0x030, GPIO, MMC1, PRI_JTAG, MSP, SSP3, AAS_JTAG, NONE, NONE), MFPR_MMP2(GPIO134, 0x034, GPIO, MMC1, PRI_JTAG, MSP, SSP3, AAS_JTAG, NONE, NONE), MFPR_MMP2(GPIO135, 0x038, GPIO, NONE, LCD, MMC3, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO136, 0x03C, GPIO, MMC1, PRI_JTAG, MSP, SSP3, AAS_JTAG, NONE, NONE), MFPR_MMP2(GPIO137, 0x040, GPIO, HDMI, LCD, MSP, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO138, 0x044, GPIO, NONE, LCD, MMC3, SMC, NONE, NONE, NONE), MFPR_MMP2(GPIO139, 0x048, GPIO, MMC1, PRI_JTAG, MSP, NONE, AAS_JTAG, NONE, NONE), MFPR_MMP2(GPIO140, 0x04C, GPIO, MMC1, LCD, NONE, NONE, UART2, UART1, NONE), MFPR_MMP2(GPIO141, 0x050, GPIO, MMC1, LCD, NONE, NONE, UART2, UART1, NONE), MFPR_MMP2(GPIO142, 0x008, USIM, GPIO, FSIC, KP_DK, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO143, 0x220, NAND, GPIO, SMC, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO144, 0x224, NAND, GPIO, SMC_INT, SMC, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO145, 0x228, SMC, GPIO, NONE, NONE, SMC, NONE, NONE, NONE), MFPR_MMP2(GPIO146, 0x22C, SMC, GPIO, NONE, NONE, SMC, NONE, NONE, NONE), MFPR_MMP2(GPIO147, 0x230, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO148, 0x234, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO149, 0x238, NAND, GPIO, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO150, 0x23C, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO151, 0x240, SMC, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO152, 0x248, SMC, GPIO, NONE, NONE, SMC, NONE, NONE, NONE), MFPR_MMP2(GPIO153, 0x24C, SMC, GPIO, NONE, NONE, SMC, NONE, NONE, NONE), MFPR_MMP2(GPIO154, 0x254, SMC_INT, GPIO, SMC, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO155, 0x258, EXT_DMA, GPIO, SMC, NONE, 
EXT_DMA, NONE, NONE, NONE), MFPR_MMP2(GPIO156, 0x14C, PRI_JTAG, GPIO, PWM, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO157, 0x150, PRI_JTAG, GPIO, PWM, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO158, 0x154, PRI_JTAG, GPIO, PWM, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO159, 0x158, PRI_JTAG, GPIO, PWM, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO160, 0x250, NAND, GPIO, SMC, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO161, 0x210, NAND, GPIO, NONE, NONE, NAND, NONE, NONE, NONE), MFPR_MMP2(GPIO162, 0x20C, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO163, 0x208, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO164, 0x204, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO165, 0x1EC, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO166, 0x1E8, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO167, 0x1E4, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(GPIO168, 0x1E0, NAND, GPIO, MMC3, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(TWSI4_SCL, 0x2BC, TWSI4, LCD, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(TWSI4_SDA, 0x2C0, TWSI4, LCD, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(G_CLKREQ, 0x160, G_CLKREQ, ONE_WIRE, NONE, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(VCXO_REQ, 0x168, VCXO_REQ, ONE_WIRE, PLL, NONE, NONE, NONE, NONE, NONE), MFPR_MMP2(VCXO_OUT, 0x16C, VCXO_OUT, 32K_CLKOUT, NONE, NONE, NONE, NONE, NONE, NONE), }; static const unsigned mmp2_uart1_pin1[] = {GPIO29, GPIO30, GPIO31, GPIO32}; static const unsigned mmp2_uart1_pin2[] = {GPIO45, GPIO46}; static const unsigned mmp2_uart1_pin3[] = {GPIO140, GPIO141}; static const unsigned mmp2_uart2_pin1[] = {GPIO37, GPIO38, GPIO39, GPIO40}; static const unsigned mmp2_uart2_pin2[] = {GPIO43, GPIO44, GPIO45, GPIO46}; static const unsigned mmp2_uart2_pin3[] = {GPIO47, GPIO48, GPIO49, GPIO50}; static const unsigned mmp2_uart2_pin4[] = {GPIO74, GPIO75, GPIO76, GPIO77}; static const unsigned mmp2_uart2_pin5[] = {GPIO55, GPIO56}; static const 
unsigned mmp2_uart2_pin6[] = {GPIO140, GPIO141}; static const unsigned mmp2_uart3_pin1[] = {GPIO37, GPIO38, GPIO39, GPIO40}; static const unsigned mmp2_uart3_pin2[] = {GPIO43, GPIO44, GPIO45, GPIO46}; static const unsigned mmp2_uart3_pin3[] = {GPIO51, GPIO52, GPIO53, GPIO54}; static const unsigned mmp2_uart3_pin4[] = {GPIO59, GPIO60, GPIO61, GPIO62}; static const unsigned mmp2_uart3_pin5[] = {GPIO115, GPIO116, GPIO117, GPIO118}; static const unsigned mmp2_uart3_pin6[] = {GPIO51, GPIO52}; static const unsigned mmp2_uart4_pin1[] = {GPIO43, GPIO44, GPIO45, GPIO46}; static const unsigned mmp2_uart4_pin2[] = {GPIO63, GPIO64, GPIO65, GPIO66}; static const unsigned mmp2_uart4_pin3[] = {GPIO74, GPIO75, GPIO76, GPIO77}; static const unsigned mmp2_uart4_pin4[] = {GPIO115, GPIO116, GPIO117, GPIO118}; static const unsigned mmp2_uart4_pin5[] = {GPIO59, GPIO60}; static const unsigned mmp2_kpdk_pin1[] = {GPIO16, GPIO17, GPIO18, GPIO19}; static const unsigned mmp2_kpdk_pin2[] = {GPIO16, GPIO17}; static const unsigned mmp2_twsi2_pin1[] = {GPIO37, GPIO38}; static const unsigned mmp2_twsi2_pin2[] = {GPIO39, GPIO40}; static const unsigned mmp2_twsi2_pin3[] = {GPIO43, GPIO44}; static const unsigned mmp2_twsi2_pin4[] = {GPIO53, GPIO54}; static const unsigned mmp2_twsi2_pin5[] = {GPIO55, GPIO56}; static const unsigned mmp2_twsi3_pin1[] = {GPIO71, GPIO72}; static const unsigned mmp2_twsi3_pin2[] = {GPIO95, GPIO96}; static const unsigned mmp2_twsi4_pin1[] = {TWSI4_SCL, TWSI4_SDA}; static const unsigned mmp2_twsi5_pin1[] = {GPIO41, GPIO42}; static const unsigned mmp2_twsi5_pin2[] = {GPIO84, GPIO85}; static const unsigned mmp2_twsi5_pin3[] = {GPIO99, GPIO100}; static const unsigned mmp2_twsi6_pin1[] = {GPIO47, GPIO48}; static const unsigned mmp2_twsi6_pin2[] = {GPIO86, GPIO87}; static const unsigned mmp2_twsi6_pin3[] = {GPIO97, GPIO98}; static const unsigned mmp2_ccic1_pin1[] = {GPIO12, GPIO13, GPIO14, GPIO15, GPIO16, GPIO17, GPIO18, GPIO19, GPIO20, GPIO21, GPIO22, GPIO23}; static const 
unsigned mmp2_ccic1_pin2[] = {GPIO59, GPIO60, GPIO61, GPIO62, GPIO63, GPIO64, GPIO65, GPIO66, GPIO67, GPIO68, GPIO69, GPIO70}; static const unsigned mmp2_ccic2_pin1[] = {GPIO59, GPIO60, GPIO61, GPIO62, GPIO63, GPIO64, GPIO65, GPIO66, GPIO67, GPIO68, GPIO69, GPIO70}; static const unsigned mmp2_ccic2_pin2[] = {GPIO82, GPIO83, GPIO86, GPIO87, GPIO88, GPIO89, GPIO90, GPIO91, GPIO92, GPIO93, GPIO94, GPIO95}; static const unsigned mmp2_ulpi_pin1[] = {GPIO59, GPIO60, GPIO61, GPIO62, GPIO63, GPIO64, GPIO65, GPIO66, GPIO67, GPIO68, GPIO69, GPIO70}; static const unsigned mmp2_ro_pin1[] = {GPIO16, GPIO17}; static const unsigned mmp2_ro_pin2[] = {GPIO18, GPIO19}; static const unsigned mmp2_ro_pin3[] = {GPIO51, GPIO52}; static const unsigned mmp2_ro_pin4[] = {GPIO55, GPIO56}; static const unsigned mmp2_i2s_pin1[] = {GPIO24, GPIO25, GPIO26, GPIO27, GPIO28}; static const unsigned mmp2_i2s_pin2[] = {GPIO33, GPIO34, GPIO35, GPIO36}; static const unsigned mmp2_ssp1_pin1[] = {GPIO37, GPIO38, GPIO39, GPIO40}; static const unsigned mmp2_ssp1_pin2[] = {GPIO43, GPIO44, GPIO45, GPIO46}; static const unsigned mmp2_ssp1_pin3[] = {GPIO115, GPIO116, GPIO117, GPIO118}; static const unsigned mmp2_ssp2_pin1[] = {GPIO47, GPIO48, GPIO49, GPIO50}; static const unsigned mmp2_ssp3_pin1[] = {GPIO119, GPIO120, GPIO121, GPIO122}; static const unsigned mmp2_ssp3_pin2[] = {GPIO132, GPIO133, GPIO133, GPIO136}; static const unsigned mmp2_sspa2_pin1[] = {GPIO25, GPIO26, GPIO27, GPIO28}; static const unsigned mmp2_sspa2_pin2[] = {GPIO33, GPIO34, GPIO35, GPIO36}; static const unsigned mmp2_mmc1_pin1[] = {GPIO131, GPIO132, GPIO133, GPIO134, GPIO136, GPIO139, GPIO140, GPIO141}; static const unsigned mmp2_mmc2_pin1[] = {GPIO37, GPIO38, GPIO39, GPIO40, GPIO41, GPIO42}; static const unsigned mmp2_mmc3_pin1[] = {GPIO111, GPIO112, GPIO151, GPIO162, GPIO163, GPIO164, GPIO165, GPIO166, GPIO167, GPIO168}; static struct pxa3xx_pin_group mmp2_grps[] = { GRP_MMP2("uart1 4p1", UART1, mmp2_uart1_pin1), GRP_MMP2("uart1 2p2", 
UART1, mmp2_uart1_pin2), GRP_MMP2("uart1 2p3", UART1, mmp2_uart1_pin3), GRP_MMP2("uart2 4p1", UART2, mmp2_uart2_pin1), GRP_MMP2("uart2 4p2", UART2, mmp2_uart2_pin2), GRP_MMP2("uart2 4p3", UART2, mmp2_uart2_pin3), GRP_MMP2("uart2 4p4", UART2, mmp2_uart2_pin4), GRP_MMP2("uart2 2p5", UART2, mmp2_uart2_pin5), GRP_MMP2("uart2 2p6", UART2, mmp2_uart2_pin6), GRP_MMP2("uart3 4p1", UART3, mmp2_uart3_pin1), GRP_MMP2("uart3 4p2", UART3, mmp2_uart3_pin2), GRP_MMP2("uart3 4p3", UART3, mmp2_uart3_pin3), GRP_MMP2("uart3 4p4", UART3, mmp2_uart3_pin4), GRP_MMP2("uart3 4p5", UART3, mmp2_uart3_pin5), GRP_MMP2("uart3 2p6", UART3, mmp2_uart3_pin6), GRP_MMP2("uart4 4p1", UART4, mmp2_uart4_pin1), GRP_MMP2("uart4 4p2", UART4, mmp2_uart4_pin2), GRP_MMP2("uart4 4p3", UART4, mmp2_uart4_pin3), GRP_MMP2("uart4 4p4", UART4, mmp2_uart4_pin4), GRP_MMP2("uart4 2p5", UART4, mmp2_uart4_pin5), GRP_MMP2("kpdk 4p1", KP_DK, mmp2_kpdk_pin1), GRP_MMP2("kpdk 4p2", KP_DK, mmp2_kpdk_pin2), GRP_MMP2("twsi2-1", TWSI2, mmp2_twsi2_pin1), GRP_MMP2("twsi2-2", TWSI2, mmp2_twsi2_pin2), GRP_MMP2("twsi2-3", TWSI2, mmp2_twsi2_pin3), GRP_MMP2("twsi2-4", TWSI2, mmp2_twsi2_pin4), GRP_MMP2("twsi2-5", TWSI2, mmp2_twsi2_pin5), GRP_MMP2("twsi3-1", TWSI3, mmp2_twsi3_pin1), GRP_MMP2("twsi3-2", TWSI3, mmp2_twsi3_pin2), GRP_MMP2("twsi4", TWSI4, mmp2_twsi4_pin1), GRP_MMP2("twsi5-1", TWSI5, mmp2_twsi5_pin1), GRP_MMP2("twsi5-2", TWSI5, mmp2_twsi5_pin2), GRP_MMP2("twsi5-3", TWSI5, mmp2_twsi5_pin3), GRP_MMP2("twsi6-1", TWSI6, mmp2_twsi6_pin1), GRP_MMP2("twsi6-2", TWSI6, mmp2_twsi6_pin2), GRP_MMP2("twsi6-3", TWSI6, mmp2_twsi6_pin3), GRP_MMP2("ccic1-1", CCIC1, mmp2_ccic1_pin1), GRP_MMP2("ccic1-2", CCIC1, mmp2_ccic1_pin2), GRP_MMP2("ccic2-1", CCIC2, mmp2_ccic2_pin1), GRP_MMP2("ccic2-1", CCIC2, mmp2_ccic2_pin2), GRP_MMP2("ulpi", ULPI, mmp2_ulpi_pin1), GRP_MMP2("ro-1", ROT, mmp2_ro_pin1), GRP_MMP2("ro-2", ROT, mmp2_ro_pin2), GRP_MMP2("ro-3", ROT, mmp2_ro_pin3), GRP_MMP2("ro-4", ROT, mmp2_ro_pin4), GRP_MMP2("i2s 5p1", I2S, mmp2_i2s_pin1), 
GRP_MMP2("i2s 4p2", I2S, mmp2_i2s_pin2), GRP_MMP2("ssp1 4p1", SSP1, mmp2_ssp1_pin1), GRP_MMP2("ssp1 4p2", SSP1, mmp2_ssp1_pin2), GRP_MMP2("ssp1 4p3", SSP1, mmp2_ssp1_pin3), GRP_MMP2("ssp2 4p1", SSP2, mmp2_ssp2_pin1), GRP_MMP2("ssp3 4p1", SSP3, mmp2_ssp3_pin1), GRP_MMP2("ssp3 4p2", SSP3, mmp2_ssp3_pin2), GRP_MMP2("sspa2 4p1", SSPA2, mmp2_sspa2_pin1), GRP_MMP2("sspa2 4p2", SSPA2, mmp2_sspa2_pin2), GRP_MMP2("mmc1 8p1", MMC1, mmp2_mmc1_pin1), GRP_MMP2("mmc2 6p1", MMC2, mmp2_mmc2_pin1), GRP_MMP2("mmc3 10p1", MMC3, mmp2_mmc3_pin1), }; static const char * const mmp2_uart1_grps[] = {"uart1 4p1", "uart1 2p2", "uart1 2p3"}; static const char * const mmp2_uart2_grps[] = {"uart2 4p1", "uart2 4p2", "uart2 4p3", "uart2 4p4", "uart2 4p5", "uart2 4p6"}; static const char * const mmp2_uart3_grps[] = {"uart3 4p1", "uart3 4p2", "uart3 4p3", "uart3 4p4", "uart3 4p5", "uart3 2p6"}; static const char * const mmp2_uart4_grps[] = {"uart4 4p1", "uart4 4p2", "uart4 4p3", "uart4 4p4", "uart4 2p5"}; static const char * const mmp2_kpdk_grps[] = {"kpdk 4p1", "kpdk 4p2"}; static const char * const mmp2_twsi2_grps[] = {"twsi2-1", "twsi2-2", "twsi2-3", "twsi2-4", "twsi2-5"}; static const char * const mmp2_twsi3_grps[] = {"twsi3-1", "twsi3-2"}; static const char * const mmp2_twsi4_grps[] = {"twsi4"}; static const char * const mmp2_twsi5_grps[] = {"twsi5-1", "twsi5-2", "twsi5-3"}; static const char * const mmp2_twsi6_grps[] = {"twsi6-1", "twsi6-2", "twsi6-3"}; static const char * const mmp2_ccic1_grps[] = {"ccic1-1", "ccic1-2"}; static const char * const mmp2_ccic2_grps[] = {"ccic2-1", "ccic2-2"}; static const char * const mmp2_ulpi_grps[] = {"ulpi"}; static const char * const mmp2_ro_grps[] = {"ro-1", "ro-2", "ro-3", "ro-4"}; static const char * const mmp2_i2s_grps[] = {"i2s 5p1", "i2s 4p2"}; static const char * const mmp2_ssp1_grps[] = {"ssp1 4p1", "ssp1 4p2", "ssp1 4p3"}; static const char * const mmp2_ssp2_grps[] = {"ssp2 4p1"}; static const char * const mmp2_ssp3_grps[] = {"ssp3 4p1", "ssp3 
4p2"}; static const char * const mmp2_sspa2_grps[] = {"sspa2 4p1", "sspa2 4p2"}; static const char * const mmp2_mmc1_grps[] = {"mmc1 8p1"}; static const char * const mmp2_mmc2_grps[] = {"mmc2 6p1"}; static const char * const mmp2_mmc3_grps[] = {"mmc3 10p1"}; static struct pxa3xx_pmx_func mmp2_funcs[] = { {"uart1", ARRAY_AND_SIZE(mmp2_uart1_grps)}, {"uart2", ARRAY_AND_SIZE(mmp2_uart2_grps)}, {"uart3", ARRAY_AND_SIZE(mmp2_uart3_grps)}, {"uart4", ARRAY_AND_SIZE(mmp2_uart4_grps)}, {"kpdk", ARRAY_AND_SIZE(mmp2_kpdk_grps)}, {"twsi2", ARRAY_AND_SIZE(mmp2_twsi2_grps)}, {"twsi3", ARRAY_AND_SIZE(mmp2_twsi3_grps)}, {"twsi4", ARRAY_AND_SIZE(mmp2_twsi4_grps)}, {"twsi5", ARRAY_AND_SIZE(mmp2_twsi5_grps)}, {"twsi6", ARRAY_AND_SIZE(mmp2_twsi6_grps)}, {"ccic1", ARRAY_AND_SIZE(mmp2_ccic1_grps)}, {"ccic2", ARRAY_AND_SIZE(mmp2_ccic2_grps)}, {"ulpi", ARRAY_AND_SIZE(mmp2_ulpi_grps)}, {"ro", ARRAY_AND_SIZE(mmp2_ro_grps)}, {"i2s", ARRAY_AND_SIZE(mmp2_i2s_grps)}, {"ssp1", ARRAY_AND_SIZE(mmp2_ssp1_grps)}, {"ssp2", ARRAY_AND_SIZE(mmp2_ssp2_grps)}, {"ssp3", ARRAY_AND_SIZE(mmp2_ssp3_grps)}, {"sspa2", ARRAY_AND_SIZE(mmp2_sspa2_grps)}, {"mmc1", ARRAY_AND_SIZE(mmp2_mmc1_grps)}, {"mmc2", ARRAY_AND_SIZE(mmp2_mmc2_grps)}, {"mmc3", ARRAY_AND_SIZE(mmp2_mmc3_grps)}, }; static struct pinctrl_desc mmp2_pctrl_desc = { .name = "mmp2-pinctrl", .owner = THIS_MODULE, }; static struct pxa3xx_pinmux_info mmp2_info = { .mfp = mmp2_mfp, .num_mfp = ARRAY_SIZE(mmp2_mfp), .grps = mmp2_grps, .num_grps = ARRAY_SIZE(mmp2_grps), .funcs = mmp2_funcs, .num_funcs = ARRAY_SIZE(mmp2_funcs), .num_gpio = 169, .desc = &mmp2_pctrl_desc, .pads = mmp2_pads, .num_pads = ARRAY_SIZE(mmp2_pads), .cputype = PINCTRL_MMP2, .ds_mask = MMP2_DS_MASK, .ds_shift = MMP2_DS_SHIFT, }; static int __devinit mmp2_pinmux_probe(struct platform_device *pdev) { return pxa3xx_pinctrl_register(pdev, &mmp2_info); } static int __devexit mmp2_pinmux_remove(struct platform_device *pdev) { return pxa3xx_pinctrl_unregister(pdev); } static struct platform_driver 
mmp2_pinmux_driver = { .driver = { .name = "mmp2-pinmux", .owner = THIS_MODULE, }, .probe = mmp2_pinmux_probe, .remove = __devexit_p(mmp2_pinmux_remove), }; static int __init mmp2_pinmux_init(void) { return platform_driver_register(&mmp2_pinmux_driver); } core_initcall_sync(mmp2_pinmux_init); static void __exit mmp2_pinmux_exit(void) { platform_driver_unregister(&mmp2_pinmux_driver); } module_exit(mmp2_pinmux_exit); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_DESCRIPTION("PXA3xx pin control driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TheNameIsNigel/android_kernel_huawei_msm8928
arch/arm/mach-ixp4xx/miccpt-pci.c
4987
1873
/* * arch/arm/mach-ixp4xx/miccpt-pci.c * * MICCPT board-level PCI initialization * * Copyright (C) 2002 Intel Corporation. * Copyright (C) 2003-2004 MontaVista Software, Inc. * Copyright (C) 2006 OMICRON electronics GmbH * * Author: Michael Jochum <michael.jochum@omicron.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/irq.h> #include <asm/mach/pci.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> #define MAX_DEV 4 #define IRQ_LINES 4 /* PCI controller GPIO to IRQ pin mappings */ #define INTA 1 #define INTB 2 #define INTC 3 #define INTD 4 void __init miccpt_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init miccpt_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static int pci_irq_table[IRQ_LINES] = { IXP4XX_GPIO_IRQ(INTA), IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC), IXP4XX_GPIO_IRQ(INTD) }; if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES) return pci_irq_table[(slot + pin - 2) % 4]; return -1; } struct hw_pci miccpt_pci __initdata = { .nr_controllers = 1, .preinit = miccpt_pci_preinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = miccpt_map_irq, }; int __init miccpt_pci_init(void) { if (machine_is_miccpt()) pci_common_init(&miccpt_pci); return 0; } subsys_initcall(miccpt_pci_init);
gpl-2.0
HSAFoundation/HSA-Drivers-Linux-AMD
src/kernel/drivers/isdn/hisax/st5481_init.c
4987
5409
/* * Driver for ST5481 USB ISDN modem * * Author Frode Isaksen * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com> * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ /* * TODO: * * b layer1 delay? * hotplug / unregister issues * mod_inc/dec_use_count * unify parts of d/b channel usb handling * file header * avoid copy to isoc buffer? * improve usb delay? * merge l1 state machines? * clean up debug */ #include <linux/module.h> #include <linux/init.h> #include <linux/usb.h> #include <linux/slab.h> #include "st5481.h" MODULE_DESCRIPTION("ISDN4Linux: driver for ST5481 USB ISDN adapter"); MODULE_AUTHOR("Frode Isaksen"); MODULE_LICENSE("GPL"); static int protocol = 2; /* EURO-ISDN Default */ module_param(protocol, int, 0); static int number_of_leds = 2; /* 2 LEDs on the adpater default */ module_param(number_of_leds, int, 0); #ifdef CONFIG_HISAX_DEBUG static int debug = 0; module_param(debug, int, 0); #endif int st5481_debug; /* ====================================================================== * registration/deregistration with the USB layer */ /* * This function will be called when the adapter is plugged * into the USB bus. 
*/ static int probe_st5481(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct st5481_adapter *adapter; struct hisax_b_if *b_if[2]; int retval, i; printk(KERN_INFO "st541: found adapter VendorId %04x, ProductId %04x, LEDs %d\n", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct), number_of_leds); adapter = kzalloc(sizeof(struct st5481_adapter), GFP_KERNEL); if (!adapter) return -ENOMEM; adapter->number_of_leds = number_of_leds; adapter->usb_dev = dev; adapter->hisax_d_if.owner = THIS_MODULE; adapter->hisax_d_if.ifc.priv = adapter; adapter->hisax_d_if.ifc.l2l1 = st5481_d_l2l1; for (i = 0; i < 2; i++) { adapter->bcs[i].adapter = adapter; adapter->bcs[i].channel = i; adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i]; adapter->bcs[i].b_if.ifc.l2l1 = st5481_b_l2l1; } retval = st5481_setup_usb(adapter); if (retval < 0) goto err; retval = st5481_setup_d(adapter); if (retval < 0) goto err_usb; retval = st5481_setup_b(&adapter->bcs[0]); if (retval < 0) goto err_d; retval = st5481_setup_b(&adapter->bcs[1]); if (retval < 0) goto err_b; for (i = 0; i < 2; i++) b_if[i] = &adapter->bcs[i].b_if; if (hisax_register(&adapter->hisax_d_if, b_if, "st5481_usb", protocol) != 0) goto err_b1; st5481_start(adapter); usb_set_intfdata(intf, adapter); return 0; err_b1: st5481_release_b(&adapter->bcs[1]); err_b: st5481_release_b(&adapter->bcs[0]); err_d: st5481_release_d(adapter); err_usb: st5481_release_usb(adapter); err: kfree(adapter); return -EIO; } /* * This function will be called when the adapter is removed * from the USB bus. 
*/ static void disconnect_st5481(struct usb_interface *intf) { struct st5481_adapter *adapter = usb_get_intfdata(intf); DBG(1, ""); usb_set_intfdata(intf, NULL); if (!adapter) return; st5481_stop(adapter); st5481_release_b(&adapter->bcs[1]); st5481_release_b(&adapter->bcs[0]); st5481_release_d(adapter); // we would actually better wait for completion of outstanding urbs mdelay(2); st5481_release_usb(adapter); hisax_unregister(&adapter->hisax_d_if); kfree(adapter); } /* * The last 4 bits in the Product Id is set with 4 pins on the chip. */ static struct usb_device_id st5481_ids[] = { { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x0) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x1) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x2) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x3) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x4) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x5) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x6) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x7) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x8) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x9) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xA) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xB) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xC) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xD) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xE) }, { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xF) }, { } }; MODULE_DEVICE_TABLE(usb, st5481_ids); static struct usb_driver st5481_usb_driver = { .name = "st5481_usb", .probe = probe_st5481, .disconnect = disconnect_st5481, .id_table = st5481_ids, .disable_hub_initiated_lpm = 1, }; static int __init st5481_usb_init(void) { int retval; #ifdef CONFIG_HISAX_DEBUG st5481_debug = debug; #endif printk(KERN_INFO "hisax_st5481: ST5481 USB ISDN driver $Revision: 2.4.2.3 $\n"); retval = st5481_d_init(); if (retval < 0) goto out; retval = usb_register(&st5481_usb_driver); if (retval < 0) 
goto out_d_exit; return 0; out_d_exit: st5481_d_exit(); out: return retval; } static void __exit st5481_usb_exit(void) { usb_deregister(&st5481_usb_driver); st5481_d_exit(); } module_init(st5481_usb_init); module_exit(st5481_usb_exit);
gpl-2.0
LeMaker/linux-actions
arch/sh/kernel/cpu/sh4/perf_event.c
9851
6376
/* * Performance events support for SH7750-style performance counters * * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/perf_event.h> #include <asm/processor.h> #define PM_CR_BASE 0xff000084 /* 16-bit */ #define PM_CTR_BASE 0xff100004 /* 32-bit */ #define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) #define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) #define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) #define PMCR_PMM_MASK 0x0000003f #define PMCR_CLKF 0x00000100 #define PMCR_PMCLR 0x00002000 #define PMCR_PMST 0x00004000 #define PMCR_PMEN 0x00008000 static struct sh_pmu sh7750_pmu; /* * There are a number of events supported by each counter (33 in total). * Since we have 2 counters, each counter will take the event code as it * corresponds to the PMCR PMM setting. Each counter can be configured * independently. 
* * Event Code Description * ---------- ----------- * * 0x01 Operand read access * 0x02 Operand write access * 0x03 UTLB miss * 0x04 Operand cache read miss * 0x05 Operand cache write miss * 0x06 Instruction fetch (w/ cache) * 0x07 Instruction TLB miss * 0x08 Instruction cache miss * 0x09 All operand accesses * 0x0a All instruction accesses * 0x0b OC RAM operand access * 0x0d On-chip I/O space access * 0x0e Operand access (r/w) * 0x0f Operand cache miss (r/w) * 0x10 Branch instruction * 0x11 Branch taken * 0x12 BSR/BSRF/JSR * 0x13 Instruction execution * 0x14 Instruction execution in parallel * 0x15 FPU Instruction execution * 0x16 Interrupt * 0x17 NMI * 0x18 trapa instruction execution * 0x19 UBCA match * 0x1a UBCB match * 0x21 Instruction cache fill * 0x22 Operand cache fill * 0x23 Elapsed time * 0x24 Pipeline freeze by I-cache miss * 0x25 Pipeline freeze by D-cache miss * 0x27 Pipeline freeze by branch instruction * 0x28 Pipeline freeze by CPU register * 0x29 Pipeline freeze by FPU */ static const int sh7750_general_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0023, [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */ [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010, [PERF_COUNT_HW_BRANCH_MISSES] = -1, [PERF_COUNT_HW_BUS_CYCLES] = -1, }; #define C(x) PERF_COUNT_HW_CACHE_##x static const int sh7750_cache_events [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0001, [ C(RESULT_MISS) ] = 0x0004, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x0002, [ C(RESULT_MISS) ] = 0x0005, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(L1I) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0006, [ C(RESULT_MISS) ] = 0x0008, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ 
C(RESULT_MISS) ] = 0, }, }, [ C(LL) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0x0003, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0x0007, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, [ C(BPU) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, [ C(NODE) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, }; static int sh7750_event_map(int event) { return sh7750_general_events[event]; } static u64 sh7750_pmu_read(int idx) { return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) | __raw_readl(PMCTRL(idx)); } static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx) { unsigned int tmp; tmp = __raw_readw(PMCR(idx)); tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN); __raw_writew(tmp, PMCR(idx)); } static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx) { __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx)); __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx)); } static void sh7750_pmu_disable_all(void) { int i; for (i = 0; i < sh7750_pmu.num_events; i++) __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); } 
static void sh7750_pmu_enable_all(void) { int i; for (i = 0; i < sh7750_pmu.num_events; i++) __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i)); } static struct sh_pmu sh7750_pmu = { .name = "sh7750", .num_events = 2, .event_map = sh7750_event_map, .max_events = ARRAY_SIZE(sh7750_general_events), .raw_event_mask = PMCR_PMM_MASK, .cache_events = &sh7750_cache_events, .read = sh7750_pmu_read, .disable = sh7750_pmu_disable, .enable = sh7750_pmu_enable, .disable_all = sh7750_pmu_disable_all, .enable_all = sh7750_pmu_enable_all, }; static int __init sh7750_pmu_init(void) { /* * Make sure this CPU actually has perf counters. */ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { pr_notice("HW perf events unsupported, software events only.\n"); return -ENODEV; } return register_sh_pmu(&sh7750_pmu); } early_initcall(sh7750_pmu_init);
gpl-2.0
razrqcom-dev-team/android_kernel_motorola_msm8226
lib/zlib_inflate/infutil.c
15227
1231
#include <linux/zutil.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * zlib_inflate_blob - one-shot zlib decompression of a binary blob.
 *
 * Initializes zlib, inflates @len bytes from @buf into @gunzip_buf
 * (capacity @sz bytes), tears zlib down again, and returns the number
 * of bytes produced or a negative error code (-ENOMEM on allocation
 * failure, -EINVAL on corrupt or truncated input).
 *
 * The gzip header (1f,8b,08... 10 bytes total + possible asciz
 * filename) is expected to be stripped from input by the caller —
 * hence the negative window bits (raw deflate, no wrapper) below.
 */
int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
		      const void *buf, unsigned int len)
{
	const u8 *zbuf = buf;
	struct z_stream_s *strm;
	int rc = -ENOMEM;

	strm = kmalloc(sizeof(*strm), GFP_KERNEL);
	if (strm == NULL)
		goto gunzip_nomem1;
	strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (strm->workspace == NULL)
		goto gunzip_nomem2;

	strm->next_in = zbuf;
	strm->avail_in = len;
	strm->next_out = gunzip_buf;
	strm->avail_out = sz;

	rc = zlib_inflateInit2(strm, -MAX_WBITS);
	if (rc == Z_OK) {
		rc = zlib_inflate(strm, Z_FINISH);
		/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
		if (rc == Z_STREAM_END)
			rc = sz - strm->avail_out;
		else
			rc = -EINVAL;
		zlib_inflateEnd(strm);
	} else
		rc = -EINVAL;

	kfree(strm->workspace);
gunzip_nomem2:
	kfree(strm);
gunzip_nomem1:
	return rc; /* returns Z_OK (0) if successful */
}
gpl-2.0
gitrepo/openwrt
target/linux/adm8668/files/drivers/mtd/maps/adm8668.c
124
9782
/* * Copyright (C) 2010 Scott Nicholas <neutronscott@scottn.us> * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2005 Waldemar Brodkorb <wbx@openwrt.org> * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) * * original functions for finding root filesystem from Mike Baker * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * * Copyright 2004, Broadcom Corporation * All Rights Reserved. * * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE. 
 *
 * Flash mapping for adm8668 boards
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <asm/io.h>

/* NOR flash window: physical base, size, and bus width in bytes. */
#define WINDOW_ADDR	0x10000000
#define WINDOW_SIZE	0x800000
#define BANKWIDTH	2

/* first a little bit about the headers i need.. */

/* just interested in part of the full struct
 * (only s_magic and bytes_used are read; pad0 stands in for the
 * intervening squashfs superblock fields — it's not really padding)
 */
struct squashfs_super_block {
	__le32	s_magic;
	__le32	pad0[9];	/* it's not really padding */
	__le64	bytes_used;
};

#define IH_MAGIC	0x56190527	/* Image Magic Number */

/* U-Boot legacy image header, all fields big-endian on flash. */
struct uboot_header {
	uint32_t	ih_magic;	/* Image Header Magic Number */
	uint32_t	ih_hcrc;	/* Image Header CRC Checksum */
	uint32_t	ih_time;	/* Image Creation Timestamp */
	uint32_t	ih_size;	/* Image Data Size */
	uint32_t	ih_load;	/* Data Load Address */
	uint32_t	ih_ep;		/* Entry Point Address */
	uint32_t	ih_dcrc;	/* Image Data CRC Checksum */
	uint8_t		ih_os;		/* Operating System */
	uint8_t		ih_arch;	/* CPU architecture */
	uint8_t		ih_type;	/* Image Type */
	uint8_t		ih_comp;	/* Compression Type */
	char		ih_name[32];	/* image name */
};

/************************************************/

static struct mtd_info *adm8668_mtd;

/* GNU-style (obsolete `name:`) designated initializers, kept as-is. */
struct map_info adm8668_map = {
	name:		"adm8668-nor",
	size:		WINDOW_SIZE,
	phys:		WINDOW_ADDR,
	bankwidth:	BANKWIDTH,
};

/*
 * Copied from mtdblock.c
 *
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write requests. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */

/* Erase-completion callback: wakes the waiter parked on the wait queue
 * stashed in erase_info->priv. */
static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

/*
 * Erase the flash region at @pos of length @len, then write @buf into it.
 * Sleeps (TASK_INTERRUPTIBLE) until the asynchronous erase completes via
 * erase_callback(). Returns 0 on success or a negative error code.
 * NOTE(review): must be called from process context — it may schedule().
 */
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	/* Queue ourselves BEFORE starting the erase so the callback's
	 * wake_up() cannot be missed. */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	schedule(); /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write data to flash.
	 */

	ret = mtd->write (mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;	/* short write */
	return 0;
}

/* decent defaults in case... shrug
 * (rootfs offset/size are rewritten by init_mtd_partitions once the
 *  squashfs superblock is located) */
static struct mtd_partition adm8668_parts[] = {
	{ name: "linux",	offset: 0x40000,	size: WINDOW_SIZE-0x40000, },
	{ name: "rootfs",	offset: 0xe0000,	size: 0x140000, },
	{ name: "uboot_env",	offset: 0x20000,	size: 0x20000, },
	{ name: NULL, },
};

/* in case i wanna change stuff later, and to clarify the math section...
*/ #define PART_LINUX 0 #define PART_ROOTFS 1 #define NR_PARTS 3 static int __init init_mtd_partitions(struct mtd_info *mtd, size_t size) { struct uboot_header uhdr; int off, blocksize; size_t len, linux_len; struct squashfs_super_block shdr; blocksize = mtd->erasesize; if (blocksize < 0x10000) blocksize = 0x10000; /* now find squashfs */ memset(&shdr, 0xe5, sizeof(shdr)); for (off = adm8668_parts[PART_LINUX].offset; off < size; off += blocksize) { /* * Read into buffer */ if (mtd->read(mtd, off, sizeof(shdr), &len, (char *)&shdr) || len != sizeof(shdr)) continue; if (shdr.s_magic == SQUASHFS_MAGIC) { uint32_t fs_size = (uint32_t)shdr.bytes_used; printk(KERN_INFO "%s: Filesystem type: squashfs, size=%dkB\n", mtd->name, fs_size>>10); /* Update rootfs based on the superblock info, and * stretch to end of MTD. rootfs_split will split it */ adm8668_parts[PART_ROOTFS].offset = off; adm8668_parts[PART_ROOTFS].size = mtd->size - adm8668_parts[PART_ROOTFS].offset; /* kernel ends where rootfs starts * but we'll keep it full-length for upgrades */ linux_len = adm8668_parts[PART_LINUX+1].offset - adm8668_parts[PART_LINUX].offset; #if 1 adm8668_parts[PART_LINUX].size = mtd->size - adm8668_parts[PART_LINUX].offset; #else adm8668_parts[PART_LINUX].size = linux_len; #endif goto found; } } printk(KERN_NOTICE "%s: Couldn't find root filesystem\n", mtd->name); return NR_PARTS; found: if (mtd->read(mtd, adm8668_parts[PART_LINUX].offset, sizeof(uhdr), &len, (char *)&uhdr) || len != sizeof(uhdr)) return NR_PARTS; /* that's odd. 
how'd ya boot it then */ if (uhdr.ih_magic != IH_MAGIC) return NR_PARTS; if (be32_to_cpu(uhdr.ih_size) != (linux_len - sizeof(uhdr))) { unsigned char *block, *data; unsigned int offset; offset = adm8668_parts[PART_LINUX].offset + sizeof(struct uboot_header); data = (unsigned char *)(WINDOW_ADDR | 0xA0000000 | offset); printk(KERN_NOTICE "Updating U-boot image:\n"); printk(KERN_NOTICE " old: [size: %8d crc32: 0x%08x]\n", be32_to_cpu(uhdr.ih_size), be32_to_cpu(uhdr.ih_dcrc)); /* Update the data length & crc32 */ uhdr.ih_size = cpu_to_be32(linux_len - sizeof(uhdr)); uhdr.ih_dcrc = crc32_le(~0, data, linux_len - sizeof(uhdr)) ^ (~0); uhdr.ih_dcrc = cpu_to_be32(uhdr.ih_dcrc); printk(KERN_NOTICE " new: [size: %8d crc32: 0x%08x]\n", be32_to_cpu(uhdr.ih_size), be32_to_cpu(uhdr.ih_dcrc)); /* update header's crc... */ uhdr.ih_hcrc = 0; uhdr.ih_hcrc = crc32_le(~0, (unsigned char *)&uhdr, sizeof(uhdr)) ^ (~0); uhdr.ih_hcrc = cpu_to_be32(uhdr.ih_hcrc); /* read first eraseblock from the image */ block = kmalloc(mtd->erasesize, GFP_KERNEL); if (mtd->read(mtd, adm8668_parts[PART_LINUX].offset, mtd->erasesize, &len, block) || len != mtd->erasesize) { printk("Error copying first eraseblock\n"); return 0; } /* Write updated header to the flash */ memcpy(block, &uhdr, sizeof(uhdr)); if (mtd->unlock) mtd->unlock(mtd, off, mtd->erasesize); erase_write(mtd, adm8668_parts[PART_LINUX].offset, mtd->erasesize, block); if (mtd->sync) mtd->sync(mtd); kfree(block); printk(KERN_NOTICE "Done\n"); } return NR_PARTS; } int __init init_adm8668_map(void) { int nr_parts, ret; adm8668_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!adm8668_map.virt) { printk(KERN_ERR "Failed to ioremap\n"); return -EIO; } simple_map_init(&adm8668_map); if (!(adm8668_mtd = do_map_probe("cfi_probe", &adm8668_map))) { printk(KERN_ERR "cfi_probe failed\n"); iounmap((void *)adm8668_map.virt); return -ENXIO; } adm8668_mtd->owner = THIS_MODULE; nr_parts = init_mtd_partitions(adm8668_mtd, adm8668_mtd->size); ret = 
mtd_device_register(adm8668_mtd, adm8668_parts, nr_parts); if (ret) { printk(KERN_ERR "Flash: mtd_device_register failed\n"); goto fail; } return 0; fail: if (adm8668_mtd) map_destroy(adm8668_mtd); if (adm8668_map.virt) iounmap((void *) adm8668_map.virt); adm8668_map.virt = 0; return ret; } void __exit cleanup_adm8668_map(void) { mtd_device_unregister(adm8668_mtd); map_destroy(adm8668_mtd); iounmap((void *) adm8668_map.virt); adm8668_map.virt = 0; } module_init(init_adm8668_map); module_exit(cleanup_adm8668_map); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Scott Nicholas <neutronscott@scottn.us>"); MODULE_DESCRIPTION("MTD map driver for ADM8668 NOR Flash");
gpl-2.0
vibhu0009/android_kernel_cyanogen_msm8916
drivers/bluetooth/btusb.c
380
43304
/* * * Generic Bluetooth USB driver * * Copyright (C) 2005-2008 Marcel Holtmann <marcel@holtmann.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/usb.h> #include <linux/firmware.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.6" static bool ignore_dga; static bool ignore_csr; static bool ignore_sniffer; static bool disable_scofix; static bool force_scofix; static int sco_conn; static int reset = 1; static struct usb_driver btusb_driver; #define BTUSB_IGNORE 0x01 #define BTUSB_DIGIANSWER 0x02 #define BTUSB_CSR 0x04 #define BTUSB_SNIFFER 0x08 #define BTUSB_BCM92035 0x10 #define BTUSB_BROKEN_ISOC 0x20 #define BTUSB_WRONG_SCO_MTU 0x40 #define BTUSB_ATH3012 0x80 #define BTUSB_INTEL 0x100 static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, /* MediaTek MT76x0E */ { USB_DEVICE(0x0e8d, 0x763f) }, /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, /* Apple MacBookPro 7,1 */ { USB_DEVICE(0x05ac, 0x8213) }, /* Apple iMac11,1 */ { USB_DEVICE(0x05ac, 0x8215) }, /* Apple MacBookPro6,2 */ { USB_DEVICE(0x05ac, 0x8218) }, /* Apple MacBookAir3,1, 
MacBookAir3,2 */ { USB_DEVICE(0x05ac, 0x821b) }, /* Apple MacBookAir4,1 */ { USB_DEVICE(0x05ac, 0x821f) }, /* Apple MacBookPro8,2 */ { USB_DEVICE(0x05ac, 0x821a) }, /* Apple MacMini5,1 */ { USB_DEVICE(0x05ac, 0x8281) }, /* AVM BlueFRITZ! USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, /* Bluetooth Ultraport Module from IBM */ { USB_DEVICE(0x04bf, 0x030a) }, /* ALPS Modules with non-standard id */ { USB_DEVICE(0x044e, 0x3001) }, { USB_DEVICE(0x044e, 0x3002) }, /* Ericsson with non-standard id */ { USB_DEVICE(0x0bdb, 0x1002) }, /* Canyon CN-BTU1 with HID interfaces */ { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702A0 */ { USB_DEVICE(0x0b05, 0x17b5) }, { USB_DEVICE(0x0b05, 0x17cb) }, { USB_DEVICE(0x04ca, 0x2003) }, { USB_DEVICE(0x0489, 0xe042) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) }, /*Broadcom devices with vendor specific id */ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, btusb_table); static struct usb_device_id blacklist_table[] = { /* CSR BlueCore devices */ { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR }, /* Broadcom BCM2033 without firmware */ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), 
.driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, /* Broadcom BCM2045 */ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, /* 
IBM/Lenovo ThinkPad with Broadcom chip */ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, /* HP laptop with Broadcom chip */ { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell laptop with Broadcom chip */ { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell Wireless 370 and 410 devices */ { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Belkin F8T012 and F8T013 devices */ { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Asus WL-BTD202 device */ { USB_DEVICE(0x0b05, 0x1715), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Kensington Bluetooth USB adapter */ { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, /* RTX Telecom based adapters with buggy SCO support */ { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC }, /* CONWISE Technology based adapters with buggy SCO support */ { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC }, /* Digianswer devices */ { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, /* CSR BlueCore Bluetooth Sniffer */ { USB_DEVICE(0x0a12, 0x0002), .driver_info = BTUSB_SNIFFER }, /* Frontline ComProbe Bluetooth Sniffer */ { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER }, /* Intel Bluetooth device */ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { } /* Terminating entry */ }; #define BTUSB_MAX_ISOC_FRAMES 10 #define BTUSB_INTR_RUNNING 0 #define BTUSB_BULK_RUNNING 1 #define BTUSB_ISOC_RUNNING 2 #define BTUSB_SUSPENDING 3 #define BTUSB_DID_ISO_RESUME 4 struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_interface 
*intf; struct usb_interface *isoc; spinlock_t lock; unsigned long flags; struct work_struct work; struct work_struct waker; struct usb_anchor tx_anchor; struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; struct usb_anchor isoc_anchor; struct usb_anchor deferred; int tx_in_flight; spinlock_t txlock; struct usb_endpoint_descriptor *intr_ep; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor *bulk_rx_ep; struct usb_endpoint_descriptor *isoc_tx_ep; struct usb_endpoint_descriptor *isoc_rx_ep; __u8 cmdreq_type; unsigned int sco_num; int isoc_altsetting; int suspend_count; }; static int inc_tx(struct btusb_data *data) { unsigned long flags; int rv; spin_lock_irqsave(&data->txlock, flags); rv = test_bit(BTUSB_SUSPENDING, &data->flags); if (!rv) data->tx_in_flight++; spin_unlock_irqrestore(&data->txlock, flags); return rv; } static void btusb_intr_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (hci_recv_fragment(hdev, HCI_EVENT_PKT, urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted event packet", hdev->name); hdev->stat.err_rx++; } } if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) return; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size; BT_DBG("%s", hdev->name); if 
(!data->intr_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; size = le16_to_cpu(data->intr_ep->wMaxPacketSize); buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_intr_complete, hdev, data->intr_ep->bInterval); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_bulk_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted ACL packet", hdev->name); hdev->stat.err_rx++; } } if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->bulk_anchor); usb_mark_last_busy(data->udev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = HCI_MAX_FRAME_SIZE; BT_DBG("%s", hdev->name); if (!data->bulk_rx_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; buf = kmalloc(size, mem_flags); if (!buf) { 
usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, btusb_bulk_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->bulk_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_isoc_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int i, err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { for (i = 0; i < urb->number_of_packets; i++) { unsigned int offset = urb->iso_frame_desc[i].offset; unsigned int length = urb->iso_frame_desc[i].actual_length; if (urb->iso_frame_desc[i].status) continue; hdev->stat.byte_rx += length; if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, urb->transfer_buffer + offset, length) < 0) { BT_ERR("%s corrupted SCO packet", hdev->name); hdev->stat.err_rx++; } } } if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->isoc_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) { int i, offset = 0; BT_DBG("len %d mtu %d", len, mtu); for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; i++, offset += mtu, len -= mtu) { urb->iso_frame_desc[i].offset = offset; urb->iso_frame_desc[i].length = mtu; } if (len && i < BTUSB_MAX_ISOC_FRAMES) { urb->iso_frame_desc[i].offset = offset; urb->iso_frame_desc[i].length = len; 
i++; } urb->number_of_packets = i; } static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size; BT_DBG("%s", hdev->name); if (!data->isoc_rx_ep) return -ENODEV; urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); if (!urb) return -ENOMEM; size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * BTUSB_MAX_ISOC_FRAMES; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, hdev, data->isoc_rx_ep->bInterval); urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; __fill_isoc_descriptor(urb, size, le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); usb_anchor_urb(urb, &data->isoc_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct btusb_data *data = hci_get_drvdata(hdev); BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += urb->transfer_buffer_length; else hdev->stat.err_tx++; done: spin_lock(&data->txlock); data->tx_in_flight--; spin_unlock(&data->txlock); kfree(urb->setup_packet); kfree_skb(skb); } static void btusb_isoc_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += 
urb->transfer_buffer_length; else hdev->stat.err_tx++; done: kfree(urb->setup_packet); kfree_skb(skb); } static int btusb_open(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); err = usb_autopm_get_interface(data->intf); if (err < 0) return err; data->intf->needs_remote_wakeup = 1; if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) goto done; if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; err = btusb_submit_intr_urb(hdev, GFP_KERNEL); if (err < 0) goto failed; err = btusb_submit_bulk_urb(hdev, GFP_KERNEL); if (err < 0) { usb_kill_anchored_urbs(&data->intr_anchor); goto failed; } set_bit(BTUSB_BULK_RUNNING, &data->flags); btusb_submit_bulk_urb(hdev, GFP_KERNEL); done: usb_autopm_put_interface(data->intf); return 0; failed: clear_bit(BTUSB_INTR_RUNNING, &data->flags); clear_bit(HCI_RUNNING, &hdev->flags); usb_autopm_put_interface(data->intf); return err; } static void btusb_stop_traffic(struct btusb_data *data) { usb_kill_anchored_urbs(&data->intr_anchor); usb_kill_anchored_urbs(&data->bulk_anchor); usb_kill_anchored_urbs(&data->isoc_anchor); } static int btusb_close(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; cancel_work_sync(&data->work); cancel_work_sync(&data->waker); clear_bit(BTUSB_ISOC_RUNNING, &data->flags); clear_bit(BTUSB_BULK_RUNNING, &data->flags); clear_bit(BTUSB_INTR_RUNNING, &data->flags); btusb_stop_traffic(data); err = usb_autopm_get_interface(data->intf); if (err < 0) goto failed; data->intf->needs_remote_wakeup = 0; usb_autopm_put_interface(data->intf); failed: usb_scuttle_anchored_urbs(&data->deferred); return 0; } static int btusb_flush(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static int btusb_send_frame(struct sk_buff *skb) { struct 
hci_dev *hdev = (struct hci_dev *) skb->dev; struct btusb_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; int err; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; dr = kmalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } dr->bRequestType = data->cmdreq_type; dr->bRequest = 0; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, btusb_tx_complete, skb); hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: if (!data->bulk_tx_ep) return -ENODEV; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) return -ENODEV; urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_isoc_tx_complete, skb, data->isoc_tx_ep->bInterval); urb->transfer_flags = URB_ISO_ASAP; __fill_isoc_descriptor(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); hdev->stat.sco_tx++; goto skip_waking; default: return -EILSEQ; } err = inc_tx(data); if (err) { usb_anchor_urb(urb, &data->deferred); schedule_work(&data->waker); err = 0; goto done; } skip_waking: usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } 
else { usb_mark_last_busy(data->udev); } done: usb_free_urb(urb); return err; } static void btusb_notify(struct hci_dev *hdev, unsigned int evt) { struct btusb_data *data = hci_get_drvdata(hdev); BT_DBG("%s evt %d", hdev->name, evt); if ((evt == HCI_NOTIFY_SCO_COMPLETE) || (evt == HCI_NOTIFY_CONN_DEL)) { BT_DBG("SCO conn state changed: evt %d", evt); sco_conn = (evt == HCI_NOTIFY_SCO_COMPLETE) ? 1 : 0; schedule_work(&data->work); } } static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) { struct btusb_data *data = hci_get_drvdata(hdev); struct usb_interface *intf = data->isoc; struct usb_endpoint_descriptor *ep_desc; int i, err; if (!data->isoc) return -ENODEV; err = usb_set_interface(data->udev, 1, altsetting); if (err < 0) { BT_ERR("%s setting interface failed (%d)", hdev->name, -err); return err; } data->isoc_altsetting = altsetting; data->isoc_tx_ep = NULL; data->isoc_rx_ep = NULL; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { data->isoc_tx_ep = ep_desc; continue; } if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { data->isoc_rx_ep = ep_desc; continue; } } if (!data->isoc_tx_ep || !data->isoc_rx_ep) { BT_ERR("%s invalid SCO descriptors", hdev->name); return -ENODEV; } return 0; } static void btusb_work(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, work); struct hci_dev *hdev = data->hdev; int new_alts; int err; if (sco_conn) { if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { err = usb_autopm_get_interface(data->isoc ? 
data->isoc : data->intf); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); return; } set_bit(BTUSB_DID_ISO_RESUME, &data->flags); } if (hdev->voice_setting & 0x0020) { static const int alts[3] = { 2, 4, 5 }; new_alts = alts[hdev->conn_hash.sco_num - 1]; } else { new_alts = hdev->conn_hash.sco_num; } if (data->isoc_altsetting != new_alts) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); if (__set_isoc_interface(hdev, new_alts) < 0) return; } if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_KERNEL); } } else { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); __set_isoc_interface(hdev, 0); if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) usb_autopm_put_interface(data->isoc ? data->isoc : data->intf); } } static void btusb_waker(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, waker); int err; err = usb_autopm_get_interface(data->intf); if (err < 0) return; usb_autopm_put_interface(data->intf); } static int btusb_setup_bcm92035(struct hci_dev *hdev) { struct sk_buff *skb; u8 val = 0x00; BT_DBG("%s", hdev->name); skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb)); else kfree_skb(skb); return 0; } struct intel_version { u8 status; u8 hw_platform; u8 hw_variant; u8 hw_revision; u8 fw_variant; u8 fw_revision; u8 fw_build_num; u8 fw_build_ww; u8 fw_build_yy; u8 fw_patch_num; } __packed; static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, struct intel_version *ver) { const struct firmware *fw; char fwname[64]; int ret; snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.bseq", ver->hw_platform, ver->hw_variant, 
ver->hw_revision, ver->fw_variant, ver->fw_revision, ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy); ret = request_firmware(&fw, fwname, &hdev->dev); if (ret < 0) { if (ret == -EINVAL) { BT_ERR("%s Intel firmware file request failed (%d)", hdev->name, ret); return NULL; } BT_ERR("%s failed to open Intel firmware file: %s(%d)", hdev->name, fwname, ret); /* If the correct firmware patch file is not found, use the * default firmware patch file instead */ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", ver->hw_platform, ver->hw_variant); if (request_firmware(&fw, fwname, &hdev->dev) < 0) { BT_ERR("%s failed to open default Intel fw file: %s", hdev->name, fwname); return NULL; } } BT_INFO("%s: Intel Bluetooth firmware file: %s", hdev->name, fwname); return fw; } static int btusb_setup_intel_patching(struct hci_dev *hdev, const struct firmware *fw, const u8 **fw_ptr, int *disable_patch) { struct sk_buff *skb; struct hci_command_hdr *cmd; const u8 *cmd_param; struct hci_event_hdr *evt = NULL; const u8 *evt_param = NULL; int remain = fw->size - (*fw_ptr - fw->data); /* The first byte indicates the types of the patch command or event. * 0x01 means HCI command and 0x02 is HCI event. If the first bytes * in the current firmware buffer doesn't start with 0x01 or * the size of remain buffer is smaller than HCI command header, * the firmware file is corrupted and it should stop the patching * process. */ if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name); return -EINVAL; } (*fw_ptr)++; remain--; cmd = (struct hci_command_hdr *)(*fw_ptr); *fw_ptr += sizeof(*cmd); remain -= sizeof(*cmd); /* Ensure that the remain firmware data is long enough than the length * of command parameter. If not, the firmware file is corrupted. 
*/ if (remain < cmd->plen) { BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name); return -EFAULT; } /* If there is a command that loads a patch in the firmware * file, then enable the patch upon success, otherwise just * disable the manufacturer mode, for example patch activation * is not required when the default firmware patch file is used * because there are no patch data to load. */ if (*disable_patch && le16_to_cpu(cmd->opcode) == 0xfc8e) *disable_patch = 0; cmd_param = *fw_ptr; *fw_ptr += cmd->plen; remain -= cmd->plen; /* This reads the expected events when the above command is sent to the * device. Some vendor commands expects more than one events, for * example command status event followed by vendor specific event. * For this case, it only keeps the last expected event. so the command * can be sent with __hci_cmd_sync_ev() which returns the sk_buff of * last expected event. */ while (remain > HCI_EVENT_HDR_SIZE && *fw_ptr[0] == 0x02) { (*fw_ptr)++; remain--; evt = (struct hci_event_hdr *)(*fw_ptr); *fw_ptr += sizeof(*evt); remain -= sizeof(*evt); if (remain < evt->plen) { BT_ERR("%s Intel fw corrupted: invalid evt len", hdev->name); return -EFAULT; } evt_param = *fw_ptr; *fw_ptr += evt->plen; remain -= evt->plen; } /* Every HCI commands in the firmware file has its correspond event. * If event is not found or remain is smaller than zero, the firmware * file is corrupted. */ if (!evt || !evt_param || remain < 0) { BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name); return -EFAULT; } skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, cmd_param, evt->evt, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", hdev->name, cmd->opcode, PTR_ERR(skb)); return PTR_ERR(skb); } /* It ensures that the returned event matches the event data read from * the firmware file. At fist, it checks the length and then * the contents of the event. 
*/ if (skb->len != evt->plen) { BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name, le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } if (memcmp(skb->data, evt_param, evt->plen)) { BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)", hdev->name, le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } kfree_skb(skb); return 0; } static int btusb_setup_intel(struct hci_dev *hdev) { struct sk_buff *skb; const struct firmware *fw; const u8 *fw_ptr; int disable_patch; struct intel_version *ver; const u8 mfg_enable[] = { 0x01, 0x00 }; const u8 mfg_disable[] = { 0x00, 0x00 }; const u8 mfg_reset_deactivate[] = { 0x00, 0x01 }; const u8 mfg_reset_activate[] = { 0x00, 0x02 }; BT_DBG("%s", hdev->name); /* The controller has a bug with the first HCI command sent to it * returning number of completed commands as zero. This would stall the * command processing in the Bluetooth core. * * As a workaround, send HCI Reset command first which will reset the * number of completed commands and allow normal command processing * from now on. */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s sending initial HCI reset command failed (%ld)", hdev->name, PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); /* Read Intel specific controller version first to allow selection of * which firmware file to load. * * The returned information are hardware variant and revision plus * firmware variant, revision and build number. 
*/ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s reading Intel fw version command failed (%ld)", hdev->name, PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*ver)) { BT_ERR("%s Intel version event length mismatch", hdev->name); kfree_skb(skb); return -EIO; } ver = (struct intel_version *)skb->data; if (ver->status) { BT_ERR("%s Intel fw version event failed (%02x)", hdev->name, ver->status); kfree_skb(skb); return -bt_to_errno(ver->status); } BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x", hdev->name, ver->hw_platform, ver->hw_variant, ver->hw_revision, ver->fw_variant, ver->fw_revision, ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy, ver->fw_patch_num); /* fw_patch_num indicates the version of patch the device currently * have. If there is no patch data in the device, it is always 0x00. * So, if it is other than 0x00, no need to patch the deivce again. */ if (ver->fw_patch_num) { BT_INFO("%s: Intel device is already patched. patch num: %02x", hdev->name, ver->fw_patch_num); kfree_skb(skb); return 0; } /* Opens the firmware patch file based on the firmware version read * from the controller. If it fails to open the matching firmware * patch file, it tries to open the default firmware patch file. * If no patch file is found, allow the device to operate without * a patch. */ fw = btusb_setup_intel_get_fw(hdev, ver); if (!fw) { kfree_skb(skb); return 0; } fw_ptr = fw->data; /* This Intel specific command enables the manufacturer mode of the * controller. * * Only while this mode is enabled, the driver can download the * firmware patch data and configuration parameters. 
*/ skb = __hci_cmd_sync(hdev, 0xfc11, 2, mfg_enable, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s entering Intel manufacturer mode failed (%ld)", hdev->name, PTR_ERR(skb)); release_firmware(fw); return PTR_ERR(skb); } if (skb->data[0]) { u8 evt_status = skb->data[0]; BT_ERR("%s enable Intel manufacturer mode event failed (%02x)", hdev->name, evt_status); kfree_skb(skb); release_firmware(fw); return -bt_to_errno(evt_status); } kfree_skb(skb); disable_patch = 1; /* The firmware data file consists of list of Intel specific HCI * commands and its expected events. The first byte indicates the * type of the message, either HCI command or HCI event. * * It reads the command and its expected event from the firmware file, * and send to the controller. Once __hci_cmd_sync_ev() returns, * the returned event is compared with the event read from the firmware * file and it will continue until all the messages are downloaded to * the controller. * * Once the firmware patching is completed successfully, * the manufacturer mode is disabled with reset and activating the * downloaded patch. * * If the firmware patching fails, the manufacturer mode is * disabled with reset and deactivating the patch. * * If the default patch file is used, no reset is done when disabling * the manufacturer. */ while (fw->size > fw_ptr - fw->data) { int ret; ret = btusb_setup_intel_patching(hdev, fw, &fw_ptr, &disable_patch); if (ret < 0) goto exit_mfg_deactivate; } release_firmware(fw); if (disable_patch) goto exit_mfg_disable; /* Patching completed successfully and disable the manufacturer mode * with reset and activate the downloaded firmware patches. 
*/ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_activate), mfg_reset_activate, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", hdev->name, PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); BT_INFO("%s: Intel Bluetooth firmware patch completed and activated", hdev->name); return 0; exit_mfg_disable: /* Disable the manufacturer mode without reset */ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_disable), mfg_disable, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", hdev->name, PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name); return 0; exit_mfg_deactivate: release_firmware(fw); /* Patching failed. Disable the manufacturer mode with reset and * deactivate the downloaded firmware patches. */ skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_deactivate), mfg_reset_deactivate, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", hdev->name, PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated", hdev->name); return 0; } static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; struct btusb_data *data; struct hci_dev *hdev; int i, version, err; BT_DBG("intf %p id %p", intf, id); /* interface numbers are hardcoded in the spec */ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, blacklist_table); if (match) id = match; } if (id->driver_info == BTUSB_IGNORE) return -ENODEV; if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER) return -ENODEV; if (ignore_csr && id->driver_info & BTUSB_CSR) return -ENODEV; if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER) return -ENODEV; if (id->driver_info & BTUSB_ATH3012) { 
struct usb_device *udev = interface_to_usbdev(intf); version = get_rome_version(udev); BT_INFO("Rome Version: 0x%x", version); /* Old firmware would otherwise let ath3k driver load * patch and sysconfig files */ if (version) rome_download(udev); else if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) { BT_INFO("FW for ar3k is yet to be downloaded"); return -ENODEV; } } data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { data->intr_ep = ep_desc; continue; } if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { data->bulk_tx_ep = ep_desc; continue; } if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { data->bulk_rx_ep = ep_desc; continue; } } if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) return -ENODEV; data->cmdreq_type = USB_TYPE_CLASS; data->udev = interface_to_usbdev(intf); data->intf = intf; spin_lock_init(&data->lock); INIT_WORK(&data->work, btusb_work); INIT_WORK(&data->waker, btusb_waker); spin_lock_init(&data->txlock); init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); init_usb_anchor(&data->isoc_anchor); init_usb_anchor(&data->deferred); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = btusb_open; hdev->close = btusb_close; hdev->flush = btusb_flush; hdev->send = btusb_send_frame; hdev->notify = btusb_notify; if (id->driver_info & BTUSB_BCM92035) hdev->setup = btusb_setup_bcm92035; if (id->driver_info & BTUSB_INTEL) hdev->setup = btusb_setup_intel; /* Interface numbers are hardcoded in the specification */ data->isoc = usb_ifnum_to_if(data->udev, 1); if (!reset) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); if (force_scofix || 
id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); } if (id->driver_info & BTUSB_BROKEN_ISOC) data->isoc = NULL; if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); } if (id->driver_info & BTUSB_CSR) { struct usb_device *udev = data->udev; /* Old firmware would otherwise execute USB reset */ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { struct usb_device *udev = data->udev; /* New sniffer firmware has crippled HCI interface */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); data->isoc = NULL; } if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, data->isoc, data); if (err < 0) { hci_free_dev(hdev); return err; } } err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } usb_set_intfdata(intf, data); usb_enable_autosuspend(data->udev); return 0; } static void btusb_disconnect(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev; BT_DBG("intf %p", intf); if (!data) return; hdev = data->hdev; usb_set_intfdata(data->intf, NULL); if (data->isoc) usb_set_intfdata(data->isoc, NULL); hci_unregister_dev(hdev); if (intf == data->isoc) usb_driver_release_interface(&btusb_driver, data->intf); else if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); hci_free_dev(hdev); } #ifdef CONFIG_PM static int btusb_suspend(struct usb_interface *intf, pm_message_t message) { struct btusb_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (data->suspend_count++) return 0; spin_lock_irq(&data->txlock); if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { set_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); } else { spin_unlock_irq(&data->txlock); data->suspend_count--; 
return -EBUSY; } cancel_work_sync(&data->work); btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static void play_deferred(struct btusb_data *data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&data->deferred))) { err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) break; data->tx_in_flight++; } usb_scuttle_anchored_urbs(&data->deferred); } static int btusb_resume(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev = data->hdev; int err = 0; BT_DBG("intf %p", intf); if (--data->suspend_count) return 0; if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { err = btusb_submit_intr_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_INTR_RUNNING, &data->flags); goto failed; } } if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { err = btusb_submit_bulk_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_BULK_RUNNING, &data->flags); goto failed; } btusb_submit_bulk_urb(hdev, GFP_NOIO); } if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_NOIO); } spin_lock_irq(&data->txlock); play_deferred(data); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); schedule_work(&data->work); return 0; failed: usb_scuttle_anchored_urbs(&data->deferred); done: spin_lock_irq(&data->txlock); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); return err; } #endif static struct usb_driver btusb_driver = { .name = "btusb", .probe = btusb_probe, .disconnect = btusb_disconnect, #ifdef CONFIG_PM .suspend = btusb_suspend, .resume = btusb_resume, #endif .id_table = btusb_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(btusb_driver); module_param(ignore_dga, bool, 0644); MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001"); 
module_param(ignore_csr, bool, 0644); MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001"); module_param(ignore_sniffer, bool, 0644); MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); module_param(disable_scofix, bool, 0644); MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); module_param(force_scofix, bool, 0644); MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size"); module_param(reset, bool, 0644); MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
gpl-2.0
sch2307/android_kernel_samsung_aries-kor
arch/arm/mm/dma-mapping.c
380
16810
/* * linux/arch/arm/mm/dma-mapping.c * * Copyright (C) 2000-2004 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * DMA uncached mapping support. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/init.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/highmem.h> #include <asm/memory.h> #include <asm/highmem.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/sizes.h> static u64 get_coherent_dma_mask(struct device *dev) { u64 mask = ISA_DMA_THRESHOLD; if (dev) { mask = dev->coherent_dma_mask; /* * Sanity check the DMA mask - it must be non-zero, and * must be able to be satisfied by a DMA allocation. */ if (mask == 0) { dev_warn(dev, "coherent DMA mask is unset\n"); return 0; } if ((~mask) & ISA_DMA_THRESHOLD) { dev_warn(dev, "coherent DMA mask %#llx is smaller " "than system GFP_DMA mask %#llx\n", mask, (unsigned long long)ISA_DMA_THRESHOLD); return 0; } } return mask; } /* * Allocate a DMA buffer for 'dev' of size 'size' using the * specified gfp mask. Note that 'size' must be page aligned. 
*/ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) { unsigned long order = get_order(size); struct page *page, *p, *e; void *ptr; u64 mask = get_coherent_dma_mask(dev); #ifdef CONFIG_DMA_API_DEBUG u64 limit = (mask + 1) & ~mask; if (limit && size >= limit) { dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", size, mask); return NULL; } #endif if (!mask) return NULL; if (mask < 0xffffffffULL) gfp |= GFP_DMA; page = alloc_pages(gfp, order); if (!page) return NULL; /* * Now split the huge page and free the excess pages */ split_page(page, order); for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) __free_page(p); /* * Ensure that the allocated pages are zeroed, and that any data * lurking in the kernel direct-mapped region is invalidated. */ ptr = page_address(page); memset(ptr, 0, size); dmac_flush_range(ptr, ptr + size); outer_flush_range(__pa(ptr), __pa(ptr) + size); return page; } /* * Free a DMA buffer. 'size' must be page aligned. 
*/ static void __dma_free_buffer(struct page *page, size_t size) { struct page *e = page + (size >> PAGE_SHIFT); while (page < e) { __free_page(page); page++; } } #ifdef CONFIG_MMU /* Sanity check size */ #if (CONSISTENT_DMA_SIZE % SZ_2M) #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" #endif #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) /* * These are the page tables (2MB each) covering uncached, DMA consistent allocations */ static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; #include "vmregion.h" static struct arm_vmregion_head consistent_head = { .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), .vm_start = CONSISTENT_BASE, .vm_end = CONSISTENT_END, }; #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif /* * Initialise the consistent memory allocation. 
*/ static int __init consistent_init(void) { int ret = 0; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int i = 0; u32 base = CONSISTENT_BASE; do { pgd = pgd_offset(&init_mm, base); pud = pud_alloc(&init_mm, pgd, base); if (!pud) { printk(KERN_ERR "%s: no pud tables\n", __func__); ret = -ENOMEM; break; } pmd = pmd_alloc(&init_mm, pud, base); if (!pmd) { printk(KERN_ERR "%s: no pmd tables\n", __func__); ret = -ENOMEM; break; } WARN_ON(!pmd_none(*pmd)); pte = pte_alloc_kernel(pmd, base); if (!pte) { printk(KERN_ERR "%s: no pte tables\n", __func__); ret = -ENOMEM; break; } consistent_pte[i++] = pte; base += (1 << PGDIR_SHIFT); } while (base < CONSISTENT_END); return ret; } core_initcall(consistent_init); static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) { struct arm_vmregion *c; size_t align; int bit; if (!consistent_pte[0]) { printk(KERN_ERR "%s: not initialised\n", __func__); dump_stack(); return NULL; } /* * Align the virtual region allocation - maximum alignment is * a section size, minimum is a page size. This helps reduce * fragmentation of the DMA space, and also prevents allocations * smaller than a section from crossing a section boundary. */ bit = fls(size - 1); if (bit > SECTION_SHIFT) bit = SECTION_SHIFT; align = 1 << bit; /* * Allocate a virtual address in the consistent mapping region. 
*/ c = arm_vmregion_alloc(&consistent_head, align, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { pte_t *pte; int idx = CONSISTENT_PTE_INDEX(c->vm_start); u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); pte = consistent_pte[idx] + off; c->vm_pages = page; do { BUG_ON(!pte_none(*pte)); set_pte_ext(pte, mk_pte(page, prot), 0); page++; pte++; off++; if (off >= PTRS_PER_PTE) { off = 0; pte = consistent_pte[++idx]; } } while (size -= PAGE_SIZE); dsb(); return (void *)c->vm_start; } return NULL; } static void __dma_free_remap(void *cpu_addr, size_t size) { struct arm_vmregion *c; unsigned long addr; pte_t *ptep; int idx; u32 off; c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); if (!c) { printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", __func__, cpu_addr); dump_stack(); return; } if ((c->vm_end - c->vm_start) != size) { printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", __func__, c->vm_end - c->vm_start, size); dump_stack(); size = c->vm_end - c->vm_start; } idx = CONSISTENT_PTE_INDEX(c->vm_start); off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); ptep = consistent_pte[idx] + off; addr = c->vm_start; do { pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); ptep++; addr += PAGE_SIZE; off++; if (off >= PTRS_PER_PTE) { off = 0; ptep = consistent_pte[++idx]; } if (pte_none(pte) || !pte_present(pte)) printk(KERN_CRIT "%s: bad page in kernel page table\n", __func__); } while (size -= PAGE_SIZE); flush_tlb_kernel_range(c->vm_start, c->vm_end); arm_vmregion_free(&consistent_head, c); } #else /* !CONFIG_MMU */ #define __dma_alloc_remap(page, size, gfp, prot) page_address(page) #define __dma_free_remap(addr, size) do { } while (0) #endif /* CONFIG_MMU */ static void * __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, pgprot_t prot) { struct page *page; void *addr; *handle = ~0; size = PAGE_ALIGN(size); page = __dma_alloc_buffer(dev, size, gfp); if (!page) return NULL; if 
(!arch_is_coherent()) addr = __dma_alloc_remap(page, size, gfp, prot); else addr = page_address(page); if (addr) *handle = pfn_to_dma(dev, page_to_pfn(page)); else __dma_free_buffer(page, size); return addr; } /* * Allocate DMA-coherent memory space and return both the kernel remapped * virtual and bus address for that space. */ void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { void *memory; if (dma_alloc_from_coherent(dev, size, handle, &memory)) return memory; return __dma_alloc(dev, size, handle, gfp, pgprot_dmacoherent(pgprot_kernel)); } EXPORT_SYMBOL(dma_alloc_coherent); /* * Allocate a writecombining region, in much the same way as * dma_alloc_coherent above. */ void * dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { return __dma_alloc(dev, size, handle, gfp, pgprot_writecombine(pgprot_kernel)); } EXPORT_SYMBOL(dma_alloc_writecombine); static int dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { int ret = -ENXIO; #ifdef CONFIG_MMU unsigned long user_size, kern_size; struct arm_vmregion *c; user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); if (c) { unsigned long off = vma->vm_pgoff; kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT; if (off < kern_size && user_size <= (kern_size - off)) { ret = remap_pfn_range(vma, vma->vm_start, page_to_pfn(c->vm_pages) + off, user_size << PAGE_SHIFT, vma->vm_page_prot); } } #endif /* CONFIG_MMU */ return ret; } int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); return dma_mmap(dev, vma, cpu_addr, dma_addr, size); } EXPORT_SYMBOL(dma_mmap_coherent); int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { 
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); return dma_mmap(dev, vma, cpu_addr, dma_addr, size); } EXPORT_SYMBOL(dma_mmap_writecombine); /* * free a page as defined by the above mapping. * Must not be called with IRQs disabled. */ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) { WARN_ON(irqs_disabled()); if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) return; size = PAGE_ALIGN(size); if (!arch_is_coherent()) __dma_free_remap(cpu_addr, size); __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size); } EXPORT_SYMBOL(dma_free_coherent); /* * Make an area consistent for devices. * Note: Drivers should NOT use this function directly, as it will break * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { unsigned long paddr; BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); dmac_map_area(kaddr, size, dir); paddr = __pa(kaddr); if (dir == DMA_FROM_DEVICE) { outer_inv_range(paddr, paddr + size); } else { outer_clean_range(paddr, paddr + size); } /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } EXPORT_SYMBOL(___dma_single_cpu_to_dev); void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); /* FIXME: non-speculating: not required */ /* don't bother invalidating if DMA to device */ if (dir != DMA_TO_DEVICE) { unsigned long paddr = __pa(kaddr); outer_inv_range(paddr, paddr + size); } dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) { unsigned long pfn; size_t left = size; pfn = page_to_pfn(page) + offset / PAGE_SIZE; offset %= PAGE_SIZE; /* * A single sg entry may refer to multiple physically contiguous * pages. But we still need to process highmem pages individually. * If highmem is not configured then the bulk of this loop gets * optimized out. */ do { size_t len = left; void *vaddr; page = pfn_to_page(pfn); if (PageHighMem(page)) { if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; op(vaddr, len, dir); kunmap_high(page); } else if (cache_is_vipt()) { /* unmapped pages might still be cached */ vaddr = kmap_atomic(page); op(vaddr + offset, len, dir); kunmap_atomic(vaddr); } } else { vaddr = page_address(page) + offset; op(vaddr, len, dir); } offset = 0; pfn++; left -= len; } while (left); } void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; if (dir == DMA_FROM_DEVICE) { outer_inv_range(paddr, paddr + size); } else { outer_clean_range(paddr, paddr + size); } /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr = page_to_phys(page) + off; /* FIXME: non-speculating: not required */ /* don't bother invalidating if DMA to device */ if (dir != DMA_TO_DEVICE) outer_inv_range(paddr, paddr + size); dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); /* * Mark the D-cache clean for this page to avoid extra flushing. */ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) set_bit(PG_dcache_clean, &page->flags); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); /** * dma_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map * @dir: DMA transfer direction * * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the dma_map_single interface. * Here the scatter gather list elements are each tagged with the * appropriate dma address and length. They are obtained via * sg_dma_{address,length}. * * Device ownership issues as mentioned for dma_map_single are the same * here. 
*/ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i, j; BUG_ON(!valid_dma_direction(dir)); for_each_sg(sg, s, nents, i) { s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, s->length, dir); if (dma_mapping_error(dev, s->dma_address)) goto bad_mapping; } debug_dma_map_sg(dev, sg, nents, nents, dir); return nents; bad_mapping: for_each_sg(sg, s, i, j) __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); return 0; } EXPORT_SYMBOL(dma_map_sg); /** * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to unmap (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) * * Unmap a set of streaming mode DMA translations. Again, CPU access * rules concerning calls here are the same as for dma_unmap_single(). */ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i; debug_dma_unmap_sg(dev, sg, nents, dir); for_each_sg(sg, s, nents, i) __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); } EXPORT_SYMBOL(dma_unmap_sg); /** * dma_sync_sg_for_cpu * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, sg_dma_len(s), dir)) continue; __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); } debug_dma_sync_sg_for_cpu(dev, sg, nents, dir); } EXPORT_SYMBOL(dma_sync_sg_for_cpu); /** * dma_sync_sg_for_device * @dev: valid struct 
device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0, sg_dma_len(s), dir)) continue; __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); } debug_dma_sync_sg_for_device(dev, sg, nents, dir); } EXPORT_SYMBOL(dma_sync_sg_for_device); #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_debug_do_init);
gpl-2.0
EPDCenter/android_kernel_archos_97b_Titan
arch/arm/mach-s5pc100/mach-smdkc100.c
1916
7152
/* linux/arch/arm/mach-s5pc100/mach-smdkc100.c
 *
 * Copyright 2009 Samsung Electronics Co.
 * Author: Byungho Min <bhmin@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/pwm_backlight.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/map.h>
#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>

#include <video/platform_lcd.h>

#include <asm/irq.h>
#include <asm/mach-types.h>

#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/s5pc100.h>
#include <plat/fb.h>
#include <plat/iic.h>
#include <plat/ata.h>
#include <plat/adc.h>
#include <plat/keypad.h>
#include <plat/ts.h>
#include <plat/audio.h>

/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC100_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
				 S3C2410_UCON_RXILEVEL |	\
				 S3C2410_UCON_TXIRQMODE |	\
				 S3C2410_UCON_RXIRQMODE |	\
				 S3C2410_UCON_RXFIFO_TOI |	\
				 S3C2443_UCON_RXERR_IRQEN)

#define SMDKC100_ULCON_DEFAULT	S3C2410_LCON_CS8

#define SMDKC100_UFCON_DEFAULT	(S3C2410_UFCON_FIFOMODE |	\
				 S3C2440_UFCON_RXTRIG8 |	\
				 S3C2440_UFCON_TXTRIG16)

/*
 * All four on-chip UART ports use the same interrupt-driven
 * configuration defined by the SMDKC100_*_DEFAULT values above.
 */
static struct s3c2410_uartcfg smdkc100_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = SMDKC100_UCON_DEFAULT,
		.ulcon	     = SMDKC100_ULCON_DEFAULT,
		.ufcon	     = SMDKC100_UFCON_DEFAULT,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = SMDKC100_UCON_DEFAULT,
		.ulcon	     = SMDKC100_ULCON_DEFAULT,
		.ufcon	     = SMDKC100_UFCON_DEFAULT,
	},
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = SMDKC100_UCON_DEFAULT,
		.ulcon	     = SMDKC100_ULCON_DEFAULT,
		.ufcon	     = SMDKC100_UFCON_DEFAULT,
	},
	[3] = {
		.hwport	     = 3,
		.flags	     = 0,
		.ucon	     = SMDKC100_UCON_DEFAULT,
		.ulcon	     = SMDKC100_ULCON_DEFAULT,
		.ufcon	     = SMDKC100_UFCON_DEFAULT,
	},
};

/* I2C0: WM8580 audio codec at address 0x1b */
static struct i2c_board_info i2c_devs0[] __initdata = {
	{I2C_BOARD_INFO("wm8580", 0x1b),},
};

/* I2C1: no devices registered on this bus */
static struct i2c_board_info i2c_devs1[] __initdata = {
};

/* LCD power controller */
static void smdkc100_lcd_power_set(struct plat_lcd_data *pd,
				   unsigned int power)
{
	/*
	 * Power-off is a no-op; power-on pulses the reset line (GPH0(6))
	 * high-low-high with settle delays to reset the LCD module.
	 */
	if (power) {
		/* module reset */
		gpio_direction_output(S5PC100_GPH0(6), 1);
		mdelay(100);

		gpio_direction_output(S5PC100_GPH0(6), 0);
		mdelay(10);

		gpio_direction_output(S5PC100_GPH0(6), 1);
		mdelay(10);
	}
}

static struct plat_lcd_data smdkc100_lcd_power_data = {
	.set_power	= smdkc100_lcd_power_set,
};

/* Child of the framebuffer device so LCD power follows the fb device */
static struct platform_device smdkc100_lcd_powerdev = {
	.name			= "platform-lcd",
	.dev.parent		= &s3c_device_fb.dev,
	.dev.platform_data	= &smdkc100_lcd_power_data,
};

/* Frame Buffer */
static struct s3c_fb_pd_win smdkc100_fb_win0 = {
	/* this is to ensure we use win0 */
	.win_mode	= {
		.left_margin	= 8,
		.right_margin	= 13,
		.upper_margin	= 7,
		.lower_margin	= 5,
		.hsync_len	= 3,
		.vsync_len	= 1,
		.xres		= 800,
		.yres		= 480,
		.refresh	= 80,
	},
	.max_bpp	= 32,
	.default_bpp	= 16,
};

static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
	.win[0]		= &smdkc100_fb_win0,
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
	.setup_gpio	= s5pc100_fb_gpio_setup_24bpp,
};

static struct s3c_ide_platdata smdkc100_ide_pdata __initdata = {
	.setup_gpio	= s5pc100_ide_setup_gpio,
};

/* 2x8 key matrix: digits on row 0, letters on row 1 (columns 3-7) */
static uint32_t smdkc100_keymap[] __initdata = {
	/* KEY(row, col, keycode) */
	KEY(0, 3, KEY_1), KEY(0, 4, KEY_2), KEY(0, 5, KEY_3),
	KEY(0, 6, KEY_4), KEY(0, 7, KEY_5),
	KEY(1, 3, KEY_A), KEY(1, 4, KEY_B), KEY(1, 5, KEY_C),
	KEY(1, 6, KEY_D), KEY(1, 7, KEY_E)
};

static struct matrix_keymap_data smdkc100_keymap_data __initdata = {
	.keymap		= smdkc100_keymap,
	.keymap_size	= ARRAY_SIZE(smdkc100_keymap),
};

static struct samsung_keypad_platdata smdkc100_keypad_data __initdata = {
	.keymap_data	= &smdkc100_keymap_data,
	.rows		= 2,
	.cols		= 8,
};

/*
 * Claim GPD(0) and switch it to special function 2 (PWM timer output 0)
 * so the PWM backlight driver can drive the backlight.
 */
static int smdkc100_backlight_init(struct device *dev)
{
	int ret;

	ret = gpio_request(S5PC100_GPD(0), "Backlight");
	if (ret) {
		/* NOTE(review): message says GPF but the pin is GPD(0) */
		printk(KERN_ERR "failed to request GPF for PWM-OUT0\n");
		return ret;
	}

	/* Configure GPIO pin with S5PC100_GPD_TOUT_0 */
	s3c_gpio_cfgpin(S5PC100_GPD(0), S3C_GPIO_SFN(2));

	return 0;
}

/* Return GPD(0) to plain output mode and release it */
static void smdkc100_backlight_exit(struct device *dev)
{
	s3c_gpio_cfgpin(S5PC100_GPD(0), S3C_GPIO_OUTPUT);
	gpio_free(S5PC100_GPD(0));
}

static struct platform_pwm_backlight_data smdkc100_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 255,
	.dft_brightness	= 255,
	.pwm_period_ns	= 78770,
	.init		= smdkc100_backlight_init,
	.exit		= smdkc100_backlight_exit,
};

static struct platform_device smdkc100_backlight_device = {
	.name		= "pwm-backlight",
	.dev		= {
		.parent		= &s3c_device_timer[0].dev,
		.platform_data	= &smdkc100_backlight_data,
	},
};

/* Platform devices registered for this board in smdkc100_machine_init() */
static struct platform_device *smdkc100_devices[] __initdata = {
	&s3c_device_adc,
	&s3c_device_cfcon,
	&s3c_device_i2c0,
	&s3c_device_i2c1,
	&s3c_device_fb,
	&s3c_device_hsmmc0,
	&s3c_device_hsmmc1,
	&s3c_device_hsmmc2,
	&s3c_device_ts,
	&s3c_device_wdt,
	&smdkc100_lcd_powerdev,
	&samsung_asoc_dma,
	&s5pc100_device_iis0,
	&samsung_device_keypad,
	&s5pc100_device_ac97,
	&s3c_device_rtc,
	&s5p_device_fimc0,
	&s5p_device_fimc1,
	&s5p_device_fimc2,
	&s5pc100_device_spdif,
	&s3c_device_timer[0],
	&smdkc100_backlight_device,
};

static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
	.delay			= 10000,
	.presc			= 49,
	.oversampling_shift	= 2,
};

/* Early map/clock/UART setup; clocks run from a 12 MHz reference */
static void __init smdkc100_map_io(void)
{
	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(smdkc100_uartcfgs, ARRAY_SIZE(smdkc100_uartcfgs));
}

/* Board init: attach platform data to devices, then register them all */
static void __init smdkc100_machine_init(void)
{
	s3c24xx_ts_set_platdata(&s3c_ts_platform);

	/* I2C */
	s3c_i2c0_set_platdata(NULL);
	s3c_i2c1_set_platdata(NULL);
	i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0));
	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));

	s3c_fb_set_platdata(&smdkc100_lcd_pdata);
	s3c_ide_set_platdata(&smdkc100_ide_pdata);

	samsung_keypad_set_platdata(&smdkc100_keypad_data);

	s5pc100_spdif_setup_gpio(S5PC100_SPDIF_GPD);

	/* LCD init */
	gpio_request(S5PC100_GPH0(6), "GPH0");
	smdkc100_lcd_power_set(&smdkc100_lcd_power_data, 0);

	platform_add_devices(smdkc100_devices, ARRAY_SIZE(smdkc100_devices));
}

MACHINE_START(SMDKC100, "SMDKC100")
	/* Maintainer: Byungho Min <bhmin@samsung.com> */
	.boot_params	= S5P_PA_SDRAM + 0x100,
	.init_irq	= s5pc100_init_irq,
	.map_io		= smdkc100_map_io,
	.init_machine	= smdkc100_machine_init,
	.timer		= &s3c24xx_timer,
MACHINE_END
gpl-2.0
gchild320/flounder
drivers/pinctrl/sh-pfc/pfc-sh7203.c
2172
45379
/* * SH7203 Pinmux * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7203.h> #include "sh_pfc.h" enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, PB12_DATA, PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, PC14_DATA, PC13_DATA, PC12_DATA, PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA, PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA, PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, PF30_DATA, PF29_DATA, PF28_DATA, PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA, PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA, PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA, PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA, PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, FORCE_IN, PA7_IN, PA6_IN, PA5_IN, PA4_IN, PA3_IN, PA2_IN, PA1_IN, PA0_IN, PB11_IN, PB10_IN, PB9_IN, PB8_IN, PC14_IN, PC13_IN, PC12_IN, PC11_IN, PC10_IN, PC9_IN, PC8_IN, PC7_IN, PC6_IN, PC5_IN, PC4_IN, PC3_IN, PC2_IN, PC1_IN, PC0_IN, PD15_IN, PD14_IN, PD13_IN, PD12_IN, PD11_IN, PD10_IN, PD9_IN, PD8_IN, PD7_IN, PD6_IN, PD5_IN, PD4_IN, PD3_IN, PD2_IN, PD1_IN, PD0_IN, PE15_IN, PE14_IN, PE13_IN, PE12_IN, PE11_IN, PE10_IN, PE9_IN, PE8_IN, PE7_IN, PE6_IN, PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN, PF30_IN, PF29_IN, PF28_IN, PF27_IN, PF26_IN, PF25_IN, 
PF24_IN, PF23_IN, PF22_IN, PF21_IN, PF20_IN, PF19_IN, PF18_IN, PF17_IN, PF16_IN, PF15_IN, PF14_IN, PF13_IN, PF12_IN, PF11_IN, PF10_IN, PF9_IN, PF8_IN, PF7_IN, PF6_IN, PF5_IN, PF4_IN, PF3_IN, PF2_IN, PF1_IN, PF0_IN, PINMUX_INPUT_END, PINMUX_OUTPUT_BEGIN, FORCE_OUT, PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT, PC14_OUT, PC13_OUT, PC12_OUT, PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT, PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT, PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT, PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT, PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT, PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT, PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT, PF30_OUT, PF29_OUT, PF28_OUT, PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT, PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT, PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT, PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT, PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT, PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PB11_IOR_IN, PB11_IOR_OUT, PB10_IOR_IN, PB10_IOR_OUT, PB9_IOR_IN, PB9_IOR_OUT, PB8_IOR_IN, PB8_IOR_OUT, PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, PB11MD_0, PB11MD_1, PB10MD_0, PB10MD_1, PB9MD_00, PB9MD_01, PB9MD_10, PB8MD_00, PB8MD_01, PB8MD_10, PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, PC14MD_0, PC14MD_1, PC13MD_0, PC13MD_1, PC12MD_0, PC12MD_1, PC11MD_00, PC11MD_01, PC11MD_10, PC10MD_00, PC10MD_01, PC10MD_10, PC9MD_0, PC9MD_1, PC8MD_0, PC8MD_1, PC7MD_0, PC7MD_1, PC6MD_0, PC6MD_1, PC5MD_0, PC5MD_1, PC4MD_0, PC4MD_1, PC3MD_0, PC3MD_1, PC2MD_0, PC2MD_1, PC1MD_0, PC1MD_1, PC0MD_00, PC0MD_01, 
PC0MD_10, PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101, PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101, PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101, PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101, PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101, PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101, PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101, PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101, PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101, PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101, PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101, PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101, PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101, PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101, PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101, PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101, PE15MD_00, PE15MD_01, PE15MD_11, PE14MD_00, PE14MD_01, PE14MD_11, PE13MD_00, PE13MD_11, PE12MD_00, PE12MD_11, PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100, PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100, PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11, PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11, PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100, PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100, PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100, PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100, PE3MD_00, PE3MD_01, PE3MD_11, PE2MD_00, PE2MD_01, PE2MD_11, PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11, PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100, PF30MD_0, PF30MD_1, PF29MD_0, PF29MD_1, PF28MD_0, PF28MD_1, PF27MD_0, PF27MD_1, PF26MD_0, PF26MD_1, PF25MD_0, PF25MD_1, PF24MD_0, PF24MD_1, PF23MD_00, PF23MD_01, PF23MD_10, PF22MD_00, PF22MD_01, PF22MD_10, PF21MD_00, PF21MD_01, PF21MD_10, PF20MD_00, PF20MD_01, PF20MD_10, PF19MD_00, PF19MD_01, PF19MD_10, PF18MD_00, PF18MD_01, PF18MD_10, PF17MD_00, 
PF17MD_01, PF17MD_10, PF16MD_00, PF16MD_01, PF16MD_10, PF15MD_00, PF15MD_01, PF15MD_10, PF14MD_00, PF14MD_01, PF14MD_10, PF13MD_00, PF13MD_01, PF13MD_10, PF12MD_00, PF12MD_01, PF12MD_10, PF11MD_00, PF11MD_01, PF11MD_10, PF10MD_00, PF10MD_01, PF10MD_10, PF9MD_00, PF9MD_01, PF9MD_10, PF8MD_00, PF8MD_01, PF8MD_10, PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11, PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11, PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11, PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11, PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11, PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11, PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11, PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK, PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK, PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK, PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK, IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK, IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK, IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK, IRQ3_PD_MARK, IRQ2_PD_MARK, IRQ1_PD_MARK, IRQ0_PD_MARK, IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK, IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK, WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK, UBCTRG_MARK, CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK, CRX0_MARK, CRX0_CRX1_MARK, SDA3_MARK, SCL3_MARK, SDA2_MARK, SCL2_MARK, SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK, TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK, DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK, DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK, DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK, ADTRG_PD_MARK, ADTRG_PE_MARK, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, A21_MARK, CS4_MARK, MRES_MARK, BS_MARK, IOIS16_MARK, 
CS1_MARK, CS6_CE1B_MARK, CE2B_MARK, CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK, RDWR_MARK, CKE_MARK, CASU_MARK, BREQ_MARK, RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK, WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK, WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK, CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK, TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK, TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK, TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK, TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK, TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK, TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK, SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK, SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK, SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK, SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK, TXD0_MARK, RXD0_MARK, SCK0_MARK, TXD1_MARK, RXD1_MARK, SCK1_MARK, TXD2_MARK, RXD2_MARK, SCK2_MARK, RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK, SCK3_MARK, AUDIO_CLK_MARK, SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK, SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK, SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK, SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK, FCE_MARK, FRB_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK, NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK, LCD_VEPWC_MARK, LCD_VCPWC_MARK, LCD_CLK_MARK, LCD_FLM_MARK, LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK, LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, PINMUX_MARK_END, }; static const pinmux_enum_t pinmux_data[] = { /* PA */ PINMUX_DATA(PA7_DATA, PA7_IN), PINMUX_DATA(PA6_DATA, PA6_IN), PINMUX_DATA(PA5_DATA, PA5_IN), PINMUX_DATA(PA4_DATA, PA4_IN), PINMUX_DATA(PA3_DATA, PA3_IN), PINMUX_DATA(PA2_DATA, PA2_IN), 
PINMUX_DATA(PA1_DATA, PA1_IN), PINMUX_DATA(PA0_DATA, PA0_IN), /* PB */ PINMUX_DATA(PB12_DATA, PB12MD_00, FORCE_OUT), PINMUX_DATA(WDTOVF_MARK, PB12MD_01), PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00), PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01), PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10), PINMUX_DATA(UBCTRG_MARK, PB12MD_11), PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT), PINMUX_DATA(CTX1_MARK, PB11MD_1), PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT), PINMUX_DATA(CRX1_MARK, PB10MD_1), PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT), PINMUX_DATA(CTX0_MARK, PB9MD_01), PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10), PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, PB8_OUT), PINMUX_DATA(CRX0_MARK, PB8MD_01), PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10), PINMUX_DATA(PB7_DATA, PB7MD_00, FORCE_IN), PINMUX_DATA(SDA3_MARK, PB7MD_01), PINMUX_DATA(PINT7_PB_MARK, PB7MD_10), PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11), PINMUX_DATA(PB6_DATA, PB6MD_00, FORCE_IN), PINMUX_DATA(SCL3_MARK, PB6MD_01), PINMUX_DATA(PINT6_PB_MARK, PB6MD_10), PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11), PINMUX_DATA(PB5_DATA, PB5MD_00, FORCE_IN), PINMUX_DATA(SDA2_MARK, PB6MD_01), PINMUX_DATA(PINT5_PB_MARK, PB6MD_10), PINMUX_DATA(IRQ5_PB_MARK, PB6MD_11), PINMUX_DATA(PB4_DATA, PB4MD_00, FORCE_IN), PINMUX_DATA(SCL2_MARK, PB4MD_01), PINMUX_DATA(PINT4_PB_MARK, PB4MD_10), PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11), PINMUX_DATA(PB3_DATA, PB3MD_00, FORCE_IN), PINMUX_DATA(SDA1_MARK, PB3MD_01), PINMUX_DATA(PINT3_PB_MARK, PB3MD_10), PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11), PINMUX_DATA(PB2_DATA, PB2MD_00, FORCE_IN), PINMUX_DATA(SCL1_MARK, PB2MD_01), PINMUX_DATA(PINT2_PB_MARK, PB2MD_10), PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11), PINMUX_DATA(PB1_DATA, PB1MD_00, FORCE_IN), PINMUX_DATA(SDA0_MARK, PB1MD_01), PINMUX_DATA(PINT1_PB_MARK, PB1MD_10), PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11), PINMUX_DATA(PB0_DATA, PB0MD_00, FORCE_IN), PINMUX_DATA(SCL0_MARK, PB0MD_01), PINMUX_DATA(PINT0_PB_MARK, PB0MD_10), PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11), /* PC */ 
PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT), PINMUX_DATA(WAIT_MARK, PC14MD_1), PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT), PINMUX_DATA(RDWR_MARK, PC13MD_1), PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT), PINMUX_DATA(CKE_MARK, PC12MD_1), PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT), PINMUX_DATA(CASU_MARK, PC11MD_01), PINMUX_DATA(BREQ_MARK, PC11MD_10), PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT), PINMUX_DATA(RASU_MARK, PC10MD_01), PINMUX_DATA(BACK_MARK, PC10MD_10), PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT), PINMUX_DATA(CASL_MARK, PC9MD_1), PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT), PINMUX_DATA(RASL_MARK, PC8MD_1), PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT), PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1), PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT), PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1), PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT), PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1), PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT), PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1), PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT), PINMUX_DATA(CS3_MARK, PC3MD_1), PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT), PINMUX_DATA(CS2_MARK, PC2MD_1), PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT), PINMUX_DATA(A1_MARK, PC1MD_1), PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT), PINMUX_DATA(A0_MARK, PC0MD_01), PINMUX_DATA(CS7_MARK, PC0MD_10), /* PD */ PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT), PINMUX_DATA(D31_MARK, PD15MD_001), PINMUX_DATA(PINT7_PD_MARK, PD15MD_010), PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100), PINMUX_DATA(TIOC4D_MARK, PD15MD_101), PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT), PINMUX_DATA(D30_MARK, PD14MD_001), PINMUX_DATA(PINT6_PD_MARK, PD14MD_010), PINMUX_DATA(TIOC4C_MARK, PD14MD_101), PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT), PINMUX_DATA(D29_MARK, PD13MD_001), PINMUX_DATA(PINT5_PD_MARK, PD13MD_010), PINMUX_DATA(TEND1_PD_MARK, PD13MD_100), PINMUX_DATA(TIOC4B_MARK, PD13MD_101), 
PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT), PINMUX_DATA(D28_MARK, PD12MD_001), PINMUX_DATA(PINT4_PD_MARK, PD12MD_010), PINMUX_DATA(DACK1_PD_MARK, PD12MD_100), PINMUX_DATA(TIOC4A_MARK, PD12MD_101), PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, PD11_OUT), PINMUX_DATA(D27_MARK, PD11MD_001), PINMUX_DATA(PINT3_PD_MARK, PD11MD_010), PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100), PINMUX_DATA(TIOC3D_MARK, PD11MD_101), PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT), PINMUX_DATA(D26_MARK, PD10MD_001), PINMUX_DATA(PINT2_PD_MARK, PD10MD_010), PINMUX_DATA(TEND0_PD_MARK, PD10MD_100), PINMUX_DATA(TIOC3C_MARK, PD10MD_101), PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT), PINMUX_DATA(D25_MARK, PD9MD_001), PINMUX_DATA(PINT1_PD_MARK, PD9MD_010), PINMUX_DATA(DACK0_PD_MARK, PD9MD_100), PINMUX_DATA(TIOC3B_MARK, PD9MD_101), PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT), PINMUX_DATA(D24_MARK, PD8MD_001), PINMUX_DATA(PINT0_PD_MARK, PD8MD_010), PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100), PINMUX_DATA(TIOC3A_MARK, PD8MD_101), PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT), PINMUX_DATA(D23_MARK, PD7MD_001), PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010), PINMUX_DATA(SCS1_PD_MARK, PD7MD_011), PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100), PINMUX_DATA(TIOC2B_MARK, PD7MD_101), PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT), PINMUX_DATA(D22_MARK, PD6MD_001), PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010), PINMUX_DATA(SSO1_PD_MARK, PD6MD_011), PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100), PINMUX_DATA(TIOC2A_MARK, PD6MD_101), PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT), PINMUX_DATA(D21_MARK, PD5MD_001), PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010), PINMUX_DATA(SSI1_PD_MARK, PD5MD_011), PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100), PINMUX_DATA(TIOC1B_MARK, PD5MD_101), PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT), PINMUX_DATA(D20_MARK, PD4MD_001), PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010), PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011), PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100), PINMUX_DATA(TIOC1A_MARK, PD4MD_101), PINMUX_DATA(PD3_DATA, 
PD3MD_000, PD3_IN, PD3_OUT), PINMUX_DATA(D19_MARK, PD3MD_001), PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010), PINMUX_DATA(SCS0_PD_MARK, PD3MD_011), PINMUX_DATA(DACK3_MARK, PD3MD_100), PINMUX_DATA(TIOC0D_MARK, PD3MD_101), PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT), PINMUX_DATA(D18_MARK, PD2MD_001), PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010), PINMUX_DATA(SSO0_PD_MARK, PD2MD_011), PINMUX_DATA(DREQ3_MARK, PD2MD_100), PINMUX_DATA(TIOC0C_MARK, PD2MD_101), PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT), PINMUX_DATA(D17_MARK, PD1MD_001), PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010), PINMUX_DATA(SSI0_PD_MARK, PD1MD_011), PINMUX_DATA(DACK2_MARK, PD1MD_100), PINMUX_DATA(TIOC0B_MARK, PD1MD_101), PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT), PINMUX_DATA(D16_MARK, PD0MD_001), PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010), PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011), PINMUX_DATA(DREQ2_MARK, PD0MD_100), PINMUX_DATA(TIOC0A_MARK, PD0MD_101), /* PE */ PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT), PINMUX_DATA(IOIS16_MARK, PE15MD_01), PINMUX_DATA(RTS3_MARK, PE15MD_11), PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT), PINMUX_DATA(CS1_MARK, PE14MD_01), PINMUX_DATA(CTS3_MARK, PE14MD_11), PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT), PINMUX_DATA(TXD3_MARK, PE13MD_11), PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT), PINMUX_DATA(RXD3_MARK, PE12MD_11), PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT), PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001), PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010), PINMUX_DATA(TEND1_PE_MARK, PE11MD_100), PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT), PINMUX_DATA(CE2B_MARK, PE10MD_001), PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010), PINMUX_DATA(TEND0_PE_MARK, PE10MD_100), PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT), PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01), PINMUX_DATA(IRQ5_PE_MARK, PE9MD_10), PINMUX_DATA(SCK3_MARK, PE9MD_11), PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT), PINMUX_DATA(CE2A_MARK, PE8MD_01), PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10), PINMUX_DATA(SCK2_MARK, 
PE8MD_11), PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT), PINMUX_DATA(FRAME_MARK, PE7MD_001), PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010), PINMUX_DATA(TXD2_MARK, PE7MD_011), PINMUX_DATA(DACK1_PE_MARK, PE7MD_100), PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT), PINMUX_DATA(A25_MARK, PE6MD_001), PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010), PINMUX_DATA(RXD2_MARK, PE6MD_011), PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100), PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT), PINMUX_DATA(A24_MARK, PE5MD_001), PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010), PINMUX_DATA(TXD1_MARK, PE5MD_011), PINMUX_DATA(DACK0_PE_MARK, PE5MD_100), PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT), PINMUX_DATA(A23_MARK, PE4MD_001), PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010), PINMUX_DATA(RXD1_MARK, PE4MD_011), PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100), PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT), PINMUX_DATA(A22_MARK, PE3MD_01), PINMUX_DATA(SCK1_MARK, PE3MD_11), PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT), PINMUX_DATA(A21_MARK, PE2MD_01), PINMUX_DATA(SCK0_MARK, PE2MD_11), PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT), PINMUX_DATA(CS4_MARK, PE1MD_01), PINMUX_DATA(MRES_MARK, PE1MD_10), PINMUX_DATA(TXD0_MARK, PE1MD_11), PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT), PINMUX_DATA(BS_MARK, PE0MD_001), PINMUX_DATA(RXD0_MARK, PE0MD_011), PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100), /* PF */ PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT), PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1), PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT), PINMUX_DATA(SSIDATA3_MARK, PF29MD_1), PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT), PINMUX_DATA(SSIWS3_MARK, PF28MD_1), PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT), PINMUX_DATA(SSISCK3_MARK, PF27MD_1), PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT), PINMUX_DATA(SSIDATA2_MARK, PF26MD_1), PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT), PINMUX_DATA(SSIWS2_MARK, PF25MD_1), PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT), PINMUX_DATA(SSISCK2_MARK, PF24MD_1), 
PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT), PINMUX_DATA(SSIDATA1_MARK, PF23MD_01), PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10), PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT), PINMUX_DATA(SSIWS1_MARK, PF22MD_01), PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10), PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT), PINMUX_DATA(SSISCK1_MARK, PF21MD_01), PINMUX_DATA(LCD_CLK_MARK, PF21MD_10), PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT), PINMUX_DATA(SSIDATA0_MARK, PF20MD_01), PINMUX_DATA(LCD_FLM_MARK, PF20MD_10), PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT), PINMUX_DATA(SSIWS0_MARK, PF19MD_01), PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10), PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT), PINMUX_DATA(SSISCK0_MARK, PF18MD_01), PINMUX_DATA(LCD_CL2_MARK, PF18MD_10), PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT), PINMUX_DATA(FCE_MARK, PF17MD_01), PINMUX_DATA(LCD_CL1_MARK, PF17MD_10), PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT), PINMUX_DATA(FRB_MARK, PF16MD_01), PINMUX_DATA(LCD_DON_MARK, PF16MD_10), PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT), PINMUX_DATA(NAF7_MARK, PF15MD_01), PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10), PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT), PINMUX_DATA(NAF6_MARK, PF14MD_01), PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10), PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT), PINMUX_DATA(NAF5_MARK, PF13MD_01), PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10), PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT), PINMUX_DATA(NAF4_MARK, PF12MD_01), PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10), PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT), PINMUX_DATA(NAF3_MARK, PF11MD_01), PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10), PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT), PINMUX_DATA(NAF2_MARK, PF10MD_01), PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10), PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT), PINMUX_DATA(NAF1_MARK, PF9MD_01), PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10), PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT), 
PINMUX_DATA(NAF0_MARK, PF8MD_01), PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10), PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT), PINMUX_DATA(FSC_MARK, PF7MD_01), PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10), PINMUX_DATA(SCS1_PF_MARK, PF7MD_11), PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT), PINMUX_DATA(FOE_MARK, PF6MD_01), PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10), PINMUX_DATA(SSO1_PF_MARK, PF6MD_11), PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT), PINMUX_DATA(FCDE_MARK, PF5MD_01), PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10), PINMUX_DATA(SSI1_PF_MARK, PF5MD_11), PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT), PINMUX_DATA(FWE_MARK, PF4MD_01), PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10), PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11), PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT), PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01), PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10), PINMUX_DATA(SCS0_PF_MARK, PF3MD_11), PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT), PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01), PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10), PINMUX_DATA(SSO0_PF_MARK, PF2MD_11), PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT), PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01), PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10), PINMUX_DATA(SSI0_PF_MARK, PF1MD_11), PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT), PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01), PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10), PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11), }; static struct sh_pfc_pin pinmux_pins[] = { /* PA */ PINMUX_GPIO(GPIO_PA7, PA7_DATA), PINMUX_GPIO(GPIO_PA6, PA6_DATA), PINMUX_GPIO(GPIO_PA5, PA5_DATA), PINMUX_GPIO(GPIO_PA4, PA4_DATA), PINMUX_GPIO(GPIO_PA3, PA3_DATA), PINMUX_GPIO(GPIO_PA2, PA2_DATA), PINMUX_GPIO(GPIO_PA1, PA1_DATA), PINMUX_GPIO(GPIO_PA0, PA0_DATA), /* PB */ PINMUX_GPIO(GPIO_PB12, PB12_DATA), PINMUX_GPIO(GPIO_PB11, PB11_DATA), PINMUX_GPIO(GPIO_PB10, PB10_DATA), PINMUX_GPIO(GPIO_PB9, PB9_DATA), PINMUX_GPIO(GPIO_PB8, PB8_DATA), PINMUX_GPIO(GPIO_PB7, PB7_DATA), PINMUX_GPIO(GPIO_PB6, PB6_DATA), PINMUX_GPIO(GPIO_PB5, PB5_DATA), PINMUX_GPIO(GPIO_PB4, PB4_DATA), 
PINMUX_GPIO(GPIO_PB3, PB3_DATA), PINMUX_GPIO(GPIO_PB2, PB2_DATA), PINMUX_GPIO(GPIO_PB1, PB1_DATA), PINMUX_GPIO(GPIO_PB0, PB0_DATA), /* PC */ PINMUX_GPIO(GPIO_PC14, PC14_DATA), PINMUX_GPIO(GPIO_PC13, PC13_DATA), PINMUX_GPIO(GPIO_PC12, PC12_DATA), PINMUX_GPIO(GPIO_PC11, PC11_DATA), PINMUX_GPIO(GPIO_PC10, PC10_DATA), PINMUX_GPIO(GPIO_PC9, PC9_DATA), PINMUX_GPIO(GPIO_PC8, PC8_DATA), PINMUX_GPIO(GPIO_PC7, PC7_DATA), PINMUX_GPIO(GPIO_PC6, PC6_DATA), PINMUX_GPIO(GPIO_PC5, PC5_DATA), PINMUX_GPIO(GPIO_PC4, PC4_DATA), PINMUX_GPIO(GPIO_PC3, PC3_DATA), PINMUX_GPIO(GPIO_PC2, PC2_DATA), PINMUX_GPIO(GPIO_PC1, PC1_DATA), PINMUX_GPIO(GPIO_PC0, PC0_DATA), /* PD */ PINMUX_GPIO(GPIO_PD15, PD15_DATA), PINMUX_GPIO(GPIO_PD14, PD14_DATA), PINMUX_GPIO(GPIO_PD13, PD13_DATA), PINMUX_GPIO(GPIO_PD12, PD12_DATA), PINMUX_GPIO(GPIO_PD11, PD11_DATA), PINMUX_GPIO(GPIO_PD10, PD10_DATA), PINMUX_GPIO(GPIO_PD9, PD9_DATA), PINMUX_GPIO(GPIO_PD8, PD8_DATA), PINMUX_GPIO(GPIO_PD7, PD7_DATA), PINMUX_GPIO(GPIO_PD6, PD6_DATA), PINMUX_GPIO(GPIO_PD5, PD5_DATA), PINMUX_GPIO(GPIO_PD4, PD4_DATA), PINMUX_GPIO(GPIO_PD3, PD3_DATA), PINMUX_GPIO(GPIO_PD2, PD2_DATA), PINMUX_GPIO(GPIO_PD1, PD1_DATA), PINMUX_GPIO(GPIO_PD0, PD0_DATA), /* PE */ PINMUX_GPIO(GPIO_PE15, PE15_DATA), PINMUX_GPIO(GPIO_PE14, PE14_DATA), PINMUX_GPIO(GPIO_PE13, PE13_DATA), PINMUX_GPIO(GPIO_PE12, PE12_DATA), PINMUX_GPIO(GPIO_PE11, PE11_DATA), PINMUX_GPIO(GPIO_PE10, PE10_DATA), PINMUX_GPIO(GPIO_PE9, PE9_DATA), PINMUX_GPIO(GPIO_PE8, PE8_DATA), PINMUX_GPIO(GPIO_PE7, PE7_DATA), PINMUX_GPIO(GPIO_PE6, PE6_DATA), PINMUX_GPIO(GPIO_PE5, PE5_DATA), PINMUX_GPIO(GPIO_PE4, PE4_DATA), PINMUX_GPIO(GPIO_PE3, PE3_DATA), PINMUX_GPIO(GPIO_PE2, PE2_DATA), PINMUX_GPIO(GPIO_PE1, PE1_DATA), PINMUX_GPIO(GPIO_PE0, PE0_DATA), /* PF */ PINMUX_GPIO(GPIO_PF30, PF30_DATA), PINMUX_GPIO(GPIO_PF29, PF29_DATA), PINMUX_GPIO(GPIO_PF28, PF28_DATA), PINMUX_GPIO(GPIO_PF27, PF27_DATA), PINMUX_GPIO(GPIO_PF26, PF26_DATA), PINMUX_GPIO(GPIO_PF25, PF25_DATA), PINMUX_GPIO(GPIO_PF24, PF24_DATA), 
PINMUX_GPIO(GPIO_PF23, PF23_DATA), PINMUX_GPIO(GPIO_PF22, PF22_DATA), PINMUX_GPIO(GPIO_PF21, PF21_DATA), PINMUX_GPIO(GPIO_PF20, PF20_DATA), PINMUX_GPIO(GPIO_PF19, PF19_DATA), PINMUX_GPIO(GPIO_PF18, PF18_DATA), PINMUX_GPIO(GPIO_PF17, PF17_DATA), PINMUX_GPIO(GPIO_PF16, PF16_DATA), PINMUX_GPIO(GPIO_PF15, PF15_DATA), PINMUX_GPIO(GPIO_PF14, PF14_DATA), PINMUX_GPIO(GPIO_PF13, PF13_DATA), PINMUX_GPIO(GPIO_PF12, PF12_DATA), PINMUX_GPIO(GPIO_PF11, PF11_DATA), PINMUX_GPIO(GPIO_PF10, PF10_DATA), PINMUX_GPIO(GPIO_PF9, PF9_DATA), PINMUX_GPIO(GPIO_PF8, PF8_DATA), PINMUX_GPIO(GPIO_PF7, PF7_DATA), PINMUX_GPIO(GPIO_PF6, PF6_DATA), PINMUX_GPIO(GPIO_PF5, PF5_DATA), PINMUX_GPIO(GPIO_PF4, PF4_DATA), PINMUX_GPIO(GPIO_PF3, PF3_DATA), PINMUX_GPIO(GPIO_PF2, PF2_DATA), PINMUX_GPIO(GPIO_PF1, PF1_DATA), PINMUX_GPIO(GPIO_PF0, PF0_DATA), }; #define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins) static const struct pinmux_func pinmux_func_gpios[] = { /* INTC */ GPIO_FN(PINT7_PB), GPIO_FN(PINT6_PB), GPIO_FN(PINT5_PB), GPIO_FN(PINT4_PB), GPIO_FN(PINT3_PB), GPIO_FN(PINT2_PB), GPIO_FN(PINT1_PB), GPIO_FN(PINT0_PB), GPIO_FN(PINT7_PD), GPIO_FN(PINT6_PD), GPIO_FN(PINT5_PD), GPIO_FN(PINT4_PD), GPIO_FN(PINT3_PD), GPIO_FN(PINT2_PD), GPIO_FN(PINT1_PD), GPIO_FN(PINT0_PD), GPIO_FN(IRQ7_PB), GPIO_FN(IRQ6_PB), GPIO_FN(IRQ5_PB), GPIO_FN(IRQ4_PB), GPIO_FN(IRQ3_PB), GPIO_FN(IRQ2_PB), GPIO_FN(IRQ1_PB), GPIO_FN(IRQ0_PB), GPIO_FN(IRQ7_PD), GPIO_FN(IRQ6_PD), GPIO_FN(IRQ5_PD), GPIO_FN(IRQ4_PD), GPIO_FN(IRQ3_PD), GPIO_FN(IRQ2_PD), GPIO_FN(IRQ1_PD), GPIO_FN(IRQ0_PD), GPIO_FN(IRQ7_PE), GPIO_FN(IRQ6_PE), GPIO_FN(IRQ5_PE), GPIO_FN(IRQ4_PE), GPIO_FN(IRQ3_PE), GPIO_FN(IRQ2_PE), GPIO_FN(IRQ1_PE), GPIO_FN(IRQ0_PE), GPIO_FN(WDTOVF), GPIO_FN(IRQOUT), GPIO_FN(REFOUT), GPIO_FN(IRQOUT_REFOUT), GPIO_FN(UBCTRG), /* CAN */ GPIO_FN(CTX1), GPIO_FN(CRX1), GPIO_FN(CTX0), GPIO_FN(CTX0_CTX1), GPIO_FN(CRX0), GPIO_FN(CRX0_CRX1), /* IIC3 */ GPIO_FN(SDA3), GPIO_FN(SCL3), GPIO_FN(SDA2), GPIO_FN(SCL2), GPIO_FN(SDA1), GPIO_FN(SCL1), GPIO_FN(SDA0), 
GPIO_FN(SCL0), /* DMAC */ GPIO_FN(TEND0_PD), GPIO_FN(TEND0_PE), GPIO_FN(DACK0_PD), GPIO_FN(DACK0_PE), GPIO_FN(DREQ0_PD), GPIO_FN(DREQ0_PE), GPIO_FN(TEND1_PD), GPIO_FN(TEND1_PE), GPIO_FN(DACK1_PD), GPIO_FN(DACK1_PE), GPIO_FN(DREQ1_PD), GPIO_FN(DREQ1_PE), GPIO_FN(DACK2), GPIO_FN(DREQ2), GPIO_FN(DACK3), GPIO_FN(DREQ3), /* ADC */ GPIO_FN(ADTRG_PD), GPIO_FN(ADTRG_PE), /* BSC */ GPIO_FN(D31), GPIO_FN(D30), GPIO_FN(D29), GPIO_FN(D28), GPIO_FN(D27), GPIO_FN(D26), GPIO_FN(D25), GPIO_FN(D24), GPIO_FN(D23), GPIO_FN(D22), GPIO_FN(D21), GPIO_FN(D20), GPIO_FN(D19), GPIO_FN(D18), GPIO_FN(D17), GPIO_FN(D16), GPIO_FN(A25), GPIO_FN(A24), GPIO_FN(A23), GPIO_FN(A22), GPIO_FN(A21), GPIO_FN(CS4), GPIO_FN(MRES), GPIO_FN(BS), GPIO_FN(IOIS16), GPIO_FN(CS1), GPIO_FN(CS6_CE1B), GPIO_FN(CE2B), GPIO_FN(CS5_CE1A), GPIO_FN(CE2A), GPIO_FN(FRAME), GPIO_FN(WAIT), GPIO_FN(RDWR), GPIO_FN(CKE), GPIO_FN(CASU), GPIO_FN(BREQ), GPIO_FN(RASU), GPIO_FN(BACK), GPIO_FN(CASL), GPIO_FN(RASL), GPIO_FN(WE3_DQMUU_AH_ICIO_WR), GPIO_FN(WE2_DQMUL_ICIORD), GPIO_FN(WE1_DQMLU_WE), GPIO_FN(WE0_DQMLL), GPIO_FN(CS3), GPIO_FN(CS2), GPIO_FN(A1), GPIO_FN(A0), GPIO_FN(CS7), /* TMU */ GPIO_FN(TIOC4D), GPIO_FN(TIOC4C), GPIO_FN(TIOC4B), GPIO_FN(TIOC4A), GPIO_FN(TIOC3D), GPIO_FN(TIOC3C), GPIO_FN(TIOC3B), GPIO_FN(TIOC3A), GPIO_FN(TIOC2B), GPIO_FN(TIOC1B), GPIO_FN(TIOC2A), GPIO_FN(TIOC1A), GPIO_FN(TIOC0D), GPIO_FN(TIOC0C), GPIO_FN(TIOC0B), GPIO_FN(TIOC0A), GPIO_FN(TCLKD_PD), GPIO_FN(TCLKC_PD), GPIO_FN(TCLKB_PD), GPIO_FN(TCLKA_PD), GPIO_FN(TCLKD_PF), GPIO_FN(TCLKC_PF), GPIO_FN(TCLKB_PF), GPIO_FN(TCLKA_PF), /* SSU */ GPIO_FN(SCS0_PD), GPIO_FN(SSO0_PD), GPIO_FN(SSI0_PD), GPIO_FN(SSCK0_PD), GPIO_FN(SCS0_PF), GPIO_FN(SSO0_PF), GPIO_FN(SSI0_PF), GPIO_FN(SSCK0_PF), GPIO_FN(SCS1_PD), GPIO_FN(SSO1_PD), GPIO_FN(SSI1_PD), GPIO_FN(SSCK1_PD), GPIO_FN(SCS1_PF), GPIO_FN(SSO1_PF), GPIO_FN(SSI1_PF), GPIO_FN(SSCK1_PF), /* SCIF */ GPIO_FN(TXD0), GPIO_FN(RXD0), GPIO_FN(SCK0), GPIO_FN(TXD1), GPIO_FN(RXD1), GPIO_FN(SCK1), GPIO_FN(TXD2), GPIO_FN(RXD2), 
GPIO_FN(SCK2), GPIO_FN(RTS3), GPIO_FN(CTS3), GPIO_FN(TXD3), GPIO_FN(RXD3), GPIO_FN(SCK3), /* SSI */ GPIO_FN(AUDIO_CLK), GPIO_FN(SSIDATA3), GPIO_FN(SSIWS3), GPIO_FN(SSISCK3), GPIO_FN(SSIDATA2), GPIO_FN(SSIWS2), GPIO_FN(SSISCK2), GPIO_FN(SSIDATA1), GPIO_FN(SSIWS1), GPIO_FN(SSISCK1), GPIO_FN(SSIDATA0), GPIO_FN(SSIWS0), GPIO_FN(SSISCK0), /* FLCTL */ GPIO_FN(FCE), GPIO_FN(FRB), GPIO_FN(NAF7), GPIO_FN(NAF6), GPIO_FN(NAF5), GPIO_FN(NAF4), GPIO_FN(NAF3), GPIO_FN(NAF2), GPIO_FN(NAF1), GPIO_FN(NAF0), GPIO_FN(FSC), GPIO_FN(FOE), GPIO_FN(FCDE), GPIO_FN(FWE), /* LCDC */ GPIO_FN(LCD_VEPWC), GPIO_FN(LCD_VCPWC), GPIO_FN(LCD_CLK), GPIO_FN(LCD_FLM), GPIO_FN(LCD_M_DISP), GPIO_FN(LCD_CL2), GPIO_FN(LCD_CL1), GPIO_FN(LCD_DON), GPIO_FN(LCD_DATA15), GPIO_FN(LCD_DATA14), GPIO_FN(LCD_DATA13), GPIO_FN(LCD_DATA12), GPIO_FN(LCD_DATA11), GPIO_FN(LCD_DATA10), GPIO_FN(LCD_DATA9), GPIO_FN(LCD_DATA8), GPIO_FN(LCD_DATA7), GPIO_FN(LCD_DATA6), GPIO_FN(LCD_DATA5), GPIO_FN(LCD_DATA4), GPIO_FN(LCD_DATA3), GPIO_FN(LCD_DATA2), GPIO_FN(LCD_DATA1), GPIO_FN(LCD_DATA0), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, PB11_IN, PB11_OUT, PB10_IN, PB10_OUT, PB9_IN, PB9_OUT, PB8_IN, PB8_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) { PB11MD_0, PB11MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB10MD_0, PB10MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB9MD_00, PB9MD_01, PB9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB8MD_00, PB8MD_01, PB8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) { PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) { PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) { 0, 0, PC14_IN, PC14_OUT, PC13_IN, PC13_OUT, PC12_IN, PC12_OUT, PC11_IN, PC11_OUT, PC10_IN, PC10_OUT, PC9_IN, PC9_OUT, PC8_IN, PC8_OUT, PC7_IN, PC7_OUT, PC6_IN, PC6_OUT, PC5_IN, PC5_OUT, PC4_IN, PC4_OUT, PC3_IN, PC3_OUT, PC2_IN, PC2_OUT, PC1_IN, PC1_OUT, PC0_IN, PC0_OUT } }, { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC14MD_0, PC14MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC13MD_0, PC13MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC12MD_0, PC12MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) { PC11MD_00, PC11MD_01, PC11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC10MD_00, PC10MD_01, PC10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC8MD_0, PC8MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) { PC7MD_0, PC7MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC6MD_0, PC6MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC5MD_0, PC5MD_1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) { PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC0MD_00, PC0MD_01, PC0MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) { PD15_IN, PD15_OUT, PD14_IN, PD14_OUT, PD13_IN, PD13_OUT, PD12_IN, PD12_OUT, PD11_IN, PD11_OUT, PD10_IN, PD10_OUT, PD9_IN, PD9_OUT, PD8_IN, PD8_OUT, PD7_IN, PD7_OUT, PD6_IN, PD6_OUT, PD5_IN, PD5_OUT, PD4_IN, PD4_OUT, PD3_IN, PD3_OUT, PD2_IN, PD2_OUT, PD1_IN, PD1_OUT, PD0_IN, PD0_OUT } }, { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) { PD15MD_000, PD15MD_001, PD15MD_010, 0, PD15MD_100, PD15MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD14MD_000, PD14MD_001, PD14MD_010, 0, 0, PD14MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD13MD_000, PD13MD_001, PD13MD_010, 0, PD13MD_100, PD13MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD12MD_000, PD12MD_001, PD12MD_010, 0, PD12MD_100, PD12MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) { PD11MD_000, PD11MD_001, PD11MD_010, 0, PD11MD_100, PD11MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD10MD_000, PD10MD_001, PD10MD_010, 0, PD10MD_100, PD10MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD9MD_000, PD9MD_001, PD9MD_010, 0, PD9MD_100, PD9MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD8MD_000, PD8MD_001, PD8MD_010, 0, PD8MD_100, PD8MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) { PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 
PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) { PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) { PE15_IN, PE15_OUT, PE14_IN, PE14_OUT, PE13_IN, PE13_OUT, PE12_IN, PE12_OUT, PE11_IN, PE11_OUT, PE10_IN, PE10_OUT, PE9_IN, PE9_OUT, PE8_IN, PE8_OUT, PE7_IN, PE7_OUT, PE6_IN, PE6_OUT, PE5_IN, PE5_OUT, PE4_IN, PE4_OUT, PE3_IN, PE3_OUT, PE2_IN, PE2_OUT, PE1_IN, PE1_OUT, PE0_IN, PE0_OUT } }, { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) { PE15MD_00, PE15MD_01, 0, PE15MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE14MD_00, PE14MD_01, 0, PE14MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE13MD_00, 0, 0, PE13MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE12MD_00, 0, 0, PE12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) { PE11MD_000, PE11MD_001, PE11MD_010, 0, PE11MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE10MD_000, PE10MD_001, PE10MD_010, 0, PE10MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) { PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) { PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE2MD_00, 
PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE0MD_000, PE0MD_001, 0, PE0MD_011, PE0MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) { 0, 0, PF30_IN, PF30_OUT, PF29_IN, PF29_OUT, PF28_IN, PF28_OUT, PF27_IN, PF27_OUT, PF26_IN, PF26_OUT, PF25_IN, PF25_OUT, PF24_IN, PF24_OUT, PF23_IN, PF23_OUT, PF22_IN, PF22_OUT, PF21_IN, PF21_OUT, PF20_IN, PF20_OUT, PF19_IN, PF19_OUT, PF18_IN, PF18_OUT, PF17_IN, PF17_OUT, PF16_IN, PF16_OUT } }, { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) { PF15_IN, PF15_OUT, PF14_IN, PF14_OUT, PF13_IN, PF13_OUT, PF12_IN, PF12_OUT, PF11_IN, PF11_OUT, PF10_IN, PF10_OUT, PF9_IN, PF9_OUT, PF8_IN, PF8_OUT, PF7_IN, PF7_OUT, PF6_IN, PF6_OUT, PF5_IN, PF5_OUT, PF4_IN, PF4_OUT, PF3_IN, PF3_OUT, PF2_IN, PF2_OUT, PF1_IN, PF1_OUT, PF0_IN, PF0_OUT } }, { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF30MD_0, PF30MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF29MD_0, PF29MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF28MD_0, PF28MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) { PF27MD_0, PF27MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF26MD_0, PF26MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF25MD_0, PF25MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF24MD_0, PF24MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) { PF23MD_00, PF23MD_01, PF23MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF22MD_00, PF22MD_01, PF22MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF21MD_00, PF21MD_01, PF21MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF20MD_00, PF20MD_01, PF20MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) { PF19MD_00, PF19MD_01, PF19MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF18MD_00, PF18MD_01, PF18MD_10, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, PF17MD_00, PF17MD_01, PF17MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF16MD_00, PF16MD_01, PF16MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) { PF15MD_00, PF15MD_01, PF15MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF14MD_00, PF14MD_01, PF14MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF13MD_00, PF13MD_01, PF13MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF12MD_00, PF12MD_01, PF12MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) { PF11MD_00, PF11MD_01, PF11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF10MD_00, PF10MD_01, PF10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF9MD_00, PF9MD_01, PF9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF8MD_00, PF8MD_01, PF8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) { PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) { PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static const struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) { 0, 0, 0, 0, 0, 0, 0, 0, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA } }, { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) { 0, 0, 0, PB12_DATA, PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } }, { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 
16) { 0, PC14_DATA, PC13_DATA, PC12_DATA, PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } }, { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) { PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA, PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } }, { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) { PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA, PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA } }, { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) { 0, PF30_DATA, PF29_DATA, PF28_DATA, PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA, PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA, PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA } }, { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) { PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA, PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } }, { }, }; const struct sh_pfc_soc_info sh7203_pinmux_info = { .name = "sh7203_pfc", .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .func_gpios = pinmux_func_gpios, .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios), .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), };
gpl-2.0
namagi/android_kernel_motorola_msm8960-common
arch/arm/mach-omap2/cm2xxx_3xxx.c
2940
17969
/*
 * OMAP2/3 CM module functions
 *
 * Copyright (C) 2009 Nokia Corporation
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include <plat/common.h>

#include "cm.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

/* CM_AUTOIDLE_PLL.AUTO_* bit values for DPLLs */
#define DPLL_AUTOIDLE_DISABLE			0x0
#define OMAP2XXX_DPLL_AUTOIDLE_LOW_POWER_STOP	0x3

/* CM_AUTOIDLE_PLL.AUTO_* bit values for APLLs (OMAP2xxx only) */
#define OMAP2XXX_APLL_AUTOIDLE_DISABLE		0x0
#define OMAP2XXX_APLL_AUTOIDLE_LOW_POWER_STOP	0x3

/*
 * CM_IDLESTx register offsets, indexed by (idlest_id - 1); see
 * omap2_cm_wait_module_ready().  OMAP2430_CM_IDLEST3 exists on
 * 2430 only.
 */
static const u8 cm_idlest_offs[] = {
	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
};

/* Read a 32-bit register at offset @idx inside CM module @module. */
u32 omap2_cm_read_mod_reg(s16 module, u16 idx)
{
	return __raw_readl(cm_base + module + idx);
}

/* Write @val to the 32-bit register at offset @idx inside CM module @module. */
void omap2_cm_write_mod_reg(u32 val, s16 module, u16 idx)
{
	__raw_writel(val, cm_base + module + idx);
}

/*
 * Read-modify-write a register in a CM module. Caller must lock:
 * the read/modify/write sequence is not atomic by itself.
 * Returns the value written back.
 */
u32 omap2_cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, idx);
	v &= ~mask;
	v |= bits;
	omap2_cm_write_mod_reg(v, module, idx);

	return v;
}

/* Set @bits in the CM register (@module, @idx); caller must lock. */
u32 omap2_cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
{
	return omap2_cm_rmw_mod_reg_bits(bits, bits, module, idx);
}

/* Clear @bits in the CM register (@module, @idx); caller must lock. */
u32 omap2_cm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
{
	return omap2_cm_rmw_mod_reg_bits(bits, 0x0, module, idx);
}

/*
 * Clockdomain CLKSTCTRL low-level accessors
 */

/*
 * Program clockdomain transition mode @c into the CM_CLKSTCTRL field
 * selected by @mask for CM module @module.  @c is shifted to the
 * field position via __ffs(mask).
 */
static void _write_clktrctrl(u8 c, s16 module, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= ~mask;
	v |= c << __ffs(mask);
	omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
}

/*
 * Return true if the clockdomain described by (@module, @mask) is in
 * hardware-supervised idle mode, i.e. its CLKSTCTRL field equals the
 * ENABLE_AUTO value for the running SoC.  BUG()s on non-24xx/34xx.
 */
bool omap2_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
{
	u32 v;
	bool ret = 0;

	BUG_ON(!cpu_is_omap24xx() && !cpu_is_omap34xx());

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= mask;
	v >>= __ffs(mask);

	if (cpu_is_omap24xx())
		ret = (v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
	else
		ret = (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;

	return ret;
}

/* Enable hardware-supervised idle on an OMAP2xxx clockdomain. */
void omap2xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

/* Disable hardware-supervised idle on an OMAP2xxx clockdomain. */
void omap2xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}

/* Enable hardware-supervised idle on an OMAP3xxx clockdomain. */
void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

/* Disable hardware-supervised idle on an OMAP3xxx clockdomain. */
void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}

/* Force an OMAP3xxx clockdomain into software-forced sleep. */
void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, module, mask);
}

/* Force an OMAP3xxx clockdomain awake (software-forced wakeup). */
void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, module, mask);
}

/*
 * DPLL autoidle control
 */

/* Program autoidle mode @m into CM_AUTOIDLE_PLL.AUTO_DPLL (OMAP2xxx). */
static void _omap2xxx_set_dpll_autoidle(u8 m)
{
	u32 v;

	v = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
	v &= ~OMAP24XX_AUTO_DPLL_MASK;
	v |= m << OMAP24XX_AUTO_DPLL_SHIFT;
	omap2_cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE);
}

/*
 * NOTE(review): the two wrappers below appear to have their arguments
 * swapped relative to their names: *_disable_autoidle() programs the
 * LOW_POWER_STOP mode (0x3) and *_auto_low_power_stop() programs
 * DPLL_AUTOIDLE_DISABLE (0x0).  The APLL54/APLL96 wrappers further
 * down show the same inverted pairing.  Callers may depend on the
 * current behavior, so verify against the OMAP243x TRM
 * (CM_AUTOIDLE_PLL.AUTO_DPLL) and all call sites before changing.
 */
void omap2xxx_cm_set_dpll_disable_autoidle(void)
{
	_omap2xxx_set_dpll_autoidle(OMAP2XXX_DPLL_AUTOIDLE_LOW_POWER_STOP);
}

void omap2xxx_cm_set_dpll_auto_low_power_stop(void)
{
	_omap2xxx_set_dpll_autoidle(DPLL_AUTOIDLE_DISABLE);
}

/*
 * APLL autoidle control
 */

/*
 * Program autoidle mode @m into the CM_AUTOIDLE_PLL field selected by
 * @mask (AUTO_54M or AUTO_96M) on OMAP2xxx.
 */
static void _omap2xxx_set_apll_autoidle(u8 m, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
	v &= ~mask;
	v |= m << __ffs(mask);
	omap2_cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE);
}

/* NOTE(review): same apparent name/value swap as the DPLL pair above. */
void omap2xxx_cm_set_apll54_disable_autoidle(void)
{
	_omap2xxx_set_apll_autoidle(OMAP2XXX_APLL_AUTOIDLE_LOW_POWER_STOP,
				    OMAP24XX_AUTO_54M_MASK);
}

void omap2xxx_cm_set_apll54_auto_low_power_stop(void)
{
	_omap2xxx_set_apll_autoidle(OMAP2XXX_APLL_AUTOIDLE_DISABLE,
				    OMAP24XX_AUTO_54M_MASK);
}

/* NOTE(review): same apparent name/value swap as the DPLL pair above. */
void omap2xxx_cm_set_apll96_disable_autoidle(void)
{
	_omap2xxx_set_apll_autoidle(OMAP2XXX_APLL_AUTOIDLE_LOW_POWER_STOP,
				    OMAP24XX_AUTO_96M_MASK);
}

void omap2xxx_cm_set_apll96_auto_low_power_stop(void)
{
	_omap2xxx_set_apll_autoidle(OMAP2XXX_APLL_AUTOIDLE_DISABLE,
				    OMAP24XX_AUTO_96M_MASK);
}

/*
 * Module IDLEST polling
 */

/**
 * omap2_cm_wait_module_ready - wait for a module to leave idle or standby
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Polls the selected CM_IDLESTx bit until it reports "ready".  The
 * ready polarity differs by SoC: on 24xx the bit reads 1 when ready,
 * on 34xx it reads 0.  Returns 0 when the module becomes ready within
 * MAX_MODULE_READY_TIME, -EINVAL for an out-of-range @idlest_id, or
 * -EBUSY on timeout.  BUG()s on other SoCs.
 *
 * (The kerneldoc header previously named this function
 * "omap2_cm_wait_idlest_ready"; corrected to the actual symbol.)
 */
int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
{
	int ena = 0, i = 0;
	u8 cm_idlest_reg;
	u32 mask;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(cm_idlest_offs)))
		return -EINVAL;

	cm_idlest_reg = cm_idlest_offs[idlest_id - 1];

	mask = 1 << idlest_shift;

	if (cpu_is_omap24xx())
		ena = mask;
	else if (cpu_is_omap34xx())
		ena = 0;
	else
		BUG();

	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
			    mask) == ena), MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}

/*
 * Context save/restore code - OMAP3 only
 */
#ifdef CONFIG_ARCH_OMAP3
/* Snapshot of every CM register preserved across off-mode; one field
 * per register saved by omap3_cm_save_context(). */
struct omap3_cm_regs {
	u32 iva2_cm_clksel1;
	u32 iva2_cm_clksel2;
	u32 cm_sysconfig;
	u32 sgx_cm_clksel;
	u32 dss_cm_clksel;
	u32 cam_cm_clksel;
	u32 per_cm_clksel;
	u32 emu_cm_clksel;
	u32 emu_cm_clkstctrl;
	u32 pll_cm_autoidle;
	u32 pll_cm_autoidle2;
	u32 pll_cm_clksel4;
	u32 pll_cm_clksel5;
	u32 pll_cm_clken2;
	u32 cm_polctrl;
	u32 iva2_cm_fclken;
	u32 iva2_cm_clken_pll;
	u32 core_cm_fclken1;
	u32 core_cm_fclken3;
	u32 sgx_cm_fclken;
	u32 wkup_cm_fclken;
	u32 dss_cm_fclken;
	u32 cam_cm_fclken;
	u32 per_cm_fclken;
	u32 usbhost_cm_fclken;
	u32 core_cm_iclken1;
	u32 core_cm_iclken2;
	u32 core_cm_iclken3;
	u32 sgx_cm_iclken;
	u32 wkup_cm_iclken;
	u32 dss_cm_iclken;
	u32 cam_cm_iclken;
	u32 per_cm_iclken;
	u32 usbhost_cm_iclken;
	u32 iva2_cm_autoidle2;
	u32 mpu_cm_autoidle2;
	u32 iva2_cm_clkstctrl;
	u32 mpu_cm_clkstctrl;
	u32 core_cm_clkstctrl;
	u32 sgx_cm_clkstctrl;
	u32 dss_cm_clkstctrl;
	u32 cam_cm_clkstctrl;
	u32 per_cm_clkstctrl;
	u32 neon_cm_clkstctrl;
	u32 usbhost_cm_clkstctrl;
	u32 core_cm_autoidle1;
	u32 core_cm_autoidle2;
	u32 core_cm_autoidle3;
	u32 wkup_cm_autoidle;
	u32 dss_cm_autoidle;
	u32 cam_cm_autoidle;
	u32 per_cm_autoidle;
	u32 usbhost_cm_autoidle;
	u32 sgx_cm_sleepdep;
	u32 dss_cm_sleepdep;
	u32 cam_cm_sleepdep;
	u32 per_cm_sleepdep;
	u32 usbhost_cm_sleepdep;
	u32 cm_clkout_ctrl;
};

/* Single static context buffer; save/restore are not reentrant. */
static struct omap3_cm_regs cm_context;

/*
 * Save all CM registers listed in struct omap3_cm_regs into
 * cm_context, so omap3_cm_restore_context() can reprogram them after
 * the CM module loses state (e.g. core off-mode).
 */
void omap3_cm_save_context(void)
{
	cm_context.iva2_cm_clksel1 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1);
	cm_context.iva2_cm_clksel2 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2);
	cm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
	cm_context.sgx_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
	cm_context.dss_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
	cm_context.cam_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL);
	cm_context.per_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL);
	cm_context.emu_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
	cm_context.emu_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
	 * In this case, even though this register has been saved in
	 * scratchpad contents, we need to restore AUTO_PERIPH_DPLL
	 * by ourselves. So, we need to save it anyway.
	 */
	cm_context.pll_cm_autoidle =
		omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
	cm_context.pll_cm_autoidle2 =
		omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
	cm_context.pll_cm_clksel4 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
	cm_context.pll_cm_clksel5 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
	cm_context.pll_cm_clken2 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
	cm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
	cm_context.iva2_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN);
	cm_context.iva2_cm_clken_pll =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
	cm_context.core_cm_fclken1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	cm_context.core_cm_fclken3 =
		omap2_cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
	cm_context.sgx_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN);
	cm_context.wkup_cm_fclken =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
	cm_context.dss_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN);
	cm_context.cam_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN);
	cm_context.per_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
	cm_context.usbhost_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
	cm_context.core_cm_iclken1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
	cm_context.core_cm_iclken2 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN2);
	cm_context.core_cm_iclken3 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
	cm_context.sgx_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN);
	cm_context.wkup_cm_iclken =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
	cm_context.dss_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN);
	cm_context.cam_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN);
	cm_context.per_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
	cm_context.usbhost_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
	cm_context.iva2_cm_autoidle2 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	cm_context.mpu_cm_autoidle2 =
		omap2_cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
	cm_context.iva2_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.mpu_cm_clkstctrl =
		omap2_cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.core_cm_clkstctrl =
		omap2_cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.sgx_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.dss_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.cam_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.per_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.neon_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.usbhost_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
				      OMAP2_CM_CLKSTCTRL);
	cm_context.core_cm_autoidle1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1);
	cm_context.core_cm_autoidle2 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2);
	cm_context.core_cm_autoidle3 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3);
	cm_context.wkup_cm_autoidle =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE);
	cm_context.dss_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE);
	cm_context.cam_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE);
	cm_context.per_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
	cm_context.usbhost_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
	cm_context.sgx_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.dss_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.cam_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.per_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.usbhost_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
				      OMAP3430_CM_SLEEPDEP);
	cm_context.cm_clkout_ctrl =
		omap2_cm_read_mod_reg(OMAP3430_CCR_MOD,
				      OMAP3_CM_CLKOUT_CTRL_OFFSET);
}

/*
 * Write back every register captured by omap3_cm_save_context(), in
 * the same order it was saved.  The register sequence is deliberate
 * (clock selection before enables); do not reorder.
 */
void omap3_cm_restore_context(void)
{
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
			       CM_CLKSEL2);
	__raw_writel(cm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clksel, OMAP3430_PER_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
	 * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves.
	 */
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL4);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel5, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL5);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clken2, PLL_MOD,
			       OMAP3430ES2_CM_CLKEN2);
	__raw_writel(cm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
			       OMAP3430_CM_CLKEN_PLL);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken1, CORE_MOD,
			       CM_FCLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken3, CORE_MOD,
			       OMAP3430ES2_CM_FCLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_fclken, OMAP3430_PER_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_fclken,
			       OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken1, CORE_MOD,
			       CM_ICLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken2, CORE_MOD,
			       CM_ICLKEN2);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken3, CORE_MOD,
			       CM_ICLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_iclken, OMAP3430_PER_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_iclken,
			       OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_autoidle2, OMAP3430_IVA2_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_autoidle2, MPU_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_clkstctrl, MPU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_clkstctrl, CORE_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clkstctrl,
			       OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_clkstctrl,
			       OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle1, CORE_MOD,
			       CM_AUTOIDLE1);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle2, CORE_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle3, CORE_MOD,
			       CM_AUTOIDLE3);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_autoidle, WKUP_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.per_cm_autoidle, OMAP3430_PER_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_autoidle,
			       OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_sleepdep,
			       OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_sleepdep,
			       OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
			       OMAP3_CM_CLKOUT_CTRL_OFFSET);
}
#endif
gpl-2.0
mikronac/android_kernel_htc_msm8960
arch/arm/mach-lpc32xx/common.c
4732
8498
/*
 * arch/arm/mach-lpc32xx/common.c
 *
 * Shared platform-device declarations and clock/IRAM helper functions
 * for the NXP LPC32xx SoC family.
 *
 * Author: Kevin Wells <kevin.wells@nxp.com>
 *
 * Copyright (C) 2010 NXP Semiconductors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/i2c-pnx.h>
#include <linux/io.h>

#include <asm/mach/map.h>

#include <mach/i2c.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include "common.h"

/*
 * Watchdog timer
 */
static struct resource watchdog_resources[] = {
	[0] = {
		.start = LPC32XX_WDTIM_BASE,
		.end = LPC32XX_WDTIM_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
};

/* The LPC32xx watchdog is register-compatible with the PNX4008 driver */
struct platform_device lpc32xx_watchdog_device = {
	.name = "pnx4008-watchdog",
	.id = -1,
	.num_resources = ARRAY_SIZE(watchdog_resources),
	.resource = watchdog_resources,
};

/*
 * I2C busses (two general-purpose controllers plus the USB OTG
 * transceiver's dedicated I2C interface), all driven by "pnx-i2c"
 */
static struct i2c_pnx_data i2c0_data = {
	.name = I2C_CHIP_NAME "1",
	.base = LPC32XX_I2C1_BASE,
	.irq = IRQ_LPC32XX_I2C_1,
};

static struct i2c_pnx_data i2c1_data = {
	.name = I2C_CHIP_NAME "2",
	.base = LPC32XX_I2C2_BASE,
	.irq = IRQ_LPC32XX_I2C_2,
};

static struct i2c_pnx_data i2c2_data = {
	.name = "USB-I2C",
	.base = LPC32XX_OTG_I2C_BASE,
	.irq = IRQ_LPC32XX_USB_I2C,
};

struct platform_device lpc32xx_i2c0_device = {
	.name = "pnx-i2c",
	.id = 0,
	.dev = {
		.platform_data = &i2c0_data,
	},
};

struct platform_device lpc32xx_i2c1_device = {
	.name = "pnx-i2c",
	.id = 1,
	.dev = {
		.platform_data = &i2c1_data,
	},
};

struct platform_device lpc32xx_i2c2_device = {
	.name = "pnx-i2c",
	.id = 2,
	.dev = {
		.platform_data = &i2c2_data,
	},
};

/*
 * TSC (Touch Screen Controller)
 *
 * NOTE(review): shares the ADC register block and IRQ with the ADC
 * device below — presumably only one of the two is registered per
 * board; confirm against the board files.
 */
static struct resource lpc32xx_tsc_resources[] = {
	{
		.start = LPC32XX_ADC_BASE,
		.end = LPC32XX_ADC_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_LPC32XX_TS_IRQ,
		.end = IRQ_LPC32XX_TS_IRQ,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device lpc32xx_tsc_device = {
	.name = "ts-lpc32xx",
	.id = -1,
	.num_resources = ARRAY_SIZE(lpc32xx_tsc_resources),
	.resource = lpc32xx_tsc_resources,
};

/* RTC */
static struct resource lpc32xx_rtc_resources[] = {
	{
		.start = LPC32XX_RTC_BASE,
		.end = LPC32XX_RTC_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_LPC32XX_RTC,
		.end = IRQ_LPC32XX_RTC,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device lpc32xx_rtc_device = {
	.name = "rtc-lpc32xx",
	.id = -1,
	.num_resources = ARRAY_SIZE(lpc32xx_rtc_resources),
	.resource = lpc32xx_rtc_resources,
};

/*
 * ADC support
 */
static struct resource adc_resources[] = {
	{
		.start = LPC32XX_ADC_BASE,
		.end = LPC32XX_ADC_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_LPC32XX_TS_IRQ,
		.end = IRQ_LPC32XX_TS_IRQ,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device lpc32xx_adc_device = {
	.name = "lpc32xx-adc",
	.id = -1,
	.num_resources = ARRAY_SIZE(adc_resources),
	.resource = adc_resources,
};

/*
 * USB support
 */
/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32) 0;

static struct resource ohci_resources[] = {
	{
		.start = IO_ADDRESS(LPC32XX_USB_BASE),
		.end = IO_ADDRESS(LPC32XX_USB_BASE + 0x100 - 1),
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_LPC32XX_USB_HOST,
		.flags = IORESOURCE_IRQ,
	},
};

struct platform_device lpc32xx_ohci_device = {
	.name = "usb-ohci",
	.id = -1,
	.dev = {
		.dma_mask = &ohci_dmamask,
		.coherent_dma_mask = 0xFFFFFFFF,
	},
	.num_resources = ARRAY_SIZE(ohci_resources),
	.resource = ohci_resources,
};

/*
 * Network Support
 *
 * The Ethernet MAC uses the on-chip IRAM region as its DMA buffer
 * area (second resource entry).
 */
static struct resource net_resources[] = {
	[0] = DEFINE_RES_MEM(LPC32XX_ETHERNET_BASE, SZ_4K),
	[1] = DEFINE_RES_MEM(LPC32XX_IRAM_BASE, SZ_128K),
	[2] = DEFINE_RES_IRQ(IRQ_LPC32XX_ETHERNET),
};

static u64 lpc32xx_mac_dma_mask = 0xffffffffUL;
struct platform_device lpc32xx_net_device = {
	.name = "lpc-eth",
	.id = 0,
	.dev = {
		.dma_mask = &lpc32xx_mac_dma_mask,
		.coherent_dma_mask = 0xffffffffUL,
	},
	.num_resources = ARRAY_SIZE(net_resources),
	.resource = net_resources,
};

/*
 * Returns the unique ID for the device
 *
 * Reads the four 32-bit device-ID words from the clock/power block
 * into devid[0..3].
 */
void lpc32xx_get_uid(u32 devid[4])
{
	int i;

	for (i = 0; i < 4; i++)
		devid[i] = __raw_readl(LPC32XX_CLKPWR_DEVID(i << 2));
}

/*
 * Returns SYSCLK source
 * 0 = PLL397, 1 = main oscillator
 */
int clk_is_sysclk_mainosc(void)
{
	if ((__raw_readl(LPC32XX_CLKPWR_SYSCLK_CTRL) &
	     LPC32XX_CLKPWR_SYSCTRL_SYSCLKMUX) == 0)
		return 1;

	return 0;
}

/*
 * System reset via the watchdog timer
 */
static void lpc32xx_watchdog_reset(void)
{
	/* Make sure WDT clocks are enabled */
	__raw_writel(LPC32XX_CLKPWR_PWMCLK_WDOG_EN,
		     LPC32XX_CLKPWR_TIMER_CLK_CTRL);

	/* Instant assert of RESETOUT_N with pulse length 1mS */
	/* NOTE(review): 0x18/0xC are raw offsets into the WDT register
	 * block (pulse-length and control registers per the LPC32x0
	 * manual) — confirm against the datasheet before changing. */
	__raw_writel(13000, io_p2v(LPC32XX_WDTIM_BASE + 0x18));
	__raw_writel(0x70, io_p2v(LPC32XX_WDTIM_BASE + 0xC));
}

/*
 * Detects and returns IRAM size for the device variation
 *
 * Probe: write a modified value at the base of the second 128K bank
 * and read back through the first bank's address. If the first bank
 * reflects the write, the two addresses alias the same physical RAM,
 * i.e. only one 128K bank is present. The probed word is restored
 * afterwards, and the result is cached in iram_size.
 */
#define LPC32XX_IRAM_BANK_SIZE SZ_128K
static u32 iram_size;
u32 lpc32xx_return_iram_size(void)
{
	if (iram_size == 0) {
		u32 savedval1, savedval2;
		void __iomem *iramptr1, *iramptr2;

		iramptr1 = io_p2v(LPC32XX_IRAM_BASE);
		iramptr2 = io_p2v(LPC32XX_IRAM_BASE + LPC32XX_IRAM_BANK_SIZE);
		savedval1 = __raw_readl(iramptr1);
		savedval2 = __raw_readl(iramptr2);

		if (savedval1 == savedval2) {
			__raw_writel(savedval2 + 1, iramptr2);
			if (__raw_readl(iramptr1) == savedval2 + 1)
				iram_size = LPC32XX_IRAM_BANK_SIZE;
			else
				iram_size = LPC32XX_IRAM_BANK_SIZE * 2;
			__raw_writel(savedval2, iramptr2);
		} else
			iram_size = LPC32XX_IRAM_BANK_SIZE * 2;
	}

	return iram_size;
}

/*
 * Computes PLL rate from PLL register and input clock
 *
 * ifreq is the PLL input clock in Hz; pllsetup holds the decoded PLL
 * register fields. Returns the resulting output clock rate in Hz, or
 * 0 when the configuration violates the FCCO/FREF constraints below.
 */
u32 clk_check_pll_setup(u32 ifreq, struct clk_pll_setup *pllsetup)
{
	u32 ilfreq, p, m, n, fcco, fref, cfreq;
	int mode;

	/*
	 * PLL requirements
	 * ifreq must be >= 1MHz and <= 20MHz
	 * FCCO must be >= 156MHz and <= 320MHz
	 * FREF must be >= 1MHz and <= 27MHz
	 * Assume the passed input data is not valid
	 */
	ilfreq = ifreq;
	m = pllsetup->pll_m;
	n = pllsetup->pll_n;
	p = pllsetup->pll_p;

	/* 3-bit operating mode: {cco bypass, direct output, fdbk div ctrl} */
	mode = (pllsetup->cco_bypass_b15 << 2) |
	       (pllsetup->direct_output_b14 << 1) |
	       pllsetup->fdbk_div_ctrl_b13;

	switch (mode) {
	case 0x0: /* Non-integer mode */
		cfreq = (m * ilfreq) / (2 * p * n);
		fcco = (m * ilfreq) / n;
		fref = ilfreq / n;
		break;

	case 0x1: /* integer mode */
		cfreq = (m * ilfreq) / n;
		fcco = (m * ilfreq) / (n * 2 * p);
		fref = ilfreq / n;
		break;

	case 0x2:
	case 0x3: /* Direct mode */
		cfreq = (m * ilfreq) / n;
		fcco = cfreq;
		fref = ilfreq / n;
		break;

	case 0x4:
	case 0x5: /* Bypass mode */
		/* FCCO/FREF forced to in-range values: CCO is bypassed */
		cfreq = ilfreq / (2 * p);
		fcco = 156000000;
		fref = 1000000;
		break;

	case 0x6:
	case 0x7: /* Direct bypass mode */
	default:
		cfreq = ilfreq;
		fcco = 156000000;
		fref = 1000000;
		break;
	}

	if (fcco < 156000000 || fcco > 320000000)
		cfreq = 0;

	if (fref < 1000000 || fref > 27000000)
		cfreq = 0;

	return (u32) cfreq;
}

/* Returns the HCLK-to-PCLK divider (1..32) from the HCLK divider reg */
u32 clk_get_pclk_div(void)
{
	return 1 + ((__raw_readl(LPC32XX_CLKPWR_HCLK_DIV) >> 2) & 0x1F);
}

/* Static virtual mappings for the AHB/APB peripheral buses and IRAM */
static struct map_desc lpc32xx_io_desc[] __initdata = {
	{
		.virtual = IO_ADDRESS(LPC32XX_AHB0_START),
		.pfn = __phys_to_pfn(LPC32XX_AHB0_START),
		.length = LPC32XX_AHB0_SIZE,
		.type = MT_DEVICE
	},
	{
		.virtual = IO_ADDRESS(LPC32XX_AHB1_START),
		.pfn = __phys_to_pfn(LPC32XX_AHB1_START),
		.length = LPC32XX_AHB1_SIZE,
		.type = MT_DEVICE
	},
	{
		.virtual = IO_ADDRESS(LPC32XX_FABAPB_START),
		.pfn = __phys_to_pfn(LPC32XX_FABAPB_START),
		.length = LPC32XX_FABAPB_SIZE,
		.type = MT_DEVICE
	},
	{
		.virtual = IO_ADDRESS(LPC32XX_IRAM_BASE),
		.pfn = __phys_to_pfn(LPC32XX_IRAM_BASE),
		.length = (LPC32XX_IRAM_BANK_SIZE * 2),
		.type = MT_DEVICE
	},
};

void __init lpc32xx_map_io(void)
{
	iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc));
}

/*
 * Machine restart hook: soft/hard restarts go through the watchdog.
 * (Note the historic "lpc23xx" typo in the exported name — kept as-is
 * because external callers reference this symbol.)
 */
void lpc23xx_restart(char mode, const char *cmd)
{
	switch (mode) {
	case 's':
	case 'h':
		lpc32xx_watchdog_reset();
		break;

	default:
		/* Do nothing */
		break;
	}

	/* Wait for watchdog to reset system */
	while (1)
		;
}
gpl-2.0
neobuddy89/hammerhead
sound/soc/codecs/wm8995.c
4988
63858
/*
 * wm8995.c -- WM8995 ALSA SoC Audio driver
 *
 * Copyright 2010 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * Based on wm8994.c and wm_hubs.c by Mark Brown
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>

#include "wm8995.h"

/* Power supplies required by the codec, bulk-managed via the
 * regulator framework */
#define WM8995_NUM_SUPPLIES 8
static const char *wm8995_supply_names[WM8995_NUM_SUPPLIES] = {
	"DCVDD",
	"DBVDD1",
	"DBVDD2",
	"DBVDD3",
	"AVDD1",
	"AVDD2",
	"CPVDD",
	"MICVDD"
};

/* Hardware reset values of the WM8995 registers; seeds the regmap
 * register cache */
static struct reg_default wm8995_reg_defaults[] = {
	{ 0, 0x8995 }, { 5, 0x0100 }, { 16, 0x000b }, { 17, 0x000b },
	{ 24, 0x02c0 }, { 25, 0x02c0 }, { 26, 0x02c0 }, { 27, 0x02c0 },
	{ 28, 0x000f }, { 32, 0x0005 }, { 33, 0x0005 }, { 40, 0x0003 },
	{ 41, 0x0013 }, { 48, 0x0004 }, { 56, 0x09f8 }, { 64, 0x1f25 },
	{ 69, 0x0004 }, { 82, 0xaaaa }, { 84, 0x2a2a }, { 146, 0x0060 },
	{ 256, 0x0002 }, { 257, 0x8004 }, { 520, 0x0010 }, { 528, 0x0083 },
	{ 529, 0x0083 }, { 548, 0x0c80 }, { 580, 0x0c80 }, { 768, 0x4050 },
	{ 769, 0x4000 }, { 771, 0x0040 }, { 772, 0x0040 }, { 773, 0x0040 },
	{ 774, 0x0004 }, { 775, 0x0100 }, { 784, 0x4050 }, { 785, 0x4000 },
	{ 787, 0x0040 }, { 788, 0x0040 }, { 789, 0x0040 }, { 1024, 0x00c0 },
	{ 1025, 0x00c0 }, { 1026, 0x00c0 }, { 1027, 0x00c0 }, { 1028, 0x00c0 },
	{ 1029, 0x00c0 }, { 1030, 0x00c0 }, { 1031, 0x00c0 }, { 1056, 0x0200 },
	{ 1057, 0x0010 }, { 1058, 0x0200 }, { 1059, 0x0010 }, { 1088, 0x0098 },
	{ 1089, 0x0845 }, { 1104, 0x0098 }, { 1105, 0x0845 }, { 1152, 0x6318 },
	{ 1153, 0x6300 }, { 1154, 0x0fca }, { 1155, 0x0400 }, { 1156, 0x00d8 },
	{ 1157, 0x1eb5 }, { 1158, 0xf145 }, { 1159, 0x0b75 }, { 1160, 0x01c5 },
	{ 1161, 0x1c58 }, { 1162, 0xf373 }, { 1163, 0x0a54 }, { 1164, 0x0558 },
	{ 1165, 0x168e }, { 1166, 0xf829 }, { 1167, 0x07ad }, { 1168, 0x1103 },
	{ 1169, 0x0564 }, { 1170, 0x0559 }, { 1171, 0x4000 }, { 1184, 0x6318 },
	{ 1185, 0x6300 }, { 1186, 0x0fca }, { 1187, 0x0400 }, { 1188, 0x00d8 },
	{ 1189, 0x1eb5 }, { 1190, 0xf145 }, { 1191, 0x0b75 }, { 1192, 0x01c5 },
	{ 1193, 0x1c58 }, { 1194, 0xf373 }, { 1195, 0x0a54 }, { 1196, 0x0558 },
	{ 1197, 0x168e }, { 1198, 0xf829 }, { 1199, 0x07ad }, { 1200, 0x1103 },
	{ 1201, 0x0564 }, { 1202, 0x0559 }, { 1203, 0x4000 }, { 1280, 0x00c0 },
	{ 1281, 0x00c0 }, { 1282, 0x00c0 }, { 1283, 0x00c0 }, { 1312, 0x0200 },
	{ 1313, 0x0010 }, { 1344, 0x0098 }, { 1345, 0x0845 }, { 1408, 0x6318 },
	{ 1409, 0x6300 }, { 1410, 0x0fca }, { 1411, 0x0400 }, { 1412, 0x00d8 },
	{ 1413, 0x1eb5 }, { 1414, 0xf145 }, { 1415, 0x0b75 }, { 1416, 0x01c5 },
	{ 1417, 0x1c58 }, { 1418, 0xf373 }, { 1419, 0x0a54 }, { 1420, 0x0558 },
	{ 1421, 0x168e }, { 1422, 0xf829 }, { 1423, 0x07ad }, { 1424, 0x1103 },
	{ 1425, 0x0564 }, { 1426, 0x0559 }, { 1427, 0x4000 }, { 1568, 0x0002 },
	{ 1792, 0xa100 }, { 1793, 0xa101 }, { 1794, 0xa101 }, { 1795, 0xa101 },
	{ 1796, 0xa101 }, { 1797, 0xa101 }, { 1798, 0xa101 }, { 1799, 0xa101 },
	{ 1800, 0xa101 }, { 1801, 0xa101 }, { 1802, 0xa101 }, { 1803, 0xa101 },
	{ 1804, 0xa101 }, { 1805, 0xa101 }, { 1825, 0x0055 }, { 1848, 0x3fff },
	{ 1849, 0x1fff }, { 2049, 0x0001 }, { 2050, 0x0069 }, { 2056, 0x0002 },
	{ 2057, 0x0003 }, { 2058, 0x0069 }, { 12288, 0x0001 }, { 12289, 0x0001 },
	{ 12291, 0x0006 }, { 12292, 0x0040 }, { 12293, 0x0001 }, { 12294, 0x000f },
	{ 12295, 0x0006 }, { 12296, 0x0001 }, { 12297, 0x0003 }, { 12298, 0x0104 },
	{ 12300, 0x0060 }, { 12301, 0x0011 }, { 12302, 0x0401 }, { 12304, 0x0050 },
	{ 12305, 0x0003 }, { 12306, 0x0100 }, { 12308, 0x0051 }, { 12309, 0x0003 },
	{ 12310, 0x0104 }, { 12311, 0x000a }, { 12312, 0x0060 }, { 12313, 0x003b },
	{ 12314, 0x0502 }, { 12315, 0x0100 }, { 12316, 0x2fff }, { 12320, 0x2fff },
	{ 12324, 0x2fff }, { 12328, 0x2fff }, { 12332, 0x2fff }, { 12336, 0x2fff },
	{ 12340, 0x2fff }, { 12344, 0x2fff }, { 12348, 0x2fff }, { 12352, 0x0001 },
	{ 12353, 0x0001 }, { 12355, 0x0006 }, { 12356, 0x0040 }, { 12357, 0x0001 },
	{ 12358, 0x000f }, { 12359, 0x0006 }, { 12360, 0x0001 }, { 12361, 0x0003 },
	{ 12362, 0x0104 }, { 12364, 0x0060 }, { 12365, 0x0011 }, { 12366, 0x0401 },
	{ 12368, 0x0050 }, { 12369, 0x0003 }, { 12370, 0x0100 }, { 12372, 0x0060 },
	{ 12373, 0x003b }, { 12374, 0x0502 }, { 12375, 0x0100 }, { 12376, 0x2fff },
	{ 12380, 0x2fff }, { 12384, 0x2fff }, { 12388, 0x2fff }, { 12392, 0x2fff },
	{ 12396, 0x2fff }, { 12400, 0x2fff }, { 12404, 0x2fff }, { 12408, 0x2fff },
	{ 12412, 0x2fff }, { 12416, 0x0001 }, { 12417, 0x0001 }, { 12419, 0x0006 },
	{ 12420, 0x0040 }, { 12421, 0x0001 }, { 12422, 0x000f }, { 12423, 0x0006 },
	{ 12424, 0x0001 }, { 12425, 0x0003 }, { 12426, 0x0106 }, { 12428, 0x0061 },
	{ 12429, 0x0011 }, { 12430, 0x0401 }, { 12432, 0x0050 }, { 12433, 0x0003 },
	{ 12434, 0x0102 }, { 12436, 0x0051 }, { 12437, 0x0003 }, { 12438, 0x0106 },
	{ 12439, 0x000a }, { 12440, 0x0061 }, { 12441, 0x003b }, { 12442, 0x0502 },
	{ 12443, 0x0100 }, { 12444, 0x2fff }, { 12448, 0x2fff }, { 12452, 0x2fff },
	{ 12456, 0x2fff }, { 12460, 0x2fff }, { 12464, 0x2fff }, { 12468, 0x2fff },
	{ 12472, 0x2fff }, { 12476, 0x2fff }, { 12480, 0x0001 }, { 12481, 0x0001 },
	{ 12483, 0x0006 }, { 12484, 0x0040 }, { 12485, 0x0001 }, { 12486, 0x000f },
	{ 12487, 0x0006 }, { 12488, 0x0001 }, { 12489, 0x0003 }, { 12490, 0x0106 },
	{ 12492, 0x0061 }, { 12493, 0x0011 }, { 12494, 0x0401 }, { 12496, 0x0050 },
	{ 12497, 0x0003 }, { 12498, 0x0102 }, { 12500, 0x0061 }, { 12501, 0x003b },
	{ 12502, 0x0502 }, { 12503, 0x0100 }, { 12504, 0x2fff }, { 12508, 0x2fff },
	{ 12512, 0x2fff }, { 12516, 0x2fff }, { 12520, 0x2fff }, { 12524, 0x2fff },
	{ 12528, 0x2fff }, { 12532, 0x2fff }, { 12536, 0x2fff }, { 12540, 0x2fff },
	{ 12544, 0x0060 }, { 12546, 0x0601 }, { 12548, 0x0050 }, { 12550, 0x0100 },
	{ 12552, 0x0001 }, { 12554, 0x0104 }, { 12555, 0x0100 }, { 12556, 0x2fff },
	{ 12560, 0x2fff }, { 12564, 0x2fff }, { 12568, 0x2fff }, { 12572, 0x2fff },
	{ 12576, 0x2fff }, { 12580, 0x2fff }, { 12584, 0x2fff }, { 12588, 0x2fff },
	{ 12592, 0x2fff }, { 12596, 0x2fff }, { 12600, 0x2fff }, { 12604, 0x2fff },
	{ 12608, 0x0061 }, { 12610, 0x0601 }, { 12612, 0x0050 }, { 12614, 0x0102 },
	{ 12616, 0x0001 }, { 12618, 0x0106 }, { 12619, 0x0100 }, { 12620, 0x2fff },
	{ 12624, 0x2fff }, { 12628, 0x2fff }, { 12632, 0x2fff }, { 12636, 0x2fff },
	{ 12640, 0x2fff }, { 12644, 0x2fff }, { 12648, 0x2fff }, { 12652, 0x2fff },
	{ 12656, 0x2fff }, { 12660, 0x2fff }, { 12664, 0x2fff }, { 12668, 0x2fff },
	{ 12672, 0x0060 }, { 12674, 0x0601 }, { 12676, 0x0061 }, { 12678, 0x0601 },
	{ 12680, 0x0050 }, { 12682, 0x0300 }, { 12684, 0x0001 }, { 12686, 0x0304 },
	{ 12688, 0x0040 }, { 12690, 0x000f }, { 12692, 0x0001 }, { 12695, 0x0100 },
};

/* Cached FLL configuration (source, input rate, output rate) */
struct fll_config {
	int src;
	int in;
	int out;
};

/* Per-device driver state */
struct wm8995_priv {
	struct regmap *regmap;
	int sysclk[2];		/* SYSCLK source selection, per AIF */
	int mclk[2];		/* MCLK1/MCLK2 rates in Hz */
	int aifclk[2];		/* resulting AIF1CLK/AIF2CLK rates */
	struct fll_config fll[2], fll_suspend[2];
	struct regulator_bulk_data supplies[WM8995_NUM_SUPPLIES];
	struct notifier_block disable_nb[WM8995_NUM_SUPPLIES];
	struct snd_soc_codec *codec;
};

/*
 * We can't use the same notifier block for more than one supply and
 * there's no way I can see to get from a callback to the caller
 * except container_of().
 */
#define WM8995_REGULATOR_EVENT(n) \
static int wm8995_regulator_event_##n(struct notifier_block *nb, \
				      unsigned long event, void *data) \
{ \
	struct wm8995_priv *wm8995 = container_of(nb, struct wm8995_priv, \
						  disable_nb[n]); \
	if (event & REGULATOR_EVENT_DISABLE) { \
		regcache_mark_dirty(wm8995->regmap); \
	} \
	return 0; \
}

WM8995_REGULATOR_EVENT(0)
WM8995_REGULATOR_EVENT(1)
WM8995_REGULATOR_EVENT(2)
WM8995_REGULATOR_EVENT(3)
WM8995_REGULATOR_EVENT(4)
WM8995_REGULATOR_EVENT(5)
WM8995_REGULATOR_EVENT(6)
WM8995_REGULATOR_EVENT(7)

/* Volume control scales (dB * 100, step, mute-at-min flag) */
static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);

static const char *in1l_text[] = {
	"Differential", "Single-ended IN1LN", "Single-ended IN1LP"
};

static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
				  2, in1l_text);

static const char *in1r_text[] = {
	"Differential", "Single-ended IN1RN", "Single-ended IN1RP"
};

/* NOTE(review): both IN1L and IN1R modes live in the LEFT line input
 * control register (bits 2 and 0) — this matches the upstream driver */
static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
				  0, in1r_text);

static const char *dmic_src_text[] = {
	"DMICDAT1", "DMICDAT2", "DMICDAT3"
};

static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5,
				  8, dmic_src_text);
static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5,
				  6, dmic_src_text);

/* ALSA mixer controls exported by the codec */
static const struct snd_kcontrol_new wm8995_snd_controls[] = {
	SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME,
			 WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME,
		     WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1),

	SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME,
			 WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME,
		     WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1),

	SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME,
			 WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME,
			 WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME,
			 WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv),

	SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME,
			 WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0,
			 in1lr_pga_tlv),

	SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL,
		       4, 3, 0, in1l_boost_tlv),

	SOC_ENUM("IN1L Mode", in1l_enum),
	SOC_ENUM("IN1R Mode", in1r_enum),

	SOC_ENUM("DMIC1 SRC", dmic_src1_enum),
	SOC_ENUM("DMIC2 SRC", dmic_src2_enum),

	SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5,
		       24, 0, sidetone_tlv),
	SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5,
		       24, 0, sidetone_tlv),

	SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME,
			 WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME,
			 WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
	SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME,
			 WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv)
};

/*
 * Enable Class W dynamic charge-pump power management when DAC1's L/R
 * mixers select the same single input path; disable it otherwise.
 */
static void wm8995_update_class_w(struct snd_soc_codec *codec)
{
	int enable = 1;
	int source = 0;  /* GCC flow analysis can't track enable */
	int reg, reg_r;

	/* We also need the same setting for L/R and only one path */
	reg = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING);
	switch (reg) {
	case WM8995_AIF2DACL_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF2DAC\n");
		source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	case WM8995_AIF1DAC2L_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
		source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	case WM8995_AIF1DAC1L_TO_DAC1L:
		dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
		source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT;
		break;
	default:
		dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg);
		enable = 0;
		break;
	}

	reg_r = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING);
	if (reg_r != reg) {
		dev_dbg(codec->dev, "Left and right DAC mixers different\n");
		enable = 0;
	}

	if (enable) {
		dev_dbg(codec->dev, "Class W enabled\n");
		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
				    WM8995_CP_DYN_PWR_MASK |
				    WM8995_CP_DYN_SRC_SEL_MASK,
				    source | WM8995_CP_DYN_PWR);
	} else {
		dev_dbg(codec->dev, "Class W disabled\n");
		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
				    WM8995_CP_DYN_PWR_MASK, 0);
	}
}

/* DAPM route check: is this widget the currently selected SYSCLK? */
static int check_clk_sys(struct snd_soc_dapm_widget *source,
			 struct snd_soc_dapm_widget *sink)
{
	unsigned int reg;
	const char *clk;

	reg = snd_soc_read(source->codec, WM8995_CLOCKING_1);
	/* Check what we're currently using for CLK_SYS */
	if (reg & WM8995_SYSCLK_SRC)
		clk = "AIF2CLK";
	else
		clk = "AIF1CLK";
	return !strcmp(source->name, clk);
}

/* DAPM put handler that re-evaluates Class W after a routing change */
static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *w = wlist->widgets[0];
	struct snd_soc_codec *codec;
	int ret;

	codec = w->codec;
	ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
	wm8995_update_class_w(codec);
	return ret;
}

/*
 * Headphone supply widget event: powers the HP output path up before
 * the main widget (PRE_PMU) and drops the charge pump on power-down.
 */
static int hp_supply_event(struct snd_soc_dapm_widget *w,
			   struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec;
	struct wm8995_priv *wm8995;

	codec = w->codec;
	wm8995 = snd_soc_codec_get_drvdata(codec);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Enable the headphone amp */
		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
				    WM8995_HPOUT1L_ENA_MASK |
				    WM8995_HPOUT1R_ENA_MASK,
				    WM8995_HPOUT1L_ENA |
				    WM8995_HPOUT1R_ENA);

		/* Enable the second stage */
		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
				    WM8995_HPOUT1L_DLY_MASK |
				    WM8995_HPOUT1R_DLY_MASK,
				    WM8995_HPOUT1L_DLY |
				    WM8995_HPOUT1R_DLY);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
				    WM8995_CP_ENA_MASK, 0);
		break;
	}

	return 0;
}

/*
 * Kick a DC servo operation and poll the readback register until the
 * requested bits complete (up to ~100ms) — logs an error on timeout
 * but does not fail.
 */
static void dc_servo_cmd(struct snd_soc_codec *codec,
			 unsigned int reg, unsigned int val, unsigned int mask)
{
	int timeout = 10;

	dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n",
		__func__, reg, val, mask);

	snd_soc_write(codec, reg, val);
	while (timeout--) {
		msleep(10);
		val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0);
		if ((val & mask) == mask)
			return;
	}

	dev_err(codec->dev, "Timed out waiting for DC Servo\n");
}

/*
 * Headphone output widget event: full power-up sequence (charge pump,
 * amp enable, delay stage, DC servo calibration, short removal) and
 * the reverse teardown on power-down.
 */
static int hp_event(struct snd_soc_dapm_widget *w,
		    struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec;
	unsigned int reg;

	codec = w->codec;
	reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
				    WM8995_CP_ENA_MASK, WM8995_CP_ENA);
		msleep(5);
		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
				    WM8995_HPOUT1L_ENA_MASK |
				    WM8995_HPOUT1R_ENA_MASK,
				    WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA);
		udelay(20);
		reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY;
		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
		snd_soc_write(codec, WM8995_DC_SERVO_1,
			      WM8995_DCS_ENA_CHAN_0 | WM8995_DCS_ENA_CHAN_1);
		dc_servo_cmd(codec, WM8995_DC_SERVO_2,
			     WM8995_DCS_TRIG_STARTUP_0 |
			     WM8995_DCS_TRIG_STARTUP_1,
			     WM8995_DCS_TRIG_DAC_WR_0 |
			     WM8995_DCS_TRIG_DAC_WR_1);
		reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT |
		       WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT;
		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
				    WM8995_HPOUT1L_OUTP_MASK |
				    WM8995_HPOUT1R_OUTP_MASK |
				    WM8995_HPOUT1L_RMV_SHORT_MASK |
				    WM8995_HPOUT1R_RMV_SHORT_MASK, 0);
		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
				    WM8995_HPOUT1L_DLY_MASK |
				    WM8995_HPOUT1R_DLY_MASK, 0);
		snd_soc_write(codec, WM8995_DC_SERVO_1, 0);
		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
				    WM8995_HPOUT1L_ENA_MASK |
				    WM8995_HPOUT1R_ENA_MASK, 0);
		break;
	}

	return 0;
}

/*
 * Program the clock source and divider for one AIF (0 = AIF1,
 * 1 = AIF2; the AIF2 registers sit at offset 4 from the AIF1 ones and
 * share the same field layout, hence the reuse of the AIF1 masks).
 * Returns 0 on success or -EINVAL for an unknown SYSCLK source.
 */
static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
{
	struct wm8995_priv *wm8995;
	int rate;
	int reg1 = 0;
	int offset;

	wm8995 = snd_soc_codec_get_drvdata(codec);

	if (aif)
		offset = 4;
	else
		offset = 0;

	switch (wm8995->sysclk[aif]) {
	case WM8995_SYSCLK_MCLK1:
		rate = wm8995->mclk[0];
		break;
	case WM8995_SYSCLK_MCLK2:
		reg1 |= 0x8;
		rate = wm8995->mclk[1];
		break;
	case WM8995_SYSCLK_FLL1:
		reg1 |= 0x10;
		rate = wm8995->fll[0].out;
		break;
	case WM8995_SYSCLK_FLL2:
		reg1 |= 0x18;
		rate = wm8995->fll[1].out;
		break;
	default:
		return -EINVAL;
	}

	/* Rates above 13.5MHz must be halved into the AIF clock */
	if (rate >= 13500000) {
		rate /= 2;
		reg1 |= WM8995_AIF1CLK_DIV;

		dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
			aif + 1, rate);
	}

	wm8995->aifclk[aif] = rate;

	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset,
			    WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK,
			    reg1);
	return 0;
}

/*
 * Recompute both AIF clocks and steer CLK_SYS to the faster of the
 * two, resyncing DAPM if the selection actually changed.
 */
static int configure_clock(struct snd_soc_codec *codec)
{
	struct wm8995_priv *wm8995;
	int change, new;

	wm8995 = snd_soc_codec_get_drvdata(codec);

	/* Bring up the AIF clocks first */
	configure_aif_clock(codec, 0);
	configure_aif_clock(codec, 1);

	/*
	 * Then switch CLK_SYS over to the higher of them; a change
	 * can only happen as a result of a clocking change which can
	 * only be made outside of DAPM so we can safely redo the
	 * clocking.
	 */

	/* If they're equal it doesn't matter which is used */
	if (wm8995->aifclk[0] == wm8995->aifclk[1])
		return 0;

	if (wm8995->aifclk[0] < wm8995->aifclk[1])
		new = WM8995_SYSCLK_SRC;
	else
		new = 0;

	change = snd_soc_update_bits(codec, WM8995_CLOCKING_1,
				     WM8995_SYSCLK_SRC_MASK, new);
	if (!change)
		return 0;

	snd_soc_dapm_sync(&codec->dapm);

	return 0;
}

/* DAPM event wrapper around configure_clock() for the CLK_SYS widget */
static int clk_sys_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec;

	codec = w->codec;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return configure_clock(codec);

	case SND_SOC_DAPM_POST_PMD:
		configure_clock(codec);
		break;
	}

	return 0;
}

static const char *sidetone_text[] = {
	"ADC/DMIC1", "DMIC2",
};

static const struct soc_enum sidetone1_enum =
	SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text);

static const struct snd_kcontrol_new sidetone1_mux =
	SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);

static const struct soc_enum sidetone2_enum =
	SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text);

static const struct snd_kcontrol_new sidetone2_mux =
	SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);

/* AIF1 ADC record-path mixers (L/R, channels 1 and 2) */
static const struct snd_kcontrol_new aif1adc1l_mix[] = {
	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
			1, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
			0, 1, 0),
};

static const struct snd_kcontrol_new aif1adc1r_mix[] = {
	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
			1, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
			0, 1, 0),
};

static const struct snd_kcontrol_new aif1adc2l_mix[] = {
	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
			1, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
			0, 1, 0),
};

static const struct snd_kcontrol_new aif1adc2r_mix[] = {
	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
			1, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
			0, 1, 0),
};
/*
 * DAC1 input mixers.  WM8995_CLASS_W_SWITCH (project macro) is used rather
 * than SOC_DAPM_SINGLE so the headphone Class W logic can track DAC1
 * routing changes.
 */
static const struct snd_kcontrol_new dac1l_mix[] = {
	WM8995_CLASS_W_SWITCH("Right Sidetone Switch",
		WM8995_DAC1_LEFT_MIXER_ROUTING, 5, 1, 0),
	WM8995_CLASS_W_SWITCH("Left Sidetone Switch",
		WM8995_DAC1_LEFT_MIXER_ROUTING, 4, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF2 Switch",
		WM8995_DAC1_LEFT_MIXER_ROUTING, 2, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF1.2 Switch",
		WM8995_DAC1_LEFT_MIXER_ROUTING, 1, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF1.1 Switch",
		WM8995_DAC1_LEFT_MIXER_ROUTING, 0, 1, 0),
};

static const struct snd_kcontrol_new dac1r_mix[] = {
	WM8995_CLASS_W_SWITCH("Right Sidetone Switch",
		WM8995_DAC1_RIGHT_MIXER_ROUTING, 5, 1, 0),
	WM8995_CLASS_W_SWITCH("Left Sidetone Switch",
		WM8995_DAC1_RIGHT_MIXER_ROUTING, 4, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF2 Switch",
		WM8995_DAC1_RIGHT_MIXER_ROUTING, 2, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF1.2 Switch",
		WM8995_DAC1_RIGHT_MIXER_ROUTING, 1, 1, 0),
	WM8995_CLASS_W_SWITCH("AIF1.1 Switch",
		WM8995_DAC1_RIGHT_MIXER_ROUTING, 0, 1, 0),
};

/* DAC2/AIF2 output mixers - plain switches, no Class W involvement */
static const struct snd_kcontrol_new aif2dac2l_mix[] = {
	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
		5, 1, 0),
	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
		4, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
		2, 1, 0),
	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
		1, 1, 0),
	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
		0, 1, 0),
};

static const struct snd_kcontrol_new aif2dac2r_mix[] = {
	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
		5, 1, 0),
	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
		4, 1, 0),
	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
		2, 1, 0),
	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
		1, 1, 0),
	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
		0, 1, 0),
};

/* Analogue input PGA enables (power management register 2, bits 5/4) */
static const struct snd_kcontrol_new in1l_pga =
	SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0);

static const struct snd_kcontrol_new in1r_pga =
	SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0);

/* Virtual mux (register 0): route either the analogue ADC or DMIC1 */
static const char *adc_mux_text[] = {
	"ADC",
	"DMIC",
};

static const struct soc_enum adc_enum =
	SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);

static const struct snd_kcontrol_new adcl_mux =
	SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);

static const struct snd_kcontrol_new adcr_mux =
	SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum);

/* PDM speaker data source selection (any of the four DAC channels) */
static const char *spk_src_text[] = {
	"DAC1L", "DAC1R", "DAC2L", "DAC2R"
};

static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum,
	WM8995_LEFT_PDM_SPEAKER_1, 0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum,
	WM8995_RIGHT_PDM_SPEAKER_1, 0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum,
	WM8995_LEFT_PDM_SPEAKER_2, 0, spk_src_text);
static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum,
	WM8995_RIGHT_PDM_SPEAKER_2, 0, spk_src_text);

static const struct snd_kcontrol_new spk1l_mux =
	SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum);
static const struct snd_kcontrol_new spk1r_mux =
	SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum);
static const struct snd_kcontrol_new spk2l_mux =
	SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum);
static const struct snd_kcontrol_new spk2r_mux =
	SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum);

/* DAPM widget graph (array continues past this span) */
static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("DMIC1DAT"),
	SND_SOC_DAPM_INPUT("DMIC2DAT"),

	SND_SOC_DAPM_INPUT("IN1L"),
	SND_SOC_DAPM_INPUT("IN1R"),

	SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0,
			   &in1l_pga, 1),
	SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0,
			   &in1r_pga, 1),

	SND_SOC_DAPM_SUPPLY("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0,
			    NULL, 0),

	SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0,
			    NULL, 0),
	/*
	 * NOTE(review): this widget fires on POST_PMU/PRE_PMD, but
	 * clk_sys_event handles PRE_PMU/POST_PMD - confirm the intended
	 * pairing of event flags.
	 */
	SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
			    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),

	/* AIF1 capture stream widgets */
	SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0,
			     WM8995_POWER_MANAGEMENT_3, 9, 0),
	SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0,
			     WM8995_POWER_MANAGEMENT_3, 8, 0),
	SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0,
			     SND_SOC_NOPM, 0, 0),

	SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture", 0,
			     WM8995_POWER_MANAGEMENT_3, 11, 0),
	SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture", 0,
			     WM8995_POWER_MANAGEMENT_3, 10, 0),

	/* Virtual ADC-vs-DMIC selection, no register backing */
	SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux),
	SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux),

	SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0),
	SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0),
	SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0),
	SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8995_POWER_MANAGEMENT_3, 2, 0),

	SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0),
	SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0),

	SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
			   aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
	SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0,
			   aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)),
	SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0,
			   aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)),
	SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0,
			   aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)),

	/* AIF1 playback stream widgets */
	SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
			    WM8995_POWER_MANAGEMENT_4, 9, 0),
	SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
			    WM8995_POWER_MANAGEMENT_4, 8, 0),
	SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0,
			    SND_SOC_NOPM, 0, 0),

	SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
			    WM8995_POWER_MANAGEMENT_4, 11, 0),
	SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0,
			    WM8995_POWER_MANAGEMENT_4, 10, 0),

	SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0,
			   aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)),
	SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0,
			   aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)),

	SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0),
	SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0),
	SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0),
	SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0),

	SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0,
			   dac1l_mix, ARRAY_SIZE(dac1l_mix)),
	SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0,
			   dac1r_mix, ARRAY_SIZE(dac1r_mix)),

	SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0,
			 &sidetone1_mux),
	SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0,
			 &sidetone2_mux),

	/* Headphone path: events handled by hp_event/hp_supply_event
	 * (defined elsewhere in this file) */
	SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
			   hp_event, SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0,
			    hp_supply_event, SND_SOC_DAPM_PRE_PMU |
			    SND_SOC_DAPM_PRE_PMD),

	/* PDM speaker drivers: enable bit 4, source mux in bits [1:0] */
	SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1,
			 4, 0, &spk1l_mux),
	SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1,
			 4, 0, &spk1r_mux),
	SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2,
			 4, 0, &spk2l_mux),
	SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2,
			 4, 0, &spk2r_mux),

	SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0,
			    NULL, 0),

	SND_SOC_DAPM_OUTPUT("HP1L"),
	SND_SOC_DAPM_OUTPUT("HP1R"),
	SND_SOC_DAPM_OUTPUT("SPK1L"),
	SND_SOC_DAPM_OUTPUT("SPK1R"),
	SND_SOC_DAPM_OUTPUT("SPK2L"),
	SND_SOC_DAPM_OUTPUT("SPK2R")
};

/* DAPM routing table (continues past this span).  check_clk_sys is a
 * connectedness callback defined elsewhere in this file. */
static const struct snd_soc_dapm_route wm8995_intercon[] = {
	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },

	{ "DSP1CLK", NULL, "CLK_SYS" },
	{ "DSP2CLK", NULL, "CLK_SYS" },
	{ "SYSDSPCLK", NULL, "CLK_SYS" },

	/* Clock supplies for the capture path */
	{ "AIF1ADC1L", NULL, "AIF1CLK" },
	{ "AIF1ADC1L", NULL, "DSP1CLK" },
	{ "AIF1ADC1R", NULL, "AIF1CLK" },
	{ "AIF1ADC1R", NULL, "DSP1CLK" },
	{ "AIF1ADC1R", NULL, "SYSDSPCLK" },

	{ "AIF1ADC2L", NULL, "AIF1CLK" },
	{
	"AIF1ADC2L", NULL, "DSP1CLK" },
	{ "AIF1ADC2R", NULL, "AIF1CLK" },
	{ "AIF1ADC2R", NULL, "DSP1CLK" },
	{ "AIF1ADC2R", NULL, "SYSDSPCLK" },

	/* Digital microphone inputs need CLK_SYS running */
	{ "DMIC1L", NULL, "DMIC1DAT" },
	{ "DMIC1L", NULL, "CLK_SYS" },
	{ "DMIC1R", NULL, "DMIC1DAT" },
	{ "DMIC1R", NULL, "CLK_SYS" },
	{ "DMIC2L", NULL, "DMIC2DAT" },
	{ "DMIC2L", NULL, "CLK_SYS" },
	{ "DMIC2R", NULL, "DMIC2DAT" },
	{ "DMIC2R", NULL, "CLK_SYS" },

	{ "ADCL", NULL, "AIF1CLK" },
	{ "ADCL", NULL, "DSP1CLK" },
	{ "ADCL", NULL, "SYSDSPCLK" },

	{ "ADCR", NULL, "AIF1CLK" },
	{ "ADCR", NULL, "DSP1CLK" },
	{ "ADCR", NULL, "SYSDSPCLK" },

	/* Analogue inputs via the IN1 PGAs (LDO2 powers the PGAs) */
	{ "IN1L PGA", "IN1L Switch", "IN1L" },
	{ "IN1R PGA", "IN1R Switch", "IN1R" },

	{ "IN1L PGA", NULL, "LDO2" },
	{ "IN1R PGA", NULL, "LDO2" },

	{ "ADCL", NULL, "IN1L PGA" },
	{ "ADCR", NULL, "IN1R PGA" },

	{ "ADCL Mux", "ADC", "ADCL" },
	{ "ADCL Mux", "DMIC", "DMIC1L" },
	{ "ADCR Mux", "ADC", "ADCR" },
	{ "ADCR Mux", "DMIC", "DMIC1R" },

	/* AIF1 outputs */
	{ "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" },
	{ "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" },

	{ "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" },
	{ "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" },

	{ "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" },
	{ "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" },

	{ "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" },
	{ "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" },

	/* Sidetone */
	{ "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" },
	{ "Left Sidetone", "DMIC2", "AIF1ADC2L" },
	{ "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" },
	{ "Right Sidetone", "DMIC2", "AIF1ADC2R" },

	/* Clock supplies for the playback path */
	{ "AIF1DAC1L", NULL, "AIF1CLK" },
	{ "AIF1DAC1L", NULL, "DSP1CLK" },
	{ "AIF1DAC1R", NULL, "AIF1CLK" },
	{ "AIF1DAC1R", NULL, "DSP1CLK" },
	{ "AIF1DAC1R", NULL, "SYSDSPCLK" },

	{ "AIF1DAC2L", NULL, "AIF1CLK" },
	{ "AIF1DAC2L", NULL, "DSP1CLK" },
	{ "AIF1DAC2R", NULL, "AIF1CLK" },
	{ "AIF1DAC2R", NULL, "DSP1CLK" },
	{ "AIF1DAC2R", NULL, "SYSDSPCLK" },

	{ "DAC1L", NULL, "AIF1CLK" },
	{ "DAC1L", NULL, "DSP1CLK" },
	{ "DAC1L", NULL, "SYSDSPCLK" },

	{ "DAC1R", NULL, "AIF1CLK" },
	{ "DAC1R", NULL, "DSP1CLK" },
	{ "DAC1R", NULL,
	"SYSDSPCLK" },

	{ "AIF1DAC1L", NULL, "AIF1DACDAT" },
	{ "AIF1DAC1R", NULL, "AIF1DACDAT" },
	{ "AIF1DAC2L", NULL, "AIF1DACDAT" },
	{ "AIF1DAC2R", NULL, "AIF1DACDAT" },

	/* DAC1 inputs */
	{ "DAC1L", NULL, "DAC1L Mixer" },
	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },

	{ "DAC1R", NULL, "DAC1R Mixer" },
	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
	{ "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" },
	{ "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" },

	/* DAC2/AIF2 outputs */
	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },

	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },

	/* Output stages */
	{ "Headphone PGA", NULL, "DAC1L" },
	{ "Headphone PGA", NULL, "DAC1R" },
	{ "Headphone PGA", NULL, "DAC2L" },
	{ "Headphone PGA", NULL, "DAC2R" },
	{ "Headphone PGA", NULL, "Headphone Supply" },
	{ "Headphone PGA", NULL, "CLK_SYS" },
	{ "Headphone PGA", NULL, "LDO2" },

	{ "HP1L", NULL, "Headphone PGA" },
	{ "HP1R", NULL, "Headphone PGA" },

	{ "SPK1L Driver", "DAC1L", "DAC1L" },
	{ "SPK1L Driver", "DAC1R", "DAC1R" },
	{ "SPK1L Driver", "DAC2L", "DAC2L" },
	{ "SPK1L Driver", "DAC2R", "DAC2R" },
	{ "SPK1L Driver", NULL, "CLK_SYS" },

	{ "SPK1R Driver", "DAC1L", "DAC1L" },
	{ "SPK1R Driver", "DAC1R", "DAC1R" },
	{ "SPK1R Driver", "DAC2L", "DAC2L" },
	{ "SPK1R Driver", "DAC2R", "DAC2R" },
	{ "SPK1R Driver", NULL, "CLK_SYS" },

	{ "SPK2L Driver", "DAC1L", "DAC1L" },
	{ "SPK2L Driver", "DAC1R", "DAC1R" },
	{ "SPK2L Driver", "DAC2L", "DAC2L" },
	{ "SPK2L Driver", "DAC2R", "DAC2R" },
	{ "SPK2L Driver", NULL, "CLK_SYS" },

	{ "SPK2R Driver", "DAC1L", "DAC1L" },
	{ "SPK2R Driver", "DAC1R", "DAC1R" },
	{
	"SPK2R Driver", "DAC2L", "DAC2L" },
	{ "SPK2R Driver", "DAC2R", "DAC2R" },
	{ "SPK2R Driver", NULL, "CLK_SYS" },

	{ "SPK1L", NULL, "SPK1L Driver" },
	{ "SPK1R", NULL, "SPK1R Driver" },
	{ "SPK2L", NULL, "SPK2L Driver" },
	{ "SPK2R", NULL, "SPK2R Driver" }
};

/*
 * Regmap-style readable-register predicate: true only for registers
 * listed below (continues past this span).
 */
static bool wm8995_readable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case WM8995_SOFTWARE_RESET:
	case WM8995_POWER_MANAGEMENT_1:
	case WM8995_POWER_MANAGEMENT_2:
	case WM8995_POWER_MANAGEMENT_3:
	case WM8995_POWER_MANAGEMENT_4:
	case WM8995_POWER_MANAGEMENT_5:
	case WM8995_LEFT_LINE_INPUT_1_VOLUME:
	case WM8995_RIGHT_LINE_INPUT_1_VOLUME:
	case WM8995_LEFT_LINE_INPUT_CONTROL:
	case WM8995_DAC1_LEFT_VOLUME:
	case WM8995_DAC1_RIGHT_VOLUME:
	case WM8995_DAC2_LEFT_VOLUME:
	case WM8995_DAC2_RIGHT_VOLUME:
	case WM8995_OUTPUT_VOLUME_ZC_1:
	case WM8995_MICBIAS_1:
	case WM8995_MICBIAS_2:
	case WM8995_LDO_1:
	case WM8995_LDO_2:
	case WM8995_ACCESSORY_DETECT_MODE1:
	case WM8995_ACCESSORY_DETECT_MODE2:
	case WM8995_HEADPHONE_DETECT1:
	case WM8995_HEADPHONE_DETECT2:
	case WM8995_MIC_DETECT_1:
	case WM8995_MIC_DETECT_2:
	case WM8995_CHARGE_PUMP_1:
	case WM8995_CLASS_W_1:
	case WM8995_DC_SERVO_1:
	case WM8995_DC_SERVO_2:
	case WM8995_DC_SERVO_3:
	case WM8995_DC_SERVO_5:
	case WM8995_DC_SERVO_6:
	case WM8995_DC_SERVO_7:
	case WM8995_DC_SERVO_READBACK_0:
	case WM8995_ANALOGUE_HP_1:
	case WM8995_ANALOGUE_HP_2:
	case WM8995_CHIP_REVISION:
	case WM8995_CONTROL_INTERFACE_1:
	case WM8995_CONTROL_INTERFACE_2:
	case WM8995_WRITE_SEQUENCER_CTRL_1:
	case WM8995_WRITE_SEQUENCER_CTRL_2:
	case WM8995_AIF1_CLOCKING_1:
	case WM8995_AIF1_CLOCKING_2:
	case WM8995_AIF2_CLOCKING_1:
	case WM8995_AIF2_CLOCKING_2:
	case WM8995_CLOCKING_1:
	case WM8995_CLOCKING_2:
	case WM8995_AIF1_RATE:
	case WM8995_AIF2_RATE:
	case WM8995_RATE_STATUS:
	case WM8995_FLL1_CONTROL_1:
	case WM8995_FLL1_CONTROL_2:
	case WM8995_FLL1_CONTROL_3:
	case WM8995_FLL1_CONTROL_4:
	case WM8995_FLL1_CONTROL_5:
	case WM8995_FLL2_CONTROL_1:
	case WM8995_FLL2_CONTROL_2:
	case WM8995_FLL2_CONTROL_3:
	case WM8995_FLL2_CONTROL_4:
	case WM8995_FLL2_CONTROL_5:
	case WM8995_AIF1_CONTROL_1:
	case WM8995_AIF1_CONTROL_2:
	case WM8995_AIF1_MASTER_SLAVE:
	case WM8995_AIF1_BCLK:
	case WM8995_AIF1ADC_LRCLK:
	case WM8995_AIF1DAC_LRCLK:
	case WM8995_AIF1DAC_DATA:
	case WM8995_AIF1ADC_DATA:
	case WM8995_AIF2_CONTROL_1:
	case WM8995_AIF2_CONTROL_2:
	case WM8995_AIF2_MASTER_SLAVE:
	case WM8995_AIF2_BCLK:
	case WM8995_AIF2ADC_LRCLK:
	case WM8995_AIF2DAC_LRCLK:
	case WM8995_AIF2DAC_DATA:
	case WM8995_AIF2ADC_DATA:
	case WM8995_AIF1_ADC1_LEFT_VOLUME:
	case WM8995_AIF1_ADC1_RIGHT_VOLUME:
	case WM8995_AIF1_DAC1_LEFT_VOLUME:
	case WM8995_AIF1_DAC1_RIGHT_VOLUME:
	case WM8995_AIF1_ADC2_LEFT_VOLUME:
	case WM8995_AIF1_ADC2_RIGHT_VOLUME:
	case WM8995_AIF1_DAC2_LEFT_VOLUME:
	case WM8995_AIF1_DAC2_RIGHT_VOLUME:
	case WM8995_AIF1_ADC1_FILTERS:
	case WM8995_AIF1_ADC2_FILTERS:
	case WM8995_AIF1_DAC1_FILTERS_1:
	case WM8995_AIF1_DAC1_FILTERS_2:
	case WM8995_AIF1_DAC2_FILTERS_1:
	case WM8995_AIF1_DAC2_FILTERS_2:
	case WM8995_AIF1_DRC1_1:
	case WM8995_AIF1_DRC1_2:
	case WM8995_AIF1_DRC1_3:
	case WM8995_AIF1_DRC1_4:
	case WM8995_AIF1_DRC1_5:
	case WM8995_AIF1_DRC2_1:
	case WM8995_AIF1_DRC2_2:
	case WM8995_AIF1_DRC2_3:
	case WM8995_AIF1_DRC2_4:
	case WM8995_AIF1_DRC2_5:
	/* EQ band 5 has no C coefficient register on this part */
	case WM8995_AIF1_DAC1_EQ_GAINS_1:
	case WM8995_AIF1_DAC1_EQ_GAINS_2:
	case WM8995_AIF1_DAC1_EQ_BAND_1_A:
	case WM8995_AIF1_DAC1_EQ_BAND_1_B:
	case WM8995_AIF1_DAC1_EQ_BAND_1_PG:
	case WM8995_AIF1_DAC1_EQ_BAND_2_A:
	case WM8995_AIF1_DAC1_EQ_BAND_2_B:
	case WM8995_AIF1_DAC1_EQ_BAND_2_C:
	case WM8995_AIF1_DAC1_EQ_BAND_2_PG:
	case WM8995_AIF1_DAC1_EQ_BAND_3_A:
	case WM8995_AIF1_DAC1_EQ_BAND_3_B:
	case WM8995_AIF1_DAC1_EQ_BAND_3_C:
	case WM8995_AIF1_DAC1_EQ_BAND_3_PG:
	case WM8995_AIF1_DAC1_EQ_BAND_4_A:
	case WM8995_AIF1_DAC1_EQ_BAND_4_B:
	case WM8995_AIF1_DAC1_EQ_BAND_4_C:
	case WM8995_AIF1_DAC1_EQ_BAND_4_PG:
	case WM8995_AIF1_DAC1_EQ_BAND_5_A:
	case WM8995_AIF1_DAC1_EQ_BAND_5_B:
	case WM8995_AIF1_DAC1_EQ_BAND_5_PG:
	case WM8995_AIF1_DAC2_EQ_GAINS_1:
	case WM8995_AIF1_DAC2_EQ_GAINS_2:
	case WM8995_AIF1_DAC2_EQ_BAND_1_A:
	case WM8995_AIF1_DAC2_EQ_BAND_1_B:
	case WM8995_AIF1_DAC2_EQ_BAND_1_PG:
	case WM8995_AIF1_DAC2_EQ_BAND_2_A:
	case WM8995_AIF1_DAC2_EQ_BAND_2_B:
	case WM8995_AIF1_DAC2_EQ_BAND_2_C:
	case WM8995_AIF1_DAC2_EQ_BAND_2_PG:
	case WM8995_AIF1_DAC2_EQ_BAND_3_A:
	case WM8995_AIF1_DAC2_EQ_BAND_3_B:
	case WM8995_AIF1_DAC2_EQ_BAND_3_C:
	case WM8995_AIF1_DAC2_EQ_BAND_3_PG:
	case WM8995_AIF1_DAC2_EQ_BAND_4_A:
	case WM8995_AIF1_DAC2_EQ_BAND_4_B:
	case WM8995_AIF1_DAC2_EQ_BAND_4_C:
	case WM8995_AIF1_DAC2_EQ_BAND_4_PG:
	case WM8995_AIF1_DAC2_EQ_BAND_5_A:
	case WM8995_AIF1_DAC2_EQ_BAND_5_B:
	case WM8995_AIF1_DAC2_EQ_BAND_5_PG:
	case WM8995_AIF2_ADC_LEFT_VOLUME:
	case WM8995_AIF2_ADC_RIGHT_VOLUME:
	case WM8995_AIF2_DAC_LEFT_VOLUME:
	case WM8995_AIF2_DAC_RIGHT_VOLUME:
	case WM8995_AIF2_ADC_FILTERS:
	case WM8995_AIF2_DAC_FILTERS_1:
	case WM8995_AIF2_DAC_FILTERS_2:
	case WM8995_AIF2_DRC_1:
	case WM8995_AIF2_DRC_2:
	case WM8995_AIF2_DRC_3:
	case WM8995_AIF2_DRC_4:
	case WM8995_AIF2_DRC_5:
	case WM8995_AIF2_EQ_GAINS_1:
	case WM8995_AIF2_EQ_GAINS_2:
	case WM8995_AIF2_EQ_BAND_1_A:
	case WM8995_AIF2_EQ_BAND_1_B:
	case WM8995_AIF2_EQ_BAND_1_PG:
	case WM8995_AIF2_EQ_BAND_2_A:
	case WM8995_AIF2_EQ_BAND_2_B:
	case WM8995_AIF2_EQ_BAND_2_C:
	case WM8995_AIF2_EQ_BAND_2_PG:
	case WM8995_AIF2_EQ_BAND_3_A:
	case WM8995_AIF2_EQ_BAND_3_B:
	case WM8995_AIF2_EQ_BAND_3_C:
	case WM8995_AIF2_EQ_BAND_3_PG:
	case WM8995_AIF2_EQ_BAND_4_A:
	case WM8995_AIF2_EQ_BAND_4_B:
	case WM8995_AIF2_EQ_BAND_4_C:
	case WM8995_AIF2_EQ_BAND_4_PG:
	case WM8995_AIF2_EQ_BAND_5_A:
	case WM8995_AIF2_EQ_BAND_5_B:
	case WM8995_AIF2_EQ_BAND_5_PG:
	case WM8995_DAC1_MIXER_VOLUMES:
	case WM8995_DAC1_LEFT_MIXER_ROUTING:
	case WM8995_DAC1_RIGHT_MIXER_ROUTING:
	case WM8995_DAC2_MIXER_VOLUMES:
	case WM8995_DAC2_LEFT_MIXER_ROUTING:
	case WM8995_DAC2_RIGHT_MIXER_ROUTING:
	case WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING:
	case WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING:
	case WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING:
	case WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING:
	case WM8995_DAC_SOFTMUTE:
	case WM8995_OVERSAMPLING:
	case
	WM8995_SIDETONE:
	case WM8995_GPIO_1:
	case WM8995_GPIO_2:
	case WM8995_GPIO_3:
	case WM8995_GPIO_4:
	case WM8995_GPIO_5:
	case WM8995_GPIO_6:
	case WM8995_GPIO_7:
	case WM8995_GPIO_8:
	case WM8995_GPIO_9:
	case WM8995_GPIO_10:
	case WM8995_GPIO_11:
	case WM8995_GPIO_12:
	case WM8995_GPIO_13:
	case WM8995_GPIO_14:
	case WM8995_PULL_CONTROL_1:
	case WM8995_PULL_CONTROL_2:
	case WM8995_INTERRUPT_STATUS_1:
	case WM8995_INTERRUPT_STATUS_2:
	case WM8995_INTERRUPT_RAW_STATUS_2:
	case WM8995_INTERRUPT_STATUS_1_MASK:
	case WM8995_INTERRUPT_STATUS_2_MASK:
	case WM8995_INTERRUPT_CONTROL:
	case WM8995_LEFT_PDM_SPEAKER_1:
	case WM8995_RIGHT_PDM_SPEAKER_1:
	case WM8995_PDM_SPEAKER_1_MUTE_SEQUENCE:
	case WM8995_LEFT_PDM_SPEAKER_2:
	case WM8995_RIGHT_PDM_SPEAKER_2:
	case WM8995_PDM_SPEAKER_2_MUTE_SEQUENCE:
		return true;
	default:
		return false;
	}
}

/*
 * Volatile-register predicate: these must always be read from hardware,
 * never from the register cache (status/readback registers).
 */
static bool wm8995_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case WM8995_SOFTWARE_RESET:
	case WM8995_DC_SERVO_READBACK_0:
	case WM8995_INTERRUPT_STATUS_1:
	case WM8995_INTERRUPT_STATUS_2:
	case WM8995_INTERRUPT_CONTROL:
	case WM8995_ACCESSORY_DETECT_MODE1:
	case WM8995_ACCESSORY_DETECT_MODE2:
	case WM8995_HEADPHONE_DETECT1:
	case WM8995_HEADPHONE_DETECT2:
	case WM8995_RATE_STATUS:
		return true;
	default:
		return false;
	}
}

/*
 * DAI digital mute: pick the DAC filter register for the given interface
 * and set/clear the mute bit.
 *
 * NOTE(review): the AIF1DAC1 mask/shift constants are reused for the AIF2
 * register as well - presumably the mute bit sits at the same position in
 * both registers; verify against the register map.
 */
static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute)
{
	struct snd_soc_codec *codec = dai->codec;
	int mute_reg;

	switch (dai->id) {
	case 0:
		mute_reg = WM8995_AIF1_DAC1_FILTERS_1;
		break;
	case 1:
		mute_reg = WM8995_AIF2_DAC_FILTERS_1;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, mute_reg, WM8995_AIF1DAC1_MUTE_MASK,
			    !!mute << WM8995_AIF1DAC1_MUTE_SHIFT);
	return 0;
}

/*
 * Configure the DAI hardware format: master/slave, frame format and
 * clock inversion.
 *
 * NOTE(review): AIF1 registers are written unconditionally here, even
 * though this codec exposes a second DAI - confirm whether an AIF2
 * register offset is needed for dai->id == 1.
 */
static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_codec *codec;
	int master;
	int aif;

	codec = dai->codec;

	master = 0;
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		master = WM8995_AIF1_MSTR;
		break;
	default:
		dev_err(dai->dev, "Unknown master/slave configuration\n");
		return -EINVAL;
	}

	aif = 0;
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_B:
		aif |= WM8995_AIF1_LRCLK_INV;
		/* fall through - DSP B is DSP A with inverted LRCLK */
	case SND_SOC_DAIFMT_DSP_A:
		aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
		break;
	case SND_SOC_DAIFMT_I2S:
		aif |= (0x2 << WM8995_AIF1_FMT_SHIFT);
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		aif |= (0x1 << WM8995_AIF1_FMT_SHIFT);
		break;
	default:
		dev_err(dai->dev, "Unknown dai format\n");
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* frame inversion not valid for DSP modes */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8995_AIF1_BCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_RIGHT_J:
	case SND_SOC_DAIFMT_LEFT_J:
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8995_AIF1_BCLK_INV;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			aif |= WM8995_AIF1_LRCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1,
			    WM8995_AIF1_BCLK_INV_MASK |
			    WM8995_AIF1_LRCLK_INV_MASK |
			    WM8995_AIF1_FMT_MASK, aif);
	snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE,
			    WM8995_AIF1_MSTR_MASK, master);
	return 0;
}

/* Sample rates selectable via the AIFn_RATE SR field, table index = field
 * value */
static const int srs[] = {
	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200,
	96000
};

/* AIFCLK/fs ratios, indexed by register field value (0 is reserved) */
static const int fs_ratios[] = {
	-1 /* reserved */,
	128, 192, 256, 384, 512, 768, 1024, 1408, 1536
};

/* BCLK dividers x10 (e.g. 15 -> divide by 1.5), sorted ascending */
static const int bclk_divs[] = {
	10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480
};

/*
 * hw_params: choose word length, sample-rate field, AIFCLK/fs ratio,
 * BCLK divider and LRCLK rate for the stream being configured.
 * (Function body continues past this span.)
 */
static int wm8995_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec;
	struct wm8995_priv *wm8995;
	int aif1_reg;
	int bclk_reg;
	int lrclk_reg;
	int
	rate_reg;
	int bclk_rate;
	int aif1;
	int lrclk, bclk;
	int i, rate_val, best, best_val, cur_val;

	codec = dai->codec;
	wm8995 = snd_soc_codec_get_drvdata(codec);

	/* Pick per-interface register set; use the DAC LRCLK register for
	 * playback and the ADC LRCLK register for capture */
	switch (dai->id) {
	case 0:
		aif1_reg = WM8995_AIF1_CONTROL_1;
		bclk_reg = WM8995_AIF1_BCLK;
		rate_reg = WM8995_AIF1_RATE;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
		    /* || wm8995->lrclk_shared[0] */) {
			lrclk_reg = WM8995_AIF1DAC_LRCLK;
		} else {
			lrclk_reg = WM8995_AIF1ADC_LRCLK;
			dev_dbg(codec->dev, "AIF1 using split LRCLK\n");
		}
		break;
	case 1:
		aif1_reg = WM8995_AIF2_CONTROL_1;
		bclk_reg = WM8995_AIF2_BCLK;
		rate_reg = WM8995_AIF2_RATE;
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
		    /* || wm8995->lrclk_shared[1] */) {
			lrclk_reg = WM8995_AIF2DAC_LRCLK;
		} else {
			lrclk_reg = WM8995_AIF2ADC_LRCLK;
			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
		}
		break;
	default:
		return -EINVAL;
	}

	bclk_rate = snd_soc_params_to_bclk(params);
	if (bclk_rate < 0)
		return bclk_rate;

	/* Word length field from the sample format */
	aif1 = 0;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT);
		break;
	default:
		dev_err(dai->dev, "Unsupported word length %u\n",
			params_format(params));
		return -EINVAL;
	}

	/* try to find a suitable sample rate */
	for (i = 0; i < ARRAY_SIZE(srs); ++i)
		if (srs[i] == params_rate(params))
			break;
	if (i == ARRAY_SIZE(srs)) {
		dev_err(dai->dev, "Sample rate %d is not supported\n",
			params_rate(params));
		return -EINVAL;
	}
	rate_val = i << WM8995_AIF1_SR_SHIFT;

	dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]);
	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
		dai->id + 1, wm8995->aifclk[dai->id], bclk_rate);

	/* AIFCLK/fs ratio; look for a close match in either direction */
	best = 1;
	best_val = abs((fs_ratios[1] * params_rate(params))
		       - wm8995->aifclk[dai->id]);
	for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) {
		cur_val = abs((fs_ratios[i] * params_rate(params))
			      - wm8995->aifclk[dai->id]);
		if (cur_val >= best_val)
			continue;
		best = i;
		best_val = cur_val;
	}
	rate_val |= best;

	dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n",
		dai->id + 1, fs_ratios[best]);

	/*
	 * We may not get quite the right frequency if using
	 * approximate clocks so look for the closest match that is
	 * higher than the target (we need to ensure that there enough
	 * BCLKs to clock out the samples).
	 */
	best = 0;
	bclk = 0;
	for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
		/* divisors are stored x10, hence the * 10 */
		cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i])
			- bclk_rate;
		if (cur_val < 0) /* BCLK table is sorted */
			break;
		best = i;
	}
	bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT;

	bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best];
	dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
		bclk_divs[best], bclk_rate);

	/* LRCLK is expressed as a BCLK/LRCLK ratio */
	lrclk = bclk_rate / params_rate(params);
	dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
		lrclk, bclk_rate / lrclk);

	snd_soc_update_bits(codec, aif1_reg,
			    WM8995_AIF1_WL_MASK, aif1);
	snd_soc_update_bits(codec, bclk_reg,
			    WM8995_AIF1_BCLK_DIV_MASK, bclk);
	snd_soc_update_bits(codec, lrclk_reg,
			    WM8995_AIF1DAC_RATE_MASK, lrclk);
	snd_soc_update_bits(codec, rate_reg,
			    WM8995_AIF1_SR_MASK |
			    WM8995_AIF1CLK_RATE_MASK, rate_val);
	return 0;
}

/*
 * Tri-state (high-impedance) control for a DAI's output pins; returns
 * the snd_soc_update_bits() result (negative errno, 0 or 1 on change).
 */
static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	int reg, val, mask;

	switch (codec_dai->id) {
	case 0:
		reg = WM8995_AIF1_MASTER_SLAVE;
		mask = WM8995_AIF1_TRI;
		break;
	case 1:
		reg = WM8995_AIF2_MASTER_SLAVE;
		mask = WM8995_AIF2_TRI;
		break;
	case 2:
		reg = WM8995_POWER_MANAGEMENT_5;
		mask = WM8995_AIF3_TRI;
		break;
	default:
		return -EINVAL;
	}

	if (tristate)
		val = mask;
	else
		val = 0;

	return snd_soc_update_bits(codec, reg, mask, val);
}

/* The size in bits of the FLL divide multiplied by 10
 * to allow rounding later */
#define FIXED_FLL_SIZE ((1 << 16) * 10)

/* Computed FLL register settings (see wm8995_get_fll_config) */
struct fll_div {
	u16 outdiv;		/* FVCO output divider */
	u16 n;			/* integer part of N.K multiplier */
	u16 k;			/* fractional part of N.K (16 bit) */
	u16 clk_ref_div;	/* reference clock pre-divider (log2) */
	u16 fll_fratio;		/* reference frequency ratio field */
};
static int wm8995_get_fll_config(struct fll_div *fll, int freq_in, int freq_out) { u64 Kpart; unsigned int K, Ndiv, Nmod; pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out); /* Scale the input frequency down to <= 13.5MHz */ fll->clk_ref_div = 0; while (freq_in > 13500000) { fll->clk_ref_div++; freq_in /= 2; if (fll->clk_ref_div > 3) return -EINVAL; } pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in); /* Scale the output to give 90MHz<=Fvco<=100MHz */ fll->outdiv = 3; while (freq_out * (fll->outdiv + 1) < 90000000) { fll->outdiv++; if (fll->outdiv > 63) return -EINVAL; } freq_out *= fll->outdiv + 1; pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out); if (freq_in > 1000000) { fll->fll_fratio = 0; } else if (freq_in > 256000) { fll->fll_fratio = 1; freq_in *= 2; } else if (freq_in > 128000) { fll->fll_fratio = 2; freq_in *= 4; } else if (freq_in > 64000) { fll->fll_fratio = 3; freq_in *= 8; } else { fll->fll_fratio = 4; freq_in *= 16; } pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in); /* Now, calculate N.K */ Ndiv = freq_out / freq_in; fll->n = Ndiv; Nmod = freq_out % freq_in; pr_debug("Nmod=%d\n", Nmod); /* Calculate fractional part - scale up so we can round. 
*/ Kpart = FIXED_FLL_SIZE * (long long)Nmod; do_div(Kpart, freq_in); K = Kpart & 0xFFFFFFFF; if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ fll->k = K / 10; pr_debug("N=%x K=%x\n", fll->n, fll->k); return 0; } static int wm8995_set_fll(struct snd_soc_dai *dai, int id, int src, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec; struct wm8995_priv *wm8995; int reg_offset, ret; struct fll_div fll; u16 reg, aif1, aif2; codec = dai->codec; wm8995 = snd_soc_codec_get_drvdata(codec); aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1) & WM8995_AIF1CLK_ENA; aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1) & WM8995_AIF2CLK_ENA; switch (id) { case WM8995_FLL1: reg_offset = 0; id = 0; break; case WM8995_FLL2: reg_offset = 0x20; id = 1; break; default: return -EINVAL; } switch (src) { case 0: /* Allow no source specification when stopping */ if (freq_out) return -EINVAL; break; case WM8995_FLL_SRC_MCLK1: case WM8995_FLL_SRC_MCLK2: case WM8995_FLL_SRC_LRCLK: case WM8995_FLL_SRC_BCLK: break; default: return -EINVAL; } /* Are we changing anything? */ if (wm8995->fll[id].src == src && wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out) return 0; /* If we're stopping the FLL redo the old config - no * registers will actually be written but we avoid GCC flow * analysis bugs spewing warnings. 
*/ if (freq_out) ret = wm8995_get_fll_config(&fll, freq_in, freq_out); else ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in, wm8995->fll[id].out); if (ret < 0) return ret; /* Gate the AIF clocks while we reclock */ snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1, WM8995_AIF1CLK_ENA_MASK, 0); snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1, WM8995_AIF2CLK_ENA_MASK, 0); /* We always need to disable the FLL while reconfiguring */ snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset, WM8995_FLL1_ENA_MASK, 0); reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) | (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT); snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset, WM8995_FLL1_OUTDIV_MASK | WM8995_FLL1_FRATIO_MASK, reg); snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k); snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset, WM8995_FLL1_N_MASK, fll.n << WM8995_FLL1_N_SHIFT); snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset, WM8995_FLL1_REFCLK_DIV_MASK | WM8995_FLL1_REFCLK_SRC_MASK, (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) | (src - 1)); if (freq_out) snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset, WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA); wm8995->fll[id].in = freq_in; wm8995->fll[id].out = freq_out; wm8995->fll[id].src = src; /* Enable any gated AIF clocks */ snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1, WM8995_AIF1CLK_ENA_MASK, aif1); snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1, WM8995_AIF2CLK_ENA_MASK, aif2); configure_clock(codec); return 0; } static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec; struct wm8995_priv *wm8995; codec = dai->codec; wm8995 = snd_soc_codec_get_drvdata(codec); switch (dai->id) { case 0: case 1: break; default: /* AIF3 shares clocking with AIF1/2 */ return -EINVAL; } switch (clk_id) { case WM8995_SYSCLK_MCLK1: wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1; wm8995->mclk[0] = freq; 
dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n", dai->id + 1, freq); break; case WM8995_SYSCLK_MCLK2: wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1; wm8995->mclk[1] = freq; dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n", dai->id + 1, freq); break; case WM8995_SYSCLK_FLL1: wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1; dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1); break; case WM8995_SYSCLK_FLL2: wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2; dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1); break; case WM8995_SYSCLK_OPCLK: default: dev_err(dai->dev, "Unknown clock source %d\n", clk_id); return -EINVAL; } configure_clock(codec); return 0; } static int wm8995_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8995_priv *wm8995; int ret; wm8995 = snd_soc_codec_get_drvdata(codec); switch (level) { case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); if (ret) return ret; ret = regcache_sync(wm8995->regmap); if (ret) { dev_err(codec->dev, "Failed to sync cache: %d\n", ret); return ret; } snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, WM8995_BG_ENA_MASK, WM8995_BG_ENA); } break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1, WM8995_BG_ENA_MASK, 0); regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); break; } codec->dapm.bias_level = level; return 0; } #ifdef CONFIG_PM static int wm8995_suspend(struct snd_soc_codec *codec) { wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8995_resume(struct snd_soc_codec *codec) { wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define wm8995_suspend NULL #define wm8995_resume NULL #endif static int wm8995_remove(struct snd_soc_codec *codec) { struct wm8995_priv *wm8995; int i; wm8995 = snd_soc_codec_get_drvdata(codec); 
wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF); for (i = 0; i < ARRAY_SIZE(wm8995->supplies); ++i) regulator_unregister_notifier(wm8995->supplies[i].consumer, &wm8995->disable_nb[i]); regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); return 0; } static int wm8995_probe(struct snd_soc_codec *codec) { struct wm8995_priv *wm8995; int i; int ret; wm8995 = snd_soc_codec_get_drvdata(codec); wm8995->codec = codec; codec->control_data = wm8995->regmap; ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP); if (ret < 0) { dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) wm8995->supplies[i].supply = wm8995_supply_names[i]; ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8995->supplies), wm8995->supplies); if (ret) { dev_err(codec->dev, "Failed to request supplies: %d\n", ret); return ret; } wm8995->disable_nb[0].notifier_call = wm8995_regulator_event_0; wm8995->disable_nb[1].notifier_call = wm8995_regulator_event_1; wm8995->disable_nb[2].notifier_call = wm8995_regulator_event_2; wm8995->disable_nb[3].notifier_call = wm8995_regulator_event_3; wm8995->disable_nb[4].notifier_call = wm8995_regulator_event_4; wm8995->disable_nb[5].notifier_call = wm8995_regulator_event_5; wm8995->disable_nb[6].notifier_call = wm8995_regulator_event_6; wm8995->disable_nb[7].notifier_call = wm8995_regulator_event_7; /* This should really be moved into the regulator core */ for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) { ret = regulator_register_notifier(wm8995->supplies[i].consumer, &wm8995->disable_nb[i]); if (ret) { dev_err(codec->dev, "Failed to register regulator notifier: %d\n", ret); } } ret = regulator_bulk_enable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); if (ret) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); goto err_reg_get; } ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET); if (ret < 0) { dev_err(codec->dev, "Failed to read device ID: %d\n", ret); goto 
err_reg_enable; } if (ret != 0x8995) { dev_err(codec->dev, "Invalid device ID: %#x\n", ret); ret = -EINVAL; goto err_reg_enable; } ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset: %d\n", ret); goto err_reg_enable; } wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Latch volume updates (right only; we always do left then right). */ snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME, WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU); snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME, WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU); snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME, WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU); snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME, WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU); snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME, WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU); snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME, WM8995_AIF2ADC_VU_MASK, WM8995_AIF1ADC2_VU); snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME, WM8995_DAC1_VU_MASK, WM8995_DAC1_VU); snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME, WM8995_DAC2_VU_MASK, WM8995_DAC2_VU); snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME, WM8995_IN1_VU_MASK, WM8995_IN1_VU); wm8995_update_class_w(codec); snd_soc_add_codec_controls(codec, wm8995_snd_controls, ARRAY_SIZE(wm8995_snd_controls)); snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets, ARRAY_SIZE(wm8995_dapm_widgets)); snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon, ARRAY_SIZE(wm8995_intercon)); return 0; err_reg_enable: regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); err_reg_get: regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies); return ret; } #define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8995_aif1_dai_ops = { .set_sysclk = 
wm8995_set_dai_sysclk, .set_fmt = wm8995_set_dai_fmt, .hw_params = wm8995_hw_params, .digital_mute = wm8995_aif_mute, .set_pll = wm8995_set_fll, .set_tristate = wm8995_set_tristate, }; static const struct snd_soc_dai_ops wm8995_aif2_dai_ops = { .set_sysclk = wm8995_set_dai_sysclk, .set_fmt = wm8995_set_dai_fmt, .hw_params = wm8995_hw_params, .digital_mute = wm8995_aif_mute, .set_pll = wm8995_set_fll, .set_tristate = wm8995_set_tristate, }; static const struct snd_soc_dai_ops wm8995_aif3_dai_ops = { .set_tristate = wm8995_set_tristate, }; static struct snd_soc_dai_driver wm8995_dai[] = { { .name = "wm8995-aif1", .playback = { .stream_name = "AIF1 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = WM8995_FORMATS }, .capture = { .stream_name = "AIF1 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = WM8995_FORMATS }, .ops = &wm8995_aif1_dai_ops }, { .name = "wm8995-aif2", .playback = { .stream_name = "AIF2 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = WM8995_FORMATS }, .capture = { .stream_name = "AIF2 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = WM8995_FORMATS }, .ops = &wm8995_aif2_dai_ops }, { .name = "wm8995-aif3", .playback = { .stream_name = "AIF3 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = WM8995_FORMATS }, .capture = { .stream_name = "AIF3 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = WM8995_FORMATS }, .ops = &wm8995_aif3_dai_ops } }; static struct snd_soc_codec_driver soc_codec_dev_wm8995 = { .probe = wm8995_probe, .remove = wm8995_remove, .suspend = wm8995_suspend, .resume = wm8995_resume, .set_bias_level = wm8995_set_bias_level, .idle_bias_off = true, }; static struct regmap_config wm8995_regmap = { .reg_bits = 16, .val_bits = 16, .max_register = WM8995_MAX_REGISTER, 
.reg_defaults = wm8995_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8995_reg_defaults), .volatile_reg = wm8995_volatile, .readable_reg = wm8995_readable, .cache_type = REGCACHE_RBTREE, }; #if defined(CONFIG_SPI_MASTER) static int __devinit wm8995_spi_probe(struct spi_device *spi) { struct wm8995_priv *wm8995; int ret; wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL); if (!wm8995) return -ENOMEM; spi_set_drvdata(spi, wm8995); wm8995->regmap = regmap_init_spi(spi, &wm8995_regmap); if (IS_ERR(wm8995->regmap)) { ret = PTR_ERR(wm8995->regmap); dev_err(&spi->dev, "Failed to register regmap: %d\n", ret); goto err_alloc; } ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8995, wm8995_dai, ARRAY_SIZE(wm8995_dai)); if (ret < 0) goto err_regmap; return ret; err_regmap: regmap_exit(wm8995->regmap); err_alloc: kfree(wm8995); return ret; } static int __devexit wm8995_spi_remove(struct spi_device *spi) { struct wm8995_priv *wm8995 = spi_get_drvdata(spi); snd_soc_unregister_codec(&spi->dev); regmap_exit(wm8995->regmap); kfree(wm8995); return 0; } static struct spi_driver wm8995_spi_driver = { .driver = { .name = "wm8995", .owner = THIS_MODULE, }, .probe = wm8995_spi_probe, .remove = __devexit_p(wm8995_spi_remove) }; #endif #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static __devinit int wm8995_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8995_priv *wm8995; int ret; wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL); if (!wm8995) return -ENOMEM; i2c_set_clientdata(i2c, wm8995); wm8995->regmap = regmap_init_i2c(i2c, &wm8995_regmap); if (IS_ERR(wm8995->regmap)) { ret = PTR_ERR(wm8995->regmap); dev_err(&i2c->dev, "Failed to register regmap: %d\n", ret); goto err_alloc; } ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8995, wm8995_dai, ARRAY_SIZE(wm8995_dai)); if (ret < 0) { dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret); goto err_regmap; } return ret; err_regmap: regmap_exit(wm8995->regmap); err_alloc: kfree(wm8995); 
return ret; } static __devexit int wm8995_i2c_remove(struct i2c_client *client) { struct wm8995_priv *wm8995 = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); regmap_exit(wm8995->regmap); kfree(wm8995); return 0; } static const struct i2c_device_id wm8995_i2c_id[] = { {"wm8995", 0}, {} }; MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id); static struct i2c_driver wm8995_i2c_driver = { .driver = { .name = "wm8995", .owner = THIS_MODULE, }, .probe = wm8995_i2c_probe, .remove = __devexit_p(wm8995_i2c_remove), .id_table = wm8995_i2c_id }; #endif static int __init wm8995_modinit(void) { int ret = 0; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8995_i2c_driver); if (ret) { printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n", ret); } #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&wm8995_spi_driver); if (ret) { printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n", ret); } #endif return ret; } module_init(wm8995_modinit); static void __exit wm8995_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8995_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8995_spi_driver); #endif } module_exit(wm8995_exit); MODULE_DESCRIPTION("ASoC WM8995 driver"); MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
jamison904/Galaxy_Note_3
drivers/isdn/hisax/config.c
4988
47158
/* $Id: config.c,v 2.84.2.5 2004/02/11 13:21:33 keil Exp $ * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * by Kai Germaschewski <kai.germaschewski@gmx.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * based on the teles driver from Jan den Ouden * */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/timer.h> #include <linux/init.h> #include "hisax.h" #include <linux/module.h> #include <linux/kernel_stat.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/slab.h> #define HISAX_STATUS_BUFSIZE 4096 /* * This structure array contains one entry per card. An entry looks * like this: * * { type, protocol, p0, p1, p2, NULL } * * type * 1 Teles 16.0 p0=irq p1=membase p2=iobase * 2 Teles 8.0 p0=irq p1=membase * 3 Teles 16.3 p0=irq p1=iobase * 4 Creatix PNP p0=irq p1=IO0 (ISAC) p2=IO1 (HSCX) * 5 AVM A1 (Fritz) p0=irq p1=iobase * 6 ELSA PC [p0=iobase] or nothing (autodetect) * 7 ELSA Quickstep p0=irq p1=iobase * 8 Teles PCMCIA p0=irq p1=iobase * 9 ITK ix1-micro p0=irq p1=iobase * 10 ELSA PCMCIA p0=irq p1=iobase * 11 Eicon.Diehl Diva p0=irq p1=iobase * 12 Asuscom ISDNLink p0=irq p1=iobase * 13 Teleint p0=irq p1=iobase * 14 Teles 16.3c p0=irq p1=iobase * 15 Sedlbauer speed p0=irq p1=iobase * 15 Sedlbauer PC/104 p0=irq p1=iobase * 15 Sedlbauer speed pci no parameter * 16 USR Sportster internal p0=irq p1=iobase * 17 MIC card p0=irq p1=iobase * 18 ELSA Quickstep 1000PCI no parameter * 19 Compaq ISDN S0 ISA card p0=irq p1=IO0 (HSCX) p2=IO1 (ISAC) p3=IO2 * 20 Travers Technologies NETjet-S PCI card * 21 TELES PCI no parameter * 22 Sedlbauer Speed Star p0=irq p1=iobase * 23 reserved * 24 Dr Neuhaus Niccy PnP/PCI card p0=irq p1=IO0 p2=IO1 (PnP only) * 25 Teles S0Box p0=irq p1=iobase (from isapnp setup) * 26 AVM A1 PCMCIA (Fritz) p0=irq p1=iobase 
* 27 AVM PnP/PCI p0=irq p1=iobase (PCI no parameter) * 28 Sedlbauer Speed Fax+ p0=irq p1=iobase (from isapnp setup) * 29 Siemens I-Surf p0=irq p1=iobase p2=memory (from isapnp setup) * 30 ACER P10 p0=irq p1=iobase (from isapnp setup) * 31 HST Saphir p0=irq p1=iobase * 32 Telekom A4T none * 33 Scitel Quadro p0=subcontroller (4*S0, subctrl 1...4) * 34 Gazel ISDN cards * 35 HFC 2BDS0 PCI none * 36 Winbond 6692 PCI none * 37 HFC 2BDS0 S+/SP p0=irq p1=iobase * 38 Travers Technologies NETspider-U PCI card * 39 HFC 2BDS0-SP PCMCIA p0=irq p1=iobase * 40 hotplug interface * 41 Formula-n enter:now ISDN PCI a/b none * * protocol can be either ISDN_PTYPE_EURO or ISDN_PTYPE_1TR6 or ISDN_PTYPE_NI1 * * */ const char *CardType[] = { "No Card", "Teles 16.0", "Teles 8.0", "Teles 16.3", "Creatix/Teles PnP", "AVM A1", "Elsa ML", "Elsa Quickstep", "Teles PCMCIA", "ITK ix1-micro Rev.2", "Elsa PCMCIA", "Eicon.Diehl Diva", "ISDNLink", "TeleInt", "Teles 16.3c", "Sedlbauer Speed Card", "USR Sportster", "ith mic Linux", "Elsa PCI", "Compaq ISA", "NETjet-S", "Teles PCI", "Sedlbauer Speed Star (PCMCIA)", "AMD 7930", "NICCY", "S0Box", "AVM A1 (PCMCIA)", "AVM Fritz PnP/PCI", "Sedlbauer Speed Fax +", "Siemens I-Surf", "Acer P10", "HST Saphir", "Telekom A4T", "Scitel Quadro", "Gazel", "HFC 2BDS0 PCI", "Winbond 6692", "HFC 2BDS0 SX", "NETspider-U", "HFC-2BDS0-SP PCMCIA", "Hotplug", "Formula-n enter:now PCI a/b", }; #ifdef CONFIG_HISAX_ELSA #define DEFAULT_CARD ISDN_CTYPE_ELSA #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_AVM_A1 #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_A1 #define DEFAULT_CFG {10, 0x340, 0, 0} #endif #ifdef CONFIG_HISAX_AVM_A1_PCMCIA #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_A1_PCMCIA #define DEFAULT_CFG {11, 0x170, 0, 0} #endif #ifdef CONFIG_HISAX_FRITZPCI #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_FRITZPCI #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_16_3 #undef 
DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_16_3 #define DEFAULT_CFG {15, 0x180, 0, 0} #endif #ifdef CONFIG_HISAX_S0BOX #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_S0BOX #define DEFAULT_CFG {7, 0x378, 0, 0} #endif #ifdef CONFIG_HISAX_16_0 #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_16_0 #define DEFAULT_CFG {15, 0xd0000, 0xd80, 0} #endif #ifdef CONFIG_HISAX_TELESPCI #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_TELESPCI #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_IX1MICROR2 #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_IX1MICROR2 #define DEFAULT_CFG {5, 0x390, 0, 0} #endif #ifdef CONFIG_HISAX_DIEHLDIVA #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_DIEHLDIVA #define DEFAULT_CFG {0, 0x0, 0, 0} #endif #ifdef CONFIG_HISAX_ASUSCOM #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_ASUSCOM #define DEFAULT_CFG {5, 0x200, 0, 0} #endif #ifdef CONFIG_HISAX_TELEINT #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_TELEINT #define DEFAULT_CFG {5, 0x300, 0, 0} #endif #ifdef CONFIG_HISAX_SEDLBAUER #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_SEDLBAUER #define DEFAULT_CFG {11, 0x270, 0, 0} #endif #ifdef CONFIG_HISAX_SPORTSTER #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_SPORTSTER #define DEFAULT_CFG {7, 0x268, 0, 0} #endif #ifdef CONFIG_HISAX_MIC #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_MIC #define DEFAULT_CFG {12, 0x3e0, 0, 0} #endif #ifdef CONFIG_HISAX_NETJET #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_NETJET_S #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_HFCS #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_TELES3C #define DEFAULT_CFG {5, 0x500, 0, 0} #endif #ifdef CONFIG_HISAX_HFC_PCI #undef DEFAULT_CARD #undef DEFAULT_CFG #define 
DEFAULT_CARD ISDN_CTYPE_HFC_PCI #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_HFC_SX #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_HFC_SX #define DEFAULT_CFG {5, 0x2E0, 0, 0} #endif #ifdef CONFIG_HISAX_NICCY #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_NICCY #define DEFAULT_CFG {0, 0x0, 0, 0} #endif #ifdef CONFIG_HISAX_ISURF #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_ISURF #define DEFAULT_CFG {5, 0x100, 0xc8000, 0} #endif #ifdef CONFIG_HISAX_HSTSAPHIR #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_HSTSAPHIR #define DEFAULT_CFG {5, 0x250, 0, 0} #endif #ifdef CONFIG_HISAX_BKM_A4T #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_BKM_A4T #define DEFAULT_CFG {0, 0x0, 0, 0} #endif #ifdef CONFIG_HISAX_SCT_QUADRO #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_SCT_QUADRO #define DEFAULT_CFG {1, 0x0, 0, 0} #endif #ifdef CONFIG_HISAX_GAZEL #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_GAZEL #define DEFAULT_CFG {15, 0x180, 0, 0} #endif #ifdef CONFIG_HISAX_W6692 #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_W6692 #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_NETJET_U #undef DEFAULT_CARD #undef DEFAULT_CFG #define DEFAULT_CARD ISDN_CTYPE_NETJET_U #define DEFAULT_CFG {0, 0, 0, 0} #endif #ifdef CONFIG_HISAX_1TR6 #define DEFAULT_PROTO ISDN_PTYPE_1TR6 #define DEFAULT_PROTO_NAME "1TR6" #endif #ifdef CONFIG_HISAX_NI1 #undef DEFAULT_PROTO #define DEFAULT_PROTO ISDN_PTYPE_NI1 #undef DEFAULT_PROTO_NAME #define DEFAULT_PROTO_NAME "NI1" #endif #ifdef CONFIG_HISAX_EURO #undef DEFAULT_PROTO #define DEFAULT_PROTO ISDN_PTYPE_EURO #undef DEFAULT_PROTO_NAME #define DEFAULT_PROTO_NAME "EURO" #endif #ifndef DEFAULT_PROTO #define DEFAULT_PROTO ISDN_PTYPE_UNKNOWN #define DEFAULT_PROTO_NAME "UNKNOWN" #endif #ifndef DEFAULT_CARD #define DEFAULT_CARD 0 #define DEFAULT_CFG {0, 0, 
/*
 * Extract the revision number from an RCS "$Id: ... $" keyword string.
 *
 * revision: RCS keyword string, e.g. "$Id: config.c,v 2.84 2004/02/11 $"
 *
 * Returns a pointer into the caller's buffer holding just the revision
 * text (terminated in place, clobbering the character before '$'), or
 * the literal "???" if the string is not in RCS format. The caller must
 * pass a writable buffer (all callers strcpy into a local tmp first).
 *
 * Bug fix: if the string contained ':' but no closing '$', strchr
 * returned NULL and "*--p = 0" dereferenced (NULL - 1) — undefined
 * behavior. Now the truncation is skipped when '$' is absent.
 */
char *HiSax_getrev(const char *revision)
{
	char *rev;
	char *p;

	p = strchr(revision, ':');
	if (p) {
		rev = p + 2;
		p = strchr(rev, '$');
		if (p)
			*--p = 0;
	} else
		rev = "???";
	return rev;
}
Revision %s\n", HiSax_getrev(tmp)); strcpy(tmp, l3_revision); printk(KERN_INFO "HiSax: Layer3 Revision %s\n", HiSax_getrev(tmp)); strcpy(tmp, lli_revision); printk(KERN_INFO "HiSax: LinkLayer Revision %s\n", HiSax_getrev(tmp)); } #ifndef MODULE #define MAX_ARG (HISAX_MAX_CARDS * 5) static int __init HiSax_setup(char *line) { int i, j, argc; int ints[MAX_ARG + 1]; char *str; str = get_options(line, MAX_ARG, ints); argc = ints[0]; printk(KERN_DEBUG "HiSax_setup: argc(%d) str(%s)\n", argc, str); i = 0; j = 1; while (argc && (i < HISAX_MAX_CARDS)) { cards[i].protocol = DEFAULT_PROTO; if (argc) { cards[i].typ = ints[j]; j++; argc--; } if (argc) { cards[i].protocol = ints[j]; j++; argc--; } if (argc) { cards[i].para[0] = ints[j]; j++; argc--; } if (argc) { cards[i].para[1] = ints[j]; j++; argc--; } if (argc) { cards[i].para[2] = ints[j]; j++; argc--; } i++; } if (str && *str) { if (strlen(str) < HISAX_IDSIZE) strcpy(HiSaxID, str); else printk(KERN_WARNING "HiSax: ID too long!"); } else strcpy(HiSaxID, "HiSax"); HiSax_id = HiSaxID; return 1; } __setup("hisax=", HiSax_setup); #endif /* MODULES */ #if CARD_TELES0 extern int setup_teles0(struct IsdnCard *card); #endif #if CARD_TELES3 extern int setup_teles3(struct IsdnCard *card); #endif #if CARD_S0BOX extern int setup_s0box(struct IsdnCard *card); #endif #if CARD_TELESPCI extern int setup_telespci(struct IsdnCard *card); #endif #if CARD_AVM_A1 extern int setup_avm_a1(struct IsdnCard *card); #endif #if CARD_AVM_A1_PCMCIA extern int setup_avm_a1_pcmcia(struct IsdnCard *card); #endif #if CARD_FRITZPCI extern int setup_avm_pcipnp(struct IsdnCard *card); #endif #if CARD_ELSA extern int setup_elsa(struct IsdnCard *card); #endif #if CARD_IX1MICROR2 extern int setup_ix1micro(struct IsdnCard *card); #endif #if CARD_DIEHLDIVA extern int setup_diva(struct IsdnCard *card); #endif #if CARD_ASUSCOM extern int setup_asuscom(struct IsdnCard *card); #endif #if CARD_TELEINT extern int setup_TeleInt(struct IsdnCard *card); #endif #if 
CARD_SEDLBAUER extern int setup_sedlbauer(struct IsdnCard *card); #endif #if CARD_SPORTSTER extern int setup_sportster(struct IsdnCard *card); #endif #if CARD_MIC extern int setup_mic(struct IsdnCard *card); #endif #if CARD_NETJET_S extern int setup_netjet_s(struct IsdnCard *card); #endif #if CARD_HFCS extern int setup_hfcs(struct IsdnCard *card); #endif #if CARD_HFC_PCI extern int setup_hfcpci(struct IsdnCard *card); #endif #if CARD_HFC_SX extern int setup_hfcsx(struct IsdnCard *card); #endif #if CARD_NICCY extern int setup_niccy(struct IsdnCard *card); #endif #if CARD_ISURF extern int setup_isurf(struct IsdnCard *card); #endif #if CARD_HSTSAPHIR extern int setup_saphir(struct IsdnCard *card); #endif #if CARD_BKM_A4T extern int setup_bkm_a4t(struct IsdnCard *card); #endif #if CARD_SCT_QUADRO extern int setup_sct_quadro(struct IsdnCard *card); #endif #if CARD_GAZEL extern int setup_gazel(struct IsdnCard *card); #endif #if CARD_W6692 extern int setup_w6692(struct IsdnCard *card); #endif #if CARD_NETJET_U extern int setup_netjet_u(struct IsdnCard *card); #endif #if CARD_FN_ENTERNOW_PCI extern int setup_enternow_pci(struct IsdnCard *card); #endif /* * Find card with given driverId */ static inline struct IsdnCardState *hisax_findcard(int driverid) { int i; for (i = 0; i < nrcards; i++) if (cards[i].cs) if (cards[i].cs->myid == driverid) return cards[i].cs; return NULL; } /* * Find card with given card number */ #if 0 struct IsdnCardState *hisax_get_card(int cardnr) { if ((cardnr <= nrcards) && (cardnr > 0)) if (cards[cardnr - 1].cs) return cards[cardnr - 1].cs; return NULL; } #endif /* 0 */ static int HiSax_readstatus(u_char __user *buf, int len, int id, int channel) { int count, cnt; u_char __user *p = buf; struct IsdnCardState *cs = hisax_findcard(id); if (cs) { if (len > HISAX_STATUS_BUFSIZE) { printk(KERN_WARNING "HiSax: status overflow readstat %d/%d\n", len, HISAX_STATUS_BUFSIZE); } count = cs->status_end - cs->status_read + 1; if (count >= len) count = len; if 
/*
 * Format a timestamp given in 1/100ths of a second as "MM:SS.cc".
 *
 * s:    destination buffer; receives exactly 8 characters plus a
 *       terminating NUL (9 bytes total)
 * mark: time in centiseconds (typically derived from jiffies)
 *
 * Returns the number of characters written (always 8).
 */
int jiftime(char *s, long mark)
{
	/* Fill back-to-front, peeling off one digit per step. */
	s[8] = '\0';
	s[7] = mark % 10 + '0';		/* hundredths, low digit */
	mark /= 10;
	s[6] = mark % 10 + '0';		/* hundredths, high digit */
	mark /= 10;
	s[5] = '.';
	s[4] = mark % 10 + '0';		/* seconds, ones */
	mark /= 10;
	s[3] = mark % 6 + '0';		/* seconds, tens (0-5) */
	mark /= 6;
	s[2] = ':';
	s[1] = mark % 10 + '0';		/* minutes, ones */
	mark /= 10;
	s[0] = mark % 10 + '0';		/* minutes, tens */
	return 8;
}
current->kernel_stack_page, i); len = strlen(tmpbuf); for (p = tmpbuf, i = len; i > 0; i--, p++) { *cs->status_write++ = *p; if (cs->status_write > cs->status_end) cs->status_write = cs->status_buf; count++; } #endif spin_unlock_irqrestore(&cs->statlock, flags); if (count) { ic.command = ISDN_STAT_STAVAIL; ic.driver = cs->myid; ic.arg = count; cs->iif.statcallb(&ic); } } void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...) { va_list args; va_start(args, fmt); VHiSax_putstatus(cs, head, fmt, args); va_end(args); } int ll_run(struct IsdnCardState *cs, int addfeatures) { isdn_ctrl ic; ic.driver = cs->myid; ic.command = ISDN_STAT_RUN; cs->iif.features |= addfeatures; cs->iif.statcallb(&ic); return 0; } static void ll_stop(struct IsdnCardState *cs) { isdn_ctrl ic; ic.command = ISDN_STAT_STOP; ic.driver = cs->myid; cs->iif.statcallb(&ic); // CallcFreeChan(cs); } static void ll_unload(struct IsdnCardState *cs) { isdn_ctrl ic; ic.command = ISDN_STAT_UNLOAD; ic.driver = cs->myid; cs->iif.statcallb(&ic); kfree(cs->status_buf); cs->status_read = NULL; cs->status_write = NULL; cs->status_end = NULL; kfree(cs->dlog); cs->dlog = NULL; } static void closecard(int cardnr) { struct IsdnCardState *csta = cards[cardnr].cs; if (csta->bcs->BC_Close != NULL) { csta->bcs->BC_Close(csta->bcs + 1); csta->bcs->BC_Close(csta->bcs); } skb_queue_purge(&csta->rq); skb_queue_purge(&csta->sq); kfree(csta->rcvbuf); csta->rcvbuf = NULL; if (csta->tx_skb) { dev_kfree_skb(csta->tx_skb); csta->tx_skb = NULL; } if (csta->DC_Close != NULL) { csta->DC_Close(csta); } if (csta->cardmsg) csta->cardmsg(csta, CARD_RELEASE, NULL); if (csta->dbusytimer.function != NULL) // FIXME? 
		del_timer(&csta->dbusytimer);
	ll_unload(csta);
}

/*
 * Shared interrupt trampoline: forward to the card-specific handler and
 * count handled interrupts so init_card() can verify that the IRQ line
 * actually delivers interrupts.
 */
static irqreturn_t card_irq(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	irqreturn_t ret = cs->irq_func(intno, cs);

	if (ret == IRQ_HANDLED)
		cs->irq_cnt++;
	return ret;
}

/*
 * Initialize a card and verify interrupt delivery.
 *
 * Cards without an IRQ are simply sent CARD_INIT.  Otherwise the IRQ is
 * requested and CARD_INIT issued up to three times; after each attempt
 * we wait 10 ms and check whether the interrupt counter advanced.  A
 * silent IRQ line triggers CARD_RESET and a retry.
 *
 * Returns 0 on success, 1 if the IRQ could not be requested, 2 if no
 * interrupts arrived after all retries (IRQ freed again), 3 otherwise.
 */
static int init_card(struct IsdnCardState *cs)
{
	int irq_cnt, cnt = 3, ret;

	if (!cs->irq) {
		ret = cs->cardmsg(cs, CARD_INIT, NULL);
		return (ret);
	}
	irq_cnt = cs->irq_cnt = 0;
	printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
	       cs->irq, irq_cnt);
	if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
		printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
		       cs->irq);
		return 1;
	}
	while (cnt) {
		cs->cardmsg(cs, CARD_INIT, NULL);
		/* Timeout 10ms */
		msleep(10);
		printk(KERN_INFO "%s: IRQ %d count %d\n",
		       CardType[cs->typ], cs->irq, cs->irq_cnt);
		if (cs->irq_cnt == irq_cnt) {
			/* no interrupts seen since the last check */
			printk(KERN_WARNING
			       "%s: IRQ(%d) getting no interrupts during init %d\n",
			       CardType[cs->typ], cs->irq, 4 - cnt);
			if (cnt == 1) {
				free_irq(cs->irq, cs);
				return 2;
			} else {
				cs->cardmsg(cs, CARD_RESET, NULL);
				cnt--;
			}
		} else {
			cs->cardmsg(cs, CARD_TEST, NULL);
			return 0;
		}
	}
	return 3;
}

/*
 * Dispatch to the compiled-in setup routine for the configured card
 * type.  Returns the setup routine's result, 2 for dynamically
 * registered (hisax_register) cards, or 0 when support for the card
 * type was not built in.
 */
static int __devinit hisax_cs_setup_card(struct IsdnCard *card)
{
	int ret;

	switch (card->typ) {
#if CARD_TELES0
	case ISDN_CTYPE_16_0:
	case ISDN_CTYPE_8_0:
		ret = setup_teles0(card);
		break;
#endif
#if CARD_TELES3
	case ISDN_CTYPE_16_3:
	case ISDN_CTYPE_PNP:
	case ISDN_CTYPE_TELESPCMCIA:
	case ISDN_CTYPE_COMPAQ_ISA:
		ret = setup_teles3(card);
		break;
#endif
#if CARD_S0BOX
	case ISDN_CTYPE_S0BOX:
		ret = setup_s0box(card);
		break;
#endif
#if CARD_TELESPCI
	case ISDN_CTYPE_TELESPCI:
		ret = setup_telespci(card);
		break;
#endif
#if CARD_AVM_A1
	case ISDN_CTYPE_A1:
		ret = setup_avm_a1(card);
		break;
#endif
#if CARD_AVM_A1_PCMCIA
	case ISDN_CTYPE_A1_PCMCIA:
		ret = setup_avm_a1_pcmcia(card);
		break;
#endif
#if CARD_FRITZPCI
	case ISDN_CTYPE_FRITZPCI:
		ret = setup_avm_pcipnp(card);
		break;
#endif
#if CARD_ELSA
	case ISDN_CTYPE_ELSA:
	case ISDN_CTYPE_ELSA_PNP:
	case ISDN_CTYPE_ELSA_PCMCIA:
	case
ISDN_CTYPE_ELSA_PCI: ret = setup_elsa(card); break; #endif #if CARD_IX1MICROR2 case ISDN_CTYPE_IX1MICROR2: ret = setup_ix1micro(card); break; #endif #if CARD_DIEHLDIVA case ISDN_CTYPE_DIEHLDIVA: ret = setup_diva(card); break; #endif #if CARD_ASUSCOM case ISDN_CTYPE_ASUSCOM: ret = setup_asuscom(card); break; #endif #if CARD_TELEINT case ISDN_CTYPE_TELEINT: ret = setup_TeleInt(card); break; #endif #if CARD_SEDLBAUER case ISDN_CTYPE_SEDLBAUER: case ISDN_CTYPE_SEDLBAUER_PCMCIA: case ISDN_CTYPE_SEDLBAUER_FAX: ret = setup_sedlbauer(card); break; #endif #if CARD_SPORTSTER case ISDN_CTYPE_SPORTSTER: ret = setup_sportster(card); break; #endif #if CARD_MIC case ISDN_CTYPE_MIC: ret = setup_mic(card); break; #endif #if CARD_NETJET_S case ISDN_CTYPE_NETJET_S: ret = setup_netjet_s(card); break; #endif #if CARD_HFCS case ISDN_CTYPE_TELES3C: case ISDN_CTYPE_ACERP10: ret = setup_hfcs(card); break; #endif #if CARD_HFC_PCI case ISDN_CTYPE_HFC_PCI: ret = setup_hfcpci(card); break; #endif #if CARD_HFC_SX case ISDN_CTYPE_HFC_SX: ret = setup_hfcsx(card); break; #endif #if CARD_NICCY case ISDN_CTYPE_NICCY: ret = setup_niccy(card); break; #endif #if CARD_ISURF case ISDN_CTYPE_ISURF: ret = setup_isurf(card); break; #endif #if CARD_HSTSAPHIR case ISDN_CTYPE_HSTSAPHIR: ret = setup_saphir(card); break; #endif #if CARD_BKM_A4T case ISDN_CTYPE_BKM_A4T: ret = setup_bkm_a4t(card); break; #endif #if CARD_SCT_QUADRO case ISDN_CTYPE_SCT_QUADRO: ret = setup_sct_quadro(card); break; #endif #if CARD_GAZEL case ISDN_CTYPE_GAZEL: ret = setup_gazel(card); break; #endif #if CARD_W6692 case ISDN_CTYPE_W6692: ret = setup_w6692(card); break; #endif #if CARD_NETJET_U case ISDN_CTYPE_NETJET_U: ret = setup_netjet_u(card); break; #endif #if CARD_FN_ENTERNOW_PCI case ISDN_CTYPE_ENTERNOW: ret = setup_enternow_pci(card); break; #endif case ISDN_CTYPE_DYNAMIC: ret = 2; break; default: printk(KERN_WARNING "HiSax: Support for %s Card not selected\n", CardType[card->typ]); ret = 0; break; } return ret; } static int 
hisax_cs_new(int cardnr, char *id, struct IsdnCard *card, struct IsdnCardState **cs_out, int *busy_flag, struct module *lockowner) { struct IsdnCardState *cs; *cs_out = NULL; cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC); if (!cs) { printk(KERN_WARNING "HiSax: No memory for IsdnCardState(card %d)\n", cardnr + 1); goto out; } card->cs = cs; spin_lock_init(&cs->statlock); spin_lock_init(&cs->lock); cs->chanlimit = 2; /* maximum B-channel number */ cs->logecho = 0; /* No echo logging */ cs->cardnr = cardnr; cs->debug = L1_DEB_WARN; cs->HW_Flags = 0; cs->busy_flag = busy_flag; cs->irq_flags = I4L_IRQ_FLAG; #if TEI_PER_CARD if (card->protocol == ISDN_PTYPE_NI1) test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags); #else test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags); #endif cs->protocol = card->protocol; if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) { printk(KERN_WARNING "HiSax: Card Type %d out of range\n", card->typ); goto outf_cs; } if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for dlog(card %d)\n", cardnr + 1); goto outf_cs; } if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for status_buf(card %d)\n", cardnr + 1); goto outf_dlog; } cs->stlist = NULL; cs->status_read = cs->status_buf; cs->status_write = cs->status_buf; cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1; cs->typ = card->typ; #ifdef MODULE cs->iif.owner = lockowner; #endif strcpy(cs->iif.id, id); cs->iif.channels = 2; cs->iif.maxbufsize = MAX_DATA_SIZE; cs->iif.hl_hdrlen = MAX_HEADER_LEN; cs->iif.features = ISDN_FEATURE_L2_X75I | ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_HDLC_56K | ISDN_FEATURE_L2_TRANS | ISDN_FEATURE_L3_TRANS | #ifdef CONFIG_HISAX_1TR6 ISDN_FEATURE_P_1TR6 | #endif #ifdef CONFIG_HISAX_EURO ISDN_FEATURE_P_EURO | #endif #ifdef CONFIG_HISAX_NI1 ISDN_FEATURE_P_NI1 | #endif 0; cs->iif.command = HiSax_command; cs->iif.writecmd = NULL; cs->iif.writebuf_skb = 
HiSax_writebuf_skb; cs->iif.readstat = HiSax_readstatus; register_isdn(&cs->iif); cs->myid = cs->iif.channels; *cs_out = cs; return 1; /* success */ outf_dlog: kfree(cs->dlog); outf_cs: kfree(cs); card->cs = NULL; out: return 0; /* error */ } static int hisax_cs_setup(int cardnr, struct IsdnCard *card, struct IsdnCardState *cs) { int ret; if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n"); ll_unload(cs); goto outf_cs; } cs->rcvidx = 0; cs->tx_skb = NULL; cs->tx_cnt = 0; cs->event = 0; skb_queue_head_init(&cs->rq); skb_queue_head_init(&cs->sq); init_bcstate(cs, 0); init_bcstate(cs, 1); /* init_card only handles interrupts which are not */ /* used here for the loadable driver */ switch (card->typ) { case ISDN_CTYPE_DYNAMIC: ret = 0; break; default: ret = init_card(cs); break; } if (ret) { closecard(cardnr); goto outf_cs; } init_tei(cs, cs->protocol); ret = CallcNewChan(cs); if (ret) { closecard(cardnr); goto outf_cs; } /* ISAR needs firmware download first */ if (!test_bit(HW_ISAR, &cs->HW_Flags)) ll_run(cs, 0); return 1; outf_cs: kfree(cs); card->cs = NULL; return 0; } /* Used from an exported function but calls __devinit functions. * Tell modpost not to warn (__ref) */ static int __ref checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner, hisax_setup_func_t card_setup) { int ret; struct IsdnCard *card = cards + cardnr; struct IsdnCardState *cs; ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner); if (!ret) return 0; printk(KERN_INFO "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1, (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" : (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" : (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" : (card->protocol == ISDN_PTYPE_NI1) ? 
"NI1" : "NONE", cs->iif.id, cs->myid); ret = card_setup(card); if (!ret) { ll_unload(cs); goto outf_cs; } ret = hisax_cs_setup(cardnr, card, cs); goto out; outf_cs: kfree(cs); card->cs = NULL; out: return ret; } static void HiSax_shiftcards(int idx) { int i; for (i = idx; i < (HISAX_MAX_CARDS - 1); i++) memcpy(&cards[i], &cards[i + 1], sizeof(cards[i])); } static int __init HiSax_inithardware(int *busy_flag) { int foundcards = 0; int i = 0; int t = ','; int flg = 0; char *id; char *next_id = HiSax_id; char ids[20]; if (strchr(HiSax_id, ',')) t = ','; else if (strchr(HiSax_id, '%')) t = '%'; while (i < nrcards) { if (cards[i].typ < 1) break; id = next_id; if ((next_id = strchr(id, t))) { *next_id++ = 0; strcpy(ids, id); flg = i + 1; } else { next_id = id; if (flg >= i) strcpy(ids, id); else sprintf(ids, "%s%d", id, i); } if (checkcard(i, ids, busy_flag, THIS_MODULE, hisax_cs_setup_card)) { foundcards++; i++; } else { /* make sure we don't oops the module */ if (cards[i].typ > 0 && cards[i].typ <= ISDN_CTYPE_COUNT) { printk(KERN_WARNING "HiSax: Card %s not installed !\n", CardType[cards[i].typ]); } HiSax_shiftcards(i); nrcards--; } } return foundcards; } void HiSax_closecard(int cardnr) { int i, last = nrcards - 1; if (cardnr > last || cardnr < 0) return; if (cards[cardnr].cs) { ll_stop(cards[cardnr].cs); release_tei(cards[cardnr].cs); CallcFreeChan(cards[cardnr].cs); closecard(cardnr); if (cards[cardnr].cs->irq) free_irq(cards[cardnr].cs->irq, cards[cardnr].cs); kfree((void *) cards[cardnr].cs); cards[cardnr].cs = NULL; } i = cardnr; while (i <= last) { cards[i] = cards[i + 1]; i++; } nrcards--; } void HiSax_reportcard(int cardnr, int sel) { struct IsdnCardState *cs = cards[cardnr].cs; printk(KERN_DEBUG "HiSax: reportcard No %d\n", cardnr + 1); printk(KERN_DEBUG "HiSax: Type %s\n", CardType[cs->typ]); printk(KERN_DEBUG "HiSax: debuglevel %x\n", cs->debug); printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%lX\n", (ulong) & HiSax_reportcard); printk(KERN_DEBUG 
"HiSax: cs 0x%lX\n", (ulong) cs); printk(KERN_DEBUG "HiSax: HW_Flags %lx bc0 flg %lx bc1 flg %lx\n", cs->HW_Flags, cs->bcs[0].Flag, cs->bcs[1].Flag); printk(KERN_DEBUG "HiSax: bcs 0 mode %d ch%d\n", cs->bcs[0].mode, cs->bcs[0].channel); printk(KERN_DEBUG "HiSax: bcs 1 mode %d ch%d\n", cs->bcs[1].mode, cs->bcs[1].channel); #ifdef ERROR_STATISTIC printk(KERN_DEBUG "HiSax: dc errors(rx,crc,tx) %d,%d,%d\n", cs->err_rx, cs->err_crc, cs->err_tx); printk(KERN_DEBUG "HiSax: bc0 errors(inv,rdo,crc,tx) %d,%d,%d,%d\n", cs->bcs[0].err_inv, cs->bcs[0].err_rdo, cs->bcs[0].err_crc, cs->bcs[0].err_tx); printk(KERN_DEBUG "HiSax: bc1 errors(inv,rdo,crc,tx) %d,%d,%d,%d\n", cs->bcs[1].err_inv, cs->bcs[1].err_rdo, cs->bcs[1].err_crc, cs->bcs[1].err_tx); if (sel == 99) { cs->err_rx = 0; cs->err_crc = 0; cs->err_tx = 0; cs->bcs[0].err_inv = 0; cs->bcs[0].err_rdo = 0; cs->bcs[0].err_crc = 0; cs->bcs[0].err_tx = 0; cs->bcs[1].err_inv = 0; cs->bcs[1].err_rdo = 0; cs->bcs[1].err_crc = 0; cs->bcs[1].err_tx = 0; } #endif } static int __init HiSax_init(void) { int i, retval; #ifdef MODULE int j; int nzproto = 0; #endif HiSaxVersion(); retval = CallcNew(); if (retval) goto out; retval = Isdnl3New(); if (retval) goto out_callc; retval = Isdnl2New(); if (retval) goto out_isdnl3; retval = TeiNew(); if (retval) goto out_isdnl2; retval = Isdnl1New(); if (retval) goto out_tei; #ifdef MODULE if (!type[0]) { /* We 'll register drivers later, but init basic functions */ for (i = 0; i < HISAX_MAX_CARDS; i++) cards[i].typ = 0; return 0; } #ifdef CONFIG_HISAX_ELSA if (type[0] == ISDN_CTYPE_ELSA_PCMCIA) { /* we have exported and return in this case */ return 0; } #endif #ifdef CONFIG_HISAX_SEDLBAUER if (type[0] == ISDN_CTYPE_SEDLBAUER_PCMCIA) { /* we have to export and return in this case */ return 0; } #endif #ifdef CONFIG_HISAX_AVM_A1_PCMCIA if (type[0] == ISDN_CTYPE_A1_PCMCIA) { /* we have to export and return in this case */ return 0; } #endif #ifdef CONFIG_HISAX_HFC_SX if (type[0] == 
ISDN_CTYPE_HFC_SP_PCMCIA) { /* we have to export and return in this case */ return 0; } #endif #endif nrcards = 0; #ifdef MODULE if (id) /* If id= string used */ HiSax_id = id; for (i = j = 0; j < HISAX_MAX_CARDS; i++) { cards[j].typ = type[i]; if (protocol[i]) { cards[j].protocol = protocol[i]; nzproto++; } else { cards[j].protocol = DEFAULT_PROTO; } switch (type[i]) { case ISDN_CTYPE_16_0: cards[j].para[0] = irq[i]; cards[j].para[1] = mem[i]; cards[j].para[2] = io[i]; break; case ISDN_CTYPE_8_0: cards[j].para[0] = irq[i]; cards[j].para[1] = mem[i]; break; #ifdef IO0_IO1 case ISDN_CTYPE_PNP: case ISDN_CTYPE_NICCY: cards[j].para[0] = irq[i]; cards[j].para[1] = io0[i]; cards[j].para[2] = io1[i]; break; case ISDN_CTYPE_COMPAQ_ISA: cards[j].para[0] = irq[i]; cards[j].para[1] = io0[i]; cards[j].para[2] = io1[i]; cards[j].para[3] = io[i]; break; #endif case ISDN_CTYPE_ELSA: case ISDN_CTYPE_HFC_PCI: cards[j].para[0] = io[i]; break; case ISDN_CTYPE_16_3: case ISDN_CTYPE_TELESPCMCIA: case ISDN_CTYPE_A1: case ISDN_CTYPE_A1_PCMCIA: case ISDN_CTYPE_ELSA_PNP: case ISDN_CTYPE_ELSA_PCMCIA: case ISDN_CTYPE_IX1MICROR2: case ISDN_CTYPE_DIEHLDIVA: case ISDN_CTYPE_ASUSCOM: case ISDN_CTYPE_TELEINT: case ISDN_CTYPE_SEDLBAUER: case ISDN_CTYPE_SEDLBAUER_PCMCIA: case ISDN_CTYPE_SEDLBAUER_FAX: case ISDN_CTYPE_SPORTSTER: case ISDN_CTYPE_MIC: case ISDN_CTYPE_TELES3C: case ISDN_CTYPE_ACERP10: case ISDN_CTYPE_S0BOX: case ISDN_CTYPE_FRITZPCI: case ISDN_CTYPE_HSTSAPHIR: case ISDN_CTYPE_GAZEL: case ISDN_CTYPE_HFC_SX: case ISDN_CTYPE_HFC_SP_PCMCIA: cards[j].para[0] = irq[i]; cards[j].para[1] = io[i]; break; case ISDN_CTYPE_ISURF: cards[j].para[0] = irq[i]; cards[j].para[1] = io[i]; cards[j].para[2] = mem[i]; break; case ISDN_CTYPE_ELSA_PCI: case ISDN_CTYPE_NETJET_S: case ISDN_CTYPE_TELESPCI: case ISDN_CTYPE_W6692: case ISDN_CTYPE_NETJET_U: break; case ISDN_CTYPE_BKM_A4T: break; case ISDN_CTYPE_SCT_QUADRO: if (irq[i]) { cards[j].para[0] = irq[i]; } else { /* QUADRO is a 4 BRI card */ 
cards[j++].para[0] = 1; /* we need to check if further cards can be added */ if (j < HISAX_MAX_CARDS) { cards[j].typ = ISDN_CTYPE_SCT_QUADRO; cards[j].protocol = protocol[i]; cards[j++].para[0] = 2; } if (j < HISAX_MAX_CARDS) { cards[j].typ = ISDN_CTYPE_SCT_QUADRO; cards[j].protocol = protocol[i]; cards[j++].para[0] = 3; } if (j < HISAX_MAX_CARDS) { cards[j].typ = ISDN_CTYPE_SCT_QUADRO; cards[j].protocol = protocol[i]; cards[j].para[0] = 4; } } break; } j++; } if (!nzproto) { printk(KERN_WARNING "HiSax: Warning - no protocol specified\n"); printk(KERN_WARNING "HiSax: using protocol %s\n", DEFAULT_PROTO_NAME); } #endif if (!HiSax_id) HiSax_id = HiSaxID; if (!HiSaxID[0]) strcpy(HiSaxID, "HiSax"); for (i = 0; i < HISAX_MAX_CARDS; i++) if (cards[i].typ > 0) nrcards++; printk(KERN_DEBUG "HiSax: Total %d card%s defined\n", nrcards, (nrcards > 1) ? "s" : ""); /* Install only, if at least one card found */ if (!HiSax_inithardware(NULL)) return -ENODEV; return 0; out_tei: TeiFree(); out_isdnl2: Isdnl2Free(); out_isdnl3: Isdnl3Free(); out_callc: CallcFree(); out: return retval; } static void __exit HiSax_exit(void) { int cardnr = nrcards - 1; while (cardnr >= 0) HiSax_closecard(cardnr--); Isdnl1Free(); TeiFree(); Isdnl2Free(); Isdnl3Free(); CallcFree(); printk(KERN_INFO "HiSax module removed\n"); } #ifdef CONFIG_HOTPLUG int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card) { u_char ids[16]; int ret = -1; cards[nrcards] = *card; if (nrcards) sprintf(ids, "HiSax%d", nrcards); else sprintf(ids, "HiSax"); if (!checkcard(nrcards, ids, busy_flag, THIS_MODULE, hisax_cs_setup_card)) goto error; ret = nrcards; nrcards++; error: return ret; } EXPORT_SYMBOL(hisax_init_pcmcia); #endif EXPORT_SYMBOL(HiSax_closecard); #include "hisax_if.h" EXPORT_SYMBOL(hisax_register); EXPORT_SYMBOL(hisax_unregister); static void hisax_d_l1l2(struct hisax_if *ifc, int pr, void *arg); static void hisax_b_l1l2(struct hisax_if *ifc, int pr, void *arg); static void 
hisax_d_l2l1(struct PStack *st, int pr, void *arg); static void hisax_b_l2l1(struct PStack *st, int pr, void *arg); static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); static void hisax_bc_close(struct BCState *bcs); static void hisax_bh(struct work_struct *work); static void EChannel_proc_rcv(struct hisax_d_if *d_if); static int hisax_setup_card_dynamic(struct IsdnCard *card) { return 2; } int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], char *name, int protocol) { int i, retval; char id[20]; struct IsdnCardState *cs; for (i = 0; i < HISAX_MAX_CARDS; i++) { if (!cards[i].typ) break; } if (i >= HISAX_MAX_CARDS) return -EBUSY; cards[i].typ = ISDN_CTYPE_DYNAMIC; cards[i].protocol = protocol; sprintf(id, "%s%d", name, i); nrcards++; retval = checkcard(i, id, NULL, hisax_d_if->owner, hisax_setup_card_dynamic); if (retval == 0) { // yuck cards[i].typ = 0; nrcards--; return -EINVAL; } cs = cards[i].cs; hisax_d_if->cs = cs; cs->hw.hisax_d_if = hisax_d_if; cs->cardmsg = hisax_cardmsg; INIT_WORK(&cs->tqueue, hisax_bh); cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; for (i = 0; i < 2; i++) { cs->bcs[i].BC_SetStack = hisax_bc_setstack; cs->bcs[i].BC_Close = hisax_bc_close; b_if[i]->ifc.l1l2 = hisax_b_l1l2; hisax_d_if->b_if[i] = b_if[i]; } hisax_d_if->ifc.l1l2 = hisax_d_l1l2; skb_queue_head_init(&hisax_d_if->erq); clear_bit(0, &hisax_d_if->ph_state); return 0; } void hisax_unregister(struct hisax_d_if *hisax_d_if) { cards[hisax_d_if->cs->cardnr].typ = 0; HiSax_closecard(hisax_d_if->cs->cardnr); skb_queue_purge(&hisax_d_if->erq); } #include "isdnl1.h" static void hisax_sched_event(struct IsdnCardState *cs, int event) { test_and_set_bit(event, &cs->event); schedule_work(&cs->tqueue); } static void hisax_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); struct PStack *st; int pr; if 
(test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(E_RCVBUFREADY, &cs->event)) EChannel_proc_rcv(cs->hw.hisax_d_if); if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { if (test_bit(0, &cs->hw.hisax_d_if->ph_state)) pr = PH_ACTIVATE | INDICATION; else pr = PH_DEACTIVATE | INDICATION; for (st = cs->stlist; st; st = st->next) st->l1.l1l2(st, pr, NULL); } } static void hisax_b_sched_event(struct BCState *bcs, int event) { test_and_set_bit(event, &bcs->event); schedule_work(&bcs->tqueue); } static inline void D_L2L1(struct hisax_d_if *d_if, int pr, void *arg) { struct hisax_if *ifc = (struct hisax_if *) d_if; ifc->l2l1(ifc, pr, arg); } static inline void B_L2L1(struct hisax_b_if *b_if, int pr, void *arg) { struct hisax_if *ifc = (struct hisax_if *) b_if; ifc->l2l1(ifc, pr, arg); } static void hisax_d_l1l2(struct hisax_if *ifc, int pr, void *arg) { struct hisax_d_if *d_if = (struct hisax_d_if *) ifc; struct IsdnCardState *cs = d_if->cs; struct PStack *st; struct sk_buff *skb; switch (pr) { case PH_ACTIVATE | INDICATION: set_bit(0, &d_if->ph_state); hisax_sched_event(cs, D_L1STATECHANGE); break; case PH_DEACTIVATE | INDICATION: clear_bit(0, &d_if->ph_state); hisax_sched_event(cs, D_L1STATECHANGE); break; case PH_DATA | INDICATION: skb_queue_tail(&cs->rq, arg); hisax_sched_event(cs, D_RCVBUFREADY); break; case PH_DATA | CONFIRM: skb = skb_dequeue(&cs->sq); if (skb) { D_L2L1(d_if, PH_DATA | REQUEST, skb); break; } clear_bit(FLG_L1_DBUSY, &cs->HW_Flags); for (st = cs->stlist; st; st = st->next) { if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) { st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); break; } } break; case PH_DATA_E | INDICATION: skb_queue_tail(&d_if->erq, arg); hisax_sched_event(cs, E_RCVBUFREADY); break; default: printk("pr %#x\n", pr); break; } } static void hisax_b_l1l2(struct hisax_if *ifc, int pr, void *arg) { struct hisax_b_if *b_if = (struct hisax_b_if *) ifc; struct BCState *bcs = b_if->bcs; struct PStack *st 
= bcs->st; struct sk_buff *skb; // FIXME use isdnl1? switch (pr) { case PH_ACTIVATE | INDICATION: st->l1.l1l2(st, pr, NULL); break; case PH_DEACTIVATE | INDICATION: st->l1.l1l2(st, pr, NULL); clear_bit(BC_FLG_BUSY, &bcs->Flag); skb_queue_purge(&bcs->squeue); bcs->hw.b_if = NULL; break; case PH_DATA | INDICATION: skb_queue_tail(&bcs->rqueue, arg); hisax_b_sched_event(bcs, B_RCVBUFREADY); break; case PH_DATA | CONFIRM: bcs->tx_cnt -= (long)arg; if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += (long)arg; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } skb = skb_dequeue(&bcs->squeue); if (skb) { B_L2L1(b_if, PH_DATA | REQUEST, skb); break; } clear_bit(BC_FLG_BUSY, &bcs->Flag); if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) { st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } break; default: printk("hisax_b_l1l2 pr %#x\n", pr); break; } } static void hisax_d_l2l1(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = st->l1.hardware; struct hisax_d_if *hisax_d_if = cs->hw.hisax_d_if; struct sk_buff *skb = arg; switch (pr) { case PH_DATA | REQUEST: case PH_PULL | INDICATION: if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); Logl2Frame(cs, skb, "PH_DATA_REQ", 0); // FIXME lock? 
if (!test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags)) D_L2L1(hisax_d_if, PH_DATA | REQUEST, skb); else skb_queue_tail(&cs->sq, skb); break; case PH_PULL | REQUEST: if (!test_bit(FLG_L1_DBUSY, &cs->HW_Flags)) st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); else set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; default: D_L2L1(hisax_d_if, pr, arg); break; } } static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg) { return 0; } static void hisax_b_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; struct hisax_b_if *b_if = bcs->hw.b_if; switch (pr) { case PH_ACTIVATE | REQUEST: B_L2L1(b_if, pr, (void *)(unsigned long)st->l1.mode); break; case PH_DATA | REQUEST: case PH_PULL | INDICATION: // FIXME lock? if (!test_and_set_bit(BC_FLG_BUSY, &bcs->Flag)) { B_L2L1(b_if, PH_DATA | REQUEST, arg); } else { skb_queue_tail(&bcs->squeue, arg); } break; case PH_PULL | REQUEST: if (!test_bit(BC_FLG_BUSY, &bcs->Flag)) st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); else set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case PH_DEACTIVATE | REQUEST: test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); skb_queue_purge(&bcs->squeue); default: B_L2L1(b_if, pr, arg); break; } } static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs) { struct IsdnCardState *cs = st->l1.hardware; struct hisax_d_if *hisax_d_if = cs->hw.hisax_d_if; bcs->channel = st->l1.bc; bcs->hw.b_if = hisax_d_if->b_if[st->l1.bc]; hisax_d_if->b_if[st->l1.bc]->bcs = bcs; st->l1.bcs = bcs; st->l2.l2l1 = hisax_b_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); return 0; } static void hisax_bc_close(struct BCState *bcs) { struct hisax_b_if *b_if = bcs->hw.b_if; if (b_if) B_L2L1(b_if, PH_DEACTIVATE | REQUEST, NULL); } static void EChannel_proc_rcv(struct hisax_d_if *d_if) { struct IsdnCardState *cs = d_if->cs; u_char *ptr; struct sk_buff *skb; while ((skb = skb_dequeue(&d_if->erq)) != NULL) { if (cs->debug & 
DEB_DLOG_HEX) { ptr = cs->dlog; if ((skb->len) < MAX_DLOG_SPACE / 3 - 10) { *ptr++ = 'E'; *ptr++ = 'C'; *ptr++ = 'H'; *ptr++ = 'O'; *ptr++ = ':'; ptr += QuickHex(ptr, skb->data, skb->len); ptr--; *ptr++ = '\n'; *ptr = 0; HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); } dev_kfree_skb_any(skb); } } #ifdef CONFIG_PCI #include <linux/pci.h> static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = { #ifdef CONFIG_HISAX_FRITZPCI {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, #endif #ifdef CONFIG_HISAX_DIEHLDIVA {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20) }, {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U) }, {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201) }, /*##########################################################################*/ {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA202) }, /*##########################################################################*/ #endif #ifdef CONFIG_HISAX_ELSA {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_MICROLINK) }, {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_QS3000) }, #endif #ifdef CONFIG_HISAX_GAZEL {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R685) }, {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R753) }, {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO) }, {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_OLITEC) }, #endif #ifdef CONFIG_HISAX_SCT_QUADRO {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_9050) }, #endif #ifdef CONFIG_HISAX_NICCY {PCI_VDEVICE(SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY) }, #endif #ifdef CONFIG_HISAX_SEDLBAUER {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_100) }, #endif #if defined(CONFIG_HISAX_NETJET) || defined(CONFIG_HISAX_NETJET_U) {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_300) }, #endif #if defined(CONFIG_HISAX_TELESPCI) || defined(CONFIG_HISAX_SCT_QUADRO) {PCI_VDEVICE(ZORAN, PCI_DEVICE_ID_ZORAN_36120) }, #endif #ifdef CONFIG_HISAX_W6692 {PCI_VDEVICE(DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH) }, {PCI_VDEVICE(WINBOND2, PCI_DEVICE_ID_WINBOND2_6692) }, #endif #ifdef 
CONFIG_HISAX_HFC_PCI {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700) }, {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701) }, {PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1) }, {PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675) }, {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT) }, {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T) }, {PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575) }, {PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0) }, {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E) }, {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E) }, {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A) }, {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A) }, #endif { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, hisax_pci_tbl); #endif /* CONFIG_PCI */ module_init(HiSax_init); module_exit(HiSax_exit); EXPORT_SYMBOL(FsmNew); EXPORT_SYMBOL(FsmFree); EXPORT_SYMBOL(FsmEvent); EXPORT_SYMBOL(FsmChangeState); EXPORT_SYMBOL(FsmInitTimer); EXPORT_SYMBOL(FsmDelTimer); EXPORT_SYMBOL(FsmRestartTimer);
gpl-2.0
AscendG630-DEV/android_kernel_g630u20
drivers/rtc/rtc-vr41xx.c
4988
9503
/* * Driver for NEC VR4100 series Real Time Clock unit. * * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/err.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/log2.h> #include <asm/div64.h> #include <asm/io.h> #include <asm/uaccess.h> MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); MODULE_DESCRIPTION("NEC VR4100 series RTC driver"); MODULE_LICENSE("GPL v2"); /* RTC 1 registers */ #define ETIMELREG 0x00 #define ETIMEMREG 0x02 #define ETIMEHREG 0x04 /* RFU */ #define ECMPLREG 0x08 #define ECMPMREG 0x0a #define ECMPHREG 0x0c /* RFU */ #define RTCL1LREG 0x10 #define RTCL1HREG 0x12 #define RTCL1CNTLREG 0x14 #define RTCL1CNTHREG 0x16 #define RTCL2LREG 0x18 #define RTCL2HREG 0x1a #define RTCL2CNTLREG 0x1c #define RTCL2CNTHREG 0x1e /* RTC 2 registers */ #define TCLKLREG 0x00 #define TCLKHREG 0x02 #define TCLKCNTLREG 0x04 #define TCLKCNTHREG 0x06 /* RFU */ #define RTCINTREG 0x1e #define TCLOCK_INT 0x08 #define RTCLONG2_INT 0x04 #define RTCLONG1_INT 0x02 #define ELAPSEDTIME_INT 0x01 #define RTC_FREQUENCY 32768 #define MAX_PERIODIC_RATE 6553 static 
void __iomem *rtc1_base; static void __iomem *rtc2_base; #define rtc1_read(offset) readw(rtc1_base + (offset)) #define rtc1_write(offset, value) writew((value), rtc1_base + (offset)) #define rtc2_read(offset) readw(rtc2_base + (offset)) #define rtc2_write(offset, value) writew((value), rtc2_base + (offset)) static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */ static DEFINE_SPINLOCK(rtc_lock); static char rtc_name[] = "RTC"; static unsigned long periodic_count; static unsigned int alarm_enabled; static int aie_irq; static int pie_irq; static inline unsigned long read_elapsed_second(void) { unsigned long first_low, first_mid, first_high; unsigned long second_low, second_mid, second_high; do { first_low = rtc1_read(ETIMELREG); first_mid = rtc1_read(ETIMEMREG); first_high = rtc1_read(ETIMEHREG); second_low = rtc1_read(ETIMELREG); second_mid = rtc1_read(ETIMEMREG); second_high = rtc1_read(ETIMEHREG); } while (first_low != second_low || first_mid != second_mid || first_high != second_high); return (first_high << 17) | (first_mid << 1) | (first_low >> 15); } static inline void write_elapsed_second(unsigned long sec) { spin_lock_irq(&rtc_lock); rtc1_write(ETIMELREG, (uint16_t)(sec << 15)); rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1)); rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17)); spin_unlock_irq(&rtc_lock); } static void vr41xx_rtc_release(struct device *dev) { spin_lock_irq(&rtc_lock); rtc1_write(ECMPLREG, 0); rtc1_write(ECMPMREG, 0); rtc1_write(ECMPHREG, 0); rtc1_write(RTCL1LREG, 0); rtc1_write(RTCL1HREG, 0); spin_unlock_irq(&rtc_lock); disable_irq(aie_irq); disable_irq(pie_irq); } static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time) { unsigned long epoch_sec, elapsed_sec; epoch_sec = mktime(epoch, 1, 1, 0, 0, 0); elapsed_sec = read_elapsed_second(); rtc_time_to_tm(epoch_sec + elapsed_sec, time); return 0; } static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time) { unsigned long epoch_sec, current_sec; epoch_sec = 
mktime(epoch, 1, 1, 0, 0, 0); current_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, time->tm_hour, time->tm_min, time->tm_sec); write_elapsed_second(current_sec - epoch_sec); return 0; } static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { unsigned long low, mid, high; struct rtc_time *time = &wkalrm->time; spin_lock_irq(&rtc_lock); low = rtc1_read(ECMPLREG); mid = rtc1_read(ECMPMREG); high = rtc1_read(ECMPHREG); wkalrm->enabled = alarm_enabled; spin_unlock_irq(&rtc_lock); rtc_time_to_tm((high << 17) | (mid << 1) | (low >> 15), time); return 0; } static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { unsigned long alarm_sec; struct rtc_time *time = &wkalrm->time; alarm_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday, time->tm_hour, time->tm_min, time->tm_sec); spin_lock_irq(&rtc_lock); if (alarm_enabled) disable_irq(aie_irq); rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15)); rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1)); rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17)); if (wkalrm->enabled) enable_irq(aie_irq); alarm_enabled = wkalrm->enabled; spin_unlock_irq(&rtc_lock); return 0; } static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { switch (cmd) { case RTC_EPOCH_READ: return put_user(epoch, (unsigned long __user *)arg); case RTC_EPOCH_SET: /* Doesn't support before 1900 */ if (arg < 1900) return -EINVAL; epoch = arg; break; default: return -ENOIOCTLCMD; } return 0; } static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { spin_lock_irq(&rtc_lock); if (enabled) { if (!alarm_enabled) { enable_irq(aie_irq); alarm_enabled = 1; } } else { if (alarm_enabled) { disable_irq(aie_irq); alarm_enabled = 0; } } spin_unlock_irq(&rtc_lock); return 0; } static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) { struct platform_device *pdev = (struct platform_device *)dev_id; struct rtc_device *rtc = 
platform_get_drvdata(pdev); rtc2_write(RTCINTREG, ELAPSEDTIME_INT); rtc_update_irq(rtc, 1, RTC_AF); return IRQ_HANDLED; } static irqreturn_t rtclong1_interrupt(int irq, void *dev_id) { struct platform_device *pdev = (struct platform_device *)dev_id; struct rtc_device *rtc = platform_get_drvdata(pdev); unsigned long count = periodic_count; rtc2_write(RTCINTREG, RTCLONG1_INT); rtc1_write(RTCL1LREG, count); rtc1_write(RTCL1HREG, count >> 16); rtc_update_irq(rtc, 1, RTC_PF); return IRQ_HANDLED; } static const struct rtc_class_ops vr41xx_rtc_ops = { .release = vr41xx_rtc_release, .ioctl = vr41xx_rtc_ioctl, .read_time = vr41xx_rtc_read_time, .set_time = vr41xx_rtc_set_time, .read_alarm = vr41xx_rtc_read_alarm, .set_alarm = vr41xx_rtc_set_alarm, }; static int __devinit rtc_probe(struct platform_device *pdev) { struct resource *res; struct rtc_device *rtc; int retval; if (pdev->num_resources != 4) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EBUSY; rtc1_base = ioremap(res->start, resource_size(res)); if (!rtc1_base) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { retval = -EBUSY; goto err_rtc1_iounmap; } rtc2_base = ioremap(res->start, resource_size(res)); if (!rtc2_base) { retval = -EBUSY; goto err_rtc1_iounmap; } rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { retval = PTR_ERR(rtc); goto err_iounmap_all; } rtc->max_user_freq = MAX_PERIODIC_RATE; spin_lock_irq(&rtc_lock); rtc1_write(ECMPLREG, 0); rtc1_write(ECMPMREG, 0); rtc1_write(ECMPHREG, 0); rtc1_write(RTCL1LREG, 0); rtc1_write(RTCL1HREG, 0); spin_unlock_irq(&rtc_lock); aie_irq = platform_get_irq(pdev, 0); if (aie_irq <= 0) { retval = -EBUSY; goto err_device_unregister; } retval = request_irq(aie_irq, elapsedtime_interrupt, 0, "elapsed_time", pdev); if (retval < 0) goto err_device_unregister; pie_irq = platform_get_irq(pdev, 1); if (pie_irq <= 0) goto err_free_irq; retval = request_irq(pie_irq, 
rtclong1_interrupt, 0, "rtclong1", pdev); if (retval < 0) goto err_free_irq; platform_set_drvdata(pdev, rtc); disable_irq(aie_irq); disable_irq(pie_irq); printk(KERN_INFO "rtc: Real Time Clock of NEC VR4100 series\n"); return 0; err_free_irq: free_irq(aie_irq, pdev); err_device_unregister: rtc_device_unregister(rtc); err_iounmap_all: iounmap(rtc2_base); rtc2_base = NULL; err_rtc1_iounmap: iounmap(rtc1_base); rtc1_base = NULL; return retval; } static int __devexit rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc; rtc = platform_get_drvdata(pdev); if (rtc) rtc_device_unregister(rtc); platform_set_drvdata(pdev, NULL); free_irq(aie_irq, pdev); free_irq(pie_irq, pdev); if (rtc1_base) iounmap(rtc1_base); if (rtc2_base) iounmap(rtc2_base); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:RTC"); static struct platform_driver rtc_platform_driver = { .probe = rtc_probe, .remove = __devexit_p(rtc_remove), .driver = { .name = rtc_name, .owner = THIS_MODULE, }, }; module_platform_driver(rtc_platform_driver);
gpl-2.0
schuhumi/i9100-proper-linux-kernel
net/rxrpc/af_rxrpc.c
6268
20966
/* AF_RXRPC implementation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/net.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/key-type.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" MODULE_DESCRIPTION("RxRPC network protocol"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_RXRPC); unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "RxRPC debugging mask"); static int sysctl_rxrpc_max_qlen __read_mostly = 10; static struct proto rxrpc_proto; static const struct proto_ops rxrpc_rpc_ops; /* local epoch for detecting local-end reset */ __be32 rxrpc_epoch; /* current debugging ID */ atomic_t rxrpc_debug_id; /* count of skbs currently in use */ atomic_t rxrpc_n_skbs; struct workqueue_struct *rxrpc_workqueue; static void rxrpc_sock_destructor(struct sock *); /* * see if an RxRPC socket is currently writable */ static inline int rxrpc_writable(struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; } /* * wait for write bufferage to become available */ static void rxrpc_write_space(struct sock *sk) { _enter("%p", sk); rcu_read_lock(); if (rxrpc_writable(sk)) { struct socket_wq *wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible(&wq->wait); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } /* * validate an RxRPC address */ static int rxrpc_validate_address(struct rxrpc_sock *rx, struct sockaddr_rxrpc *srx, 
int len) { if (len < sizeof(struct sockaddr_rxrpc)) return -EINVAL; if (srx->srx_family != AF_RXRPC) return -EAFNOSUPPORT; if (srx->transport_type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; len -= offsetof(struct sockaddr_rxrpc, transport); if (srx->transport_len < sizeof(sa_family_t) || srx->transport_len > len) return -EINVAL; if (srx->transport.family != rx->proto) return -EAFNOSUPPORT; switch (srx->transport.family) { case AF_INET: _debug("INET: %x @ %pI4", ntohs(srx->transport.sin.sin_port), &srx->transport.sin.sin_addr); if (srx->transport_len > 8) memset((void *)&srx->transport + 8, 0, srx->transport_len - 8); break; case AF_INET6: default: return -EAFNOSUPPORT; } return 0; } /* * bind a local address to an RxRPC socket */ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr; struct sock *sk = sock->sk; struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sk), *prx; __be16 service_id; int ret; _enter("%p,%p,%d", rx, saddr, len); ret = rxrpc_validate_address(rx, srx, len); if (ret < 0) goto error; lock_sock(&rx->sk); if (rx->sk.sk_state != RXRPC_UNCONNECTED) { ret = -EINVAL; goto error_unlock; } memcpy(&rx->srx, srx, sizeof(rx->srx)); /* find a local transport endpoint if we don't have one already */ local = rxrpc_lookup_local(&rx->srx); if (IS_ERR(local)) { ret = PTR_ERR(local); goto error_unlock; } rx->local = local; if (srx->srx_service) { service_id = htons(srx->srx_service); write_lock_bh(&local->services_lock); list_for_each_entry(prx, &local->services, listen_link) { if (prx->service_id == service_id) goto service_in_use; } rx->service_id = service_id; list_add_tail(&rx->listen_link, &local->services); write_unlock_bh(&local->services_lock); rx->sk.sk_state = RXRPC_SERVER_BOUND; } else { rx->sk.sk_state = RXRPC_CLIENT_BOUND; } release_sock(&rx->sk); _leave(" = 0"); return 0; service_in_use: ret = -EADDRINUSE; write_unlock_bh(&local->services_lock); error_unlock: 
release_sock(&rx->sk); error: _leave(" = %d", ret); return ret; } /* * set the number of pending calls permitted on a listening socket */ static int rxrpc_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct rxrpc_sock *rx = rxrpc_sk(sk); int ret; _enter("%p,%d", rx, backlog); lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNCONNECTED: ret = -EADDRNOTAVAIL; break; case RXRPC_CLIENT_BOUND: case RXRPC_CLIENT_CONNECTED: default: ret = -EBUSY; break; case RXRPC_SERVER_BOUND: ASSERT(rx->local != NULL); sk->sk_max_ack_backlog = backlog; rx->sk.sk_state = RXRPC_SERVER_LISTENING; ret = 0; break; } release_sock(&rx->sk); _leave(" = %d", ret); return ret; } /* * find a transport by address */ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock, struct sockaddr *addr, int addr_len, int flags, gfp_t gfp) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct rxrpc_transport *trans; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct rxrpc_peer *peer; _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); ASSERT(rx->local != NULL); ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED); if (rx->srx.transport_type != srx->transport_type) return ERR_PTR(-ESOCKTNOSUPPORT); if (rx->srx.transport.family != srx->transport.family) return ERR_PTR(-EAFNOSUPPORT); /* find a remote transport endpoint from the local one */ peer = rxrpc_get_peer(srx, gfp); if (IS_ERR(peer)) return ERR_CAST(peer); /* find a transport */ trans = rxrpc_get_transport(rx->local, peer, gfp); rxrpc_put_peer(peer); _leave(" = %p", trans); return trans; } /** * rxrpc_kernel_begin_call - Allow a kernel service to begin a call * @sock: The socket on which to make the call * @srx: The address of the peer to contact (defaults to socket setting) * @key: The security context to use (defaults to socket setting) * @user_call_ID: The ID to use * * Allow a kernel service to begin a call on the nominated socket. 
This just * sets up all the internal tracking structures and allocates connection and * call IDs as appropriate. The call to be used is returned. * * The default socket destination address and security may be overridden by * supplying @srx and @key. */ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, struct sockaddr_rxrpc *srx, struct key *key, unsigned long user_call_ID, gfp_t gfp) { struct rxrpc_conn_bundle *bundle; struct rxrpc_transport *trans; struct rxrpc_call *call; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); __be16 service_id; _enter(",,%x,%lx", key_serial(key), user_call_ID); lock_sock(&rx->sk); if (srx) { trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx, sizeof(*srx), 0, gfp); if (IS_ERR(trans)) { call = ERR_CAST(trans); trans = NULL; goto out_notrans; } } else { trans = rx->trans; if (!trans) { call = ERR_PTR(-ENOTCONN); goto out_notrans; } atomic_inc(&trans->usage); } service_id = rx->service_id; if (srx) service_id = htons(srx->srx_service); if (!key) key = rx->key; if (key && !key->payload.data) key = NULL; /* a no-security key */ bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp); if (IS_ERR(bundle)) { call = ERR_CAST(bundle); goto out; } call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true, gfp); rxrpc_put_bundle(trans, bundle); out: rxrpc_put_transport(trans); out_notrans: release_sock(&rx->sk); _leave(" = %p", call); return call; } EXPORT_SYMBOL(rxrpc_kernel_begin_call); /** * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using * @call: The call to end * * Allow a kernel service to end a call it was using. The call must be * complete before this is called (the call should be aborted if necessary). 
*/ void rxrpc_kernel_end_call(struct rxrpc_call *call) { _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); rxrpc_remove_user_ID(call->socket, call); rxrpc_put_call(call); } EXPORT_SYMBOL(rxrpc_kernel_end_call); /** * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages * @sock: The socket to intercept received messages on * @interceptor: The function to pass the messages to * * Allow a kernel service to intercept messages heading for the Rx queue on an * RxRPC socket. They get passed to the specified function instead. * @interceptor should free the socket buffers it is given. @interceptor is * called with the socket receive queue spinlock held and softirqs disabled - * this ensures that the messages will be delivered in the right order. */ void rxrpc_kernel_intercept_rx_messages(struct socket *sock, rxrpc_interceptor_t interceptor) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); _enter(""); rx->interceptor = interceptor; } EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages); /* * connect an RxRPC socket * - this just targets it at a specific destination; no actual connection * negotiation takes place */ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct sock *sk = sock->sk; struct rxrpc_transport *trans; struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sk); int ret; _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); ret = rxrpc_validate_address(rx, srx, addr_len); if (ret < 0) { _leave(" = %d [bad addr]", ret); return ret; } lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNCONNECTED: /* find a local transport endpoint if we don't have one already */ ASSERTCMP(rx->local, ==, NULL); rx->srx.srx_family = AF_RXRPC; rx->srx.srx_service = 0; rx->srx.transport_type = srx->transport_type; rx->srx.transport_len = sizeof(sa_family_t); rx->srx.transport.family = srx->transport.family; local = 
rxrpc_lookup_local(&rx->srx); if (IS_ERR(local)) { release_sock(&rx->sk); return PTR_ERR(local); } rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_BOUND; case RXRPC_CLIENT_BOUND: break; case RXRPC_CLIENT_CONNECTED: release_sock(&rx->sk); return -EISCONN; default: release_sock(&rx->sk); return -EBUSY; /* server sockets can't connect as well */ } trans = rxrpc_name_to_transport(sock, addr, addr_len, flags, GFP_KERNEL); if (IS_ERR(trans)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(trans)); return PTR_ERR(trans); } rx->trans = trans; rx->service_id = htons(srx->srx_service); rx->sk.sk_state = RXRPC_CLIENT_CONNECTED; release_sock(&rx->sk); return 0; } /* * send a message through an RxRPC socket * - in a client this does a number of things: * - finds/sets up a connection for the security specified (if any) * - initiates a call (ID in control data) * - ends the request phase of a call (if MSG_MORE is not set) * - sends a call data packet * - may send an abort (abort code in control data) */ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len) { struct rxrpc_transport *trans; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); int ret; _enter(",{%d},,%zu", rx->sk.sk_state, len); if (m->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (m->msg_name) { ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen); if (ret < 0) { _leave(" = %d [bad addr]", ret); return ret; } } trans = NULL; lock_sock(&rx->sk); if (m->msg_name) { ret = -EISCONN; trans = rxrpc_name_to_transport(sock, m->msg_name, m->msg_namelen, 0, GFP_KERNEL); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } } else { trans = rx->trans; if (trans) atomic_inc(&trans->usage); } switch (rx->sk.sk_state) { case RXRPC_SERVER_LISTENING: if (!m->msg_name) { ret = rxrpc_server_sendmsg(iocb, rx, m, len); break; } case RXRPC_SERVER_BOUND: case RXRPC_CLIENT_BOUND: if (!m->msg_name) { ret = -ENOTCONN; break; } case RXRPC_CLIENT_CONNECTED: ret = 
rxrpc_client_sendmsg(iocb, rx, trans, m, len); break; default: ret = -ENOTCONN; break; } out: release_sock(&rx->sk); if (trans) rxrpc_put_transport(trans); _leave(" = %d", ret); return ret; } /* * set RxRPC socket options */ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); unsigned min_sec_level; int ret; _enter(",%d,%d,,%d", level, optname, optlen); lock_sock(&rx->sk); ret = -EOPNOTSUPP; if (level == SOL_RXRPC) { switch (optname) { case RXRPC_EXCLUSIVE_CONNECTION: ret = -EINVAL; if (optlen != 0) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); goto success; case RXRPC_SECURITY_KEY: ret = -EINVAL; if (rx->key) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = rxrpc_request_key(rx, optval, optlen); goto error; case RXRPC_SECURITY_KEYRING: ret = -EINVAL; if (rx->key) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = rxrpc_server_keyring(rx, optval, optlen); goto error; case RXRPC_MIN_SECURITY_LEVEL: ret = -EINVAL; if (optlen != sizeof(unsigned)) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = get_user(min_sec_level, (unsigned __user *) optval); if (ret < 0) goto error; ret = -EINVAL; if (min_sec_level > RXRPC_SECURITY_MAX) goto error; rx->min_sec_level = min_sec_level; goto success; default: break; } } success: ret = 0; error: release_sock(&rx->sk); return ret; } /* * permit an RxRPC socket to be polled */ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* the socket is readable if there are any messages waiting on the Rx * queue */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* the socket is 
writable if there is space to add new data to the * socket; there is no guarantee that any particular call in progress * on the socket may have space in the Tx ACK window */ if (rxrpc_writable(sk)) mask |= POLLOUT | POLLWRNORM; return mask; } /* * create an RxRPC socket */ static int rxrpc_create(struct net *net, struct socket *sock, int protocol, int kern) { struct rxrpc_sock *rx; struct sock *sk; _enter("%p,%d", sock, protocol); if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; /* we support transport protocol UDP only */ if (protocol != PF_INET) return -EPROTONOSUPPORT; if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; sock->ops = &rxrpc_rpc_ops; sock->state = SS_UNCONNECTED; sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_state = RXRPC_UNCONNECTED; sk->sk_write_space = rxrpc_write_space; sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen; sk->sk_destruct = rxrpc_sock_destructor; rx = rxrpc_sk(sk); rx->proto = protocol; rx->calls = RB_ROOT; INIT_LIST_HEAD(&rx->listen_link); INIT_LIST_HEAD(&rx->secureq); INIT_LIST_HEAD(&rx->acceptq); rwlock_init(&rx->call_lock); memset(&rx->srx, 0, sizeof(rx->srx)); _leave(" = 0 [%p]", rx); return 0; } /* * RxRPC socket destructor */ static void rxrpc_sock_destructor(struct sock *sk) { _enter("%p", sk); rxrpc_purge_queue(&sk->sk_receive_queue); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(!sk_unhashed(sk)); WARN_ON(sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { printk("Attempt to release alive rxrpc socket: %p\n", sk); return; } } /* * release an RxRPC socket */ static int rxrpc_release_sock(struct sock *sk) { struct rxrpc_sock *rx = rxrpc_sk(sk); _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); /* declare the socket closed for business */ sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; spin_lock_bh(&sk->sk_receive_queue.lock); sk->sk_state = RXRPC_CLOSE; spin_unlock_bh(&sk->sk_receive_queue.lock); ASSERTCMP(rx->listen_link.next, !=, 
LIST_POISON1); if (!list_empty(&rx->listen_link)) { write_lock_bh(&rx->local->services_lock); list_del(&rx->listen_link); write_unlock_bh(&rx->local->services_lock); } /* try to flush out this socket */ rxrpc_release_calls_on_socket(rx); flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); if (rx->conn) { rxrpc_put_connection(rx->conn); rx->conn = NULL; } if (rx->bundle) { rxrpc_put_bundle(rx->trans, rx->bundle); rx->bundle = NULL; } if (rx->trans) { rxrpc_put_transport(rx->trans); rx->trans = NULL; } if (rx->local) { rxrpc_put_local(rx->local); rx->local = NULL; } key_put(rx->key); rx->key = NULL; key_put(rx->securities); rx->securities = NULL; sock_put(sk); _leave(" = 0"); return 0; } /* * release an RxRPC BSD socket on close() or equivalent */ static int rxrpc_release(struct socket *sock) { struct sock *sk = sock->sk; _enter("%p{%p}", sock, sk); if (!sk) return 0; sock->sk = NULL; return rxrpc_release_sock(sk); } /* * RxRPC network protocol */ static const struct proto_ops rxrpc_rpc_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = rxrpc_release, .bind = rxrpc_bind, .connect = rxrpc_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = rxrpc_poll, .ioctl = sock_no_ioctl, .listen = rxrpc_listen, .shutdown = sock_no_shutdown, .setsockopt = rxrpc_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = rxrpc_sendmsg, .recvmsg = rxrpc_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct proto rxrpc_proto = { .name = "RXRPC", .owner = THIS_MODULE, .obj_size = sizeof(struct rxrpc_sock), .max_header = sizeof(struct rxrpc_header), }; static const struct net_proto_family rxrpc_family_ops = { .family = PF_RXRPC, .create = rxrpc_create, .owner = THIS_MODULE, }; /* * initialise and register the RxRPC protocol */ static int __init af_rxrpc_init(void) { struct sk_buff *dummy_skb; int ret = -1; BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb)); 
rxrpc_epoch = htonl(get_seconds()); ret = -ENOMEM; rxrpc_call_jar = kmem_cache_create( "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, SLAB_HWCACHE_ALIGN, NULL); if (!rxrpc_call_jar) { printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); goto error_call_jar; } rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); if (!rxrpc_workqueue) { printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); goto error_work_queue; } ret = proto_register(&rxrpc_proto, 1); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); goto error_proto; } ret = sock_register(&rxrpc_family_ops); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register socket family\n"); goto error_sock; } ret = register_key_type(&key_type_rxrpc); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register client key type\n"); goto error_key_type; } ret = register_key_type(&key_type_rxrpc_s); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register server key type\n"); goto error_key_type_s; } #ifdef CONFIG_PROC_FS proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops); proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops); #endif return 0; error_key_type_s: unregister_key_type(&key_type_rxrpc); error_key_type: sock_unregister(PF_RXRPC); error_sock: proto_unregister(&rxrpc_proto); error_proto: destroy_workqueue(rxrpc_workqueue); error_work_queue: kmem_cache_destroy(rxrpc_call_jar); error_call_jar: return ret; } /* * unregister the RxRPC protocol */ static void __exit af_rxrpc_exit(void) { _enter(""); unregister_key_type(&key_type_rxrpc_s); unregister_key_type(&key_type_rxrpc); sock_unregister(PF_RXRPC); proto_unregister(&rxrpc_proto); rxrpc_destroy_all_calls(); rxrpc_destroy_all_connections(); rxrpc_destroy_all_transports(); rxrpc_destroy_all_peers(); rxrpc_destroy_all_locals(); ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); _debug("flush scheduled work"); flush_workqueue(rxrpc_workqueue); proc_net_remove(&init_net, "rxrpc_conns"); 
proc_net_remove(&init_net, "rxrpc_calls"); destroy_workqueue(rxrpc_workqueue); kmem_cache_destroy(rxrpc_call_jar); _leave(""); } module_init(af_rxrpc_init); module_exit(af_rxrpc_exit);
gpl-2.0
boa19861105/BOA-A4TW
net/ax25/sysctl_net_ax25.c
9084
5184
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com) */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/spinlock.h> #include <net/ax25.h> static int min_ipdefmode[1], max_ipdefmode[] = {1}; static int min_axdefmode[1], max_axdefmode[] = {1}; static int min_backoff[1], max_backoff[] = {2}; static int min_conmode[1], max_conmode[] = {2}; static int min_window[] = {1}, max_window[] = {7}; static int min_ewindow[] = {1}, max_ewindow[] = {63}; static int min_t1[] = {1}, max_t1[] = {30000}; static int min_t2[] = {1}, max_t2[] = {20000}; static int min_t3[1], max_t3[] = {3600000}; static int min_idle[1], max_idle[] = {65535000}; static int min_n2[] = {1}, max_n2[] = {31}; static int min_paclen[] = {1}, max_paclen[] = {512}; static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; #ifdef CONFIG_AX25_DAMA_SLAVE static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; #endif static struct ctl_table_header *ax25_table_header; static ctl_table *ax25_table; static int ax25_table_size; static struct ctl_path ax25_path[] = { { .procname = "net", }, { .procname = "ax25", }, { } }; static const ctl_table ax25_param_table[] = { { .procname = "ip_default_mode", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_ipdefmode, .extra2 = &max_ipdefmode }, { .procname = "ax25_default_mode", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_axdefmode, .extra2 = &max_axdefmode }, { .procname = "backoff_type", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_backoff, .extra2 = &max_backoff }, { .procname = "connect_mode", .maxlen = sizeof(int), .mode = 0644, .proc_handler = 
proc_dointvec_minmax, .extra1 = &min_conmode, .extra2 = &max_conmode }, { .procname = "standard_window_size", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_window, .extra2 = &max_window }, { .procname = "extended_window_size", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_ewindow, .extra2 = &max_ewindow }, { .procname = "t1_timeout", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t1, .extra2 = &max_t1 }, { .procname = "t2_timeout", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t2, .extra2 = &max_t2 }, { .procname = "t3_timeout", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t3, .extra2 = &max_t3 }, { .procname = "idle_timeout", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_idle, .extra2 = &max_idle }, { .procname = "maximum_retry_count", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_n2, .extra2 = &max_n2 }, { .procname = "maximum_packet_length", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_paclen, .extra2 = &max_paclen }, { .procname = "protocol", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_proto, .extra2 = &max_proto }, #ifdef CONFIG_AX25_DAMA_SLAVE { .procname = "dama_slave_timeout", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_ds_timeout, .extra2 = &max_ds_timeout }, #endif { } /* that's all, folks! 
*/ }; void ax25_register_sysctl(void) { ax25_dev *ax25_dev; int n, k; spin_lock_bh(&ax25_dev_lock); for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) ax25_table_size += sizeof(ctl_table); if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { spin_unlock_bh(&ax25_dev_lock); return; } for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { struct ctl_table *child = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_ATOMIC); if (!child) { while (n--) kfree(ax25_table[n].child); kfree(ax25_table); spin_unlock_bh(&ax25_dev_lock); return; } ax25_table[n].child = ax25_dev->systable = child; ax25_table[n].procname = ax25_dev->dev->name; ax25_table[n].mode = 0555; for (k = 0; k < AX25_MAX_VALUES; k++) child[k].data = &ax25_dev->values[k]; n++; } spin_unlock_bh(&ax25_dev_lock); ax25_table_header = register_sysctl_paths(ax25_path, ax25_table); } void ax25_unregister_sysctl(void) { ctl_table *p; unregister_sysctl_table(ax25_table_header); for (p = ax25_table; p->procname; p++) kfree(p->child); kfree(ax25_table); }
gpl-2.0
zzicewind/linux
sound/soc/mediatek/mtk-afe-pcm.c
125
33479
/*
 * Mediatek ALSA SoC AFE platform driver
 *
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Koro Chen <koro.chen@mediatek.com>
 *         Sascha Hauer <s.hauer@pengutronix.de>
 *         Hidalgo Huang <hidalgo.huang@mediatek.com>
 *         Ir Lian <ir.lian@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include "mtk-afe-common.h"

/*****************************************************************************
 *                R E G I S T E R       D E F I N I T I O N
 *****************************************************************************/
#define AUDIO_TOP_CON0		0x0000
#define AUDIO_TOP_CON1		0x0004
#define AFE_DAC_CON0		0x0010
#define AFE_DAC_CON1		0x0014
#define AFE_I2S_CON1		0x0034
#define AFE_I2S_CON2		0x0038
#define AFE_CONN_24BIT		0x006c

#define AFE_CONN1		0x0024
#define AFE_CONN2		0x0028
#define AFE_CONN7		0x0460
#define AFE_CONN8		0x0464
#define AFE_HDMI_CONN0		0x0390

/* Memory interface */
#define AFE_DL1_BASE		0x0040
#define AFE_DL1_CUR		0x0044
#define AFE_DL2_BASE		0x0050
#define AFE_DL2_CUR		0x0054
#define AFE_AWB_BASE		0x0070
#define AFE_AWB_CUR		0x007c
#define AFE_VUL_BASE		0x0080
#define AFE_VUL_CUR		0x008c
#define AFE_DAI_BASE		0x0090
#define AFE_DAI_CUR		0x009c
#define AFE_MOD_PCM_BASE	0x0330
#define AFE_MOD_PCM_CUR		0x033c
#define AFE_HDMI_OUT_BASE	0x0374
#define AFE_HDMI_OUT_CUR	0x0378

#define AFE_ADDA2_TOP_CON0	0x0600

#define AFE_HDMI_OUT_CON0	0x0370

#define AFE_IRQ_MCU_CON		0x03a0
#define AFE_IRQ_STATUS		0x03a4
#define AFE_IRQ_CLR		0x03a8
#define AFE_IRQ_CNT1		0x03ac
#define AFE_IRQ_CNT2		0x03b0
#define AFE_IRQ_MCU_EN		0x03b4
#define AFE_IRQ_CNT5		0x03bc
#define AFE_IRQ_CNT7		0x03dc

#define AFE_TDM_CON1		0x0548
#define AFE_TDM_CON2		0x054c

/* A memif's end-address register sits this many bytes after its base reg */
#define AFE_BASE_END_OFFSET	8
#define AFE_IRQ_STATUS_BITS	0xff

/* AUDIO_TOP_CON0 (0x0000) */
#define AUD_TCON0_PDN_SPDF		(0x1 << 21)
#define AUD_TCON0_PDN_HDMI		(0x1 << 20)
#define AUD_TCON0_PDN_24M		(0x1 << 9)
#define AUD_TCON0_PDN_22M		(0x1 << 8)
#define AUD_TCON0_PDN_AFE		(0x1 << 2)

/* AFE_I2S_CON1 (0x0034) */
#define AFE_I2S_CON1_LOW_JITTER_CLK	(0x1 << 12)
#define AFE_I2S_CON1_RATE(x)		(((x) & 0xf) << 8)
#define AFE_I2S_CON1_FORMAT_I2S		(0x1 << 3)
#define AFE_I2S_CON1_EN			(0x1 << 0)

/* AFE_I2S_CON2 (0x0038) */
#define AFE_I2S_CON2_LOW_JITTER_CLK	(0x1 << 12)
#define AFE_I2S_CON2_RATE(x)		(((x) & 0xf) << 8)
#define AFE_I2S_CON2_FORMAT_I2S		(0x1 << 3)
#define AFE_I2S_CON2_EN			(0x1 << 0)

/* AFE_CONN_24BIT (0x006c) */
#define AFE_CONN_24BIT_O04		(0x1 << 4)
#define AFE_CONN_24BIT_O03		(0x1 << 3)

/* AFE_HDMI_CONN0 (0x0390) */
#define AFE_HDMI_CONN0_O37_I37		(0x7 << 21)
#define AFE_HDMI_CONN0_O36_I36		(0x6 << 18)
#define AFE_HDMI_CONN0_O35_I33		(0x3 << 15)
#define AFE_HDMI_CONN0_O34_I32		(0x2 << 12)
#define AFE_HDMI_CONN0_O33_I35		(0x5 << 9)
#define AFE_HDMI_CONN0_O32_I34		(0x4 << 6)
#define AFE_HDMI_CONN0_O31_I31		(0x1 << 3)
#define AFE_HDMI_CONN0_O30_I30		(0x0 << 0)

/* AFE_TDM_CON1 (0x0548) */
#define AFE_TDM_CON1_LRCK_WIDTH(x)	(((x) - 1) << 24)
#define AFE_TDM_CON1_32_BCK_CYCLES	(0x2 << 12)
#define AFE_TDM_CON1_WLEN_32BIT		(0x2 << 8)
#define AFE_TDM_CON1_MSB_ALIGNED	(0x1 << 4)
#define AFE_TDM_CON1_1_BCK_DELAY	(0x1 << 3)
#define AFE_TDM_CON1_BCK_INV		(0x1 << 1)
#define AFE_TDM_CON1_EN			(0x1 << 0)

/* Per-channel-pair start source selector written into AFE_TDM_CON2 */
enum afe_tdm_ch_start {
	AFE_TDM_CH_START_O30_O31 = 0,
	AFE_TDM_CH_START_O32_O33,
	AFE_TDM_CH_START_O34_O35,
	AFE_TDM_CH_START_O36_O37,
	AFE_TDM_CH_ZERO,
};

static const struct snd_pcm_hardware mtk_afe_hardware = {
	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP_VALID),
	.buffer_bytes_max = 256 * 1024,
	.period_bytes_min = 512,
	.period_bytes_max = 128 * 1024,
	.periods_min = 2,
	.periods_max = 256,
	.fifo_size = 0,
};

/*
 * Report the current DMA position. memif->hw_ptr is the byte offset into
 * the DMA buffer, updated by the AFE interrupt handler.
 */
static snd_pcm_uframes_t mtk_afe_pcm_pointer
			 (struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];

	return bytes_to_frames(substream->runtime, memif->hw_ptr);
}

static const struct snd_pcm_ops mtk_afe_pcm_ops = {
	.ioctl = snd_pcm_lib_ioctl,
	.pointer = mtk_afe_pcm_pointer,
};

/* Preallocate the largest possible DMA buffer for every stream */
static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	size_t size;
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;

	size = mtk_afe_hardware.buffer_bytes_max;

	return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
						     card->dev, size, size);
}

static void mtk_afe_pcm_free(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

static const struct snd_soc_platform_driver mtk_afe_pcm_platform = {
	.ops = &mtk_afe_pcm_ops,
	.pcm_new = mtk_afe_pcm_new,
	.pcm_free = mtk_afe_pcm_free,
};

struct mtk_afe_rate {
	unsigned int rate;	/* sample rate in Hz */
	unsigned int regvalue;	/* value programmed into the fs field */
};

/*
 * Sample-rate to register-value table.
 *
 * FIX: the 44.1 kHz family entries were mistyped as 88000 and 174000;
 * the hardware fs codes 11 and 13 select 88200 Hz and 176400 Hz (odd
 * regvalues are the 11025/22050/44100/... family), matching the
 * 88200/176400 rates advertised by the HDMI DAI below.
 */
static const struct mtk_afe_rate mtk_afe_i2s_rates[] = {
	{ .rate = 8000, .regvalue = 0 },
	{ .rate = 11025, .regvalue = 1 },
	{ .rate = 12000, .regvalue = 2 },
	{ .rate = 16000, .regvalue = 4 },
	{ .rate = 22050, .regvalue = 5 },
	{ .rate = 24000, .regvalue = 6 },
	{ .rate = 32000, .regvalue = 8 },
	{ .rate = 44100, .regvalue = 9 },
	{ .rate = 48000, .regvalue = 10 },
	{ .rate = 88200, .regvalue = 11 },
	{ .rate = 96000, .regvalue = 12 },
	{ .rate = 176400, .regvalue = 13 },
	{ .rate = 192000, .regvalue = 14 },
};

/* Map a sample rate to its fs register code, or -EINVAL if unsupported */
static int mtk_afe_i2s_fs(unsigned int sample_rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtk_afe_i2s_rates); i++)
		if (mtk_afe_i2s_rates[i].rate == sample_rate)
			return mtk_afe_i2s_rates[i].regvalue;

	return -EINVAL;
}

/*
 * Configure the I2S in (CON2) and out (CON1) blocks for @rate without
 * touching the enable bits (the ~EN mask leaves EN alone).
 */
static int mtk_afe_set_i2s(struct mtk_afe *afe, unsigned int rate)
{
	unsigned int val;
	int fs = mtk_afe_i2s_fs(rate);

	if (fs < 0)
		return -EINVAL;

	/* from external ADC */
	regmap_update_bits(afe->regmap, AFE_ADDA2_TOP_CON0, 0x1, 0x1);

	/* set input */
	val = AFE_I2S_CON2_LOW_JITTER_CLK |
	      AFE_I2S_CON2_RATE(fs) |
	      AFE_I2S_CON2_FORMAT_I2S;
	regmap_update_bits(afe->regmap, AFE_I2S_CON2, ~AFE_I2S_CON2_EN, val);

	/* set output */
	val = AFE_I2S_CON1_LOW_JITTER_CLK |
	      AFE_I2S_CON1_RATE(fs) |
	      AFE_I2S_CON1_FORMAT_I2S;
	regmap_update_bits(afe->regmap, AFE_I2S_CON1, ~AFE_I2S_CON1_EN, val);

	return 0;
}

/*
 * Enable or disable both I2S blocks, wrapped in an I2S soft reset.
 * No-op when the enable state already matches (the soft reset must be
 * skipped in that case).
 */
static void mtk_afe_set_i2s_enable(struct mtk_afe *afe, bool enable)
{
	unsigned int val;

	regmap_read(afe->regmap, AFE_I2S_CON2, &val);
	if (!!(val & AFE_I2S_CON2_EN) == enable)
		return; /* must skip soft reset */

	/* I2S soft reset begin */
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON1, 0x4, 0x4);

	/* input */
	regmap_update_bits(afe->regmap, AFE_I2S_CON2, 0x1, enable);

	/* output */
	regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);

	/* I2S soft reset end */
	udelay(1);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON1, 0x4, 0);
}

/*
 * Enable the master (and optional bit) clocks for a DAI; either clock
 * pointer may be NULL. Enabling m_ck also ungates the 22M/24M PLLs.
 */
static int mtk_afe_dais_enable_clks(struct mtk_afe *afe,
				    struct clk *m_ck, struct clk *b_ck)
{
	int ret;

	if (m_ck) {
		ret = clk_prepare_enable(m_ck);
		if (ret) {
			dev_err(afe->dev, "Failed to enable m_ck\n");
			return ret;
		}
		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
				   AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M, 0);
	}

	if (b_ck) {
		ret = clk_prepare_enable(b_ck);
		if (ret) {
			dev_err(afe->dev, "Failed to enable b_ck\n");
			return ret;
		}
	}
	return 0;
}

/* Set the rates of the (optional) master and bit clocks */
static int mtk_afe_dais_set_clks(struct mtk_afe *afe,
				 struct clk *m_ck, unsigned int mck_rate,
				 struct clk *b_ck, unsigned int bck_rate)
{
	int ret;

	if (m_ck) {
		ret = clk_set_rate(m_ck, mck_rate);
		if (ret) {
			dev_err(afe->dev, "Failed to set m_ck rate\n");
			return ret;
		}
	}

	if (b_ck) {
		ret = clk_set_rate(b_ck, bck_rate);
		if (ret) {
			dev_err(afe->dev, "Failed to set b_ck rate\n");
			return ret;
		}
	}
	return 0;
}

/* Counterpart of mtk_afe_dais_enable_clks(); regates the 22M/24M PLLs */
static void mtk_afe_dais_disable_clks(struct mtk_afe *afe,
				      struct clk *m_ck, struct clk *b_ck)
{
	if (m_ck) {
		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
				   AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M,
				   AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M);
		clk_disable_unprepare(m_ck);
	}
	if (b_ck)
		clk_disable_unprepare(b_ck);
}

/* I2S BE: enable clocks on the first open of either direction */
static int mtk_afe_i2s_startup(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	if (dai->active)
		return 0;

	mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);
	return 0;
}

/* I2S BE: tear down only when the last direction closes */
static void mtk_afe_i2s_shutdown(struct snd_pcm_substream *substream,
				 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	if (dai->active)
		return;

	mtk_afe_set_i2s_enable(afe, false);
	mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);

	/* disable AFE */
	regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0);
}

/* I2S BE: program MCLK (256*fs) and the I2S blocks, then enable them */
static int mtk_afe_i2s_prepare(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	int ret;

	mtk_afe_dais_set_clks(afe, afe->clocks[MTK_CLK_I2S1_M],
			      runtime->rate * 256, NULL, 0);

	/* config I2S */
	ret = mtk_afe_set_i2s(afe, substream->runtime->rate);
	if (ret)
		return ret;

	mtk_afe_set_i2s_enable(afe, true);

	return 0;
}

/* HDMI BE: enable the I2S3 master and bit clocks on first open */
static int mtk_afe_hdmi_startup(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	if (dai->active)
		return 0;

	mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
				 afe->clocks[MTK_CLK_I2S3_B]);
	return 0;
}

static void mtk_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	if (dai->active)
		return;

	mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
				  afe->clocks[MTK_CLK_I2S3_B]);

	/* disable AFE */
	regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0);
}

/*
 * HDMI BE: set clocks (MCLK = 128*fs, BCK = channels*32*fs), configure
 * the TDM block for I2S-style 32-bit slots, and route the channel pairs
 * according to the stream's channel count (unused pairs output zero).
 */
static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	unsigned int val;

	mtk_afe_dais_set_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
			      runtime->rate * 128, afe->clocks[MTK_CLK_I2S3_B],
			      runtime->rate * runtime->channels * 32);

	val = AFE_TDM_CON1_BCK_INV |
	      AFE_TDM_CON1_1_BCK_DELAY |
	      AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
	      AFE_TDM_CON1_WLEN_32BIT |
	      AFE_TDM_CON1_32_BCK_CYCLES |
	      AFE_TDM_CON1_LRCK_WIDTH(32);
	regmap_update_bits(afe->regmap, AFE_TDM_CON1, ~AFE_TDM_CON1_EN, val);

	/* set tdm2 config */
	switch (runtime->channels) {
	case 1:
	case 2:
		val = AFE_TDM_CH_START_O30_O31;
		val |= (AFE_TDM_CH_ZERO << 4);
		val |= (AFE_TDM_CH_ZERO << 8);
		val |= (AFE_TDM_CH_ZERO << 12);
		break;
	case 3:
	case 4:
		val = AFE_TDM_CH_START_O30_O31;
		val |= (AFE_TDM_CH_START_O32_O33 << 4);
		val |= (AFE_TDM_CH_ZERO << 8);
		val |= (AFE_TDM_CH_ZERO << 12);
		break;
	case 5:
	case 6:
		val = AFE_TDM_CH_START_O30_O31;
		val |= (AFE_TDM_CH_START_O32_O33 << 4);
		val |= (AFE_TDM_CH_START_O34_O35 << 8);
		val |= (AFE_TDM_CH_ZERO << 12);
		break;
	case 7:
	case 8:
		val = AFE_TDM_CH_START_O30_O31;
		val |= (AFE_TDM_CH_START_O32_O33 << 4);
		val |= (AFE_TDM_CH_START_O34_O35 << 8);
		val |= (AFE_TDM_CH_START_O36_O37 << 12);
		break;
	default:
		val = 0;
	}
	regmap_update_bits(afe->regmap, AFE_TDM_CON2, 0x0000ffff, val);

	regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
			   0x000000f0, runtime->channels << 4);
	return 0;
}

/*
 * HDMI BE trigger: ungate the HDMI/SPDIF clocks, set the fixed
 * O30..O37 <- I3x interconnect and enable the out control + TDM block
 * on start; reverse the sequence on stop.
 */
static int mtk_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	dev_info(afe->dev, "%s cmd=%d %s\n", __func__, cmd, dai->name);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
				   AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF, 0);

		/* set connections:  O30~O37: L/R/LS/RS/C/LFE/CH7/CH8 */
		regmap_write(afe->regmap, AFE_HDMI_CONN0,
			     AFE_HDMI_CONN0_O30_I30 | AFE_HDMI_CONN0_O31_I31 |
			     AFE_HDMI_CONN0_O32_I34 | AFE_HDMI_CONN0_O33_I35 |
			     AFE_HDMI_CONN0_O34_I32 | AFE_HDMI_CONN0_O35_I33 |
			     AFE_HDMI_CONN0_O36_I36 | AFE_HDMI_CONN0_O37_I37);

		/* enable Out control */
		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1);

		/* enable tdm */
		regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0x1);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		/* disable tdm */
		regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0);

		/* disable Out control */
		regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0);

		regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
				   AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF,
				   AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF);
		return 0;
	default:
		return -EINVAL;
	}
}

/* FE: bind the substream to its memif and apply hardware constraints */
static int mtk_afe_dais_startup(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int ret;

	memif->substream = substream;

	snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
	return ret;
}

static void mtk_afe_dais_shutdown(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];

	memif->substream = NULL;
}

/*
 * FE hw_params: allocate the DMA buffer, program the memif base/end
 * addresses, channel (mono/stereo) bit and sample-rate field.
 * DAI/MOD_DAI memifs use a 2-bit 8/16/32 kHz code; the others use the
 * 4-bit fs code from mtk_afe_i2s_fs().
 */
static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params,
				  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int ret;

	dev_dbg(afe->dev,
		"%s period = %u, rate= %u, channels=%u\n",
		__func__, params_period_size(params), params_rate(params),
		params_channels(params));

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	memif->phys_buf_addr = substream->runtime->dma_addr;
	memif->buffer_size = substream->runtime->dma_bytes;
	memif->hw_ptr = 0;

	/* start */
	regmap_write(afe->regmap,
		     memif->data->reg_ofs_base, memif->phys_buf_addr);
	/* end */
	regmap_write(afe->regmap,
		     memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
		     memif->phys_buf_addr + memif->buffer_size - 1);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		regmap_update_bits(afe->regmap, AFE_DAC_CON1,
				   1 << memif->data->mono_shift,
				   mono << memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;
	if (memif->data->id == MTK_AFE_MEMIF_DAI ||
	    memif->data->id == MTK_AFE_MEMIF_MOD_DAI) {
		unsigned int val;

		switch (params_rate(params)) {
		case 8000:
			val = 0;
			break;
		case 16000:
			val = 1;
			break;
		case 32000:
			val = 2;
			break;
		default:
			return -EINVAL;
		}

		if (memif->data->id == MTK_AFE_MEMIF_DAI)
			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
					   0x3 << memif->data->fs_shift,
					   val << memif->data->fs_shift);
		else
			regmap_update_bits(afe->regmap, AFE_DAC_CON1,
					   0x3 << memif->data->fs_shift,
					   val << memif->data->fs_shift);

	} else {
		int fs = mtk_afe_i2s_fs(params_rate(params));

		if (fs < 0)
			return -EINVAL;

		regmap_update_bits(afe->regmap, AFE_DAC_CON1,
				   0xf << memif->data->fs_shift,
				   fs << memif->data->fs_shift);
	}

	return 0;
}

static int mtk_afe_dais_hw_free(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}

/* FE prepare: the memif engine needs the global AFE enable bit set */
static int mtk_afe_dais_prepare(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);

	/* enable AFE */
	regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
	return 0;
}

/*
 * FE trigger: enable the memif, program the period-sized IRQ counter
 * and IRQ sample-rate field, and unmask the memif's interrupt; on stop,
 * disable everything and clear any pending IRQ.
 */
static int mtk_afe_dais_trigger(struct snd_pcm_substream *substream, int cmd,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	unsigned int counter = runtime->period_size;

	dev_info(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
					   1 << memif->data->enable_shift,
					   1 << memif->data->enable_shift);

		/* set irq counter */
		regmap_update_bits(afe->regmap,
				   memif->data->irq_reg_cnt,
				   0x3ffff << memif->data->irq_cnt_shift,
				   counter << memif->data->irq_cnt_shift);

		/* set irq fs */
		if (memif->data->irq_fs_shift >= 0) {
			int fs = mtk_afe_i2s_fs(runtime->rate);

			if (fs < 0)
				return -EINVAL;

			regmap_update_bits(afe->regmap,
					   AFE_IRQ_MCU_CON,
					   0xf << memif->data->irq_fs_shift,
					   fs << memif->data->irq_fs_shift);
		}
		/* enable interrupt */
		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
				   1 << memif->data->irq_en_shift,
				   1 << memif->data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (memif->data->enable_shift >= 0)
			regmap_update_bits(afe->regmap, AFE_DAC_CON0,
					   1 << memif->data->enable_shift, 0);
		/* disable interrupt */
		regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
				   1 << memif->data->irq_en_shift,
				   0 << memif->data->irq_en_shift);
		/* and clear pending IRQ */
		regmap_write(afe->regmap, AFE_IRQ_CLR,
			     1 << memif->data->irq_clr_shift);
		memif->hw_ptr = 0;
		return 0;
	default:
		return -EINVAL;
	}
}

/* FE DAIs */
static const struct snd_soc_dai_ops mtk_afe_dai_ops = {
	.startup	= mtk_afe_dais_startup,
	.shutdown	= mtk_afe_dais_shutdown,
	.hw_params	= mtk_afe_dais_hw_params,
	.hw_free	= mtk_afe_dais_hw_free,
	.prepare	= mtk_afe_dais_prepare,
	.trigger	= mtk_afe_dais_trigger,
};

/* BE DAIs */
static const struct snd_soc_dai_ops mtk_afe_i2s_ops = {
	.startup	= mtk_afe_i2s_startup,
	.shutdown	= mtk_afe_i2s_shutdown,
	.prepare	= mtk_afe_i2s_prepare,
};

static const struct snd_soc_dai_ops mtk_afe_hdmi_ops = {
	.startup	= mtk_afe_hdmi_startup,
	.shutdown	= mtk_afe_hdmi_shutdown,
	.prepare	= mtk_afe_hdmi_prepare,
	.trigger	= mtk_afe_hdmi_trigger,
};

static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
	/* FE DAIs: memory intefaces to CPU */
	{
		.name = "DL1", /* downlink 1 */
		.id = MTK_AFE_MEMIF_DL1,
		.playback = {
			.stream_name = "DL1",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &mtk_afe_dai_ops,
	}, {
		.name = "VUL", /* voice uplink */
		.id = MTK_AFE_MEMIF_VUL,
		.capture = {
			.stream_name = "VUL",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &mtk_afe_dai_ops,
	}, {
	/* BE DAIs */
		.name = "I2S",
		.id = MTK_AFE_IO_I2S,
		.playback = {
			.stream_name = "I2S Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.capture = {
			.stream_name = "I2S Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &mtk_afe_i2s_ops,
		.symmetric_rates = 1,
	},
};

static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
	/* FE DAIs */
	{
		.name = "HDMI",
		.id = MTK_AFE_MEMIF_HDMI,
		.playback = {
			.stream_name = "HDMI",
			.channels_min = 2,
			.channels_max = 8,
			.rates = SNDRV_PCM_RATE_32000 |
				 SNDRV_PCM_RATE_44100 |
				 SNDRV_PCM_RATE_48000 |
				 SNDRV_PCM_RATE_88200 |
				 SNDRV_PCM_RATE_96000 |
				 SNDRV_PCM_RATE_176400 |
				 SNDRV_PCM_RATE_192000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &mtk_afe_dai_ops,
	}, {
	/* BE DAIs */
		.name = "HDMIO",
		.id = MTK_AFE_IO_HDMI,
		.playback = {
			.stream_name = "HDMIO Playback",
			.channels_min = 2,
			.channels_max = 8,
			.rates = SNDRV_PCM_RATE_32000 |
				 SNDRV_PCM_RATE_44100 |
				 SNDRV_PCM_RATE_48000 |
				 SNDRV_PCM_RATE_88200 |
				 SNDRV_PCM_RATE_96000 |
				 SNDRV_PCM_RATE_176400 |
				 SNDRV_PCM_RATE_192000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
		.ops = &mtk_afe_hdmi_ops,
	},
};

/* Interconnect on/off switches (auto-disabled while a path is down) */
static const struct snd_kcontrol_new mtk_afe_o03_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
};

static const struct snd_kcontrol_new mtk_afe_o04_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
};

static const struct snd_kcontrol_new mtk_afe_o09_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN7, 30, 1, 0),
};

static const struct snd_kcontrol_new mtk_afe_o10_mix[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN8, 0, 1, 0),
};
static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = { /* Backend DAIs */ SND_SOC_DAPM_AIF_IN("I2S Capture", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("I2S Playback", NULL, 0, SND_SOC_NOPM, 0, 0), /* inter-connections */ SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0, mtk_afe_o03_mix, ARRAY_SIZE(mtk_afe_o03_mix)), SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0, mtk_afe_o04_mix, ARRAY_SIZE(mtk_afe_o04_mix)), SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0, mtk_afe_o09_mix, ARRAY_SIZE(mtk_afe_o09_mix)), SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0, mtk_afe_o10_mix, ARRAY_SIZE(mtk_afe_o10_mix)), }; static const struct snd_soc_dapm_route mtk_afe_pcm_routes[] = { {"I05", NULL, "DL1"}, {"I06", NULL, "DL1"}, {"I2S Playback", NULL, "O03"}, {"I2S Playback", NULL, "O04"}, {"VUL", NULL, "O09"}, {"VUL", NULL, "O10"}, {"I17", NULL, "I2S Capture"}, {"I18", NULL, "I2S Capture"}, { "O03", "I05 Switch", "I05" }, { "O04", "I06 Switch", "I06" }, { "O09", "I17 Switch", "I17" }, { "O10", "I18 Switch", "I18" }, }; static const struct snd_soc_dapm_widget mtk_afe_hdmi_widgets[] = { /* Backend DAIs */ SND_SOC_DAPM_AIF_OUT("HDMIO Playback", NULL, 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = { {"HDMIO Playback", NULL, "HDMI"}, }; static const struct snd_soc_component_driver mtk_afe_pcm_dai_component = { .name = "mtk-afe-pcm-dai", .dapm_widgets = mtk_afe_pcm_widgets, .num_dapm_widgets = ARRAY_SIZE(mtk_afe_pcm_widgets), .dapm_routes = mtk_afe_pcm_routes, .num_dapm_routes = ARRAY_SIZE(mtk_afe_pcm_routes), }; static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = { .name = "mtk-afe-hdmi-dai", .dapm_widgets = mtk_afe_hdmi_widgets, .num_dapm_widgets = ARRAY_SIZE(mtk_afe_hdmi_widgets), .dapm_routes = 
mtk_afe_hdmi_routes, .num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes), }; static const char *aud_clks[MTK_CLK_NUM] = { [MTK_CLK_INFRASYS_AUD] = "infra_sys_audio_clk", [MTK_CLK_TOP_PDN_AUD] = "top_pdn_audio", [MTK_CLK_TOP_PDN_AUD_BUS] = "top_pdn_aud_intbus", [MTK_CLK_I2S0_M] = "i2s0_m", [MTK_CLK_I2S1_M] = "i2s1_m", [MTK_CLK_I2S2_M] = "i2s2_m", [MTK_CLK_I2S3_M] = "i2s3_m", [MTK_CLK_I2S3_B] = "i2s3_b", [MTK_CLK_BCK0] = "bck0", [MTK_CLK_BCK1] = "bck1", }; static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = { { .name = "DL1", .id = MTK_AFE_MEMIF_DL1, .reg_ofs_base = AFE_DL1_BASE, .reg_ofs_cur = AFE_DL1_CUR, .fs_shift = 0, .mono_shift = 21, .enable_shift = 1, .irq_reg_cnt = AFE_IRQ_CNT1, .irq_cnt_shift = 0, .irq_en_shift = 0, .irq_fs_shift = 4, .irq_clr_shift = 0, }, { .name = "DL2", .id = MTK_AFE_MEMIF_DL2, .reg_ofs_base = AFE_DL2_BASE, .reg_ofs_cur = AFE_DL2_CUR, .fs_shift = 4, .mono_shift = 22, .enable_shift = 2, .irq_reg_cnt = AFE_IRQ_CNT1, .irq_cnt_shift = 20, .irq_en_shift = 2, .irq_fs_shift = 16, .irq_clr_shift = 2, }, { .name = "VUL", .id = MTK_AFE_MEMIF_VUL, .reg_ofs_base = AFE_VUL_BASE, .reg_ofs_cur = AFE_VUL_CUR, .fs_shift = 16, .mono_shift = 27, .enable_shift = 3, .irq_reg_cnt = AFE_IRQ_CNT2, .irq_cnt_shift = 0, .irq_en_shift = 1, .irq_fs_shift = 8, .irq_clr_shift = 1, }, { .name = "DAI", .id = MTK_AFE_MEMIF_DAI, .reg_ofs_base = AFE_DAI_BASE, .reg_ofs_cur = AFE_DAI_CUR, .fs_shift = 24, .mono_shift = -1, .enable_shift = 4, .irq_reg_cnt = AFE_IRQ_CNT2, .irq_cnt_shift = 20, .irq_en_shift = 3, .irq_fs_shift = 20, .irq_clr_shift = 3, }, { .name = "AWB", .id = MTK_AFE_MEMIF_AWB, .reg_ofs_base = AFE_AWB_BASE, .reg_ofs_cur = AFE_AWB_CUR, .fs_shift = 12, .mono_shift = 24, .enable_shift = 6, .irq_reg_cnt = AFE_IRQ_CNT7, .irq_cnt_shift = 0, .irq_en_shift = 14, .irq_fs_shift = 24, .irq_clr_shift = 6, }, { .name = "MOD_DAI", .id = MTK_AFE_MEMIF_MOD_DAI, .reg_ofs_base = AFE_MOD_PCM_BASE, .reg_ofs_cur = AFE_MOD_PCM_CUR, .fs_shift = 30, .mono_shift = 30, 
.enable_shift = 7, .irq_reg_cnt = AFE_IRQ_CNT2, .irq_cnt_shift = 20, .irq_en_shift = 3, .irq_fs_shift = 20, .irq_clr_shift = 3, }, { .name = "HDMI", .id = MTK_AFE_MEMIF_HDMI, .reg_ofs_base = AFE_HDMI_OUT_BASE, .reg_ofs_cur = AFE_HDMI_OUT_CUR, .fs_shift = -1, .mono_shift = -1, .enable_shift = -1, .irq_reg_cnt = AFE_IRQ_CNT5, .irq_cnt_shift = 0, .irq_en_shift = 12, .irq_fs_shift = -1, .irq_clr_shift = 4, }, }; static const struct regmap_config mtk_afe_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = AFE_ADDA2_TOP_CON0, .cache_type = REGCACHE_NONE, }; static irqreturn_t mtk_afe_irq_handler(int irq, void *dev_id) { struct mtk_afe *afe = dev_id; unsigned int reg_value, hw_ptr; int i, ret; ret = regmap_read(afe->regmap, AFE_IRQ_STATUS, &reg_value); if (ret) { dev_err(afe->dev, "%s irq status err\n", __func__); reg_value = AFE_IRQ_STATUS_BITS; goto err_irq; } for (i = 0; i < MTK_AFE_MEMIF_NUM; i++) { struct mtk_afe_memif *memif = &afe->memif[i]; if (!(reg_value & (1 << memif->data->irq_clr_shift))) continue; ret = regmap_read(afe->regmap, memif->data->reg_ofs_cur, &hw_ptr); if (ret || hw_ptr == 0) { dev_err(afe->dev, "%s hw_ptr err\n", __func__); hw_ptr = memif->phys_buf_addr; } memif->hw_ptr = hw_ptr - memif->phys_buf_addr; snd_pcm_period_elapsed(memif->substream); } err_irq: /* clear irq */ regmap_write(afe->regmap, AFE_IRQ_CLR, reg_value & AFE_IRQ_STATUS_BITS); return IRQ_HANDLED; } static int mtk_afe_runtime_suspend(struct device *dev) { struct mtk_afe *afe = dev_get_drvdata(dev); /* disable AFE clk */ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, AUD_TCON0_PDN_AFE, AUD_TCON0_PDN_AFE); clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]); clk_disable_unprepare(afe->clocks[MTK_CLK_BCK1]); clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]); clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]); clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]); return 0; } static int mtk_afe_runtime_resume(struct device *dev) { struct 
mtk_afe *afe = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(afe->clocks[MTK_CLK_INFRASYS_AUD]); if (ret) return ret; ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]); if (ret) goto err_infra; ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD]); if (ret) goto err_top_aud_bus; ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK0]); if (ret) goto err_top_aud; ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK1]); if (ret) goto err_bck0; /* enable AFE clk */ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, AUD_TCON0_PDN_AFE, 0); /* set O3/O4 16bits */ regmap_update_bits(afe->regmap, AFE_CONN_24BIT, AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, 0); /* unmask all IRQs */ regmap_update_bits(afe->regmap, AFE_IRQ_MCU_EN, 0xff, 0xff); return 0; err_bck0: clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]); err_top_aud: clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]); err_top_aud_bus: clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]); err_infra: clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]); return ret; } static int mtk_afe_init_audio_clk(struct mtk_afe *afe) { size_t i; for (i = 0; i < ARRAY_SIZE(aud_clks); i++) { afe->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]); if (IS_ERR(afe->clocks[i])) { dev_err(afe->dev, "%s devm_clk_get %s fail\n", __func__, aud_clks[i]); return PTR_ERR(afe->clocks[i]); } } clk_set_rate(afe->clocks[MTK_CLK_BCK0], 22579200); /* 22M */ clk_set_rate(afe->clocks[MTK_CLK_BCK1], 24576000); /* 24M */ return 0; } static int mtk_afe_pcm_dev_probe(struct platform_device *pdev) { int ret, i; unsigned int irq_id; struct mtk_afe *afe; struct resource *res; afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL); if (!afe) return -ENOMEM; afe->dev = &pdev->dev; irq_id = platform_get_irq(pdev, 0); if (!irq_id) { dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name); return -ENXIO; } ret = devm_request_irq(afe->dev, irq_id, mtk_afe_irq_handler, 0, "Afe_ISR_Handle", (void *)afe); if (ret) { dev_err(afe->dev, 
"could not request_irq\n"); return ret; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); afe->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(afe->base_addr)) return PTR_ERR(afe->base_addr); afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr, &mtk_afe_regmap_config); if (IS_ERR(afe->regmap)) return PTR_ERR(afe->regmap); /* initial audio related clock */ ret = mtk_afe_init_audio_clk(afe); if (ret) { dev_err(afe->dev, "mtk_afe_init_audio_clk fail\n"); return ret; } for (i = 0; i < MTK_AFE_MEMIF_NUM; i++) afe->memif[i].data = &memif_data[i]; platform_set_drvdata(pdev, afe); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = mtk_afe_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform); if (ret) goto err_pm_disable; ret = snd_soc_register_component(&pdev->dev, &mtk_afe_pcm_dai_component, mtk_afe_pcm_dais, ARRAY_SIZE(mtk_afe_pcm_dais)); if (ret) goto err_platform; ret = snd_soc_register_component(&pdev->dev, &mtk_afe_hdmi_dai_component, mtk_afe_hdmi_dais, ARRAY_SIZE(mtk_afe_hdmi_dais)); if (ret) goto err_comp; dev_info(&pdev->dev, "MTK AFE driver initialized.\n"); return 0; err_comp: snd_soc_unregister_component(&pdev->dev); err_platform: snd_soc_unregister_platform(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; } static int mtk_afe_pcm_dev_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mtk_afe_runtime_suspend(&pdev->dev); snd_soc_unregister_component(&pdev->dev); snd_soc_unregister_platform(&pdev->dev); return 0; } static const struct of_device_id mtk_afe_pcm_dt_match[] = { { .compatible = "mediatek,mt8173-afe-pcm", }, { } }; MODULE_DEVICE_TABLE(of, mtk_afe_pcm_dt_match); static const struct dev_pm_ops mtk_afe_pm_ops = { SET_RUNTIME_PM_OPS(mtk_afe_runtime_suspend, mtk_afe_runtime_resume, NULL) }; static struct platform_driver mtk_afe_pcm_driver 
= { .driver = { .name = "mtk-afe-pcm", .owner = THIS_MODULE, .of_match_table = mtk_afe_pcm_dt_match, .pm = &mtk_afe_pm_ops, }, .probe = mtk_afe_pcm_dev_probe, .remove = mtk_afe_pcm_dev_remove, }; module_platform_driver(mtk_afe_pcm_driver); MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver"); MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
jumpnow/linux
drivers/irqchip/irq-alpine-msi.c
381
7195
/* * Annapurna Labs MSIX support services * * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Antoine Tenart <antoine.tenart@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/slab.h> #include <asm/irq.h> #include <asm/msi.h> /* MSIX message address format: local GIC target */ #define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16) struct alpine_msix_data { spinlock_t msi_map_lock; phys_addr_t addr; u32 spi_first; /* The SGI number that MSIs start */ u32 num_spis; /* The number of SGIs for MSIs */ unsigned long *msi_map; }; static void alpine_msix_mask_msi_irq(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void alpine_msix_unmask_msi_irq(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip alpine_msix_irq_chip = { .name = "MSIx", .irq_mask = alpine_msix_mask_msi_irq, .irq_unmask = alpine_msix_unmask_msi_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, }; static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req) { int first; spin_lock(&priv->msi_map_lock); first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0, num_req, 0); if (first >= priv->num_spis) { spin_unlock(&priv->msi_map_lock); return -ENOSPC; } bitmap_set(priv->msi_map, first, num_req); spin_unlock(&priv->msi_map_lock); return priv->spi_first + first; } static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi, int num_req) { int first = sgi - priv->spi_first; spin_lock(&priv->msi_map_lock); 
bitmap_clear(priv->msi_map, first, num_req); spin_unlock(&priv->msi_map_lock); } static void alpine_msix_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data); phys_addr_t msg_addr = priv->addr; msg_addr |= (data->hwirq << 3); msg->address_hi = upper_32_bits(msg_addr); msg->address_lo = lower_32_bits(msg_addr); msg->data = 0; } static struct msi_domain_info alpine_msix_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX, .chip = &alpine_msix_irq_chip, }; static struct irq_chip middle_irq_chip = { .name = "alpine_msix_middle", .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_compose_msi_msg = alpine_msix_compose_msi_msg, }; static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, unsigned int virq, int sgi) { struct irq_fwspec fwspec; struct irq_data *d; int ret; if (!is_of_node(domain->parent->fwnode)) return -EINVAL; fwspec.fwnode = domain->parent->fwnode; fwspec.param_count = 3; fwspec.param[0] = 0; fwspec.param[1] = sgi; fwspec.param[2] = IRQ_TYPE_EDGE_RISING; ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (ret) return ret; d = irq_domain_get_irq_data(domain->parent, virq); d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); return 0; } static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct alpine_msix_data *priv = domain->host_data; int sgi, err, i; sgi = alpine_msix_allocate_sgi(priv, nr_irqs); if (sgi < 0) return sgi; for (i = 0; i < nr_irqs; i++) { err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i); if (err) goto err_sgi; irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i, &middle_irq_chip, priv); } return 0; err_sgi: while (--i >= 0) irq_domain_free_irqs_parent(domain, virq, i); alpine_msix_free_sgi(priv, sgi, 
nr_irqs); return err; } static void alpine_msix_middle_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d); irq_domain_free_irqs_parent(domain, virq, nr_irqs); alpine_msix_free_sgi(priv, d->hwirq, nr_irqs); } static const struct irq_domain_ops alpine_msix_middle_domain_ops = { .alloc = alpine_msix_middle_domain_alloc, .free = alpine_msix_middle_domain_free, }; static int alpine_msix_init_domains(struct alpine_msix_data *priv, struct device_node *node) { struct irq_domain *middle_domain, *msi_domain, *gic_domain; struct device_node *gic_node; gic_node = of_irq_find_parent(node); if (!gic_node) { pr_err("Failed to find the GIC node\n"); return -ENODEV; } gic_domain = irq_find_host(gic_node); if (!gic_domain) { pr_err("Failed to find the GIC domain\n"); return -ENXIO; } middle_domain = irq_domain_add_tree(NULL, &alpine_msix_middle_domain_ops, priv); if (!middle_domain) { pr_err("Failed to create the MSIX middle domain\n"); return -ENOMEM; } middle_domain->parent = gic_domain; msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &alpine_msix_domain_info, middle_domain); if (!msi_domain) { pr_err("Failed to create MSI domain\n"); irq_domain_remove(middle_domain); return -ENOMEM; } return 0; } static int alpine_msix_init(struct device_node *node, struct device_node *parent) { struct alpine_msix_data *priv; struct resource res; int ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->msi_map_lock); ret = of_address_to_resource(node, 0, &res); if (ret) { pr_err("Failed to allocate resource\n"); goto err_priv; } /* * The 20 least significant bits of addr provide direct information * regarding the interrupt destination. * * To select the primary GIC as the target GIC, bits [18:17] must be set * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set. 
*/ priv->addr = res.start & GENMASK_ULL(63,20); priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0; if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) { pr_err("Unable to parse MSI base\n"); ret = -EINVAL; goto err_priv; } if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) { pr_err("Unable to parse MSI numbers\n"); ret = -EINVAL; goto err_priv; } priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis), GFP_KERNEL); if (!priv->msi_map) { ret = -ENOMEM; goto err_priv; } pr_debug("Registering %d msixs, starting at %d\n", priv->num_spis, priv->spi_first); ret = alpine_msix_init_domains(priv, node); if (ret) goto err_map; return 0; err_map: kfree(priv->msi_map); err_priv: kfree(priv); return ret; } IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
gpl-2.0
flar2/evita-ElementalX
net/activity_stats.c
381
2421
/* net/activity_stats.c * * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Mike Chan (mike@android.com) */ #include <linux/proc_fs.h> #include <linux/suspend.h> #include <net/net_namespace.h> #define BUCKET_MAX 10 static unsigned long activity_stats[BUCKET_MAX]; static ktime_t last_transmit; static ktime_t suspend_time; static DEFINE_SPINLOCK(activity_lock); void activity_stats_update(void) { int i; unsigned long flags; ktime_t now; s64 delta; spin_lock_irqsave(&activity_lock, flags); now = ktime_get(); delta = ktime_to_ns(ktime_sub(now, last_transmit)); for (i = BUCKET_MAX - 1; i >= 0; i--) { if (delta < (1000000000ULL << i)) continue; activity_stats[i]++; last_transmit = now; break; } spin_unlock_irqrestore(&activity_lock, flags); } static int activity_stats_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int i; int len; char *p = page; if (off || count < (30 * BUCKET_MAX + 22)) return -ENOMEM; len = snprintf(p, count, "Min Bucket(sec) Count\n"); count -= len; p += len; for (i = 0; i < BUCKET_MAX; i++) { len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]); count -= len; p += len; } *eof = 1; return p - page; } static int activity_stats_notifier(struct notifier_block *nb, unsigned long event, void *dummy) { switch (event) { case PM_SUSPEND_PREPARE: suspend_time = ktime_get_real(); break; case PM_POST_SUSPEND: suspend_time = ktime_sub(ktime_get_real(), suspend_time); last_transmit = ktime_sub(last_transmit, suspend_time); } return 0; } static struct notifier_block 
activity_stats_notifier_block = { .notifier_call = activity_stats_notifier, }; static int __init activity_stats_init(void) { create_proc_read_entry("activity", S_IRUGO, init_net.proc_net_stat, activity_stats_read_proc, NULL); return register_pm_notifier(&activity_stats_notifier_block); } subsys_initcall(activity_stats_init);
gpl-2.0
thaskell1/linux
drivers/net/wan/z85230.c
893
39934
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk> * (c) Copyright 2000, 2001 Red Hat Inc * * Development of this driver was funded by Equiinet Ltd * http://www.equiinet.com * * ChangeLog: * * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the * unification of all the Z85x30 asynchronous drivers for real. * * DMA now uses get_free_page as kmalloc buffers may span a 64K * boundary. * * Modified for SMP safety and SMP locking by Alan Cox * <alan@lxorguk.ukuu.org.uk> * * Performance * * Z85230: * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud * X.25 is not unrealistic on all machines. DMA mode can in theory * handle T1/E1 quite nicely. In practice the limit seems to be about * 512Kbit->1Mbit depending on motherboard. * * Z85C30: * 64K will take DMA, 9600 baud X.25 should be ok. * * Z8530: * Synchronous mode without DMA is unlikely to pass about 2400 baud. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/gfp.h> #include <asm/dma.h> #include <asm/io.h> #define RT_LOCK #define RT_UNLOCK #include <linux/spinlock.h> #include "z85230.h" /** * z8530_read_port - Architecture specific interface function * @p: port to read * * Provided port access methods. The Comtrol SV11 requires no delays * between accesses and uses PC I/O. Some drivers may need a 5uS delay * * In the longer term this should become an architecture specific * section so that this can become a generic driver interface for all * platforms. 
For now we only handle PC I/O ports with or without the * dread 5uS sanity delay. * * The caller must hold sufficient locks to avoid violating the horrible * 5uS delay rule. */ static inline int z8530_read_port(unsigned long p) { u8 r=inb(Z8530_PORT_OF(p)); if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */ udelay(5); return r; } /** * z8530_write_port - Architecture specific interface function * @p: port to write * @d: value to write * * Write a value to a port with delays if need be. Note that the * caller must hold locks to avoid read/writes from other contexts * violating the 5uS rule * * In the longer term this should become an architecture specific * section so that this can become a generic driver interface for all * platforms. For now we only handle PC I/O ports with or without the * dread 5uS sanity delay. */ static inline void z8530_write_port(unsigned long p, u8 d) { outb(d,Z8530_PORT_OF(p)); if(p&Z8530_PORT_SLEEP) udelay(5); } static void z8530_rx_done(struct z8530_channel *c); static void z8530_tx_done(struct z8530_channel *c); /** * read_zsreg - Read a register from a Z85230 * @c: Z8530 channel to read from (2 per chip) * @reg: Register to read * FIXME: Use a spinlock. * * Most of the Z8530 registers are indexed off the control registers. * A read is done by writing to the control register and reading the * register back. The caller must hold the lock */ static inline u8 read_zsreg(struct z8530_channel *c, u8 reg) { if(reg) z8530_write_port(c->ctrlio, reg); return z8530_read_port(c->ctrlio); } /** * read_zsdata - Read the data port of a Z8530 channel * @c: The Z8530 channel to read the data port from * * The data port provides fast access to some things. We still * have all the 5uS delays to worry about. 
*/ static inline u8 read_zsdata(struct z8530_channel *c) { u8 r; r=z8530_read_port(c->dataio); return r; } /** * write_zsreg - Write to a Z8530 channel register * @c: The Z8530 channel * @reg: Register number * @val: Value to write * * Write a value to an indexed register. The caller must hold the lock * to honour the irritating delay rules. We know about register 0 * being fast to access. * * Assumes c->lock is held. */ static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val) { if(reg) z8530_write_port(c->ctrlio, reg); z8530_write_port(c->ctrlio, val); } /** * write_zsctrl - Write to a Z8530 control register * @c: The Z8530 channel * @val: Value to write * * Write directly to the control register on the Z8530 */ static inline void write_zsctrl(struct z8530_channel *c, u8 val) { z8530_write_port(c->ctrlio, val); } /** * write_zsdata - Write to a Z8530 control register * @c: The Z8530 channel * @val: Value to write * * Write directly to the data register on the Z8530 */ static inline void write_zsdata(struct z8530_channel *c, u8 val) { z8530_write_port(c->dataio, val); } /* * Register loading parameters for a dead port */ u8 z8530_dead_port[]= { 255 }; EXPORT_SYMBOL(z8530_dead_port); /* * Register loading parameters for currently supported circuit types */ /* * Data clocked by telco end. This is the correct data for the UK * "kilostream" service, and most other similar services. */ u8 z8530_hdlc_kilostream[]= { 4, SYNC_ENAB|SDLC|X1CLK, 2, 0, /* No vector */ 1, 0, 3, ENT_HM|RxCRC_ENAB|Rx8, 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR, 9, 0, /* Disable interrupts */ 6, 0xFF, 7, FLAG, 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/ 11, TCTRxCP, 14, DISDPLL, 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE, 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx, 9, NV|MIE|NORESET, 255 }; EXPORT_SYMBOL(z8530_hdlc_kilostream); /* * As above but for enhanced chips. 
*/ u8 z8530_hdlc_kilostream_85230[]= { 4, SYNC_ENAB|SDLC|X1CLK, 2, 0, /* No vector */ 1, 0, 3, ENT_HM|RxCRC_ENAB|Rx8, 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR, 9, 0, /* Disable interrupts */ 6, 0xFF, 7, FLAG, 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */ 11, TCTRxCP, 14, DISDPLL, 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE, 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx, 9, NV|MIE|NORESET, 23, 3, /* Extended mode AUTO TX and EOM*/ 255 }; EXPORT_SYMBOL(z8530_hdlc_kilostream_85230); /** * z8530_flush_fifo - Flush on chip RX FIFO * @c: Channel to flush * * Flush the receive FIFO. There is no specific option for this, we * blindly read bytes and discard them. Reading when there is no data * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes. * * All locking is handled for the caller. On return data may still be * present if it arrived during the flush. */ static void z8530_flush_fifo(struct z8530_channel *c) { read_zsreg(c, R1); read_zsreg(c, R1); read_zsreg(c, R1); read_zsreg(c, R1); if(c->dev->type==Z85230) { read_zsreg(c, R1); read_zsreg(c, R1); read_zsreg(c, R1); read_zsreg(c, R1); } } /** * z8530_rtsdtr - Control the outgoing DTS/RTS line * @c: The Z8530 channel to control; * @set: 1 to set, 0 to clear * * Sets or clears DTR/RTS on the requested line. All locking is handled * by the caller. For now we assume all boards use the actual RTS/DTR * on the chip. Apparently one or two don't. We'll scream about them * later. */ static void z8530_rtsdtr(struct z8530_channel *c, int set) { if (set) c->regs[5] |= (RTS | DTR); else c->regs[5] &= ~(RTS | DTR); write_zsreg(c, R5, c->regs[5]); } /** * z8530_rx - Handle a PIO receive event * @c: Z8530 channel to process * * Receive handler for receiving in PIO mode. This is much like the * async one but not quite the same or as complex * * Note: Its intended that this handler can easily be separated from * the main code to run realtime. That'll be needed for some machines * (eg to ever clock 64kbits on a sparc ;)). 
* * The RT_LOCK macros don't do anything now. Keep the code covered * by them as short as possible in all circumstances - clocks cost * baud. The interrupt handler is assumed to be atomic w.r.t. to * other code - this is true in the RT case too. * * We only cover the sync cases for this. If you want 2Mbit async * do it yourself but consider medical assistance first. This non DMA * synchronous mode is portable code. The DMA mode assumes PCI like * ISA DMA * * Called with the device lock held */ static void z8530_rx(struct z8530_channel *c) { u8 ch,stat; while(1) { /* FIFO empty ? */ if(!(read_zsreg(c, R0)&1)) break; ch=read_zsdata(c); stat=read_zsreg(c, R1); /* * Overrun ? */ if(c->count < c->max) { *c->dptr++=ch; c->count++; } if(stat&END_FR) { /* * Error ? */ if(stat&(Rx_OVR|CRC_ERR)) { /* Rewind the buffer and return */ if(c->skb) c->dptr=c->skb->data; c->count=0; if(stat&Rx_OVR) { pr_warn("%s: overrun\n", c->dev->name); c->rx_overrun++; } if(stat&CRC_ERR) { c->rx_crc_err++; /* printk("crc error\n"); */ } /* Shove the frame upstream */ } else { /* * Drop the lock for RX processing, or * there are deadlocks */ z8530_rx_done(c); write_zsctrl(c, RES_Rx_CRC); } } } /* * Clear irq */ write_zsctrl(c, ERR_RES); write_zsctrl(c, RES_H_IUS); } /** * z8530_tx - Handle a PIO transmit event * @c: Z8530 channel to process * * Z8530 transmit interrupt handler for the PIO mode. The basic * idea is to attempt to keep the FIFO fed. We fill as many bytes * in as possible, its quite possible that we won't keep up with the * data rate otherwise. */ static void z8530_tx(struct z8530_channel *c) { while(c->txcount) { /* FIFO full ? 
*/ if(!(read_zsreg(c, R0)&4)) return; c->txcount--; /* * Shovel out the byte */ write_zsreg(c, R8, *c->tx_ptr++); write_zsctrl(c, RES_H_IUS); /* We are about to underflow */ if(c->txcount==0) { write_zsctrl(c, RES_EOM_L); write_zsreg(c, R10, c->regs[10]&~ABUNDER); } } /* * End of frame TX - fire another one */ write_zsctrl(c, RES_Tx_P); z8530_tx_done(c); write_zsctrl(c, RES_H_IUS); } /** * z8530_status - Handle a PIO status exception * @chan: Z8530 channel to process * * A status event occurred in PIO synchronous mode. There are several * reasons the chip will bother us here. A transmit underrun means we * failed to feed the chip fast enough and just broke a packet. A DCD * change is a line up or down. */ static void z8530_status(struct z8530_channel *chan) { u8 status, altered; status = read_zsreg(chan, R0); altered = chan->status ^ status; chan->status = status; if (status & TxEOM) { /* printk("%s: Tx underrun.\n", chan->dev->name); */ chan->netdevice->stats.tx_fifo_errors++; write_zsctrl(chan, ERR_RES); z8530_tx_done(chan); } if (altered & chan->dcdcheck) { if (status & chan->dcdcheck) { pr_info("%s: DCD raised\n", chan->dev->name); write_zsreg(chan, R3, chan->regs[3] | RxENABLE); if (chan->netdevice) netif_carrier_on(chan->netdevice); } else { pr_info("%s: DCD lost\n", chan->dev->name); write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); z8530_flush_fifo(chan); if (chan->netdevice) netif_carrier_off(chan->netdevice); } } write_zsctrl(chan, RES_EXT_INT); write_zsctrl(chan, RES_H_IUS); } struct z8530_irqhandler z8530_sync = { z8530_rx, z8530_tx, z8530_status }; EXPORT_SYMBOL(z8530_sync); /** * z8530_dma_rx - Handle a DMA RX event * @chan: Channel to handle * * Non bus mastering DMA interfaces for the Z8x30 devices. This * is really pretty PC specific. The DMA mode means that most receive * events are handled by the DMA hardware. We get a kick here only if * a frame ended. 
*/ static void z8530_dma_rx(struct z8530_channel *chan) { if(chan->rxdma_on) { /* Special condition check only */ u8 status; read_zsreg(chan, R7); read_zsreg(chan, R6); status=read_zsreg(chan, R1); if(status&END_FR) { z8530_rx_done(chan); /* Fire up the next one */ } write_zsctrl(chan, ERR_RES); write_zsctrl(chan, RES_H_IUS); } else { /* DMA is off right now, drain the slow way */ z8530_rx(chan); } } /** * z8530_dma_tx - Handle a DMA TX event * @chan: The Z8530 channel to handle * * We have received an interrupt while doing DMA transmissions. It * shouldn't happen. Scream loudly if it does. */ static void z8530_dma_tx(struct z8530_channel *chan) { if(!chan->dma_tx) { pr_warn("Hey who turned the DMA off?\n"); z8530_tx(chan); return; } /* This shouldn't occur in DMA mode */ pr_err("DMA tx - bogus event!\n"); z8530_tx(chan); } /** * z8530_dma_status - Handle a DMA status exception * @chan: Z8530 channel to process * * A status event occurred on the Z8530. We receive these for two reasons * when in DMA mode. Firstly if we finished a packet transfer we get one * and kick the next packet out. Secondly we may see a DCD change. 
* */ static void z8530_dma_status(struct z8530_channel *chan) { u8 status, altered; status=read_zsreg(chan, R0); altered=chan->status^status; chan->status=status; if(chan->dma_tx) { if(status&TxEOM) { unsigned long flags; flags=claim_dma_lock(); disable_dma(chan->txdma); clear_dma_ff(chan->txdma); chan->txdma_on=0; release_dma_lock(flags); z8530_tx_done(chan); } } if (altered & chan->dcdcheck) { if (status & chan->dcdcheck) { pr_info("%s: DCD raised\n", chan->dev->name); write_zsreg(chan, R3, chan->regs[3] | RxENABLE); if (chan->netdevice) netif_carrier_on(chan->netdevice); } else { pr_info("%s: DCD lost\n", chan->dev->name); write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); z8530_flush_fifo(chan); if (chan->netdevice) netif_carrier_off(chan->netdevice); } } write_zsctrl(chan, RES_EXT_INT); write_zsctrl(chan, RES_H_IUS); } static struct z8530_irqhandler z8530_dma_sync = { z8530_dma_rx, z8530_dma_tx, z8530_dma_status }; static struct z8530_irqhandler z8530_txdma_sync = { z8530_rx, z8530_dma_tx, z8530_dma_status }; /** * z8530_rx_clear - Handle RX events from a stopped chip * @c: Z8530 channel to shut up * * Receive interrupt vectors for a Z8530 that is in 'parked' mode. * For machines with PCI Z85x30 cards, or level triggered interrupts * (eg the MacII) we must clear the interrupt cause or die. */ static void z8530_rx_clear(struct z8530_channel *c) { /* * Data and status bytes */ u8 stat; read_zsdata(c); stat=read_zsreg(c, R1); if(stat&END_FR) write_zsctrl(c, RES_Rx_CRC); /* * Clear irq */ write_zsctrl(c, ERR_RES); write_zsctrl(c, RES_H_IUS); } /** * z8530_tx_clear - Handle TX events from a stopped chip * @c: Z8530 channel to shut up * * Transmit interrupt vectors for a Z8530 that is in 'parked' mode. * For machines with PCI Z85x30 cards, or level triggered interrupts * (eg the MacII) we must clear the interrupt cause or die. 
*/ static void z8530_tx_clear(struct z8530_channel *c) { write_zsctrl(c, RES_Tx_P); write_zsctrl(c, RES_H_IUS); } /** * z8530_status_clear - Handle status events from a stopped chip * @chan: Z8530 channel to shut up * * Status interrupt vectors for a Z8530 that is in 'parked' mode. * For machines with PCI Z85x30 cards, or level triggered interrupts * (eg the MacII) we must clear the interrupt cause or die. */ static void z8530_status_clear(struct z8530_channel *chan) { u8 status=read_zsreg(chan, R0); if(status&TxEOM) write_zsctrl(chan, ERR_RES); write_zsctrl(chan, RES_EXT_INT); write_zsctrl(chan, RES_H_IUS); } struct z8530_irqhandler z8530_nop= { z8530_rx_clear, z8530_tx_clear, z8530_status_clear }; EXPORT_SYMBOL(z8530_nop); /** * z8530_interrupt - Handle an interrupt from a Z8530 * @irq: Interrupt number * @dev_id: The Z8530 device that is interrupting. * * A Z85[2]30 device has stuck its hand in the air for attention. * We scan both the channels on the chip for events and then call * the channel specific call backs for each channel that has events. * We have to use callback functions because the two channels can be * in different modes. * * Locking is done for the handlers. Note that locking is done * at the chip level (the 5uS delay issue is per chip not per * channel). c->lock for both channels points to dev->lock */ irqreturn_t z8530_interrupt(int irq, void *dev_id) { struct z8530_dev *dev=dev_id; u8 uninitialized_var(intr); static volatile int locker=0; int work=0; struct z8530_irqhandler *irqs; if(locker) { pr_err("IRQ re-enter\n"); return IRQ_NONE; } locker=1; spin_lock(&dev->lock); while(++work<5000) { intr = read_zsreg(&dev->chanA, R3); if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT))) break; /* This holds the IRQ status. 
On the 8530 you must read it from chan A even though it applies to the whole chip */ /* Now walk the chip and see what it is wanting - it may be an IRQ for someone else remember */ irqs=dev->chanA.irqs; if(intr & (CHARxIP|CHATxIP|CHAEXT)) { if(intr&CHARxIP) irqs->rx(&dev->chanA); if(intr&CHATxIP) irqs->tx(&dev->chanA); if(intr&CHAEXT) irqs->status(&dev->chanA); } irqs=dev->chanB.irqs; if(intr & (CHBRxIP|CHBTxIP|CHBEXT)) { if(intr&CHBRxIP) irqs->rx(&dev->chanB); if(intr&CHBTxIP) irqs->tx(&dev->chanB); if(intr&CHBEXT) irqs->status(&dev->chanB); } } spin_unlock(&dev->lock); if(work==5000) pr_err("%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr); /* Ok all done */ locker=0; return IRQ_HANDLED; } EXPORT_SYMBOL(z8530_interrupt); static const u8 reg_init[16]= { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0x55,0,0,0 }; /** * z8530_sync_open - Open a Z8530 channel for PIO * @dev: The network interface we are using * @c: The Z8530 channel to open in synchronous PIO mode * * Switch a Z8530 into synchronous mode without DMA assist. We * raise the RTS/DTR and commence network operation. */ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c) { unsigned long flags; spin_lock_irqsave(c->lock, flags); c->sync = 1; c->mtu = dev->mtu+64; c->count = 0; c->skb = NULL; c->skb2 = NULL; c->irqs = &z8530_sync; /* This loads the double buffer up */ z8530_rx_done(c); /* Load the frame ring */ z8530_rx_done(c); /* Load the backup frame */ z8530_rtsdtr(c,1); c->dma_tx = 0; c->regs[R1]|=TxINT_ENAB; write_zsreg(c, R1, c->regs[R1]); write_zsreg(c, R3, c->regs[R3]|RxENABLE); spin_unlock_irqrestore(c->lock, flags); return 0; } EXPORT_SYMBOL(z8530_sync_open); /** * z8530_sync_close - Close a PIO Z8530 channel * @dev: Network device to close * @c: Z8530 channel to disassociate and move to idle * * Close down a Z8530 interface and switch its interrupt handlers * to discard future events. 
*/ int z8530_sync_close(struct net_device *dev, struct z8530_channel *c) { u8 chk; unsigned long flags; spin_lock_irqsave(c->lock, flags); c->irqs = &z8530_nop; c->max = 0; c->sync = 0; chk=read_zsreg(c,R0); write_zsreg(c, R3, c->regs[R3]); z8530_rtsdtr(c,0); spin_unlock_irqrestore(c->lock, flags); return 0; } EXPORT_SYMBOL(z8530_sync_close); /** * z8530_sync_dma_open - Open a Z8530 for DMA I/O * @dev: The network device to attach * @c: The Z8530 channel to configure in sync DMA mode. * * Set up a Z85x30 device for synchronous DMA in both directions. Two * ISA DMA channels must be available for this to work. We assume ISA * DMA driven I/O and PC limits on access. */ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c) { unsigned long cflags, dflags; c->sync = 1; c->mtu = dev->mtu+64; c->count = 0; c->skb = NULL; c->skb2 = NULL; /* * Load the DMA interfaces up */ c->rxdma_on = 0; c->txdma_on = 0; /* * Allocate the DMA flip buffers. Limit by page size. * Everyone runs 1500 mtu or less on wan links so this * should be fine. 
*/ if(c->mtu > PAGE_SIZE/2) return -EMSGSIZE; c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if(c->rx_buf[0]==NULL) return -ENOBUFS; c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2; c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if(c->tx_dma_buf[0]==NULL) { free_page((unsigned long)c->rx_buf[0]); c->rx_buf[0]=NULL; return -ENOBUFS; } c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2; c->tx_dma_used=0; c->dma_tx = 1; c->dma_num=0; c->dma_ready=1; /* * Enable DMA control mode */ spin_lock_irqsave(c->lock, cflags); /* * TX DMA via DIR/REQ */ c->regs[R14]|= DTRREQ; write_zsreg(c, R14, c->regs[R14]); c->regs[R1]&= ~TxINT_ENAB; write_zsreg(c, R1, c->regs[R1]); /* * RX DMA via W/Req */ c->regs[R1]|= WT_FN_RDYFN; c->regs[R1]|= WT_RDY_RT; c->regs[R1]|= INT_ERR_Rx; c->regs[R1]&= ~TxINT_ENAB; write_zsreg(c, R1, c->regs[R1]); c->regs[R1]|= WT_RDY_ENAB; write_zsreg(c, R1, c->regs[R1]); /* * DMA interrupts */ /* * Set up the DMA configuration */ dflags=claim_dma_lock(); disable_dma(c->rxdma); clear_dma_ff(c->rxdma); set_dma_mode(c->rxdma, DMA_MODE_READ|0x10); set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0])); set_dma_count(c->rxdma, c->mtu); enable_dma(c->rxdma); disable_dma(c->txdma); clear_dma_ff(c->txdma); set_dma_mode(c->txdma, DMA_MODE_WRITE); disable_dma(c->txdma); release_dma_lock(dflags); /* * Select the DMA interrupt handlers */ c->rxdma_on = 1; c->txdma_on = 1; c->tx_dma_used = 1; c->irqs = &z8530_dma_sync; z8530_rtsdtr(c,1); write_zsreg(c, R3, c->regs[R3]|RxENABLE); spin_unlock_irqrestore(c->lock, cflags); return 0; } EXPORT_SYMBOL(z8530_sync_dma_open); /** * z8530_sync_dma_close - Close down DMA I/O * @dev: Network device to detach * @c: Z8530 channel to move into discard mode * * Shut down a DMA mode synchronous interface. Halt the DMA, and * free the buffers. 
*/ int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c) { u8 chk; unsigned long flags; c->irqs = &z8530_nop; c->max = 0; c->sync = 0; /* * Disable the PC DMA channels */ flags=claim_dma_lock(); disable_dma(c->rxdma); clear_dma_ff(c->rxdma); c->rxdma_on = 0; disable_dma(c->txdma); clear_dma_ff(c->txdma); release_dma_lock(flags); c->txdma_on = 0; c->tx_dma_used = 0; spin_lock_irqsave(c->lock, flags); /* * Disable DMA control mode */ c->regs[R1]&= ~WT_RDY_ENAB; write_zsreg(c, R1, c->regs[R1]); c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx); c->regs[R1]|= INT_ALL_Rx; write_zsreg(c, R1, c->regs[R1]); c->regs[R14]&= ~DTRREQ; write_zsreg(c, R14, c->regs[R14]); if(c->rx_buf[0]) { free_page((unsigned long)c->rx_buf[0]); c->rx_buf[0]=NULL; } if(c->tx_dma_buf[0]) { free_page((unsigned long)c->tx_dma_buf[0]); c->tx_dma_buf[0]=NULL; } chk=read_zsreg(c,R0); write_zsreg(c, R3, c->regs[R3]); z8530_rtsdtr(c,0); spin_unlock_irqrestore(c->lock, flags); return 0; } EXPORT_SYMBOL(z8530_sync_dma_close); /** * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA * @dev: The network device to attach * @c: The Z8530 channel to configure in sync DMA mode. * * Set up a Z85x30 device for synchronous DMA transmission. One * ISA DMA channel must be available for this to work. The receive * side is run in PIO mode, but then it has the bigger FIFO. */ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c) { unsigned long cflags, dflags; printk("Opening sync interface for TX-DMA\n"); c->sync = 1; c->mtu = dev->mtu+64; c->count = 0; c->skb = NULL; c->skb2 = NULL; /* * Allocate the DMA flip buffers. Limit by page size. * Everyone runs 1500 mtu or less on wan links so this * should be fine. 
*/ if(c->mtu > PAGE_SIZE/2) return -EMSGSIZE; c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if(c->tx_dma_buf[0]==NULL) return -ENOBUFS; c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2; spin_lock_irqsave(c->lock, cflags); /* * Load the PIO receive ring */ z8530_rx_done(c); z8530_rx_done(c); /* * Load the DMA interfaces up */ c->rxdma_on = 0; c->txdma_on = 0; c->tx_dma_used=0; c->dma_num=0; c->dma_ready=1; c->dma_tx = 1; /* * Enable DMA control mode */ /* * TX DMA via DIR/REQ */ c->regs[R14]|= DTRREQ; write_zsreg(c, R14, c->regs[R14]); c->regs[R1]&= ~TxINT_ENAB; write_zsreg(c, R1, c->regs[R1]); /* * Set up the DMA configuration */ dflags = claim_dma_lock(); disable_dma(c->txdma); clear_dma_ff(c->txdma); set_dma_mode(c->txdma, DMA_MODE_WRITE); disable_dma(c->txdma); release_dma_lock(dflags); /* * Select the DMA interrupt handlers */ c->rxdma_on = 0; c->txdma_on = 1; c->tx_dma_used = 1; c->irqs = &z8530_txdma_sync; z8530_rtsdtr(c,1); write_zsreg(c, R3, c->regs[R3]|RxENABLE); spin_unlock_irqrestore(c->lock, cflags); return 0; } EXPORT_SYMBOL(z8530_sync_txdma_open); /** * z8530_sync_txdma_close - Close down a TX driven DMA channel * @dev: Network device to detach * @c: Z8530 channel to move into discard mode * * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA, * and free the buffers. 
*/ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c) { unsigned long dflags, cflags; u8 chk; spin_lock_irqsave(c->lock, cflags); c->irqs = &z8530_nop; c->max = 0; c->sync = 0; /* * Disable the PC DMA channels */ dflags = claim_dma_lock(); disable_dma(c->txdma); clear_dma_ff(c->txdma); c->txdma_on = 0; c->tx_dma_used = 0; release_dma_lock(dflags); /* * Disable DMA control mode */ c->regs[R1]&= ~WT_RDY_ENAB; write_zsreg(c, R1, c->regs[R1]); c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx); c->regs[R1]|= INT_ALL_Rx; write_zsreg(c, R1, c->regs[R1]); c->regs[R14]&= ~DTRREQ; write_zsreg(c, R14, c->regs[R14]); if(c->tx_dma_buf[0]) { free_page((unsigned long)c->tx_dma_buf[0]); c->tx_dma_buf[0]=NULL; } chk=read_zsreg(c,R0); write_zsreg(c, R3, c->regs[R3]); z8530_rtsdtr(c,0); spin_unlock_irqrestore(c->lock, cflags); return 0; } EXPORT_SYMBOL(z8530_sync_txdma_close); /* * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny * it exists... */ static const char *z8530_type_name[]={ "Z8530", "Z85C30", "Z85230" }; /** * z8530_describe - Uniformly describe a Z8530 port * @dev: Z8530 device to describe * @mapping: string holding mapping type (eg "I/O" or "Mem") * @io: the port value in question * * Describe a Z8530 in a standard format. We must pass the I/O as * the port offset isn't predictable. The main reason for this function * is to try and get a common format of report. 
*/ void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io) { pr_info("%s: %s found at %s 0x%lX, IRQ %d\n", dev->name, z8530_type_name[dev->type], mapping, Z8530_PORT_OF(io), dev->irq); } EXPORT_SYMBOL(z8530_describe); /* * Locked operation part of the z8530 init code */ static inline int do_z8530_init(struct z8530_dev *dev) { /* NOP the interrupt handlers first - we might get a floating IRQ transition when we reset the chip */ dev->chanA.irqs=&z8530_nop; dev->chanB.irqs=&z8530_nop; dev->chanA.dcdcheck=DCD; dev->chanB.dcdcheck=DCD; /* Reset the chip */ write_zsreg(&dev->chanA, R9, 0xC0); udelay(200); /* Now check its valid */ write_zsreg(&dev->chanA, R12, 0xAA); if(read_zsreg(&dev->chanA, R12)!=0xAA) return -ENODEV; write_zsreg(&dev->chanA, R12, 0x55); if(read_zsreg(&dev->chanA, R12)!=0x55) return -ENODEV; dev->type=Z8530; /* * See the application note. */ write_zsreg(&dev->chanA, R15, 0x01); /* * If we can set the low bit of R15 then * the chip is enhanced. */ if(read_zsreg(&dev->chanA, R15)==0x01) { /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */ /* Put a char in the fifo */ write_zsreg(&dev->chanA, R8, 0); if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP) dev->type = Z85230; /* Has a FIFO */ else dev->type = Z85C30; /* Z85C30, 1 byte FIFO */ } /* * The code assumes R7' and friends are * off. Use write_zsext() for these and keep * this bit clear. */ write_zsreg(&dev->chanA, R15, 0); /* * At this point it looks like the chip is behaving */ memcpy(dev->chanA.regs, reg_init, 16); memcpy(dev->chanB.regs, reg_init ,16); return 0; } /** * z8530_init - Initialise a Z8530 device * @dev: Z8530 device to initialise. * * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device * is present, identify the type and then program it to hopefully * keep quite and behave. This matters a lot, a Z8530 in the wrong * state will sometimes get into stupid modes generating 10Khz * interrupt streams and the like. 
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setp.
 *
 *	Return 0 for success, or a negative value indicating the problem
 *	in errno form.
 */
int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock; both channels share it */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

EXPORT_SYMBOL(z8530_init);

/**
 *	z8530_shutdown - Shutdown a Z8530 device
 *	@dev: The Z8530 chip to shutdown
 *
 *	We set the interrupt handlers to silence any interrupts. We then
 *	reset the chip and wait 100uS to be sure the reset completed. Just
 *	in case the caller then tries to do stuff.
 *
 *	This is called without the lock held
 */
int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 *	z8530_channel_load - Load channel data
 *	@c: Z8530 channel to configure
 *	@rtable: table of register, value pairs
 *	FIXME: ioctl to allow user uploaded tables
 *
 *	Load a Z8530 channel up from the system data. We use +16 to
 *	indicate the "prime" registers. The value 255 terminates the
 *	table.
 */
int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		/* Registers above 0x0F address the "prime" bank: select it
		   via the low bit of R15 around the write */
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		/* NOTE(review): reg may be up to 0x1F here (prime registers
		   are encoded as +16) - confirm c->regs[] is sized to hold
		   that index, otherwise this shadow-copy write is out of
		   bounds. */
		c->regs[reg]=*rtable++;
	}
	/* Reset the soft state of the channel to "idle, ready to sync" */
	c->rx_function=z8530_null_rx;
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);

/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
 *
 *	This is the speed sensitive side of transmission. If we are called
 *	and no buffer is being transmitted we commence the next buffer. If
 *	nothing is queued we idle the sync.
 *
 *	Note: We are handling this code path in the interrupt path, keep it
 *	fast or bad things will happen.
 *
 *	Called with the lock held.
 */
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;

	/* A frame is already going out - nothing to do */
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;

		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
			 */
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */
			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			/* Stuff the chip FIFO by hand until it fills */
			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}
		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}

/**
 *	z8530_tx_done - TX complete callback
 *	@c: The channel that completed a transmit.
 *
 *	This is called when we complete a packet send. We wake the queue,
 *	start the next packet going and then free the buffer of the existing
 *	packet. This code is fairly timing sensitive.
 *
 *	Called with the register lock held.
 */
static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen.*/
	if (c->tx_skb == NULL)
		return;

	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	dev_kfree_skb_irq(skb);
}

/**
 *	z8530_null_rx - Discard a packet
 *	@c: The channel the packet arrived on
 *	@skb: The buffer
 *
 *	We point the receive handler at this function when idle. Instead
 *	of processing the frames we get to throw them away.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
 *	z8530_rx_done - Receive completion callback
 *	@c: The channel that completed a receive
 *
 *	A new packet is complete. Our goal here is to get back into receive
 *	mode as fast as possible. On the Z85230 we could change to using
 *	ESCC mode, but on the older chips we have no choice.
We flip to the * new buffer immediately in DMA mode so that the DMA of the next * frame can occur while we are copying the previous buffer to an sk_buff * * Called with the lock held */ static void z8530_rx_done(struct z8530_channel *c) { struct sk_buff *skb; int ct; /* * Is our receive engine in DMA mode */ if(c->rxdma_on) { /* * Save the ready state and the buffer currently * being used as the DMA target */ int ready=c->dma_ready; unsigned char *rxb=c->rx_buf[c->dma_num]; unsigned long flags; /* * Complete this DMA. Necessary to find the length */ flags=claim_dma_lock(); disable_dma(c->rxdma); clear_dma_ff(c->rxdma); c->rxdma_on=0; ct=c->mtu-get_dma_residue(c->rxdma); if(ct<0) ct=2; /* Shit happens.. */ c->dma_ready=0; /* * Normal case: the other slot is free, start the next DMA * into it immediately. */ if(ready) { c->dma_num^=1; set_dma_mode(c->rxdma, DMA_MODE_READ|0x10); set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num])); set_dma_count(c->rxdma, c->mtu); c->rxdma_on = 1; enable_dma(c->rxdma); /* Stop any frames that we missed the head of from passing */ write_zsreg(c, R0, RES_Rx_CRC); } else /* Can't occur as we dont reenable the DMA irq until after the flip is done */ netdev_warn(c->netdevice, "DMA flip overrun!\n"); release_dma_lock(flags); /* * Shove the old buffer into an sk_buff. We can't DMA * directly into one on a PC - it might be above the 16Mb * boundary. Optimisation - we could check to see if we * can avoid the copy. Optimisation 2 - make the memcpy * a copychecksum. */ skb = dev_alloc_skb(ct); if (skb == NULL) { c->netdevice->stats.rx_dropped++; netdev_warn(c->netdevice, "Memory squeeze\n"); } else { skb_put(skb, ct); skb_copy_to_linear_data(skb, rxb, ct); c->netdevice->stats.rx_packets++; c->netdevice->stats.rx_bytes += ct; } c->dma_ready = 1; } else { RT_LOCK; skb = c->skb; /* * The game we play for non DMA is similar. We want to * get the controller set up for the next packet as fast * as possible. 
We potentially only have one byte + the * fifo length for this. Thus we want to flip to the new * buffer and then mess around copying and allocating * things. For the current case it doesn't matter but * if you build a system where the sync irq isn't blocked * by the kernel IRQ disable then you need only block the * sync IRQ for the RT_LOCK area. * */ ct=c->count; c->skb = c->skb2; c->count = 0; c->max = c->mtu; if (c->skb) { c->dptr = c->skb->data; c->max = c->mtu; } else { c->count = 0; c->max = 0; } RT_UNLOCK; c->skb2 = dev_alloc_skb(c->mtu); if (c->skb2 == NULL) netdev_warn(c->netdevice, "memory squeeze\n"); else skb_put(c->skb2, c->mtu); c->netdevice->stats.rx_packets++; c->netdevice->stats.rx_bytes += ct; } /* * If we received a frame we must now process it. */ if (skb) { skb_trim(skb, ct); c->rx_function(c, skb); } else { c->netdevice->stats.rx_dropped++; netdev_err(c->netdevice, "Lost a frame\n"); } } /** * spans_boundary - Check a packet can be ISA DMA'd * @skb: The buffer to check * * Returns true if the buffer cross a DMA boundary on a PC. The poor * thing can only DMA within a 64K block not across the edges of it. */ static inline int spans_boundary(struct sk_buff *skb) { unsigned long a=(unsigned long)skb->data; a^=(a+skb->len); if(a&0x00010000) /* If the 64K bit is different.. */ return 1; return 0; } /** * z8530_queue_xmit - Queue a packet * @c: The channel to use * @skb: The packet to kick down the channel * * Queue a packet for transmission. Because we have rather * hard to hit interrupt latencies for the Z85230 per packet * even in DMA mode we do the flip to DMA buffer if needed here * not in the IRQ. * * Called from the network code. The lock is not held at this * point. 
 */
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	/* Only a single frame may be pending; the queue stays stopped
	   until z8530_tx_begin() wakes it */
	if(c->tx_next_skb)
		return NETDEV_TX_BUSY;

	/* PC SPECIFIC - DMA limits */
	/*
	 *	If we will DMA the transmit and its gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */
	if(c->dma_tx &&
		((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
		 *	has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);

/*
 *	Module support
 */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	/* banner carries its own KERN_INFO prefix and no format
	   specifiers, so passing it directly is safe here */
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");
gpl-2.0
backup-kb/pnotify-linux-4.1.6
drivers/regulator/hi6421-regulator.c
1149
19410
/*
 * Device driver for regulators in Hi6421 IC
 *
 * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
 *              http://www.hisilicon.com
 * Copyright (c) <2013-2014> Linaro Ltd.
 *              http://www.linaro.org
 *
 * Author: Guodong Xu <guodong.xu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/hi6421-pmic.h>

/*
 * struct hi6421_regulator_pdata - Hi6421 regulator data of platform device
 * @lock: mutex to serialize regulator enable
 */
struct hi6421_regulator_pdata {
	struct mutex lock;
};

/*
 * struct hi6421_regulator_info - hi6421 regulator information
 * @desc: regulator description
 * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep
 * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
 */
struct hi6421_regulator_info {
	struct regulator_desc	desc;
	u8			mode_mask;
	u32			eco_microamp;
};

/* HI6421 regulators */
enum hi6421_regulator_id {
	HI6421_LDO0,
	HI6421_LDO1,
	HI6421_LDO2,
	HI6421_LDO3,
	HI6421_LDO4,
	HI6421_LDO5,
	HI6421_LDO6,
	HI6421_LDO7,
	HI6421_LDO8,
	HI6421_LDO9,
	HI6421_LDO10,
	HI6421_LDO11,
	HI6421_LDO12,
	HI6421_LDO13,
	HI6421_LDO14,
	HI6421_LDO15,
	HI6421_LDO16,
	HI6421_LDO17,
	HI6421_LDO18,
	HI6421_LDO19,
	HI6421_LDO20,
	HI6421_LDOAUDIO,
	HI6421_BUCK0,
	HI6421_BUCK1,
	HI6421_BUCK2,
	HI6421_BUCK3,
	HI6421_BUCK4,
	HI6421_BUCK5,
	HI6421_NUM_REGULATORS,
};

#define HI6421_REGULATOR_OF_MATCH(_name, id)				\
{									\
	.name = #_name,							\
	.driver_data = (void *) HI6421_##id,				\
}

static struct of_regulator_match hi6421_regulator_match[] = {
	HI6421_REGULATOR_OF_MATCH(hi6421_vout0, LDO0),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout1, LDO1),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout2, LDO2),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout3, LDO3),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout4, LDO4),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout5, LDO5),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout6, LDO6),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout7, LDO7),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout8, LDO8),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout9, LDO9),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout10, LDO10),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout11, LDO11),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout12, LDO12),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout13, LDO13),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout14, LDO14),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout15, LDO15),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout16, LDO16),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout17, LDO17),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout18, LDO18),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout19, LDO19),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout20, LDO20),
	HI6421_REGULATOR_OF_MATCH(hi6421_vout_audio, LDOAUDIO),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck0, BUCK0),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck1, BUCK1),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck2, BUCK2),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck3, BUCK3),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck4, BUCK4),
	HI6421_REGULATOR_OF_MATCH(hi6421_buck5, BUCK5),
};

/* LDO 0, 4~7, 9~14, 16~20 have same voltage table. */
static const unsigned int ldo_0_voltages[] = {
	1500000, 1800000, 2400000, 2500000,
	2600000, 2700000, 2850000, 3000000,
};

/* LDO 8, 15 have same voltage table. */
static const unsigned int ldo_8_voltages[] = {
	1500000, 1800000, 2400000, 2600000,
	2700000, 2850000, 3000000, 3300000,
};

/* Ranges are sorted in ascending order. */
static const struct regulator_linear_range ldo_audio_volt_range[] = {
	REGULATOR_LINEAR_RANGE(2800000, 0, 3, 50000),
	REGULATOR_LINEAR_RANGE(3000000, 4, 7, 100000),
};

static const unsigned int buck_3_voltages[] = {
	 950000, 1050000, 1100000, 1117000,
	1134000, 1150000, 1167000, 1200000,
};

static const unsigned int buck_4_voltages[] = {
	1150000, 1200000, 1250000, 1350000,
	1700000, 1800000, 1900000, 2000000,
};

static const unsigned int buck_5_voltages[] = {
	1150000, 1200000, 1250000, 1350000,
	1600000, 1700000, 1800000, 1900000,
};

static const struct regulator_ops hi6421_ldo_ops;
static const struct regulator_ops hi6421_ldo_linear_ops;
static const struct regulator_ops hi6421_ldo_linear_range_ops;
static const struct regulator_ops hi6421_buck012_ops;
static const struct regulator_ops hi6421_buck345_ops;

#define HI6421_LDO_ENABLE_TIME (350)

/*
 * _id - LDO id name string
 * v_table - voltage table
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load uppler limit in uA
 */
#define HI6421_LDO(_id, v_table, vreg, vmask, ereg, emask,		\
		   odelay, ecomask, ecoamp)				\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name		= #_id,				\
			.ops		= &hi6421_ldo_ops,		\
			.type		= REGULATOR_VOLTAGE,		\
			.id		= HI6421_##_id,			\
			.owner		= THIS_MODULE,			\
			.n_voltages	= ARRAY_SIZE(v_table),		\
			.volt_table	= v_table,			\
			.vsel_reg	= HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask	= vmask,			\
			.enable_reg	= HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask	= emask,			\
			.enable_time	= HI6421_LDO_ENABLE_TIME,	\
			.off_on_delay	= odelay,			\
		},							\
		.mode_mask	= ecomask,				\
		.eco_microamp	= ecoamp,				\
	}

/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step
 *
 * _id - LDO id name string
 * _min_uV - minimum voltage supported in uV
 * n_volt - number of votages available
 * vstep - voltage increase in each linear step in uV
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load uppler limit in uA
 */
#define HI6421_LDO_LINEAR(_id, _min_uV, n_volt, vstep, vreg, vmask,	\
			  ereg, emask, odelay, ecomask, ecoamp)		\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name		= #_id,				\
			.ops		= &hi6421_ldo_linear_ops,	\
			.type		= REGULATOR_VOLTAGE,		\
			.id		= HI6421_##_id,			\
			.owner		= THIS_MODULE,			\
			.min_uV		= _min_uV,			\
			.n_voltages	= n_volt,			\
			.uV_step	= vstep,			\
			.vsel_reg	= HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask	= vmask,			\
			.enable_reg	= HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask	= emask,			\
			.enable_time	= HI6421_LDO_ENABLE_TIME,	\
			.off_on_delay	= odelay,			\
		},							\
		.mode_mask	= ecomask,				\
		.eco_microamp	= ecoamp,				\
	}

/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges
 *
 * _id - LDO id name string
 * n_volt - number of votages available
 * volt_ranges - array of regulator_linear_range
 * vstep - voltage increase in each linear step in uV
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load uppler limit in uA
 */
#define HI6421_LDO_LINEAR_RANGE(_id, n_volt, volt_ranges, vreg, vmask,	\
				ereg, emask, odelay, ecomask, ecoamp)	\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name		= #_id,				\
			.ops		= &hi6421_ldo_linear_range_ops,	\
			.type		= REGULATOR_VOLTAGE,		\
			.id		= HI6421_##_id,			\
			.owner		= THIS_MODULE,			\
			.n_voltages	= n_volt,			\
			.linear_ranges	= volt_ranges,			\
			.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
			.vsel_reg	= HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask	= vmask,			\
			.enable_reg	= HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask	= emask,			\
			.enable_time	= HI6421_LDO_ENABLE_TIME,	\
			.off_on_delay	= odelay,			\
		},							\
		.mode_mask	= ecomask,				\
		.eco_microamp	= ecoamp,				\
	}

/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step
 *
 * _id - BUCK0/1/2 id name string
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * sleepmask - mask of sleep mode
 * etime - enable time
 * odelay - off/on delay time in uS
 */
#define HI6421_BUCK012(_id, vreg, vmask, ereg, emask, sleepmask,	\
		       etime, odelay)					\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name		= #_id,				\
			.ops		= &hi6421_buck012_ops,		\
			.type		= REGULATOR_VOLTAGE,		\
			.id		= HI6421_##_id,			\
			.owner		= THIS_MODULE,			\
			.min_uV		= 700000,			\
			.n_voltages	= 128,				\
			.uV_step	= 7086,				\
			.vsel_reg	= HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask	= vmask,			\
			.enable_reg	= HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask	= emask,			\
			.enable_time	= etime,			\
			.off_on_delay	= odelay,			\
		},							\
		.mode_mask	= sleepmask,				\
	}

/* HI6421 BUCK3/4/5 share similar configurations as LDOs, with exception
 * that it supports SLEEP mode, so has different .ops.
 *
 * _id - LDO id name string
 * v_table - voltage table
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * sleepmask - mask of sleep mode
 */
#define HI6421_BUCK345(_id, v_table, vreg, vmask, ereg, emask,		\
		       odelay, sleepmask)				\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name		= #_id,				\
			.ops		= &hi6421_buck345_ops,		\
			.type		= REGULATOR_VOLTAGE,		\
			.id		= HI6421_##_id,			\
			.owner		= THIS_MODULE,			\
			.n_voltages	= ARRAY_SIZE(v_table),		\
			.volt_table	= v_table,			\
			.vsel_reg	= HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask	= vmask,			\
			.enable_reg	= HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask	= emask,			\
			.enable_time	= HI6421_LDO_ENABLE_TIME,	\
			.off_on_delay	= odelay,			\
		},							\
		.mode_mask	= sleepmask,				\
	}

/* HI6421 regulator information */
static struct hi6421_regulator_info
		hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
	HI6421_LDO(LDO0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
		   10000, 0x20, 8000),
	HI6421_LDO_LINEAR(LDO1, 1700000, 4, 100000, 0x21, 0x03, 0x21, 0x10,
			  10000, 0x20, 5000),
	HI6421_LDO_LINEAR(LDO2, 1050000, 8, 50000, 0x22, 0x07, 0x22, 0x10,
			  20000, 0x20, 8000),
	HI6421_LDO_LINEAR(LDO3, 1050000, 8, 50000, 0x23, 0x07, 0x23, 0x10,
			  20000, 0x20, 8000),
	HI6421_LDO(LDO4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
		   20000, 0x20, 5000),
	HI6421_LDO(LDO8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO_LINEAR_RANGE(LDOAUDIO, 8, ldo_audio_volt_range, 0x36,
				0x70, 0x36, 0x01, 40000, 0x02, 5000),
	HI6421_BUCK012(BUCK0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400, 20000),
	HI6421_BUCK012(BUCK1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400, 20000),
	HI6421_BUCK012(BUCK2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350, 100),
	HI6421_BUCK345(BUCK3, buck_3_voltages, 0x13, 0x07, 0x12, 0x01,
		       20000, 0x10),
	HI6421_BUCK345(BUCK4, buck_4_voltages, 0x15, 0x07, 0x14, 0x01,
		       20000, 0x10),
	HI6421_BUCK345(BUCK5, buck_5_voltages, 0x17, 0x07, 0x16, 0x01,
		       20000, 0x10),
};

static int hi6421_regulator_enable(struct regulator_dev *rdev)
{
	struct hi6421_regulator_pdata *pdata;
	int ret;

	pdata = dev_get_drvdata(rdev->dev.parent);
	/* hi6421 spec requires regulator enablement must be serialized:
	 *  - Because when BUCK, LDO switching from off to on, it will have
	 *    a huge instantaneous current; so you can not turn on two or
	 *    more LDO or BUCKs simultaneously, or it may burn the chip.
	 */
	mutex_lock(&pdata->lock);
	/* FIX: propagate the regmap helper's result - the original
	 * discarded it and returned 0 unconditionally, so a failed
	 * register write was reported to the core as success. */
	ret = regulator_enable_regmap(rdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

/* Report IDLE when the LDO's ECO bit is set, NORMAL otherwise. */
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
	u32 reg_val;

	regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
	if (reg_val & info->mode_mask)
		return REGULATOR_MODE_IDLE;

	return REGULATOR_MODE_NORMAL;
}

/* Report STANDBY when the BUCK's sleep bit is set, NORMAL otherwise. */
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
	u32 reg_val;

	regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
	if (reg_val & info->mode_mask)
		return REGULATOR_MODE_STANDBY;

	return REGULATOR_MODE_NORMAL;
}

static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
						unsigned int mode)
{
	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
	u32 new_mode;

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		new_mode = 0;
		break;
	case REGULATOR_MODE_IDLE:
		new_mode = info->mode_mask;
		break;
	default:
		return -EINVAL;
	}

	/* set mode */
	regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
			   info->mode_mask, new_mode);

	return 0;
}

static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
						unsigned int mode)
{
	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
	u32 new_mode;

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		new_mode = 0;
		break;
	case REGULATOR_MODE_STANDBY:
		new_mode = info->mode_mask;
		break;
	default:
		return -EINVAL;
	}

	/* set mode */
	regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
			   info->mode_mask, new_mode);

	return 0;
}

/*
 * FIX: marked static - this helper is referenced only via the ops
 * tables below, so it has no business in the global namespace.
 */
static unsigned int
hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
			int input_uV, int output_uV, int load_uA)
{
	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);

	if (load_uA > info->eco_microamp)
		return REGULATOR_MODE_NORMAL;

	return REGULATOR_MODE_IDLE;
}

static const struct regulator_ops hi6421_ldo_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};

static const struct regulator_ops hi6421_ldo_linear_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};

static const struct regulator_ops hi6421_ldo_linear_range_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};

static const struct regulator_ops hi6421_buck012_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_buck_get_mode,
	.set_mode = hi6421_regulator_buck_set_mode,
};

static const struct regulator_ops hi6421_buck345_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_buck_get_mode,
	.set_mode = hi6421_regulator_buck_set_mode,
};

/* Register one regulator with the core; returns 0 or a negative errno. */
static int hi6421_regulator_register(struct platform_device *pdev,
				     struct regmap *rmap,
				     struct regulator_init_data *init_data,
				     int id, struct device_node *np)
{
	struct hi6421_regulator_info *info = NULL;
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	/* assign per-regulator data */
	info = &hi6421_regulator_info[id];

	config.dev = &pdev->dev;
	config.init_data = init_data;
	config.driver_data = info;
	config.regmap = rmap;
	config.of_node = np;

	/* register regulator with framework */
	rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "failed to register regulator %s\n",
			info->desc.name);
		return PTR_ERR(rdev);
	}

	return 0;
}

static int hi6421_regulator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct hi6421_pmic *pmic;
	struct hi6421_regulator_pdata *pdata;
	int i, ret = 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	mutex_init(&pdata->lock);
	platform_set_drvdata(pdev, pdata);

	np = of_get_child_by_name(dev->parent->of_node, "regulators");
	if (!np)
		return -ENODEV;

	ret = of_regulator_match(dev, np,
				 hi6421_regulator_match,
				 ARRAY_SIZE(hi6421_regulator_match));
	of_node_put(np);
	if (ret < 0) {
		dev_err(dev, "Error parsing regulator init data: %d\n", ret);
		return ret;
	}

	pmic = dev_get_drvdata(dev->parent);

	for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
		ret = hi6421_regulator_register(pdev, pmic->regmap,
				hi6421_regulator_match[i].init_data, i,
				hi6421_regulator_match[i].of_node);
		if (ret)
			return ret;
	}

	return 0;
}

static struct platform_driver hi6421_regulator_driver = {
	.driver = {
		.name	= "hi6421-regulator",
	},
	.probe	= hi6421_regulator_probe,
};
module_platform_driver(hi6421_regulator_driver);

MODULE_AUTHOR("Guodong Xu <guodong.xu@linaro.org>");
MODULE_DESCRIPTION("Hi6421 regulator driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
EPDCenter/android_kernel_amlogic_s805_3go_aplay2
arch/arm/mach-s3c64xx/clock.c
2173
23918
/* linux/arch/arm/plat-s3c64xx/clock.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C64XX Base clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/cpu-freq.h> #include <plat/clock.h> #include <plat/clock-clksrc.h> #include <plat/pll.h> #include "regs-sys.h" /* fin_apll, fin_mpll and fin_epll are all the same clock, which we call * ext_xtal_mux for want of an actual name from the manual. */ static struct clk clk_ext_xtal_mux = { .name = "ext_xtal", }; #define clk_fin_apll clk_ext_xtal_mux #define clk_fin_mpll clk_ext_xtal_mux #define clk_fin_epll clk_ext_xtal_mux #define clk_fout_mpll clk_mpll #define clk_fout_epll clk_epll struct clk clk_h2 = { .name = "hclk2", .rate = 0, }; struct clk clk_27m = { .name = "clk_27m", .rate = 27000000, }; static int clk_48m_ctrl(struct clk *clk, int enable) { unsigned long flags; u32 val; /* can't rely on clock lock, this register has other usages */ local_irq_save(flags); val = __raw_readl(S3C64XX_OTHERS); if (enable) val |= S3C64XX_OTHERS_USBMASK; else val &= ~S3C64XX_OTHERS_USBMASK; __raw_writel(val, S3C64XX_OTHERS); local_irq_restore(flags); return 0; } struct clk clk_48m = { .name = "clk_48m", .rate = 48000000, .enable = clk_48m_ctrl, }; struct clk clk_xusbxti = { .name = "xusbxti", .rate = 48000000, }; static int inline s3c64xx_gate(void __iomem *reg, struct clk *clk, int enable) { unsigned int ctrlbit = clk->ctrlbit; u32 con; con = __raw_readl(reg); if (enable) con |= ctrlbit; else 
con &= ~ctrlbit; __raw_writel(con, reg); return 0; } static int s3c64xx_pclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_PCLK_GATE, clk, enable); } static int s3c64xx_hclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_HCLK_GATE, clk, enable); } int s3c64xx_sclk_ctrl(struct clk *clk, int enable) { return s3c64xx_gate(S3C_SCLK_GATE, clk, enable); } static struct clk init_clocks_off[] = { { .name = "nand", .parent = &clk_h, }, { .name = "rtc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_RTC, }, { .name = "adc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_TSADC, }, { .name = "i2c", .devname = "s3c2440-i2c.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIC, }, { .name = "i2c", .devname = "s3c2440-i2c.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, }, { .name = "keypad", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_KEYPAD, }, { .name = "spi", .devname = "s3c6410-spi.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI0, }, { .name = "spi", .devname = "s3c6410-spi.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_SPI1, }, { .name = "48m", .devname = "s3c-sdhci.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC0_48, }, { .name = "48m", .devname = "s3c-sdhci.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC1_48, }, { .name = "48m", .devname = "s3c-sdhci.2", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_MMC2_48, }, { .name = "ac97", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_AC97, }, { .name = "cfcon", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_IHOST, }, { .name = "dma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA0, }, { .name = "dma1", .parent = &clk_h, .enable = 
s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_DMA1, }, { .name = "3dse", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_3DSE, }, { .name = "hclk_secur", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SECUR, }, { .name = "sdma1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA1, }, { .name = "sdma0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SDMA0, }, { .name = "hclk_jpeg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_JPEG, }, { .name = "camif", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_CAMIF, }, { .name = "hclk_scaler", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_SCALER, }, { .name = "2d", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_2D, }, { .name = "tv", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_TV, }, { .name = "post0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_POST0, }, { .name = "rot", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_ROT, }, { .name = "hclk_mfc", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_MFC, }, { .name = "pclk_mfc", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_MFC, }, { .name = "dac27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_DAC27, }, { .name = "tv27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_TV27, }, { .name = "scaler27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER27, }, { .name = "sclk_scaler", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SCALER, }, { .name = "post0_27", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_POST0_27, }, { .name = "secur", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SECUR, }, { .name = "sclk_mfc", .enable = s3c64xx_sclk_ctrl, .ctrlbit = 
S3C_CLKCON_SCLK_MFC, }, { .name = "sclk_jpeg", .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_JPEG, }, }; static struct clk clk_48m_spi0 = { .name = "spi_48m", .devname = "s3c6410-spi.0", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI0_48, }; static struct clk clk_48m_spi1 = { .name = "spi_48m", .devname = "s3c6410-spi.1", .parent = &clk_48m, .enable = s3c64xx_sclk_ctrl, .ctrlbit = S3C_CLKCON_SCLK_SPI1_48, }; static struct clk clk_i2s0 = { .name = "iis", .devname = "samsung-i2s.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS0, }; static struct clk clk_i2s1 = { .name = "iis", .devname = "samsung-i2s.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIS1, }; #ifdef CONFIG_CPU_S3C6410 static struct clk clk_i2s2 = { .name = "iis", .devname = "samsung-i2s.2", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C6410_CLKCON_PCLK_IIS2, }; #endif static struct clk init_clocks[] = { { .name = "lcd", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_LCD, }, { .name = "gpio", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_GPIO, }, { .name = "usb-host", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_UHOST, }, { .name = "otg", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_USB, }, { .name = "timers", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_PWM, }, { .name = "uart", .devname = "s3c6400-uart.0", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART0, }, { .name = "uart", .devname = "s3c6400-uart.1", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART1, }, { .name = "uart", .devname = "s3c6400-uart.2", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_UART2, }, { .name = "uart", .devname = "s3c6400-uart.3", .parent = &clk_p, .enable = s3c64xx_pclk_ctrl, 
.ctrlbit = S3C_CLKCON_PCLK_UART3, }, { .name = "watchdog", .parent = &clk_p, .ctrlbit = S3C_CLKCON_PCLK_WDT, }, }; static struct clk clk_hsmmc0 = { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC0, }; static struct clk clk_hsmmc1 = { .name = "hsmmc", .devname = "s3c-sdhci.1", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC1, }; static struct clk clk_hsmmc2 = { .name = "hsmmc", .devname = "s3c-sdhci.2", .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC2, }; static struct clk clk_fout_apll = { .name = "fout_apll", }; static struct clk *clk_src_apll_list[] = { [0] = &clk_fin_apll, [1] = &clk_fout_apll, }; static struct clksrc_sources clk_src_apll = { .sources = clk_src_apll_list, .nr_sources = ARRAY_SIZE(clk_src_apll_list), }; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 }, .sources = &clk_src_apll, }; static struct clk *clk_src_epll_list[] = { [0] = &clk_fin_epll, [1] = &clk_fout_epll, }; static struct clksrc_sources clk_src_epll = { .sources = clk_src_epll_list, .nr_sources = ARRAY_SIZE(clk_src_epll_list), }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 }, .sources = &clk_src_epll, }; static struct clk *clk_src_mpll_list[] = { [0] = &clk_fin_mpll, [1] = &clk_fout_mpll, }; static struct clksrc_sources clk_src_mpll = { .sources = clk_src_mpll_list, .nr_sources = ARRAY_SIZE(clk_src_mpll_list), }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 }, .sources = &clk_src_mpll, }; static unsigned int armclk_mask; static unsigned long s3c64xx_clk_arm_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); u32 clkdiv; /* divisor mask starts at bit0, so no need to shift */ 
clkdiv = __raw_readl(S3C_CLK_DIV0) & armclk_mask; return rate / (clkdiv + 1); } static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; if (parent < rate) return parent; div = (parent / rate) - 1; if (div > armclk_mask) div = armclk_mask; return parent / (div + 1); } static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); u32 div; u32 val; if (rate < parent / (armclk_mask + 1)) return -EINVAL; rate = clk_round_rate(clk, rate); div = clk_get_rate(clk->parent) / rate; val = __raw_readl(S3C_CLK_DIV0); val &= ~armclk_mask; val |= (div - 1); __raw_writel(val, S3C_CLK_DIV0); return 0; } static struct clk clk_arm = { .name = "armclk", .parent = &clk_mout_apll.clk, .ops = &(struct clk_ops) { .get_rate = s3c64xx_clk_arm_get_rate, .set_rate = s3c64xx_clk_arm_set_rate, .round_rate = s3c64xx_clk_arm_round_rate, }, }; static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk) { unsigned long rate = clk_get_rate(clk->parent); printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate); if (__raw_readl(S3C_CLK_DIV0) & S3C6400_CLKDIV0_MPLL_MASK) rate /= 2; return rate; } static struct clk_ops clk_dout_ops = { .get_rate = s3c64xx_clk_doutmpll_get_rate, }; static struct clk clk_dout_mpll = { .name = "dout_mpll", .parent = &clk_mout_mpll.clk, .ops = &clk_dout_ops, }; static struct clk *clkset_spi_mmc_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, &clk_27m, }; static struct clksrc_sources clkset_spi_mmc = { .sources = clkset_spi_mmc_list, .nr_sources = ARRAY_SIZE(clkset_spi_mmc_list), }; static struct clk *clkset_irda_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, NULL, &clk_27m, }; static struct clksrc_sources clkset_irda = { .sources = clkset_irda_list, .nr_sources = ARRAY_SIZE(clkset_irda_list), }; static struct clk *clkset_uart_list[] = { &clk_mout_epll.clk, &clk_dout_mpll, NULL, NULL }; static struct 
clksrc_sources clkset_uart = { .sources = clkset_uart_list, .nr_sources = ARRAY_SIZE(clkset_uart_list), }; static struct clk *clkset_uhost_list[] = { &clk_48m, &clk_mout_epll.clk, &clk_dout_mpll, &clk_fin_epll, }; static struct clksrc_sources clkset_uhost = { .sources = clkset_uhost_list, .nr_sources = ARRAY_SIZE(clkset_uhost_list), }; /* The peripheral clocks are all controlled via clocksource followed * by an optional divider and gate stage. We currently roll this into * one clock which hides the intermediate clock from the mux. * * Note, the JPEG clock can only be an even divider... * * The scaler and LCD clocks depend on the S3C64XX version, and also * have a common parent divisor so are not included here. */ /* clocks that feed other parts of the clock source tree */ static struct clk clk_iis_cd0 = { .name = "iis_cdclk0", }; static struct clk clk_iis_cd1 = { .name = "iis_cdclk1", }; static struct clk clk_iisv4_cd = { .name = "iis_cdclk_v4", }; static struct clk clk_pcm_cd = { .name = "pcm_cdclk", }; static struct clk *clkset_audio0_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd0, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio0 = { .sources = clkset_audio0_list, .nr_sources = ARRAY_SIZE(clkset_audio0_list), }; static struct clk *clkset_audio1_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iis_cd1, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio1 = { .sources = clkset_audio1_list, .nr_sources = ARRAY_SIZE(clkset_audio1_list), }; #ifdef CONFIG_CPU_S3C6410 static struct clk *clkset_audio2_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_dout_mpll, [2] = &clk_fin_epll, [3] = &clk_iisv4_cd, [4] = &clk_pcm_cd, }; static struct clksrc_sources clkset_audio2 = { .sources = clkset_audio2_list, .nr_sources = ARRAY_SIZE(clkset_audio2_list), }; #endif static struct clksrc_clk clksrcs[] = { { .clk = { .name = "usb-bus-host", .ctrlbit = 
S3C_CLKCON_SCLK_UHOST, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 5, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 20, .size = 4 }, .sources = &clkset_uhost, }, { .clk = { .name = "irda-bus", .ctrlbit = S3C_CLKCON_SCLK_IRDA, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 24, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 20, .size = 4 }, .sources = &clkset_irda, }, { .clk = { .name = "camera", .ctrlbit = S3C_CLKCON_SCLK_CAM, .enable = s3c64xx_sclk_ctrl, .parent = &clk_h2, }, .reg_div = { .reg = S3C_CLK_DIV0, .shift = 20, .size = 4 }, }, }; /* Where does UCLK0 come from? */ static struct clksrc_clk clk_sclk_uclk = { .clk = { .name = "uclk1", .ctrlbit = S3C_CLKCON_SCLK_UART, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 }, .sources = &clkset_uart, }; static struct clksrc_clk clk_sclk_mmc0 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.0", .ctrlbit = S3C_CLKCON_SCLK_MMC0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc1 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.1", .ctrlbit = S3C_CLKCON_SCLK_MMC1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_mmc2 = { .clk = { .name = "mmc_bus", .devname = "s3c-sdhci.2", .ctrlbit = S3C_CLKCON_SCLK_MMC2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi0 = { .clk = { .name = "spi-bus", .devname = "s3c6410-spi.0", .ctrlbit = 
S3C_CLKCON_SCLK_SPI0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_sclk_spi1 = { .clk = { .name = "spi-bus", .devname = "s3c6410-spi.1", .ctrlbit = S3C_CLKCON_SCLK_SPI1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 }, .sources = &clkset_spi_mmc, }; static struct clksrc_clk clk_audio_bus0 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.0", .ctrlbit = S3C_CLKCON_SCLK_AUDIO0, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 }, .sources = &clkset_audio0, }; static struct clksrc_clk clk_audio_bus1 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.1", .ctrlbit = S3C_CLKCON_SCLK_AUDIO1, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 }, .sources = &clkset_audio1, }; #ifdef CONFIG_CPU_S3C6410 static struct clksrc_clk clk_audio_bus2 = { .clk = { .name = "audio-bus", .devname = "samsung-i2s.2", .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2, .enable = s3c64xx_sclk_ctrl, }, .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 }, .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 }, .sources = &clkset_audio2, }; #endif /* Clock initialisation code */ static struct clksrc_clk *init_parents[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, }; static struct clksrc_clk *clksrc_cdev[] = { &clk_sclk_uclk, &clk_sclk_mmc0, &clk_sclk_mmc1, &clk_sclk_mmc2, &clk_sclk_spi0, &clk_sclk_spi1, &clk_audio_bus0, &clk_audio_bus1, }; static struct clk *clk_cdev[] = { &clk_hsmmc0, &clk_hsmmc1, &clk_hsmmc2, &clk_48m_spi0, &clk_48m_spi1, &clk_i2s0, &clk_i2s1, }; static struct clk_lookup s3c64xx_clk_lookup[] = { 
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk), CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s3c6410-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s3c6410-spi.0", "spi_busclk2", &clk_48m_spi0), CLKDEV_INIT("s3c6410-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), CLKDEV_INIT("s3c6410-spi.1", "spi_busclk2", &clk_48m_spi1), CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0), CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_audio_bus0.clk), CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1), CLKDEV_INIT("samsung-i2s.1", "i2s_opclk1", &clk_audio_bus1.clk), #ifdef CONFIG_CPU_S3C6410 CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2), CLKDEV_INIT("samsung-i2s.2", "i2s_opclk1", &clk_audio_bus2.clk), #endif }; #define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1) void __init_or_cpufreq s3c64xx_setup_clocks(void) { struct clk *xtal_clk; unsigned long xtal; unsigned long fclk; unsigned long hclk; unsigned long hclk2; unsigned long pclk; unsigned long epll; unsigned long apll; unsigned long mpll; unsigned int ptr; u32 clkdiv0; printk(KERN_DEBUG "%s: registering clocks\n", __func__); clkdiv0 = __raw_readl(S3C_CLK_DIV0); printk(KERN_DEBUG "%s: clkdiv0 = %08x\n", __func__, clkdiv0); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); /* For now assume the mux always selects the crystal */ clk_ext_xtal_mux.parent = xtal_clk; epll = s3c_get_pll6553x(xtal, __raw_readl(S3C_EPLL_CON0), __raw_readl(S3C_EPLL_CON1)); 
mpll = s3c6400_get_pll(xtal, __raw_readl(S3C_MPLL_CON)); apll = s3c6400_get_pll(xtal, __raw_readl(S3C_APLL_CON)); fclk = mpll; printk(KERN_INFO "S3C64XX: PLL settings, A=%ld, M=%ld, E=%ld\n", apll, mpll, epll); if(__raw_readl(S3C64XX_OTHERS) & S3C64XX_OTHERS_SYNCMUXSEL) /* Synchronous mode */ hclk2 = apll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); else /* Asynchronous mode */ hclk2 = mpll / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK2); hclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_HCLK); pclk = hclk2 / GET_DIV(clkdiv0, S3C6400_CLKDIV0_PCLK); printk(KERN_INFO "S3C64XX: HCLK2=%ld, HCLK=%ld, PCLK=%ld\n", hclk2, hclk, pclk); clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_apll.rate = apll; clk_h2.rate = hclk2; clk_h.rate = hclk; clk_p.rate = pclk; clk_f.rate = fclk; for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++) s3c_set_clksrc(init_parents[ptr], true); for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks1[] __initdata = { &clk_ext_xtal_mux, &clk_iis_cd0, &clk_iis_cd1, &clk_iisv4_cd, &clk_pcm_cd, &clk_mout_epll.clk, &clk_mout_mpll.clk, &clk_dout_mpll, &clk_arm, }; static struct clk *clks[] __initdata = { &clk_ext, &clk_epll, &clk_27m, &clk_48m, &clk_h2, &clk_xusbxti, }; /** * s3c64xx_register_clocks - register clocks for s3c6400 and s3c6410 * @xtal: The rate for the clock crystal feeding the PLLs. * @armclk_divlimit: Divisor mask for ARMCLK. * * Register the clocks for the S3C6400 and S3C6410 SoC range, such * as ARMCLK as well as the necessary parent clocks. * * This call does not setup the clocks, which is left to the * s3c64xx_setup_clocks() call which may be needed by the cpufreq * or resume code to re-set the clocks if the bootloader has changed * them. 
*/
/* Kerneldoc for this function is in the comment block immediately above. */
void __init s3c64xx_register_clocks(unsigned long xtal,
				    unsigned armclk_divlimit)
{
	unsigned int cnt;

	/* Stash the CLK_DIV0 divisor mask; the armclk get/set/round_rate
	 * ops read it when computing ARMCLK dividers. */
	armclk_mask = armclk_divlimit;

	/* Base clocks (fclk/hclk/pclk and the crystal) come from the
	 * common s3c24xx registration code. */
	s3c24xx_register_baseclocks(xtal);
	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	/* Always-on peripheral gates. */
	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));

	/* Gated peripheral clocks are registered and then immediately
	 * disabled; drivers enable them on demand. */
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	/* Per-device (clkdev) gate clocks, likewise registered disabled. */
	s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
	for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++)
		s3c_disable_clocks(clk_cdev[cnt], 1);

	/* Mux/divider parents and the clksrc-based peripheral clocks. */
	s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));
	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
	for (cnt = 0; cnt < ARRAY_SIZE(clksrc_cdev); cnt++)
		s3c_register_clksrc(clksrc_cdev[cnt], 1);

	/* Publish the device-name -> clk mappings for clk_get(). */
	clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup));
	s3c_pwmclk_init();
}
gpl-2.0
AICP/kernel_nvidia_shieldtablet
drivers/tty/serial/lantiq.c
2173
18501
/* * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) 2004 Infineon IFAP DC COM CPE * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2007 John Crispin <blogic@openwrt.org> * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com> */ #include <linux/slab.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/gpio.h> #include <lantiq_soc.h> #define PORT_LTQ_ASC 111 #define MAXPORTS 2 #define UART_DUMMY_UER_RX 1 #define DRVNAME "lantiq,asc" #ifdef __BIG_ENDIAN #define LTQ_ASC_TBUF (0x0020 + 3) #define LTQ_ASC_RBUF (0x0024 + 3) #else #define LTQ_ASC_TBUF 0x0020 #define LTQ_ASC_RBUF 0x0024 #endif #define LTQ_ASC_FSTAT 0x0048 #define LTQ_ASC_WHBSTATE 0x0018 #define LTQ_ASC_STATE 0x0014 #define LTQ_ASC_IRNCR 0x00F8 #define LTQ_ASC_CLC 0x0000 #define LTQ_ASC_ID 0x0008 #define LTQ_ASC_PISEL 0x0004 #define LTQ_ASC_TXFCON 0x0044 #define LTQ_ASC_RXFCON 0x0040 #define LTQ_ASC_CON 0x0010 #define LTQ_ASC_BG 0x0050 #define LTQ_ASC_IRNREN 0x00F4 
#define ASC_IRNREN_TX 0x1 #define ASC_IRNREN_RX 0x2 #define ASC_IRNREN_ERR 0x4 #define ASC_IRNREN_TX_BUF 0x8 #define ASC_IRNCR_TIR 0x1 #define ASC_IRNCR_RIR 0x2 #define ASC_IRNCR_EIR 0x4 #define ASCOPT_CSIZE 0x3 #define TXFIFO_FL 1 #define RXFIFO_FL 1 #define ASCCLC_DISS 0x2 #define ASCCLC_RMCMASK 0x0000FF00 #define ASCCLC_RMCOFFSET 8 #define ASCCON_M_8ASYNC 0x0 #define ASCCON_M_7ASYNC 0x2 #define ASCCON_ODD 0x00000020 #define ASCCON_STP 0x00000080 #define ASCCON_BRS 0x00000100 #define ASCCON_FDE 0x00000200 #define ASCCON_R 0x00008000 #define ASCCON_FEN 0x00020000 #define ASCCON_ROEN 0x00080000 #define ASCCON_TOEN 0x00100000 #define ASCSTATE_PE 0x00010000 #define ASCSTATE_FE 0x00020000 #define ASCSTATE_ROE 0x00080000 #define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) #define ASCWHBSTATE_CLRREN 0x00000001 #define ASCWHBSTATE_SETREN 0x00000002 #define ASCWHBSTATE_CLRPE 0x00000004 #define ASCWHBSTATE_CLRFE 0x00000008 #define ASCWHBSTATE_CLRROE 0x00000020 #define ASCTXFCON_TXFEN 0x0001 #define ASCTXFCON_TXFFLU 0x0002 #define ASCTXFCON_TXFITLMASK 0x3F00 #define ASCTXFCON_TXFITLOFF 8 #define ASCRXFCON_RXFEN 0x0001 #define ASCRXFCON_RXFFLU 0x0002 #define ASCRXFCON_RXFITLMASK 0x3F00 #define ASCRXFCON_RXFITLOFF 8 #define ASCFSTAT_RXFFLMASK 0x003F #define ASCFSTAT_TXFFLMASK 0x3F00 #define ASCFSTAT_TXFREEMASK 0x3F000000 #define ASCFSTAT_TXFREEOFF 24 static void lqasc_tx_chars(struct uart_port *port); static struct ltq_uart_port *lqasc_port[MAXPORTS]; static struct uart_driver lqasc_reg; static DEFINE_SPINLOCK(ltq_asc_lock); struct ltq_uart_port { struct uart_port port; /* clock used to derive divider */ struct clk *fpiclk; /* clock gating of the ASC core */ struct clk *clk; unsigned int tx_irq; unsigned int rx_irq; unsigned int err_irq; }; static inline struct ltq_uart_port *to_ltq_uart_port(struct uart_port *port) { return container_of(port, struct ltq_uart_port, port); } static void lqasc_stop_tx(struct uart_port *port) { return; } static void lqasc_start_tx(struct 
uart_port *port) { unsigned long flags; spin_lock_irqsave(&ltq_asc_lock, flags); lqasc_tx_chars(port); spin_unlock_irqrestore(&ltq_asc_lock, flags); return; } static void lqasc_stop_rx(struct uart_port *port) { ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); } static void lqasc_enable_ms(struct uart_port *port) { } static int lqasc_rx_chars(struct uart_port *port) { struct tty_port *tport = &port->state->port; unsigned int ch = 0, rsr = 0, fifocnt; fifocnt = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; while (fifocnt--) { u8 flag = TTY_NORMAL; ch = ltq_r8(port->membase + LTQ_ASC_RBUF); rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) & ASCSTATE_ANY) | UART_DUMMY_UER_RX; tty_flip_buffer_push(tport); port->icount.rx++; /* * Note that the error handling code is * out of the main execution path */ if (rsr & ASCSTATE_ANY) { if (rsr & ASCSTATE_PE) { port->icount.parity++; ltq_w32_mask(0, ASCWHBSTATE_CLRPE, port->membase + LTQ_ASC_WHBSTATE); } else if (rsr & ASCSTATE_FE) { port->icount.frame++; ltq_w32_mask(0, ASCWHBSTATE_CLRFE, port->membase + LTQ_ASC_WHBSTATE); } if (rsr & ASCSTATE_ROE) { port->icount.overrun++; ltq_w32_mask(0, ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); } rsr &= port->read_status_mask; if (rsr & ASCSTATE_PE) flag = TTY_PARITY; else if (rsr & ASCSTATE_FE) flag = TTY_FRAME; } if ((rsr & port->ignore_status_mask) == 0) tty_insert_flip_char(tport, ch, flag); if (rsr & ASCSTATE_ROE) /* * Overrun is special, since it's reported * immediately, and doesn't affect the current * character */ tty_insert_flip_char(tport, 0, TTY_OVERRUN); } if (ch != 0) tty_flip_buffer_push(tport); return 0; } static void lqasc_tx_chars(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; if (uart_tx_stopped(port)) { lqasc_stop_tx(port); return; } while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { if (port->x_char) { ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); 
port->icount.tx++; port->x_char = 0; continue; } if (uart_circ_empty(xmit)) break; ltq_w8(port->state->xmit.buf[port->state->xmit.tail], port->membase + LTQ_ASC_TBUF); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static irqreturn_t lqasc_tx_int(int irq, void *_port) { unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); spin_unlock_irqrestore(&ltq_asc_lock, flags); lqasc_start_tx(port); return IRQ_HANDLED; } static irqreturn_t lqasc_err_int(int irq, void *_port) { unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); /* clear any pending interrupts */ ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); spin_unlock_irqrestore(&ltq_asc_lock, flags); return IRQ_HANDLED; } static irqreturn_t lqasc_rx_int(int irq, void *_port) { unsigned long flags; struct uart_port *port = (struct uart_port *)_port; spin_lock_irqsave(&ltq_asc_lock, flags); ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); lqasc_rx_chars(port); spin_unlock_irqrestore(&ltq_asc_lock, flags); return IRQ_HANDLED; } static unsigned int lqasc_tx_empty(struct uart_port *port) { int status; status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; return status ? 
0 : TIOCSER_TEMT; } static unsigned int lqasc_get_mctrl(struct uart_port *port) { return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; } static void lqasc_set_mctrl(struct uart_port *port, u_int mctrl) { } static void lqasc_break_ctl(struct uart_port *port, int break_state) { } static int lqasc_startup(struct uart_port *port) { struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); int retval; if (ltq_port->clk) clk_enable(ltq_port->clk); port->uartclk = clk_get_rate(ltq_port->fpiclk); ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), port->membase + LTQ_ASC_CLC); ltq_w32(0, port->membase + LTQ_ASC_PISEL); ltq_w32( ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, port->membase + LTQ_ASC_TXFCON); ltq_w32( ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); /* make sure other settings are written to hardware before * setting enable bits */ wmb(); ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN, port->membase + LTQ_ASC_CON); retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, 0, "asc_tx", port); if (retval) { pr_err("failed to request lqasc_tx_int\n"); return retval; } retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, 0, "asc_rx", port); if (retval) { pr_err("failed to request lqasc_rx_int\n"); goto err1; } retval = request_irq(ltq_port->err_irq, lqasc_err_int, 0, "asc_err", port); if (retval) { pr_err("failed to request lqasc_err_int\n"); goto err2; } ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, port->membase + LTQ_ASC_IRNREN); return 0; err2: free_irq(ltq_port->rx_irq, port); err1: free_irq(ltq_port->tx_irq, port); return retval; } static void lqasc_shutdown(struct uart_port *port) { struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); free_irq(ltq_port->tx_irq, port); free_irq(ltq_port->rx_irq, port); free_irq(ltq_port->err_irq, port); ltq_w32(0, port->membase + LTQ_ASC_CON); 
ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, port->membase + LTQ_ASC_RXFCON); ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, port->membase + LTQ_ASC_TXFCON); if (ltq_port->clk) clk_disable(ltq_port->clk); } static void lqasc_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int cflag; unsigned int iflag; unsigned int divisor; unsigned int baud; unsigned int con = 0; unsigned long flags; cflag = new->c_cflag; iflag = new->c_iflag; switch (cflag & CSIZE) { case CS7: con = ASCCON_M_7ASYNC; break; case CS5: case CS6: default: new->c_cflag &= ~ CSIZE; new->c_cflag |= CS8; con = ASCCON_M_8ASYNC; break; } cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ if (cflag & CSTOPB) con |= ASCCON_STP; if (cflag & PARENB) { if (!(cflag & PARODD)) con &= ~ASCCON_ODD; else con |= ASCCON_ODD; } port->read_status_mask = ASCSTATE_ROE; if (iflag & INPCK) port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; port->ignore_status_mask = 0; if (iflag & IGNPAR) port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; if (iflag & IGNBRK) { /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). 
*/ if (iflag & IGNPAR) port->ignore_status_mask |= ASCSTATE_ROE; } if ((cflag & CREAD) == 0) port->ignore_status_mask |= UART_DUMMY_UER_RX; /* set error signals - framing, parity and overrun, enable receiver */ con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; spin_lock_irqsave(&ltq_asc_lock, flags); /* set up CON */ ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); /* Set baud rate - take a divider of 2 into account */ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); divisor = uart_get_divisor(port, baud); divisor = divisor / 2 - 1; /* disable the baudrate generator */ ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); /* make sure the fractional divider is off */ ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); /* set up to use divisor of 2 */ ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); /* now we can write the new baudrate into the register */ ltq_w32(divisor, port->membase + LTQ_ASC_BG); /* turn the baudrate generator back on */ ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); /* enable rx */ ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); spin_unlock_irqrestore(&ltq_asc_lock, flags); /* Don't rewrite B0 */ if (tty_termios_baud_rate(new)) tty_termios_encode_baud_rate(new, baud, baud); uart_update_timeout(port, cflag, baud); } static const char* lqasc_type(struct uart_port *port) { if (port->type == PORT_LTQ_ASC) return DRVNAME; else return NULL; } static void lqasc_release_port(struct uart_port *port) { if (port->flags & UPF_IOREMAP) { iounmap(port->membase); port->membase = NULL; } } static int lqasc_request_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct resource *res; int size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory region"); return -ENODEV; } size = resource_size(res); res = devm_request_mem_region(&pdev->dev, res->start, size, dev_name(&pdev->dev)); if (!res) { 
dev_err(&pdev->dev, "cannot request I/O memory region"); return -EBUSY; } if (port->flags & UPF_IOREMAP) { port->membase = devm_ioremap_nocache(&pdev->dev, port->mapbase, size); if (port->membase == NULL) return -ENOMEM; } return 0; } static void lqasc_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_LTQ_ASC; lqasc_request_port(port); } } static int lqasc_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) ret = -EINVAL; if (ser->irq < 0 || ser->irq >= NR_IRQS) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; return ret; } static struct uart_ops lqasc_pops = { .tx_empty = lqasc_tx_empty, .set_mctrl = lqasc_set_mctrl, .get_mctrl = lqasc_get_mctrl, .stop_tx = lqasc_stop_tx, .start_tx = lqasc_start_tx, .stop_rx = lqasc_stop_rx, .enable_ms = lqasc_enable_ms, .break_ctl = lqasc_break_ctl, .startup = lqasc_startup, .shutdown = lqasc_shutdown, .set_termios = lqasc_set_termios, .type = lqasc_type, .release_port = lqasc_release_port, .request_port = lqasc_request_port, .config_port = lqasc_config_port, .verify_port = lqasc_verify_port, }; static void lqasc_console_putchar(struct uart_port *port, int ch) { int fifofree; if (!port->membase) return; do { fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; } while (fifofree == 0); ltq_w8(ch, port->membase + LTQ_ASC_TBUF); } static void lqasc_console_write(struct console *co, const char *s, u_int count) { struct ltq_uart_port *ltq_port; struct uart_port *port; unsigned long flags; if (co->index >= MAXPORTS) return; ltq_port = lqasc_port[co->index]; if (!ltq_port) return; port = &ltq_port->port; spin_lock_irqsave(&ltq_asc_lock, flags); uart_console_write(port, s, count, lqasc_console_putchar); spin_unlock_irqrestore(&ltq_asc_lock, flags); } static int __init lqasc_console_setup(struct console *co, char *options) { struct ltq_uart_port 
*ltq_port; struct uart_port *port; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index >= MAXPORTS) return -ENODEV; ltq_port = lqasc_port[co->index]; if (!ltq_port) return -ENODEV; port = &ltq_port->port; port->uartclk = clk_get_rate(ltq_port->fpiclk); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct console lqasc_console = { .name = "ttyLTQ", .write = lqasc_console_write, .device = uart_console_device, .setup = lqasc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &lqasc_reg, }; static int __init lqasc_console_init(void) { register_console(&lqasc_console); return 0; } console_initcall(lqasc_console_init); static struct uart_driver lqasc_reg = { .owner = THIS_MODULE, .driver_name = DRVNAME, .dev_name = "ttyLTQ", .major = 0, .minor = 0, .nr = MAXPORTS, .cons = &lqasc_console, }; static int __init lqasc_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct ltq_uart_port *ltq_port; struct uart_port *port; struct resource *mmres, irqres[3]; int line = 0; int ret; mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); ret = of_irq_to_resource_table(node, irqres, 3); if (!mmres || (ret != 3)) { dev_err(&pdev->dev, "failed to get memory/irq for serial port\n"); return -ENODEV; } /* check if this is the console port */ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC)) line = 1; if (lqasc_port[line]) { dev_err(&pdev->dev, "port %d already allocated\n", line); return -EBUSY; } ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port), GFP_KERNEL); if (!ltq_port) return -ENOMEM; port = &ltq_port->port; port->iotype = SERIAL_IO_MEM; port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; port->ops = &lqasc_pops; port->fifosize = 16; port->type = PORT_LTQ_ASC, port->line = line; port->dev = &pdev->dev; /* unused, just to be backward-compatible */ port->irq = irqres[0].start; port->mapbase = mmres->start; 
ltq_port->fpiclk = clk_get_fpi(); if (IS_ERR(ltq_port->fpiclk)) { pr_err("failed to get fpi clk\n"); return -ENOENT; } /* not all asc ports have clock gates, lets ignore the return code */ ltq_port->clk = clk_get(&pdev->dev, NULL); ltq_port->tx_irq = irqres[0].start; ltq_port->rx_irq = irqres[1].start; ltq_port->err_irq = irqres[2].start; lqasc_port[line] = ltq_port; platform_set_drvdata(pdev, ltq_port); ret = uart_add_one_port(&lqasc_reg, port); return ret; } static const struct of_device_id ltq_asc_match[] = { { .compatible = DRVNAME }, {}, }; MODULE_DEVICE_TABLE(of, ltq_asc_match); static struct platform_driver lqasc_driver = { .driver = { .name = DRVNAME, .owner = THIS_MODULE, .of_match_table = ltq_asc_match, }, }; int __init init_lqasc(void) { int ret; ret = uart_register_driver(&lqasc_reg); if (ret != 0) return ret; ret = platform_driver_probe(&lqasc_driver, lqasc_probe); if (ret != 0) uart_unregister_driver(&lqasc_reg); return ret; } module_init(init_lqasc); MODULE_DESCRIPTION("Lantiq serial port driver"); MODULE_LICENSE("GPL");
gpl-2.0
emwno/android_kernel_U8500
net/compat-wireless/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.c
2429
133183
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/delay.h> #include <wlc_cfg.h> #include <linux/pci.h> #include <aiutils.h> #include <wlc_pmu.h> #include <bcmnvram.h> #include <bcmdevs.h> #include <sbhnddma.h> #include "wlc_phy_radio.h" #include "wlc_phy_int.h" #include "wlc_phy_qmath.h" #include "wlc_phy_lcn.h" #include "wlc_phytbl_lcn.h" #define PLL_2064_NDIV 90 #define PLL_2064_LOW_END_VCO 3000 #define PLL_2064_LOW_END_KVCO 27 #define PLL_2064_HIGH_END_VCO 4200 #define PLL_2064_HIGH_END_KVCO 68 #define PLL_2064_LOOP_BW_DOUBLER 200 #define PLL_2064_D30_DOUBLER 10500 #define PLL_2064_LOOP_BW 260 #define PLL_2064_D30 8000 #define PLL_2064_CAL_REF_TO 8 #define PLL_2064_MHZ 1000000 #define PLL_2064_OPEN_LOOP_DELAY 5 #define TEMPSENSE 1 #define VBATSENSE 2 #define NOISE_IF_UPD_CHK_INTERVAL 1 #define NOISE_IF_UPD_RST_INTERVAL 60 #define NOISE_IF_UPD_THRESHOLD_CNT 1 #define NOISE_IF_UPD_TRHRESHOLD 50 #define NOISE_IF_UPD_TIMEOUT 1000 #define NOISE_IF_OFF 0 #define NOISE_IF_CHK 1 #define NOISE_IF_ON 2 #define PAPD_BLANKING_PROFILE 3 #define PAPD2LUT 0 #define PAPD_CORR_NORM 0 #define PAPD_BLANKING_THRESHOLD 0 #define PAPD_STOP_AFTER_LAST_UPDATE 0 #define LCN_TARGET_PWR 60 
#define LCN_VBAT_OFFSET_433X 34649679 #define LCN_VBAT_SLOPE_433X 8258032 #define LCN_VBAT_SCALE_NOM 53 #define LCN_VBAT_SCALE_DEN 432 #define LCN_TEMPSENSE_OFFSET 80812 #define LCN_TEMPSENSE_DEN 2647 #define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT \ (0 + 8) #define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK \ (0x7f << LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT) #define LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT \ (0 + 8) #define LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_MASK \ (0x7f << LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT) #define wlc_lcnphy_enable_tx_gain_override(pi) \ wlc_lcnphy_set_tx_gain_override(pi, true) #define wlc_lcnphy_disable_tx_gain_override(pi) \ wlc_lcnphy_set_tx_gain_override(pi, false) #define wlc_lcnphy_iqcal_active(pi) \ (read_phy_reg((pi), 0x451) & \ ((0x1 << 15) | (0x1 << 14))) #define txpwrctrl_off(pi) (0x7 != ((read_phy_reg(pi, 0x4a4) & 0xE000) >> 13)) #define wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) \ (pi->temppwrctrl_capable) #define wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) \ (pi->hwpwrctrl_capable) #define SWCTRL_BT_TX 0x18 #define SWCTRL_OVR_DISABLE 0x40 #define AFE_CLK_INIT_MODE_TXRX2X 1 #define AFE_CLK_INIT_MODE_PAPD 0 #define LCNPHY_TBL_ID_IQLOCAL 0x00 #define LCNPHY_TBL_ID_RFSEQ 0x08 #define LCNPHY_TBL_ID_GAIN_IDX 0x0d #define LCNPHY_TBL_ID_SW_CTRL 0x0f #define LCNPHY_TBL_ID_GAIN_TBL 0x12 #define LCNPHY_TBL_ID_SPUR 0x14 #define LCNPHY_TBL_ID_SAMPLEPLAY 0x15 #define LCNPHY_TBL_ID_SAMPLEPLAY1 0x16 #define LCNPHY_TX_PWR_CTRL_RATE_OFFSET 832 #define LCNPHY_TX_PWR_CTRL_MAC_OFFSET 128 #define LCNPHY_TX_PWR_CTRL_GAIN_OFFSET 192 #define LCNPHY_TX_PWR_CTRL_IQ_OFFSET 320 #define LCNPHY_TX_PWR_CTRL_LO_OFFSET 448 #define LCNPHY_TX_PWR_CTRL_PWR_OFFSET 576 #define LCNPHY_TX_PWR_CTRL_START_INDEX_2G_4313 140 #define LCNPHY_TX_PWR_CTRL_START_NPT 1 #define LCNPHY_TX_PWR_CTRL_MAX_NPT 7 #define LCNPHY_NOISE_SAMPLES_DEFAULT 5000 #define LCNPHY_ACI_DETECT_START 1 #define LCNPHY_ACI_DETECT_PROGRESS 2 #define 
LCNPHY_ACI_DETECT_STOP 3 #define LCNPHY_ACI_CRSHIFRMLO_TRSH 100 #define LCNPHY_ACI_GLITCH_TRSH 2000 #define LCNPHY_ACI_TMOUT 250 #define LCNPHY_ACI_DETECT_TIMEOUT 2 #define LCNPHY_ACI_START_DELAY 0 #define wlc_lcnphy_tx_gain_override_enabled(pi) \ (0 != (read_phy_reg((pi), 0x43b) & (0x1 << 6))) #define wlc_lcnphy_total_tx_frames(pi) \ wlapi_bmac_read_shm((pi)->sh->physhim, M_UCODE_MACSTAT + offsetof(macstat_t, txallfrm)) typedef struct { u16 gm_gain; u16 pga_gain; u16 pad_gain; u16 dac_gain; } lcnphy_txgains_t; typedef enum { LCNPHY_CAL_FULL, LCNPHY_CAL_RECAL, LCNPHY_CAL_CURRECAL, LCNPHY_CAL_DIGCAL, LCNPHY_CAL_GCTRL } lcnphy_cal_mode_t; typedef struct { lcnphy_txgains_t gains; bool useindex; u8 index; } lcnphy_txcalgains_t; typedef struct { u8 chan; s16 a; s16 b; } lcnphy_rx_iqcomp_t; typedef struct { s16 re; s16 im; } lcnphy_spb_tone_t; typedef struct { u16 re; u16 im; } lcnphy_unsign16_struct; typedef struct { u32 iq_prod; u32 i_pwr; u32 q_pwr; } lcnphy_iq_est_t; typedef struct { u16 ptcentreTs20; u16 ptcentreFactor; } lcnphy_sfo_cfg_t; typedef enum { LCNPHY_PAPD_CAL_CW, LCNPHY_PAPD_CAL_OFDM } lcnphy_papd_cal_type_t; typedef u16 iqcal_gain_params_lcnphy[9]; static const iqcal_gain_params_lcnphy tbl_iqcal_gainparams_lcnphy_2G[] = { {0, 0, 0, 0, 0, 0, 0, 0, 0}, }; static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = { tbl_iqcal_gainparams_lcnphy_2G, }; static const u16 iqcal_gainparams_numgains_lcnphy[1] = { sizeof(tbl_iqcal_gainparams_lcnphy_2G) / sizeof(*tbl_iqcal_gainparams_lcnphy_2G), }; static const lcnphy_sfo_cfg_t lcnphy_sfo_cfg[] = { {965, 1087}, {967, 1085}, {969, 1082}, {971, 1080}, {973, 1078}, {975, 1076}, {977, 1073}, {979, 1071}, {981, 1069}, {983, 1067}, {985, 1065}, {987, 1063}, {989, 1060}, {994, 1055} }; static const u16 lcnphy_iqcal_loft_gainladder[] = { ((2 << 8) | 0), ((3 << 8) | 0), ((4 << 8) | 0), ((6 << 8) | 0), ((8 << 8) | 0), ((11 << 8) | 0), ((16 << 8) | 0), ((16 << 8) | 1), ((16 << 8) | 2), ((16 << 8) | 3), ((16 << 8) 
| 4), ((16 << 8) | 5), ((16 << 8) | 6), ((16 << 8) | 7), ((23 << 8) | 7), ((32 << 8) | 7), ((45 << 8) | 7), ((64 << 8) | 7), ((91 << 8) | 7), ((128 << 8) | 7) }; static const u16 lcnphy_iqcal_ir_gainladder[] = { ((1 << 8) | 0), ((2 << 8) | 0), ((4 << 8) | 0), ((6 << 8) | 0), ((8 << 8) | 0), ((11 << 8) | 0), ((16 << 8) | 0), ((23 << 8) | 0), ((32 << 8) | 0), ((45 << 8) | 0), ((64 << 8) | 0), ((64 << 8) | 1), ((64 << 8) | 2), ((64 << 8) | 3), ((64 << 8) | 4), ((64 << 8) | 5), ((64 << 8) | 6), ((64 << 8) | 7), ((91 << 8) | 7), ((128 << 8) | 7) }; static const lcnphy_spb_tone_t lcnphy_spb_tone_3750[] = { {88, 0}, {73, 49}, {34, 81}, {-17, 86}, {-62, 62}, {-86, 17}, {-81, -34}, {-49, -73}, {0, -88}, {49, -73}, {81, -34}, {86, 17}, {62, 62}, {17, 86}, {-34, 81}, {-73, 49}, {-88, 0}, {-73, -49}, {-34, -81}, {17, -86}, {62, -62}, {86, -17}, {81, 34}, {49, 73}, {0, 88}, {-49, 73}, {-81, 34}, {-86, -17}, {-62, -62}, {-17, -86}, {34, -81}, {73, -49}, }; static const u16 iqlo_loopback_rf_regs[20] = { RADIO_2064_REG036, RADIO_2064_REG11A, RADIO_2064_REG03A, RADIO_2064_REG025, RADIO_2064_REG028, RADIO_2064_REG005, RADIO_2064_REG112, RADIO_2064_REG0FF, RADIO_2064_REG11F, RADIO_2064_REG00B, RADIO_2064_REG113, RADIO_2064_REG007, RADIO_2064_REG0FC, RADIO_2064_REG0FD, RADIO_2064_REG012, RADIO_2064_REG057, RADIO_2064_REG059, RADIO_2064_REG05C, RADIO_2064_REG078, RADIO_2064_REG092, }; static const u16 tempsense_phy_regs[14] = { 0x503, 0x4a4, 0x4d0, 0x4d9, 0x4da, 0x4a6, 0x938, 0x939, 0x4d8, 0x4d0, 0x4d7, 0x4a5, 0x40d, 0x4a2, }; static const u16 rxiq_cal_rf_reg[11] = { RADIO_2064_REG098, RADIO_2064_REG116, RADIO_2064_REG12C, RADIO_2064_REG06A, RADIO_2064_REG00B, RADIO_2064_REG01B, RADIO_2064_REG113, RADIO_2064_REG01D, RADIO_2064_REG114, RADIO_2064_REG02E, RADIO_2064_REG12A, }; static const lcnphy_rx_iqcomp_t lcnphy_rx_iqcomp_table_rev0[] = { {1, 0, 0}, {2, 0, 0}, {3, 0, 0}, {4, 0, 0}, {5, 0, 0}, {6, 0, 0}, {7, 0, 0}, {8, 0, 0}, {9, 0, 0}, {10, 0, 0}, {11, 0, 0}, {12, 0, 0}, {13, 0, 0}, 
{14, 0, 0}, {34, 0, 0}, {38, 0, 0}, {42, 0, 0}, {46, 0, 0}, {36, 0, 0}, {40, 0, 0}, {44, 0, 0}, {48, 0, 0}, {52, 0, 0}, {56, 0, 0}, {60, 0, 0}, {64, 0, 0}, {100, 0, 0}, {104, 0, 0}, {108, 0, 0}, {112, 0, 0}, {116, 0, 0}, {120, 0, 0}, {124, 0, 0}, {128, 0, 0}, {132, 0, 0}, {136, 0, 0}, {140, 0, 0}, {149, 0, 0}, {153, 0, 0}, {157, 0, 0}, {161, 0, 0}, {165, 0, 0}, {184, 0, 0}, {188, 0, 0}, {192, 0, 0}, {196, 0, 0}, {200, 0, 0}, {204, 0, 0}, {208, 0, 0}, {212, 0, 0}, {216, 0, 0}, }; static const u32 lcnphy_23bitgaincode_table[] = { 0x200100, 0x200200, 0x200004, 0x200014, 0x200024, 0x200034, 0x200134, 0x200234, 0x200334, 0x200434, 0x200037, 0x200137, 0x200237, 0x200337, 0x200437, 0x000035, 0x000135, 0x000235, 0x000037, 0x000137, 0x000237, 0x000337, 0x00013f, 0x00023f, 0x00033f, 0x00034f, 0x00044f, 0x00144f, 0x00244f, 0x00254f, 0x00354f, 0x00454f, 0x00464f, 0x01464f, 0x02464f, 0x03464f, 0x04464f, }; static const s8 lcnphy_gain_table[] = { -16, -13, 10, 7, 4, 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 50, 53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 92, }; static const s8 lcnphy_gain_index_offset_for_rssi[] = { 7, 7, 7, 7, 7, 7, 7, 8, 7, 7, 6, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 2, 2, 2, 2, 2, 2, -1, -2, -2, -2 }; extern const u8 spur_tbl_rev0[]; extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev1; extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev1[]; extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa; extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250; typedef struct _chan_info_2064_lcnphy { uint chan; uint freq; u8 logen_buftune; u8 logen_rccr_tx; u8 txrf_mix_tune_ctrl; u8 pa_input_tune_g; u8 logen_rccr_rx; u8 pa_rxrf_lna1_freq_tune; u8 pa_rxrf_lna2_freq_tune; u8 rxrf_rxrf_spare1; } chan_info_2064_lcnphy_t; static chan_info_2064_lcnphy_t chan_info_2064_lcnphy[] = { {1, 2412, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {2, 2417, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 
0x88, 0x88, 0x80}, {3, 2422, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {4, 2427, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {5, 2432, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {6, 2437, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {7, 2442, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {8, 2447, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {9, 2452, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {10, 2457, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {11, 2462, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {12, 2467, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {13, 2472, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, {14, 2484, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80}, }; lcnphy_radio_regs_t lcnphy_radio_regs_2064[] = { {0x00, 0, 0, 0, 0}, {0x01, 0x64, 0x64, 0, 0}, {0x02, 0x20, 0x20, 0, 0}, {0x03, 0x66, 0x66, 0, 0}, {0x04, 0xf8, 0xf8, 0, 0}, {0x05, 0, 0, 0, 0}, {0x06, 0x10, 0x10, 0, 0}, {0x07, 0, 0, 0, 0}, {0x08, 0, 0, 0, 0}, {0x09, 0, 0, 0, 0}, {0x0A, 0x37, 0x37, 0, 0}, {0x0B, 0x6, 0x6, 0, 0}, {0x0C, 0x55, 0x55, 0, 0}, {0x0D, 0x8b, 0x8b, 0, 0}, {0x0E, 0, 0, 0, 0}, {0x0F, 0x5, 0x5, 0, 0}, {0x10, 0, 0, 0, 0}, {0x11, 0xe, 0xe, 0, 0}, {0x12, 0, 0, 0, 0}, {0x13, 0xb, 0xb, 0, 0}, {0x14, 0x2, 0x2, 0, 0}, {0x15, 0x12, 0x12, 0, 0}, {0x16, 0x12, 0x12, 0, 0}, {0x17, 0xc, 0xc, 0, 0}, {0x18, 0xc, 0xc, 0, 0}, {0x19, 0xc, 0xc, 0, 0}, {0x1A, 0x8, 0x8, 0, 0}, {0x1B, 0x2, 0x2, 0, 0}, {0x1C, 0, 0, 0, 0}, {0x1D, 0x1, 0x1, 0, 0}, {0x1E, 0x12, 0x12, 0, 0}, {0x1F, 0x6e, 0x6e, 0, 0}, {0x20, 0x2, 0x2, 0, 0}, {0x21, 0x23, 0x23, 0, 0}, {0x22, 0x8, 0x8, 0, 0}, {0x23, 0, 0, 0, 0}, {0x24, 0, 0, 0, 0}, {0x25, 0xc, 0xc, 0, 0}, {0x26, 0x33, 0x33, 0, 0}, {0x27, 0x55, 0x55, 0, 0}, {0x28, 0, 0, 0, 0}, {0x29, 0x30, 0x30, 0, 0}, {0x2A, 0xb, 0xb, 0, 0}, {0x2B, 0x1b, 0x1b, 0, 0}, {0x2C, 0x3, 0x3, 0, 0}, {0x2D, 0x1b, 0x1b, 0, 0}, {0x2E, 0, 0, 0, 0}, {0x2F, 0x20, 0x20, 0, 0}, {0x30, 0xa, 0xa, 0, 0}, {0x31, 0, 0, 0, 0}, {0x32, 0x62, 0x62, 0, 0}, {0x33, 
0x19, 0x19, 0, 0}, {0x34, 0x33, 0x33, 0, 0}, {0x35, 0x77, 0x77, 0, 0}, {0x36, 0, 0, 0, 0}, {0x37, 0x70, 0x70, 0, 0}, {0x38, 0x3, 0x3, 0, 0}, {0x39, 0xf, 0xf, 0, 0}, {0x3A, 0x6, 0x6, 0, 0}, {0x3B, 0xcf, 0xcf, 0, 0}, {0x3C, 0x1a, 0x1a, 0, 0}, {0x3D, 0x6, 0x6, 0, 0}, {0x3E, 0x42, 0x42, 0, 0}, {0x3F, 0, 0, 0, 0}, {0x40, 0xfb, 0xfb, 0, 0}, {0x41, 0x9a, 0x9a, 0, 0}, {0x42, 0x7a, 0x7a, 0, 0}, {0x43, 0x29, 0x29, 0, 0}, {0x44, 0, 0, 0, 0}, {0x45, 0x8, 0x8, 0, 0}, {0x46, 0xce, 0xce, 0, 0}, {0x47, 0x27, 0x27, 0, 0}, {0x48, 0x62, 0x62, 0, 0}, {0x49, 0x6, 0x6, 0, 0}, {0x4A, 0x58, 0x58, 0, 0}, {0x4B, 0xf7, 0xf7, 0, 0}, {0x4C, 0, 0, 0, 0}, {0x4D, 0xb3, 0xb3, 0, 0}, {0x4E, 0, 0, 0, 0}, {0x4F, 0x2, 0x2, 0, 0}, {0x50, 0, 0, 0, 0}, {0x51, 0x9, 0x9, 0, 0}, {0x52, 0x5, 0x5, 0, 0}, {0x53, 0x17, 0x17, 0, 0}, {0x54, 0x38, 0x38, 0, 0}, {0x55, 0, 0, 0, 0}, {0x56, 0, 0, 0, 0}, {0x57, 0xb, 0xb, 0, 0}, {0x58, 0, 0, 0, 0}, {0x59, 0, 0, 0, 0}, {0x5A, 0, 0, 0, 0}, {0x5B, 0, 0, 0, 0}, {0x5C, 0, 0, 0, 0}, {0x5D, 0, 0, 0, 0}, {0x5E, 0x88, 0x88, 0, 0}, {0x5F, 0xcc, 0xcc, 0, 0}, {0x60, 0x74, 0x74, 0, 0}, {0x61, 0x74, 0x74, 0, 0}, {0x62, 0x74, 0x74, 0, 0}, {0x63, 0x44, 0x44, 0, 0}, {0x64, 0x77, 0x77, 0, 0}, {0x65, 0x44, 0x44, 0, 0}, {0x66, 0x77, 0x77, 0, 0}, {0x67, 0x55, 0x55, 0, 0}, {0x68, 0x77, 0x77, 0, 0}, {0x69, 0x77, 0x77, 0, 0}, {0x6A, 0, 0, 0, 0}, {0x6B, 0x7f, 0x7f, 0, 0}, {0x6C, 0x8, 0x8, 0, 0}, {0x6D, 0, 0, 0, 0}, {0x6E, 0x88, 0x88, 0, 0}, {0x6F, 0x66, 0x66, 0, 0}, {0x70, 0x66, 0x66, 0, 0}, {0x71, 0x28, 0x28, 0, 0}, {0x72, 0x55, 0x55, 0, 0}, {0x73, 0x4, 0x4, 0, 0}, {0x74, 0, 0, 0, 0}, {0x75, 0, 0, 0, 0}, {0x76, 0, 0, 0, 0}, {0x77, 0x1, 0x1, 0, 0}, {0x78, 0xd6, 0xd6, 0, 0}, {0x79, 0, 0, 0, 0}, {0x7A, 0, 0, 0, 0}, {0x7B, 0, 0, 0, 0}, {0x7C, 0, 0, 0, 0}, {0x7D, 0, 0, 0, 0}, {0x7E, 0, 0, 0, 0}, {0x7F, 0, 0, 0, 0}, {0x80, 0, 0, 0, 0}, {0x81, 0, 0, 0, 0}, {0x82, 0, 0, 0, 0}, {0x83, 0xb4, 0xb4, 0, 0}, {0x84, 0x1, 0x1, 0, 0}, {0x85, 0x20, 0x20, 0, 0}, {0x86, 0x5, 0x5, 0, 0}, {0x87, 0xff, 0xff, 0, 0}, 
{0x88, 0x7, 0x7, 0, 0}, {0x89, 0x77, 0x77, 0, 0}, {0x8A, 0x77, 0x77, 0, 0}, {0x8B, 0x77, 0x77, 0, 0}, {0x8C, 0x77, 0x77, 0, 0}, {0x8D, 0x8, 0x8, 0, 0}, {0x8E, 0xa, 0xa, 0, 0}, {0x8F, 0x8, 0x8, 0, 0}, {0x90, 0x18, 0x18, 0, 0}, {0x91, 0x5, 0x5, 0, 0}, {0x92, 0x1f, 0x1f, 0, 0}, {0x93, 0x10, 0x10, 0, 0}, {0x94, 0x3, 0x3, 0, 0}, {0x95, 0, 0, 0, 0}, {0x96, 0, 0, 0, 0}, {0x97, 0xaa, 0xaa, 0, 0}, {0x98, 0, 0, 0, 0}, {0x99, 0x23, 0x23, 0, 0}, {0x9A, 0x7, 0x7, 0, 0}, {0x9B, 0xf, 0xf, 0, 0}, {0x9C, 0x10, 0x10, 0, 0}, {0x9D, 0x3, 0x3, 0, 0}, {0x9E, 0x4, 0x4, 0, 0}, {0x9F, 0x20, 0x20, 0, 0}, {0xA0, 0, 0, 0, 0}, {0xA1, 0, 0, 0, 0}, {0xA2, 0, 0, 0, 0}, {0xA3, 0, 0, 0, 0}, {0xA4, 0x1, 0x1, 0, 0}, {0xA5, 0x77, 0x77, 0, 0}, {0xA6, 0x77, 0x77, 0, 0}, {0xA7, 0x77, 0x77, 0, 0}, {0xA8, 0x77, 0x77, 0, 0}, {0xA9, 0x8c, 0x8c, 0, 0}, {0xAA, 0x88, 0x88, 0, 0}, {0xAB, 0x78, 0x78, 0, 0}, {0xAC, 0x57, 0x57, 0, 0}, {0xAD, 0x88, 0x88, 0, 0}, {0xAE, 0, 0, 0, 0}, {0xAF, 0x8, 0x8, 0, 0}, {0xB0, 0x88, 0x88, 0, 0}, {0xB1, 0, 0, 0, 0}, {0xB2, 0x1b, 0x1b, 0, 0}, {0xB3, 0x3, 0x3, 0, 0}, {0xB4, 0x24, 0x24, 0, 0}, {0xB5, 0x3, 0x3, 0, 0}, {0xB6, 0x1b, 0x1b, 0, 0}, {0xB7, 0x24, 0x24, 0, 0}, {0xB8, 0x3, 0x3, 0, 0}, {0xB9, 0, 0, 0, 0}, {0xBA, 0xaa, 0xaa, 0, 0}, {0xBB, 0, 0, 0, 0}, {0xBC, 0x4, 0x4, 0, 0}, {0xBD, 0, 0, 0, 0}, {0xBE, 0x8, 0x8, 0, 0}, {0xBF, 0x11, 0x11, 0, 0}, {0xC0, 0, 0, 0, 0}, {0xC1, 0, 0, 0, 0}, {0xC2, 0x62, 0x62, 0, 0}, {0xC3, 0x1e, 0x1e, 0, 0}, {0xC4, 0x33, 0x33, 0, 0}, {0xC5, 0x37, 0x37, 0, 0}, {0xC6, 0, 0, 0, 0}, {0xC7, 0x70, 0x70, 0, 0}, {0xC8, 0x1e, 0x1e, 0, 0}, {0xC9, 0x6, 0x6, 0, 0}, {0xCA, 0x4, 0x4, 0, 0}, {0xCB, 0x2f, 0x2f, 0, 0}, {0xCC, 0xf, 0xf, 0, 0}, {0xCD, 0, 0, 0, 0}, {0xCE, 0xff, 0xff, 0, 0}, {0xCF, 0x8, 0x8, 0, 0}, {0xD0, 0x3f, 0x3f, 0, 0}, {0xD1, 0x3f, 0x3f, 0, 0}, {0xD2, 0x3f, 0x3f, 0, 0}, {0xD3, 0, 0, 0, 0}, {0xD4, 0, 0, 0, 0}, {0xD5, 0, 0, 0, 0}, {0xD6, 0xcc, 0xcc, 0, 0}, {0xD7, 0, 0, 0, 0}, {0xD8, 0x8, 0x8, 0, 0}, {0xD9, 0x8, 0x8, 0, 0}, {0xDA, 0x8, 0x8, 0, 0}, {0xDB, 
0x11, 0x11, 0, 0}, {0xDC, 0, 0, 0, 0}, {0xDD, 0x87, 0x87, 0, 0}, {0xDE, 0x88, 0x88, 0, 0}, {0xDF, 0x8, 0x8, 0, 0}, {0xE0, 0x8, 0x8, 0, 0}, {0xE1, 0x8, 0x8, 0, 0}, {0xE2, 0, 0, 0, 0}, {0xE3, 0, 0, 0, 0}, {0xE4, 0, 0, 0, 0}, {0xE5, 0xf5, 0xf5, 0, 0}, {0xE6, 0x30, 0x30, 0, 0}, {0xE7, 0x1, 0x1, 0, 0}, {0xE8, 0, 0, 0, 0}, {0xE9, 0xff, 0xff, 0, 0}, {0xEA, 0, 0, 0, 0}, {0xEB, 0, 0, 0, 0}, {0xEC, 0x22, 0x22, 0, 0}, {0xED, 0, 0, 0, 0}, {0xEE, 0, 0, 0, 0}, {0xEF, 0, 0, 0, 0}, {0xF0, 0x3, 0x3, 0, 0}, {0xF1, 0x1, 0x1, 0, 0}, {0xF2, 0, 0, 0, 0}, {0xF3, 0, 0, 0, 0}, {0xF4, 0, 0, 0, 0}, {0xF5, 0, 0, 0, 0}, {0xF6, 0, 0, 0, 0}, {0xF7, 0x6, 0x6, 0, 0}, {0xF8, 0, 0, 0, 0}, {0xF9, 0, 0, 0, 0}, {0xFA, 0x40, 0x40, 0, 0}, {0xFB, 0, 0, 0, 0}, {0xFC, 0x1, 0x1, 0, 0}, {0xFD, 0x80, 0x80, 0, 0}, {0xFE, 0x2, 0x2, 0, 0}, {0xFF, 0x10, 0x10, 0, 0}, {0x100, 0x2, 0x2, 0, 0}, {0x101, 0x1e, 0x1e, 0, 0}, {0x102, 0x1e, 0x1e, 0, 0}, {0x103, 0, 0, 0, 0}, {0x104, 0x1f, 0x1f, 0, 0}, {0x105, 0, 0x8, 0, 1}, {0x106, 0x2a, 0x2a, 0, 0}, {0x107, 0xf, 0xf, 0, 0}, {0x108, 0, 0, 0, 0}, {0x109, 0, 0, 0, 0}, {0x10A, 0, 0, 0, 0}, {0x10B, 0, 0, 0, 0}, {0x10C, 0, 0, 0, 0}, {0x10D, 0, 0, 0, 0}, {0x10E, 0, 0, 0, 0}, {0x10F, 0, 0, 0, 0}, {0x110, 0, 0, 0, 0}, {0x111, 0, 0, 0, 0}, {0x112, 0, 0, 0, 0}, {0x113, 0, 0, 0, 0}, {0x114, 0, 0, 0, 0}, {0x115, 0, 0, 0, 0}, {0x116, 0, 0, 0, 0}, {0x117, 0, 0, 0, 0}, {0x118, 0, 0, 0, 0}, {0x119, 0, 0, 0, 0}, {0x11A, 0, 0, 0, 0}, {0x11B, 0, 0, 0, 0}, {0x11C, 0x1, 0x1, 0, 0}, {0x11D, 0, 0, 0, 0}, {0x11E, 0, 0, 0, 0}, {0x11F, 0, 0, 0, 0}, {0x120, 0, 0, 0, 0}, {0x121, 0, 0, 0, 0}, {0x122, 0x80, 0x80, 0, 0}, {0x123, 0, 0, 0, 0}, {0x124, 0xf8, 0xf8, 0, 0}, {0x125, 0, 0, 0, 0}, {0x126, 0, 0, 0, 0}, {0x127, 0, 0, 0, 0}, {0x128, 0, 0, 0, 0}, {0x129, 0, 0, 0, 0}, {0x12A, 0, 0, 0, 0}, {0x12B, 0, 0, 0, 0}, {0x12C, 0, 0, 0, 0}, {0x12D, 0, 0, 0, 0}, {0x12E, 0, 0, 0, 0}, {0x12F, 0, 0, 0, 0}, {0x130, 0, 0, 0, 0}, {0xFFFF, 0, 0, 0, 0} }; #define LCNPHY_NUM_DIG_FILT_COEFFS 16 #define 
LCNPHY_NUM_TX_DIG_FILTERS_CCK 13 u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK] [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = { {0, 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778, 1582, 64, 128, 64,}, {1, 1, 402, 1847, 259, 59, 259, 671, 1794, 68, 54, 68, 608, 1863, 93, 167, 93,}, {2, 1, 415, 1874, 64, 128, 64, 792, 1656, 192, 384, 192, 778, 1582, 64, 128, 64,}, {3, 1, 302, 1841, 129, 258, 129, 658, 1720, 205, 410, 205, 754, 1760, 170, 340, 170,}, {20, 1, 360, 1884, 242, 1734, 242, 752, 1720, 205, 1845, 205, 767, 1760, 256, 185, 256,}, {21, 1, 360, 1884, 149, 1874, 149, 752, 1720, 205, 1883, 205, 767, 1760, 256, 273, 256,}, {22, 1, 360, 1884, 98, 1948, 98, 752, 1720, 205, 1924, 205, 767, 1760, 256, 352, 256,}, {23, 1, 350, 1884, 116, 1966, 116, 752, 1720, 205, 2008, 205, 767, 1760, 128, 233, 128,}, {24, 1, 325, 1884, 32, 40, 32, 756, 1720, 256, 471, 256, 766, 1760, 256, 1881, 256,}, {25, 1, 299, 1884, 51, 64, 51, 736, 1720, 256, 471, 256, 765, 1760, 256, 1881, 256,}, {26, 1, 277, 1943, 39, 117, 88, 637, 1838, 64, 192, 144, 614, 1864, 128, 384, 288,}, {27, 1, 245, 1943, 49, 147, 110, 626, 1838, 256, 768, 576, 613, 1864, 128, 384, 288,}, {30, 1, 302, 1841, 61, 122, 61, 658, 1720, 205, 410, 205, 754, 1760, 170, 340, 170,}, }; #define LCNPHY_NUM_TX_DIG_FILTERS_OFDM 3 u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM] [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = { {0, 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0, 0x0, 0x278, 0xfea0, 0x80, 0x100, 0x80,}, {1, 0, 374, 0xFF79, 16, 32, 16, 799, 0xFE74, 50, 32, 50, 750, 0xFE2B, 212, 0xFFCE, 212,}, {2, 0, 375, 0xFF16, 37, 76, 37, 799, 0xFE74, 32, 20, 32, 748, 0xFEF2, 128, 0xFFE2, 128} }; #define wlc_lcnphy_set_start_tx_pwr_idx(pi, idx) \ mod_phy_reg(pi, 0x4a4, \ (0x1ff << 0), \ (u16)(idx) << 0) #define wlc_lcnphy_set_tx_pwr_npt(pi, npt) \ mod_phy_reg(pi, 0x4a5, \ (0x7 << 8), \ (u16)(npt) << 8) #define wlc_lcnphy_get_tx_pwr_ctrl(pi) \ (read_phy_reg((pi), 0x4a4) & \ ((0x1 << 15) | \ (0x1 << 14) | \ (0x1 
<< 13))) #define wlc_lcnphy_get_tx_pwr_npt(pi) \ ((read_phy_reg(pi, 0x4a5) & \ (0x7 << 8)) >> \ 8) #define wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(pi) \ (read_phy_reg(pi, 0x473) & 0x1ff) #define wlc_lcnphy_get_target_tx_pwr(pi) \ ((read_phy_reg(pi, 0x4a7) & \ (0xff << 0)) >> \ 0) #define wlc_lcnphy_set_target_tx_pwr(pi, target) \ mod_phy_reg(pi, 0x4a7, \ (0xff << 0), \ (u16)(target) << 0) #define wlc_radio_2064_rcal_done(pi) (0 != (read_radio_reg(pi, RADIO_2064_REG05C) & 0x20)) #define tempsense_done(pi) (0x8000 == (read_phy_reg(pi, 0x476) & 0x8000)) #define LCNPHY_IQLOCC_READ(val) ((u8)(-(s8)(((val) & 0xf0) >> 4) + (s8)((val) & 0x0f))) #define FIXED_TXPWR 78 #define LCNPHY_TEMPSENSE(val) ((s16)((val > 255) ? (val - 512) : val)) static u32 wlc_lcnphy_qdiv_roundup(u32 divident, u32 divisor, u8 precision); static void wlc_lcnphy_set_rx_gain_by_distribution(phy_info_t *pi, u16 ext_lna, u16 trsw, u16 biq2, u16 biq1, u16 tia, u16 lna2, u16 lna1); static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi); static void wlc_lcnphy_set_pa_gain(phy_info_t *pi, u16 gain); static void wlc_lcnphy_set_trsw_override(phy_info_t *pi, bool tx, bool rx); static void wlc_lcnphy_set_bbmult(phy_info_t *pi, u8 m0); static u8 wlc_lcnphy_get_bbmult(phy_info_t *pi); static void wlc_lcnphy_get_tx_gain(phy_info_t *pi, lcnphy_txgains_t *gains); static void wlc_lcnphy_set_tx_gain_override(phy_info_t *pi, bool bEnable); static void wlc_lcnphy_toggle_afe_pwdn(phy_info_t *pi); static void wlc_lcnphy_rx_gain_override_enable(phy_info_t *pi, bool enable); static void wlc_lcnphy_set_tx_gain(phy_info_t *pi, lcnphy_txgains_t *target_gains); static bool wlc_lcnphy_rx_iq_est(phy_info_t *pi, u16 num_samps, u8 wait_time, lcnphy_iq_est_t *iq_est); static bool wlc_lcnphy_calc_rx_iq_comp(phy_info_t *pi, u16 num_samps); static u16 wlc_lcnphy_get_pa_gain(phy_info_t *pi); static void wlc_lcnphy_afe_clk_init(phy_info_t *pi, u8 mode); extern void wlc_lcnphy_tx_pwr_ctrl_init(wlc_phy_t *ppi); static void 
wlc_lcnphy_radio_2064_channel_tune_4313(phy_info_t *pi, u8 channel); static void wlc_lcnphy_load_tx_gain_table(phy_info_t *pi, const lcnphy_tx_gain_tbl_entry *g); static void wlc_lcnphy_samp_cap(phy_info_t *pi, int clip_detect_algo, u16 thresh, s16 *ptr, int mode); static int wlc_lcnphy_calc_floor(s16 coeff, int type); static void wlc_lcnphy_tx_iqlo_loopback(phy_info_t *pi, u16 *values_to_save); static void wlc_lcnphy_tx_iqlo_loopback_cleanup(phy_info_t *pi, u16 *values_to_save); static void wlc_lcnphy_set_cc(phy_info_t *pi, int cal_type, s16 coeff_x, s16 coeff_y); static lcnphy_unsign16_struct wlc_lcnphy_get_cc(phy_info_t *pi, int cal_type); static void wlc_lcnphy_a1(phy_info_t *pi, int cal_type, int num_levels, int step_size_lg2); static void wlc_lcnphy_tx_iqlo_soft_cal_full(phy_info_t *pi); static void wlc_lcnphy_set_chanspec_tweaks(phy_info_t *pi, chanspec_t chanspec); static void wlc_lcnphy_agc_temp_init(phy_info_t *pi); static void wlc_lcnphy_temp_adj(phy_info_t *pi); static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi); static void wlc_lcnphy_baseband_init(phy_info_t *pi); static void wlc_lcnphy_radio_init(phy_info_t *pi); static void wlc_lcnphy_rc_cal(phy_info_t *pi); static void wlc_lcnphy_rcal(phy_info_t *pi); static void wlc_lcnphy_txrx_spur_avoidance_mode(phy_info_t *pi, bool enable); static int wlc_lcnphy_load_tx_iir_filter(phy_info_t *pi, bool is_ofdm, s16 filt_type); static void wlc_lcnphy_set_rx_iq_comp(phy_info_t *pi, u16 a, u16 b); void wlc_lcnphy_write_table(phy_info_t *pi, const phytbl_info_t *pti) { wlc_phy_write_table(pi, pti, 0x455, 0x457, 0x456); } void wlc_lcnphy_read_table(phy_info_t *pi, phytbl_info_t *pti) { wlc_phy_read_table(pi, pti, 0x455, 0x457, 0x456); } static void wlc_lcnphy_common_read_table(phy_info_t *pi, u32 tbl_id, const void *tbl_ptr, u32 tbl_len, u32 tbl_width, u32 tbl_offset) { phytbl_info_t tab; tab.tbl_id = tbl_id; tab.tbl_ptr = tbl_ptr; tab.tbl_len = tbl_len; tab.tbl_width = tbl_width; tab.tbl_offset = 
tbl_offset; wlc_lcnphy_read_table(pi, &tab); } static void wlc_lcnphy_common_write_table(phy_info_t *pi, u32 tbl_id, const void *tbl_ptr, u32 tbl_len, u32 tbl_width, u32 tbl_offset) { phytbl_info_t tab; tab.tbl_id = tbl_id; tab.tbl_ptr = tbl_ptr; tab.tbl_len = tbl_len; tab.tbl_width = tbl_width; tab.tbl_offset = tbl_offset; wlc_lcnphy_write_table(pi, &tab); } static u32 wlc_lcnphy_qdiv_roundup(u32 dividend, u32 divisor, u8 precision) { u32 quotient, remainder, roundup, rbit; quotient = dividend / divisor; remainder = dividend % divisor; rbit = divisor & 1; roundup = (divisor >> 1) + rbit; while (precision--) { quotient <<= 1; if (remainder >= roundup) { quotient++; remainder = ((remainder - roundup) << 1) + rbit; } else { remainder <<= 1; } } if (remainder >= roundup) quotient++; return quotient; } static int wlc_lcnphy_calc_floor(s16 coeff_x, int type) { int k; k = 0; if (type == 0) { if (coeff_x < 0) { k = (coeff_x - 1) / 2; } else { k = coeff_x / 2; } } if (type == 1) { if ((coeff_x + 1) < 0) k = (coeff_x) / 2; else k = (coeff_x + 1) / 2; } return k; } s8 wlc_lcnphy_get_current_tx_pwr_idx(phy_info_t *pi) { s8 index; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; if (txpwrctrl_off(pi)) index = pi_lcn->lcnphy_current_index; else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) index = (s8) (wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(pi) / 2); else index = pi_lcn->lcnphy_current_index; return index; } static u32 wlc_lcnphy_measure_digital_power(phy_info_t *pi, u16 nsamples) { lcnphy_iq_est_t iq_est = { 0, 0, 0 }; if (!wlc_lcnphy_rx_iq_est(pi, nsamples, 32, &iq_est)) return 0; return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; } void wlc_lcnphy_crsuprs(phy_info_t *pi, int channel) { u16 afectrlovr, afectrlovrval; afectrlovr = read_phy_reg(pi, 0x43b); afectrlovrval = read_phy_reg(pi, 0x43c); if (channel != 0) { mod_phy_reg(pi, 0x43b, (0x1 << 1), (1) << 1); mod_phy_reg(pi, 0x43c, (0x1 << 1), (0) << 1); mod_phy_reg(pi, 0x43b, (0x1 << 4), (1) << 4); mod_phy_reg(pi, 
0x43c, (0x1 << 6), (0) << 6); write_phy_reg(pi, 0x44b, 0xffff); wlc_lcnphy_tx_pu(pi, 1); mod_phy_reg(pi, 0x634, (0xff << 8), (0) << 8); or_phy_reg(pi, 0x6da, 0x0080); or_phy_reg(pi, 0x00a, 0x228); } else { and_phy_reg(pi, 0x00a, ~(0x228)); and_phy_reg(pi, 0x6da, 0xFF7F); write_phy_reg(pi, 0x43b, afectrlovr); write_phy_reg(pi, 0x43c, afectrlovrval); } } static void wlc_lcnphy_toggle_afe_pwdn(phy_info_t *pi) { u16 save_AfeCtrlOvrVal, save_AfeCtrlOvr; save_AfeCtrlOvrVal = read_phy_reg(pi, 0x43c); save_AfeCtrlOvr = read_phy_reg(pi, 0x43b); write_phy_reg(pi, 0x43c, save_AfeCtrlOvrVal | 0x1); write_phy_reg(pi, 0x43b, save_AfeCtrlOvr | 0x1); write_phy_reg(pi, 0x43c, save_AfeCtrlOvrVal & 0xfffe); write_phy_reg(pi, 0x43b, save_AfeCtrlOvr & 0xfffe); write_phy_reg(pi, 0x43c, save_AfeCtrlOvrVal); write_phy_reg(pi, 0x43b, save_AfeCtrlOvr); } static void wlc_lcnphy_txrx_spur_avoidance_mode(phy_info_t *pi, bool enable) { if (enable) { write_phy_reg(pi, 0x942, 0x7); write_phy_reg(pi, 0x93b, ((1 << 13) + 23)); write_phy_reg(pi, 0x93c, ((1 << 13) + 1989)); write_phy_reg(pi, 0x44a, 0x084); write_phy_reg(pi, 0x44a, 0x080); write_phy_reg(pi, 0x6d3, 0x2222); write_phy_reg(pi, 0x6d3, 0x2220); } else { write_phy_reg(pi, 0x942, 0x0); write_phy_reg(pi, 0x93b, ((0 << 13) + 23)); write_phy_reg(pi, 0x93c, ((0 << 13) + 1989)); } wlapi_switch_macfreq(pi->sh->physhim, enable); } void wlc_phy_chanspec_set_lcnphy(phy_info_t *pi, chanspec_t chanspec) { u8 channel = CHSPEC_CHANNEL(chanspec); wlc_phy_chanspec_radio_set((wlc_phy_t *) pi, chanspec); wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec); or_phy_reg(pi, 0x44a, 0x44); write_phy_reg(pi, 0x44a, 0x80); if (!NORADIO_ENAB(pi->pubpi)) { wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel); udelay(1000); } wlc_lcnphy_toggle_afe_pwdn(pi); write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20); write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor); if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) { mod_phy_reg(pi, 0x448, 
(0x3 << 8), (2) << 8); wlc_lcnphy_load_tx_iir_filter(pi, false, 3); } else { mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8); wlc_lcnphy_load_tx_iir_filter(pi, false, 2); } wlc_lcnphy_load_tx_iir_filter(pi, true, 0); mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); } static void wlc_lcnphy_set_dac_gain(phy_info_t *pi, u16 dac_gain) { u16 dac_ctrl; dac_ctrl = (read_phy_reg(pi, 0x439) >> 0); dac_ctrl = dac_ctrl & 0xc7f; dac_ctrl = dac_ctrl | (dac_gain << 7); mod_phy_reg(pi, 0x439, (0xfff << 0), (dac_ctrl) << 0); } static void wlc_lcnphy_set_tx_gain_override(phy_info_t *pi, bool bEnable) { u16 bit = bEnable ? 1 : 0; mod_phy_reg(pi, 0x4b0, (0x1 << 7), bit << 7); mod_phy_reg(pi, 0x4b0, (0x1 << 14), bit << 14); mod_phy_reg(pi, 0x43b, (0x1 << 6), bit << 6); } static u16 wlc_lcnphy_get_pa_gain(phy_info_t *pi) { u16 pa_gain; pa_gain = (read_phy_reg(pi, 0x4fb) & LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK) >> LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT; return pa_gain; } static void wlc_lcnphy_set_tx_gain(phy_info_t *pi, lcnphy_txgains_t *target_gains) { u16 pa_gain = wlc_lcnphy_get_pa_gain(pi); mod_phy_reg(pi, 0x4b5, (0xffff << 0), ((target_gains->gm_gain) | (target_gains->pga_gain << 8)) << 0); mod_phy_reg(pi, 0x4fb, (0x7fff << 0), ((target_gains->pad_gain) | (pa_gain << 8)) << 0); mod_phy_reg(pi, 0x4fc, (0xffff << 0), ((target_gains->gm_gain) | (target_gains->pga_gain << 8)) << 0); mod_phy_reg(pi, 0x4fd, (0x7fff << 0), ((target_gains->pad_gain) | (pa_gain << 8)) << 0); wlc_lcnphy_set_dac_gain(pi, target_gains->dac_gain); wlc_lcnphy_enable_tx_gain_override(pi); } static void wlc_lcnphy_set_bbmult(phy_info_t *pi, u8 m0) { u16 m0m1 = (u16) m0 << 8; phytbl_info_t tab; tab.tbl_ptr = &m0m1; tab.tbl_len = 1; tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL; tab.tbl_offset = 87; tab.tbl_width = 16; wlc_lcnphy_write_table(pi, &tab); } static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi) { u32 data_buf[64]; phytbl_info_t tab; memset(data_buf, 0, sizeof(data_buf)); tab.tbl_id = 
LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_ptr = data_buf; if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) { tab.tbl_len = 30; tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET; wlc_lcnphy_write_table(pi, &tab); } tab.tbl_len = 64; tab.tbl_offset = LCNPHY_TX_PWR_CTRL_MAC_OFFSET; wlc_lcnphy_write_table(pi, &tab); } typedef enum { LCNPHY_TSSI_PRE_PA, LCNPHY_TSSI_POST_PA, LCNPHY_TSSI_EXT } lcnphy_tssi_mode_t; static void wlc_lcnphy_set_tssi_mux(phy_info_t *pi, lcnphy_tssi_mode_t pos) { mod_phy_reg(pi, 0x4d7, (0x1 << 0), (0x1) << 0); mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1) << 6); if (LCNPHY_TSSI_POST_PA == pos) { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x4d9, (0x1 << 3), (1) << 3); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); } } else { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); mod_phy_reg(pi, 0x4d9, (0x1 << 3), (0) << 3); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); } } mod_phy_reg(pi, 0x637, (0x3 << 14), (0) << 14); if (LCNPHY_TSSI_EXT == pos) { write_radio_reg(pi, RADIO_2064_REG07F, 1); mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 0x2); mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 0x1 << 7); mod_radio_reg(pi, RADIO_2064_REG028, 0x1f, 0x3); } } static u16 wlc_lcnphy_rfseq_tbl_adc_pwrup(phy_info_t *pi) { u16 N1, N2, N3, N4, N5, N6, N; N1 = ((read_phy_reg(pi, 0x4a5) & (0xff << 0)) >> 0); N2 = 1 << ((read_phy_reg(pi, 0x4a5) & (0x7 << 12)) >> 12); N3 = ((read_phy_reg(pi, 0x40d) & (0xff << 0)) >> 0); N4 = 1 << ((read_phy_reg(pi, 0x40d) & (0x7 << 8)) >> 8); N5 = ((read_phy_reg(pi, 0x4a2) & (0xff << 0)) >> 0); N6 = 1 << ((read_phy_reg(pi, 0x4a2) & (0x7 << 8)) >> 8); N = 2 * (N1 + N2 + N3 + N4 + 2 * (N5 + N6)) + 80; if (N < 1600) N = 1600; return N; } static 
/*
 * Program the aux-PGA RSSI parameters (vmid and gain codes, taken from the
 * per-board lcnphy_rssi_* fields) into the RSSI/tempsense control registers.
 * ("static" keyword precedes on the prior source line.)
 */
void wlc_lcnphy_pwrctrl_rssiparams(phy_info_t *pi)
{
    u16 auxpga_vmid, auxpga_vmid_temp, auxpga_gain_temp;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    /* vmid code: fixed 2 in the top field, coarse/fine parts from NVRAM-derived state */
    auxpga_vmid = (2 << 8) | (pi_lcn->lcnphy_rssi_vc << 4) | pi_lcn->lcnphy_rssi_vf;
    auxpga_vmid_temp = (2 << 8) | (8 << 4) | 4;
    auxpga_gain_temp = 2;
    mod_phy_reg(pi, 0x4d8, (0x1 << 0), (0) << 0);
    mod_phy_reg(pi, 0x4d8, (0x1 << 1), (0) << 1);
    mod_phy_reg(pi, 0x4d7, (0x1 << 3), (0) << 3);
    mod_phy_reg(pi, 0x4db, (0x3ff << 0) | (0x7 << 12),
                (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
    mod_phy_reg(pi, 0x4dc, (0x3ff << 0) | (0x7 << 12),
                (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
    mod_phy_reg(pi, 0x40a, (0x3ff << 0) | (0x7 << 12),
                (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
    mod_phy_reg(pi, 0x40b, (0x3ff << 0) | (0x7 << 12),
                (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
    mod_phy_reg(pi, 0x40c, (0x3ff << 0) | (0x7 << 12),
                (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
    mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
}

/*
 * One-time TSSI hardware setup: writes an identity estimate-power lookup
 * (table entry value == index) into two 128-entry sections of the TXPWRCTL
 * table, selects the external TSSI mux, and programs the TSSI sample timing
 * registers.  Continues on the next source line.
 */
static void wlc_lcnphy_tssi_setup(phy_info_t *pi)
{
    phytbl_info_t tab;
    u32 rfseq, ind;
    tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
    tab.tbl_width = 32;
    tab.tbl_ptr = &ind;  /* identity mapping: value written equals the index */
    tab.tbl_len = 1;
    tab.tbl_offset = 0;
    for (ind = 0; ind < 128; ind++) {
        wlc_lcnphy_write_table(pi, &tab);
        tab.tbl_offset++;
    }
    tab.tbl_offset = 704;
    for (ind = 0; ind < 128; ind++) {
        wlc_lcnphy_write_table(pi, &tab);
        tab.tbl_offset++;
    }
    mod_phy_reg(pi, 0x503, (0x1 << 0), (0) << 0);
    mod_phy_reg(pi, 0x503, (0x1 << 2), (0) << 2);
    mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
    wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
    mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
    mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
    mod_phy_reg(pi, 0x4d0, (0x1 << 5), (0) << 5);
    mod_phy_reg(pi, 0x4a4, (0x1ff << 0), (0) << 0);
    mod_phy_reg(pi, 0x4a5, (0xff << 0), (255) << 0);
    mod_phy_reg(pi, 0x4a5, (0x7 << 12), (5) << 12);
    mod_phy_reg(pi, 0x4a5, (0x7 << 8), (0) << 8);
    mod_phy_reg(pi, 0x40d, (0xff << 0), (64) << 0);
    mod_phy_reg(pi, /* call continues on next source line */
                0x40d, (0x7 << 8), (4) << 8);  /* completes call split across lines */
    mod_phy_reg(pi, 0x4a2, (0xff << 0), (64) << 0);
    mod_phy_reg(pi, 0x4a2, (0x7 << 8), (4) << 8);
    mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (0) << 6);
    mod_phy_reg(pi, 0x4a8, (0xff << 0), (0x1) << 0);
    wlc_lcnphy_clear_tx_power_offsets(pi);
    mod_phy_reg(pi, 0x4a6, (0x1 << 15), (1) << 15);
    mod_phy_reg(pi, 0x4a6, (0x1ff << 0), (0xff) << 0);
    mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
    /* Radio-side TSSI routing differs between rev 2 and earlier revs. */
    if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
        mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
        mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
    } else {
        mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
        mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
    }
    write_radio_reg(pi, RADIO_2064_REG025, 0xc);
    if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
        mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
    } else {
        if (CHSPEC_IS2G(pi->radio_chanspec))
            mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 1 << 1);
        else
            mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 0 << 1);
    }
    if (LCNREV_IS(pi->pubpi.phy_rev, 2))
        mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 1 << 1);
    else
        mod_radio_reg(pi, RADIO_2064_REG03A, 0x4, 1 << 2);
    mod_radio_reg(pi, RADIO_2064_REG11A, 0x1, 1 << 0);
    mod_radio_reg(pi, RADIO_2064_REG005, 0x8, 1 << 3);
    if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
        mod_phy_reg(pi, 0x4d7, (0x1 << 3) | (0x7 << 12), 0 << 3 | 2 << 12);
    }
    /* Refresh the RF sequencer's ADC power-up delay from current timing regs. */
    rfseq = wlc_lcnphy_rfseq_tbl_adc_pwrup(pi);
    tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
    tab.tbl_width = 16;
    tab.tbl_ptr = &rfseq;
    tab.tbl_len = 1;
    tab.tbl_offset = 6;
    wlc_lcnphy_write_table(pi, &tab);
    mod_phy_reg(pi, 0x938, (0x1 << 2), (1) << 2);
    mod_phy_reg(pi, 0x939, (0x1 << 2), (1) << 2);
    mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);
    mod_phy_reg(pi, 0x4d7, (0x1 << 2), (1) << 2);
    mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
    wlc_lcnphy_pwrctrl_rssiparams(pi);
}

/*
 * Latch the current TX power index and NPT (number-of-packets threshold)
 * once enough frames (> 2^npt) have been sent since the last snapshot.
 */
void wlc_lcnphy_tx_pwr_update_npt(phy_info_t *pi)
{
    u16 tx_cnt, tx_total, npt;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    tx_total = wlc_lcnphy_total_tx_frames(pi);
    tx_cnt = tx_total - pi_lcn->lcnphy_tssi_tx_cnt;
    npt = /* continues on next source line */
        wlc_lcnphy_get_tx_pwr_npt(pi);
    if (tx_cnt > (1 << npt)) {
        pi_lcn->lcnphy_tssi_tx_cnt = tx_total;
        pi_lcn->lcnphy_tssi_idx = wlc_lcnphy_get_current_tx_pwr_idx(pi);
        pi_lcn->lcnphy_tssi_npt = npt;
    }
}

/*
 * Convert a TSSI reading to power using the board's fitted polynomial
 * coefficients (a1, b0, b1).  Fixed-point: p = (b/a + 1/2) computed as
 * (2b + a) / (2a) so the division rounds to nearest.
 */
s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1)
{
    s32 a, b, p;
    a = 32768 + (a1 * tssi);
    b = (1024 * b0) + (64 * b1 * tssi);
    p = ((2 * b) + a) / (2 * a);
    return p;
}

/* Reset the TSSI start index / NPT to their 4313 defaults (no-op when
 * temperature-based power control is in use). */
static void wlc_lcnphy_txpower_reset_npt(phy_info_t *pi)
{
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
        return;
    pi_lcn->lcnphy_tssi_idx = LCNPHY_TX_PWR_CTRL_START_INDEX_2G_4313;
    pi_lcn->lcnphy_tssi_npt = LCNPHY_TX_PWR_CTRL_START_NPT;
}

/*
 * Rebuild the per-rate target-power offset table from pi->tx_power_offset
 * (negated, since the table stores offsets below target) and push the new
 * target power to hardware if it changed.
 */
void wlc_lcnphy_txpower_recalc_target(phy_info_t *pi)
{
    phytbl_info_t tab;
    u32 rate_table[WLC_NUM_RATES_CCK + WLC_NUM_RATES_OFDM + WLC_NUM_RATES_MCS_1_STREAM];
    uint i, j;
    if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
        return;
    for (i = 0, j = 0; i < ARRAY_SIZE(rate_table); i++, j++) {
        /* MCS entries start at a different offset in tx_power_offset[] */
        if (i == WLC_NUM_RATES_CCK + WLC_NUM_RATES_OFDM)
            j = TXP_FIRST_MCS_20_SISO;
        rate_table[i] = (u32) ((s32) (-pi->tx_power_offset[j]));
    }
    tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
    tab.tbl_width = 32;
    tab.tbl_len = ARRAY_SIZE(rate_table);
    tab.tbl_ptr = rate_table;
    tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;
    wlc_lcnphy_write_table(pi, &tab);
    if (wlc_lcnphy_get_target_tx_pwr(pi) != pi->tx_power_min) {
        wlc_lcnphy_set_target_tx_pwr(pi, pi->tx_power_min);
        wlc_lcnphy_txpower_reset_npt(pi);
    }
}

/*
 * Configure soft (driver-driven) TX power control at the given index.
 * Writes fixed CCK offsets and zeroed OFDM offsets into the TXPWRCTL table,
 * then programs index*2 as the power index.  Skipped entirely when
 * TSSI-based (closed-loop) control is active.  Continues on next line.
 */
static void wlc_lcnphy_set_tx_pwr_soft_ctrl(phy_info_t *pi, s8 index)
{
    u32 cck_offset[4] = { 22, 22, 22, 22 };
    u32 ofdm_offset, reg_offset_cck;
    int i;
    u16 index2;
    phytbl_info_t tab;
    if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
        return;
    /* pulse bit 14 of 0x4a4 */
    mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x1) << 14);
    mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x0) << 14);
    or_phy_reg(pi, 0x6da, 0x0040);
    reg_offset_cck = 0;  /* NOTE(review): zero, so the loop below is a no-op adjustment */
    for (i = 0; i < 4; i++)
        cck_offset[i] -= reg_offset_cck;
    tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
    tab.tbl_width = 32;
    tab.tbl_len = 4;
    tab.tbl_ptr = cck_offset;
    tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;
    wlc_lcnphy_write_table(pi, &tab);
    /* zero the OFDM offset entries (table rows 836..861) */
    ofdm_offset = 0;
    tab.tbl_len = 1;
    tab.tbl_ptr = &ofdm_offset;
    for (i = 836; i < 862; i++) {
        tab.tbl_offset = i;
        wlc_lcnphy_write_table(pi, &tab);
    }
    mod_phy_reg(pi, 0x4a4, (0x1 << 15), (0x1) << 15);
    mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x1) << 14);
    mod_phy_reg(pi, 0x4a4, (0x1 << 13), (0x1) << 13);
    mod_phy_reg(pi, 0x4b0, (0x1 << 7), (0) << 7);
    mod_phy_reg(pi, 0x43b, (0x1 << 6), (0) << 6);
    mod_phy_reg(pi, 0x4a9, (0x1 << 15), (1) << 15);
    index2 = (u16) (index * 2);  /* hardware index granularity is half steps */
    mod_phy_reg(pi, 0x4a9, (0x1ff << 0), (index2) << 0);
    mod_phy_reg(pi, 0x6a3, (0x1 << 4), (0) << 4);
}

/*
 * Compute a temperature-compensated TX power index: start from FIXED_TXPWR,
 * apply board-power delta, a temperature delta scaled by the calibrated
 * tempsense slope, and a per-revision correction.  Returns the previous/
 * fallback index when TSSI control is active, the radio is absent, or no
 * slope calibration exists.  Continues on next line.
 */
static s8 wlc_lcnphy_tempcompensated_txpwrctrl(phy_info_t *pi)
{
    s8 index, delta_brd, delta_temp, new_index, tempcorrx;
    s16 manp, meas_temp, temp_diff;
    bool neg = 0;
    u16 temp;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
        return pi_lcn->lcnphy_current_index;
    index = FIXED_TXPWR;
    if (NORADIO_ENAB(pi->pubpi))
        return index;
    if (pi_lcn->lcnphy_tempsense_slope == 0) {
        return index;  /* no slope calibration: cannot compensate */
    }
    temp = (u16) wlc_lcnphy_tempsense(pi, 0);
    meas_temp = LCNPHY_TEMPSENSE(temp);
    if (pi->tx_power_min != 0) {
        delta_brd = (pi_lcn->lcnphy_measPower - pi->tx_power_min);
    } else {
        delta_brd = 0;
    }
    manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
    temp_diff = manp - meas_temp;
    if (temp_diff < 0) {
        neg = 1;  /* qdiv takes unsigned operands; restore sign after */
        temp_diff = -temp_diff;
    }
    delta_temp = (s8) wlc_lcnphy_qdiv_roundup((u32) (temp_diff * 192),
                                              (u32) (pi_lcn->lcnphy_tempsense_slope * 10),
                                              0);
    if (neg)
        delta_temp = -delta_temp;
    if (pi_lcn->lcnphy_tempsense_option == 3 && LCNREV_IS(pi->pubpi.phy_rev, 0))
        delta_temp = 0;
    if (pi_lcn->lcnphy_tempcorrx > 31)
        tempcorrx = (s8) (pi_lcn->lcnphy_tempcorrx - 64);  /* 6-bit two's complement */
    else
        tempcorrx = (s8) pi_lcn->lcnphy_tempcorrx;
    if (LCNREV_IS(pi->pubpi.phy_rev, 1))
        tempcorrx = 4;
    new_index = index + delta_brd + delta_temp - pi_lcn->lcnphy_bandedge_corr;
    new_index += tempcorrx;
    if (LCNREV_IS(pi->pubpi.phy_rev, 1))
        index = 127;  /* rev-1 fallback if the range check below fails */
    if /* condition continues on next source line */
    (new_index < 0 || new_index > 126) {
        return index;  /* out of range: use the fallback index */
    }
    return new_index;
}

/*
 * Translate a requested power-control mode into the one actually usable on
 * this board: hardware (TSSI) mode is demoted to temperature-based when
 * only tempsense control is available, and vice versa.
 */
static u16 wlc_lcnphy_set_tx_pwr_ctrl_mode(phy_info_t *pi, u16 mode)
{
    u16 current_mode = mode;
    if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) && mode == LCNPHY_TX_PWR_CTRL_HW)
        current_mode = LCNPHY_TX_PWR_CTRL_TEMPBASED;
    if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) && mode == LCNPHY_TX_PWR_CTRL_TEMPBASED)
        current_mode = LCNPHY_TX_PWR_CTRL_HW;
    return current_mode;
}

/*
 * Switch the TX power-control mode (off / hardware TSSI / temperature
 * based).  On a mode change, snapshots state leaving HW mode, reprograms
 * targets entering HW mode, and seeds the soft index in TEMPBASED mode.
 */
void wlc_lcnphy_set_tx_pwr_ctrl(phy_info_t *pi, u16 mode)
{
    u16 old_mode = wlc_lcnphy_get_tx_pwr_ctrl(pi);
    s8 index;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    /* normalize both requested and current mode to what the board supports */
    mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, mode);
    old_mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, old_mode);
    mod_phy_reg(pi, 0x6da, (0x1 << 6), ((LCNPHY_TX_PWR_CTRL_HW == mode) ? 1 : 0) << 6);
    mod_phy_reg(pi, 0x6a3, (0x1 << 4), ((LCNPHY_TX_PWR_CTRL_HW == mode) ? 0 : 1) << 4);
    if (old_mode != mode) {
        if (LCNPHY_TX_PWR_CTRL_HW == old_mode) {
            /* leaving HW mode: remember the index HW converged on */
            wlc_lcnphy_tx_pwr_update_npt(pi);
            wlc_lcnphy_clear_tx_power_offsets(pi);
        }
        if (LCNPHY_TX_PWR_CTRL_HW == mode) {
            wlc_lcnphy_txpower_recalc_target(pi);
            wlc_lcnphy_set_start_tx_pwr_idx(pi, pi_lcn->lcnphy_tssi_idx);
            wlc_lcnphy_set_tx_pwr_npt(pi, pi_lcn->lcnphy_tssi_npt);
            mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 0);
            pi_lcn->lcnphy_tssi_tx_cnt = wlc_lcnphy_total_tx_frames(pi);
            wlc_lcnphy_disable_tx_gain_override(pi);
            pi_lcn->lcnphy_tx_power_idx_override = -1;
        } else
            wlc_lcnphy_enable_tx_gain_override(pi);
        mod_phy_reg(pi, 0x4a4, ((0x1 << 15) | (0x1 << 14) | (0x1 << 13)), mode);
        if (mode == LCNPHY_TX_PWR_CTRL_TEMPBASED) {
            index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
            wlc_lcnphy_set_tx_pwr_soft_ctrl(pi, index);
            pi_lcn->lcnphy_current_index = (s8) ((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
        }
    }
}

/*
 * Busy-wait (100us steps, up to 5000 iterations ~= 500ms) for the IQ
 * calibration engine to go idle.  Returns true if it finished.
 */
static bool wlc_lcnphy_iqcal_wait(phy_info_t *pi)
{
    uint delay_count = 0;
    while (wlc_lcnphy_iqcal_active(pi)) {
        udelay(100);
        delay_count++;
        if (delay_count > (10 * 500))
            break;  /* timeout */
    }
    return (0 == wlc_lcnphy_iqcal_active(pi));
}

static void
/*
 * TX IQ / local-oscillator-feedthrough calibration.  Runs a sequence of
 * calibration commands against an internal loopback, storing the best
 * coefficients in pi_lcn->lcnphy_cal_results.  target_gains may be NULL
 * (current gains are used); keep_tone leaves the test tone running on exit.
 * The return type ("static void") is on the previous source line.
 */
wlc_lcnphy_tx_iqlo_cal(phy_info_t *pi, lcnphy_txgains_t *target_gains,
                       lcnphy_cal_mode_t cal_mode, bool keep_tone)
{
    lcnphy_txgains_t cal_gains, temp_gains;
    u16 hash;
    u8 band_idx;
    int j;
    u16 ncorr_override[5];
    /* initial (zero) correction coefficients loaded before calibration */
    u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                          0x0000, 0x0000, 0x0000, 0x0000, 0x0000 };
    u16 commands_fullcal[] = { 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234 };
    u16 commands_recal[] = { 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234 };
    u16 command_nums_fullcal[] = { 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97 };
    u16 command_nums_recal[] = { 0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97 };
    u16 *command_nums = command_nums_fullcal;
    u16 *start_coeffs = NULL, *cal_cmds = NULL, cal_type, diq_start;
    u16 tx_pwr_ctrl_old, save_txpwrctrlrfctrl2;
    u16 save_sslpnCalibClkEnCtrl, save_sslpnRxFeClkEnCtrl;
    bool tx_gain_override_old;
    lcnphy_txgains_t old_gains;
    uint i, n_cal_cmds = 0, n_cal_start = 0;
    u16 *values_to_save;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    if (NORADIO_ENAB(pi->pubpi))
        return;
    /* scratch area for register state saved by the loopback setup */
    values_to_save = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
    if (NULL == values_to_save) {
        return;
    }
    save_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
    save_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
    or_phy_reg(pi, 0x6da, 0x40);
    or_phy_reg(pi, 0x6db, 0x3);
    switch (cal_mode) {
    case LCNPHY_CAL_FULL:
        start_coeffs = syst_coeffs;
        cal_cmds = commands_fullcal;
        n_cal_cmds = ARRAY_SIZE(commands_fullcal);
        break;
    case LCNPHY_CAL_RECAL:
        start_coeffs = syst_coeffs;
        cal_cmds = commands_recal;
        n_cal_cmds = ARRAY_SIZE(commands_recal);
        command_nums = command_nums_recal;
        break;
    default:
        /* NOTE(review): unknown mode leaves start_coeffs NULL; presumably
         * callers only pass FULL/RECAL — verify at call sites. */
        break;
    }
    wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL, start_coeffs, 11, 16, 64);
    write_phy_reg(pi, 0x6da, 0xffff);
    mod_phy_reg(pi, 0x503, (0x1 << 3), (1) << 3);
    /* power control must be off for the duration of the cal; restored at cleanup */
    tx_pwr_ctrl_old = wlc_lcnphy_get_tx_pwr_ctrl(pi);
    mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);
    wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
    save_txpwrctrlrfctrl2 = read_phy_reg(pi, 0x4db);
    mod_phy_reg(pi, 0x4db, (0x3ff << 0), (0x2a6) << 0);
    mod_phy_reg(pi, 0x4db, (0x7 << 12), (2) << 12);
    wlc_lcnphy_tx_iqlo_loopback(pi, values_to_save);
    tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
    if (tx_gain_override_old)
        wlc_lcnphy_get_tx_gain(pi, &old_gains);
    if (!target_gains) {
        /* no explicit gains: use the current (or TSSI-index-derived) gains */
        if (!tx_gain_override_old)
            wlc_lcnphy_set_tx_pwr_by_index(pi, pi_lcn->lcnphy_tssi_idx);
        wlc_lcnphy_get_tx_gain(pi, &temp_gains);
        target_gains = &temp_gains;
    }
    /* look up per-gain-combination cal parameters by a packed gain hash */
    hash = (target_gains->gm_gain << 8) | (target_gains->pga_gain << 4) |
           (target_gains->pad_gain);
    band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
    cal_gains = *target_gains;
    memset(ncorr_override, 0, sizeof(ncorr_override));
    for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
        if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
            cal_gains.gm_gain = tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
            cal_gains.pga_gain = tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
            cal_gains.pad_gain = tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
            /* NOTE(review): copies 5 entries starting at column [3], which
             * includes the pad_gain column just read — confirm the table
             * layout intends the ncorr values to start there (vs. [4]). */
            memcpy(ncorr_override, &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
                   sizeof(ncorr_override));
            break;
        }
    }
    wlc_lcnphy_set_tx_gain(pi, &cal_gains);
    write_phy_reg(pi, 0x453, 0xaa9);
    write_phy_reg(pi, 0x93d, 0xc0);
    /* load the LOFT and image-rejection gain ladders used by the cal engine */
    wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                  (const void *) lcnphy_iqcal_loft_gainladder,
                                  ARRAY_SIZE(lcnphy_iqcal_loft_gainladder), 16, 0);
    wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                  (const void *)lcnphy_iqcal_ir_gainladder,
                                  ARRAY_SIZE(lcnphy_iqcal_ir_gainladder), 16, 32);
    /* (re)start the 3.75 MHz calibration tone */
    if (pi->phy_tx_tone_freq) {
        wlc_lcnphy_stop_tx_tone(pi);
        udelay(5);
        wlc_lcnphy_start_tx_tone(pi, 3750, 88, 1);
    } else {
        wlc_lcnphy_start_tx_tone(pi, 3750, 88, 1);
    }
    write_phy_reg(pi, 0x6da, 0xffff);
    /* run each calibration command, keeping the best coefficients found */
    for (i = n_cal_start; i < n_cal_cmds; i++) {
        u16 zero_diq = 0;
        u16 best_coeffs[11];
        u16 command_num;
        cal_type = (cal_cmds[i] & 0x0f00) >> 8;
        command_num = command_nums[i];
        if (ncorr_override[cal_type])
            command_num = ncorr_override[cal_type] << 8 | (command_num & 0xff);
        write_phy_reg(pi, 0x452, command_num);
        /* cal types 3/4 need the digital LOFT (di/dq) zeroed during the run */
        if ((cal_type == 3) || (cal_type == 4)) {
            wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL, &diq_start, 1, 16, 69);
            wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL, &zero_diq, 1, 16, 69);
        }
        write_phy_reg(pi, 0x451, cal_cmds[i]);  /* kick the cal command */
        if (!wlc_lcnphy_iqcal_wait(pi)) {
            goto cleanup;  /* engine timed out; restore state and bail */
        }
        /* copy best coefficients (offset 96) to the working set (offset 64) */
        wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL, best_coeffs,
                                     ARRAY_SIZE(best_coeffs), 16, 96);
        wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL, best_coeffs,
                                      ARRAY_SIZE(best_coeffs), 16, 64);
        if ((cal_type == 3) || (cal_type == 4)) {
            /* restore the di/dq value saved above */
            wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL, &diq_start, 1, 16, 69);
        }
        wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                     pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs,
                                     ARRAY_SIZE(pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs),
                                     16, 96);
    }
    /* final snapshot of the best coefficients after all commands */
    wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                 pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs,
                                 ARRAY_SIZE(pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs),
                                 16, 96);
    pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs_valid = true;
    /* apply: IQ comp coefficients at offset 80, LOFT comp at offset 85 */
    wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                  &pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs[0],
                                  4, 16, 80);
    wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
                                  &pi_lcn->lcnphy_cal_results. /* member access continues on next source line */
                                  txiqlocal_bestcoeffs[5], 2, 16, 85);
cleanup:
    /* restore every register the loopback setup touched, free scratch */
    wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, values_to_save);
    kfree(values_to_save);
    if (!keep_tone)
        wlc_lcnphy_stop_tx_tone(pi);
    write_phy_reg(pi, 0x4db, save_txpwrctrlrfctrl2);
    write_phy_reg(pi, 0x453, 0);
    if (tx_gain_override_old)
        wlc_lcnphy_set_tx_gain(pi, &old_gains);
    wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl_old);
    write_phy_reg(pi, 0x6da, save_sslpnCalibClkEnCtrl);
    write_phy_reg(pi, 0x6db, save_sslpnRxFeClkEnCtrl);
}

/*
 * Measure the idle (no-signal) TSSI value and program it as the baseline
 * for hardware power control.  Suspends the MAC, forces max power index,
 * performs a dummy TX, converts the reading between offset-binary and
 * two's-complement forms, then restores all saved state.
 */
static void wlc_lcnphy_idle_tssi_est(wlc_phy_t *ppi)
{
    bool suspend, tx_gain_override_old;
    lcnphy_txgains_t old_gains;
    phy_info_t *pi = (phy_info_t *) ppi;
    u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB, idleTssi0_regvalue_2C;
    u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
    u16 SAVE_lpfgain = read_radio_reg(pi, RADIO_2064_REG112);
    u16 SAVE_jtag_bb_afe_switch = read_radio_reg(pi, RADIO_2064_REG007) & 1;
    u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
    u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
    /* NOTE(review): this initial read is overwritten after the dummy TX
     * below — it appears to serve only as a register access/flush. */
    idleTssi = read_phy_reg(pi, 0x4ab);
    suspend = (0 == (R_REG(&((phy_info_t *) pi)->regs->maccontrol) & MCTL_EN_MAC));
    if (!suspend)
        wlapi_suspend_mac_and_wait(pi->sh->physhim);
    wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
    tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
    wlc_lcnphy_get_tx_gain(pi, &old_gains);
    wlc_lcnphy_enable_tx_gain_override(pi);
    wlc_lcnphy_set_tx_pwr_by_index(pi, 127);  /* max index for the measurement */
    write_radio_reg(pi, RADIO_2064_REG112, 0x6);
    mod_radio_reg(pi, RADIO_2064_REG007, 0x1, 1);
    mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
    mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
    wlc_lcnphy_tssi_setup(pi);
    wlc_phy_do_dummy_tx(pi, true, OFF);
    idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) >> 0);
    idleTssi0_2C = ((read_phy_reg(pi, 0x63e) & (0x1ff << 0)) >> 0);
    /* two's-complement <-> offset-binary conversion of the 9-bit reading */
    if (idleTssi0_2C >= 256)
        idleTssi0_OB = idleTssi0_2C - 256;
    else
        idleTssi0_OB = idleTssi0_2C + 256;
    idleTssi0_regvalue_OB = idleTssi0_OB;
    if (idleTssi0_regvalue_OB >= /* comparison continues on next source line */
        256)
        idleTssi0_regvalue_2C = idleTssi0_regvalue_OB - 256;
    else
        idleTssi0_regvalue_2C = idleTssi0_regvalue_OB + 256;
    /* program the idle TSSI baseline into the power-control block */
    mod_phy_reg(pi, 0x4a6, (0x1ff << 0), (idleTssi0_regvalue_2C) << 0);
    mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
    /* restore everything saved at entry */
    wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
    wlc_lcnphy_set_tx_gain(pi, &old_gains);
    wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
    write_radio_reg(pi, RADIO_2064_REG112, SAVE_lpfgain);
    mod_radio_reg(pi, RADIO_2064_REG007, 0x1, SAVE_jtag_bb_afe_switch);
    mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, SAVE_jtag_auxpga);
    mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, SAVE_iqadc_aux_en);
    mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1 << 7);
    if (!suspend)
        wlapi_enable_mac(pi->sh->physhim);
}

/*
 * Set up and trigger a battery-voltage or temperature sense measurement
 * (mode selects TEMPSENSE vs. vbat).  Saves all affected radio/PHY
 * registers, configures the aux PGA and ADC, performs a dummy TX to take
 * the sample, then restores state.
 */
static void wlc_lcnphy_vbat_temp_sense_setup(phy_info_t *pi, u8 mode)
{
    bool suspend;
    u16 save_txpwrCtrlEn;
    u8 auxpga_vmidcourse, auxpga_vmidfine, auxpga_gain;
    u16 auxpga_vmid;
    phytbl_info_t tab;
    u32 val;
    u8 save_reg007, save_reg0FF, save_reg11F, save_reg005, save_reg025, save_reg112;
    u16 values_to_save[14];
    s8 index;
    int i;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    udelay(999);  /* settling delay before sampling */
    save_reg007 = (u8) read_radio_reg(pi, RADIO_2064_REG007);
    save_reg0FF = (u8) read_radio_reg(pi, RADIO_2064_REG0FF);
    save_reg11F = (u8) read_radio_reg(pi, RADIO_2064_REG11F);
    save_reg005 = (u8) read_radio_reg(pi, RADIO_2064_REG005);
    save_reg025 = (u8) read_radio_reg(pi, RADIO_2064_REG025);
    save_reg112 = (u8) read_radio_reg(pi, RADIO_2064_REG112);
    for (i = 0; i < 14; i++)
        values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
    suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
    if (!suspend)
        wlapi_suspend_mac_and_wait(pi->sh->physhim);
    /* NOTE(review): 0x4a4 looks like a PHY register elsewhere in this file
     * but is saved/restored here via the radio-reg accessors — confirm
     * whether this is intentional or a latent accessor mix-up. */
    save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
    wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
    index = pi_lcn->lcnphy_current_index;
    wlc_lcnphy_set_tx_pwr_by_index(pi, 127);
    mod_radio_reg(pi, RADIO_2064_REG007, 0x1, 0x1);
    mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 0x1 << 4);
    mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 0x1 /* shift continues on next source line */
                  << 2);
    mod_phy_reg(pi, 0x503, (0x1 << 0), (0) << 0);
    mod_phy_reg(pi, 0x503, (0x1 << 2), (0) << 2);
    mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
    mod_phy_reg(pi, 0x4a4, (0x1 << 15), (0) << 15);
    mod_phy_reg(pi, 0x4d0, (0x1 << 5), (0) << 5);
    /* sample timing — note divider code 6 here vs. 4 in tssi_setup */
    mod_phy_reg(pi, 0x4a5, (0xff << 0), (255) << 0);
    mod_phy_reg(pi, 0x4a5, (0x7 << 12), (5) << 12);
    mod_phy_reg(pi, 0x4a5, (0x7 << 8), (0) << 8);
    mod_phy_reg(pi, 0x40d, (0xff << 0), (64) << 0);
    mod_phy_reg(pi, 0x40d, (0x7 << 8), (6) << 8);
    mod_phy_reg(pi, 0x4a2, (0xff << 0), (64) << 0);
    mod_phy_reg(pi, 0x4a2, (0x7 << 8), (6) << 8);
    mod_phy_reg(pi, 0x4d9, (0x7 << 4), (2) << 4);
    mod_phy_reg(pi, 0x4d9, (0x7 << 8), (3) << 8);
    mod_phy_reg(pi, 0x4d9, (0x7 << 12), (1) << 12);
    mod_phy_reg(pi, 0x4da, (0x1 << 12), (0) << 12);
    mod_phy_reg(pi, 0x4da, (0x1 << 13), (1) << 13);
    mod_phy_reg(pi, 0x4a6, (0x1 << 15), (1) << 15);
    write_radio_reg(pi, RADIO_2064_REG025, 0xC);
    mod_radio_reg(pi, RADIO_2064_REG005, 0x8, 0x1 << 3);
    mod_phy_reg(pi, 0x938, (0x1 << 2), (1) << 2);
    mod_phy_reg(pi, 0x939, (0x1 << 2), (1) << 2);
    mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);
    /* refresh ADC power-up delay in the RF sequencer table */
    val = wlc_lcnphy_rfseq_tbl_adc_pwrup(pi);
    tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
    tab.tbl_width = 16;
    tab.tbl_len = 1;
    tab.tbl_ptr = &val;
    tab.tbl_offset = 6;
    wlc_lcnphy_write_table(pi, &tab);
    /* aux-PGA vmid/gain codes differ per measurement type */
    if (mode == TEMPSENSE) {
        mod_phy_reg(pi, 0x4d7, (0x1 << 3), (1) << 3);
        mod_phy_reg(pi, 0x4d7, (0x7 << 12), (1) << 12);
        auxpga_vmidcourse = 8;
        auxpga_vmidfine = 0x4;
        auxpga_gain = 2;
        mod_radio_reg(pi, RADIO_2064_REG082, 0x20, 1 << 5);
    } else {
        /* vbat sense path */
        mod_phy_reg(pi, 0x4d7, (0x1 << 3), (1) << 3);
        mod_phy_reg(pi, 0x4d7, (0x7 << 12), (3) << 12);
        auxpga_vmidcourse = 7;
        auxpga_vmidfine = 0xa;
        auxpga_gain = 2;
    }
    auxpga_vmid = (u16) ((2 << 8) | (auxpga_vmidcourse << 4) | auxpga_vmidfine);
    mod_phy_reg(pi, 0x4d8, (0x1 << 0), (1) << 0);
    mod_phy_reg(pi, 0x4d8, (0x3ff << 2), (auxpga_vmid) << 2);
    mod_phy_reg(pi, 0x4d8, (0x1 << 1), (1) << 1);
    mod_phy_reg(pi, 0x4d8, (0x7 << 12), (auxpga_gain) << 12);
    mod_phy_reg(pi, 0x4d0, (0x1 << /* call continues on next source line */
                 5), (1) << 5);
    write_radio_reg(pi, RADIO_2064_REG112, 0x6);
    wlc_phy_do_dummy_tx(pi, true, OFF);  /* trigger the measurement */
    if (!tempsense_done(pi))
        udelay(10);  /* single extra 10us grace period, then proceed */
    /* restore all saved radio and PHY registers */
    write_radio_reg(pi, RADIO_2064_REG007, (u16) save_reg007);
    write_radio_reg(pi, RADIO_2064_REG0FF, (u16) save_reg0FF);
    write_radio_reg(pi, RADIO_2064_REG11F, (u16) save_reg11F);
    write_radio_reg(pi, RADIO_2064_REG005, (u16) save_reg005);
    write_radio_reg(pi, RADIO_2064_REG025, (u16) save_reg025);
    write_radio_reg(pi, RADIO_2064_REG112, (u16) save_reg112);
    for (i = 0; i < 14; i++)
        write_phy_reg(pi, tempsense_phy_regs[i], values_to_save[i]);
    wlc_lcnphy_set_tx_pwr_by_index(pi, (int)index);
    /* NOTE(review): restored via radio-reg accessor, matching the save above */
    write_radio_reg(pi, 0x4a4, save_txpwrCtrlEn);
    if (!suspend)
        wlapi_enable_mac(pi->sh->physhim);
    udelay(999);
}

/*
 * Band-init entry point for TX power control.  Without hardware power
 * control, programs fixed per-band gains and runs a tempsense measurement;
 * with it, estimates idle TSSI, fills the tssi->power table from the
 * board's PA coefficients, and enables hardware (TSSI) control.
 */
void WLBANDINITFN(wlc_lcnphy_tx_pwr_ctrl_init) (wlc_phy_t *ppi)
{
    lcnphy_txgains_t tx_gains;
    u8 bbmult;
    phytbl_info_t tab;
    s32 a1, b0, b1;
    s32 tssi, pwr, maxtargetpwr, mintargetpwr;
    bool suspend;
    phy_info_t *pi = (phy_info_t *) ppi;
    suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
    if (!suspend)
        wlapi_suspend_mac_and_wait(pi->sh->physhim);
    if (NORADIO_ENAB(pi->pubpi)) {
        /* no radio (e.g. QT/sim): just set a baseband multiplier and leave */
        wlc_lcnphy_set_bbmult(pi, 0x30);
        if (!suspend)
            wlapi_enable_mac(pi->sh->physhim);
        return;
    }
    if (!pi->hwpwrctrl_capable) {
        /* open-loop: fixed gain codes per band */
        if (CHSPEC_IS2G(pi->radio_chanspec)) {
            tx_gains.gm_gain = 4;
            tx_gains.pga_gain = 12;
            tx_gains.pad_gain = 12;
            tx_gains.dac_gain = 0;
            bbmult = 150;
        } else {
            tx_gains.gm_gain = 7;
            tx_gains.pga_gain = 15;
            tx_gains.pad_gain = 14;
            tx_gains.dac_gain = 0;
            bbmult = 150;
        }
        wlc_lcnphy_set_tx_gain(pi, &tx_gains);
        wlc_lcnphy_set_bbmult(pi, bbmult);
        wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
    } else {
        /* closed-loop: build tssi->power lookup from PA poly coefficients */
        wlc_lcnphy_idle_tssi_est(ppi);
        wlc_lcnphy_clear_tx_power_offsets(pi);
        b0 = pi->txpa_2g[0];
        b1 = pi->txpa_2g[1];
        a1 = pi->txpa_2g[2];
        maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
        mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
        tab.tbl_ptr = &pwr;
        tab.tbl_len = 1;
        tab.tbl_offset = 0;
        for (tssi = /* loop header continues on next source line */
             0; tssi < 128; tssi++) {
            pwr = wlc_lcnphy_tssi2dbm(tssi, a1, b0, b1);
            pwr = (pwr < mintargetpwr) ? mintargetpwr : pwr;  /* clamp low end */
            wlc_lcnphy_write_table(pi, &tab);
            tab.tbl_offset++;
        }
        mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
        write_phy_reg(pi, 0x4a8, 10);
        wlc_lcnphy_set_target_tx_pwr(pi, LCN_TARGET_PWR);
        wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
    }
    if (!suspend)
        wlapi_enable_mac(pi->sh->physhim);
}

/* Read the baseband multiplier (high byte of IQLOCAL table entry 87). */
static u8 wlc_lcnphy_get_bbmult(phy_info_t *pi)
{
    u16 m0m1;
    phytbl_info_t tab;
    tab.tbl_ptr = &m0m1;
    tab.tbl_len = 1;
    tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
    tab.tbl_offset = 87;
    tab.tbl_width = 16;
    wlc_lcnphy_read_table(pi, &tab);
    return (u8) ((m0m1 & 0xff00) >> 8);
}

/* Write the PA gain override value into both TX gain override registers. */
static void wlc_lcnphy_set_pa_gain(phy_info_t *pi, u16 gain)
{
    mod_phy_reg(pi, 0x4fb,
                LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK,
                gain << LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT);
    mod_phy_reg(pi, 0x4fd,
                LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_MASK,
                gain << LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT);
}

/* Read the four radio LOFT (LO feedthrough) compensation codes (ei/eq/fi/fq). */
void wlc_lcnphy_get_radio_loft(phy_info_t *pi, u8 *ei0, u8 *eq0, u8 *fi0, u8 *fq0)
{
    *ei0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG089));
    *eq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08A));
    *fi0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08B));
    *fq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08C));
}

/* Read back the current TX gain codes (dac/gm/pga/pad) from override regs. */
static void wlc_lcnphy_get_tx_gain(phy_info_t *pi, lcnphy_txgains_t *gains)
{
    u16 dac_gain;
    dac_gain = read_phy_reg(pi, 0x439) >> 0;
    gains->dac_gain = (dac_gain & 0x380) >> 7;
    {
        u16 rfgain0, rfgain1;
        rfgain0 = (read_phy_reg(pi, 0x4b5) & (0xffff << 0)) >> 0;
        rfgain1 = (read_phy_reg(pi, 0x4fb) & (0x7fff << 0)) >> 0;
        gains->gm_gain = rfgain0 & 0xff;
        gains->pga_gain = (rfgain0 >> 8) & 0xff;
        gains->pad_gain = rfgain1 & 0xff;
    }
}

/* Write the TX IQ compensation coefficients (a, b) to IQLOCAL table. */
void wlc_lcnphy_set_tx_iqcc(phy_info_t *pi, u16 a, u16 b)
{
    phytbl_info_t tab;
    u16 iqcc[2];
    iqcc[0] = a;
    iqcc[1] = b;
    tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
    tab.tbl_width = 16;
    tab.tbl_ptr = iqcc;
    tab.tbl_len = 2;
    tab.tbl_offset = 80;
    wlc_lcnphy_write_table(pi, &tab);
}

/* Write the TX local-oscillator compensation (di/dq packed in one u16). */
void wlc_lcnphy_set_tx_locc(phy_info_t *pi, u16 didq)
{
    phytbl_info_t tab;
    tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
    tab.tbl_width = 16;
    tab.tbl_ptr = &didq;
    tab.tbl_len = 1;
    tab.tbl_offset = 85;
    wlc_lcnphy_write_table(pi, &tab);
}

/*
 * Force a specific TX power index: disables hardware power control, then
 * looks up that index's gain/bbmult/IQ/LO entries in the TXPWRCTL table and
 * applies each one via the override registers.
 */
void wlc_lcnphy_set_tx_pwr_by_index(phy_info_t *pi, int index)
{
    phytbl_info_t tab;
    u16 a, b;
    u8 bb_mult;
    u32 bbmultiqcomp, txgain, locoeffs, rfpower;
    lcnphy_txgains_t gains;
    phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
    pi_lcn->lcnphy_tx_power_idx_override = (s8) index;
    pi_lcn->lcnphy_current_index = (u8) index;
    tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
    tab.tbl_width = 32;
    tab.tbl_len = 1;
    /* manual override requires hardware power control off */
    wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
    tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + index;
    tab.tbl_ptr = &bbmultiqcomp;
    wlc_lcnphy_read_table(pi, &tab);
    tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + index;
    tab.tbl_width = 32;
    tab.tbl_ptr = &txgain;
    wlc_lcnphy_read_table(pi, &tab);
    /* unpack gain word: gm[7:0], pga[15:8], pad[23:16], pa[30:24] */
    gains.gm_gain = (u16) (txgain & 0xff);
    gains.pga_gain = (u16) (txgain >> 8) & 0xff;
    gains.pad_gain = (u16) (txgain >> 16) & 0xff;
    gains.dac_gain = (u16) (bbmultiqcomp >> 28) & 0x07;
    wlc_lcnphy_set_tx_gain(pi, &gains);
    wlc_lcnphy_set_pa_gain(pi, (u16) (txgain >> 24) & 0x7f);
    bb_mult = (u8) ((bbmultiqcomp >> 20) & 0xff);
    wlc_lcnphy_set_bbmult(pi, bb_mult);
    wlc_lcnphy_enable_tx_gain_override(pi);
    if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
        /* also apply the per-index IQ comp, LO comp and RF power entries */
        a = (u16) ((bbmultiqcomp >> 10) & 0x3ff);
        b = (u16) (bbmultiqcomp & 0x3ff);
        wlc_lcnphy_set_tx_iqcc(pi, a, b);
        tab.tbl_offset = LCNPHY_TX_PWR_CTRL_LO_OFFSET + index;
        tab.tbl_ptr = &locoeffs;
        wlc_lcnphy_read_table(pi, &tab);
        wlc_lcnphy_set_tx_locc(pi, (u16) locoeffs);
        tab.tbl_offset = LCNPHY_TX_PWR_CTRL_PWR_OFFSET + index;
        tab.tbl_ptr = &rfpower;
        wlc_lcnphy_read_table(pi, &tab);
        mod_phy_reg(pi, 0x6a6, (0x1fff << 0), (rfpower * 8) << 0);
    }
}

/* Force the TX/RX (T/R) switch into the given state via override bits. */
static void wlc_lcnphy_set_trsw_override(phy_info_t *pi, bool tx, bool rx)
{
    mod_phy_reg(pi, 0x44d, (0x1 << 1) | (0x1 << 0), /* value continues on next source line */
                (tx ? (0x1 << 1) : 0) | (rx ? (0x1 << 0) : 0));
    or_phy_reg(pi, 0x44c, (0x1 << 1) | (0x1 << 0));  /* enable both overrides */
}

/*
 * Clear the PAPD compensation delta table: even entries 0, odd entries a
 * neutral 0x80000 code.
 */
static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi)
{
    u32 j;
    phytbl_info_t tab;
    u32 temp_offset[128];
    tab.tbl_ptr = temp_offset;
    tab.tbl_len = 128;
    tab.tbl_id = LCNPHY_TBL_ID_PAPDCOMPDELTATBL;
    tab.tbl_width = 32;
    tab.tbl_offset = 0;
    memset(temp_offset, 0, sizeof(temp_offset));
    for (j = 1; j < 128; j += 2)
        temp_offset[j] = 0x80000;
    wlc_lcnphy_write_table(pi, &tab);
    return;
}

/*
 * Program individual RX gain stage overrides (biq1/2, TIA, LNA1/2, external
 * LNA, T/R switch).  Packing of gain0_15 follows the hardware field layout;
 * lna2 and lna1 each occupy two adjacent 2-bit fields.
 */
static void wlc_lcnphy_set_rx_gain_by_distribution(phy_info_t *pi,
                                                   u16 trsw, u16 ext_lna,
                                                   u16 biq2, u16 biq1,
                                                   u16 tia, u16 lna2, u16 lna1)
{
    u16 gain0_15, gain16_19;
    gain16_19 = biq2 & 0xf;
    gain0_15 = ((biq1 & 0xf) << 12) | ((tia & 0xf) << 8) |
               ((lna2 & 0x3) << 6) | ((lna2 & 0x3) << 4) |
               ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
    mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
    mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
    mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
    if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
        mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
        mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10);
    } else {
        mod_phy_reg(pi, 0x4b1, (0x1 << 10), 0 << 10);
        mod_phy_reg(pi, 0x4b1, (0x1 << 15), 0 << 15);
        mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
    }
    mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);  /* trsw bit is active-low */
}

/* Enable/disable the RX gain override machinery (revision-dependent bits). */
static void wlc_lcnphy_rx_gain_override_enable(phy_info_t *pi, bool enable)
{
    u16 ebit = enable ? /* ternary continues on next source line */
               1 : 0;
    mod_phy_reg(pi, 0x4b0, (0x1 << 8), ebit << 8);
    mod_phy_reg(pi, 0x44c, (0x1 << 0), ebit << 0);
    if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
        mod_phy_reg(pi, 0x44c, (0x1 << 4), ebit << 4);
        mod_phy_reg(pi, 0x44c, (0x1 << 6), ebit << 6);
        mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
        mod_phy_reg(pi, 0x4b0, (0x1 << 6), ebit << 6);
    } else {
        mod_phy_reg(pi, 0x4b0, (0x1 << 12), ebit << 12);
        mod_phy_reg(pi, 0x4b0, (0x1 << 13), ebit << 13);
        mod_phy_reg(pi, 0x4b0, (0x1 << 5), ebit << 5);
    }
    if (CHSPEC_IS2G(pi->radio_chanspec)) {
        mod_phy_reg(pi, 0x4b0, (0x1 << 10), ebit << 10);
        mod_phy_reg(pi, 0x4e5, (0x1 << 3), ebit << 3);
    }
}

/*
 * Power the TX chain up (bEnable) or down.  Power-down clears all TX/PA
 * override bits; power-up forces the T/R switch to TX and sets the
 * band-specific PA/override bit pattern.  Continues on next line.
 */
void wlc_lcnphy_tx_pu(phy_info_t *pi, bool bEnable)
{
    if (!bEnable) {
        /* drop every TX-related override */
        and_phy_reg(pi, 0x43b, ~(u16) ((0x1 << 1) | (0x1 << 4)));
        mod_phy_reg(pi, 0x43c, (0x1 << 1), 1 << 1);
        and_phy_reg(pi, 0x44c, ~(u16) ((0x1 << 3) | (0x1 << 5) | (0x1 << 12) |
                                       (0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
        and_phy_reg(pi, 0x44d, ~(u16) ((0x1 << 3) | (0x1 << 5) | (0x1 << 14)));
        mod_phy_reg(pi, 0x44d, (0x1 << 2), 1 << 2);
        mod_phy_reg(pi, 0x44d, (0x1 << 1) | (0x1 << 0), (0x1 << 0));
        and_phy_reg(pi, 0x4f9, ~(u16) ((0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
        and_phy_reg(pi, 0x4fa, ~(u16) ((0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
    } else {
        mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
        mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
        mod_phy_reg(pi, 0x43b, (0x1 << 4), 1 << 4);
        mod_phy_reg(pi, 0x43c, (0x1 << 6), 0 << 6);
        mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
        mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);
        wlc_lcnphy_set_trsw_override(pi, true, false);  /* T/R switch -> TX */
        mod_phy_reg(pi, 0x44d, (0x1 << 2), 0 << 2);
        mod_phy_reg(pi, 0x44c, (0x1 << 2), 1 << 2);
        if (CHSPEC_IS2G(pi->radio_chanspec)) {
            /* 2.4 GHz PA enable pattern */
            mod_phy_reg(pi, 0x44c, (0x1 << 3), 1 << 3);
            mod_phy_reg(pi, 0x44d, (0x1 << 3), 1 << 3);
            mod_phy_reg(pi, 0x44c, (0x1 << 5), 1 << 5);
            mod_phy_reg(pi, 0x44d, (0x1 << 5), 0 << 5);
            mod_phy_reg(pi, 0x4f9, (0x1 << 1), 1 << 1);
            mod_phy_reg(pi, 0x4fa, (0x1 << 1), 1 << 1);
            mod_phy_reg(pi, 0x4f9, (0x1 << 2), 1 << 2);
            mod_phy_reg(pi, /* call continues on next source line */
                        0x4fa, (0x1 << 2), 1 << 2);
            mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
            mod_phy_reg(pi, 0x4fa, (0x1 << 0), 1 << 0);
        } else {
            /* 5 GHz PA enable pattern (opposite 0x4fa polarity) */
            mod_phy_reg(pi, 0x44c, (0x1 << 3), 1 << 3);
            mod_phy_reg(pi, 0x44d, (0x1 << 3), 0 << 3);
            mod_phy_reg(pi, 0x44c, (0x1 << 5), 1 << 5);
            mod_phy_reg(pi, 0x44d, (0x1 << 5), 1 << 5);
            mod_phy_reg(pi, 0x4f9, (0x1 << 1), 1 << 1);
            mod_phy_reg(pi, 0x4fa, (0x1 << 1), 0 << 1);
            mod_phy_reg(pi, 0x4f9, (0x1 << 2), 1 << 2);
            mod_phy_reg(pi, 0x4fa, (0x1 << 2), 0 << 2);
            mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
            mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
        }
    }
}

/*
 * Start sample-play: loop num_samps buffered samples num_loops times
 * (0xffff = forever) with the given inter-loop wait.  In iqcalmode the
 * playback is triggered through the IQ-cal start bit instead of TX.
 */
static void wlc_lcnphy_run_samples(phy_info_t *pi, u16 num_samps,
                                   u16 num_loops, u16 wait, bool iqcalmode)
{
    or_phy_reg(pi, 0x6da, 0x8080);
    mod_phy_reg(pi, 0x642, (0x7f << 0), (num_samps - 1) << 0);
    if (num_loops != 0xffff)
        num_loops--;  /* hardware counts loops from zero */
    mod_phy_reg(pi, 0x640, (0xffff << 0), num_loops << 0);
    mod_phy_reg(pi, 0x641, (0xffff << 0), wait << 0);
    if (iqcalmode) {
        /* toggle the IQ-cal trigger bit: clear, then set */
        and_phy_reg(pi, 0x453, (u16) ~(0x1 << 15));
        or_phy_reg(pi, 0x453, (0x1 << 15));
    } else {
        write_phy_reg(pi, 0x63f, 1);
        wlc_lcnphy_tx_pu(pi, 1);
    }
    or_radio_reg(pi, RADIO_2064_REG112, 0x6);
}

/*
 * Put the PHY in/out of "deaf" mode (RX blanked) — used while transmitting
 * test tones.  NOTE(review): both LCNREV branches below are identical;
 * possibly a leftover from a revision-specific difference — verify.
 */
void wlc_lcnphy_deaf_mode(phy_info_t *pi, bool mode)
{
    u8 phybw40;
    phybw40 = CHSPEC_IS40(pi->radio_chanspec);
    if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
        mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
        mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);
    } else {
        mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
        mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);
    }
    if (phybw40 == 0) {
        mod_phy_reg((pi), 0x410, (0x1 << 6) | (0x1 << 5),
                    ((CHSPEC_IS2G(pi->radio_chanspec)) ? /* continues on next source line */
			     (!mode) : 0) << 6 | (!mode) << 5);
		mod_phy_reg(pi, 0x410, (0x1 << 7), (mode) << 7);
	}
}

/*
 * Generate a CW tone at f_kHz with amplitude max_val and start playing it
 * from the sample-play table.  Builds one full period of the tone with the
 * CORDIC helper, packs 10-bit I/Q pairs into data_buf (max 64 samples —
 * the num_samps search below must converge within that bound; TODO confirm
 * no overflow for unusual f_kHz), writes the table and starts playback.
 * Leaves the PHY deaf until wlc_lcnphy_stop_tx_tone() is called.
 */
void wlc_lcnphy_start_tx_tone(phy_info_t *pi, s32 f_kHz, u16 max_val,
			      bool iqcalmode)
{
	u8 phy_bw;
	u16 num_samps, t, k;
	u32 bw;
	fixed theta = 0, rot = 0;
	cs32 tone_samp;
	u32 data_buf[64];
	u16 i_samp, q_samp;
	phytbl_info_t tab;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	pi->phy_tx_tone_freq = f_kHz;

	wlc_lcnphy_deaf_mode(pi, true);
	phy_bw = 40;	/* sample-play clock assumed 40 MHz here */

	if (pi_lcn->lcnphy_spurmod) {
		/* Disable spur avoidance while the tone plays. */
		write_phy_reg(pi, 0x942, 0x2);
		write_phy_reg(pi, 0x93b, 0x0);
		write_phy_reg(pi, 0x93c, 0x0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
	}

	if (f_kHz) {
		/* Find the smallest buffer length holding an integer number
		 * of tone periods. */
		k = 1;
		do {
			bw = phy_bw * 1000 * k;
			num_samps = bw / ABS(f_kHz);
			k++;
		} while ((num_samps * (u32) (ABS(f_kHz))) != bw);
	} else
		num_samps = 2;

	rot = FIXED((f_kHz * 36) / phy_bw) / 100;	/* phase step per sample */
	theta = 0;

	for (t = 0; t < num_samps; t++) {

		wlc_phy_cordic(theta, &tone_samp);

		theta += rot;

		/* Pack signed I/Q into 10-bit fields: I in [19:10], Q in [9:0]. */
		i_samp = (u16) (FLOAT(tone_samp.i * max_val) & 0x3ff);
		q_samp = (u16) (FLOAT(tone_samp.q * max_val) & 0x3ff);
		data_buf[t] = (i_samp << 10) | q_samp;
	}

	mod_phy_reg(pi, 0x6d6, (0x3 << 0), 0 << 0);

	mod_phy_reg(pi, 0x6da, (0x1 << 3), 1 << 3);

	tab.tbl_ptr = data_buf;
	tab.tbl_len = num_samps;
	tab.tbl_id = LCNPHY_TBL_ID_SAMPLEPLAY;
	tab.tbl_offset = 0;
	tab.tbl_width = 32;
	wlc_lcnphy_write_table(pi, &tab);

	wlc_lcnphy_run_samples(pi, num_samps, 0xffff, 0, iqcalmode);
}

/*
 * Stop a tone started by wlc_lcnphy_start_tx_tone(): restore the spur-
 * avoidance settings, stop whichever playback mode is active (reg 0x644
 * bit 0 = sample play, bit 1 = IQ-cal playback), and un-deafen the PHY.
 */
void wlc_lcnphy_stop_tx_tone(phy_info_t *pi)
{
	s16 playback_status;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	pi->phy_tx_tone_freq = 0;
	if (pi_lcn->lcnphy_spurmod) {
		/* Re-enable spur avoidance with its normal coefficients. */
		write_phy_reg(pi, 0x942, 0x7);
		write_phy_reg(pi, 0x93b, 0x2017);
		write_phy_reg(pi, 0x93c, 0x27c5);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
	}

	playback_status = read_phy_reg(pi, 0x644);
	if (playback_status & (0x1 << 0)) {
		wlc_lcnphy_tx_pu(pi, 0);
		mod_phy_reg(pi, 0x63f, (0x1 << 1), 1 << 1);
	} else if (playback_status & (0x1 << 1))
		mod_phy_reg(pi, 0x453, (0x1 << 15), 0 << 15);

	mod_phy_reg(pi, 0x6d6, (0x3 << 0), 1 << 0);

	mod_phy_reg(pi, 0x6da, (0x1 << 3), 0 <<
		    3);

	mod_phy_reg(pi, 0x6da, (0x1 << 7), 0 << 7);

	and_radio_reg(pi, RADIO_2064_REG112, 0xFFF9);

	wlc_lcnphy_deaf_mode(pi, false);
}

/* Clear the T/R switch override-enable bits set by the set_trsw helper. */
static void wlc_lcnphy_clear_trsw_override(phy_info_t *pi)
{

	and_phy_reg(pi, 0x44c, (u16) ~((0x1 << 1) | (0x1 << 0)));
}

/* Read the current Tx IQ compensation coefficients a/b from table 0,
 * offset 80. */
void wlc_lcnphy_get_tx_iqcc(phy_info_t *pi, u16 *a, u16 *b)
{
	u16 iqcc[2];
	phytbl_info_t tab;

	tab.tbl_ptr = iqcc;
	tab.tbl_len = 2;
	tab.tbl_id = 0;
	tab.tbl_offset = 80;
	tab.tbl_width = 16;
	wlc_lcnphy_read_table(pi, &tab);

	*a = iqcc[0];
	*b = iqcc[1];
}

/* Read the Tx local-oscillator (DC) compensation word from table 0,
 * offset 85. */
u16 wlc_lcnphy_get_tx_locc(phy_info_t *pi)
{
	phytbl_info_t tab;
	u16 didq;

	tab.tbl_id = 0;
	tab.tbl_width = 16;
	tab.tbl_ptr = &didq;
	tab.tbl_len = 1;
	tab.tbl_offset = 85;
	wlc_lcnphy_read_table(pi, &tab);

	return didq;
}

/*
 * Run Tx IQ/LO calibration and propagate the resulting coefficients into
 * every entry of the Tx power-control table.  Saves and restores bbmult,
 * PA gain, Tx gains and the power-control state around the calibration.
 * Uses the hardware cal on rev 1 / hw_iqcal_en parts, the software cal
 * otherwise, and retries with alternate gain targets when the radio LOFT
 * fine values saturate at +/-15.
 */
static void wlc_lcnphy_txpwrtbl_iqlo_cal(phy_info_t *pi)
{

	lcnphy_txgains_t target_gains, old_gains;
	u8 save_bb_mult;
	u16 a, b, didq, save_pa_gain = 0;
	uint idx, SAVE_txpwrindex = 0xFF;
	u32 val;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	phytbl_info_t tab;
	u8 ei0, eq0, fi0, fq0;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	/* Save state to restore after calibration. */
	wlc_lcnphy_get_tx_gain(pi, &old_gains);
	save_pa_gain = wlc_lcnphy_get_pa_gain(pi);

	save_bb_mult = wlc_lcnphy_get_bbmult(pi);

	if (SAVE_txpwrctrl == LCNPHY_TX_PWR_CTRL_OFF)
		SAVE_txpwrindex = wlc_lcnphy_get_current_tx_pwr_idx(pi);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

	/* Fixed gain target for the calibration tone. */
	target_gains.gm_gain = 7;
	target_gains.pga_gain = 0;
	target_gains.pad_gain = 21;
	target_gains.dac_gain = 0;
	wlc_lcnphy_set_tx_gain(pi, &target_gains);
	wlc_lcnphy_set_tx_pwr_by_index(pi, 16);

	if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {

		wlc_lcnphy_set_tx_pwr_by_index(pi, 30);

		wlc_lcnphy_tx_iqlo_cal(pi, &target_gains,
				       (pi_lcn->
					lcnphy_recal ?
					LCNPHY_CAL_RECAL : LCNPHY_CAL_FULL),
				       false);
	} else {
		wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
	}

	wlc_lcnphy_get_radio_loft(pi, &ei0, &eq0, &fi0, &fq0);
	if ((ABS((s8) fi0) == 15) && (ABS((s8) fq0) == 15)) {
		/* Fine LOFT saturated: retry with band-specific gains. */
		if (CHSPEC_IS5G(pi->radio_chanspec)) {
			target_gains.gm_gain = 255;
			target_gains.pga_gain = 255;
			target_gains.pad_gain = 0xf0;
			target_gains.dac_gain = 0;
		} else {
			target_gains.gm_gain = 7;
			target_gains.pga_gain = 45;
			target_gains.pad_gain = 186;
			target_gains.dac_gain = 0;
		}

		if (LCNREV_IS(pi->pubpi.phy_rev, 1)
		    || pi_lcn->lcnphy_hw_iqcal_en) {

			target_gains.pga_gain = 0;
			target_gains.pad_gain = 30;
			wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
			wlc_lcnphy_tx_iqlo_cal(pi, &target_gains,
					       LCNPHY_CAL_FULL, false);
		} else {
			wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
		}
	}

	wlc_lcnphy_get_tx_iqcc(pi, &a, &b);

	didq = wlc_lcnphy_get_tx_locc(pi);

	/* Write the a/b IQ coefficients and the LO word into all 128
	 * power-table entries. */
	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_ptr = &val;
	tab.tbl_len = 1;
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;

	for (idx = 0; idx < 128; idx++) {
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + idx;

		wlc_lcnphy_read_table(pi, &tab);
		val = (val & 0xfff00000) |
		    ((u32) (a & 0x3FF) << 10) | (b & 0x3ff);
		wlc_lcnphy_write_table(pi, &tab);

		val = didq;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_LO_OFFSET + idx;
		wlc_lcnphy_write_table(pi, &tab);
	}

	/* Cache results for later re-application. */
	pi_lcn->lcnphy_cal_results.txiqlocal_a = a;
	pi_lcn->lcnphy_cal_results.txiqlocal_b = b;
	pi_lcn->lcnphy_cal_results.txiqlocal_didq = didq;
	pi_lcn->lcnphy_cal_results.txiqlocal_ei0 = ei0;
	pi_lcn->lcnphy_cal_results.txiqlocal_eq0 = eq0;
	pi_lcn->lcnphy_cal_results.txiqlocal_fi0 = fi0;
	pi_lcn->lcnphy_cal_results.txiqlocal_fq0 = fq0;

	/* Restore everything we touched. */
	wlc_lcnphy_set_bbmult(pi, save_bb_mult);
	wlc_lcnphy_set_pa_gain(pi, save_pa_gain);
	wlc_lcnphy_set_tx_gain(pi, &old_gains);

	if (SAVE_txpwrctrl != LCNPHY_TX_PWR_CTRL_OFF)
		wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
	else
		wlc_lcnphy_set_tx_pwr_by_index(pi, SAVE_txpwrindex);
}

/*
 * Read the two 9-bit temperature-sense ADC values (regs 0x476/0x477),
 * sign-extend each from 9 bits and return their average.  mode == 1 runs
 * the full sensor setup with the MAC suspended and pulses reg 0x448 bit 14
 * afterwards (sensor reset/latch — TODO confirm semantics).
 */
s16 wlc_lcnphy_tempsense_new(phy_info_t *pi, bool mode)
{
	u16 tempsenseval1, tempsenseval2;
	s16
	 avg = 0;
	bool suspend = 0;

	if (NORADIO_ENAB(pi->pubpi))
		return -1;

	if (mode == 1) {
		suspend =
		    (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
	}
	tempsenseval1 = read_phy_reg(pi, 0x476) & 0x1FF;
	tempsenseval2 = read_phy_reg(pi, 0x477) & 0x1FF;

	/* 9-bit two's-complement -> signed */
	if (tempsenseval1 > 255)
		avg = (s16) (tempsenseval1 - 512);
	else
		avg = (s16) tempsenseval1;

	if (tempsenseval2 > 255)
		avg += (s16) (tempsenseval2 - 512);
	else
		avg += (s16) tempsenseval2;

	avg /= 2;

	if (mode == 1) {

		mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);

		udelay(100);
		mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return avg;
}

/*
 * Legacy temperature read returning a raw 0..511 code.  Combines the two
 * sensor readings either as a difference or an average depending on
 * lcnphy_tempsense_option / hwpwrctrl_capable; option 2 returns sensor 1
 * directly.  NOTE(review): returns u16 but the NORADIO path returns -1,
 * which callers see as 0xffff — verify callers expect that.
 */
u16 wlc_lcnphy_tempsense(phy_info_t *pi, bool mode)
{
	u16 tempsenseval1, tempsenseval2;
	s32 avg = 0;
	bool suspend = 0;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	if (NORADIO_ENAB(pi->pubpi))
		return -1;

	if (mode == 1) {
		suspend =
		    (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
	}
	tempsenseval1 = read_phy_reg(pi, 0x476) & 0x1FF;
	tempsenseval2 = read_phy_reg(pi, 0x477) & 0x1FF;

	if (tempsenseval1 > 255)
		avg = (int)(tempsenseval1 - 512);
	else
		avg = (int)tempsenseval1;

	if (pi_lcn->lcnphy_tempsense_option == 1 || pi->hwpwrctrl_capable) {
		/* Option 1: use the difference of the two sensors. */
		if (tempsenseval2 > 255)
			avg = (int)(avg - tempsenseval2 + 512);
		else
			avg = (int)(avg - tempsenseval2);
	} else {
		/* Otherwise: average the two sensors. */
		if (tempsenseval2 > 255)
			avg = (int)(avg + tempsenseval2 - 512);
		else
			avg = (int)(avg + tempsenseval2);
		avg = avg / 2;
	}
	if (avg < 0)
		avg = avg + 512;	/* wrap back into the 9-bit code range */

	if (pi_lcn->lcnphy_tempsense_option == 2)
		avg = tempsenseval1;

	if (mode)
		wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);

	if (mode == 1) {

		mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);

		udelay(100);
		mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return (u16) avg;
}

/*
 * Convert a tempsense reading into degrees using the LCN_TEMPSENSE_*
 * linear scaling constants (fixed-point, with rounding).
 */
s8 wlc_lcnphy_tempsense_degree(phy_info_t *pi, bool mode)
{
	s32 degree = wlc_lcnphy_tempsense_new(pi, mode);
	degree =
	    ((degree << 10) + LCN_TEMPSENSE_OFFSET + (LCN_TEMPSENSE_DEN >> 1))
	    / LCN_TEMPSENSE_DEN;
	return (s8) degree;
}

/*
 * Read the battery-voltage sensor (reg 0x475, 9-bit signed) and scale it
 * with the LCN_VBAT_SCALE_* constants.  mode == 1 performs the sensor
 * setup with the MAC suspended first.
 */
s8 wlc_lcnphy_vbatsense(phy_info_t *pi, bool mode)
{
	u16 vbatsenseval;
	s32 avg = 0;
	bool suspend = 0;

	if (NORADIO_ENAB(pi->pubpi))
		return -1;

	if (mode == 1) {
		suspend =
		    (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
	}

	vbatsenseval = read_phy_reg(pi, 0x475) & 0x1FF;

	/* 9-bit two's-complement -> signed */
	if (vbatsenseval > 255)
		avg = (s32) (vbatsenseval - 512);
	else
		avg = (s32) vbatsenseval;

	avg =
	    (avg * LCN_VBAT_SCALE_NOM +
	     (LCN_VBAT_SCALE_DEN >> 1)) / LCN_VBAT_SCALE_DEN;

	if (mode == 1) {
		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return (s8) avg;
}

/*
 * Initialize the AFE clocks for the given mode and toggle the AFE
 * power-down to latch the new setting.
 */
static void wlc_lcnphy_afe_clk_init(phy_info_t *pi, u8 mode)
{
	u8 phybw40;
	phybw40 = CHSPEC_IS40(pi->radio_chanspec);

	mod_phy_reg(pi, 0x6d1, (0x1 << 7), (1) << 7);

	if (((mode == AFE_CLK_INIT_MODE_PAPD) && (phybw40 == 0)) ||
	    (mode == AFE_CLK_INIT_MODE_TXRX2X))
		write_phy_reg(pi, 0x6d0, 0x7);

	wlc_lcnphy_toggle_afe_pwdn(pi);
}

/*
 * Run the hardware Rx IQ estimator over num_samps samples and fill
 * *iq_est with the accumulated I*Q product and I/Q powers.  Polls the
 * start bit (reg 0x481 bit 9) with a ~500 ms timeout; returns false on
 * timeout.  Temporarily disables the power-down override (0x410 bit 3).
 */
static bool
wlc_lcnphy_rx_iq_est(phy_info_t *pi,
		     u16 num_samps,
		     u8 wait_time, lcnphy_iq_est_t *iq_est)
{
	int wait_count = 0;
	bool result = true;
	u8 phybw40;
	phybw40 = CHSPEC_IS40(pi->radio_chanspec);

	mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);

	mod_phy_reg(pi, 0x410, (0x1 << 3), (0) << 3);

	mod_phy_reg(pi, 0x482, (0xffff << 0), (num_samps) << 0);

	mod_phy_reg(pi, 0x481, (0xff << 0), ((u16) wait_time) << 0);

	mod_phy_reg(pi, 0x481, (0x1 << 8), (0) << 8);

	mod_phy_reg(pi, 0x481, (0x1 << 9), (1) << 9);

	/* Hardware clears bit 9 when the estimate completes. */
	while (read_phy_reg(pi, 0x481) & (0x1 << 9)) {

		if (wait_count > (10 * 500)) {	/* 500 ms in 100 us steps */
			result = false;
			goto cleanup;
		}
		udelay(100);
		wait_count++;
	}

	iq_est->iq_prod = ((u32) read_phy_reg(pi, 0x483) << 16) |
	    (u32) read_phy_reg(pi, 0x484);
	iq_est->i_pwr =
	    ((u32) read_phy_reg(pi, 0x485) << 16) |
	    (u32) read_phy_reg(pi, 0x486);
	iq_est->q_pwr =
	    ((u32) read_phy_reg(pi, 0x487) << 16) |
	    (u32) read_phy_reg(pi, 0x488);

cleanup:
	mod_phy_reg(pi, 0x410, (0x1 << 3), (1) << 3);

	mod_phy_reg(pi, 0x6da, (0x1 << 5), (0) << 5);

	return result;
}

/*
 * Measure the Rx IQ imbalance over num_samps samples and program new
 * a0/b0 compensation coefficients.  Computes a = -iq/ii and
 * b = sqrt(qq/ii - a^2) - 1 in fixed point (10 fractional bits), using
 * wlc_phy_nbits() to normalize the shifts.  On any failure the previous
 * coefficients are re-applied.  NOTE(review): the early 'return false'
 * paths inside the shift branches skip the cleanup that restores the
 * coefficients and re-enables 0x64b bits — verify that is intentional.
 */
static bool wlc_lcnphy_calc_rx_iq_comp(phy_info_t *pi, u16 num_samps)
{
#define LCNPHY_MIN_RXIQ_PWR 2
	bool result;
	u16 a0_new, b0_new;
	lcnphy_iq_est_t iq_est = { 0, 0, 0 };
	s32 a, b, temp;
	s16 iq_nbits, qq_nbits, arsh, brsh;
	s32 iq;
	u32 ii, qq;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	/* Remember current coefficients, then zero them for the estimate. */
	a0_new = ((read_phy_reg(pi, 0x645) & (0x3ff << 0)) >> 0);
	b0_new = ((read_phy_reg(pi, 0x646) & (0x3ff << 0)) >> 0);
	mod_phy_reg(pi, 0x6d1, (0x1 << 2), (0) << 2);

	mod_phy_reg(pi, 0x64b, (0x1 << 6), (1) << 6);

	wlc_lcnphy_set_rx_iq_comp(pi, 0, 0);

	result = wlc_lcnphy_rx_iq_est(pi, num_samps, 32, &iq_est);
	if (!result)
		goto cleanup;

	iq = (s32) iq_est.iq_prod;
	ii = iq_est.i_pwr;
	qq = iq_est.q_pwr;

	if ((ii + qq) < LCNPHY_MIN_RXIQ_PWR) {
		result = false;
		goto cleanup;
	}

	iq_nbits = wlc_phy_nbits(iq);
	qq_nbits = wlc_phy_nbits(qq);

	arsh = 10 - (30 - iq_nbits);
	if (arsh >= 0) {
		a = (-(iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
		temp = (s32) (ii >> arsh);
		if (temp == 0) {
			return false;
		}
	} else {
		a = (-(iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
		temp = (s32) (ii << -arsh);
		if (temp == 0) {
			return false;
		}
	}
	a /= temp;
	brsh = qq_nbits - 31 + 20;
	if (brsh >= 0) {
		b = (qq << (31 - qq_nbits));
		temp = (s32) (ii >> brsh);
		if (temp == 0) {
			return false;
		}
	} else {
		b = (qq << (31 - qq_nbits));
		temp = (s32) (ii << -brsh);
		if (temp == 0) {
			return false;
		}
	}
	b /= temp;
	b -= a * a;
	b = (s32) int_sqrt((unsigned long) b);
	b -= (1 << 10);	/* subtract 1.0 in Q10 */
	a0_new = (u16) (a & 0x3ff);
	b0_new = (u16) (b & 0x3ff);
cleanup:

	wlc_lcnphy_set_rx_iq_comp(pi, a0_new, b0_new);

	mod_phy_reg(pi, 0x64b, (0x1 << 0), (1) << 0);

	mod_phy_reg(pi, 0x64b, (0x1 << 3), (1) << 3);

	pi_lcn->lcnphy_cal_results.rxiqcal_coeff_a0 = a0_new;
	pi_lcn->lcnphy_cal_results.rxiqcal_coeff_b0 = b0_new;

	return result;
}

/*
 * Rx IQ calibration entry point.
 * module == 2: look up precomputed a/b coefficients for the current
 * channel in *iqcomp and apply them.
 * module == 1: full calibration — save PHY/radio state, configure a Tx->Rx
 * loopback, play a 2 MHz tone, search downward for a TIA gain that keeps
 * the digital power below a threshold, run wlc_lcnphy_calc_rx_iq_comp(),
 * then restore everything.
 * Returns true on success.
 */
static bool
wlc_lcnphy_rx_iq_cal(phy_info_t *pi, const lcnphy_rx_iqcomp_t *iqcomp,
		     int iqcomp_sz, bool tx_switch, bool rx_switch, int module,
		     int tx_gain_idx)
{
	lcnphy_txgains_t old_gains;
	u16 tx_pwr_ctrl;
	u8 tx_gain_index_old = 0;
	bool result = false, tx_gain_override_old = false;
	u16 i, Core1TxControl_old, RFOverride0_old, RFOverrideVal0_old,
	    rfoverride2_old, rfoverride2val_old,
	    rfoverride3_old, rfoverride3val_old,
	    rfoverride4_old, rfoverride4val_old,
	    afectrlovr_old, afectrlovrval_old;
	int tia_gain;
	u32 received_power, rx_pwr_threshold;
	u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
	u16 values_to_save[11];
	s16 *ptr;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	/* NOTE(review): 'ptr' is allocated but never used in this function
	 * except for the kfree — presumably a leftover; verify. */
	ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
	if (NULL == ptr) {
		return false;
	}
	if (module == 2) {
		while (iqcomp_sz--) {
			if (iqcomp[iqcomp_sz].chan ==
			    CHSPEC_CHANNEL(pi->radio_chanspec)) {
				/* Apply the table coefficients directly. */
				wlc_lcnphy_set_rx_iq_comp(pi,
							  (u16)
							  iqcomp[iqcomp_sz].a,
							  (u16)
							  iqcomp[iqcomp_sz].b);
				result = true;
				break;
			}
		}
		goto cal_done;
	}

	if (module == 1) {

		/* Disable power control and save every register we touch. */
		tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
		wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

		for (i = 0; i < 11; i++) {
			values_to_save[i] =
			    read_radio_reg(pi, rxiq_cal_rf_reg[i]);
		}
		Core1TxControl_old = read_phy_reg(pi, 0x631);

		or_phy_reg(pi, 0x631, 0x0015);

		RFOverride0_old = read_phy_reg(pi, 0x44c);
		RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
		rfoverride2_old = read_phy_reg(pi, 0x4b0);
		rfoverride2val_old = read_phy_reg(pi, 0x4b1);
		rfoverride3_old = read_phy_reg(pi, 0x4f9);
		rfoverride3val_old = read_phy_reg(pi, 0x4fa);
		rfoverride4_old = read_phy_reg(pi, 0x938);
		rfoverride4val_old = read_phy_reg(pi, 0x939);
		afectrlovr_old = read_phy_reg(pi, 0x43b);
		afectrlovrval_old = read_phy_reg(pi, 0x43c);
		old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
		old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);

		tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
		if (tx_gain_override_old) {
			wlc_lcnphy_get_tx_gain(pi, &old_gains);
			tx_gain_index_old = pi_lcn->lcnphy_current_index;
		}

		wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);

		mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);

		mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);

		/* Radio loopback path setup. */
		write_radio_reg(pi, RADIO_2064_REG116, 0x06);
		write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
		write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
		write_radio_reg(pi, RADIO_2064_REG098, 0x03);
		write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
		mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
		write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
		write_radio_reg(pi, RADIO_2064_REG114, 0x01);
		write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
		write_radio_reg(pi, RADIO_2064_REG12A, 0x08);

		mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
		mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
		mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
		mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
		mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
		mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
		mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);

		mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);

		/* 2 MHz tone, amplitude 120, regular (non-iqcal) playback. */
		wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
		write_phy_reg(pi, 0x6da, 0xffff);
		or_phy_reg(pi, 0x6db, 0x3);
		wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
		wlc_lcnphy_rx_gain_override_enable(pi, true);

		/* Step the TIA gain down until the measured digital power
		 * falls below the clipping threshold. */
		tia_gain = 8;
		rx_pwr_threshold = 950;
		while (tia_gain > 0) {
			tia_gain -= 1;
			wlc_lcnphy_set_rx_gain_by_distribution(pi,
							       0, 0, 2, 2,
							       (u16)
							       tia_gain, 1, 0);
			udelay(500);

			received_power =
			    wlc_lcnphy_measure_digital_power(pi, 2000);
			if (received_power < rx_pwr_threshold)
				break;
		}
		result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);

		wlc_lcnphy_stop_tx_tone(pi);

		/* Restore saved state.
		 * NOTE(review): 0x44c is restored with RFOverrideVal0_old
		 * (the value read from 0x44d); RFOverride0_old is saved but
		 * never written back — this looks like a copy/paste bug,
		 * verify against a known-good driver tree. */
		write_phy_reg(pi, 0x631, Core1TxControl_old);

		write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
		write_phy_reg(pi, 0x44d,
			      RFOverrideVal0_old);
		write_phy_reg(pi, 0x4b0, rfoverride2_old);
		write_phy_reg(pi, 0x4b1, rfoverride2val_old);
		write_phy_reg(pi, 0x4f9, rfoverride3_old);
		write_phy_reg(pi, 0x4fa, rfoverride3val_old);
		write_phy_reg(pi, 0x938, rfoverride4_old);
		write_phy_reg(pi, 0x939, rfoverride4val_old);
		write_phy_reg(pi, 0x43b, afectrlovr_old);
		write_phy_reg(pi, 0x43c, afectrlovrval_old);
		write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
		write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);

		wlc_lcnphy_clear_trsw_override(pi);

		mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);

		for (i = 0; i < 11; i++) {
			write_radio_reg(pi, rxiq_cal_rf_reg[i],
					values_to_save[i]);
		}

		if (tx_gain_override_old) {
			wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
		} else
			wlc_lcnphy_disable_tx_gain_override(pi);
		wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);

		wlc_lcnphy_rx_gain_override_enable(pi, false);
	}

cal_done:
	kfree(ptr);
	return result;
}

/* Temperature adjustment hook — currently a no-op beyond the NORADIO
 * guard. */
static void wlc_lcnphy_temp_adj(phy_info_t *pi)
{
	if (NORADIO_ENAB(pi->pubpi))
		return;
}

/*
 * Slow ("glacial") periodic calibration: with the MAC suspended and the
 * PHY deaf, re-run the Tx power-table IQ/LO cal, then restore the Tx
 * power index and power-control mode.
 */
static void wlc_lcnphy_glacial_timer_based_cal(phy_info_t *pi)
{
	bool suspend;
	s8 index;
	u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
	if (!suspend)
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	wlc_lcnphy_deaf_mode(pi, true);
	pi->phy_lastcal = pi->sh->now;
	pi->phy_forcecal = false;
	index = pi_lcn->lcnphy_current_index;

	wlc_lcnphy_txpwrtbl_iqlo_cal(pi);

	wlc_lcnphy_set_tx_pwr_by_index(pi, index);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_pwrctrl);
	wlc_lcnphy_deaf_mode(pi, false);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}

/*
 * Full periodic calibration: Tx IQ/LO cal, Rx IQ cal and — when TSSI
 * power control is enabled — a rebuild of the tssi->power table.
 */
static void wlc_lcnphy_periodic_cal(phy_info_t *pi)
{
	bool suspend, full_cal;
	const lcnphy_rx_iqcomp_t *rx_iqcomp;
	int rx_iqcomp_sz;
	u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	s8 index;
	phytbl_info_t tab;
	s32 a1, b0, b1;
	s32 tssi, pwr, maxtargetpwr, mintargetpwr;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	if (NORADIO_ENAB(pi->pubpi))
		return;
	pi->phy_lastcal = pi->sh->now;
	pi->phy_forcecal = false;
	full_cal =
	    (pi_lcn->lcnphy_full_cal_channel !=
	     CHSPEC_CHANNEL(pi->radio_chanspec));
	pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
	index = pi_lcn->lcnphy_current_index;

	suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
	if (!suspend) {
		/* Announce a long off-air interval before suspending. */
		wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	}

	wlc_lcnphy_deaf_mode(pi, true);

	wlc_lcnphy_txpwrtbl_iqlo_cal(pi);

	/* NOTE(review): rx_iqcomp/rx_iqcomp_sz are set up here but NULL/0
	 * are passed to wlc_lcnphy_rx_iq_cal below — verify intent. */
	rx_iqcomp = lcnphy_rx_iqcomp_table_rev0;
	rx_iqcomp_sz = ARRAY_SIZE(lcnphy_rx_iqcomp_table_rev0);

	if (LCNREV_IS(pi->pubpi.phy_rev, 1))
		wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 40);
	else
		wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 127);

	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) {

		/* Rebuild the tssi->power table from the PA parameters.
		 * NOTE(review): maxtargetpwr is computed but unused — only
		 * the minimum clamp is applied. */
		wlc_lcnphy_idle_tssi_est((wlc_phy_t *) pi);

		b0 = pi->txpa_2g[0];
		b1 = pi->txpa_2g[1];
		a1 = pi->txpa_2g[2];
		maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
		mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);

		tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
		tab.tbl_width = 32;
		tab.tbl_ptr = &pwr;
		tab.tbl_len = 1;
		tab.tbl_offset = 0;
		for (tssi = 0; tssi < 128; tssi++) {
			pwr = wlc_lcnphy_tssi2dbm(tssi, a1, b0, b1);
			pwr = (pwr < mintargetpwr) ?
			    mintargetpwr : pwr;
			wlc_lcnphy_write_table(pi, &tab);
			tab.tbl_offset++;
		}
	}

	wlc_lcnphy_set_tx_pwr_by_index(pi, index);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_pwrctrl);
	wlc_lcnphy_deaf_mode(pi, false);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}

/*
 * Dispatch calibration work by mode.  PHY_PERICAL_WATCHDOG re-runs the
 * glacial cal + VCO cal when either the counter exceeds 90 ticks or the
 * temperature moved by more than 60 sensor units since the last cal.
 */
void wlc_lcnphy_calib_modes(phy_info_t *pi, uint mode)
{
	u16 temp_new;
	int temp1, temp2, temp_diff;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	switch (mode) {
	case PHY_PERICAL_CHAN:
		break;
	case PHY_FULLCAL:
		wlc_lcnphy_periodic_cal(pi);
		break;
	case PHY_PERICAL_PHYINIT:
		wlc_lcnphy_periodic_cal(pi);
		break;
	case PHY_PERICAL_WATCHDOG:
		if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
			temp_new = wlc_lcnphy_tempsense(pi, 0);
			temp1 = LCNPHY_TEMPSENSE(temp_new);
			temp2 = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_cal_temper);
			temp_diff = temp1 - temp2;
			if ((pi_lcn->lcnphy_cal_counter > 90) ||
			    (temp_diff > 60) || (temp_diff < -60)) {
				wlc_lcnphy_glacial_timer_based_cal(pi);
				wlc_2064_vco_cal(pi);
				pi_lcn->lcnphy_cal_temper = temp_new;
				pi_lcn->lcnphy_cal_counter = 0;
			} else
				pi_lcn->lcnphy_cal_counter++;
		}
		break;
	case LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL:
		if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
			wlc_lcnphy_tx_power_adjustment((wlc_phy_t *) pi);
		break;
	}
}

/*
 * Report the current OFDM and CCK Tx power from the TSSI status register
 * (0x4ab).  Valid only when TSSI power control is on and the status bit
 * (bit 15) is set; otherwise reports 0 for both.
 */
void wlc_lcnphy_get_tssi(phy_info_t *pi, s8 *ofdm_pwr, s8 *cck_pwr)
{
	s8 cck_offset;
	u16 status;
	status = (read_phy_reg(pi, 0x4ab));
	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) &&
	    (status & (0x1 << 15))) {
		/* Power field is in half-dB steps; shift to whole units. */
		*ofdm_pwr = (s8) (((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
				   >> 0) >> 1);

		if (wlc_phy_tpc_isenabled_lcnphy(pi))
			cck_offset = pi->tx_power_offset[TXP_FIRST_CCK];
		else
			cck_offset = 0;
		*cck_pwr = *ofdm_pwr + cck_offset;
	} else {
		*cck_pwr = 0;
		*ofdm_pwr = 0;
	}
}

/* Band-init calibration hook — intentionally empty for LCN PHY. */
void WLBANDINITFN(wlc_phy_cal_init_lcnphy) (phy_info_t *pi)
{
	return;
}

/*
 * Per-channel tweaks: channel-14 filter setting, band-edge correction
 * factor, and PLL/spur-avoidance programming split by channel group.
 * NOTE(review): the final write_phy_reg(0x44a, 0x80) immediately
 * overwrites the preceding or_phy_reg(0x44a, 0x44) — verify ordering.
 */
static void wlc_lcnphy_set_chanspec_tweaks(phy_info_t *pi,
					   chanspec_t chanspec)
{
	u8 channel = CHSPEC_CHANNEL(chanspec);
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	if (NORADIO_ENAB(pi->pubpi))
		return;

	if (channel == 14) {
		mod_phy_reg(pi, 0x448, (0x3
					<< 8), (2) << 8);

	} else {
		mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);

	}
	pi_lcn->lcnphy_bandedge_corr = 2;
	if (channel == 1)
		pi_lcn->lcnphy_bandedge_corr = 4;

	if (channel == 1 || channel == 2 || channel == 3 ||
	    channel == 4 || channel == 9 ||
	    channel == 10 || channel == 11 || channel == 12) {
		si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03000c04);
		si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x0);
		si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x200005c0);

		si_pmu_pllupd(pi->sh->sih);
		write_phy_reg(pi, 0x942, 0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
		pi_lcn->lcnphy_spurmod = 0;
		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);

		write_phy_reg(pi, 0x425, 0x5907);
	} else {
		si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03140c04);
		si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x333333);
		si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x202c2820);

		si_pmu_pllupd(pi->sh->sih);
		write_phy_reg(pi, 0x942, 0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);

		pi_lcn->lcnphy_spurmod = 0;
		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);

		write_phy_reg(pi, 0x425, 0x590a);
	}

	or_phy_reg(pi, 0x44a, 0x44);
	write_phy_reg(pi, 0x44a, 0x80);
}

/*
 * Apply a temperature-compensated Tx power index (doubled into the
 * half-step field of reg 0x4a9) and mirror the applied value back into
 * lcnphy_current_index.
 */
void wlc_lcnphy_tx_power_adjustment(wlc_phy_t *ppi)
{
	s8 index;
	u16 index2;
	phy_info_t *pi = (phy_info_t *) ppi;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) &&
	    SAVE_txpwrctrl) {
		index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
		index2 = (u16) (index * 2);
		mod_phy_reg(pi, 0x4a9, (0x1ff << 0), (index2) << 0);

		pi_lcn->lcnphy_current_index = (s8)
		    ((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
	}
}

/* Program the a/b Rx IQ compensation coefficients into all three
 * coefficient register pairs (0x645..0x64a). */
static void wlc_lcnphy_set_rx_iq_comp(phy_info_t *pi, u16 a, u16 b)
{
	mod_phy_reg(pi, 0x645, (0x3ff << 0), (a) << 0);

	mod_phy_reg(pi, 0x646, (0x3ff << 0), (b) << 0);

	mod_phy_reg(pi, 0x647, (0x3ff << 0), (a) << 0);

	mod_phy_reg(pi, 0x648, (0x3ff << 0), (b) << 0);

	mod_phy_reg(pi, 0x649, (0x3ff << 0), (a) << 0);

	mod_phy_reg(pi, 0x64a, (0x3ff << 0),
		    (b) << 0);

}

/*
 * Full LCN-PHY init: reset calibration bookkeeping, init AFE clocks,
 * baseband and radio, set the channel, program PMU regulator/chip
 * controls, then run the PHYINIT calibration pass.
 */
void WLBANDINITFN(wlc_phy_init_lcnphy) (phy_info_t *pi)
{
	u8 phybw40;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
	phybw40 = CHSPEC_IS40(pi->radio_chanspec);

	pi_lcn->lcnphy_cal_counter = 0;
	pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;

	/* Pulse reset bit 7 of 0x44a. */
	or_phy_reg(pi, 0x44a, 0x80);
	and_phy_reg(pi, 0x44a, 0x7f);

	wlc_lcnphy_afe_clk_init(pi, AFE_CLK_INIT_MODE_TXRX2X);

	write_phy_reg(pi, 0x60a, 160);

	write_phy_reg(pi, 0x46a, 25);

	wlc_lcnphy_baseband_init(pi);

	wlc_lcnphy_radio_init(pi);

	if (CHSPEC_IS2G(pi->radio_chanspec))
		wlc_lcnphy_tx_pwr_ctrl_init((wlc_phy_t *) pi);

	wlc_phy_chanspec_set((wlc_phy_t *) pi, pi->radio_chanspec);

	si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9);

	si_pmu_chipcontrol(pi->sh->sih, 0, 0xffffffff, 0x03CDDDDD);

	if ((pi->sh->boardflags & BFL_FEM)
	    && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
		wlc_lcnphy_set_tx_pwr_by_index(pi, FIXED_TXPWR);

	wlc_lcnphy_agc_temp_init(pi);

	wlc_lcnphy_temp_adj(pi);

	mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);

	udelay(100);
	mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
	pi_lcn->lcnphy_noise_samples = LCNPHY_NOISE_SAMPLES_DEFAULT;
	wlc_lcnphy_calib_modes(pi, PHY_PERICAL_PHYINIT);
}

/*
 * Configure the radio for the Tx IQ/LO calibration loopback.  Saves the
 * 20 radio registers listed in iqlo_loopback_rf_regs[] into
 * values_to_save (caller-provided, restored later by
 * wlc_lcnphy_tx_iqlo_loopback_cleanup), then enables the loopback path,
 * biasing and the envelope-detector Vmid.
 */
static void
wlc_lcnphy_tx_iqlo_loopback(phy_info_t *pi, u16 *values_to_save)
{
	u16 vmid;
	int i;
	for (i = 0; i < 20; i++) {
		values_to_save[i] =
		    read_radio_reg(pi, iqlo_loopback_rf_regs[i]);
	}

	mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
	mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);

	mod_phy_reg(pi, 0x44c, (0x1 << 11), 1 << 11);
	mod_phy_reg(pi, 0x44d, (0x1 << 13), 0 << 13);

	mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
	mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);

	mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
	mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);

	if (LCNREV_IS(pi->pubpi.phy_rev, 2))
		and_radio_reg(pi, RADIO_2064_REG03A, 0xFD);
	else
		and_radio_reg(pi, RADIO_2064_REG03A, 0xF9);
	or_radio_reg(pi, RADIO_2064_REG11A, 0x1);

	or_radio_reg(pi, RADIO_2064_REG036, 0x01);
	or_radio_reg(pi,
		     RADIO_2064_REG11A, 0x18);
	udelay(20);

	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0);
		else
			or_radio_reg(pi, RADIO_2064_REG03A, 1);
	} else {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG03A, 3, 1);
		else
			or_radio_reg(pi, RADIO_2064_REG03A, 0x3);
	}

	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG025, 0xF);
	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x4);
		else
			mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x6);
	} else {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x4 << 1);
		else
			mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x6 << 1);
	}

	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG005, 0x8);
	or_radio_reg(pi, RADIO_2064_REG112, 0x80);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
	or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG00B, 0x7);
	or_radio_reg(pi, RADIO_2064_REG113, 0x10);
	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG007, 0x1);
	udelay(20);

	/* Envelope-detector Vmid: 10-bit value split over two regs. */
	vmid = 0x2A6;
	mod_radio_reg(pi, RADIO_2064_REG0FC, 0x3 << 0, (vmid >> 8) & 0x3);
	write_radio_reg(pi, RADIO_2064_REG0FD, (vmid & 0xff));
	or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG012, 0x02);
	or_radio_reg(pi, RADIO_2064_REG112, 0x06);
	write_radio_reg(pi, RADIO_2064_REG036, 0x11);
	write_radio_reg(pi, RADIO_2064_REG059, 0xcc);
	write_radio_reg(pi, RADIO_2064_REG05C, 0x2e);
	write_radio_reg(pi, RADIO_2064_REG078, 0xd7);
	write_radio_reg(pi, RADIO_2064_REG092, 0x15);
}

/*
 * Capture up to 128 ADC samples (I or Q, per lcnphy_iqcal_swp_dis) from
 * the sample-collect buffer at 0x7E00..0x8000 into ptr[0..127].  When
 * clip_detect_algo is set, ptr[130] is used as a clip flag: set to 1 and
 * capture aborted if |imag| exceeds thresh.  Saves/restores the PSM core
 * control and PHY-header-param MAC registers around the capture.
 */
static void
wlc_lcnphy_samp_cap(phy_info_t *pi, int clip_detect_algo, u16 thresh,
		    s16 *ptr, int mode)
{
	u32 curval1, curval2, stpptr, curptr, strptr, val;
	u16 sslpnCalibClkEnCtrl, timer;
	u16 old_sslpnCalibClkEnCtrl;
	s16 imag, real;
	phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;

	timer = 0;
	old_sslpnCalibClkEnCtrl = read_phy_reg(pi,
					       0x6da);

	curval1 = R_REG(&pi->regs->psm_corectlsts);
	ptr[130] = 0;	/* clip flag, see function header */
	W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));

	/* Sample-collect region: 0x7E00..0x8000 in template RAM. */
	W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
	W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
	udelay(20);
	curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
	W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);

	write_phy_reg(pi, 0x555, 0x0);
	write_phy_reg(pi, 0x5a6, 0x5);

	write_phy_reg(pi, 0x5a2, (u16) (mode | mode << 6));
	write_phy_reg(pi, 0x5cf, 3);
	write_phy_reg(pi, 0x5a5, 0x3);
	write_phy_reg(pi, 0x583, 0x0);
	write_phy_reg(pi, 0x584, 0x0);
	write_phy_reg(pi, 0x585, 0x0fff);
	write_phy_reg(pi, 0x586, 0x0000);

	write_phy_reg(pi, 0x580, 0x4501);

	sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
	write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
	stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
	curptr = R_REG(&pi->regs->smpl_clct_curptr);

	/* Wait (max ~5 ms) for the collect pointer to reach the stop
	 * pointer. */
	do {
		udelay(10);
		curptr = R_REG(&pi->regs->smpl_clct_curptr);
		timer++;
	} while ((curptr != stpptr) && (timer < 500));

	W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
	strptr = 0x7E00;
	W_REG(&pi->regs->tplatewrptr, strptr);
	while (strptr < 0x8000) {
		val = R_REG(&pi->regs->tplatewrdata);

		/* Each word holds 10-bit Q (imag) in [25:16] and 10-bit I
		 * (real) in [9:0]; sign-extend from 10 bits. */
		imag = ((val >> 16) & 0x3ff);
		real = ((val) & 0x3ff);
		if (imag > 511) {
			imag -= 1024;
		}
		if (real > 511) {
			real -= 1024;
		}
		if (pi_lcn->lcnphy_iqcal_swp_dis)
			ptr[(strptr - 0x7E00) / 4] = real;
		else
			ptr[(strptr - 0x7E00) / 4] = imag;
		if (clip_detect_algo) {
			if (imag > thresh || imag < -thresh) {
				strptr = 0x8000;	/* abort capture */
				ptr[130] = 1;
			}
		}
		strptr += 4;
	}

	write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
	W_REG(&pi->regs->psm_phy_hdr_param, curval2);
	W_REG(&pi->regs->psm_corectlsts, curval1);
}

/*
 * Software Tx IQ/LO calibration: zero all coefficient sets, then run the
 * iterative search (wlc_lcnphy_a1) over the four cal types in coarse-to-
 * fine order.  The final get_cc reads are bookkeeping only.
 */
static void wlc_lcnphy_tx_iqlo_soft_cal_full(phy_info_t *pi)
{
	lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;

	wlc_lcnphy_set_cc(pi, 0, 0, 0);
	wlc_lcnphy_set_cc(pi, 2, 0, 0);
	wlc_lcnphy_set_cc(pi, 3, 0, 0);
	wlc_lcnphy_set_cc(pi, 4, 0, 0);

	wlc_lcnphy_a1(pi, 4, 0, 0);
	wlc_lcnphy_a1(pi, 3, 0, 0);
	wlc_lcnphy_a1(pi, 2, 3, 2);
	wlc_lcnphy_a1(pi, 0, 5, 8);
	wlc_lcnphy_a1(pi,
		      2, 2, 1);
	wlc_lcnphy_a1(pi, 0, 4, 3);

	iqcc0 = wlc_lcnphy_get_cc(pi, 0);
	locc2 = wlc_lcnphy_get_cc(pi, 2);
	locc3 = wlc_lcnphy_get_cc(pi, 3);
	locc4 = wlc_lcnphy_get_cc(pi, 4);
}

/*
 * Write a coefficient pair for the given cal type:
 * 0 = Tx IQ a/b, 2 = digital LO (di/dq packed into one word),
 * 3/4 = radio coarse/fine LOFT, encoded as 4-bit x/y nibbles around a
 * center code of 8 via wlc_lcnphy_calc_floor().
 */
static void
wlc_lcnphy_set_cc(phy_info_t *pi, int cal_type, s16 coeff_x, s16 coeff_y)
{
	u16 di0dq0;
	u16 x, y, data_rf;
	int k;
	switch (cal_type) {
	case 0:
		wlc_lcnphy_set_tx_iqcc(pi, coeff_x, coeff_y);
		break;
	case 2:
		di0dq0 = (coeff_x & 0xff) << 8 | (coeff_y & 0xff);
		wlc_lcnphy_set_tx_locc(pi, di0dq0);
		break;
	case 3:
		k = wlc_lcnphy_calc_floor(coeff_x, 0);
		y = 8 + k;
		k = wlc_lcnphy_calc_floor(coeff_x, 1);
		x = 8 - k;
		data_rf = (x * 16 + y);
		write_radio_reg(pi, RADIO_2064_REG089, data_rf);
		k = wlc_lcnphy_calc_floor(coeff_y, 0);
		y = 8 + k;
		k = wlc_lcnphy_calc_floor(coeff_y, 1);
		x = 8 - k;
		data_rf = (x * 16 + y);
		write_radio_reg(pi, RADIO_2064_REG08A, data_rf);
		break;
	case 4:
		k = wlc_lcnphy_calc_floor(coeff_x, 0);
		y = 8 + k;
		k = wlc_lcnphy_calc_floor(coeff_x, 1);
		x = 8 - k;
		data_rf = (x * 16 + y);
		write_radio_reg(pi, RADIO_2064_REG08B, data_rf);
		k = wlc_lcnphy_calc_floor(coeff_y, 0);
		y = 8 + k;
		k = wlc_lcnphy_calc_floor(coeff_y, 1);
		x = 8 - k;
		data_rf = (x * 16 + y);
		write_radio_reg(pi, RADIO_2064_REG08C, data_rf);
		break;
	}
}

/*
 * Read back the coefficient pair for a cal type (inverse of
 * wlc_lcnphy_set_cc for types 0 and 2; types 3/4 return the raw radio
 * LOFT values).
 */
static lcnphy_unsign16_struct wlc_lcnphy_get_cc(phy_info_t *pi, int cal_type)
{
	u16 a, b, didq;
	u8 di0, dq0, ei, eq, fi, fq;
	lcnphy_unsign16_struct cc;
	cc.re = 0;
	cc.im = 0;
	switch (cal_type) {
	case 0:
		wlc_lcnphy_get_tx_iqcc(pi, &a, &b);
		cc.re = a;
		cc.im = b;
		break;
	case 2:
		didq = wlc_lcnphy_get_tx_locc(pi);
		/* Unpack di (high byte) and dq (low byte). */
		di0 = (((didq & 0xff00) << 16) >> 24);
		dq0 = (((didq & 0x00ff) << 24) >> 24);
		cc.re = (u16) di0;
		cc.im = (u16) dq0;
		break;
	case 3:
		wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
		cc.re = (u16) ei;
		cc.im = (u16) eq;
		break;
	case 4:
		wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
		cc.re = (u16) fi;
		cc.im = (u16) fq;
		break;
	}
	return cc;
}

/*
 * Iterative coordinate-descent search used by the software Tx IQ/LO cal:
 * plays a 3.75 MHz tone through the loopback, captures samples, correlates
 * against the reference tone table, and walks the coefficient pair for
 * cal_type toward minimum tone power, halving the step each level.
 */
static void
wlc_lcnphy_a1(phy_info_t *pi, int cal_type, int num_levels,
	      int step_size_lg2)
{
	const lcnphy_spb_tone_t *phy_c1;
	lcnphy_spb_tone_t phy_c2;
	lcnphy_unsign16_struct phy_c3;
	int phy_c4, phy_c5, k, l, j, phy_c6;
	u16 phy_c7, phy_c8, phy_c9;
	s16 phy_c10, phy_c11, phy_c12, phy_c13, phy_c14, phy_c15, phy_c16;
	s16 *ptr, phy_c17;
	s32 phy_c18, phy_c19;
	u32 phy_c20, phy_c21;
	bool phy_c22, phy_c23, phy_c24, phy_c25;
	u16 phy_c26, phy_c27;
	u16 phy_c28, phy_c29, phy_c30;
	u16 phy_c31;
	u16 *phy_c32;
	phy_c21 = 0;
	phy_c10 = phy_c13 = phy_c14 = phy_c8 = 0;

	/* ptr: 128 samples + clip flag at [130] (see samp_cap);
	 * phy_c32: saved radio regs for the loopback. */
	ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
	if (NULL == ptr) {
		return;
	}

	phy_c32 = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
	if (NULL == phy_c32) {
		kfree(ptr);
		return;
	}
	phy_c26 = read_phy_reg(pi, 0x6da);
	phy_c27 = read_phy_reg(pi, 0x6db);
	phy_c31 = read_radio_reg(pi, RADIO_2064_REG026);
	write_phy_reg(pi, 0x93d, 0xC0);

	wlc_lcnphy_start_tx_tone(pi, 3750, 88, 0);
	write_phy_reg(pi, 0x6da, 0xffff);
	or_phy_reg(pi, 0x6db, 0x3);

	wlc_lcnphy_tx_iqlo_loopback(pi, phy_c32);
	udelay(500);
	phy_c28 = read_phy_reg(pi, 0x938);
	phy_c29 = read_phy_reg(pi, 0x4d7);
	phy_c30 = read_phy_reg(pi, 0x4d8);
	or_phy_reg(pi, 0x938, 0x1 << 2);
	or_phy_reg(pi, 0x4d7, 0x1 << 2);
	or_phy_reg(pi, 0x4d7, 0x1 << 3);
	mod_phy_reg(pi, 0x4d7, (0x7 << 12), 0x2 << 12);
	or_phy_reg(pi, 0x4d8, 1 << 0);
	or_phy_reg(pi, 0x4d8, 1 << 1);
	mod_phy_reg(pi, 0x4d8, (0x3ff << 2), 0x23A << 2);
	mod_phy_reg(pi, 0x4d8, (0x7 << 12), 0x7 << 12);
	phy_c1 = &lcnphy_spb_tone_3750[0];
	phy_c4 = 32;	/* reference tone table length */

	/* Default search depth/step per cal type. */
	if (num_levels == 0) {
		if (cal_type != 0) {
			num_levels = 4;
		} else {
			num_levels = 9;
		}
	}
	if (step_size_lg2 == 0) {
		if (cal_type != 0) {
			step_size_lg2 = 3;
		} else {
			step_size_lg2 = 8;
		}
	}

	phy_c7 = (1 << step_size_lg2);
	phy_c3 = wlc_lcnphy_get_cc(pi, cal_type);
	phy_c15 = (s16) phy_c3.re;
	phy_c16 = (s16) phy_c3.im;
	if (cal_type == 2) {
		/* LO coefficients are 8-bit two's-complement. */
		if (phy_c3.re > 127)
			phy_c15 = phy_c3.re - 256;
		if (phy_c3.im > 127)
			phy_c16 = phy_c3.im - 256;
	}
	wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
	udelay(20);

	/* Outer loop: one level per step size, halving each time. */
	for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
		phy_c23 = 1;
		phy_c22 = 0;
		/* Coefficient clamp per cal type. */
		switch (cal_type) {
		case 0:
			phy_c10 = 511;
			break;
		case 2:
			phy_c10 = 127;
			break;
		case 3:
			phy_c10 = 15;
			break;
		case 4:
			phy_c10 = 15;
			break;
		}

		phy_c9 = read_phy_reg(pi, 0x93d);
		phy_c9 = 2 * phy_c9;	/* clip-detect threshold */
		phy_c24 = 0;
		phy_c5 = 7;
		phy_c25 = 1;
		/* Find the largest loopback attenuation (REG026) that does
		 * not clip the capture. */
		while (1) {
			write_radio_reg(pi, RADIO_2064_REG026,
					(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
			udelay(50);
			phy_c22 = 0;
			ptr[130] = 0;
			wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
			if (ptr[130] == 1)
				phy_c22 = 1;
			if (phy_c22)
				phy_c5 -= 1;
			if ((phy_c22 != phy_c24) && (!phy_c25))
				break;
			if (!phy_c22)
				phy_c5 += 1;
			if (phy_c5 <= 0 || phy_c5 >= 7)
				break;
			phy_c24 = phy_c22;
			phy_c25 = 0;
		}

		if (phy_c5 < 0)
			phy_c5 = 0;
		else if (phy_c5 > 7)
			phy_c5 = 7;

		/* 3x3 neighborhood search around the current coefficients. */
		for (k = -phy_c7; k <= phy_c7; k += phy_c7) {
			for (l = -phy_c7; l <= phy_c7; l += phy_c7) {
				phy_c11 = phy_c15 + k;
				phy_c12 = phy_c16 + l;

				if (phy_c11 < -phy_c10)
					phy_c11 = -phy_c10;
				else if (phy_c11 > phy_c10)
					phy_c11 = phy_c10;
				if (phy_c12 < -phy_c10)
					phy_c12 = -phy_c10;
				else if (phy_c12 > phy_c10)
					phy_c12 = phy_c10;
				wlc_lcnphy_set_cc(pi, cal_type, phy_c11,
						  phy_c12);
				udelay(20);
				wlc_lcnphy_samp_cap(pi, 0, 0, ptr, 2);

				/* Correlate the capture with the reference
				 * tone (IQ cal uses every other sample). */
				phy_c18 = 0;
				phy_c19 = 0;
				for (j = 0; j < 128; j++) {
					if (cal_type != 0) {
						phy_c6 = j % phy_c4;
					} else {
						phy_c6 = (2 * j) % phy_c4;
					}
					phy_c2.re = phy_c1[phy_c6].re;
					phy_c2.im = phy_c1[phy_c6].im;
					phy_c17 = ptr[j];
					phy_c18 = phy_c18 + phy_c17 * phy_c2.re;
					phy_c19 = phy_c19 + phy_c17 * phy_c2.im;
				}

				phy_c18 = phy_c18 >> 10;
				phy_c19 = phy_c19 >> 10;
				phy_c20 = ((phy_c18 * phy_c18) +
					   (phy_c19 * phy_c19));

				/* Keep the coefficients with minimum tone
				 * power. */
				if (phy_c23 || phy_c20 < phy_c21) {
					phy_c21 = phy_c20;
					phy_c13 = phy_c11;
					phy_c14 = phy_c12;
				}
				phy_c23 = 0;
			}
		}
		phy_c23 = 1;
		phy_c15 = phy_c13;
		phy_c16 = phy_c14;
		phy_c7 = phy_c7 >> 1;	/* halve the step for the next level */
		wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
		udelay(20);
	}
	goto cleanup;
cleanup:
	wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, phy_c32);
	wlc_lcnphy_stop_tx_tone(pi);
	write_phy_reg(pi, 0x6da, phy_c26);
	write_phy_reg(pi, 0x6db, phy_c27);
	write_phy_reg(pi, 0x938, phy_c28);
	write_phy_reg(pi, 0x4d7, phy_c29);
	write_phy_reg(pi, 0x4d8, phy_c30);
	write_radio_reg(pi, RADIO_2064_REG026, phy_c31);
	kfree(phy_c32);
	kfree(ptr);
}

/*
 * Undo wlc_lcnphy_tx_iqlo_loopback(): restore the 20 saved radio regs
 * and clear the loopback override bits.  NOTE(review): '0x0 >> 11'
 * evaluates to 0, so the first and_phy_reg clears ALL of 0x44c — verify
 * whether a mask such as ~(0x1 << 11) was intended.
 */
static void
wlc_lcnphy_tx_iqlo_loopback_cleanup(phy_info_t *pi, u16 *values_to_save)
{
	int i;

	and_phy_reg(pi, 0x44c, 0x0 >> 11);
	and_phy_reg(pi, 0x43b, 0xC);

	for (i = 0; i < 20; i++) {
		write_radio_reg(pi, iqlo_loopback_rf_regs[i],
				values_to_save[i]);
	}
}

/*
 * Populate the Tx power-control gain and IQ tables from gain_table[128].
 * PA gain is fixed (0x70, or 0x10 on FEM boards); each entry packs
 * pa/pad/pga/gm into the gain word and dac/bb_mult into the IQ word.
 * NOTE(review): the 5G and non-5G branches currently pick the same
 * pa_gain value.
 */
static void
WLBANDINITFN(wlc_lcnphy_load_tx_gain_table) (phy_info_t *pi,
					     const lcnphy_tx_gain_tbl_entry *
					     gain_table)
{
	u32 j;
	phytbl_info_t tab;
	u32 val;
	u16 pa_gain;
	u16 gm_gain;

	if (CHSPEC_IS5G(pi->radio_chanspec))
		pa_gain = 0x70;
	else
		pa_gain = 0x70;

	if (pi->sh->boardflags & BFL_FEM)
		pa_gain = 0x10;
	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 1;
	tab.tbl_ptr = &val;

	for (j = 0; j < 128; j++) {
		gm_gain = gain_table[j].gm;
		val = (((u32) pa_gain << 24) |
		       (gain_table[j].pad << 16) |
		       (gain_table[j].pga << 8) | gm_gain);

		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + j;
		wlc_lcnphy_write_table(pi, &tab);

		val = (gain_table[j].dac << 28) | (gain_table[j].bb_mult << 20);
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + j;
		wlc_lcnphy_write_table(pi, &tab);
	}
}

/*
 * Head of wlc_lcnphy_load_rfpower(): recomputes the per-index RF power
 * entries from bbmult/rfgain via log10 fixed-point math.  The function
 * continues past this chunk; code left untouched.
 */
static void wlc_lcnphy_load_rfpower(phy_info_t *pi)
{
	phytbl_info_t tab;
	u32 val, bbmult, rfgain;
	u8 index;
	u8 scale_factor = 1;
	s16 temp, temp1, temp2, qQ, qQ1, qQ2, shift;

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 1;

	for (index = 0; index < 128; index++) {
		tab.tbl_ptr = &bbmult;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + index;
		wlc_lcnphy_read_table(pi, &tab);
		bbmult = bbmult >> 20;

		tab.tbl_ptr = &rfgain;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + index;
		wlc_lcnphy_read_table(pi, &tab);

		qm_log10((s32) (bbmult), 0, &temp1, &qQ1);
		qm_log10((s32) (1 << 6), 0, &temp2, &qQ2);

		if (qQ1 < qQ2) {
			temp2 = qm_shr16(temp2, qQ2 - qQ1);
			qQ = qQ1;
		} else {
			temp1 = qm_shr16(temp1, qQ1 - qQ2);
			qQ = qQ2;
		}
		temp = qm_sub16(temp1, temp2);

		if (qQ >= 4)
			shift = qQ - 4;
		else
			shift = 4 - qQ;

		val = (((index << shift) + (5 * temp) +
			(1 <<
(scale_factor + shift - 3))) >> (scale_factor + shift - 2)); tab.tbl_ptr = &val; tab.tbl_offset = LCNPHY_TX_PWR_CTRL_PWR_OFFSET + index; wlc_lcnphy_write_table(pi, &tab); } } static void WLBANDINITFN(wlc_lcnphy_tbl_init) (phy_info_t *pi) { uint idx; u8 phybw40; phytbl_info_t tab; u32 val; phybw40 = CHSPEC_IS40(pi->radio_chanspec); for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++) { wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]); } if (pi->sh->boardflags & BFL_FEM_BT) { tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; tab.tbl_width = 16; tab.tbl_ptr = &val; tab.tbl_len = 1; val = 100; tab.tbl_offset = 4; wlc_lcnphy_write_table(pi, &tab); } tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; tab.tbl_width = 16; tab.tbl_ptr = &val; tab.tbl_len = 1; val = 114; tab.tbl_offset = 0; wlc_lcnphy_write_table(pi, &tab); val = 130; tab.tbl_offset = 1; wlc_lcnphy_write_table(pi, &tab); val = 6; tab.tbl_offset = 8; wlc_lcnphy_write_table(pi, &tab); if (CHSPEC_IS2G(pi->radio_chanspec)) { if (pi->sh->boardflags & BFL_FEM) wlc_lcnphy_load_tx_gain_table(pi, dot11lcnphy_2GHz_extPA_gaintable_rev0); else wlc_lcnphy_load_tx_gain_table(pi, dot11lcnphy_2GHz_gaintable_rev0); } if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { if (CHSPEC_IS2G(pi->radio_chanspec)) { for (idx = 0; idx < dot11lcnphytbl_rx_gain_info_2G_rev2_sz; idx++) if (pi->sh->boardflags & BFL_EXTLNA) wlc_lcnphy_write_table(pi, &dot11lcnphytbl_rx_gain_info_extlna_2G_rev2 [idx]); else wlc_lcnphy_write_table(pi, &dot11lcnphytbl_rx_gain_info_2G_rev2 [idx]); } else { for (idx = 0; idx < dot11lcnphytbl_rx_gain_info_5G_rev2_sz; idx++) if (pi->sh->boardflags & BFL_EXTLNA_5GHz) wlc_lcnphy_write_table(pi, &dot11lcnphytbl_rx_gain_info_extlna_5G_rev2 [idx]); else wlc_lcnphy_write_table(pi, &dot11lcnphytbl_rx_gain_info_5G_rev2 [idx]); } } if ((pi->sh->boardflags & BFL_FEM) && !(pi->sh->boardflags & BFL_FEM_BT)) wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa); else if (pi->sh->boardflags & BFL_FEM_BT) { if (pi->sh->boardrev < 0x1250) 
wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa); else wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250); } else wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313); wlc_lcnphy_load_rfpower(pi); wlc_lcnphy_clear_papd_comptable(pi); } static void WLBANDINITFN(wlc_lcnphy_rev0_baseband_init) (phy_info_t *pi) { u16 afectrl1; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; write_radio_reg(pi, RADIO_2064_REG11C, 0x0); write_phy_reg(pi, 0x43b, 0x0); write_phy_reg(pi, 0x43c, 0x0); write_phy_reg(pi, 0x44c, 0x0); write_phy_reg(pi, 0x4e6, 0x0); write_phy_reg(pi, 0x4f9, 0x0); write_phy_reg(pi, 0x4b0, 0x0); write_phy_reg(pi, 0x938, 0x0); write_phy_reg(pi, 0x4b0, 0x0); write_phy_reg(pi, 0x44e, 0); or_phy_reg(pi, 0x567, 0x03); or_phy_reg(pi, 0x44a, 0x44); write_phy_reg(pi, 0x44a, 0x80); if (!(pi->sh->boardflags & BFL_FEM)) wlc_lcnphy_set_tx_pwr_by_index(pi, 52); if (0) { afectrl1 = 0; afectrl1 = (u16) ((pi_lcn->lcnphy_rssi_vf) | (pi_lcn->lcnphy_rssi_vc << 4) | (pi_lcn-> lcnphy_rssi_gs << 10)); write_phy_reg(pi, 0x43e, afectrl1); } mod_phy_reg(pi, 0x634, (0xff << 0), 0xC << 0); if (pi->sh->boardflags & BFL_FEM) { mod_phy_reg(pi, 0x634, (0xff << 0), 0xA << 0); write_phy_reg(pi, 0x910, 0x1); } mod_phy_reg(pi, 0x448, (0x3 << 8), 1 << 8); mod_phy_reg(pi, 0x608, (0xff << 0), 0x17 << 0); mod_phy_reg(pi, 0x604, (0x7ff << 0), 0x3EA << 0); } static void WLBANDINITFN(wlc_lcnphy_rev2_baseband_init) (phy_info_t *pi) { if (CHSPEC_IS5G(pi->radio_chanspec)) { mod_phy_reg(pi, 0x416, (0xff << 0), 80 << 0); mod_phy_reg(pi, 0x416, (0xff << 8), 80 << 8); } } static void wlc_lcnphy_agc_temp_init(phy_info_t *pi) { s16 temp; phytbl_info_t tab; u32 tableBuffer[2]; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; if (NORADIO_ENAB(pi->pubpi)) return; temp = (s16) read_phy_reg(pi, 0x4df); pi_lcn->lcnphy_ofdmgainidxtableoffset = (temp & (0xff << 0)) >> 0; if (pi_lcn->lcnphy_ofdmgainidxtableoffset > 127) pi_lcn->lcnphy_ofdmgainidxtableoffset -= 256; 
pi_lcn->lcnphy_dsssgainidxtableoffset = (temp & (0xff << 8)) >> 8; if (pi_lcn->lcnphy_dsssgainidxtableoffset > 127) pi_lcn->lcnphy_dsssgainidxtableoffset -= 256; tab.tbl_ptr = tableBuffer; tab.tbl_len = 2; tab.tbl_id = 17; tab.tbl_offset = 59; tab.tbl_width = 32; wlc_lcnphy_read_table(pi, &tab); if (tableBuffer[0] > 63) tableBuffer[0] -= 128; pi_lcn->lcnphy_tr_R_gain_val = tableBuffer[0]; if (tableBuffer[1] > 63) tableBuffer[1] -= 128; pi_lcn->lcnphy_tr_T_gain_val = tableBuffer[1]; temp = (s16) (read_phy_reg(pi, 0x434) & (0xff << 0)); if (temp > 127) temp -= 256; pi_lcn->lcnphy_input_pwr_offset_db = (s8) temp; pi_lcn->lcnphy_Med_Low_Gain_db = (read_phy_reg(pi, 0x424) & (0xff << 8)) >> 8; pi_lcn->lcnphy_Very_Low_Gain_db = (read_phy_reg(pi, 0x425) & (0xff << 0)) >> 0; tab.tbl_ptr = tableBuffer; tab.tbl_len = 2; tab.tbl_id = LCNPHY_TBL_ID_GAIN_IDX; tab.tbl_offset = 28; tab.tbl_width = 32; wlc_lcnphy_read_table(pi, &tab); pi_lcn->lcnphy_gain_idx_14_lowword = tableBuffer[0]; pi_lcn->lcnphy_gain_idx_14_hiword = tableBuffer[1]; } static void WLBANDINITFN(wlc_lcnphy_bu_tweaks) (phy_info_t *pi) { if (NORADIO_ENAB(pi->pubpi)) return; or_phy_reg(pi, 0x805, 0x1); mod_phy_reg(pi, 0x42f, (0x7 << 0), (0x3) << 0); mod_phy_reg(pi, 0x030, (0x7 << 0), (0x3) << 0); write_phy_reg(pi, 0x414, 0x1e10); write_phy_reg(pi, 0x415, 0x0640); mod_phy_reg(pi, 0x4df, (0xff << 8), -9 << 8); or_phy_reg(pi, 0x44a, 0x44); write_phy_reg(pi, 0x44a, 0x80); mod_phy_reg(pi, 0x434, (0xff << 0), (0xFD) << 0); mod_phy_reg(pi, 0x420, (0xff << 0), (16) << 0); if (!(pi->sh->boardrev < 0x1204)) mod_radio_reg(pi, RADIO_2064_REG09B, 0xF0, 0xF0); write_phy_reg(pi, 0x7d6, 0x0902); mod_phy_reg(pi, 0x429, (0xf << 0), (0x9) << 0); mod_phy_reg(pi, 0x429, (0x3f << 4), (0xe) << 4); if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { mod_phy_reg(pi, 0x423, (0xff << 0), (0x46) << 0); mod_phy_reg(pi, 0x411, (0xff << 0), (1) << 0); mod_phy_reg(pi, 0x434, (0xff << 0), (0xFF) << 0); mod_phy_reg(pi, 0x656, (0xf << 0), (2) << 0); 
mod_phy_reg(pi, 0x44d, (0x1 << 2), (1) << 2); mod_radio_reg(pi, RADIO_2064_REG0F7, 0x4, 0x4); mod_radio_reg(pi, RADIO_2064_REG0F1, 0x3, 0); mod_radio_reg(pi, RADIO_2064_REG0F2, 0xF8, 0x90); mod_radio_reg(pi, RADIO_2064_REG0F3, 0x3, 0x2); mod_radio_reg(pi, RADIO_2064_REG0F3, 0xf0, 0xa0); mod_radio_reg(pi, RADIO_2064_REG11F, 0x2, 0x2); wlc_lcnphy_clear_tx_power_offsets(pi); mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (10) << 6); } } static void WLBANDINITFN(wlc_lcnphy_baseband_init) (phy_info_t *pi) { wlc_lcnphy_tbl_init(pi); wlc_lcnphy_rev0_baseband_init(pi); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) wlc_lcnphy_rev2_baseband_init(pi); wlc_lcnphy_bu_tweaks(pi); } static void WLBANDINITFN(wlc_radio_2064_init) (phy_info_t *pi) { u32 i; lcnphy_radio_regs_t *lcnphyregs = NULL; lcnphyregs = lcnphy_radio_regs_2064; for (i = 0; lcnphyregs[i].address != 0xffff; i++) if (CHSPEC_IS5G(pi->radio_chanspec) && lcnphyregs[i].do_init_a) write_radio_reg(pi, ((lcnphyregs[i].address & 0x3fff) | RADIO_DEFAULT_CORE), (u16) lcnphyregs[i].init_a); else if (lcnphyregs[i].do_init_g) write_radio_reg(pi, ((lcnphyregs[i].address & 0x3fff) | RADIO_DEFAULT_CORE), (u16) lcnphyregs[i].init_g); write_radio_reg(pi, RADIO_2064_REG032, 0x62); write_radio_reg(pi, RADIO_2064_REG033, 0x19); write_radio_reg(pi, RADIO_2064_REG090, 0x10); write_radio_reg(pi, RADIO_2064_REG010, 0x00); if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { write_radio_reg(pi, RADIO_2064_REG060, 0x7f); write_radio_reg(pi, RADIO_2064_REG061, 0x72); write_radio_reg(pi, RADIO_2064_REG062, 0x7f); } write_radio_reg(pi, RADIO_2064_REG01D, 0x02); write_radio_reg(pi, RADIO_2064_REG01E, 0x06); mod_phy_reg(pi, 0x4ea, (0x7 << 0), 0 << 0); mod_phy_reg(pi, 0x4ea, (0x7 << 3), 1 << 3); mod_phy_reg(pi, 0x4ea, (0x7 << 6), 2 << 6); mod_phy_reg(pi, 0x4ea, (0x7 << 9), 3 << 9); mod_phy_reg(pi, 0x4ea, (0x7 << 12), 4 << 12); write_phy_reg(pi, 0x4ea, 0x4688); mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); mod_phy_reg(pi, 0x46a, (0xffff << 
0), 25 << 0); wlc_lcnphy_set_tx_locc(pi, 0); wlc_lcnphy_rcal(pi); wlc_lcnphy_rc_cal(pi); } static void WLBANDINITFN(wlc_lcnphy_radio_init) (phy_info_t *pi) { if (NORADIO_ENAB(pi->pubpi)) return; wlc_radio_2064_init(pi); } static void wlc_lcnphy_rcal(phy_info_t *pi) { u8 rcal_value; if (NORADIO_ENAB(pi->pubpi)) return; and_radio_reg(pi, RADIO_2064_REG05B, 0xfD); or_radio_reg(pi, RADIO_2064_REG004, 0x40); or_radio_reg(pi, RADIO_2064_REG120, 0x10); or_radio_reg(pi, RADIO_2064_REG078, 0x80); or_radio_reg(pi, RADIO_2064_REG129, 0x02); or_radio_reg(pi, RADIO_2064_REG057, 0x01); or_radio_reg(pi, RADIO_2064_REG05B, 0x02); mdelay(5); SPINWAIT(!wlc_radio_2064_rcal_done(pi), 10 * 1000 * 1000); if (wlc_radio_2064_rcal_done(pi)) { rcal_value = (u8) read_radio_reg(pi, RADIO_2064_REG05C); rcal_value = rcal_value & 0x1f; } and_radio_reg(pi, RADIO_2064_REG05B, 0xfD); and_radio_reg(pi, RADIO_2064_REG057, 0xFE); } static void wlc_lcnphy_rc_cal(phy_info_t *pi) { u8 dflt_rc_cal_val; u16 flt_val; if (NORADIO_ENAB(pi->pubpi)) return; dflt_rc_cal_val = 7; if (LCNREV_IS(pi->pubpi.phy_rev, 1)) dflt_rc_cal_val = 11; flt_val = (dflt_rc_cal_val << 10) | (dflt_rc_cal_val << 5) | (dflt_rc_cal_val); write_phy_reg(pi, 0x933, flt_val); write_phy_reg(pi, 0x934, flt_val); write_phy_reg(pi, 0x935, flt_val); write_phy_reg(pi, 0x936, flt_val); write_phy_reg(pi, 0x937, (flt_val & 0x1FF)); return; } static bool wlc_phy_txpwr_srom_read_lcnphy(phy_info_t *pi) { s8 txpwr = 0; int i; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; if (CHSPEC_IS2G(pi->radio_chanspec)) { u16 cckpo = 0; u32 offset_ofdm, offset_mcs; pi_lcn->lcnphy_tr_isolation_mid = (u8) PHY_GETINTVAR(pi, "triso2g"); pi_lcn->lcnphy_rx_power_offset = (u8) PHY_GETINTVAR(pi, "rxpo2g"); pi->txpa_2g[0] = (s16) PHY_GETINTVAR(pi, "pa0b0"); pi->txpa_2g[1] = (s16) PHY_GETINTVAR(pi, "pa0b1"); pi->txpa_2g[2] = (s16) PHY_GETINTVAR(pi, "pa0b2"); pi_lcn->lcnphy_rssi_vf = (u8) PHY_GETINTVAR(pi, "rssismf2g"); pi_lcn->lcnphy_rssi_vc = (u8) PHY_GETINTVAR(pi, 
"rssismc2g"); pi_lcn->lcnphy_rssi_gs = (u8) PHY_GETINTVAR(pi, "rssisav2g"); { pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf; pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc; pi_lcn->lcnphy_rssi_gs_lowtemp = pi_lcn->lcnphy_rssi_gs; pi_lcn->lcnphy_rssi_vf_hightemp = pi_lcn->lcnphy_rssi_vf; pi_lcn->lcnphy_rssi_vc_hightemp = pi_lcn->lcnphy_rssi_vc; pi_lcn->lcnphy_rssi_gs_hightemp = pi_lcn->lcnphy_rssi_gs; } txpwr = (s8) PHY_GETINTVAR(pi, "maxp2ga0"); pi->tx_srom_max_2g = txpwr; for (i = 0; i < PWRTBL_NUM_COEFF; i++) { pi->txpa_2g_low_temp[i] = pi->txpa_2g[i]; pi->txpa_2g_high_temp[i] = pi->txpa_2g[i]; } cckpo = (u16) PHY_GETINTVAR(pi, "cck2gpo"); if (cckpo) { uint max_pwr_chan = txpwr; for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++) { pi->tx_srom_max_rate_2g[i] = max_pwr_chan - ((cckpo & 0xf) * 2); cckpo >>= 4; } offset_ofdm = (u32) PHY_GETINTVAR(pi, "ofdm2gpo"); for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) { pi->tx_srom_max_rate_2g[i] = max_pwr_chan - ((offset_ofdm & 0xf) * 2); offset_ofdm >>= 4; } } else { u8 opo = 0; opo = (u8) PHY_GETINTVAR(pi, "opo"); for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++) { pi->tx_srom_max_rate_2g[i] = txpwr; } offset_ofdm = (u32) PHY_GETINTVAR(pi, "ofdm2gpo"); for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) { pi->tx_srom_max_rate_2g[i] = txpwr - ((offset_ofdm & 0xf) * 2); offset_ofdm >>= 4; } offset_mcs = ((u16) PHY_GETINTVAR(pi, "mcs2gpo1") << 16) | (u16) PHY_GETINTVAR(pi, "mcs2gpo0"); pi_lcn->lcnphy_mcs20_po = offset_mcs; for (i = TXP_FIRST_SISO_MCS_20; i <= TXP_LAST_SISO_MCS_20; i++) { pi->tx_srom_max_rate_2g[i] = txpwr - ((offset_mcs & 0xf) * 2); offset_mcs >>= 4; } } pi_lcn->lcnphy_rawtempsense = (u16) PHY_GETINTVAR(pi, "rawtempsense"); pi_lcn->lcnphy_measPower = (u8) PHY_GETINTVAR(pi, "measpower"); pi_lcn->lcnphy_tempsense_slope = (u8) PHY_GETINTVAR(pi, "tempsense_slope"); pi_lcn->lcnphy_hw_iqcal_en = (bool) PHY_GETINTVAR(pi, "hw_iqcal_en"); pi_lcn->lcnphy_iqcal_swp_dis = (bool) PHY_GETINTVAR(pi, 
"iqcal_swp_dis"); pi_lcn->lcnphy_tempcorrx = (u8) PHY_GETINTVAR(pi, "tempcorrx"); pi_lcn->lcnphy_tempsense_option = (u8) PHY_GETINTVAR(pi, "tempsense_option"); pi_lcn->lcnphy_freqoffset_corr = (u8) PHY_GETINTVAR(pi, "freqoffset_corr"); if ((u8) getintvar(pi->vars, "aa2g") > 1) wlc_phy_ant_rxdiv_set((wlc_phy_t *) pi, (u8) getintvar(pi->vars, "aa2g")); } pi_lcn->lcnphy_cck_dig_filt_type = -1; if (PHY_GETVAR(pi, "cckdigfilttype")) { s16 temp; temp = (s16) PHY_GETINTVAR(pi, "cckdigfilttype"); if (temp >= 0) { pi_lcn->lcnphy_cck_dig_filt_type = temp; } } return true; } void wlc_2064_vco_cal(phy_info_t *pi) { u8 calnrst; mod_radio_reg(pi, RADIO_2064_REG057, 1 << 3, 1 << 3); calnrst = (u8) read_radio_reg(pi, RADIO_2064_REG056) & 0xf8; write_radio_reg(pi, RADIO_2064_REG056, calnrst); udelay(1); write_radio_reg(pi, RADIO_2064_REG056, calnrst | 0x03); udelay(1); write_radio_reg(pi, RADIO_2064_REG056, calnrst | 0x07); udelay(300); mod_radio_reg(pi, RADIO_2064_REG057, 1 << 3, 0); } static void wlc_lcnphy_radio_2064_channel_tune_4313(phy_info_t *pi, u8 channel) { uint i; const chan_info_2064_lcnphy_t *ci; u8 rfpll_doubler = 0; u8 pll_pwrup, pll_pwrup_ovr; fixed qFxtal, qFref, qFvco, qFcal; u8 d15, d16, f16, e44, e45; u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div; u16 loop_bw, d30, setCount; if (NORADIO_ENAB(pi->pubpi)) return; ci = &chan_info_2064_lcnphy[0]; rfpll_doubler = 1; mod_radio_reg(pi, RADIO_2064_REG09D, 0x4, 0x1 << 2); write_radio_reg(pi, RADIO_2064_REG09E, 0xf); if (!rfpll_doubler) { loop_bw = PLL_2064_LOOP_BW; d30 = PLL_2064_D30; } else { loop_bw = PLL_2064_LOOP_BW_DOUBLER; d30 = PLL_2064_D30_DOUBLER; } if (CHSPEC_IS2G(pi->radio_chanspec)) { for (i = 0; i < ARRAY_SIZE(chan_info_2064_lcnphy); i++) if (chan_info_2064_lcnphy[i].chan == channel) break; if (i >= ARRAY_SIZE(chan_info_2064_lcnphy)) { return; } ci = &chan_info_2064_lcnphy[i]; } write_radio_reg(pi, RADIO_2064_REG02A, ci->logen_buftune); mod_radio_reg(pi, RADIO_2064_REG030, 0x3, ci->logen_rccr_tx); 
mod_radio_reg(pi, RADIO_2064_REG091, 0x3, ci->txrf_mix_tune_ctrl); mod_radio_reg(pi, RADIO_2064_REG038, 0xf, ci->pa_input_tune_g); mod_radio_reg(pi, RADIO_2064_REG030, 0x3 << 2, (ci->logen_rccr_rx) << 2); mod_radio_reg(pi, RADIO_2064_REG05E, 0xf, ci->pa_rxrf_lna1_freq_tune); mod_radio_reg(pi, RADIO_2064_REG05E, (0xf) << 4, (ci->pa_rxrf_lna2_freq_tune) << 4); write_radio_reg(pi, RADIO_2064_REG06C, ci->rxrf_rxrf_spare1); pll_pwrup = (u8) read_radio_reg(pi, RADIO_2064_REG044); pll_pwrup_ovr = (u8) read_radio_reg(pi, RADIO_2064_REG12B); or_radio_reg(pi, RADIO_2064_REG044, 0x07); or_radio_reg(pi, RADIO_2064_REG12B, (0x07) << 1); e44 = 0; e45 = 0; fpfd = rfpll_doubler ? (pi->xtalfreq << 1) : (pi->xtalfreq); if (pi->xtalfreq > 26000000) e44 = 1; if (pi->xtalfreq > 52000000) e45 = 1; if (e44 == 0) fcal_div = 1; else if (e45 == 0) fcal_div = 2; else fcal_div = 4; fvco3 = (ci->freq * 3); fref3 = 2 * fpfd; qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16); qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16); qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ; qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16); write_radio_reg(pi, RADIO_2064_REG04F, 0x02); d15 = (pi->xtalfreq * fcal_div * 4 / 5) / PLL_2064_MHZ - 1; write_radio_reg(pi, RADIO_2064_REG052, (0x07 & (d15 >> 2))); write_radio_reg(pi, RADIO_2064_REG053, (d15 & 0x3) << 5); d16 = (qFcal * 8 / (d15 + 1)) - 1; write_radio_reg(pi, RADIO_2064_REG051, d16); f16 = ((d16 + 1) * (d15 + 1)) / qFcal; setCount = f16 * 3 * (ci->freq) / 32 - 1; mod_radio_reg(pi, RADIO_2064_REG053, (0x0f << 0), (u8) (setCount >> 8)); or_radio_reg(pi, RADIO_2064_REG053, 0x10); write_radio_reg(pi, RADIO_2064_REG054, (u8) (setCount & 0xff)); div_int = ((fvco3 * (PLL_2064_MHZ >> 4)) / fref3) << 4; div_frac = ((fvco3 * (PLL_2064_MHZ >> 4)) % fref3) << 4; while (div_frac >= fref3) { div_int++; div_frac -= fref3; } div_frac = wlc_lcnphy_qdiv_roundup(div_frac, fref3, 20); mod_radio_reg(pi, RADIO_2064_REG045, (0x1f << 0), (u8) (div_int >> 4)); 
mod_radio_reg(pi, RADIO_2064_REG046, (0x1f << 4), (u8) (div_int << 4)); mod_radio_reg(pi, RADIO_2064_REG046, (0x0f << 0), (u8) (div_frac >> 16)); write_radio_reg(pi, RADIO_2064_REG047, (u8) (div_frac >> 8) & 0xff); write_radio_reg(pi, RADIO_2064_REG048, (u8) div_frac & 0xff); write_radio_reg(pi, RADIO_2064_REG040, 0xfb); write_radio_reg(pi, RADIO_2064_REG041, 0x9A); write_radio_reg(pi, RADIO_2064_REG042, 0xA3); write_radio_reg(pi, RADIO_2064_REG043, 0x0C); { u8 h29, h23, c28, d29, h28_ten, e30, h30_ten, cp_current; u16 c29, c38, c30, g30, d28; c29 = loop_bw; d29 = 200; c38 = 1250; h29 = d29 / c29; h23 = 1; c28 = 30; d28 = (((PLL_2064_HIGH_END_KVCO - PLL_2064_LOW_END_KVCO) * (fvco3 / 2 - PLL_2064_LOW_END_VCO)) / (PLL_2064_HIGH_END_VCO - PLL_2064_LOW_END_VCO)) + PLL_2064_LOW_END_KVCO; h28_ten = (d28 * 10) / c28; c30 = 2640; e30 = (d30 - 680) / 490; g30 = 680 + (e30 * 490); h30_ten = (g30 * 10) / c30; cp_current = ((c38 * h29 * h23 * 100) / h28_ten) / h30_ten; mod_radio_reg(pi, RADIO_2064_REG03C, 0x3f, cp_current); } if (channel >= 1 && channel <= 5) write_radio_reg(pi, RADIO_2064_REG03C, 0x8); else write_radio_reg(pi, RADIO_2064_REG03C, 0x7); write_radio_reg(pi, RADIO_2064_REG03D, 0x3); mod_radio_reg(pi, RADIO_2064_REG044, 0x0c, 0x0c); udelay(1); wlc_2064_vco_cal(pi); write_radio_reg(pi, RADIO_2064_REG044, pll_pwrup); write_radio_reg(pi, RADIO_2064_REG12B, pll_pwrup_ovr); if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { write_radio_reg(pi, RADIO_2064_REG038, 3); write_radio_reg(pi, RADIO_2064_REG091, 7); } } bool wlc_phy_tpc_isenabled_lcnphy(phy_info_t *pi) { if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) return 0; else return (LCNPHY_TX_PWR_CTRL_HW == wlc_lcnphy_get_tx_pwr_ctrl((pi))); } void wlc_phy_txpower_recalc_target_lcnphy(phy_info_t *pi) { u16 pwr_ctrl; if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) { wlc_lcnphy_calib_modes(pi, LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL); } else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) { pwr_ctrl = 
wlc_lcnphy_get_tx_pwr_ctrl(pi); wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); wlc_lcnphy_txpower_recalc_target(pi); wlc_lcnphy_set_tx_pwr_ctrl(pi, pwr_ctrl); } else return; } void wlc_phy_detach_lcnphy(phy_info_t *pi) { kfree(pi->u.pi_lcnphy); } bool wlc_phy_attach_lcnphy(phy_info_t *pi) { phy_info_lcnphy_t *pi_lcn; pi->u.pi_lcnphy = kzalloc(sizeof(phy_info_lcnphy_t), GFP_ATOMIC); if (pi->u.pi_lcnphy == NULL) { return false; } pi_lcn = pi->u.pi_lcnphy; if ((0 == (pi->sh->boardflags & BFL_NOPA)) && !NORADIO_ENAB(pi->pubpi)) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; } pi->xtalfreq = si_pmu_alp_clock(pi->sh->sih); pi_lcn->lcnphy_papd_rxGnCtrl_init = 0; pi->pi_fptr.init = wlc_phy_init_lcnphy; pi->pi_fptr.calinit = wlc_phy_cal_init_lcnphy; pi->pi_fptr.chanset = wlc_phy_chanspec_set_lcnphy; pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_lcnphy; pi->pi_fptr.txiqccget = wlc_lcnphy_get_tx_iqcc; pi->pi_fptr.txiqccset = wlc_lcnphy_set_tx_iqcc; pi->pi_fptr.txloccget = wlc_lcnphy_get_tx_locc; pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft; pi->pi_fptr.detach = wlc_phy_detach_lcnphy; if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) return false; if ((pi->sh->boardflags & BFL_FEM) && (LCNREV_IS(pi->pubpi.phy_rev, 1))) { if (pi_lcn->lcnphy_tempsense_option == 3) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; pi->temppwrctrl_capable = false; } else { pi->hwpwrctrl = false; pi->hwpwrctrl_capable = false; pi->temppwrctrl_capable = true; } } return true; } static void wlc_lcnphy_set_rx_gain(phy_info_t *pi, u32 gain) { u16 trsw, ext_lna, lna1, lna2, tia, biq0, biq1, gain0_15, gain16_19; trsw = (gain & ((u32) 1 << 28)) ? 
0 : 1; ext_lna = (u16) (gain >> 29) & 0x01; lna1 = (u16) (gain >> 0) & 0x0f; lna2 = (u16) (gain >> 4) & 0x0f; tia = (u16) (gain >> 8) & 0xf; biq0 = (u16) (gain >> 12) & 0xf; biq1 = (u16) (gain >> 16) & 0xf; gain0_15 = (u16) ((lna1 & 0x3) | ((lna1 & 0x3) << 2) | ((lna2 & 0x3) << 4) | ((lna2 & 0x3) << 6) | ((tia & 0xf) << 8) | ((biq0 & 0xf) << 12)); gain16_19 = biq1; mod_phy_reg(pi, 0x44d, (0x1 << 0), trsw << 0); mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9); mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10); mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); if (CHSPEC_IS2G(pi->radio_chanspec)) { mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); } wlc_lcnphy_rx_gain_override_enable(pi, true); } static u32 wlc_lcnphy_get_receive_power(phy_info_t *pi, s32 *gain_index) { u32 received_power = 0; s32 max_index = 0; u32 gain_code = 0; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; max_index = 36; if (*gain_index >= 0) gain_code = lcnphy_23bitgaincode_table[*gain_index]; if (-1 == *gain_index) { *gain_index = 0; while ((*gain_index <= (s32) max_index) && (received_power < 700)) { wlc_lcnphy_set_rx_gain(pi, lcnphy_23bitgaincode_table [*gain_index]); received_power = wlc_lcnphy_measure_digital_power(pi, pi_lcn-> lcnphy_noise_samples); (*gain_index)++; } (*gain_index)--; } else { wlc_lcnphy_set_rx_gain(pi, gain_code); received_power = wlc_lcnphy_measure_digital_power(pi, pi_lcn-> lcnphy_noise_samples); } return received_power; } s32 wlc_lcnphy_rx_signal_power(phy_info_t *pi, s32 gain_index) { s32 gain = 0; s32 nominal_power_db; s32 log_val, gain_mismatch, desired_gain, input_power_offset_db, input_power_db; s32 received_power, temperature; uint freq; phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy; received_power = wlc_lcnphy_get_receive_power(pi, &gain_index); gain = lcnphy_gain_table[gain_index]; nominal_power_db = read_phy_reg(pi, 0x425) >> 8; { u32 power = (received_power * 
16); u32 msb1, msb2, val1, val2, diff1, diff2; msb1 = ffs(power) - 1; msb2 = msb1 + 1; val1 = 1 << msb1; val2 = 1 << msb2; diff1 = (power - val1); diff2 = (val2 - power); if (diff1 < diff2) log_val = msb1; else log_val = msb2; } log_val = log_val * 3; gain_mismatch = (nominal_power_db / 2) - (log_val); desired_gain = gain + gain_mismatch; input_power_offset_db = read_phy_reg(pi, 0x434) & 0xFF; if (input_power_offset_db > 127) input_power_offset_db -= 256; input_power_db = input_power_offset_db - desired_gain; input_power_db = input_power_db + lcnphy_gain_index_offset_for_rssi[gain_index]; freq = wlc_phy_channel2freq(CHSPEC_CHANNEL(pi->radio_chanspec)); if ((freq > 2427) && (freq <= 2467)) input_power_db = input_power_db - 1; temperature = pi_lcn->lcnphy_lastsensed_temperature; if ((temperature - 15) < -30) { input_power_db = input_power_db + (((temperature - 10 - 25) * 286) >> 12) - 7; } else if ((temperature - 15) < 4) { input_power_db = input_power_db + (((temperature - 10 - 25) * 286) >> 12) - 3; } else { input_power_db = input_power_db + (((temperature - 10 - 25) * 286) >> 12); } wlc_lcnphy_rx_gain_override_enable(pi, 0); return input_power_db; } static int wlc_lcnphy_load_tx_iir_filter(phy_info_t *pi, bool is_ofdm, s16 filt_type) { s16 filt_index = -1; int j; u16 addr[] = { 0x910, 0x91e, 0x91f, 0x924, 0x925, 0x926, 0x920, 0x921, 0x927, 0x928, 0x929, 0x922, 0x923, 0x930, 0x931, 0x932 }; u16 addr_ofdm[] = { 0x90f, 0x900, 0x901, 0x906, 0x907, 0x908, 0x902, 0x903, 0x909, 0x90a, 0x90b, 0x904, 0x905, 0x90c, 0x90d, 0x90e }; if (!is_ofdm) { for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_CCK; j++) { if (filt_type == LCNPHY_txdigfiltcoeffs_cck[j][0]) { filt_index = (s16) j; break; } } if (filt_index != -1) { for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++) { write_phy_reg(pi, addr[j], LCNPHY_txdigfiltcoeffs_cck [filt_index][j + 1]); } } } else { for (j = 0; j < LCNPHY_NUM_TX_DIG_FILTERS_OFDM; j++) { if (filt_type == LCNPHY_txdigfiltcoeffs_ofdm[j][0]) { filt_index = (s16) j; 
break; } } if (filt_index != -1) { for (j = 0; j < LCNPHY_NUM_DIG_FILT_COEFFS; j++) { write_phy_reg(pi, addr_ofdm[j], LCNPHY_txdigfiltcoeffs_ofdm [filt_index][j + 1]); } } } return (filt_index != -1) ? 0 : -1; }
gpl-2.0
bilalliberty/android_kernel_htc_villec2-caf-based
net/ipv4/esp4.c
2685
17068
#define pr_fmt(fmt) "IPsec: " fmt #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> #include <linux/scatterlist.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/in6.h> #include <net/icmp.h> #include <net/protocol.h> #include <net/udp.h> struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) static u32 esp4_get_mtu(struct xfrm_state *x, int mtu); /* * Allocate an AEAD request structure with extra space for SG and IV. * * For alignment considerations the IV is placed at the front, followed * by the request and finally the SG list. * * TODO: Use spare space in skb for this where possible. */ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen) { unsigned int len; len = seqhilen; len += crypto_aead_ivsize(aead); if (len) { len += crypto_aead_alignmask(aead) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); } len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; return kmalloc(len, GFP_ATOMIC); } static inline __be32 *esp_tmp_seqhi(void *tmp) { return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); } static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) { return crypto_aead_ivsize(aead) ? 
PTR_ALIGN((u8 *)tmp + seqhilen, crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; } static inline struct aead_givcrypt_request *esp_tmp_givreq( struct crypto_aead *aead, u8 *iv) { struct aead_givcrypt_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_givcrypt_set_tfm(req, aead); return req; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) { struct aead_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_request_set_tfm(req, aead); return req; } static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, struct aead_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static inline struct scatterlist *esp_givreq_sg( struct crypto_aead *aead, struct aead_givcrypt_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; kfree(ESP_SKB_CB(skb)->tmp); xfrm_output_resume(skb, err); } static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_givcrypt_request *req; struct scatterlist *sg; struct scatterlist *asg; struct esp_data *esp; struct sk_buff *trailer; void *tmp; u8 *iv; u8 *tail; int blksize; int clen; int alen; int plen; int tfclen; int nfrags; int assoclen; int sglists; int seqhilen; __be32 *seqhi; /* skb is pure payload to encrypt */ err = -ENOMEM; esp = x->data; aead = esp->aead; alen = crypto_aead_authsize(aead); tfclen = 0; if (x->tfcpad) { struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) tfclen = padto - skb->len; } blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = 
ALIGN(skb->len + 2 + tfclen, blksize); if (esp->padlen) clen = ALIGN(clen, esp->padlen); plen = clen - skb->len - tfclen; err = skb_cow_data(skb, tfclen + plen + alen, &trailer); if (err < 0) goto error; nfrags = err; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) goto error; seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_givreq(aead, iv); asg = esp_givreq_sg(aead, req); sg = asg + sglists; /* Fill padding... */ tail = skb_tail_pointer(trailer); if (tfclen) { memset(tail, 0, tfclen); tail += tfclen; } do { int i; for (i = 0; i < plen - 2; i++) tail[i] = i + 1; } while (0); tail[plen - 2] = plen - 2; tail[plen - 1] = *skb_mac_header(skb); pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; struct udphdr *uh; __be32 *udpdata32; __be16 sport, dport; int encap_type; spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; encap_type = encap->encap_type; spin_unlock_bh(&x->lock); uh = (struct udphdr *)esph; uh->source = sport; uh->dest = dport; uh->len = htons(skb->len - skb_transport_offset(skb)); uh->check = 0; switch (encap_type) { default: case UDP_ENCAP_ESPINUDP: esph = (struct ip_esp_hdr *)(uh + 1); break; case UDP_ENCAP_ESPINUDP_NON_IKE: udpdata32 = (__be32 *)(uh + 1); udpdata32[0] = udpdata32[1] = 0; esph = (struct ip_esp_hdr *)(udpdata32 + 2); break; } *skb_mac_header(skb) = IPPROTO_UDP; } esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, esph->enc_data + crypto_aead_ivsize(aead) - skb->data, clen + alen); if ((x->props.flags & XFRM_STATE_ESN)) { 
sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_givcrypt_set_callback(req, 0, esp_output_done, skb); aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq.output.low); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); if (err == -EINPROGRESS) goto error; if (err == -EBUSY) err = NET_XMIT_DROP; kfree(tmp); error: return err; } static int esp_input_done2(struct sk_buff *skb, int err) { const struct iphdr *iph; struct xfrm_state *x = xfrm_input_state(skb); struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int elen = skb->len - hlen; int ihl; u8 nexthdr[2]; int padlen; kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) BUG(); err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) goto out; /* ... check padding bits here. Silly. :-) */ iph = ip_hdr(skb); ihl = iph->ihl * 4; if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; struct udphdr *uh = (void *)(skb_network_header(skb) + ihl); /* * 1) if the NAT-T peer's IP or port changed then * advertize the change to the keying daemon. * This is an inbound SA, so just compare * SRC ports. */ if (iph->saddr != x->props.saddr.a4 || uh->source != encap->encap_sport) { xfrm_address_t ipaddr; ipaddr.a4 = iph->saddr; km_new_mapping(x, &ipaddr, uh->source); /* XXX: perhaps add an extra * policy check here, to see * if we should allow or * reject a packet from a * different source * address/port. 
*/ } /* * 2) ignore UDP/TCP checksums in case * of NAT-T in Transport Mode, or * perform other post-processing fixes * as per draft-ietf-ipsec-udp-encaps-06, * section 3.1.2 */ if (x->props.mode == XFRM_MODE_TRANSPORT) skb->ip_summed = CHECKSUM_UNNECESSARY; } pskb_trim(skb, skb->len - alen - padlen - 2); __skb_pull(skb, hlen); skb_set_transport_header(skb, -ihl); err = nexthdr[1]; /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; out: return err; } static void esp_input_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; xfrm_input_resume(skb, esp_input_done2(skb, err)); } /* * Note: detecting truncated vs. non-truncated authentication data is very * expensive, so we only support truncated data, which is the recommended * and common case. */ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int assoclen; int sglists; int seqhilen; __be32 *seqhi; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) goto out; if (elen <= 0) goto out; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; assoclen = sizeof(*esph); sglists = 1; seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { sglists += 2; seqhilen += sizeof(__be32); assoclen += seqhilen; } err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + sglists; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. 
*/ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); if ((x->props.flags & XFRM_STATE_ESN)) { sg_init_table(asg, 3); sg_set_buf(asg, &esph->spi, sizeof(__be32)); *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; sg_set_buf(asg + 1, seqhi, seqhilen); sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); } else sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, assoclen); err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; err = esp_input_done2(skb, err); out: return err; } static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); u32 align = max_t(u32, blksize, esp->padlen); u32 rem; mtu -= x->props.header_len + crypto_aead_authsize(esp->aead); rem = mtu & (align - 1); mtu &= ~(align - 1); switch (x->props.mode) { case XFRM_MODE_TUNNEL: break; default: case XFRM_MODE_TRANSPORT: /* The worst case */ mtu -= blksize - 4; mtu += min_t(u32, blksize - 4, rem); break; case XFRM_MODE_BEET: /* The worst case. 
*/ mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem); break; } return mtu - 2; } static void esp4_err(struct sk_buff *skb, u32 info) { struct net *net = dev_net(skb->dev); const struct iphdr *iph = (const struct iphdr *)skb->data; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); if (!x) return; NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", ntohl(esph->spi), ntohl(iph->daddr)); xfrm_state_put(x); } static void esp_destroy(struct xfrm_state *x) { struct esp_data *esp = x->data; if (!esp) return; crypto_free_aead(esp->aead); kfree(esp); } static int esp_init_aead(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; int err; aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; err = crypto_aead_setkey(aead, x->aead->alg_key, (x->aead->alg_key_len + 7) / 8); if (err) goto error; err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8); if (err) goto error; error: return err; } static int esp_init_authenc(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; err = -EINVAL; if (x->ealg == NULL) goto error; err = -ENAMETOOLONG; if ((x->props.flags & XFRM_STATE_ESN)) { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } else { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", x->aalg ? 
x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; } aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) + (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) goto error; p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; rta->rta_len = RTA_LENGTH(sizeof(*param)); param = RTA_DATA(rta); p += RTA_SPACE(sizeof(*param)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); p += (x->aalg->alg_key_len + 7) / 8; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); err = -EINVAL; if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_aead_authsize(aead)) { NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_aead_authsize(aead), aalg_desc->uinfo.auth.icv_fullbits/8); goto free_key; } err = crypto_aead_setauthsize( aead, x->aalg->alg_trunc_len / 8); if (err) goto free_key; } param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); free_key: kfree(key); error: return err; } static int esp_init_state(struct xfrm_state *x) { struct esp_data *esp; struct crypto_aead *aead; u32 align; int err; esp = kzalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; x->data = esp; if (x->aead) err = esp_init_aead(x); else err = esp_init_authenc(x); if (err) goto error; aead = esp->aead; esp->padlen = 0; x->props.header_len = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct iphdr); else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN; if (x->encap) { struct xfrm_encap_tmpl *encap = 
x->encap; switch (encap->encap_type) { default: goto error; case UDP_ENCAP_ESPINUDP: x->props.header_len += sizeof(struct udphdr); break; case UDP_ENCAP_ESPINUDP_NON_IKE: x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32); break; } } align = ALIGN(crypto_aead_blocksize(aead), 4); if (esp->padlen) align = max_t(u32, align, esp->padlen); x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead); error: return err; } static const struct xfrm_type esp_type = { .description = "ESP4", .owner = THIS_MODULE, .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp_init_state, .destructor = esp_destroy, .get_mtu = esp4_get_mtu, .input = esp_input, .output = esp_output }; static const struct net_protocol esp4_protocol = { .handler = xfrm4_rcv, .err_handler = esp4_err, .no_policy = 1, .netns_ok = 1, }; static int __init esp4_init(void) { if (xfrm_register_type(&esp_type, AF_INET) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp_type, AF_INET); return -EAGAIN; } return 0; } static void __exit esp4_fini(void) { if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0) pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp_type, AF_INET) < 0) pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp4_init); module_exit(esp4_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
gpl-2.0
CyanogenMod/lge-kernel-p700
drivers/input/mouse/psmouse-base.c
2941
44009
/* * PS/2 mouse driver * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2003-2004 Dmitry Torokhov */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #include <linux/libps2.h> #include <linux/mutex.h> #include "psmouse.h" #include "synaptics.h" #include "logips2pp.h" #include "alps.h" #include "hgpk.h" #include "lifebook.h" #include "trackpoint.h" #include "touchkit_ps2.h" #include "elantech.h" #include "sentelic.h" #define DRIVER_DESC "PS/2 mouse driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static unsigned int psmouse_max_proto = PSMOUSE_AUTO; static int psmouse_set_maxproto(const char *val, const struct kernel_param *); static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp); static struct kernel_param_ops param_ops_proto_abbrev = { .set = psmouse_set_maxproto, .get = psmouse_get_maxproto, }; #define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int) module_param_named(proto, psmouse_max_proto, proto_abbrev, 0644); MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). 
Useful for KVM switches."); static unsigned int psmouse_resolution = 200; module_param_named(resolution, psmouse_resolution, uint, 0644); MODULE_PARM_DESC(resolution, "Resolution, in dpi."); static unsigned int psmouse_rate = 100; module_param_named(rate, psmouse_rate, uint, 0644); MODULE_PARM_DESC(rate, "Report rate, in reports per second."); static unsigned int psmouse_smartscroll = 1; module_param_named(smartscroll, psmouse_smartscroll, bool, 0644); MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled."); static unsigned int psmouse_resetafter = 5; module_param_named(resetafter, psmouse_resetafter, uint, 0644); MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never)."); static unsigned int psmouse_resync_time; module_param_named(resync_time, psmouse_resync_time, uint, 0644); MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never)."); PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO, NULL, psmouse_attr_show_protocol, psmouse_attr_set_protocol); PSMOUSE_DEFINE_ATTR(rate, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, rate), psmouse_show_int_attr, psmouse_attr_set_rate); PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resolution), psmouse_show_int_attr, psmouse_attr_set_resolution); PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resetafter), psmouse_show_int_attr, psmouse_set_int_attr); PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resync_time), psmouse_show_int_attr, psmouse_set_int_attr); static struct attribute *psmouse_attributes[] = { &psmouse_attr_protocol.dattr.attr, &psmouse_attr_rate.dattr.attr, &psmouse_attr_resolution.dattr.attr, &psmouse_attr_resetafter.dattr.attr, &psmouse_attr_resync_time.dattr.attr, NULL }; static struct attribute_group psmouse_attribute_group = { .attrs = psmouse_attributes, }; /* * 
psmouse_mutex protects all operations changing state of mouse * (connecting, disconnecting, changing rate or resolution via * sysfs). We could use a per-device semaphore but since there * rarely more than one PS/2 mouse connected and since semaphore * is taken in "slow" paths it is not worth it. */ static DEFINE_MUTEX(psmouse_mutex); static struct workqueue_struct *kpsmoused_wq; struct psmouse_protocol { enum psmouse_type type; bool maxproto; bool ignore_parity; /* Protocol should ignore parity errors from KBC */ const char *name; const char *alias; int (*detect)(struct psmouse *, bool); int (*init)(struct psmouse *); }; /* * psmouse_process_byte() analyzes the PS/2 data stream and reports * relevant events to the input module once full packet has arrived. */ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; unsigned char *packet = psmouse->packet; if (psmouse->pktcnt < psmouse->pktsize) return PSMOUSE_GOOD_DATA; /* * Full packet accumulated, process it */ /* * Scroll wheel on IntelliMice, scroll buttons on NetMice */ if (psmouse->type == PSMOUSE_IMPS || psmouse->type == PSMOUSE_GENPS) input_report_rel(dev, REL_WHEEL, -(signed char) packet[3]); /* * Scroll wheel and buttons on IntelliMouse Explorer */ if (psmouse->type == PSMOUSE_IMEX) { switch (packet[3] & 0xC0) { case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31)); break; case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */ input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31)); break; case 0x00: case 0xC0: input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7)); input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1); input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1); break; } } /* * Extra buttons on Genius NewNet 3D */ if (psmouse->type == PSMOUSE_GENPS) { input_report_key(dev, BTN_SIDE, (packet[0] 
>> 6) & 1); input_report_key(dev, BTN_EXTRA, (packet[0] >> 7) & 1); } /* * Extra button on ThinkingMouse */ if (psmouse->type == PSMOUSE_THINKPS) { input_report_key(dev, BTN_EXTRA, (packet[0] >> 3) & 1); /* Without this bit of weirdness moving up gives wildly high Y changes. */ packet[1] |= (packet[0] & 0x40) << 1; } /* * Cortron PS2 Trackball reports SIDE button on the 4th bit of the first * byte. */ if (psmouse->type == PSMOUSE_CORTRON) { input_report_key(dev, BTN_SIDE, (packet[0] >> 3) & 1); packet[0] |= 0x08; } /* * Generic PS/2 Mouse */ input_report_key(dev, BTN_LEFT, packet[0] & 1); input_report_key(dev, BTN_MIDDLE, (packet[0] >> 2) & 1); input_report_key(dev, BTN_RIGHT, (packet[0] >> 1) & 1); input_report_rel(dev, REL_X, packet[1] ? (int) packet[1] - (int) ((packet[0] << 4) & 0x100) : 0); input_report_rel(dev, REL_Y, packet[2] ? (int) ((packet[0] << 3) & 0x100) - (int) packet[2] : 0); input_sync(dev); return PSMOUSE_FULL_PACKET; } void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work, unsigned long delay) { queue_delayed_work(kpsmoused_wq, work, delay); } /* * __psmouse_set_state() sets new psmouse state and resets all flags. */ static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state) { psmouse->state = new_state; psmouse->pktcnt = psmouse->out_of_sync_cnt = 0; psmouse->ps2dev.flags = 0; psmouse->last = jiffies; } /* * psmouse_set_state() sets new psmouse state and resets all flags and * counters while holding serio lock so fighting with interrupt handler * is not a concern. */ void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state) { serio_pause_rx(psmouse->ps2dev.serio); __psmouse_set_state(psmouse, new_state); serio_continue_rx(psmouse->ps2dev.serio); } /* * psmouse_handle_byte() processes one byte of the input data stream * by calling corresponding protocol handler. 
*/ static int psmouse_handle_byte(struct psmouse *psmouse) { psmouse_ret_t rc = psmouse->protocol_handler(psmouse); switch (rc) { case PSMOUSE_BAD_DATA: if (psmouse->state == PSMOUSE_ACTIVATED) { printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n", psmouse->name, psmouse->phys, psmouse->pktcnt); if (++psmouse->out_of_sync_cnt == psmouse->resetafter) { __psmouse_set_state(psmouse, PSMOUSE_IGNORE); printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n"); serio_reconnect(psmouse->ps2dev.serio); return -1; } } psmouse->pktcnt = 0; break; case PSMOUSE_FULL_PACKET: psmouse->pktcnt = 0; if (psmouse->out_of_sync_cnt) { psmouse->out_of_sync_cnt = 0; printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n", psmouse->name, psmouse->phys); } break; case PSMOUSE_GOOD_DATA: break; } return 0; } /* * psmouse_interrupt() handles incoming characters, either passing them * for normal processing or gathering them as command response. */ static irqreturn_t psmouse_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct psmouse *psmouse = serio_get_drvdata(serio); if (psmouse->state == PSMOUSE_IGNORE) goto out; if (unlikely((flags & SERIO_TIMEOUT) || ((flags & SERIO_PARITY) && !psmouse->ignore_parity))) { if (psmouse->state == PSMOUSE_ACTIVATED) printk(KERN_WARNING "psmouse.c: bad data from KBC -%s%s\n", flags & SERIO_TIMEOUT ? " timeout" : "", flags & SERIO_PARITY ? 
" bad parity" : ""); ps2_cmd_aborted(&psmouse->ps2dev); goto out; } if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_ACK)) if (ps2_handle_ack(&psmouse->ps2dev, data)) goto out; if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_CMD)) if (ps2_handle_response(&psmouse->ps2dev, data)) goto out; if (psmouse->state <= PSMOUSE_RESYNCING) goto out; if (psmouse->state == PSMOUSE_ACTIVATED && psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) { printk(KERN_INFO "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n", psmouse->name, psmouse->phys, psmouse->pktcnt); psmouse->badbyte = psmouse->packet[0]; __psmouse_set_state(psmouse, PSMOUSE_RESYNCING); psmouse_queue_work(psmouse, &psmouse->resync_work, 0); goto out; } psmouse->packet[psmouse->pktcnt++] = data; /* * Check if this is a new device announcement (0xAA 0x00) */ if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) { if (psmouse->pktcnt == 1) { psmouse->last = jiffies; goto out; } if (psmouse->packet[1] == PSMOUSE_RET_ID || (psmouse->type == PSMOUSE_HGPK && psmouse->packet[1] == PSMOUSE_RET_BAT)) { __psmouse_set_state(psmouse, PSMOUSE_IGNORE); serio_reconnect(serio); goto out; } /* * Not a new device, try processing first byte normally */ psmouse->pktcnt = 1; if (psmouse_handle_byte(psmouse)) goto out; psmouse->packet[psmouse->pktcnt++] = data; } /* * See if we need to force resync because mouse was idle for too long */ if (psmouse->state == PSMOUSE_ACTIVATED && psmouse->pktcnt == 1 && psmouse->resync_time && time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) { psmouse->badbyte = psmouse->packet[0]; __psmouse_set_state(psmouse, PSMOUSE_RESYNCING); psmouse_queue_work(psmouse, &psmouse->resync_work, 0); goto out; } psmouse->last = jiffies; psmouse_handle_byte(psmouse); out: return IRQ_HANDLED; } /* * psmouse_sliced_command() sends an extended PS/2 command to the mouse * using sliced syntax, understood by advanced devices, such as Logitech * or Synaptics touchpads. 
The command is encoded as: * 0xE6 0xE8 rr 0xE8 ss 0xE8 tt 0xE8 uu where (rr*64)+(ss*16)+(tt*4)+uu * is the command. */ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command) { int i; if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11)) return -1; for (i = 6; i >= 0; i -= 2) { unsigned char d = (command >> i) & 3; if (ps2_command(&psmouse->ps2dev, &d, PSMOUSE_CMD_SETRES)) return -1; } return 0; } /* * psmouse_reset() resets the mouse into power-on state. */ int psmouse_reset(struct psmouse *psmouse) { unsigned char param[2]; if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_RESET_BAT)) return -1; if (param[0] != PSMOUSE_RET_BAT && param[1] != PSMOUSE_RET_ID) return -1; return 0; } /* * Genius NetMouse magic init. */ static int genius_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; param[0] = 3; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO); if (param[0] != 0x00 || param[1] != 0x33 || param[2] != 0x55) return -1; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); __set_bit(BTN_SIDE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); psmouse->vendor = "Genius"; psmouse->name = "Mouse"; psmouse->pktsize = 4; } return 0; } /* * IntelliMouse magic init. 
*/ static int intellimouse_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[2]; param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 100; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 3) return -1; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Wheel Mouse"; psmouse->pktsize = 4; } return 0; } /* * Try IntelliMouse/Explorer magic init. */ static int im_explorer_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[2]; intellimouse_detect(psmouse, 0); param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 4) return -1; /* Magic to enable horizontal scrolling on IntelliMouse 4.0 */ param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 40; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); __set_bit(REL_HWHEEL, psmouse->dev->relbit); __set_bit(BTN_SIDE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Explorer Mouse"; psmouse->pktsize = 4; } return 0; } /* * Kensington ThinkingMouse / ExpertMouse magic init. 
*/ static int thinking_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[2]; static const unsigned char seq[] = { 20, 60, 40, 20, 20, 60, 40, 20, 20 }; int i; param[0] = 10; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 0; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); for (i = 0; i < ARRAY_SIZE(seq); i++) { param[0] = seq[i]; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); } ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 2) return -1; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); psmouse->vendor = "Kensington"; psmouse->name = "ThinkingMouse"; } return 0; } /* * Bare PS/2 protocol "detection". Always succeeds. */ static int ps2bare_detect(struct psmouse *psmouse, bool set_properties) { if (set_properties) { if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Mouse"; /* * We have no way of figuring true number of buttons so let's * assume that the device has 3. */ __set_bit(BTN_MIDDLE, psmouse->dev->keybit); } return 0; } /* * Cortron PS/2 protocol detection. There's no special way to detect it, so it * must be forced by sysfs protocol writing. */ static int cortron_detect(struct psmouse *psmouse, bool set_properties) { if (set_properties) { psmouse->vendor = "Cortron"; psmouse->name = "PS/2 Trackball"; __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_SIDE, psmouse->dev->keybit); } return 0; } /* * psmouse_extensions() probes for any extensions to the basic PS/2 protocol * the mouse may have. */ static int psmouse_extensions(struct psmouse *psmouse, unsigned int max_proto, bool set_properties) { bool synaptics_hardware = false; /* * We always check for lifebook because it does not disturb mouse * (it only checks DMI information). 
*/ if (lifebook_detect(psmouse, set_properties) == 0) { if (max_proto > PSMOUSE_IMEX) { if (!set_properties || lifebook_init(psmouse) == 0) return PSMOUSE_LIFEBOOK; } } /* * Try Kensington ThinkingMouse (we try first, because synaptics probe * upsets the thinkingmouse). */ if (max_proto > PSMOUSE_IMEX && thinking_detect(psmouse, set_properties) == 0) return PSMOUSE_THINKPS; /* * Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol * support is disabled in config - we need to know if it is synaptics so we * can reset it properly after probing for intellimouse. */ if (max_proto > PSMOUSE_PS2 && synaptics_detect(psmouse, set_properties) == 0) { synaptics_hardware = true; if (max_proto > PSMOUSE_IMEX) { /* * Try activating protocol, but check if support is enabled first, since * we try detecting Synaptics even when protocol is disabled. */ if (synaptics_supported() && (!set_properties || synaptics_init(psmouse) == 0)) { return PSMOUSE_SYNAPTICS; } /* * Some Synaptics touchpads can emulate extended protocols (like IMPS/2). * Unfortunately Logitech/Genius probes confuse some firmware versions so * we'll have to skip them. */ max_proto = PSMOUSE_IMEX; } /* * Make sure that touchpad is in relative mode, gestures (taps) are enabled */ synaptics_reset(psmouse); } /* * Try ALPS TouchPad */ if (max_proto > PSMOUSE_IMEX) { ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); if (alps_detect(psmouse, set_properties) == 0) { if (!set_properties || alps_init(psmouse) == 0) return PSMOUSE_ALPS; /* * Init failed, try basic relative protocols */ max_proto = PSMOUSE_IMEX; } } /* * Try OLPC HGPK touchpad. */ if (max_proto > PSMOUSE_IMEX && hgpk_detect(psmouse, set_properties) == 0) { if (!set_properties || hgpk_init(psmouse) == 0) return PSMOUSE_HGPK; /* * Init failed, try basic relative protocols */ max_proto = PSMOUSE_IMEX; } /* * Try Elantech touchpad. 
*/ if (max_proto > PSMOUSE_IMEX && elantech_detect(psmouse, set_properties) == 0) { if (!set_properties || elantech_init(psmouse) == 0) return PSMOUSE_ELANTECH; /* * Init failed, try basic relative protocols */ max_proto = PSMOUSE_IMEX; } if (max_proto > PSMOUSE_IMEX) { if (genius_detect(psmouse, set_properties) == 0) return PSMOUSE_GENPS; if (ps2pp_init(psmouse, set_properties) == 0) return PSMOUSE_PS2PP; if (trackpoint_detect(psmouse, set_properties) == 0) return PSMOUSE_TRACKPOINT; if (touchkit_ps2_detect(psmouse, set_properties) == 0) return PSMOUSE_TOUCHKIT_PS2; } /* * Try Finger Sensing Pad. We do it here because its probe upsets * Trackpoint devices (causing TP_READ_ID command to time out). */ if (max_proto > PSMOUSE_IMEX) { if (fsp_detect(psmouse, set_properties) == 0) { if (!set_properties || fsp_init(psmouse) == 0) return PSMOUSE_FSP; /* * Init failed, try basic relative protocols */ max_proto = PSMOUSE_IMEX; } } /* * Reset to defaults in case the device got confused by extended * protocol probes. Note that we follow up with full reset because * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS. */ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); psmouse_reset(psmouse); if (max_proto >= PSMOUSE_IMEX && im_explorer_detect(psmouse, set_properties) == 0) return PSMOUSE_IMEX; if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0) return PSMOUSE_IMPS; /* * Okay, all failed, we have a standard mouse here. The number of the buttons * is still a question, though. We assume 3. */ ps2bare_detect(psmouse, set_properties); if (synaptics_hardware) { /* * We detected Synaptics hardware but it did not respond to IMPS/2 probes. * We need to reset the touchpad because if there is a track point on the * pass through port it could get disabled while probing for protocol * extensions. 
*/ psmouse_reset(psmouse); } return PSMOUSE_PS2; } static const struct psmouse_protocol psmouse_protocols[] = { { .type = PSMOUSE_PS2, .name = "PS/2", .alias = "bare", .maxproto = true, .ignore_parity = true, .detect = ps2bare_detect, }, #ifdef CONFIG_MOUSE_PS2_LOGIPS2PP { .type = PSMOUSE_PS2PP, .name = "PS2++", .alias = "logitech", .detect = ps2pp_init, }, #endif { .type = PSMOUSE_THINKPS, .name = "ThinkPS/2", .alias = "thinkps", .detect = thinking_detect, }, { .type = PSMOUSE_GENPS, .name = "GenPS/2", .alias = "genius", .detect = genius_detect, }, { .type = PSMOUSE_IMPS, .name = "ImPS/2", .alias = "imps", .maxproto = true, .ignore_parity = true, .detect = intellimouse_detect, }, { .type = PSMOUSE_IMEX, .name = "ImExPS/2", .alias = "exps", .maxproto = true, .ignore_parity = true, .detect = im_explorer_detect, }, #ifdef CONFIG_MOUSE_PS2_SYNAPTICS { .type = PSMOUSE_SYNAPTICS, .name = "SynPS/2", .alias = "synaptics", .detect = synaptics_detect, .init = synaptics_init, }, #endif #ifdef CONFIG_MOUSE_PS2_ALPS { .type = PSMOUSE_ALPS, .name = "AlpsPS/2", .alias = "alps", .detect = alps_detect, .init = alps_init, }, #endif #ifdef CONFIG_MOUSE_PS2_LIFEBOOK { .type = PSMOUSE_LIFEBOOK, .name = "LBPS/2", .alias = "lifebook", .init = lifebook_init, }, #endif #ifdef CONFIG_MOUSE_PS2_TRACKPOINT { .type = PSMOUSE_TRACKPOINT, .name = "TPPS/2", .alias = "trackpoint", .detect = trackpoint_detect, }, #endif #ifdef CONFIG_MOUSE_PS2_TOUCHKIT { .type = PSMOUSE_TOUCHKIT_PS2, .name = "touchkitPS/2", .alias = "touchkit", .detect = touchkit_ps2_detect, }, #endif #ifdef CONFIG_MOUSE_PS2_OLPC { .type = PSMOUSE_HGPK, .name = "OLPC HGPK", .alias = "hgpk", .detect = hgpk_detect, }, #endif #ifdef CONFIG_MOUSE_PS2_ELANTECH { .type = PSMOUSE_ELANTECH, .name = "ETPS/2", .alias = "elantech", .detect = elantech_detect, .init = elantech_init, }, #endif #ifdef CONFIG_MOUSE_PS2_SENTELIC { .type = PSMOUSE_FSP, .name = "FSPPS/2", .alias = "fsp", .detect = fsp_detect, .init = fsp_init, }, #endif { .type = 
PSMOUSE_CORTRON, .name = "CortronPS/2", .alias = "cortps", .detect = cortron_detect, }, { .type = PSMOUSE_AUTO, .name = "auto", .alias = "any", .maxproto = true, }, }; static const struct psmouse_protocol *psmouse_protocol_by_type(enum psmouse_type type) { int i; for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) if (psmouse_protocols[i].type == type) return &psmouse_protocols[i]; WARN_ON(1); return &psmouse_protocols[0]; } static const struct psmouse_protocol *psmouse_protocol_by_name(const char *name, size_t len) { const struct psmouse_protocol *p; int i; for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) { p = &psmouse_protocols[i]; if ((strlen(p->name) == len && !strncmp(p->name, name, len)) || (strlen(p->alias) == len && !strncmp(p->alias, name, len))) return &psmouse_protocols[i]; } return NULL; } /* * psmouse_probe() probes for a PS/2 mouse. */ static int psmouse_probe(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[2]; /* * First, we check if it's a mouse. It should send 0x00 or 0x03 * in case of an IntelliMouse in 4-byte mode or 0x04 for IM Explorer. * Sunrex K8561 IR Keyboard/Mouse reports 0xff on second and subsequent * ID queries, probably due to a firmware bug. */ param[0] = 0xa5; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETID)) return -1; if (param[0] != 0x00 && param[0] != 0x03 && param[0] != 0x04 && param[0] != 0xff) return -1; /* * Then we reset and disable the mouse so that it doesn't generate events. */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_DIS)) printk(KERN_WARNING "psmouse.c: Failed to reset mouse on %s\n", ps2dev->serio->phys); return 0; } /* * Here we set the mouse resolution. 
*/ void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution) { static const unsigned char params[] = { 0, 1, 2, 2, 3 }; unsigned char p; if (resolution == 0 || resolution > 200) resolution = 200; p = params[resolution / 50]; ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES); psmouse->resolution = 25 << p; } /* * Here we set the mouse report rate. */ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) { static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 }; unsigned char r; int i = 0; while (rates[i] > rate) i++; r = rates[i]; ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE); psmouse->rate = r; } /* * psmouse_initialize() initializes the mouse to a sane state. */ static void psmouse_initialize(struct psmouse *psmouse) { /* * We set the mouse report rate, resolution and scaling. */ if (psmouse_max_proto != PSMOUSE_PS2) { psmouse->set_rate(psmouse, psmouse->rate); psmouse->set_resolution(psmouse, psmouse->resolution); ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); } } /* * psmouse_activate() enables the mouse so that we get motion reports from it. */ static void psmouse_activate(struct psmouse *psmouse) { if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) printk(KERN_WARNING "psmouse.c: Failed to enable mouse on %s\n", psmouse->ps2dev.serio->phys); psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); } /* * psmouse_deactivate() puts the mouse into poll mode so that we don't get motion * reports from it unless we explicitly request it. */ static void psmouse_deactivate(struct psmouse *psmouse) { if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) printk(KERN_WARNING "psmouse.c: Failed to deactivate mouse on %s\n", psmouse->ps2dev.serio->phys); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); } /* * psmouse_poll() - default poll hanlder. Everyone except for ALPS uses it. 
*/
static int psmouse_poll(struct psmouse *psmouse)
{
	/* High byte of the command word carries the expected reply length */
	return ps2_command(&psmouse->ps2dev, psmouse->packet,
			   PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
}

/*
 * psmouse_resync() attempts to re-validate current protocol.
 *
 * Runs from the kpsmoused workqueue (resync_work); takes psmouse_mutex
 * for the whole operation and deactivates a pass-through parent first.
 */
static void psmouse_resync(struct work_struct *work)
{
	struct psmouse *parent = NULL, *psmouse =
		container_of(work, struct psmouse, resync_work.work);
	struct serio *serio = psmouse->ps2dev.serio;
	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
	bool failed = false, enabled = false;
	int i;

	mutex_lock(&psmouse_mutex);

	if (psmouse->state != PSMOUSE_RESYNCING)
		goto out;

	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	/*
	 * Some mice don't ACK commands sent while they are in the middle of
	 * transmitting motion packet. To avoid delay we use ps2_sendbyte()
	 * instead of ps2_command() which would wait for 200ms for an ACK
	 * that may never come.
	 * As an additional quirk ALPS touchpads may not only forget to ACK
	 * disable command but will stop reporting taps, so if we see that
	 * mouse at least once ACKs disable we will do full reconnect if ACK
	 * is missing.
	 */
	psmouse->num_resyncs++;

	if (ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20)) {
		if (psmouse->num_resyncs < 3 || psmouse->acks_disable_command)
			failed = true;
	} else
		psmouse->acks_disable_command = true;

	/*
	 * Poll the mouse. If it was reset the packet will be shorter than
	 * psmouse->pktsize and ps2_command will fail. We do not expect and
	 * do not handle scenario when mouse "upgrades" its protocol while
	 * disconnected since it would require additional delay. If we ever
	 * see a mouse that does it we'll adjust the code.
	 */
	if (!failed) {
		if (psmouse->poll(psmouse))
			failed = true;
		else {
			/* Feed the polled bytes through the protocol handler
			 * to confirm they form one full, valid packet. */
			psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
			for (i = 0; i < psmouse->pktsize; i++) {
				psmouse->pktcnt++;
				rc = psmouse->protocol_handler(psmouse);
				if (rc != PSMOUSE_GOOD_DATA)
					break;
			}
			if (rc != PSMOUSE_FULL_PACKET)
				failed = true;
			psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
		}
	}
	/*
	 * Now try to enable mouse. We try to do that even if poll failed and also
	 * repeat our attempts 5 times, otherwise we may be left out with disabled
	 * mouse.
	 */
	for (i = 0; i < 5; i++) {
		if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
			enabled = true;
			break;
		}
		msleep(200);
	}

	if (!enabled) {
		printk(KERN_WARNING "psmouse.c: failed to re-enable mouse on %s\n",
			psmouse->ps2dev.serio->phys);
		failed = true;
	}

	if (failed) {
		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
		printk(KERN_INFO "psmouse.c: resync failed, issuing reconnect request\n");
		serio_reconnect(serio);
	} else
		psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);

	if (parent)
		psmouse_activate(parent);
out:
	mutex_unlock(&psmouse_mutex);
}

/*
 * psmouse_cleanup() resets the mouse into power-on state.
 */
static void psmouse_cleanup(struct serio *serio)
{
	struct psmouse *psmouse = serio_get_drvdata(serio);
	struct psmouse *parent = NULL;

	mutex_lock(&psmouse_mutex);

	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);

	/*
	 * Disable stream mode so cleanup routine can proceed undisturbed.
	 */
	if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
		printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
			psmouse->ps2dev.serio->phys);

	if (psmouse->cleanup)
		psmouse->cleanup(psmouse);

	/*
	 * Reset the mouse to defaults (bare PS/2 protocol).
	 */
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);

	/*
	 * Some boxes, such as HP nx7400, get terribly confused if mouse
	 * is not fully enabled before suspending/shutting down.
	 */
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);

	if (parent) {
		if (parent->pt_deactivate)
			parent->pt_deactivate(parent);

		psmouse_activate(parent);
	}

	mutex_unlock(&psmouse_mutex);
}

/*
 * psmouse_disconnect() closes and frees.
 */
static void psmouse_disconnect(struct serio *serio)
{
	struct psmouse *psmouse, *parent = NULL;

	psmouse = serio_get_drvdata(serio);

	sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);

	mutex_lock(&psmouse_mutex);

	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);

	/* make sure we don't have a resync in progress */
	/* (drop the mutex so a running resync worker can finish) */
	mutex_unlock(&psmouse_mutex);
	flush_workqueue(kpsmoused_wq);
	mutex_lock(&psmouse_mutex);

	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	if (psmouse->disconnect)
		psmouse->disconnect(psmouse);

	if (parent && parent->pt_deactivate)
		parent->pt_deactivate(parent);

	psmouse_set_state(psmouse, PSMOUSE_IGNORE);

	serio_close(serio);
	serio_set_drvdata(serio, NULL);

	input_unregister_device(psmouse->dev);
	kfree(psmouse);

	if (parent)
		psmouse_activate(parent);

	mutex_unlock(&psmouse_mutex);
}

/*
 * Switch the device to the given protocol, or, when proto is NULL, probe
 * for the best protocol via psmouse_extensions(). Fills in defaults
 * (bare PS/2 handlers, pktsize 3) before protocol-specific detect/init
 * may override them. Returns 0 on success, -1 on detect/init failure.
 */
static int psmouse_switch_protocol(struct psmouse *psmouse,
				   const struct psmouse_protocol *proto)
{
	const struct psmouse_protocol *selected_proto;
	struct input_dev *input_dev = psmouse->dev;

	input_dev->dev.parent = &psmouse->ps2dev.serio->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	input_dev->keybit[BIT_WORD(BTN_MOUSE)] =
		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
	input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);

	psmouse->set_rate = psmouse_set_rate;
	psmouse->set_resolution = psmouse_set_resolution;
	psmouse->poll = psmouse_poll;
	psmouse->protocol_handler = psmouse_process_byte;
	psmouse->pktsize = 3;

	if (proto && (proto->detect || proto->init)) {
		if (proto->detect && proto->detect(psmouse, true) < 0)
			return -1;

		if (proto->init && proto->init(psmouse) < 0)
			return -1;

		psmouse->type = proto->type;
		selected_proto = proto;
	} else {
		psmouse->type = psmouse_extensions(psmouse,
						   psmouse_max_proto, true);
		selected_proto = psmouse_protocol_by_type(psmouse->type);
	}

	psmouse->ignore_parity = selected_proto->ignore_parity;

	/*
	 * If mouse's packet size is 3 there is no point in polling the
	 * device in hopes to detect protocol reset - we won't get less
	 * than 3 bytes response anyhow.
	 */
	if (psmouse->pktsize == 3)
		psmouse->resync_time = 0;

	/*
	 * Some smart KVMs fake response to POLL command returning just
	 * 3 bytes and messing up our resync logic, so if initial poll
	 * fails we won't try polling the device anymore. Hopefully
	 * such KVM will maintain initially selected protocol.
	 */
	if (psmouse->resync_time && psmouse->poll(psmouse))
		psmouse->resync_time = 0;

	snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
		 selected_proto->name, psmouse->vendor, psmouse->name);

	input_dev->name = psmouse->devname;
	input_dev->phys = psmouse->phys;
	input_dev->id.bustype = BUS_I8042;
	input_dev->id.vendor = 0x0002;
	input_dev->id.product = psmouse->type;
	input_dev->id.version = psmouse->model;

	return 0;
}

/*
 * psmouse_connect() is a callback from the serio module when
 * an unhandled serio port is found.
*/ static int psmouse_connect(struct serio *serio, struct serio_driver *drv) { struct psmouse *psmouse, *parent = NULL; struct input_dev *input_dev; int retval = 0, error = -ENOMEM; mutex_lock(&psmouse_mutex); /* * If this is a pass-through port deactivate parent so the device * connected to this port can be successfully identified */ if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = serio_get_drvdata(serio->parent); psmouse_deactivate(parent); } psmouse = kzalloc(sizeof(struct psmouse), GFP_KERNEL); input_dev = input_allocate_device(); if (!psmouse || !input_dev) goto err_free; ps2_init(&psmouse->ps2dev, serio); INIT_DELAYED_WORK(&psmouse->resync_work, psmouse_resync); psmouse->dev = input_dev; snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); serio_set_drvdata(serio, psmouse); error = serio_open(serio, drv); if (error) goto err_clear_drvdata; if (psmouse_probe(psmouse) < 0) { error = -ENODEV; goto err_close_serio; } psmouse->rate = psmouse_rate; psmouse->resolution = psmouse_resolution; psmouse->resetafter = psmouse_resetafter; psmouse->resync_time = parent ? 
0 : psmouse_resync_time; psmouse->smartscroll = psmouse_smartscroll; psmouse_switch_protocol(psmouse, NULL); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); psmouse_initialize(psmouse); error = input_register_device(psmouse->dev); if (error) goto err_protocol_disconnect; if (parent && parent->pt_activate) parent->pt_activate(parent); error = sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group); if (error) goto err_pt_deactivate; psmouse_activate(psmouse); out: /* If this is a pass-through port the parent needs to be re-activated */ if (parent) psmouse_activate(parent); mutex_unlock(&psmouse_mutex); return retval; err_pt_deactivate: if (parent && parent->pt_deactivate) parent->pt_deactivate(parent); input_unregister_device(psmouse->dev); input_dev = NULL; /* so we don't try to free it below */ err_protocol_disconnect: if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); err_close_serio: serio_close(serio); err_clear_drvdata: serio_set_drvdata(serio, NULL); err_free: input_free_device(input_dev); kfree(psmouse); retval = error; goto out; } static int psmouse_reconnect(struct serio *serio) { struct psmouse *psmouse = serio_get_drvdata(serio); struct psmouse *parent = NULL; struct serio_driver *drv = serio->drv; unsigned char type; int rc = -1; if (!drv || !psmouse) { printk(KERN_DEBUG "psmouse: reconnect request, but serio is disconnected, ignoring...\n"); return -1; } mutex_lock(&psmouse_mutex); if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = serio_get_drvdata(serio->parent); psmouse_deactivate(parent); } psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); if (psmouse->reconnect) { if (psmouse->reconnect(psmouse)) goto out; } else { psmouse_reset(psmouse); if (psmouse_probe(psmouse) < 0) goto out; type = psmouse_extensions(psmouse, psmouse_max_proto, false); if (psmouse->type != type) goto out; } /* ok, the device type (and capabilities) match the old one, * we can continue using it, complete 
intialization */ psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); psmouse_initialize(psmouse); if (parent && parent->pt_activate) parent->pt_activate(parent); psmouse_activate(psmouse); rc = 0; out: /* If this is a pass-through port the parent waits to be activated */ if (parent) psmouse_activate(parent); mutex_unlock(&psmouse_mutex); return rc; } static struct serio_device_id psmouse_serio_ids[] = { { .type = SERIO_8042, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { .type = SERIO_PS_PSTHRU, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, psmouse_serio_ids); static struct serio_driver psmouse_drv = { .driver = { .name = "psmouse", }, .description = DRIVER_DESC, .id_table = psmouse_serio_ids, .interrupt = psmouse_interrupt, .connect = psmouse_connect, .reconnect = psmouse_reconnect, .disconnect = psmouse_disconnect, .cleanup = psmouse_cleanup, }; ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *devattr, char *buf) { struct serio *serio = to_serio_port(dev); struct psmouse_attribute *attr = to_psmouse_attr(devattr); struct psmouse *psmouse; psmouse = serio_get_drvdata(serio); return attr->show(psmouse, attr->data, buf); } ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct serio *serio = to_serio_port(dev); struct psmouse_attribute *attr = to_psmouse_attr(devattr); struct psmouse *psmouse, *parent = NULL; int retval; retval = mutex_lock_interruptible(&psmouse_mutex); if (retval) goto out; psmouse = serio_get_drvdata(serio); if (attr->protect) { if (psmouse->state == PSMOUSE_IGNORE) { retval = -ENODEV; goto out_unlock; } if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = serio_get_drvdata(serio->parent); psmouse_deactivate(parent); } psmouse_deactivate(psmouse); } retval = attr->set(psmouse, attr->data, buf, count); if (attr->protect) { if (retval != -ENODEV) psmouse_activate(psmouse); if 
(parent) psmouse_activate(parent); } out_unlock: mutex_unlock(&psmouse_mutex); out: return retval; } static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char *buf) { unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset); return sprintf(buf, "%u\n", *field); } static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count) { unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset); unsigned long value; if (strict_strtoul(buf, 10, &value)) return -EINVAL; if ((unsigned int)value != value) return -EINVAL; *field = value; return count; } static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, void *data, char *buf) { return sprintf(buf, "%s\n", psmouse_protocol_by_type(psmouse->type)->name); } static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, const char *buf, size_t count) { struct serio *serio = psmouse->ps2dev.serio; struct psmouse *parent = NULL; struct input_dev *old_dev, *new_dev; const struct psmouse_protocol *proto, *old_proto; int error; int retry = 0; proto = psmouse_protocol_by_name(buf, count); if (!proto) return -EINVAL; if (psmouse->type == proto->type) return count; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; while (!list_empty(&serio->children)) { if (++retry > 3) { printk(KERN_WARNING "psmouse: failed to destroy children ports, " "protocol change aborted.\n"); input_free_device(new_dev); return -EIO; } mutex_unlock(&psmouse_mutex); serio_unregister_child_port(serio); mutex_lock(&psmouse_mutex); if (serio->drv != &psmouse_drv) { input_free_device(new_dev); return -ENODEV; } if (psmouse->type == proto->type) { input_free_device(new_dev); return count; /* switched by other thread */ } } if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = serio_get_drvdata(serio->parent); if (parent->pt_deactivate) parent->pt_deactivate(parent); } old_dev = psmouse->dev; old_proto = 
psmouse_protocol_by_type(psmouse->type); if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); psmouse->dev = new_dev; psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); if (psmouse_switch_protocol(psmouse, proto) < 0) { psmouse_reset(psmouse); /* default to PSMOUSE_PS2 */ psmouse_switch_protocol(psmouse, &psmouse_protocols[0]); } psmouse_initialize(psmouse); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); error = input_register_device(psmouse->dev); if (error) { if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); input_free_device(new_dev); psmouse->dev = old_dev; psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); psmouse_switch_protocol(psmouse, old_proto); psmouse_initialize(psmouse); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); return error; } input_unregister_device(old_dev); if (parent && parent->pt_activate) parent->pt_activate(parent); return count; } static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count) { unsigned long value; if (strict_strtoul(buf, 10, &value)) return -EINVAL; psmouse->set_rate(psmouse, value); return count; } static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count) { unsigned long value; if (strict_strtoul(buf, 10, &value)) return -EINVAL; psmouse->set_resolution(psmouse, value); return count; } static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp) { const struct psmouse_protocol *proto; if (!val) return -EINVAL; proto = psmouse_protocol_by_name(val, strlen(val)); if (!proto || !proto->maxproto) return -EINVAL; *((unsigned int *)kp->arg) = proto->type; return 0; } static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) { int err; lifebook_module_init(); 
synaptics_module_init(); hgpk_module_init(); kpsmoused_wq = create_singlethread_workqueue("kpsmoused"); if (!kpsmoused_wq) { printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n"); return -ENOMEM; } err = serio_register_driver(&psmouse_drv); if (err) destroy_workqueue(kpsmoused_wq); return err; } static void __exit psmouse_exit(void) { serio_unregister_driver(&psmouse_drv); destroy_workqueue(kpsmoused_wq); } module_init(psmouse_init); module_exit(psmouse_exit);
gpl-2.0
djmatt604/android_kernel_T989D_JB
drivers/media/dvb/frontends/cx24113.c
3709
14761
/* * Driver for Conexant CX24113/CX24128 Tuner (Satellite) * * Copyright (C) 2007-8 Patrick Boettcher <pb@linuxtv.org> * * Developed for BBTI / Technisat * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include "dvb_frontend.h" #include "cx24113.h" static int debug; #define info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0) #define err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0) #define dprintk(args...) 
\ do { \ if (debug) { \ printk(KERN_DEBUG "CX24113: %s: ", __func__); \ printk(args); \ } \ } while (0) struct cx24113_state { struct i2c_adapter *i2c; const struct cx24113_config *config; #define REV_CX24113 0x23 u8 rev; u8 ver; u8 icp_mode:1; #define ICP_LEVEL1 0 #define ICP_LEVEL2 1 #define ICP_LEVEL3 2 #define ICP_LEVEL4 3 u8 icp_man:2; u8 icp_auto_low:2; u8 icp_auto_mlow:2; u8 icp_auto_mhi:2; u8 icp_auto_hi:2; u8 icp_dig; #define LNA_MIN_GAIN 0 #define LNA_MID_GAIN 1 #define LNA_MAX_GAIN 2 u8 lna_gain:2; u8 acp_on:1; u8 vco_mode:2; u8 vco_shift:1; #define VCOBANDSEL_6 0x80 #define VCOBANDSEL_5 0x01 #define VCOBANDSEL_4 0x02 #define VCOBANDSEL_3 0x04 #define VCOBANDSEL_2 0x08 #define VCOBANDSEL_1 0x10 u8 vco_band; #define VCODIV4 4 #define VCODIV2 2 u8 vcodiv; u8 bs_delay:4; u16 bs_freqcnt:13; u16 bs_rdiv; u8 prescaler_mode:1; u8 rfvga_bias_ctrl; s16 tuner_gain_thres; u8 gain_level; u32 frequency; u8 refdiv; u8 Fwindow_enabled; }; static int cx24113_writereg(struct cx24113_state *state, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->i2c_addr, .flags = 0, .buf = buf, .len = 2 }; int err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x," " data == 0x%02x)\n", __func__, err, reg, data); return err; } return 0; } static int cx24113_readreg(struct cx24113_state *state, u8 reg) { int ret; u8 b; struct i2c_msg msg[] = { { .addr = state->config->i2c_addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->config->i2c_addr, .flags = I2C_M_RD, .buf = &b, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_DEBUG "%s: reg=0x%x (error=%d)\n", __func__, reg, ret); return ret; } return b; } static void cx24113_set_parameters(struct cx24113_state *state) { u8 r; r = cx24113_readreg(state, 0x10) & 0x82; r |= state->icp_mode; r |= state->icp_man << 4; r |= state->icp_dig << 2; r |= state->prescaler_mode << 5; cx24113_writereg(state, 
0x10, r); r = (state->icp_auto_low << 0) | (state->icp_auto_mlow << 2) | (state->icp_auto_mhi << 4) | (state->icp_auto_hi << 6); cx24113_writereg(state, 0x11, r); if (state->rev == REV_CX24113) { r = cx24113_readreg(state, 0x20) & 0xec; r |= state->lna_gain; r |= state->rfvga_bias_ctrl << 4; cx24113_writereg(state, 0x20, r); } r = cx24113_readreg(state, 0x12) & 0x03; r |= state->acp_on << 2; r |= state->bs_delay << 4; cx24113_writereg(state, 0x12, r); r = cx24113_readreg(state, 0x18) & 0x40; r |= state->vco_shift; if (state->vco_band == VCOBANDSEL_6) r |= (1 << 7); else r |= (state->vco_band << 1); cx24113_writereg(state, 0x18, r); r = cx24113_readreg(state, 0x14) & 0x20; r |= (state->vco_mode << 6) | ((state->bs_freqcnt >> 8) & 0x1f); cx24113_writereg(state, 0x14, r); cx24113_writereg(state, 0x15, (state->bs_freqcnt & 0xff)); cx24113_writereg(state, 0x16, (state->bs_rdiv >> 4) & 0xff); r = (cx24113_readreg(state, 0x17) & 0x0f) | ((state->bs_rdiv & 0x0f) << 4); cx24113_writereg(state, 0x17, r); } #define VGA_0 0x00 #define VGA_1 0x04 #define VGA_2 0x02 #define VGA_3 0x06 #define VGA_4 0x01 #define VGA_5 0x05 #define VGA_6 0x03 #define VGA_7 0x07 #define RFVGA_0 0x00 #define RFVGA_1 0x01 #define RFVGA_2 0x02 #define RFVGA_3 0x03 static int cx24113_set_gain_settings(struct cx24113_state *state, s16 power_estimation) { u8 ampout = cx24113_readreg(state, 0x1d) & 0xf0, vga = cx24113_readreg(state, 0x1f) & 0x3f, rfvga = cx24113_readreg(state, 0x20) & 0xf3; u8 gain_level = power_estimation >= state->tuner_gain_thres; dprintk("power estimation: %d, thres: %d, gain_level: %d/%d\n", power_estimation, state->tuner_gain_thres, state->gain_level, gain_level); if (gain_level == state->gain_level) return 0; /* nothing to be done */ ampout |= 0xf; if (gain_level) { rfvga |= RFVGA_0 << 2; vga |= (VGA_7 << 3) | VGA_7; } else { rfvga |= RFVGA_2 << 2; vga |= (VGA_6 << 3) | VGA_2; } state->gain_level = gain_level; cx24113_writereg(state, 0x1d, ampout); cx24113_writereg(state, 0x1f, 
vga); cx24113_writereg(state, 0x20, rfvga); return 1; /* did something */ } static int cx24113_set_Fref(struct cx24113_state *state, u8 high) { u8 xtal = cx24113_readreg(state, 0x02); if (state->rev == 0x43 && state->vcodiv == VCODIV4) high = 1; xtal &= ~0x2; if (high) xtal |= high << 1; return cx24113_writereg(state, 0x02, xtal); } static int cx24113_enable(struct cx24113_state *state, u8 enable) { u8 r21 = (cx24113_readreg(state, 0x21) & 0xc0) | enable; if (state->rev == REV_CX24113) r21 |= (1 << 1); return cx24113_writereg(state, 0x21, r21); } static int cx24113_set_bandwidth(struct cx24113_state *state, u32 bandwidth_khz) { u8 r; if (bandwidth_khz <= 19000) r = 0x03 << 6; else if (bandwidth_khz <= 25000) r = 0x02 << 6; else r = 0x01 << 6; dprintk("bandwidth to be set: %d\n", bandwidth_khz); bandwidth_khz *= 10; bandwidth_khz -= 10000; bandwidth_khz /= 1000; bandwidth_khz += 5; bandwidth_khz /= 10; dprintk("bandwidth: %d %d\n", r >> 6, bandwidth_khz); r |= bandwidth_khz & 0x3f; return cx24113_writereg(state, 0x1e, r); } static int cx24113_set_clk_inversion(struct cx24113_state *state, u8 on) { u8 r = (cx24113_readreg(state, 0x10) & 0x7f) | ((on & 0x1) << 7); return cx24113_writereg(state, 0x10, r); } static int cx24113_get_status(struct dvb_frontend *fe, u32 *status) { struct cx24113_state *state = fe->tuner_priv; u8 r = (cx24113_readreg(state, 0x10) & 0x02) >> 1; if (r) *status |= TUNER_STATUS_LOCKED; dprintk("PLL locked: %d\n", r); return 0; } static u8 cx24113_set_ref_div(struct cx24113_state *state, u8 refdiv) { if (state->rev == 0x43 && state->vcodiv == VCODIV4) refdiv = 2; return state->refdiv = refdiv; } static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f) { s32 N; s64 F; u64 dividend; u8 R, r; u8 vcodiv; u8 factor; s32 freq_hz = state->frequency * 1000; if (state->config->xtal_khz < 20000) factor = 1; else factor = 2; if (state->rev == REV_CX24113) { if (state->frequency >= 1100000) vcodiv = VCODIV2; else vcodiv = VCODIV4; } else 
{ if (state->frequency >= 1165000) vcodiv = VCODIV2; else vcodiv = VCODIV4; } state->vcodiv = vcodiv; dprintk("calculating N/F for %dHz with vcodiv %d\n", freq_hz, vcodiv); R = 0; do { R = cx24113_set_ref_div(state, R + 1); /* calculate tuner PLL settings: */ N = (freq_hz / 100 * vcodiv) * R; N /= (state->config->xtal_khz) * factor * 2; N += 5; /* For round up. */ N /= 10; N -= 32; } while (N < 6 && R < 3); if (N < 6) { err("strange frequency: N < 6\n"); return; } F = freq_hz; F *= (u64) (R * vcodiv * 262144); dprintk("1 N: %d, F: %lld, R: %d\n", N, (long long)F, R); /* do_div needs an u64 as first argument */ dividend = F; do_div(dividend, state->config->xtal_khz * 1000 * factor * 2); F = dividend; dprintk("2 N: %d, F: %lld, R: %d\n", N, (long long)F, R); F -= (N + 32) * 262144; dprintk("3 N: %d, F: %lld, R: %d\n", N, (long long)F, R); if (state->Fwindow_enabled) { if (F > (262144 / 2 - 1638)) F = 262144 / 2 - 1638; if (F < (-262144 / 2 + 1638)) F = -262144 / 2 + 1638; if ((F < 3277 && F > 0) || (F > -3277 && F < 0)) { F = 0; r = cx24113_readreg(state, 0x10); cx24113_writereg(state, 0x10, r | (1 << 6)); } } dprintk("4 N: %d, F: %lld, R: %d\n", N, (long long)F, R); *n = (u16) N; *f = (s32) F; } static void cx24113_set_nfr(struct cx24113_state *state, u16 n, s32 f, u8 r) { u8 reg; cx24113_writereg(state, 0x19, (n >> 1) & 0xff); reg = ((n & 0x1) << 7) | ((f >> 11) & 0x7f); cx24113_writereg(state, 0x1a, reg); cx24113_writereg(state, 0x1b, (f >> 3) & 0xff); reg = cx24113_readreg(state, 0x1c) & 0x1f; cx24113_writereg(state, 0x1c, reg | ((f & 0x7) << 5)); cx24113_set_Fref(state, r - 1); } static int cx24113_set_frequency(struct cx24113_state *state, u32 frequency) { u8 r = 1; /* or 2 */ u16 n = 6; s32 f = 0; r = cx24113_readreg(state, 0x14); cx24113_writereg(state, 0x14, r & 0x3f); r = cx24113_readreg(state, 0x10); cx24113_writereg(state, 0x10, r & 0xbf); state->frequency = frequency; dprintk("tuning to frequency: %d\n", frequency); cx24113_calc_pll_nf(state, &n, &f); 
cx24113_set_nfr(state, n, f, state->refdiv); r = cx24113_readreg(state, 0x18) & 0xbf; if (state->vcodiv != VCODIV2) r |= 1 << 6; cx24113_writereg(state, 0x18, r); /* The need for this sleep is not clear. But helps in some cases */ msleep(5); r = cx24113_readreg(state, 0x1c) & 0xef; cx24113_writereg(state, 0x1c, r | (1 << 4)); return 0; } static int cx24113_init(struct dvb_frontend *fe) { struct cx24113_state *state = fe->tuner_priv; int ret; state->tuner_gain_thres = -50; state->gain_level = 255; /* to force a gain-setting initialization */ state->icp_mode = 0; if (state->config->xtal_khz < 11000) { state->icp_auto_hi = ICP_LEVEL4; state->icp_auto_mhi = ICP_LEVEL4; state->icp_auto_mlow = ICP_LEVEL3; state->icp_auto_low = ICP_LEVEL3; } else { state->icp_auto_hi = ICP_LEVEL4; state->icp_auto_mhi = ICP_LEVEL4; state->icp_auto_mlow = ICP_LEVEL3; state->icp_auto_low = ICP_LEVEL2; } state->icp_dig = ICP_LEVEL3; state->icp_man = ICP_LEVEL1; state->acp_on = 1; state->vco_mode = 0; state->vco_shift = 0; state->vco_band = VCOBANDSEL_1; state->bs_delay = 8; state->bs_freqcnt = 0x0fff; state->bs_rdiv = 0x0fff; state->prescaler_mode = 0; state->lna_gain = LNA_MAX_GAIN; state->rfvga_bias_ctrl = 1; state->Fwindow_enabled = 1; cx24113_set_Fref(state, 0); cx24113_enable(state, 0x3d); cx24113_set_parameters(state); cx24113_set_gain_settings(state, -30); cx24113_set_bandwidth(state, 18025); cx24113_set_clk_inversion(state, 1); if (state->config->xtal_khz >= 40000) ret = cx24113_writereg(state, 0x02, (cx24113_readreg(state, 0x02) & 0xfb) | (1 << 2)); else ret = cx24113_writereg(state, 0x02, (cx24113_readreg(state, 0x02) & 0xfb) | (0 << 2)); return ret; } static int cx24113_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct cx24113_state *state = fe->tuner_priv; /* for a ROLL-OFF factor of 0.35, 0.2: 600, 0.25: 625 */ u32 roll_off = 675; u32 bw; bw = ((p->u.qpsk.symbol_rate/100) * roll_off) / 1000; bw += (10000000/100) + 5; bw /= 10; bw += 1000; 
cx24113_set_bandwidth(state, bw); cx24113_set_frequency(state, p->frequency); msleep(5); return cx24113_get_status(fe, &bw); } static s8 cx24113_agc_table[2][10] = { {-54, -41, -35, -30, -25, -21, -16, -10, -6, -2}, {-39, -35, -30, -25, -19, -15, -11, -5, 1, 9}, }; void cx24113_agc_callback(struct dvb_frontend *fe) { struct cx24113_state *state = fe->tuner_priv; s16 s, i; if (!fe->ops.read_signal_strength) return; do { /* this only works with the current CX24123 implementation */ fe->ops.read_signal_strength(fe, (u16 *) &s); s >>= 8; dprintk("signal strength: %d\n", s); for (i = 0; i < sizeof(cx24113_agc_table[0]); i++) if (cx24113_agc_table[state->gain_level][i] > s) break; s = -25 - i*5; } while (cx24113_set_gain_settings(state, s)); } EXPORT_SYMBOL(cx24113_agc_callback); static int cx24113_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct cx24113_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int cx24113_release(struct dvb_frontend *fe) { struct cx24113_state *state = fe->tuner_priv; dprintk("\n"); fe->tuner_priv = NULL; kfree(state); return 0; } static const struct dvb_tuner_ops cx24113_tuner_ops = { .info = { .name = "Conexant CX24113", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 125, }, .release = cx24113_release, .init = cx24113_init, .sleep = NULL, .set_params = cx24113_set_params, .get_frequency = cx24113_get_frequency, .get_bandwidth = NULL, .get_status = cx24113_get_status, }; struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe, const struct cx24113_config *config, struct i2c_adapter *i2c) { /* allocate memory for the internal state */ struct cx24113_state *state = kzalloc(sizeof(struct cx24113_state), GFP_KERNEL); int rc; if (state == NULL) { err("Unable to kzalloc\n"); goto error; } /* setup the state */ state->config = config; state->i2c = i2c; info("trying to detect myself\n"); /* making a dummy read, because of some expected troubles * after power on */ 
cx24113_readreg(state, 0x00); rc = cx24113_readreg(state, 0x00); if (rc < 0) { info("CX24113 not found.\n"); goto error; } state->rev = rc; switch (rc) { case 0x43: info("detected CX24113 variant\n"); break; case REV_CX24113: info("successfully detected\n"); break; default: err("unsupported device id: %x\n", state->rev); goto error; } state->ver = cx24113_readreg(state, 0x01); info("version: %x\n", state->ver); /* create dvb_frontend */ memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = state; return fe; error: kfree(state); return NULL; } EXPORT_SYMBOL(cx24113_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>"); MODULE_DESCRIPTION("DVB Frontend module for Conexant CX24113/CX24128hardware"); MODULE_LICENSE("GPL");
gpl-2.0
qyx210an/kernel
arch/alpha/boot/bootp.c
3965
5717
/* * arch/alpha/boot/bootp.c * * Copyright (C) 1997 Jay Estabrook * * This file is used for creating a bootp file for the Linux/AXP kernel * * based significantly on the arch/alpha/boot/main.c of Linus Torvalds */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <generated/utsrelease.h> #include <linux/mm.h> #include <asm/system.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> #include <asm/io.h> #include <stdarg.h> #include "ksize.h" extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); extern void move_stack(unsigned long new_stack); struct hwrpb_struct *hwrpb = INIT_HWRPB; static struct pcb_struct pcb_va[1]; /* * Find a physical address of a virtual object.. * * This is easy using the virtual page table address. */ static inline void * find_pa(unsigned long *vptb, void *ptr) { unsigned long address = (unsigned long) ptr; unsigned long result; result = vptb[address >> 13]; result >>= 32; result <<= 13; result |= address & 0x1fff; return (void *) result; } /* * This function moves into OSF/1 pal-code, and has a temporary * PCB for that. The kernel proper should replace this PCB with * the real one as soon as possible. * * The page table muckery in here depends on the fact that the boot * code has the L1 page table identity-map itself in the second PTE * in the L1 page table. Thus the L1-page is virtually addressable * itself (through three levels) at virtual address 0x200802000. */ #define VPTB ((unsigned long *) 0x200000000) #define L1 ((unsigned long *) 0x200802000) void pal_init(void) { unsigned long i, rev; struct percpu_struct * percpu; struct pcb_struct * pcb_pa; /* Create the dummy PCB. 
*/ pcb_va->ksp = 0; pcb_va->usp = 0; pcb_va->ptbr = L1[1] >> 32; pcb_va->asn = 0; pcb_va->pcc = 0; pcb_va->unique = 0; pcb_va->flags = 1; pcb_va->res1 = 0; pcb_va->res2 = 0; pcb_pa = find_pa(VPTB, pcb_va); /* * a0 = 2 (OSF) * a1 = return address, but we give the asm the vaddr of the PCB * a2 = physical addr of PCB * a3 = new virtual page table pointer * a4 = KSP (but the asm sets it) */ srm_printk("Switching to OSF PAL-code .. "); i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB); if (i) { srm_printk("failed, code %ld\n", i); __halt(); } percpu = (struct percpu_struct *) (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB); rev = percpu->pal_revision = percpu->palcode_avail[2]; srm_printk("Ok (rev %lx)\n", rev); tbia(); /* do it directly in case we are SMP */ } static inline void load(unsigned long dst, unsigned long src, unsigned long count) { memcpy((void *)dst, (void *)src, count); } /* * Start the kernel. */ static inline void runkernel(void) { __asm__ __volatile__( "bis %0,%0,$27\n\t" "jmp ($27)" : /* no outputs: it doesn't even return */ : "r" (START_ADDR)); } extern char _end; #define KERNEL_ORIGIN \ ((((unsigned long)&_end) + 511) & ~511) void start_kernel(void) { /* * Note that this crufty stuff with static and envval * and envbuf is because: * * 1. Frequently, the stack is short, and we don't want to overrun; * 2. Frequently the stack is where we are going to copy the kernel to; * 3. A certain SRM console required the GET_ENV output to stack. * ??? A comment in the aboot sources indicates that the GET_ENV * destination must be quadword aligned. Might this explain the * behaviour, rather than requiring output to the stack, which * seems rather far-fetched. 
*/ static long nbytes; static char envval[256] __attribute__((aligned(8))); static unsigned long initrd_start; srm_printk("Linux/AXP bootp loader for Linux " UTS_RELEASE "\n"); if (INIT_HWRPB->pagesize != 8192) { srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10); return; } if (INIT_HWRPB->vptb != (unsigned long) VPTB) { srm_printk("Expected vptb at %p, got %p\n", VPTB, (void *)INIT_HWRPB->vptb); return; } pal_init(); /* The initrd must be page-aligned. See below for the cause of the magic number 5. */ initrd_start = ((START_ADDR + 5*KERNEL_SIZE + PAGE_SIZE) | (PAGE_SIZE-1)) + 1; #ifdef INITRD_IMAGE_SIZE srm_printk("Initrd positioned at %#lx\n", initrd_start); #endif /* * Move the stack to a safe place to ensure it won't be * overwritten by kernel image. */ move_stack(initrd_start - PAGE_SIZE); nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval)); if (nbytes < 0 || nbytes >= sizeof(envval)) { nbytes = 0; } envval[nbytes] = '\0'; srm_printk("Loading the kernel...'%s'\n", envval); /* NOTE: *no* callbacks or printouts from here on out!!! */ /* This is a hack, as some consoles seem to get virtual 20000000 (ie * where the SRM console puts the kernel bootp image) memory * overlapping physical memory where the kernel wants to be put, * which causes real problems when attempting to copy the former to * the latter... :-( * * So, we first move the kernel virtual-to-physical way above where * we physically want the kernel to end up, then copy it from there * to its final resting place... ;-} * * Sigh... */ #ifdef INITRD_IMAGE_SIZE load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_IMAGE_SIZE); #endif load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE); load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE); memset((char*)ZERO_PGE, 0, PAGE_SIZE); strcpy((char*)ZERO_PGE, envval); #ifdef INITRD_IMAGE_SIZE ((long *)(ZERO_PGE+256))[0] = initrd_start; ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE; #endif runkernel(); }
gpl-2.0
nikhiljan93/sony_rhine_msm8974
arch/arm/mach-cns3xxx/pcie.c
4733
10352
/*
 * PCI-E support for CNS3xxx
 *
 * Copyright 2008 Cavium Networks
 *		  Richard Liu <richard.liu@caviumnetworks.com>
 * Copyright 2010 MontaVista Software, LLC.
 *		  Anton Vorontsov <avorontsov@mvista.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <asm/mach/map.h>
#include <mach/cns3xxx.h>
#include "core.h"

/* The three config-access windows each port maps: the host bridge
 * itself, the directly attached device (CFG0), and everything behind
 * it (CFG1). */
enum cns3xxx_access_type {
	CNS3XXX_HOST_TYPE = 0,
	CNS3XXX_CFG0_TYPE,
	CNS3XXX_CFG1_TYPE,
	CNS3XXX_NUM_ACCESS_TYPES,
};

/* Per-port state for the two CNS3xxx PCIe root complexes. */
struct cns3xxx_pcie {
	struct map_desc cfg_bases[CNS3XXX_NUM_ACCESS_TYPES];
	unsigned int irqs[2];		/* [0] = RC, [1] = device */
	struct resource res_io;
	struct resource res_mem;
	struct hw_pci hw_pci;

	bool linked;			/* data link layer came up */
};

static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */

/* The pci_sys_data domain number doubles as the index into
 * cns3xxx_pcie[]. */
static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
{
	struct pci_sys_data *root = sysdata;

	return &cns3xxx_pcie[root->domain];
}

static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
{
	return sysdata_to_cnspci(dev->sysdata);
}

static struct cns3xxx_pcie *pbus_to_cnspci(struct pci_bus *bus)
{
	return sysdata_to_cnspci(bus->sysdata);
}

/* Translate (bus, devfn, where) into a virtual address inside the
 * appropriate config window, or NULL if the access must fail. */
static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
				  unsigned int devfn, int where)
{
	struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus);
	int busno = bus->number;
	int slot = PCI_SLOT(devfn);
	int offset;
	enum cns3xxx_access_type type;
	void __iomem *base;

	/* If there is no link, just show the CNS PCI bridge. */
	if (!cnspci->linked && (busno > 0 || slot > 0))
		return NULL;

	/*
	 * The CNS PCI bridge doesn't fit into the PCI hierarchy, though
	 * we still want to access it. For this to work, we must place
	 * the first device on the same bus as the CNS PCI bridge.
	 */
	if (busno == 0) {
		if (slot > 1)
			return NULL;
		/* slot 0 -> HOST window, slot 1 -> CFG0 window */
		type = slot;
	} else {
		type = CNS3XXX_CFG1_TYPE;
	}

	base = (void __iomem *)cnspci->cfg_bases[type].virtual;
	offset = ((busno & 0xf) << 20) | (devfn << 12) | (where & 0xffc);

	return base + offset;
}

/* pci_ops.read: 32-bit hardware access, then shift/mask down to the
 * requested size.  Missing devices read as all-ones. */
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	u32 v;
	void __iomem *base;
	u32 mask = (0x1ull << (size * 8)) - 1;
	int shift = (where % 4) * 8;

	base = cns3xxx_pci_cfg_base(bus, devfn, where);
	if (!base) {
		*val = 0xffffffff;
		return PCIBIOS_SUCCESSFUL;
	}

	v = __raw_readl(base);

	if (bus->number == 0 && devfn == 0 &&
			(where & 0xffc) == PCI_CLASS_REVISION) {
		/*
		 * RC's class is 0xb, but Linux PCI driver needs 0x604
		 * for a PCIe bridge. So we must fixup the class code
		 * to 0x604 here.
		 */
		v &= 0xff;
		v |= 0x604 << 16;
	}

	*val = (v >> shift) & mask;

	return PCIBIOS_SUCCESSFUL;
}

/* pci_ops.write: read-modify-write of the containing 32-bit word. */
static int cns3xxx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	u32 v;
	void __iomem *base;
	u32 mask = (0x1ull << (size * 8)) - 1;
	int shift = (where % 4) * 8;

	base = cns3xxx_pci_cfg_base(bus, devfn, where);
	if (!base)
		return PCIBIOS_SUCCESSFUL;

	v = __raw_readl(base);

	v &= ~(mask << shift);
	v |= (val & mask) << shift;

	__raw_writel(v, base);

	return PCIBIOS_SUCCESSFUL;
}

/* hw_pci.setup hook: claim and publish this port's I/O and memory
 * windows. */
static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
{
	struct cns3xxx_pcie *cnspci = sysdata_to_cnspci(sys);
	struct resource *res_io = &cnspci->res_io;
	struct resource *res_mem = &cnspci->res_mem;

	BUG_ON(request_resource(&iomem_resource, res_io) ||
	       request_resource(&iomem_resource, res_mem));

	pci_add_resource_offset(&sys->resources, res_io, sys->io_offset);
	pci_add_resource_offset(&sys->resources, res_mem, sys->mem_offset);

	return 1;
}

static struct pci_ops cns3xxx_pcie_ops = {
	.read = cns3xxx_pci_read_config,
	.write = cns3xxx_pci_write_config,
};

static struct pci_bus *cns3xxx_pci_scan_bus(int nr, struct pci_sys_data *sys)
{
	return pci_scan_root_bus(NULL, sys->busnr, &cns3xxx_pcie_ops,
				 sys, &sys->resources);
}

/* hw_pci.map_irq hook: slot 0 is the RC, slot 1 the device; each has
 * its own interrupt line. */
static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct cns3xxx_pcie *cnspci = pdev_to_cnspci(dev);
	int irq = cnspci->irqs[slot];

	pr_info("PCIe map irq: %04d:%02x:%02x.%02x slot %d, pin %d, irq: %d\n",
		pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn),
		PCI_FUNC(dev->devfn), slot, pin, irq);

	return irq;
}

static struct cns3xxx_pcie cns3xxx_pcie[] = {
	[0] = {
		.cfg_bases = {
			[CNS3XXX_HOST_TYPE] = {
				.virtual = CNS3XXX_PCIE0_HOST_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE0_HOST_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
			[CNS3XXX_CFG0_TYPE] = {
				.virtual = CNS3XXX_PCIE0_CFG0_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE0_CFG0_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
			[CNS3XXX_CFG1_TYPE] = {
				.virtual = CNS3XXX_PCIE0_CFG1_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE0_CFG1_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
		},
		.res_io = {
			.name = "PCIe0 I/O space",
			.start = CNS3XXX_PCIE0_IO_BASE,
			.end = CNS3XXX_PCIE0_IO_BASE + SZ_16M - 1,
			.flags = IORESOURCE_IO,
		},
		.res_mem = {
			.name = "PCIe0 non-prefetchable",
			.start = CNS3XXX_PCIE0_MEM_BASE,
			.end = CNS3XXX_PCIE0_MEM_BASE + SZ_16M - 1,
			.flags = IORESOURCE_MEM,
		},
		.irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
		.hw_pci = {
			.domain = 0,
			.swizzle = pci_std_swizzle,
			.nr_controllers = 1,
			.setup = cns3xxx_pci_setup,
			.scan = cns3xxx_pci_scan_bus,
			.map_irq = cns3xxx_pcie_map_irq,
		},
	},
	[1] = {
		.cfg_bases = {
			[CNS3XXX_HOST_TYPE] = {
				.virtual = CNS3XXX_PCIE1_HOST_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE1_HOST_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
			[CNS3XXX_CFG0_TYPE] = {
				.virtual = CNS3XXX_PCIE1_CFG0_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE1_CFG0_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
			[CNS3XXX_CFG1_TYPE] = {
				.virtual = CNS3XXX_PCIE1_CFG1_BASE_VIRT,
				.pfn = __phys_to_pfn(CNS3XXX_PCIE1_CFG1_BASE),
				.length = SZ_16M,
				.type = MT_DEVICE,
			},
		},
		.res_io = {
			.name = "PCIe1 I/O space",
			.start = CNS3XXX_PCIE1_IO_BASE,
			.end = CNS3XXX_PCIE1_IO_BASE + SZ_16M - 1,
			.flags = IORESOURCE_IO,
		},
		.res_mem = {
			.name = "PCIe1 non-prefetchable",
			.start = CNS3XXX_PCIE1_MEM_BASE,
			.end = CNS3XXX_PCIE1_MEM_BASE + SZ_16M - 1,
			.flags = IORESOURCE_MEM,
		},
		.irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
		.hw_pci = {
			.domain = 1,
			.swizzle = pci_std_swizzle,
			.nr_controllers = 1,
			.setup = cns3xxx_pci_setup,
			.scan = cns3xxx_pci_scan_bus,
			.map_irq = cns3xxx_pcie_map_irq,
		},
	},
};

/* Enable the link-training state machine and poll (bounded by ~50
 * jiffies) for the data link layer to come up; record the result in
 * cnspci->linked. */
static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
{
	int port = cnspci->hw_pci.domain;
	u32 reg;
	unsigned long time;

	reg = __raw_readl(MISC_PCIE_CTRL(port));
	/*
	 * Enable Application Request to 1, it will exit L1 automatically,
	 * but when chip back, it will use another clock, still can use 0x1.
	 */
	reg |= 0x3;
	__raw_writel(reg, MISC_PCIE_CTRL(port));

	pr_info("PCIe: Port[%d] Enable PCIe LTSSM\n", port);
	pr_info("PCIe: Port[%d] Check data link layer...", port);

	time = jiffies;
	while (1) {
		reg = __raw_readl(MISC_PCIE_PM_DEBUG(port));
		if (reg & 0x1) {
			pr_info("Link up.\n");
			cnspci->linked = 1;
			break;
		} else if (time_after(jiffies, time + 50)) {
			pr_info("Device not found.\n");
			break;
		}
	}
}

/* Program the root complex: bus numbers, address windows, the
 * device's Max_Read_Request_Size, and the interrupt mask.  Uses a
 * stack-local pci_bus so config cycles work before the bus is
 * scanned. */
static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
{
	int port = cnspci->hw_pci.domain;
	struct pci_sys_data sd = {
		.domain = port,
	};
	struct pci_bus bus = {
		.number = 0,
		.ops = &cns3xxx_pcie_ops,
		.sysdata = &sd,
	};
	u32 io_base = cnspci->res_io.start >> 16;
	u32 mem_base = cnspci->res_mem.start >> 16;
	u32 host_base = cnspci->cfg_bases[CNS3XXX_HOST_TYPE].pfn;
	u32 cfg0_base = cnspci->cfg_bases[CNS3XXX_CFG0_TYPE].pfn;
	u32 devfn = 0;
	u8 tmp8;
	u16 pos;
	u16 dc;

	host_base = (__pfn_to_phys(host_base) - 1) >> 16;
	cfg0_base = (__pfn_to_phys(cfg0_base) - 1) >> 16;

	pci_bus_write_config_byte(&bus, devfn, PCI_PRIMARY_BUS, 0);
	pci_bus_write_config_byte(&bus, devfn, PCI_SECONDARY_BUS, 1);
	pci_bus_write_config_byte(&bus, devfn, PCI_SUBORDINATE_BUS, 1);

	/* Read-back of the bus numbers; values are discarded. */
	pci_bus_read_config_byte(&bus, devfn, PCI_PRIMARY_BUS, &tmp8);
	pci_bus_read_config_byte(&bus, devfn, PCI_SECONDARY_BUS, &tmp8);
	pci_bus_read_config_byte(&bus, devfn, PCI_SUBORDINATE_BUS, &tmp8);

	pci_bus_write_config_word(&bus, devfn, PCI_MEMORY_BASE, mem_base);
	pci_bus_write_config_word(&bus, devfn, PCI_MEMORY_LIMIT, host_base);
	pci_bus_write_config_word(&bus, devfn, PCI_IO_BASE_UPPER16, io_base);
	pci_bus_write_config_word(&bus, devfn, PCI_IO_LIMIT_UPPER16, cfg0_base);

	if (!cnspci->linked)
		return;

	/* Set Device Max_Read_Request_Size to 128 byte */
	devfn = PCI_DEVFN(1, 0);
	pos = pci_bus_find_capability(&bus, devfn, PCI_CAP_ID_EXP);
	pci_bus_read_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, &dc);
	dc &= ~(0x3 << 12);	/* Clear Device Control Register [14:12] */
	pci_bus_write_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, dc);
	pci_bus_read_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, &dc);
	if (!(dc & (0x3 << 12)))
		pr_info("PCIe: Set Device Max_Read_Request_Size to 128 byte\n");

	/* Disable PCIe0 Interrupt Mask INTA to INTD */
	__raw_writel(~0x3FFF, MISC_PCIE_INT_MASK(port));
}

/* Fault handler for imprecise external aborts taken on config
 * accesses to absent devices: skip the faulting instruction. */
static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	if (fsr & (1 << 10))
		regs->ARM_pc += 4;
	return 0;
}

/* Bring up both PCIe ports: map config windows, ungate clocks,
 * de-assert reset, probe the link, and register with the PCI core. */
static int __init cns3xxx_pcie_init(void)
{
	int i;

	pcibios_min_io = 0;
	pcibios_min_mem = 0;

	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
			"imprecise external abort");

	for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
		iotable_init(cns3xxx_pcie[i].cfg_bases,
			     ARRAY_SIZE(cns3xxx_pcie[i].cfg_bases));
		cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_PCIE(i));
		cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
		cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
		cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
		pci_common_init(&cns3xxx_pcie[i].hw_pci);
	}

	pci_assign_unassigned_resources();

	return 0;
}
device_initcall(cns3xxx_pcie_init);
gpl-2.0
AdrianoMartins/android_kernel_lge_v500
net/netfilter/xt_TCPOPTSTRIP.c
5245
3740
/*
 * A module for stripping a specific TCP option from TCP packets.
 *
 * Copyright (C) 2007 Sven Schnelle <svens@bitebene.org>
 * Copyright © CC Computer Consultants GmbH, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_TCPOPTSTRIP.h>

/* Length of the TCP option starting at opt[offset]: 1 for the
 * single-byte EOL/NOP options, otherwise the option's length byte. */
static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
{
	/* Beware zero-length options: make finite progress */
	if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
		return 1;
	else
		return opt[offset+1];
}

/*
 * Overwrite every option selected in info->strip_bmap with TCPOPT_NOP
 * bytes, incrementally patching the TCP checksum for each 16-bit word
 * touched.  Returns XT_CONTINUE, or NF_DROP if the skb cannot be made
 * writable.
 */
static unsigned int
tcpoptstrip_mangle_packet(struct sk_buff *skb,
			  const struct xt_tcpoptstrip_target_info *info,
			  unsigned int tcphoff, unsigned int minlen)
{
	unsigned int optl, i, j;
	struct tcphdr *tcph;
	u_int16_t n, o;
	u_int8_t *opt;

	if (!skb_make_writable(skb, skb->len))
		return NF_DROP;

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	opt  = (u_int8_t *)tcph;

	/*
	 * Walk through all TCP options - if we find some option to remove,
	 * set all octets to %TCPOPT_NOP and adjust checksum.
	 */
	for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
		optl = optlen(opt, i);

		if (i + optl > tcp_hdrlen(skb))
			break;

		if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
			continue;

		for (j = 0; j < optl; ++j) {
			o = opt[i+j];
			n = TCPOPT_NOP;
			/* even offsets occupy the high byte of the
			 * 16-bit checksum word */
			if ((i + j) % 2 == 0) {
				o <<= 8;
				n <<= 8;
			}
			inet_proto_csum_replace2(&tcph->check, skb, htons(o),
						 htons(n), 0);
		}
		memset(opt + i, TCPOPT_NOP, optl);
	}

	return XT_CONTINUE;
}

/* IPv4 target entry point. */
static unsigned int
tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
	       sizeof(struct iphdr) + sizeof(struct tcphdr));
}

#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
/* IPv6 target entry point: skip extension headers to locate TCP. */
static unsigned int
tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int tcphoff;
	u_int8_t nexthdr;
	__be16 frag_off;

	nexthdr = ipv6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
	if (tcphoff < 0)
		return NF_DROP;

	return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff,
	       sizeof(*ipv6h) + sizeof(struct tcphdr));
}
#endif

static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
	{
		.name       = "TCPOPTSTRIP",
		.family     = NFPROTO_IPV4,
		.table      = "mangle",
		.proto      = IPPROTO_TCP,
		.target     = tcpoptstrip_tg4,
		.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
		.me         = THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
	{
		.name       = "TCPOPTSTRIP",
		.family     = NFPROTO_IPV6,
		.table      = "mangle",
		.proto      = IPPROTO_TCP,
		.target     = tcpoptstrip_tg6,
		.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
		.me         = THIS_MODULE,
	},
#endif
};

static int __init tcpoptstrip_tg_init(void)
{
	return xt_register_targets(tcpoptstrip_tg_reg,
				   ARRAY_SIZE(tcpoptstrip_tg_reg));
}

static void __exit tcpoptstrip_tg_exit(void)
{
	xt_unregister_targets(tcpoptstrip_tg_reg,
			      ARRAY_SIZE(tcpoptstrip_tg_reg));
}

module_init(tcpoptstrip_tg_init);
module_exit(tcpoptstrip_tg_exit);
MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: TCP option stripping");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TCPOPTSTRIP");
MODULE_ALIAS("ip6t_TCPOPTSTRIP");
gpl-2.0
Megatron007/Megabyte_kernel_victara
net/ceph/msgpool.c
6013
2035
#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/ceph/msgpool.h>

/* mempool allocation callback: create a fresh msg and tag it with its
 * owning pool so ceph_msg_put() can return it here. */
static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
{
	struct ceph_msgpool *pool = arg;
	struct ceph_msg *msg;

	msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
	if (!msg) {
		dout("msgpool_alloc %s failed\n", pool->name);
	} else {
		dout("msgpool_alloc %s %p\n", pool->name, msg);
		msg->pool = pool;
	}
	return msg;
}

/* mempool free callback: detach the msg from the pool before dropping
 * the final reference, so the put frees it instead of recycling. */
static void msgpool_free(void *element, void *arg)
{
	struct ceph_msgpool *pool = arg;
	struct ceph_msg *msg = element;

	dout("msgpool_release %s %p\n", pool->name, msg);
	msg->pool = NULL;
	ceph_msg_put(msg);
}

/* Initialize a msgpool backed by a mempool of @size preallocated
 * messages of the given @type/@front_len.  Returns 0 or -ENOMEM. */
int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
		      int front_len, int size, bool blocking, const char *name)
{
	dout("msgpool %s init\n", name);
	pool->type = type;
	pool->front_len = front_len;
	pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
	if (!pool->pool)
		return -ENOMEM;
	pool->name = name;
	return 0;
}

void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
	dout("msgpool %s destroy\n", pool->name);
	mempool_destroy(pool->pool);
}

/* Take a message from the pool.  A request larger than the pool's
 * front_len is unexpected (WARNs) and is served by a one-off
 * non-pooled allocation instead. */
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
				  int front_len)
{
	struct ceph_msg *msg;

	if (front_len > pool->front_len) {
		dout("msgpool_get %s need front %d, pool size is %d\n",
		       pool->name, front_len, pool->front_len);
		WARN_ON(1);

		/* try to alloc a fresh message */
		return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
	}

	msg = mempool_alloc(pool->pool, GFP_NOFS);
	dout("msgpool_get %s %p\n", pool->name, msg);
	return msg;
}

/* Return a message to the pool, restoring the front length the user
 * may have shrunk and re-arming its refcount for the next get. */
void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
	dout("msgpool_put %s %p\n", pool->name, msg);

	/* reset msg front_len; user may have changed it */
	msg->front.iov_len = pool->front_len;
	msg->hdr.front_len = cpu_to_le32(pool->front_len);

	kref_init(&msg->kref);  /* retake single ref */
	mempool_free(msg, pool->pool);
}
gpl-2.0
speedbot/android_kernel_sony_Nicki
drivers/ide/ide-gd.c
8317
11060
#include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/genhd.h> #include <linux/mutex.h> #include <linux/ide.h> #include <linux/hdreg.h> #include <linux/dmi.h> #include <linux/slab.h> #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) #define IDE_DISK_MINORS (1 << PARTN_BITS) #else #define IDE_DISK_MINORS 0 #endif #include "ide-disk.h" #include "ide-floppy.h" #define IDE_GD_VERSION "1.18" /* module parameters */ static DEFINE_MUTEX(ide_gd_mutex); static unsigned long debug_mask; module_param(debug_mask, ulong, 0644); static DEFINE_MUTEX(ide_disk_ref_mutex); static void ide_disk_release(struct device *); static struct ide_disk_obj *ide_disk_get(struct gendisk *disk) { struct ide_disk_obj *idkp = NULL; mutex_lock(&ide_disk_ref_mutex); idkp = ide_drv_g(disk, ide_disk_obj); if (idkp) { if (ide_device_get(idkp->drive)) idkp = NULL; else get_device(&idkp->dev); } mutex_unlock(&ide_disk_ref_mutex); return idkp; } static void ide_disk_put(struct ide_disk_obj *idkp) { ide_drive_t *drive = idkp->drive; mutex_lock(&ide_disk_ref_mutex); put_device(&idkp->dev); ide_device_put(drive); mutex_unlock(&ide_disk_ref_mutex); } sector_t ide_gd_capacity(ide_drive_t *drive) { return drive->capacity64; } static int ide_gd_probe(ide_drive_t *); static void ide_gd_remove(ide_drive_t *drive) { struct ide_disk_obj *idkp = drive->driver_data; struct gendisk *g = idkp->disk; ide_proc_unregister_driver(drive, idkp->driver); device_del(&idkp->dev); del_gendisk(g); drive->disk_ops->flush(drive); mutex_lock(&ide_disk_ref_mutex); put_device(&idkp->dev); mutex_unlock(&ide_disk_ref_mutex); } static void ide_disk_release(struct device *dev) { struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj); ide_drive_t *drive = idkp->drive; struct gendisk *g = idkp->disk; drive->disk_ops = NULL; drive->driver_data = NULL; g->private_data = NULL; put_disk(g); kfree(idkp); } /* * On HPA drives the capacity needs to be * 
reinitialized on resume otherwise the disk * can not be used and a hard reset is required */ static void ide_gd_resume(ide_drive_t *drive) { if (ata_id_hpa_enabled(drive->id)) (void)drive->disk_ops->get_capacity(drive); } static const struct dmi_system_id ide_coldreboot_table[] = { { /* Acer TravelMate 66x cuts power during reboot */ .ident = "Acer TravelMate 660", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), }, }, { } /* terminate list */ }; static void ide_gd_shutdown(ide_drive_t *drive) { #ifdef CONFIG_ALPHA /* On Alpha, halt(8) doesn't actually turn the machine off, it puts you into the sort of firmware monitor. Typically, it's used to boot another kernel image, so it's not much different from reboot(8). Therefore, we don't need to spin down the disk in this case, especially since Alpha firmware doesn't handle disks in standby mode properly. On the other hand, it's reasonably safe to turn the power off when the shutdown process reaches the firmware prompt, as the firmware initialization takes rather long time - at least 10 seconds, which should be sufficient for the disk to expire its write cache. */ if (system_state != SYSTEM_POWER_OFF) { #else if (system_state == SYSTEM_RESTART && !dmi_check_system(ide_coldreboot_table)) { #endif drive->disk_ops->flush(drive); return; } printk(KERN_INFO "Shutdown: %s\n", drive->name); drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); } #ifdef CONFIG_IDE_PROC_FS static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive) { return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc; } static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive) { return (drive->media == ide_disk) ? 
ide_disk_settings : ide_floppy_settings; } #endif static ide_startstop_t ide_gd_do_request(ide_drive_t *drive, struct request *rq, sector_t sector) { return drive->disk_ops->do_request(drive, rq, sector); } static struct ide_driver ide_gd_driver = { .gen_driver = { .owner = THIS_MODULE, .name = "ide-gd", .bus = &ide_bus_type, }, .probe = ide_gd_probe, .remove = ide_gd_remove, .resume = ide_gd_resume, .shutdown = ide_gd_shutdown, .version = IDE_GD_VERSION, .do_request = ide_gd_do_request, #ifdef CONFIG_IDE_PROC_FS .proc_entries = ide_disk_proc_entries, .proc_devsets = ide_disk_proc_devsets, #endif }; static int ide_gd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct ide_disk_obj *idkp; ide_drive_t *drive; int ret = 0; idkp = ide_disk_get(disk); if (idkp == NULL) return -ENXIO; drive = idkp->drive; ide_debug_log(IDE_DBG_FUNC, "enter"); idkp->openers++; if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) { drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS; /* Just in case */ ret = drive->disk_ops->init_media(drive, disk); /* * Allow O_NDELAY to open a drive without a disk, or with an * unreadable disk, so that we can get the format capacity * of the drive or begin the format - Sam */ if (ret && (mode & FMODE_NDELAY) == 0) { ret = -EIO; goto out_put_idkp; } if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) { ret = -EROFS; goto out_put_idkp; } /* * Ignore the return code from door_lock, * since the open() has already succeeded, * and the door_lock is irrelevant at this point. 
*/ drive->disk_ops->set_doorlock(drive, disk, 1); drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; check_disk_change(bdev); } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) { ret = -EBUSY; goto out_put_idkp; } return 0; out_put_idkp: idkp->openers--; ide_disk_put(idkp); return ret; } static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; mutex_lock(&ide_gd_mutex); ret = ide_gd_open(bdev, mode); mutex_unlock(&ide_gd_mutex); return ret; } static int ide_gd_release(struct gendisk *disk, fmode_t mode) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; ide_debug_log(IDE_DBG_FUNC, "enter"); mutex_lock(&ide_gd_mutex); if (idkp->openers == 1) drive->disk_ops->flush(drive); if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) { drive->disk_ops->set_doorlock(drive, disk, 0); drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS; } idkp->openers--; ide_disk_put(idkp); mutex_unlock(&ide_gd_mutex); return 0; } static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; geo->heads = drive->bios_head; geo->sectors = drive->bios_sect; geo->cylinders = (u16)drive->bios_cyl; /* truncate */ return 0; } static unsigned int ide_gd_check_events(struct gendisk *disk, unsigned int clearing) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; bool ret; /* do not scan partitions twice if this is a removable device */ if (drive->dev_flags & IDE_DFLAG_ATTACH) { drive->dev_flags &= ~IDE_DFLAG_ATTACH; return 0; } /* * The following is used to force revalidation on the first open on * removeable devices, and never gets reported to userland as * genhd->events is 0. This is intended as removeable ide disk * can't really detect MEDIA_CHANGE events. 
*/ ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; return ret ? DISK_EVENT_MEDIA_CHANGE : 0; } static void ide_gd_unlock_native_capacity(struct gendisk *disk) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; const struct ide_disk_ops *disk_ops = drive->disk_ops; if (disk_ops->unlock_native_capacity) disk_ops->unlock_native_capacity(drive); } static int ide_gd_revalidate_disk(struct gendisk *disk) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; if (ide_gd_check_events(disk, 0)) drive->disk_ops->get_capacity(drive); set_capacity(disk, ide_gd_capacity(drive)); return 0; } static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg); } static const struct block_device_operations ide_gd_ops = { .owner = THIS_MODULE, .open = ide_gd_unlocked_open, .release = ide_gd_release, .ioctl = ide_gd_ioctl, .getgeo = ide_gd_getgeo, .check_events = ide_gd_check_events, .unlock_native_capacity = ide_gd_unlock_native_capacity, .revalidate_disk = ide_gd_revalidate_disk }; static int ide_gd_probe(ide_drive_t *drive) { const struct ide_disk_ops *disk_ops = NULL; struct ide_disk_obj *idkp; struct gendisk *g; /* strstr("foo", "") is non-NULL */ if (!strstr("ide-gd", drive->driver_req)) goto failed; #ifdef CONFIG_IDE_GD_ATA if (drive->media == ide_disk) disk_ops = &ide_ata_disk_ops; #endif #ifdef CONFIG_IDE_GD_ATAPI if (drive->media == ide_floppy) disk_ops = &ide_atapi_disk_ops; #endif if (disk_ops == NULL) goto failed; if (disk_ops->check(drive, DRV_NAME) == 0) { printk(KERN_ERR PFX "%s: not supported by this driver\n", drive->name); goto failed; } idkp = kzalloc(sizeof(*idkp), GFP_KERNEL); if (!idkp) { printk(KERN_ERR PFX "%s: can't allocate a disk 
structure\n", drive->name); goto failed; } g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif)); if (!g) goto out_free_idkp; ide_init_disk(g, drive); idkp->dev.parent = &drive->gendev; idkp->dev.release = ide_disk_release; dev_set_name(&idkp->dev, dev_name(&drive->gendev)); if (device_register(&idkp->dev)) goto out_free_disk; idkp->drive = drive; idkp->driver = &ide_gd_driver; idkp->disk = g; g->private_data = &idkp->driver; drive->driver_data = idkp; drive->debug_mask = debug_mask; drive->disk_ops = disk_ops; disk_ops->setup(drive); set_capacity(g, ide_gd_capacity(drive)); g->minors = IDE_DISK_MINORS; g->driverfs_dev = &drive->gendev; g->flags |= GENHD_FL_EXT_DEVT; if (drive->dev_flags & IDE_DFLAG_REMOVABLE) g->flags = GENHD_FL_REMOVABLE; g->fops = &ide_gd_ops; add_disk(g); return 0; out_free_disk: put_disk(g); out_free_idkp: kfree(idkp); failed: return -ENODEV; } static int __init ide_gd_init(void) { printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n"); return driver_register(&ide_gd_driver.gen_driver); } static void __exit ide_gd_exit(void) { driver_unregister(&ide_gd_driver.gen_driver); } MODULE_ALIAS("ide:*m-disk*"); MODULE_ALIAS("ide-disk"); MODULE_ALIAS("ide:*m-floppy*"); MODULE_ALIAS("ide-floppy"); module_init(ide_gd_init); module_exit(ide_gd_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
gpl-2.0
Yongci/CC-A80-kernel-source
drivers/staging/vt6655/srom.c
8573
10967
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: srom.c * * Purpose:Implement functions to access eeprom * * Author: Jerry Chen * * Date: Jan 29, 2003 * * Functions: * SROMbyReadEmbedded - Embedded read eeprom via MAC * SROMbWriteEmbedded - Embedded write eeprom via MAC * SROMvRegBitsOn - Set Bits On in eeprom * SROMvRegBitsOff - Clear Bits Off in eeprom * SROMbIsRegBitsOn - Test if Bits On in eeprom * SROMbIsRegBitsOff - Test if Bits Off in eeprom * SROMvReadAllContents - Read all contents in eeprom * SROMvWriteAllContents - Write all contents in eeprom * SROMvReadEtherAddress - Read Ethernet Address in eeprom * SROMvWriteEtherAddress - Write Ethernet Address in eeprom * SROMvReadSubSysVenId - Read Sub_VID and Sub_SysId in eeprom * SROMbAutoLoad - Auto Load eeprom to MAC register * * Revision History: * */ #include "upc.h" #include "tmacro.h" #include "tether.h" #include "mac.h" #include "srom.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables 
--------------------------*/ /*--------------------- Export Functions --------------------------*/ /* * Description: Read a byte from EEPROM, by MAC I2C * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * Out: * none * * Return Value: data read * */ unsigned char SROMbyReadEmbedded(unsigned long dwIoBase, unsigned char byContntOffset) { unsigned short wDelay, wNoACK; unsigned char byWait; unsigned char byData; unsigned char byOrg; byData = 0xFF; VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg); /* turn off hardware retry for getting NACK */ VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY))); for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) { VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID); VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset); /* issue read command */ VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR); /* wait DONE be set */ for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) { VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait); if (byWait & (I2MCSR_DONE | I2MCSR_NACK)) break; PCAvDelayByIO(CB_DELAY_LOOP_WAIT); } if ((wDelay < W_MAX_TIMEOUT) && ( !(byWait & I2MCSR_NACK))) { break; } } VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData); VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); return byData; } /* * Description: Write a byte to EEPROM, by MAC I2C * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * wData - data to write * Out: * none * * Return Value: true if succeeded; false if failed. 
* */ bool SROMbWriteEmbedded(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byData) { unsigned short wDelay, wNoACK; unsigned char byWait; unsigned char byOrg; VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg); /* turn off hardware retry for getting NACK */ VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY))); for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) { VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID); VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset); VNSvOutPortB(dwIoBase + MAC_REG_I2MDOPT, byData); /* issue write command */ VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMW); /* wait DONE be set */ for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) { VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait); if (byWait & (I2MCSR_DONE | I2MCSR_NACK)) break; PCAvDelayByIO(CB_DELAY_LOOP_WAIT); } if ((wDelay < W_MAX_TIMEOUT) && ( !(byWait & I2MCSR_NACK))) { break; } } if (wNoACK == W_MAX_I2CRETRY) { VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); return false; } VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); return true; } /* * Description: Turn bits on in eeprom * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * byBits - bits to turn on * Out: * none * * Return Value: none * */ void SROMvRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits) { unsigned char byOrgData; byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset); SROMbWriteEmbedded(dwIoBase, byContntOffset,(unsigned char)(byOrgData | byBits)); } /* * Description: Turn bits off in eeprom * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * byBits - bits to turn off * Out: * none * */ void SROMvRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byBits) { unsigned char byOrgData; byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset); SROMbWriteEmbedded(dwIoBase, byContntOffset,(unsigned char)(byOrgData & (~byBits))); } 
/* * Description: Test if bits on in eeprom * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * byTestBits - bits to test * Out: * none * * Return Value: true if all test bits on; otherwise false * */ bool SROMbIsRegBitsOn(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits) { unsigned char byOrgData; byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset); return (byOrgData & byTestBits) == byTestBits; } /* * Description: Test if bits off in eeprom * * Parameters: * In: * dwIoBase - I/O base address * byContntOffset - address of EEPROM * byTestBits - bits to test * Out: * none * * Return Value: true if all test bits off; otherwise false * */ bool SROMbIsRegBitsOff(unsigned long dwIoBase, unsigned char byContntOffset, unsigned char byTestBits) { unsigned char byOrgData; byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset); return !(byOrgData & byTestBits); } /* * Description: Read all contents of eeprom to buffer * * Parameters: * In: * dwIoBase - I/O base address * Out: * pbyEepromRegs - EEPROM content Buffer * * Return Value: none * */ void SROMvReadAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs) { int ii; /* ii = Rom Address */ for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,(unsigned char) ii); pbyEepromRegs++; } } /* * Description: Write all contents of buffer to eeprom * * Parameters: * In: * dwIoBase - I/O base address * pbyEepromRegs - EEPROM content Buffer * Out: * none * * Return Value: none * */ void SROMvWriteAllContents(unsigned long dwIoBase, unsigned char *pbyEepromRegs) { int ii; /* ii = Rom Address */ for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { SROMbWriteEmbedded(dwIoBase,(unsigned char) ii, *pbyEepromRegs); pbyEepromRegs++; } } /* * Description: Read Ethernet Address from eeprom to buffer * * Parameters: * In: * dwIoBase - I/O base address * Out: * pbyEtherAddress - Ethernet Address buffer * * Return Value: none 
* */ void SROMvReadEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress) { unsigned char ii; /* ii = Rom Address */ for (ii = 0; ii < ETH_ALEN; ii++) { *pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii); pbyEtherAddress++; } } /* * Description: Write Ethernet Address from buffer to eeprom * * Parameters: * In: * dwIoBase - I/O base address * pbyEtherAddress - Ethernet Address buffer * Out: * none * * Return Value: none * */ void SROMvWriteEtherAddress(unsigned long dwIoBase, unsigned char *pbyEtherAddress) { unsigned char ii; /* ii = Rom Address */ for (ii = 0; ii < ETH_ALEN; ii++) { SROMbWriteEmbedded(dwIoBase, ii, *pbyEtherAddress); pbyEtherAddress++; } } /* * Description: Read Sub_VID and Sub_SysId from eeprom to buffer * * Parameters: * In: * dwIoBase - I/O base address * Out: * pdwSubSysVenId - Sub_VID and Sub_SysId read * * Return Value: none * */ void SROMvReadSubSysVenId(unsigned long dwIoBase, unsigned long *pdwSubSysVenId) { unsigned char *pbyData; pbyData = (unsigned char *)pdwSubSysVenId; /* sub vendor */ *pbyData = SROMbyReadEmbedded(dwIoBase, 6); *(pbyData+1) = SROMbyReadEmbedded(dwIoBase, 7); /* sub system */ *(pbyData+2) = SROMbyReadEmbedded(dwIoBase, 8); *(pbyData+3) = SROMbyReadEmbedded(dwIoBase, 9); } /* * Description: Auto Load EEPROM to MAC register * * Parameters: * In: * dwIoBase - I/O base address * Out: * none * * Return Value: true if success; otherwise false * */ bool SROMbAutoLoad(unsigned long dwIoBase) { unsigned char byWait; int ii; unsigned char byOrg; VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg); /* turn on hardware retry */ VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg | I2MCFG_NORETRY)); MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD); /* ii = Rom Address */ for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { MACvTimer0MicroSDelay(dwIoBase, CB_EEPROM_READBYTE_WAIT); VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait); if ( !(byWait & I2MCSR_AUTOLD)) break; } VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); if (ii 
== EEP_MAX_CONTEXT_SIZE) return false; return true; }
gpl-2.0
byoungm/linux-kernel-test
arch/powerpc/platforms/cell/cbe_regs.c
9085
6678
/* * cbe_regs.c * * Accessor routines for the various MMIO register blocks of the CBE * * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. */ #include <linux/percpu.h> #include <linux/types.h> #include <linux/export.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/ptrace.h> #include <asm/cell-regs.h> /* * Current implementation uses "cpu" nodes. We build our own mapping * array of cpu numbers to cpu nodes locally for now to allow interrupt * time code to have a fast path rather than call of_get_cpu_node(). If * we implement cpu hotplug, we'll have to install an appropriate norifier * in order to release references to the cpu going away */ static struct cbe_regs_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_pmd_regs __iomem *pmd_regs; struct cbe_iic_regs __iomem *iic_regs; struct cbe_mic_tm_regs __iomem *mic_tm_regs; struct cbe_pmd_shadow_regs pmd_shadow_regs; } cbe_regs_maps[MAX_CBE]; static int cbe_regs_map_count; static struct cbe_thread_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_regs_map *regs; unsigned int thread_id; unsigned int cbe_id; } cbe_thread_map[NR_CPUS]; static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... 
MAX_CBE-1] = {CPU_BITS_NONE} }; static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; static struct cbe_regs_map *cbe_find_map(struct device_node *np) { int i; struct device_node *tmp_np; if (strcasecmp(np->type, "spe")) { for (i = 0; i < cbe_regs_map_count; i++) if (cbe_regs_maps[i].cpu_node == np || cbe_regs_maps[i].be_node == np) return &cbe_regs_maps[i]; return NULL; } if (np->data) return np->data; /* walk up path until cpu or be node was found */ tmp_np = np; do { tmp_np = tmp_np->parent; /* on a correct devicetree we wont get up to root */ BUG_ON(!tmp_np); } while (strcasecmp(tmp_np->type, "cpu") && strcasecmp(tmp_np->type, "be")); np->data = cbe_find_map(tmp_np); return np->data; } struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->iic_regs; } struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->iic_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return 
map->mic_tm_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->mic_tm_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); u32 cbe_get_hw_thread_id(int cpu) { return cbe_thread_map[cpu].thread_id; } EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); u32 cbe_cpu_to_node(int cpu) { return cbe_thread_map[cpu].cbe_id; } EXPORT_SYMBOL_GPL(cbe_cpu_to_node); u32 cbe_node_to_cpu(int node) { return cpumask_first(&cbe_local_mask[node]); } EXPORT_SYMBOL_GPL(cbe_node_to_cpu); static struct device_node *cbe_get_be_node(int cpu_id) { struct device_node *np; for_each_node_by_type (np, "be") { int len,i; const phandle *cpu_handle; cpu_handle = of_get_property(np, "cpus", &len); /* * the CAB SLOF tree is non compliant, so we just assume * there is only one node */ if (WARN_ON_ONCE(!cpu_handle)) return np; for (i=0; i<len; i++) if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL)) return np; } return NULL; } void __init cbe_fill_regs_map(struct cbe_regs_map *map) { if(map->be_node) { struct device_node *be, *np; be = map->be_node; for_each_node_by_type(np, "pervasive") if (of_get_parent(np) == be) map->pmd_regs = of_iomap(np, 0); for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") if (of_get_parent(np) == be) map->iic_regs = of_iomap(np, 2); for_each_node_by_type(np, "mic-tm") if (of_get_parent(np) == be) map->mic_tm_regs = of_iomap(np, 0); } else { struct device_node *cpu; /* That hack must die die die ! 
*/ const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; cpu = map->cpu_node; prop = of_get_property(cpu, "pervasive", NULL); if (prop != NULL) map->pmd_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "iic", NULL); if (prop != NULL) map->iic_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "mic-tm", NULL); if (prop != NULL) map->mic_tm_regs = ioremap(prop->address, prop->len); } } void __init cbe_regs_init(void) { int i; unsigned int thread_id; struct device_node *cpu; /* Build local fast map of CPUs */ for_each_possible_cpu(i) { cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); cbe_thread_map[i].be_node = cbe_get_be_node(i); cbe_thread_map[i].thread_id = thread_id; } /* Find maps for each device tree CPU */ for_each_node_by_type(cpu, "cpu") { struct cbe_regs_map *map; unsigned int cbe_id; cbe_id = cbe_regs_map_count++; map = &cbe_regs_maps[cbe_id]; if (cbe_regs_map_count > MAX_CBE) { printk(KERN_ERR "cbe_regs: More BE chips than supported" "!\n"); cbe_regs_map_count--; of_node_put(cpu); return; } map->cpu_node = cpu; for_each_possible_cpu(i) { struct cbe_thread_map *thread = &cbe_thread_map[i]; if (thread->cpu_node == cpu) { thread->regs = map; thread->cbe_id = cbe_id; map->be_node = thread->be_node; cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); if(thread->thread_id == 0) cpumask_set_cpu(i, &cbe_first_online_cpu); } } cbe_fill_regs_map(map); } }
gpl-2.0
zarboz/Ville-Z_Blackout_edition
arch/mips/math-emu/dp_flong.c
10365
1886
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_flong(s64 x) { u64 xm; int xe; int xs; CLEARCX; if (x == 0) return ieee754dp_zero(0); if (x == 1 || x == -1) return ieee754dp_one(x < 0); if (x == 10 || x == -10) return ieee754dp_ten(x < 0); xs = (x < 0); if (xs) { if (x == (1ULL << 63)) xm = (1ULL << 63); /* max neg can't be safely negated */ else xm = -x; } else { xm = x; } /* normalize */ xe = DP_MBITS + 3; if (xm >> (DP_MBITS + 1 + 3)) { /* shunt out overflow bits */ while (xm >> (DP_MBITS + 1 + 3)) { XDPSRSX1(); } } else { /* normalize in grs extended double precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET1(xs, xe, xm, "dp_flong", x); } ieee754dp ieee754dp_fulong(u64 u) { if ((s64) u < 0) return ieee754dp_add(ieee754dp_1e63(), ieee754dp_flong(u & ~(1ULL << 63))); return ieee754dp_flong(u); }
gpl-2.0
task650/kernel_htc_msm8974
arch/mips/math-emu/dp_simple.c
10365
1879
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" int ieee754dp_finite(ieee754dp x) { return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; } ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y) { CLEARCX; DPSIGN(x) = DPSIGN(y); return x; } ieee754dp ieee754dp_neg(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* * Invert the sign ALWAYS to prevent an endless recursion on * pow() in libc. */ /* quick fix up */ DPSIGN(x) ^= 1; if (xc == IEEE754_CLASS_SNAN) { ieee754dp y = ieee754dp_indef(); SETCX(IEEE754_INVALID_OPERATION); DPSIGN(y) = DPSIGN(x); return ieee754dp_nanxcpt(y, "neg"); } return x; } ieee754dp ieee754dp_abs(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* Clear sign ALWAYS, irrespective of NaN */ DPSIGN(x) = 0; if (xc == IEEE754_CLASS_SNAN) { SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "abs"); } return x; }
gpl-2.0
yank555-lu/TF101G_V9.2.2.6
net/bridge/br_netfilter.c
126
27729
/* * Handle firewalling * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * Bart De Schuymer <bdschuym@pandora.be> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Lennert dedicates this file to Kerstin Wurdinger. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/if_pppox.h> #include <linux/ppp_defs.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter_arp.h> #include <linux/in_route.h> #include <linux/inetdevice.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/route.h> #include <asm/uaccess.h> #include "br_private.h" #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #define skb_origaddr(skb) (((struct bridge_skb_cb *) \ (skb->nf_bridge->data))->daddr.ipv4) #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr) #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr) #ifdef CONFIG_SYSCTL static struct ctl_table_header *brnf_sysctl_header; static int brnf_call_iptables __read_mostly = 1; static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 #endif static inline __be16 vlan_proto(const struct sk_buff *skb) { if (vlan_tx_tag_present(skb)) return skb->protocol; else if (skb->protocol == htons(ETH_P_8021Q)) 
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; else return 0; } #define IS_VLAN_IP(skb) \ (vlan_proto(skb) == htons(ETH_P_IP) && \ brnf_filter_vlan_tagged) #define IS_VLAN_IPV6(skb) \ (vlan_proto(skb) == htons(ETH_P_IPV6) && \ brnf_filter_vlan_tagged) #define IS_VLAN_ARP(skb) \ (vlan_proto(skb) == htons(ETH_P_ARP) && \ brnf_filter_vlan_tagged) static inline __be16 pppoe_proto(const struct sk_buff *skb) { return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + sizeof(struct pppoe_hdr))); } #define IS_PPPOE_IP(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IP) && \ brnf_filter_pppoe_tagged) #define IS_PPPOE_IPV6(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IPV6) && \ brnf_filter_pppoe_tagged) static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) { } static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old) { return NULL; } static struct dst_ops fake_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, .cow_metrics = fake_cow_metrics, }; /* * Initialize bogus route table used to keep netfilter happy. * Currently, we fill in the PMTU entry because netfilter * refragmentation needs it, and the rt_flags entry because * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. */ static const u32 br_dst_default_metrics[RTAX_MAX] = { [RTAX_MTU - 1] = 1500, }; void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; atomic_set(&rt->dst.__refcnt, 1); rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_init_metrics(&rt->dst, br_dst_default_metrics, true); rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? 
&port->br->fake_rtable : NULL; } static inline struct net_device *bridge_parent(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? port->br->dev : NULL; } static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) { skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); if (likely(skb->nf_bridge)) atomic_set(&(skb->nf_bridge->use), 1); return skb->nf_bridge; } static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (atomic_read(&nf_bridge->use) > 1) { struct nf_bridge_info *tmp = nf_bridge_alloc(skb); if (tmp) { memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info)); atomic_set(&tmp->use, 1); } nf_bridge_put(nf_bridge); nf_bridge = tmp; } return nf_bridge; } static inline void nf_bridge_push_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_push(skb, len); skb->network_header -= len; } static inline void nf_bridge_pull_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull(skb, len); skb->network_header += len; } static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull_rcsum(skb, len); skb->network_header += len; } static inline void nf_bridge_save_header(struct sk_buff *skb) { int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); skb_copy_from_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); } static inline void nf_bridge_update_protocol(struct sk_buff *skb) { if (skb->nf_bridge->mask & BRNF_8021Q) skb->protocol = htons(ETH_P_8021Q); else if (skb->nf_bridge->mask & BRNF_PPPoE) skb->protocol = htons(ETH_P_PPP_SES); } /* When handing a packet over to the IP layer * check whether we have a skb that is in the * expected format */ static int br_parse_ip_options(struct sk_buff *skb) { struct ip_options *opt; struct iphdr 
*iph; struct net_device *dev = skb->dev; u32 len; iph = ip_hdr(skb); opt = &(IPCB(skb)->opt); /* Basic sanity checks */ if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto inhdr_error; len = ntohs(iph->tot_len); if (skb->len < len) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; if (pskb_trim_rcsum(skb, len)) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (iph->ihl == 5) return 0; opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) goto inhdr_error; /* Check correct handling of SRR option */ if (unlikely(opt->srr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) goto drop; if (ip_options_rcv_srr(skb)) goto drop; } return 0; inhdr_error: IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); drop: return -1; } /* Fill in the header for fragmented IP packets handled by * the IPv4 connection tracking code. */ int nf_bridge_copy_header(struct sk_buff *skb) { int err; unsigned int header_size; nf_bridge_update_protocol(skb); header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); err = skb_cow_head(skb, header_size); if (err) return err; skb_copy_to_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); __skb_push(skb, nf_bridge_encap_header_len(skb)); return 0; } /* PF_BRIDGE/PRE_ROUTING *********************************************/ /* Undo the changes made for ip6tables PREROUTING and continue the * bridge PRE_ROUTING hook. 
*/ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Obtain the correct destination MAC address, while preserving the original * source MAC address. If we already know this address, we just copy it. If we * don't, we use the neighbour framework to find out. In both cases, we make * sure that br_handle_frame_finish() is called afterwards. */ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct dst_entry *dst; skb->dev = bridge_parent(skb->dev); if (!skb->dev) goto free_skb; dst = skb_dst(skb); if (dst->hh) { neigh_hh_bridge(dst->hh, skb); skb->dev = nf_bridge->physindev; return br_handle_frame_finish(skb); } else if (dst->neighbour) { /* the neighbour function below overwrites the complete * MAC header, so we save the Ethernet source address and * protocol number. */ skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); /* tell br_dev_xmit to continue with forwarding */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; return dst->neighbour->output(skb); } free_skb: kfree_skb(skb); return 0; } /* This requires some explaining. If DNAT has taken place, * we will need to fix up the destination Ethernet address. * * There are two cases to consider: * 1. The packet was DNAT'ed to a device in the same bridge * port group as it was received on. We can still bridge * the packet. * 2. 
The packet was DNAT'ed to a different device, either * a non-bridged device or another bridge port group. * The packet will need to be routed. * * The correct way of distinguishing between these two cases is to * call ip_route_input() and to look at skb->dst->dev, which is * changed to the destination device if ip_route_input() succeeds. * * Let's first consider the case that ip_route_input() succeeds: * * If the output device equals the logical bridge device the packet * came in on, we can consider this bridging. The corresponding MAC * address will be obtained in br_nf_pre_routing_finish_bridge. * Otherwise, the packet is considered to be routed and we just * change the destination MAC address so that the packet will * later be passed up to the IP stack to be routed. For a redirected * packet, ip_route_input() will give back the localhost as output device, * which differs from the bridge device. * * Let's now consider the case that ip_route_input() fails: * * This can be because the destination address is martian, in which case * the packet will be dropped. * If IP forwarding is disabled, ip_route_input() will fail, while * ip_route_output_key() can return success. The source * address for ip_route_output_key() is set to zero, so ip_route_output_key() * thinks we're handling a locally generated packet and won't care * if IP forwarding is enabled. If the output device equals the logical bridge * device, we proceed as if ip_route_input() succeeded. If it differs from the * logical bridge port or if ip_route_output_key() fails we drop the packet. 
*/ static int br_nf_pre_routing_finish(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; int err; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; if (dnat_took_place(skb)) { if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a * martian destination or due to the fact that * forwarding is disabled. For most martian packets, * ip_route_output_key() will fail. It won't fail for 2 types of * martian destinations: loopback destinations and destination * 0.0.0.0. In both cases the packet will be dropped because the * destination is the loopback device and not the bridge. */ if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) goto free_skb; rt = ip_route_output(dev_net(dev), iph->daddr, 0, RT_TOS(iph->tos), 0); if (!IS_ERR(rt)) { /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. 
*/ if (rt->dst.dev == dev) { skb_dst_set(skb, &rt->dst); goto bridged_dnat; } ip_rt_put(rt); } free_skb: kfree_skb(skb); return 0; } else { if (skb_dst(skb)->dev == dev) { bridged_dnat: skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_bridge, 1); return 0; } memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN); skb->pkt_type = PACKET_HOST; } } else { rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); } skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; skb->dev = bridge_parent(skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) nf_bridge->mask |= BRNF_PPPoE; return skb->dev; } /* We only check the length. 
A bridge shouldn't do any hop-by-hop stuff anyway */ static int check_hbh_len(struct sk_buff *skb) { unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); u32 pkt_len; const unsigned char *nh = skb_network_header(skb); int off = raw - nh; int len = (raw[1] + 1) << 3; if ((raw + len) - skb->data > skb_headlen(skb)) goto bad; off += 2; len -= 2; while (len > 0) { int optlen = nh[off + 1] + 2; switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; case IPV6_TLV_PADN: break; case IPV6_TLV_JUMBO: if (nh[off + 1] != 4 || (off & 3) != 2) goto bad; pkt_len = ntohl(*(__be32 *) (nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || ipv6_hdr(skb)->payload_len) goto bad; if (pkt_len > skb->len - sizeof(struct ipv6hdr)) goto bad; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto bad; nh = skb_network_header(skb); break; default: if (optlen > len) goto bad; break; } off += optlen; len -= optlen; } if (len == 0) return 0; bad: return -1; } /* Replicate the checks that IPv6 does on packet reception and pass the packet * to ip6tables, which doesn't support NAT, so things are fairly simple. 
*/ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct ipv6hdr *hdr; u32 pkt_len; if (skb->len < sizeof(struct ipv6hdr)) return NF_DROP; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return NF_DROP; hdr = ipv6_hdr(skb); if (hdr->version != 6) return NF_DROP; pkt_len = ntohs(hdr->payload_len); if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) return NF_DROP; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) return NF_DROP; } if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; skb->protocol = htons(ETH_P_IPV6); NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_ipv6); return NF_STOLEN; } /* Direct IPv6 traffic to br_nf_pre_routing_ipv6. * Replicate the checks that IPv4 does on packet reception. * Set skb->dev to the bridge device (i.e. parent of the * receiving device) to make netfilter happy, the REDIRECT * target in particular. Save the original destination IP * address to be able to detect DNAT afterwards. 
*/ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; __u32 len = nf_bridge_encap_header_len(skb); if (unlikely(!pskb_may_pull(skb, len))) return NF_DROP; p = br_port_get_rcu(in); if (p == NULL) return NF_DROP; br = p->br; if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { if (!brnf_call_ip6tables && !br->nf_call_ip6tables) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } if (!brnf_call_iptables && !br->nf_call_iptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); if (br_parse_ip_options(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; store_orig_dstaddr(skb); skb->protocol = htons(ETH_P_IP); NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish); return NF_STOLEN; } /* PF_BRIDGE/LOCAL_IN ************************************************/ /* The packet is locally destined, which requires a real * dst_entry, so detach the fake one. On the way up, the * packet would pass through PRE_ROUTING again (which already * took place when the packet entered the bridge), but we * register an IPv4 PRE_ROUTING 'sabotage' hook that will * prevent this from happening. 
*/ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct rtable *rt = skb_rtable(skb); if (rt && rt == bridge_parent_rtable(in)) skb_dst_drop(skb); return NF_ACCEPT; } /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *in; if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) { in = nf_bridge->physindev; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge_update_protocol(skb); } else { in = *((struct net_device **)(skb->cb)); } nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); return 0; } /* This is the 'purely bridged' case. For IP, we pass the packet to * netfilter with indev and outdev set to the bridge device, * but we are still able to filter on the 'real' indev/outdev * because of the physdev module. For ARP, indev and outdev are the * bridge ports. */ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge; struct net_device *parent; u_int8_t pf; if (!skb->nf_bridge) return NF_ACCEPT; /* Need exclusive nf_bridge_info since we might have multiple * different physoutdevs. 
*/ if (!nf_bridge_unshare(skb)) return NF_DROP; parent = bridge_parent(out); if (!parent) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; nf_bridge_pull_encap_header(skb); nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } if (pf == PF_INET && br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; nf_bridge->physoutdev = skb->dev; if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, br_nf_forward_finish); return NF_STOLEN; } static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; struct net_device **d = (struct net_device **)(skb->cb); p = br_port_get_rcu(out); if (p == NULL) return NF_ACCEPT; br = p->br; if (!brnf_call_arptables && !br->nf_call_arptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_ARP)) { if (!IS_VLAN_ARP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header(skb); } if (arp_hdr(skb)->ar_pln != 4) { if (IS_VLAN_ARP(skb)) nf_bridge_push_encap_header(skb); return NF_ACCEPT; } *d = (struct net_device *)in; NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in, (struct net_device *)out, br_nf_forward_finish); return NF_STOLEN; } #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { if (br_parse_ip_options(skb)) /* Drop invalid 
packet */ return NF_DROP; ret = ip_fragment(skb, br_dev_queue_push_xmit); } else ret = br_dev_queue_push_xmit(skb); return ret; } #else static int br_nf_dev_queue_xmit(struct sk_buff *skb) { return br_dev_queue_push_xmit(skb); } #endif /* PF_BRIDGE/POST_ROUTING ********************************************/ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *realoutdev = bridge_parent(skb->dev); u_int8_t pf; if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED)) return NF_ACCEPT; if (!realoutdev) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; /* We assume any code from br_dev_queue_push_xmit onwards doesn't care * about the value of skb->pkt_type. */ if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge_pull_encap_header(skb); nf_bridge_save_header(skb); if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, br_nf_dev_queue_xmit); return NF_STOLEN; } /* IP/SABOTAGE *****************************************************/ /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING * for the second time. 
*/ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { if (skb->nf_bridge && !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { return NF_STOP; } return NF_ACCEPT; } /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because * br_dev_queue_push_xmit is called afterwards */ static struct nf_hook_ops br_nf_ops[] __read_mostly = { { .hook = br_nf_pre_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_local_in, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_forward_ip, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF - 1, }, { .hook = br_nf_forward_arp, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_post_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_LAST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_FIRST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_FIRST, }, }; #ifdef CONFIG_SYSCTL static int brnf_sysctl_call_tables(ctl_table * ctl, int write, void __user * buffer, size_t * lenp, loff_t * ppos) { int ret; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (write && *(int *)(ctl->data)) *(int *)(ctl->data) = 1; return ret; } static ctl_table brnf_table[] = { { .procname = "bridge-nf-call-arptables", .data = &brnf_call_arptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-call-iptables", .data = &brnf_call_iptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = 
"bridge-nf-call-ip6tables", .data = &brnf_call_ip6tables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-vlan-tagged", .data = &brnf_filter_vlan_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-pppoe-tagged", .data = &brnf_filter_pppoe_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { } }; static struct ctl_path brnf_path[] = { { .procname = "net", }, { .procname = "bridge", }, { } }; #endif int __init br_netfilter_init(void) { int ret; ret = dst_entries_init(&fake_dst_ops); if (ret < 0) return ret; ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); if (ret < 0) { dst_entries_destroy(&fake_dst_ops); return ret; } #ifdef CONFIG_SYSCTL brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); dst_entries_destroy(&fake_dst_ops); return -ENOMEM; } #endif printk(KERN_NOTICE "Bridge firewalling registered\n"); return 0; } void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL unregister_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); }
gpl-2.0
lazybios/linux
drivers/acpi/acpica/exregion.c
126
15317
/****************************************************************************** * * Module Name: exregion - ACPI default op_region (address space) handlers * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exregion") /******************************************************************************* * * FUNCTION: acpi_ex_system_memory_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the System Memory address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_system_memory_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; void *logical_addr_ptr = NULL; struct acpi_mem_space_context *mem_info = region_context; u32 length; acpi_size map_length; acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif ACPI_FUNCTION_TRACE(ex_system_memory_space_handler); /* Validate and translate the bit width */ switch (bit_width) { case 8: length = 1; break; case 16: length = 2; break; case 32: length = 4; break; case 64: length = 8; break; default: ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u", bit_width)); 
return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED /* * Hardware does not support non-aligned data transfers, we must verify * the request. */ (void)acpi_ut_short_divide((u64) address, length, NULL, &remainder); if (remainder != 0) { return_ACPI_STATUS(AE_AML_ALIGNMENT); } #endif /* * Does the request fit into the cached memory mapping? * Is 1) Address below the current mapping? OR * 2) Address beyond the current mapping? */ if ((address < mem_info->mapped_physical_address) || (((u64) address + length) > ((u64) mem_info->mapped_physical_address + mem_info->mapped_length))) { /* * The request cannot be resolved by the current memory mapping; * Delete the existing mapping and create a new one. */ if (mem_info->mapped_length) { /* Valid mapping, delete it */ acpi_os_unmap_memory(mem_info->mapped_logical_address, mem_info->mapped_length); } /* * Attempt to map from the requested address to the end of the region. * However, we will never map more than one page, nor will we cross * a page boundary. */ map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); /* * If mapping the entire remaining portion of the region will cross * a page boundary, just map up to the page boundary, do not cross. 
* On some systems, crossing a page boundary while mapping regions * can cause warnings if the pages have different attributes * due to resource management */ page_boundary_map_length = ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address; if (!page_boundary_map_length) { page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; } if (map_length > page_boundary_map_length) { map_length = page_boundary_map_length; } /* Create a new mapping starting at the address given */ mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length); if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", ACPI_FORMAT_NATIVE_UINT(address), (u32) map_length)); mem_info->mapped_length = 0; return_ACPI_STATUS(AE_NO_MEMORY); } /* Save the physical address and mapping size */ mem_info->mapped_physical_address = address; mem_info->mapped_length = map_length; } /* * Generate a logical pointer corresponding to the address we want to * access */ logical_addr_ptr = mem_info->mapped_logical_address + ((u64) address - (u64) mem_info->mapped_physical_address); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", bit_width, function, ACPI_FORMAT_NATIVE_UINT(address))); /* * Perform the memory read or write * * Note: For machines that do not support non-aligned transfers, the target * address was checked for alignment above. We do not attempt to break the * transfer up into smaller (byte-size) chunks because the AML specifically * asked for a transfer width that the hardware may require. 
*/ switch (function) { case ACPI_READ: *value = 0; switch (bit_width) { case 8: *value = (u64) ACPI_GET8(logical_addr_ptr); break; case 16: *value = (u64) ACPI_GET16(logical_addr_ptr); break; case 32: *value = (u64) ACPI_GET32(logical_addr_ptr); break; case 64: *value = (u64) ACPI_GET64(logical_addr_ptr); break; default: /* bit_width was already validated */ break; } break; case ACPI_WRITE: switch (bit_width) { case 8: ACPI_SET8(logical_addr_ptr) = (u8) * value; break; case 16: ACPI_SET16(logical_addr_ptr) = (u16) * value; break; case 32: ACPI_SET32(logical_addr_ptr) = (u32) * value; break; case 64: ACPI_SET64(logical_addr_ptr) = (u64) * value; break; default: /* bit_width was already validated */ break; } break; default: status = AE_BAD_PARAMETER; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_system_io_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the System IO address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_system_io_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; u32 value32; ACPI_FUNCTION_TRACE(ex_system_io_space_handler); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n", bit_width, function, ACPI_FORMAT_NATIVE_UINT(address))); /* Decode the function parameter */ switch (function) { case ACPI_READ: status = acpi_hw_read_port((acpi_io_address) address, &value32, bit_width); *value = value32; break; case ACPI_WRITE: status = 
acpi_hw_write_port((acpi_io_address) address, (u32) * value, bit_width); break; default: status = AE_BAD_PARAMETER; break; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_pci_config_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the PCI Config address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_pci_config_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; struct acpi_pci_id *pci_id; u16 pci_register; ACPI_FUNCTION_TRACE(ex_pci_config_space_handler); /* * The arguments to acpi_os(Read|Write)pci_configuration are: * * pci_segment is the PCI bus segment range 0-31 * pci_bus is the PCI bus number range 0-255 * pci_device is the PCI device number range 0-31 * pci_function is the PCI device function number * pci_register is the Config space register range 0-255 bytes * * value - input value for write, output address for read * */ pci_id = (struct acpi_pci_id *)region_context; pci_register = (u16) (u32) address; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n", function, bit_width, pci_id->segment, pci_id->bus, pci_id->device, pci_id->function, pci_register)); switch (function) { case ACPI_READ: status = acpi_os_read_pci_configuration(pci_id, pci_register, value, bit_width); break; case ACPI_WRITE: status = acpi_os_write_pci_configuration(pci_id, pci_register, *value, bit_width); break; default: status = AE_BAD_PARAMETER; break; } 
return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_cmos_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the CMOS address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_cmos_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_cmos_space_handler); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_pci_bar_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the PCI bar_target address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_pci_bar_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_data_table_space_handler * * PARAMETERS: function - Read or Write operation * address - Where in the space to read or write * bit_width - 
Field width in bits (8, 16, or 32) * value - Pointer to in or out value * handler_context - Pointer to Handler's context * region_context - Pointer to context specific to the * accessed region * * RETURN: Status * * DESCRIPTION: Handler for the Data Table address space (Op Region) * ******************************************************************************/ acpi_status acpi_ex_data_table_space_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { ACPI_FUNCTION_TRACE(ex_data_table_space_handler); /* * Perform the memory read or write. The bit_width was already * validated. */ switch (function) { case ACPI_READ: ACPI_MEMCPY(ACPI_CAST_PTR(char, value), ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width)); break; case ACPI_WRITE: ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address), ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width)); break; default: return_ACPI_STATUS(AE_BAD_PARAMETER); } return_ACPI_STATUS(AE_OK); }
gpl-2.0
Satius/pia-linux-kernel
arch/arm/mach-omap2/mcbsp.c
126
4570
/* * linux/arch/arm/mach-omap2/mcbsp.c * * Copyright (C) 2008 Instituto Nokia de Tecnologia * Contact: Eduardo Valentin <eduardo.valentin@indt.org.br> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Multichannel mode not supported. */ #include <linux/module.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <mach/irqs.h> #include <plat/dma.h> #include <plat/cpu.h> #include <plat/mcbsp.h> #include <plat/omap_device.h> #include <linux/pm_runtime.h> #include "control.h" /* * FIXME: Find a mechanism to enable/disable runtime the McBSP ICLK autoidle. * Sidetone needs non-gated ICLK and sidetone autoidle is broken. */ #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" /* McBSP internal signal muxing function */ static int omap2_mcbsp1_mux_rx_clk(struct device *dev, const char *signal, const char *src) { u32 v; v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); if (!strcmp(signal, "clkr")) { if (!strcmp(src, "clkr")) v &= ~OMAP2_MCBSP1_CLKR_MASK; else if (!strcmp(src, "clkx")) v |= OMAP2_MCBSP1_CLKR_MASK; else return -EINVAL; } else if (!strcmp(signal, "fsr")) { if (!strcmp(src, "fsr")) v &= ~OMAP2_MCBSP1_FSR_MASK; else if (!strcmp(src, "fsx")) v |= OMAP2_MCBSP1_FSR_MASK; else return -EINVAL; } else { return -EINVAL; } omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0); return 0; } /* McBSP CLKS source switching function */ static int omap2_mcbsp_set_clk_src(struct device *dev, struct clk *clk, const char *src) { struct clk *fck_src; char *fck_src_name; int r; if (!strcmp(src, "clks_ext")) fck_src_name = "pad_fck"; else if (!strcmp(src, "clks_fclk")) fck_src_name = "prcm_fck"; else return -EINVAL; fck_src = clk_get(dev, fck_src_name); if (IS_ERR_OR_NULL(fck_src)) { pr_err("omap-mcbsp: %s: could not clk_get() %s\n", "clks", 
fck_src_name); return -EINVAL; } pm_runtime_put_sync(dev); r = clk_set_parent(clk, fck_src); if (IS_ERR_VALUE(r)) { pr_err("omap-mcbsp: %s: could not clk_set_parent() to %s\n", "clks", fck_src_name); clk_put(fck_src); return -EINVAL; } pm_runtime_get_sync(dev); clk_put(fck_src); return 0; } static int omap3_enable_st_clock(unsigned int id, bool enable) { unsigned int w; /* * Sidetone uses McBSP ICLK - which must not idle when sidetones * are enabled or sidetones start sounding ugly. */ w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); if (enable) w &= ~(1 << (id - 2)); else w |= 1 << (id - 2); omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE); return 0; } static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused) { int id, count = 1; char *name = "omap-mcbsp"; struct omap_hwmod *oh_device[2]; struct omap_mcbsp_platform_data *pdata = NULL; struct platform_device *pdev; sscanf(oh->name, "mcbsp%d", &id); pdata = kzalloc(sizeof(struct omap_mcbsp_platform_data), GFP_KERNEL); if (!pdata) { pr_err("%s: No memory for mcbsp\n", __func__); return -ENOMEM; } pdata->reg_step = 4; if (oh->class->rev < MCBSP_CONFIG_TYPE2) { pdata->reg_size = 2; } else { pdata->reg_size = 4; pdata->has_ccr = true; } pdata->set_clk_src = omap2_mcbsp_set_clk_src; if (id == 1) pdata->mux_signal = omap2_mcbsp1_mux_rx_clk; if (oh->class->rev == MCBSP_CONFIG_TYPE3) { if (id == 2) /* The FIFO has 1024 + 256 locations */ pdata->buffer_size = 0x500; else /* The FIFO has 128 locations */ pdata->buffer_size = 0x80; } if (oh->class->rev >= MCBSP_CONFIG_TYPE3) pdata->has_wakeup = true; oh_device[0] = oh; if (oh->dev_attr) { oh_device[1] = omap_hwmod_lookup(( (struct omap_mcbsp_dev_attr *)(oh->dev_attr))->sidetone); pdata->enable_st_clock = omap3_enable_st_clock; count++; } pdev = omap_device_build_ss(name, id, oh_device, count, pdata, sizeof(*pdata), NULL, 0, false); kfree(pdata); if (IS_ERR(pdev)) { pr_err("%s: Can't build omap_device for %s:%s.\n", __func__, name, oh->name); return 
PTR_ERR(pdev); } omap_mcbsp_count++; return 0; } static int __init omap2_mcbsp_init(void) { omap_hwmod_for_each_by_class("mcbsp", omap_init_mcbsp, NULL); mcbsp_ptr = kzalloc(omap_mcbsp_count * sizeof(struct omap_mcbsp *), GFP_KERNEL); if (!mcbsp_ptr) return -ENOMEM; return omap_mcbsp_init(); } arch_initcall(omap2_mcbsp_init);
gpl-2.0
NAM-IL/LINUX-rpi-4.2.y
drivers/phy/phy-xgene.c
894
61258
/* * AppliedMicro X-Gene Multi-purpose PHY driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Author: Loc Ho <lho@apm.com> * Tuan Phan <tphan@apm.com> * Suman Tripathi <stripathi@apm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * The APM X-Gene PHY consists of two PLL clock macro's (CMU) and lanes. * The first PLL clock macro is used for internal reference clock. The second * PLL clock macro is used to generate the clock for the PHY. This driver * configures the first PLL CMU, the second PLL CMU, and programs the PHY to * operate according to the mode of operation. The first PLL CMU is only * required if internal clock is enabled. * * Logical Layer Out Of HW module units: * * ----------------- * | Internal | |------| * | Ref PLL CMU |----| | ------------- --------- * ------------ ---- | MUX |-----|PHY PLL CMU|----| Serdes| * | | | | --------- * External Clock ------| | ------------- * |------| * * The Ref PLL CMU CSR (Configuration System Registers) is accessed * indirectly from the SDS offset at 0x2000. It is only required for * internal reference clock. * The PHY PLL CMU CSR is accessed indirectly from the SDS offset at 0x0000. * The Serdes CSR is accessed indirectly from the SDS offset at 0x0400. * * The Ref PLL CMU can be located within the same PHY IP or outside the PHY IP * due to shared Ref PLL CMU. 
For PHY with Ref PLL CMU shared with another IP, * it is located outside the PHY IP. This is the case for the PHY located * at 0x1f23a000 (SATA Port 4/5). For such PHY, another resource is required * to located the SDS/Ref PLL CMU module and its clock for that IP enabled. * * Currently, this driver only supports Gen3 SATA mode with external clock. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/phy/phy.h> #include <linux/clk.h> /* Max 2 lanes per a PHY unit */ #define MAX_LANE 2 /* Register offset inside the PHY */ #define SERDES_PLL_INDIRECT_OFFSET 0x0000 #define SERDES_PLL_REF_INDIRECT_OFFSET 0x2000 #define SERDES_INDIRECT_OFFSET 0x0400 #define SERDES_LANE_STRIDE 0x0200 /* Some default Serdes parameters */ #define DEFAULT_SATA_TXBOOST_GAIN { 0x1e, 0x1e, 0x1e } #define DEFAULT_SATA_TXEYEDIRECTION { 0x0, 0x0, 0x0 } #define DEFAULT_SATA_TXEYETUNING { 0xa, 0xa, 0xa } #define DEFAULT_SATA_SPD_SEL { 0x1, 0x3, 0x7 } #define DEFAULT_SATA_TXAMP { 0x8, 0x8, 0x8 } #define DEFAULT_SATA_TXCN1 { 0x2, 0x2, 0x2 } #define DEFAULT_SATA_TXCN2 { 0x0, 0x0, 0x0 } #define DEFAULT_SATA_TXCP1 { 0xa, 0xa, 0xa } #define SATA_SPD_SEL_GEN3 0x7 #define SATA_SPD_SEL_GEN2 0x3 #define SATA_SPD_SEL_GEN1 0x1 #define SSC_DISABLE 0 #define SSC_ENABLE 1 #define FBDIV_VAL_50M 0x77 #define REFDIV_VAL_50M 0x1 #define FBDIV_VAL_100M 0x3B #define REFDIV_VAL_100M 0x0 /* SATA Clock/Reset CSR */ #define SATACLKENREG 0x00000000 #define SATA0_CORE_CLKEN 0x00000002 #define SATA1_CORE_CLKEN 0x00000004 #define SATASRESETREG 0x00000004 #define SATA_MEM_RESET_MASK 0x00000020 #define SATA_MEM_RESET_RD(src) (((src) & 0x00000020) >> 5) #define SATA_SDS_RESET_MASK 0x00000004 #define SATA_CSR_RESET_MASK 0x00000001 #define SATA_CORE_RESET_MASK 0x00000002 #define SATA_PMCLK_RESET_MASK 0x00000010 #define SATA_PCLK_RESET_MASK 0x00000008 /* SDS CSR used for PHY Indirect access */ #define SATA_ENET_SDS_PCS_CTL0 0x00000000 #define 
REGSPEC_CFG_I_TX_WORDMODE0_SET(dst, src) \ (((dst) & ~0x00070000) | (((u32) (src) << 16) & 0x00070000)) #define REGSPEC_CFG_I_RX_WORDMODE0_SET(dst, src) \ (((dst) & ~0x00e00000) | (((u32) (src) << 21) & 0x00e00000)) #define SATA_ENET_SDS_CTL0 0x0000000c #define REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(dst, src) \ (((dst) & ~0x00007fff) | (((u32) (src)) & 0x00007fff)) #define SATA_ENET_SDS_CTL1 0x00000010 #define CFG_I_SPD_SEL_CDR_OVR1_SET(dst, src) \ (((dst) & ~0x0000000f) | (((u32) (src)) & 0x0000000f)) #define SATA_ENET_SDS_RST_CTL 0x00000024 #define SATA_ENET_SDS_IND_CMD_REG 0x0000003c #define CFG_IND_WR_CMD_MASK 0x00000001 #define CFG_IND_RD_CMD_MASK 0x00000002 #define CFG_IND_CMD_DONE_MASK 0x00000004 #define CFG_IND_ADDR_SET(dst, src) \ (((dst) & ~0x003ffff0) | (((u32) (src) << 4) & 0x003ffff0)) #define SATA_ENET_SDS_IND_RDATA_REG 0x00000040 #define SATA_ENET_SDS_IND_WDATA_REG 0x00000044 #define SATA_ENET_CLK_MACRO_REG 0x0000004c #define I_RESET_B_SET(dst, src) \ (((dst) & ~0x00000001) | (((u32) (src)) & 0x00000001)) #define I_PLL_FBDIV_SET(dst, src) \ (((dst) & ~0x001ff000) | (((u32) (src) << 12) & 0x001ff000)) #define I_CUSTOMEROV_SET(dst, src) \ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80)) #define O_PLL_LOCK_RD(src) (((src) & 0x40000000) >> 30) #define O_PLL_READY_RD(src) (((src) & 0x80000000) >> 31) /* PLL Clock Macro Unit (CMU) CSR accessing from SDS indirectly */ #define CMU_REG0 0x00000 #define CMU_REG0_PLL_REF_SEL_MASK 0x00002000 #define CMU_REG0_PLL_REF_SEL_SET(dst, src) \ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000)) #define CMU_REG0_PDOWN_MASK 0x00004000 #define CMU_REG0_CAL_COUNT_RESOL_SET(dst, src) \ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0)) #define CMU_REG1 0x00002 #define CMU_REG1_PLL_CP_SET(dst, src) \ (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00)) #define CMU_REG1_PLL_MANUALCAL_SET(dst, src) \ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) #define 
CMU_REG1_PLL_CP_SEL_SET(dst, src) \ (((dst) & ~0x000003e0) | (((u32) (src) << 5) & 0x000003e0)) #define CMU_REG1_REFCLK_CMOS_SEL_MASK 0x00000001 #define CMU_REG1_REFCLK_CMOS_SEL_SET(dst, src) \ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) #define CMU_REG2 0x00004 #define CMU_REG2_PLL_REFDIV_SET(dst, src) \ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) #define CMU_REG2_PLL_LFRES_SET(dst, src) \ (((dst) & ~0x0000001e) | (((u32) (src) << 1) & 0x0000001e)) #define CMU_REG2_PLL_FBDIV_SET(dst, src) \ (((dst) & ~0x00003fe0) | (((u32) (src) << 5) & 0x00003fe0)) #define CMU_REG3 0x00006 #define CMU_REG3_VCOVARSEL_SET(dst, src) \ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f)) #define CMU_REG3_VCO_MOMSEL_INIT_SET(dst, src) \ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) #define CMU_REG3_VCO_MANMOMSEL_SET(dst, src) \ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) #define CMU_REG4 0x00008 #define CMU_REG5 0x0000a #define CMU_REG5_PLL_LFSMCAP_SET(dst, src) \ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) #define CMU_REG5_PLL_LOCK_RESOLUTION_SET(dst, src) \ (((dst) & ~0x0000000e) | (((u32) (src) << 1) & 0x0000000e)) #define CMU_REG5_PLL_LFCAP_SET(dst, src) \ (((dst) & ~0x00003000) | (((u32) (src) << 12) & 0x00003000)) #define CMU_REG5_PLL_RESETB_MASK 0x00000001 #define CMU_REG6 0x0000c #define CMU_REG6_PLL_VREGTRIM_SET(dst, src) \ (((dst) & ~0x00000600) | (((u32) (src) << 9) & 0x00000600)) #define CMU_REG6_MAN_PVT_CAL_SET(dst, src) \ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) #define CMU_REG7 0x0000e #define CMU_REG7_PLL_CALIB_DONE_RD(src) ((0x00004000 & (u32) (src)) >> 14) #define CMU_REG7_VCO_CAL_FAIL_RD(src) ((0x00000c00 & (u32) (src)) >> 10) #define CMU_REG8 0x00010 #define CMU_REG9 0x00012 #define CMU_REG9_WORD_LEN_8BIT 0x000 #define CMU_REG9_WORD_LEN_10BIT 0x001 #define CMU_REG9_WORD_LEN_16BIT 0x002 #define CMU_REG9_WORD_LEN_20BIT 0x003 #define CMU_REG9_WORD_LEN_32BIT 0x004 
#define CMU_REG9_WORD_LEN_40BIT 0x005 #define CMU_REG9_WORD_LEN_64BIT 0x006 #define CMU_REG9_WORD_LEN_66BIT 0x007 #define CMU_REG9_TX_WORD_MODE_CH1_SET(dst, src) \ (((dst) & ~0x00000380) | (((u32) (src) << 7) & 0x00000380)) #define CMU_REG9_TX_WORD_MODE_CH0_SET(dst, src) \ (((dst) & ~0x00000070) | (((u32) (src) << 4) & 0x00000070)) #define CMU_REG9_PLL_POST_DIVBY2_SET(dst, src) \ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) #define CMU_REG9_VBG_BYPASSB_SET(dst, src) \ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) #define CMU_REG9_IGEN_BYPASS_SET(dst, src) \ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) #define CMU_REG10 0x00014 #define CMU_REG10_VREG_REFSEL_SET(dst, src) \ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) #define CMU_REG11 0x00016 #define CMU_REG12 0x00018 #define CMU_REG12_STATE_DELAY9_SET(dst, src) \ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) #define CMU_REG13 0x0001a #define CMU_REG14 0x0001c #define CMU_REG15 0x0001e #define CMU_REG16 0x00020 #define CMU_REG16_PVT_DN_MAN_ENA_MASK 0x00000001 #define CMU_REG16_PVT_UP_MAN_ENA_MASK 0x00000002 #define CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(dst, src) \ (((dst) & ~0x0000001c) | (((u32) (src) << 2) & 0x0000001c)) #define CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(dst, src) \ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) #define CMU_REG16_BYPASS_PLL_LOCK_SET(dst, src) \ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) #define CMU_REG17 0x00022 #define CMU_REG17_PVT_CODE_R2A_SET(dst, src) \ (((dst) & ~0x00007f00) | (((u32) (src) << 8) & 0x00007f00)) #define CMU_REG17_RESERVED_7_SET(dst, src) \ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0)) #define CMU_REG17_PVT_TERM_MAN_ENA_MASK 0x00008000 #define CMU_REG18 0x00024 #define CMU_REG19 0x00026 #define CMU_REG20 0x00028 #define CMU_REG21 0x0002a #define CMU_REG22 0x0002c #define CMU_REG23 0x0002e #define CMU_REG24 0x00030 #define CMU_REG25 0x00032 #define 
CMU_REG26 0x00034 #define CMU_REG26_FORCE_PLL_LOCK_SET(dst, src) \ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) #define CMU_REG27 0x00036 #define CMU_REG28 0x00038 #define CMU_REG29 0x0003a #define CMU_REG30 0x0003c #define CMU_REG30_LOCK_COUNT_SET(dst, src) \ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) #define CMU_REG30_PCIE_MODE_SET(dst, src) \ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) #define CMU_REG31 0x0003e #define CMU_REG32 0x00040 #define CMU_REG32_FORCE_VCOCAL_START_MASK 0x00004000 #define CMU_REG32_PVT_CAL_WAIT_SEL_SET(dst, src) \ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) #define CMU_REG32_IREF_ADJ_SET(dst, src) \ (((dst) & ~0x00000180) | (((u32) (src) << 7) & 0x00000180)) #define CMU_REG33 0x00042 #define CMU_REG34 0x00044 #define CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(dst, src) \ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f)) #define CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(dst, src) \ (((dst) & ~0x00000f00) | (((u32) (src) << 8) & 0x00000f00)) #define CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(dst, src) \ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) #define CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(dst, src) \ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000)) #define CMU_REG35 0x00046 #define CMU_REG35_PLL_SSC_MOD_SET(dst, src) \ (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00)) #define CMU_REG36 0x00048 #define CMU_REG36_PLL_SSC_EN_SET(dst, src) \ (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010)) #define CMU_REG36_PLL_SSC_VSTEP_SET(dst, src) \ (((dst) & ~0x0000ffc0) | (((u32) (src) << 6) & 0x0000ffc0)) #define CMU_REG36_PLL_SSC_DSMSEL_SET(dst, src) \ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) #define CMU_REG37 0x0004a #define CMU_REG38 0x0004c #define CMU_REG39 0x0004e /* PHY lane CSR accessing from SDS indirectly */ #define RXTX_REG0 0x000 #define RXTX_REG0_CTLE_EQ_HR_SET(dst, src) \ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 
0x0000f800)) #define RXTX_REG0_CTLE_EQ_QR_SET(dst, src) \ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0)) #define RXTX_REG0_CTLE_EQ_FR_SET(dst, src) \ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e)) #define RXTX_REG1 0x002 #define RXTX_REG1_RXACVCM_SET(dst, src) \ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000)) #define RXTX_REG1_CTLE_EQ_SET(dst, src) \ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80)) #define RXTX_REG1_RXVREG1_SET(dst, src) \ (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060)) #define RXTX_REG1_RXIREF_ADJ_SET(dst, src) \ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) #define RXTX_REG2 0x004 #define RXTX_REG2_VTT_ENA_SET(dst, src) \ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) #define RXTX_REG2_TX_FIFO_ENA_SET(dst, src) \ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) #define RXTX_REG2_VTT_SEL_SET(dst, src) \ (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0)) #define RXTX_REG4 0x008 #define RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK 0x00000040 #define RXTX_REG4_TX_DATA_RATE_SET(dst, src) \ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) #define RXTX_REG4_TX_WORD_MODE_SET(dst, src) \ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) #define RXTX_REG5 0x00a #define RXTX_REG5_TX_CN1_SET(dst, src) \ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) #define RXTX_REG5_TX_CP1_SET(dst, src) \ (((dst) & ~0x000007e0) | (((u32) (src) << 5) & 0x000007e0)) #define RXTX_REG5_TX_CN2_SET(dst, src) \ (((dst) & ~0x0000001f) | (((u32) (src) << 0) & 0x0000001f)) #define RXTX_REG6 0x00c #define RXTX_REG6_TXAMP_CNTL_SET(dst, src) \ (((dst) & ~0x00000780) | (((u32) (src) << 7) & 0x00000780)) #define RXTX_REG6_TXAMP_ENA_SET(dst, src) \ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) #define RXTX_REG6_RX_BIST_ERRCNT_RD_SET(dst, src) \ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) #define RXTX_REG6_TX_IDLE_SET(dst, src) 
\ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) #define RXTX_REG6_RX_BIST_RESYNC_SET(dst, src) \ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) #define RXTX_REG7 0x00e #define RXTX_REG7_RESETB_RXD_MASK 0x00000100 #define RXTX_REG7_RESETB_RXA_MASK 0x00000080 #define RXTX_REG7_BIST_ENA_RX_SET(dst, src) \ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) #define RXTX_REG7_RX_WORD_MODE_SET(dst, src) \ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) #define RXTX_REG8 0x010 #define RXTX_REG8_CDR_LOOP_ENA_SET(dst, src) \ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000)) #define RXTX_REG8_CDR_BYPASS_RXLOS_SET(dst, src) \ (((dst) & ~0x00000800) | (((u32) (src) << 11) & 0x00000800)) #define RXTX_REG8_SSC_ENABLE_SET(dst, src) \ (((dst) & ~0x00000200) | (((u32) (src) << 9) & 0x00000200)) #define RXTX_REG8_SD_VREF_SET(dst, src) \ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) #define RXTX_REG8_SD_DISABLE_SET(dst, src) \ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) #define RXTX_REG7 0x00e #define RXTX_REG7_RESETB_RXD_SET(dst, src) \ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) #define RXTX_REG7_RESETB_RXA_SET(dst, src) \ (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080)) #define RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK 0x00004000 #define RXTX_REG7_LOOP_BACK_ENA_CTLE_SET(dst, src) \ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000)) #define RXTX_REG11 0x016 #define RXTX_REG11_PHASE_ADJUST_LIMIT_SET(dst, src) \ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) #define RXTX_REG12 0x018 #define RXTX_REG12_LATCH_OFF_ENA_SET(dst, src) \ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000)) #define RXTX_REG12_SUMOS_ENABLE_SET(dst, src) \ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) #define RXTX_REG12_RX_DET_TERM_ENABLE_MASK 0x00000002 #define RXTX_REG12_RX_DET_TERM_ENABLE_SET(dst, src) \ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 
0x00000002)) #define RXTX_REG13 0x01a #define RXTX_REG14 0x01c #define RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(dst, src) \ (((dst) & ~0x0000003f) | (((u32) (src) << 0) & 0x0000003f)) #define RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(dst, src) \ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) #define RXTX_REG26 0x034 #define RXTX_REG26_PERIOD_ERROR_LATCH_SET(dst, src) \ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) #define RXTX_REG26_BLWC_ENA_SET(dst, src) \ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) #define RXTX_REG21 0x02a #define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) #define RXTX_REG21_XO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) #define RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(src) ((0x0000000f & (u32)(src))) #define RXTX_REG22 0x02c #define RXTX_REG22_SO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) #define RXTX_REG22_EO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) #define RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(src) ((0x0000000f & (u32)(src))) #define RXTX_REG23 0x02e #define RXTX_REG23_DE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) #define RXTX_REG23_XE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) #define RXTX_REG24 0x030 #define RXTX_REG24_EE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) #define RXTX_REG24_SE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) #define RXTX_REG27 0x036 #define RXTX_REG28 0x038 #define RXTX_REG31 0x03e #define RXTX_REG38 0x04c #define RXTX_REG38_CUSTOMER_PINMODE_INV_SET(dst, src) \ (((dst) & 0x0000fffe) | (((u32) (src) << 1) & 0x0000fffe)) #define RXTX_REG39 0x04e #define RXTX_REG40 0x050 #define RXTX_REG41 0x052 #define RXTX_REG42 0x054 #define RXTX_REG43 0x056 #define RXTX_REG44 0x058 #define RXTX_REG45 0x05a #define RXTX_REG46 0x05c #define RXTX_REG47 0x05e #define RXTX_REG48 0x060 #define RXTX_REG49 0x062 #define RXTX_REG50 0x064 #define RXTX_REG51 0x066 #define RXTX_REG52 0x068 #define RXTX_REG53 0x06a 
#define RXTX_REG54	0x06c
#define RXTX_REG55	0x06e
#define RXTX_REG61	0x07a
#define RXTX_REG61_ISCAN_INBERT_SET(dst, src) \
	(((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
#define RXTX_REG61_LOADFREQ_SHIFT_SET(dst, src) \
	(((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
#define RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(dst, src) \
	(((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
#define RXTX_REG61_SPD_SEL_CDR_SET(dst, src) \
	(((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
#define RXTX_REG62	0x07c
#define RXTX_REG62_PERIOD_H1_QLATCH_SET(dst, src) \
	(((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
#define RXTX_REG81	0x0a2
/*
 * NOTE(review): the RXTX_REG89_* field accessors below are applied to the
 * registers at RXTX_REG81 + i*2 in xgene_phy_sata_cfg_lanes(); there is no
 * separate RXTX_REG89 offset define in this file.
 */
#define RXTX_REG89_MU_TH7_SET(dst, src) \
	(((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
#define RXTX_REG89_MU_TH8_SET(dst, src) \
	(((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
#define RXTX_REG89_MU_TH9_SET(dst, src) \
	(((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
#define RXTX_REG96	0x0c0
#define RXTX_REG96_MU_FREQ1_SET(dst, src) \
	(((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
#define RXTX_REG96_MU_FREQ2_SET(dst, src) \
	(((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
#define RXTX_REG96_MU_FREQ3_SET(dst, src) \
	(((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
#define RXTX_REG99	0x0c6
#define RXTX_REG99_MU_PHASE1_SET(dst, src) \
	(((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
#define RXTX_REG99_MU_PHASE2_SET(dst, src) \
	(((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
#define RXTX_REG99_MU_PHASE3_SET(dst, src) \
	(((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
#define RXTX_REG102	0x0cc
#define RXTX_REG102_FREQLOOP_LIMIT_SET(dst, src) \
	(((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
#define RXTX_REG114	0x0e4
#define RXTX_REG121	0x0f2
#define RXTX_REG121_SUMOS_CAL_CODE_RD(src) ((0x0000003e & (u32)(src)) >> 0x1)
#define RXTX_REG125	0x0fa
#define RXTX_REG125_PQ_REG_SET(dst, src) \
	(((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
#define RXTX_REG125_SIGN_PQ_SET(dst, src) \
	(((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
#define RXTX_REG125_SIGN_PQ_2C_SET(dst, src) \
	(((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
#define RXTX_REG125_PHZ_MANUALCODE_SET(dst, src) \
	(((dst) & ~0x0000007c) | (((u32) (src) << 2) & 0x0000007c))
#define RXTX_REG125_PHZ_MANUAL_SET(dst, src) \
	(((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
#define RXTX_REG127	0x0fe
#define RXTX_REG127_FORCE_SUM_CAL_START_MASK	0x00000002
#define RXTX_REG127_FORCE_LAT_CAL_START_MASK	0x00000004
#define RXTX_REG127_FORCE_SUM_CAL_START_SET(dst, src) \
	(((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
#define RXTX_REG127_FORCE_LAT_CAL_START_SET(dst, src) \
	(((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
#define RXTX_REG127_LATCH_MAN_CAL_ENA_SET(dst, src) \
	(((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
#define RXTX_REG127_DO_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
#define RXTX_REG127_XO_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
#define RXTX_REG128	0x100
#define RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(dst, src) \
	(((dst) & ~0x0000000c) | (((u32) (src) << 2) & 0x0000000c))
#define RXTX_REG128_EO_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
#define RXTX_REG128_SO_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
#define RXTX_REG129	0x102
#define RXTX_REG129_DE_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
#define RXTX_REG129_XE_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
#define RXTX_REG130	0x104
#define RXTX_REG130_EE_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
#define RXTX_REG130_SE_LATCH_MANCAL_SET(dst, src) \
	(((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
#define RXTX_REG145	0x122
#define RXTX_REG145_TX_IDLE_SATA_SET(dst, src) \
	(((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
#define RXTX_REG145_RXES_ENA_SET(dst, src) \
	(((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
#define RXTX_REG145_RXDFE_CONFIG_SET(dst, src) \
	(((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
#define RXTX_REG145_RXVWES_LATENA_SET(dst, src) \
	(((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
#define RXTX_REG147	0x126
#define RXTX_REG148	0x128

/* Clock macro type */
enum cmu_type_t {
	REF_CMU = 0,	/* Clock macro is the internal reference clock */
	PHY_CMU = 1,	/* Clock macro is the PLL for the Serdes */
};

/*
 * NOTE(review): both selectors deliberately (?) share value 0 -- the mux
 * appears to have a single position covering ATA/SGMII; confirm against the
 * SoC mux documentation before adding new members.
 */
enum mux_type_t {
	MUX_SELECT_ATA = 0,	/* Switch the MUX to ATA */
	MUX_SELECT_SGMMII = 0,	/* Switch the MUX to SGMII */
};

/* Reference clock source for the PLL */
enum clk_type_t {
	CLK_EXT_DIFF = 0,	/* External differential */
	CLK_INT_DIFF = 1,	/* Internal differential */
	CLK_INT_SING = 2,	/* Internal single ended */
};

enum phy_mode {
	MODE_SATA = 0,	/* List them for simple reference */
	MODE_SGMII = 1,
	MODE_PCIE = 2,
	MODE_USB = 3,
	MODE_XFI = 4,
	MODE_MAX
};

/*
 * Per-lane Serdes tuning overrides. Each tx* table holds three entries per
 * lane (indexed lane*3 + speed[lane]); txspeed[] is indexed by the
 * generation index in speed[].
 */
struct xgene_sata_override_param {
	u32 speed[MAX_LANE];		/* Index for override parameter per lane */
	u32 txspeed[3];			/* Tx speed */
	u32 txboostgain[MAX_LANE*3];	/* Tx freq boost and gain control */
	u32 txeyetuning[MAX_LANE*3];	/* Tx eye tuning */
	u32 txeyedirection[MAX_LANE*3];	/* Tx eye tuning direction */
	u32 txamplitude[MAX_LANE*3];	/* Tx amplitude control */
	u32 txprecursor_cn1[MAX_LANE*3]; /* Tx emphasis taps 1st pre-cursor */
	u32 txprecursor_cn2[MAX_LANE*3]; /* Tx emphasis taps 2nd pre-cursor */
	u32 txpostcursor_cp1[MAX_LANE*3]; /* Tx emphasis taps post-cursor */
};

/* Driver context for one PHY instance */
struct xgene_phy_ctx {
	struct device *dev;
	struct phy *phy;
	enum phy_mode mode;		/* Mode of operation */
	enum clk_type_t clk_type;	/* Input clock selection */
	void __iomem *sds_base;		/* PHY CSR base addr */
	struct clk 
*clk; /* Optional clock */ /* Override Serdes parameters */ struct xgene_sata_override_param sata_param; }; /* * For chip earlier than A3 version, enable this flag. * To enable, pass boot argument phy_xgene.preA3Chip=1 */ static int preA3Chip; MODULE_PARM_DESC(preA3Chip, "Enable pre-A3 chip support (1=enable 0=disable)"); module_param_named(preA3Chip, preA3Chip, int, 0444); static void sds_wr(void __iomem *csr_base, u32 indirect_cmd_reg, u32 indirect_data_reg, u32 addr, u32 data) { unsigned long deadline = jiffies + HZ; u32 val; u32 cmd; cmd = CFG_IND_WR_CMD_MASK | CFG_IND_CMD_DONE_MASK; cmd = CFG_IND_ADDR_SET(cmd, addr); writel(data, csr_base + indirect_data_reg); readl(csr_base + indirect_data_reg); /* Force a barrier */ writel(cmd, csr_base + indirect_cmd_reg); readl(csr_base + indirect_cmd_reg); /* Force a barrier */ do { val = readl(csr_base + indirect_cmd_reg); } while (!(val & CFG_IND_CMD_DONE_MASK) && time_before(jiffies, deadline)); if (!(val & CFG_IND_CMD_DONE_MASK)) pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n", csr_base + indirect_cmd_reg, addr, data); } static void sds_rd(void __iomem *csr_base, u32 indirect_cmd_reg, u32 indirect_data_reg, u32 addr, u32 *data) { unsigned long deadline = jiffies + HZ; u32 val; u32 cmd; cmd = CFG_IND_RD_CMD_MASK | CFG_IND_CMD_DONE_MASK; cmd = CFG_IND_ADDR_SET(cmd, addr); writel(cmd, csr_base + indirect_cmd_reg); readl(csr_base + indirect_cmd_reg); /* Force a barrier */ do { val = readl(csr_base + indirect_cmd_reg); } while (!(val & CFG_IND_CMD_DONE_MASK) && time_before(jiffies, deadline)); *data = readl(csr_base + indirect_data_reg); if (!(val & CFG_IND_CMD_DONE_MASK)) pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n", csr_base + indirect_cmd_reg, addr, *data); } static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, u32 reg, u32 data) { void __iomem *sds_base = ctx->sds_base; u32 val; if (cmu_type == REF_CMU) reg += SERDES_PLL_REF_INDIRECT_OFFSET; else reg += 
SERDES_PLL_INDIRECT_OFFSET;
	sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_WDATA_REG, reg, data);
	sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
	pr_debug("CMU WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data, val);
}

/* Read a CMU register from the bank selected by cmu_type (REF or PLL) */
static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
		   u32 reg, u32 *data)
{
	void __iomem *sds_base = ctx->sds_base;

	if (cmu_type == REF_CMU)
		reg += SERDES_PLL_REF_INDIRECT_OFFSET;
	else
		reg += SERDES_PLL_INDIRECT_OFFSET;
	sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_RDATA_REG, reg, data);
	pr_debug("CMU RD addr 0x%X value 0x%08X\n", reg, *data);
}

/* Pulse the given bits high then back low (1 -> 0) in a CMU register */
static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
			   u32 reg, u32 bits)
{
	u32 val;

	cmu_rd(ctx, cmu_type, reg, &val);
	val |= bits;
	cmu_wr(ctx, cmu_type, reg, val);
	cmu_rd(ctx, cmu_type, reg, &val);
	val &= ~bits;
	cmu_wr(ctx, cmu_type, reg, val);
}

/* Read-modify-write: clear the given bits in a CMU register */
static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
			u32 reg, u32 bits)
{
	u32 val;

	cmu_rd(ctx, cmu_type, reg, &val);
	val &= ~bits;
	cmu_wr(ctx, cmu_type, reg, val);
}

/* Read-modify-write: set the given bits in a CMU register */
static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
			u32 reg, u32 bits)
{
	u32 val;

	cmu_rd(ctx, cmu_type, reg, &val);
	val |= bits;
	cmu_wr(ctx, cmu_type, reg, val);
}

/*
 * Write a per-lane Serdes register (lane-strided indirect space) and read
 * it back for debug tracing.
 */
static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data)
{
	void __iomem *sds_base = ctx->sds_base;
	u32 val;

	reg += SERDES_INDIRECT_OFFSET;
	reg += lane * SERDES_LANE_STRIDE;
	sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_WDATA_REG, reg, data);
	sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
	pr_debug("SERDES WR addr 0x%X value 0x%08X <-> 0x%08X\n",
		 reg, data, val);
}

/* Read a per-lane Serdes register */
static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data)
{
	void __iomem *sds_base = ctx->sds_base;

	reg += SERDES_INDIRECT_OFFSET;
	reg += lane * SERDES_LANE_STRIDE;
	sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
	       SATA_ENET_SDS_IND_RDATA_REG, reg, data);
	pr_debug("SERDES RD addr 0x%X value 0x%08X\n", reg, *data);
}

/* Read-modify-write: clear the given bits in a per-lane Serdes register */
static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane,
			   u32 reg, u32 bits)
{
	u32 val;

	serdes_rd(ctx, lane, reg, &val);
	val &= ~bits;
	serdes_wr(ctx, lane, reg, val);
}

/* Read-modify-write: set the given bits in a per-lane Serdes register */
static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane,
			   u32 reg, u32 bits)
{
	u32 val;

	serdes_rd(ctx, lane, reg, &val);
	val |= bits;
	serdes_wr(ctx, lane, reg, val);
}

/*
 * Select and program the CMU reference-clock source (external differential,
 * internal differential, or internal single-ended) and the reset-sequence
 * stage delays.
 */
static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx,
				       enum cmu_type_t cmu_type,
				       enum clk_type_t clk_type)
{
	u32 val;

	/* Set the reset sequence delay for TX ready assertion */
	cmu_rd(ctx, cmu_type, CMU_REG12, &val);
	val = CMU_REG12_STATE_DELAY9_SET(val, 0x1);
	cmu_wr(ctx, cmu_type, CMU_REG12, val);
	/* Set the programmable stage delays between various enable stages */
	cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222);
	cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225);

	/* Configure clock type */
	if (clk_type == CLK_EXT_DIFF) {
		/* Select external clock mux */
		cmu_rd(ctx, cmu_type, CMU_REG0, &val);
		val = CMU_REG0_PLL_REF_SEL_SET(val, 0x0);
		cmu_wr(ctx, cmu_type, CMU_REG0, val);
		/* Select CMOS as reference clock */
		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		dev_dbg(ctx->dev, "Set external reference clock\n");
	} else if (clk_type == CLK_INT_DIFF) {
		/* Select internal clock mux */
		cmu_rd(ctx, cmu_type, CMU_REG0, &val);
		val = CMU_REG0_PLL_REF_SEL_SET(val, 0x1);
		cmu_wr(ctx, cmu_type, CMU_REG0, val);
		/* Select CMOS as reference clock */
		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		dev_dbg(ctx->dev, "Set internal reference clock\n");
	} else if (clk_type == CLK_INT_SING) {
		/*
		 * NOTE: This clock type is NOT supported for a controller
		 * whose internal clock is shared with the PCIe controller.
		 *
		 * Select internal clock mux
		 */
		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = 
CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		/* Select CML as reference clock */
		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		dev_dbg(ctx->dev,
			"Set internal single ended reference clock\n");
	}
}

/*
 * Program the clock-macro (CMU) core for SATA operation: VCO calibration
 * thresholds and counters, charge pump, loop filter, feedback/reference
 * dividers for a 50MHz or 100MHz reference, 20-bit word mode, and lock
 * detection. Several settings differ between pre-A3 and A3+ silicon
 * (preA3Chip) and between the REF and PLL CMUs.
 */
static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx,
					enum cmu_type_t cmu_type,
					enum clk_type_t clk_type)
{
	u32 val;
	int ref_100MHz;

	if (cmu_type == REF_CMU) {
		/* Set VCO calibration voltage threshold */
		cmu_rd(ctx, cmu_type, CMU_REG34, &val);
		val = CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(val, 0x7);
		val = CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(val, 0xc);
		val = CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(val, 0x3);
		val = CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(val, 0x8);
		cmu_wr(ctx, cmu_type, CMU_REG34, val);
	}

	/* Set the VCO calibration counter */
	cmu_rd(ctx, cmu_type, CMU_REG0, &val);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x4);
	else
		val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x7);
	cmu_wr(ctx, cmu_type, CMU_REG0, val);

	/* Configure PLL for calibration */
	cmu_rd(ctx, cmu_type, CMU_REG1, &val);
	val = CMU_REG1_PLL_CP_SET(val, 0x1);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG1_PLL_CP_SEL_SET(val, 0x5);
	else
		val = CMU_REG1_PLL_CP_SEL_SET(val, 0x3);
	if (cmu_type == REF_CMU)
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
	else
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x1);
	cmu_wr(ctx, cmu_type, CMU_REG1, val);

	/* Hold the PLL in reset while it is being reconfigured */
	if (cmu_type != REF_CMU)
		cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);

	/* Configure the PLL for either 100MHz or 50MHz */
	cmu_rd(ctx, cmu_type, CMU_REG2, &val);
	if (cmu_type == REF_CMU) {
		val = CMU_REG2_PLL_LFRES_SET(val, 0xa);
		ref_100MHz = 1;
	} else {
		val = CMU_REG2_PLL_LFRES_SET(val, 0x3);
		if (clk_type == CLK_EXT_DIFF)
			ref_100MHz = 0;
		else
			ref_100MHz = 1;
	}
	if (ref_100MHz) {
		val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_100M);
		val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_100M);
	} else {
		val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_50M);
		val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_50M);
	}
	cmu_wr(ctx, cmu_type, CMU_REG2, val);

	/* Configure the VCO */
	cmu_rd(ctx, cmu_type, CMU_REG3, &val);
	if (cmu_type == REF_CMU) {
		val = CMU_REG3_VCOVARSEL_SET(val, 0x3);
		val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x10);
	} else {
		val = CMU_REG3_VCOVARSEL_SET(val, 0xF);
		if (preA3Chip)
			val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x15);
		else
			val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x1a);
		val = CMU_REG3_VCO_MANMOMSEL_SET(val, 0x15);
	}
	cmu_wr(ctx, cmu_type, CMU_REG3, val);

	/* Disable force PLL lock */
	cmu_rd(ctx, cmu_type, CMU_REG26, &val);
	val = CMU_REG26_FORCE_PLL_LOCK_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG26, val);

	/* Setup PLL loop filter */
	cmu_rd(ctx, cmu_type, CMU_REG5, &val);
	val = CMU_REG5_PLL_LFSMCAP_SET(val, 0x3);
	val = CMU_REG5_PLL_LFCAP_SET(val, 0x3);
	if (cmu_type == REF_CMU || !preA3Chip)
		val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x7);
	else
		val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x4);
	cmu_wr(ctx, cmu_type, CMU_REG5, val);

	/* Enable or disable manual calibration */
	cmu_rd(ctx, cmu_type, CMU_REG6, &val);
	val = CMU_REG6_PLL_VREGTRIM_SET(val, preA3Chip ? 0x0 : 0x2);
	val = CMU_REG6_MAN_PVT_CAL_SET(val, preA3Chip ? 
0x1 : 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG6, val);

	/* Configure lane for 20-bits */
	if (cmu_type == PHY_CMU) {
		cmu_rd(ctx, cmu_type, CMU_REG9, &val);
		val = CMU_REG9_TX_WORD_MODE_CH1_SET(val,
						    CMU_REG9_WORD_LEN_20BIT);
		val = CMU_REG9_TX_WORD_MODE_CH0_SET(val,
						    CMU_REG9_WORD_LEN_20BIT);
		val = CMU_REG9_PLL_POST_DIVBY2_SET(val, 0x1);
		if (!preA3Chip) {
			val = CMU_REG9_VBG_BYPASSB_SET(val, 0x0);
			val = CMU_REG9_IGEN_BYPASS_SET(val, 0x0);
		}
		cmu_wr(ctx, cmu_type, CMU_REG9, val);

		if (!preA3Chip) {
			cmu_rd(ctx, cmu_type, CMU_REG10, &val);
			val = CMU_REG10_VREG_REFSEL_SET(val, 0x1);
			cmu_wr(ctx, cmu_type, CMU_REG10, val);
		}
	}

	cmu_rd(ctx, cmu_type, CMU_REG16, &val);
	val = CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(val, 0x1);
	val = CMU_REG16_BYPASS_PLL_LOCK_SET(val, 0x1);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x4);
	else
		val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
	cmu_wr(ctx, cmu_type, CMU_REG16, val);

	/* Configure for SATA */
	cmu_rd(ctx, cmu_type, CMU_REG30, &val);
	val = CMU_REG30_PCIE_MODE_SET(val, 0x0);
	val = CMU_REG30_LOCK_COUNT_SET(val, 0x3);
	cmu_wr(ctx, cmu_type, CMU_REG30, val);

	/* Disable state machine bypass */
	cmu_wr(ctx, cmu_type, CMU_REG31, 0xF);

	cmu_rd(ctx, cmu_type, CMU_REG32, &val);
	val = CMU_REG32_PVT_CAL_WAIT_SEL_SET(val, 0x3);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG32_IREF_ADJ_SET(val, 0x3);
	else
		val = CMU_REG32_IREF_ADJ_SET(val, 0x1);
	cmu_wr(ctx, cmu_type, CMU_REG32, val);

	/* Set VCO calibration threshold */
	if (cmu_type != REF_CMU && preA3Chip)
		cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27);
	else
		cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c);

	/* Set CTLE Override and override waiting from state machine */
	cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F);
}

/*
 * Enable spread-spectrum clocking on the given CMU: program the SSC
 * modulation and vertical step, then reset the PLL and force the VCO
 * calibration to restart so the new settings take effect.
 */
static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx,
				 enum cmu_type_t cmu_type)
{
	u32 val;

	/* Set SSC modulation value */
	cmu_rd(ctx, cmu_type, CMU_REG35, &val);
	val = CMU_REG35_PLL_SSC_MOD_SET(val, 98);
	cmu_wr(ctx, cmu_type, CMU_REG35, val);

	/* Enable SSC, set vertical step and DSM value */
	cmu_rd(ctx, cmu_type, CMU_REG36, &val);
	val = CMU_REG36_PLL_SSC_VSTEP_SET(val, 30);
	val = CMU_REG36_PLL_SSC_EN_SET(val, 1);
	val = CMU_REG36_PLL_SSC_DSMSEL_SET(val, 1);
	cmu_wr(ctx, cmu_type, CMU_REG36, val);

	/* Reset the PLL */
	cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
	cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);

	/* Force VCO calibration to restart */
	cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
		       CMU_REG32_FORCE_VCOCAL_START_MASK);
}

/*
 * Program the per-lane Tx/Rx Serdes registers for SATA. Per-lane overrides
 * (boost/gain, emphasis, amplitude, eye tuning) come from
 * ctx->sata_param indexed by lane*3 + speed[lane].
 */
static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx)
{
	u32 val;
	u32 reg;
	int i;
	int lane;

	for (lane = 0; lane < MAX_LANE; lane++) {
		serdes_wr(ctx, lane, RXTX_REG147, 0x6);

		/* Set boost control for quarter, half, and full rate */
		serdes_rd(ctx, lane, RXTX_REG0, &val);
		val = RXTX_REG0_CTLE_EQ_HR_SET(val, 0x10);
		val = RXTX_REG0_CTLE_EQ_QR_SET(val, 0x10);
		val = RXTX_REG0_CTLE_EQ_FR_SET(val, 0x10);
		serdes_wr(ctx, lane, RXTX_REG0, val);

		/* Set boost control value */
		serdes_rd(ctx, lane, RXTX_REG1, &val);
		val = RXTX_REG1_RXACVCM_SET(val, 0x7);
		val = RXTX_REG1_CTLE_EQ_SET(val,
			ctx->sata_param.txboostgain[lane * 3 +
			ctx->sata_param.speed[lane]]);
		serdes_wr(ctx, lane, RXTX_REG1, val);

		/*
		 * Latch VTT value based on the termination to ground and
		 * enable TX FIFO
		 */
		serdes_rd(ctx, lane, RXTX_REG2, &val);
		val = RXTX_REG2_VTT_ENA_SET(val, 0x1);
		val = RXTX_REG2_VTT_SEL_SET(val, 0x1);
		val = RXTX_REG2_TX_FIFO_ENA_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG2, val);

		/* Configure Tx for 20-bits */
		serdes_rd(ctx, lane, RXTX_REG4, &val);
		val = RXTX_REG4_TX_WORD_MODE_SET(val,
						 CMU_REG9_WORD_LEN_20BIT);
		serdes_wr(ctx, lane, RXTX_REG4, val);

		if (!preA3Chip) {
			serdes_rd(ctx, lane, RXTX_REG1, &val);
			val = RXTX_REG1_RXVREG1_SET(val, 0x2);
			val = RXTX_REG1_RXIREF_ADJ_SET(val, 0x2);
			serdes_wr(ctx, lane, RXTX_REG1, val);
		}

		/* Set pre-emphasis first 1 and 2, and post-emphasis values */
		serdes_rd(ctx, lane, RXTX_REG5, &val);
		val = RXTX_REG5_TX_CN1_SET(val,
			ctx->sata_param.txprecursor_cn1[lane * 3 + 
ctx->sata_param.speed[lane]]);
		val = RXTX_REG5_TX_CP1_SET(val,
			ctx->sata_param.txpostcursor_cp1[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG5_TX_CN2_SET(val,
			ctx->sata_param.txprecursor_cn2[lane * 3 +
			ctx->sata_param.speed[lane]]);
		serdes_wr(ctx, lane, RXTX_REG5, val);

		/* Set TX amplitude value */
		serdes_rd(ctx, lane, RXTX_REG6, &val);
		val = RXTX_REG6_TXAMP_CNTL_SET(val,
			ctx->sata_param.txamplitude[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG6_TXAMP_ENA_SET(val, 0x1);
		val = RXTX_REG6_TX_IDLE_SET(val, 0x0);
		val = RXTX_REG6_RX_BIST_RESYNC_SET(val, 0x0);
		val = RXTX_REG6_RX_BIST_ERRCNT_RD_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG6, val);

		/* Configure Rx for 20-bits */
		serdes_rd(ctx, lane, RXTX_REG7, &val);
		val = RXTX_REG7_BIST_ENA_RX_SET(val, 0x0);
		val = RXTX_REG7_RX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
		serdes_wr(ctx, lane, RXTX_REG7, val);

		/* Set CDR and LOS values and enable Rx SSC */
		serdes_rd(ctx, lane, RXTX_REG8, &val);
		val = RXTX_REG8_CDR_LOOP_ENA_SET(val, 0x1);
		val = RXTX_REG8_CDR_BYPASS_RXLOS_SET(val, 0x0);
		val = RXTX_REG8_SSC_ENABLE_SET(val, 0x1);
		val = RXTX_REG8_SD_DISABLE_SET(val, 0x0);
		val = RXTX_REG8_SD_VREF_SET(val, 0x4);
		serdes_wr(ctx, lane, RXTX_REG8, val);

		/* Set phase adjust upper/lower limits */
		serdes_rd(ctx, lane, RXTX_REG11, &val);
		val = RXTX_REG11_PHASE_ADJUST_LIMIT_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG11, val);

		/* Enable Latch Off; disable SUMOS and Tx termination */
		serdes_rd(ctx, lane, RXTX_REG12, &val);
		val = RXTX_REG12_LATCH_OFF_ENA_SET(val, 0x1);
		val = RXTX_REG12_SUMOS_ENABLE_SET(val, 0x0);
		val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG12, val);

		/* Set period error latch to 512T and enable BWL */
		serdes_rd(ctx, lane, RXTX_REG26, &val);
		val = RXTX_REG26_PERIOD_ERROR_LATCH_SET(val, 0x0);
		val = RXTX_REG26_BLWC_ENA_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG26, val);

		serdes_wr(ctx, lane, RXTX_REG28, 0x0);

		/* Set DFE loop preset value */
		serdes_wr(ctx, lane, RXTX_REG31, 0x0);

		/* Set Eye Monitor counter width to 12-bit */
		serdes_rd(ctx, lane, RXTX_REG61, &val);
		val = RXTX_REG61_ISCAN_INBERT_SET(val, 0x1);
		val = RXTX_REG61_LOADFREQ_SHIFT_SET(val, 0x0);
		val = RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG61, val);

		serdes_rd(ctx, lane, RXTX_REG62, &val);
		val = RXTX_REG62_PERIOD_H1_QLATCH_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG62, val);

		/* Set BW select tap X for DFE loop */
		for (i = 0; i < 9; i++) {
			reg = RXTX_REG81 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG89_MU_TH7_SET(val, 0xe);
			val = RXTX_REG89_MU_TH8_SET(val, 0xe);
			val = RXTX_REG89_MU_TH9_SET(val, 0xe);
			serdes_wr(ctx, lane, reg, val);
		}

		/* Set BW select tap X for frequency adjust loop */
		for (i = 0; i < 3; i++) {
			reg = RXTX_REG96 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG96_MU_FREQ1_SET(val, 0x10);
			val = RXTX_REG96_MU_FREQ2_SET(val, 0x10);
			val = RXTX_REG96_MU_FREQ3_SET(val, 0x10);
			serdes_wr(ctx, lane, reg, val);
		}

		/* Set BW select tap X for phase adjust loop */
		for (i = 0; i < 3; i++) {
			reg = RXTX_REG99 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG99_MU_PHASE1_SET(val, 0x7);
			val = RXTX_REG99_MU_PHASE2_SET(val, 0x7);
			val = RXTX_REG99_MU_PHASE3_SET(val, 0x7);
			serdes_wr(ctx, lane, reg, val);
		}

		serdes_rd(ctx, lane, RXTX_REG102, &val);
		val = RXTX_REG102_FREQLOOP_LIMIT_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG102, val);

		serdes_wr(ctx, lane, RXTX_REG114, 0xffe0);

		/* Apply the per-lane eye tuning overrides */
		serdes_rd(ctx, lane, RXTX_REG125, &val);
		val = RXTX_REG125_SIGN_PQ_SET(val,
			ctx->sata_param.txeyedirection[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG125_PQ_REG_SET(val,
			ctx->sata_param.txeyetuning[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG125_PHZ_MANUAL_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG125, val);

		serdes_rd(ctx, lane, RXTX_REG127, &val);
		val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG127, val);

		serdes_rd(ctx, lane, RXTX_REG128, &val);
		val = RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(val, 0x3);
		serdes_wr(ctx, lane, RXTX_REG128, val);

		serdes_rd(ctx, lane, RXTX_REG145, &val);
		val = RXTX_REG145_RXDFE_CONFIG_SET(val, 0x3);
		val = RXTX_REG145_TX_IDLE_SATA_SET(val, 0x0);
		if (preA3Chip) {
			val = RXTX_REG145_RXES_ENA_SET(val, 0x1);
			val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x1);
		} else {
			val = RXTX_REG145_RXES_ENA_SET(val, 0x0);
			val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x0);
		}
		serdes_wr(ctx, lane, RXTX_REG145, val);

		/*
		 * Set Rx LOS filter clock rate, sample rate, and threshold
		 * windows
		 */
		for (i = 0; i < 4; i++) {
			reg = RXTX_REG148 + i * 2;
			serdes_wr(ctx, lane, reg, 0xFFFF);
		}
	}
}

/*
 * Release the PHY from reset and run one PLL calibration attempt.
 * Returns 0 when calibration completes without VCO failure, -1 otherwise.
 * On pre-A3 silicon the termination / pull-up / pull-down resistors are
 * calibrated manually first.
 */
static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx,
				 enum cmu_type_t cmu_type,
				 enum clk_type_t clk_type)
{
	void __iomem *csr_serdes = ctx->sds_base;
	int loop;
	u32 val;

	/* Release PHY main reset */
	writel(0xdf, csr_serdes + SATA_ENET_SDS_RST_CTL);
	readl(csr_serdes + SATA_ENET_SDS_RST_CTL);	/* Force a barrier */

	if (cmu_type != REF_CMU) {
		cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
		/*
		 * As per PHY design spec, the PLL reset requires a minimum
		 * of 800us.
		 */
		usleep_range(800, 1000);

		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		/*
		 * As per PHY design spec, the PLL auto calibration requires
		 * a minimum of 800us.
		 */
		usleep_range(800, 1000);

		cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
			       CMU_REG32_FORCE_VCOCAL_START_MASK);
		/*
		 * As per PHY design spec, the PLL requires a minimum of
		 * 800us to settle.
		 */
		usleep_range(800, 1000);
	}

	if (!preA3Chip)
		goto skip_manual_cal;

	/*
	 * Configure the termination resistor calibration.
	 * The serial receive pins, RXP/RXN, have a TERMination resistor
	 * that is required to be calibrated.
	 */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x12);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG17,
		       CMU_REG17_PVT_TERM_MAN_ENA_MASK);
	/*
	 * The serial transmit pins, TXP/TXN, have Pull-UP and Pull-DOWN
	 * resistors that are required to be calibrated.
	 * Configure the pull DOWN calibration
	 */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x29);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
		       CMU_REG16_PVT_DN_MAN_ENA_MASK);
	/* Configure the pull UP calibration */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x28);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
		       CMU_REG16_PVT_UP_MAN_ENA_MASK);

skip_manual_cal:
	/* Poll the PLL calibration completion status for at least 1 ms */
	loop = 100;
	do {
		cmu_rd(ctx, cmu_type, CMU_REG7, &val);
		if (CMU_REG7_PLL_CALIB_DONE_RD(val))
			break;
		/*
		 * As per PHY design spec, PLL calibration status requires
		 * a minimum of 10us to be updated.
		 */
		usleep_range(10, 100);
	} while (--loop > 0);

	cmu_rd(ctx, cmu_type, CMU_REG7, &val);
	dev_dbg(ctx->dev, "PLL calibration %s\n",
		CMU_REG7_PLL_CALIB_DONE_RD(val) ? "done" : "failed");
	if (CMU_REG7_VCO_CAL_FAIL_RD(val)) {
		dev_err(ctx->dev,
			"PLL calibration failed due to VCO failure\n");
		return -1;
	}
	dev_dbg(ctx->dev, "PLL calibration successful\n");

	cmu_rd(ctx, cmu_type, CMU_REG15, &val);
	dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not ");
	return 0;
}

/*
 * Power-cycle the VCO and force its calibration to restart; used between
 * PLL calibration retries.
 */
static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx,
				     enum cmu_type_t cmu_type,
				     enum clk_type_t clk_type)
{
	u32 val;

	dev_dbg(ctx->dev, "Reset VCO and re-start again\n");
	if (cmu_type == PHY_CMU) {
		cmu_rd(ctx, cmu_type, CMU_REG16, &val);
		val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
		cmu_wr(ctx, cmu_type, CMU_REG16, val);
	}

	cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
		       CMU_REG32_FORCE_VCOCAL_START_MASK);
}

/*
 * Full SATA-mode bring-up: reset the PHY, program clock/CMU/lane
 * configuration (optionally with SSC), then retry PLL calibration up to
 * 10 times. Always returns 0; a calibration failure is only logged so the
 * link can still be attempted.
 */
static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx,
				  enum clk_type_t clk_type, int ssc_enable)
{
	void __iomem *sds_base = ctx->sds_base;
	u32 val;
	int i;

	/* Configure the PHY for operation */
	dev_dbg(ctx->dev, "Reset PHY\n");
	/* Place PHY into reset */
	writel(0x0, sds_base + SATA_ENET_SDS_RST_CTL);
	val = readl(sds_base + SATA_ENET_SDS_RST_CTL);	/* Force a barrier */

	/* Release PHY lane from reset (active high) */
	writel(0x20, sds_base + SATA_ENET_SDS_RST_CTL);
	readl(sds_base + SATA_ENET_SDS_RST_CTL);	/* Force a barrier */

	/* Release all PHY module out of reset except PHY main reset */
	writel(0xde, sds_base + SATA_ENET_SDS_RST_CTL);
	readl(sds_base + SATA_ENET_SDS_RST_CTL);	/* Force a barrier */

	/* Set the operation speed */
	val = readl(sds_base + SATA_ENET_SDS_CTL1);
	val = CFG_I_SPD_SEL_CDR_OVR1_SET(val,
		ctx->sata_param.txspeed[ctx->sata_param.speed[0]]);
	writel(val, sds_base + SATA_ENET_SDS_CTL1);

	dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n");
	val = readl(sds_base + SATA_ENET_SDS_CTL0);
	val = REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(val, 0x4421);
	writel(val, sds_base + SATA_ENET_SDS_CTL0);

	/* Configure the clock macro unit (CMU) clock type */
	xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type);

	/* Configure the clock macro */
	xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type);

	/* Enable SSC if enabled */
	if (ssc_enable)
		xgene_phy_ssc_enable(ctx, PHY_CMU);

	/* Configure PHY lanes */
	xgene_phy_sata_cfg_lanes(ctx);

	/* Set Rx/Tx 20-bit */
	val = readl(sds_base + SATA_ENET_SDS_PCS_CTL0);
	val = REGSPEC_CFG_I_RX_WORDMODE0_SET(val, 0x3);
	val = REGSPEC_CFG_I_TX_WORDMODE0_SET(val, 0x3);
	writel(val, sds_base + SATA_ENET_SDS_PCS_CTL0);

	/* Start PLL calibration and try for up to ten times */
	i = 10;
	do {
		if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type))
			break;
		/* If failed, toggle the VCO power signal and start again */
		xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type);
	} while (--i > 0);
	/* Even on failure, allow to continue any way */
	if (i <= 0)
		dev_err(ctx->dev, "PLL calibration failed\n");

	return 0;
}

/* Dispatch hardware init by ctx->mode; only SATA is supported here. */
static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
				   enum clk_type_t clk_type,
				   int ssc_enable)
{
	int rc;

	dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type);

	if (ctx->mode == MODE_SATA) {
		rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable);
		if (rc)
			return rc;
	} else {
		dev_err(ctx->dev, "Un-supported customer pin mode %d\n",
			ctx->mode);
		return -ENODEV;
	}

	return 0;
}

/*
 * Receiver Offset Calibration:
 *
 * Calibrate the receiver signal path offset in two steps - summer and
 * latch calibrations
 */
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
	int i;
	struct {
		u32 reg;
		u32 val;
	} serdes_reg[] = {
		{RXTX_REG38, 0x0},
		{RXTX_REG39, 0xff00},
		{RXTX_REG40, 0xffff},
		{RXTX_REG41, 0xffff},
		{RXTX_REG42, 0xffff},
		{RXTX_REG43, 0xffff},
		{RXTX_REG44, 0xffff},
		{RXTX_REG45, 0xffff},
		{RXTX_REG46, 0xffff},
		{RXTX_REG47, 0xfffc},
		{RXTX_REG48, 0x0},
		{RXTX_REG49, 0x0},
		{RXTX_REG50, 0x0},
		{RXTX_REG51, 0x0},
		{RXTX_REG52, 0x0},
		{RXTX_REG53, 0x0},
		{RXTX_REG54, 0x0},
		{RXTX_REG55, 0x0},
	};

	/* Start SUMMER calibration */
	serdes_setbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_SUM_CAL_START_MASK);
	/*
	 * As per PHY design spec, the Summer calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);
	serdes_clrbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_SUM_CAL_START_MASK);
	/*
	 * As per PHY design spec, the auto calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);

	/* Start latch calibration */
	serdes_setbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_LAT_CAL_START_MASK);
	/*
	 * As per PHY design spec, the latch calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);
	serdes_clrbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_LAT_CAL_START_MASK);

	/* Configure the PHY lane for calibration */
	serdes_wr(ctx, lane, RXTX_REG28, 0x7);
	serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
	serdes_clrbits(ctx, lane, RXTX_REG4,
		       RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK);
	serdes_clrbits(ctx, lane, RXTX_REG7,
		       RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK);
	for (i = 0; i < ARRAY_SIZE(serdes_reg); i++)
		serdes_wr(ctx, lane, serdes_reg[i].reg,
			  serdes_reg[i].val);
}

/* Reset the digital Rx path of one lane (required after each cal pass) */
static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane)
{
	/* Reset digital Rx */
	serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
	/* As per PHY design spec, the reset requires a minimum of 100us. */
	usleep_range(100, 150);
	serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
}

/* Rounded integer average of an accumulated sum over 'samples' samples */
static int xgene_phy_get_avg(int accum, int samples)
{
	return (accum + (samples / 2)) / samples;
}

/*
 * Run the receiver offset calibration max_loop times, average the latch
 * and summer calibration read-outs over the passing iterations, and
 * program those averages back as manual calibration values.
 *
 * NOTE(review): avg_loop only advances on a PASSING iteration, so a lane
 * that keeps failing loops forever writing dev_err -- presumably the
 * hardware eventually passes; confirm intended behavior.
 */
static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane)
{
	int max_loop = 10;
	int avg_loop = 0;
	int lat_do = 0, lat_xo = 0, lat_eo = 0, lat_so = 0;
	int lat_de = 0, lat_xe = 0, lat_ee = 0, lat_se = 0;
	int sum_cal = 0;
	int lat_do_itr, lat_xo_itr, lat_eo_itr, lat_so_itr;
	int lat_de_itr, lat_xe_itr, lat_ee_itr, lat_se_itr;
	int sum_cal_itr;
	int fail_even;
	int fail_odd;
	u32 val;

	dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n",
		lane);

	/* Enable RX Hi-Z termination */
	serdes_setbits(ctx, lane, RXTX_REG12,
		       RXTX_REG12_RX_DET_TERM_ENABLE_MASK);
	/* Turn off DFE */
	serdes_wr(ctx, lane, RXTX_REG28, 0x0000);
	/* DFE Presets to zero */
	serdes_wr(ctx, lane, RXTX_REG31, 0x0000);

	/*
	 * Receiver Offset Calibration:
	 * Calibrate the receiver signal path offset in two steps - summer
	 * and latch calibration.
	 * Runs the "Receiver Offset Calibration" multiple times to determine
	 * the average value to use.
	 */
	while (avg_loop < max_loop) {
		/* Start the calibration */
		xgene_phy_force_lat_summer_cal(ctx, lane);

		serdes_rd(ctx, lane, RXTX_REG21, &val);
		lat_do_itr = RXTX_REG21_DO_LATCH_CALOUT_RD(val);
		lat_xo_itr = RXTX_REG21_XO_LATCH_CALOUT_RD(val);
		fail_odd = RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(val);

		serdes_rd(ctx, lane, RXTX_REG22, &val);
		lat_eo_itr = RXTX_REG22_EO_LATCH_CALOUT_RD(val);
		lat_so_itr = RXTX_REG22_SO_LATCH_CALOUT_RD(val);
		fail_even = RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(val);

		serdes_rd(ctx, lane, RXTX_REG23, &val);
		lat_de_itr = RXTX_REG23_DE_LATCH_CALOUT_RD(val);
		lat_xe_itr = RXTX_REG23_XE_LATCH_CALOUT_RD(val);

		serdes_rd(ctx, lane, RXTX_REG24, &val);
		lat_ee_itr = RXTX_REG24_EE_LATCH_CALOUT_RD(val);
		lat_se_itr = RXTX_REG24_SE_LATCH_CALOUT_RD(val);

		serdes_rd(ctx, lane, RXTX_REG121, &val);
		sum_cal_itr = RXTX_REG121_SUMOS_CAL_CODE_RD(val);

		/* Check for failure. If passed, sum them for averaging */
		if ((fail_even == 0 || fail_even == 1) &&
		    (fail_odd == 0 || fail_odd == 1)) {
			lat_do += lat_do_itr;
			lat_xo += lat_xo_itr;
			lat_eo += lat_eo_itr;
			lat_so += lat_so_itr;
			lat_de += lat_de_itr;
			lat_xe += lat_xe_itr;
			lat_ee += lat_ee_itr;
			lat_se += lat_se_itr;
			sum_cal += sum_cal_itr;

			dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop);
			dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
				lat_do_itr, lat_xo_itr, lat_eo_itr,
				lat_so_itr);
			dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
				lat_de_itr, lat_xe_itr, lat_ee_itr,
				lat_se_itr);
			dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr);
			++avg_loop;
		} else {
			dev_err(ctx->dev,
				"Receiver calibration failed at %d loop\n",
				avg_loop);
		}
		xgene_phy_reset_rxd(ctx, lane);
	}

	/* Update latch manual calibration with average value */
	serdes_rd(ctx, lane, RXTX_REG127, &val);
	val = RXTX_REG127_DO_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_do, max_loop));
	val = RXTX_REG127_XO_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_xo, max_loop));
	serdes_wr(ctx, lane, RXTX_REG127, val);

	serdes_rd(ctx, lane, RXTX_REG128, &val);
	val = RXTX_REG128_EO_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_eo, max_loop));
	val = RXTX_REG128_SO_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_so, max_loop));
	serdes_wr(ctx, lane, RXTX_REG128, val);

	serdes_rd(ctx, lane, RXTX_REG129, &val);
	val = RXTX_REG129_DE_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_de, max_loop));
	val = RXTX_REG129_XE_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_xe, max_loop));
	serdes_wr(ctx, lane, RXTX_REG129, val);

	serdes_rd(ctx, lane, RXTX_REG130, &val);
	val = RXTX_REG130_EE_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_ee, max_loop));
	val = RXTX_REG130_SE_LATCH_MANCAL_SET(val,
		xgene_phy_get_avg(lat_se, max_loop));
	serdes_wr(ctx, lane, RXTX_REG130, val);

	/* Update SUMMER calibration with average value */
	serdes_rd(ctx, lane, RXTX_REG14, &val);
	val = RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(val,
		xgene_phy_get_avg(sum_cal, max_loop));
	serdes_wr(ctx, lane, RXTX_REG14, val);

	dev_dbg(ctx->dev, "Average Value:\n");
	dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
		xgene_phy_get_avg(lat_do, max_loop),
		xgene_phy_get_avg(lat_xo, max_loop),
		xgene_phy_get_avg(lat_eo, max_loop),
		xgene_phy_get_avg(lat_so, max_loop));
	dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
		xgene_phy_get_avg(lat_de, max_loop),
		xgene_phy_get_avg(lat_xe, max_loop),
		xgene_phy_get_avg(lat_ee, max_loop),
		xgene_phy_get_avg(lat_se, max_loop));
	dev_dbg(ctx->dev, "SUM 0x%x\n",
		xgene_phy_get_avg(sum_cal, max_loop));

	serdes_rd(ctx, lane, RXTX_REG14, &val);
	val = RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(val, 0x1);
	serdes_wr(ctx, lane, RXTX_REG14, val);
	dev_dbg(ctx->dev, "Enable Manual Summer calibration\n");

	serdes_rd(ctx, lane, RXTX_REG127, &val);
	val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x1);
	dev_dbg(ctx->dev, "Enable Manual Latch calibration\n");
	serdes_wr(ctx, lane, RXTX_REG127, val);

	/* Disable RX Hi-Z termination */
	serdes_rd(ctx, lane, RXTX_REG12, &val);
	val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0);
	serdes_wr(ctx, 
lane, RXTX_REG12, val); /* Turn on DFE */ serdes_wr(ctx, lane, RXTX_REG28, 0x0007); /* Set DFE preset */ serdes_wr(ctx, lane, RXTX_REG31, 0x7e00); } static int xgene_phy_hw_init(struct phy *phy) { struct xgene_phy_ctx *ctx = phy_get_drvdata(phy); int rc; int i; rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE); if (rc) { dev_err(ctx->dev, "PHY initialize failed %d\n", rc); return rc; } /* Setup clock properly after PHY configuration */ if (!IS_ERR(ctx->clk)) { /* HW requires an toggle of the clock */ clk_prepare_enable(ctx->clk); clk_disable_unprepare(ctx->clk); clk_prepare_enable(ctx->clk); } /* Compute average value */ for (i = 0; i < MAX_LANE; i++) xgene_phy_gen_avg_val(ctx, i); dev_dbg(ctx->dev, "PHY initialized\n"); return 0; } static const struct phy_ops xgene_phy_ops = { .init = xgene_phy_hw_init, .owner = THIS_MODULE, }; static struct phy *xgene_phy_xlate(struct device *dev, struct of_phandle_args *args) { struct xgene_phy_ctx *ctx = dev_get_drvdata(dev); if (args->args_count <= 0) return ERR_PTR(-EINVAL); if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX) return ERR_PTR(-EINVAL); ctx->mode = args->args[0]; return ctx->phy; } static void xgene_phy_get_param(struct platform_device *pdev, const char *name, u32 *buffer, int count, u32 *default_val, u32 conv_factor) { int i; if (!of_property_read_u32_array(pdev->dev.of_node, name, buffer, count)) { for (i = 0; i < count; i++) buffer[i] /= conv_factor; return; } /* Does not exist, load default */ for (i = 0; i < count; i++) buffer[i] = default_val[i % 3]; } static int xgene_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct xgene_phy_ctx *ctx; struct resource *res; u32 default_spd[] = DEFAULT_SATA_SPD_SEL; u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN; u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION; u32 default_txeye_tuning[] = DEFAULT_SATA_TXEYETUNING; u32 default_txamp[] = DEFAULT_SATA_TXAMP; u32 default_txcn1[] = DEFAULT_SATA_TXCN1; 
u32 default_txcn2[] = DEFAULT_SATA_TXCN2; u32 default_txcp1[] = DEFAULT_SATA_TXCP1; int i; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctx->sds_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ctx->sds_base)) return PTR_ERR(ctx->sds_base); /* Retrieve optional clock */ ctx->clk = clk_get(&pdev->dev, NULL); /* Load override paramaters */ xgene_phy_get_param(pdev, "apm,tx-eye-tuning", ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1); xgene_phy_get_param(pdev, "apm,tx-eye-direction", ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1); xgene_phy_get_param(pdev, "apm,tx-boost-gain", ctx->sata_param.txboostgain, 6, default_txboost_gain, 1); xgene_phy_get_param(pdev, "apm,tx-amplitude", ctx->sata_param.txamplitude, 6, default_txamp, 13300); xgene_phy_get_param(pdev, "apm,tx-pre-cursor1", ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200); xgene_phy_get_param(pdev, "apm,tx-pre-cursor2", ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200); xgene_phy_get_param(pdev, "apm,tx-post-cursor", ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200); xgene_phy_get_param(pdev, "apm,tx-speed", ctx->sata_param.txspeed, 3, default_spd, 1); for (i = 0; i < MAX_LANE; i++) ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ platform_set_drvdata(pdev, ctx); ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); if (IS_ERR(ctx->phy)) { dev_dbg(&pdev->dev, "Failed to create PHY\n"); return PTR_ERR(ctx->phy); } phy_set_drvdata(ctx->phy, ctx); phy_provider = devm_of_phy_provider_register(ctx->dev, xgene_phy_xlate); return PTR_ERR_OR_ZERO(phy_provider); } static const struct of_device_id xgene_phy_of_match[] = { {.compatible = "apm,xgene-phy",}, {}, }; MODULE_DEVICE_TABLE(of, xgene_phy_of_match); static struct platform_driver xgene_phy_driver = { .probe = xgene_phy_probe, .driver = { .name = "xgene-phy", .of_match_table = 
xgene_phy_of_match, }, }; module_platform_driver(xgene_phy_driver); MODULE_DESCRIPTION("APM X-Gene Multi-Purpose PHY driver"); MODULE_AUTHOR("Loc Ho <lho@apm.com>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1");
gpl-2.0
mason-hock/CHIP-linux-libre
CHIP-linux-libre/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
1150
1661
/**
 * \file amdgpu_ioc32.c
 *
 * 32-bit ioctl compatibility routines for the AMDGPU DRM.
 *
 * \author Paul Mackerras <paulus@samba.org>
 *
 * Copyright (C) Paul Mackerras 2005
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/compat.h>

#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_drv.h"

/**
 * amdgpu_kms_compat_ioctl - compat_ioctl entry point for 32-bit userspace
 * @filp: file pointer for the opened DRM device node
 * @cmd: ioctl command number
 * @arg: ioctl argument (user pointer or value, still in 32-bit layout)
 *
 * Dispatches 32-bit ioctls: generic DRM core commands (below
 * DRM_COMMAND_BASE) go through the DRM compat translation layer, while
 * driver-private AMDGPU ioctls are passed straight to the native handler.
 *
 * Return: the dispatched handler's result (0 on success, negative errno
 * on failure).
 */
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/* Core DRM ioctls need 32->64-bit structure translation. */
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	/*
	 * AMDGPU's private ioctl structures use fixed-size members, so no
	 * translation is required; call the native handler directly
	 * (drop the redundant temporary the original used).
	 */
	return amdgpu_drm_ioctl(filp, cmd, arg);
}
gpl-2.0
ilikenwf/android_kernel_ms_surfacepro3
drivers/pinctrl/pinctrl-adi2-bf60x.c
1150
15325
/*
 * Pinctrl Driver for ADI GPIO2 controller
 *
 * Copyright 2007-2013 Analog Devices Inc.
 *
 * Licensed under the GPLv2 or later
 */

#include <asm/portmux.h>
#include "pinctrl-adi2.h"

/*
 * Physical pin descriptors for ports A-G (16 pins each, 112 total).
 *
 * BUGFIX: pin 3 was mislabeled "PG3", which both broke the PA0..PA15
 * sequence and duplicated pin 99's name ("PG3").  Pin names must be
 * unique for name-based lookup in the pinctrl core; corrected to "PA3".
 */
static const struct pinctrl_pin_desc adi_pads[] = {
	PINCTRL_PIN(0, "PA0"), PINCTRL_PIN(1, "PA1"),
	PINCTRL_PIN(2, "PA2"), PINCTRL_PIN(3, "PA3"),
	PINCTRL_PIN(4, "PA4"), PINCTRL_PIN(5, "PA5"),
	PINCTRL_PIN(6, "PA6"), PINCTRL_PIN(7, "PA7"),
	PINCTRL_PIN(8, "PA8"), PINCTRL_PIN(9, "PA9"),
	PINCTRL_PIN(10, "PA10"), PINCTRL_PIN(11, "PA11"),
	PINCTRL_PIN(12, "PA12"), PINCTRL_PIN(13, "PA13"),
	PINCTRL_PIN(14, "PA14"), PINCTRL_PIN(15, "PA15"),
	PINCTRL_PIN(16, "PB0"), PINCTRL_PIN(17, "PB1"),
	PINCTRL_PIN(18, "PB2"), PINCTRL_PIN(19, "PB3"),
	PINCTRL_PIN(20, "PB4"), PINCTRL_PIN(21, "PB5"),
	PINCTRL_PIN(22, "PB6"), PINCTRL_PIN(23, "PB7"),
	PINCTRL_PIN(24, "PB8"), PINCTRL_PIN(25, "PB9"),
	PINCTRL_PIN(26, "PB10"), PINCTRL_PIN(27, "PB11"),
	PINCTRL_PIN(28, "PB12"), PINCTRL_PIN(29, "PB13"),
	PINCTRL_PIN(30, "PB14"), PINCTRL_PIN(31, "PB15"),
	PINCTRL_PIN(32, "PC0"), PINCTRL_PIN(33, "PC1"),
	PINCTRL_PIN(34, "PC2"), PINCTRL_PIN(35, "PC3"),
	PINCTRL_PIN(36, "PC4"), PINCTRL_PIN(37, "PC5"),
	PINCTRL_PIN(38, "PC6"), PINCTRL_PIN(39, "PC7"),
	PINCTRL_PIN(40, "PC8"), PINCTRL_PIN(41, "PC9"),
	PINCTRL_PIN(42, "PC10"), PINCTRL_PIN(43, "PC11"),
	PINCTRL_PIN(44, "PC12"), PINCTRL_PIN(45, "PC13"),
	PINCTRL_PIN(46, "PC14"), PINCTRL_PIN(47, "PC15"),
	PINCTRL_PIN(48, "PD0"), PINCTRL_PIN(49, "PD1"),
	PINCTRL_PIN(50, "PD2"), PINCTRL_PIN(51, "PD3"),
	PINCTRL_PIN(52, "PD4"), PINCTRL_PIN(53, "PD5"),
	PINCTRL_PIN(54, "PD6"), PINCTRL_PIN(55, "PD7"),
	PINCTRL_PIN(56, "PD8"), PINCTRL_PIN(57, "PD9"),
	PINCTRL_PIN(58, "PD10"), PINCTRL_PIN(59, "PD11"),
	PINCTRL_PIN(60, "PD12"), PINCTRL_PIN(61, "PD13"),
	PINCTRL_PIN(62, "PD14"), PINCTRL_PIN(63, "PD15"),
	PINCTRL_PIN(64, "PE0"), PINCTRL_PIN(65, "PE1"),
	PINCTRL_PIN(66, "PE2"), PINCTRL_PIN(67, "PE3"),
	PINCTRL_PIN(68, "PE4"), PINCTRL_PIN(69, "PE5"),
	PINCTRL_PIN(70, "PE6"), PINCTRL_PIN(71, "PE7"),
	PINCTRL_PIN(72, "PE8"), PINCTRL_PIN(73, "PE9"),
	PINCTRL_PIN(74, "PE10"), PINCTRL_PIN(75, "PE11"),
	PINCTRL_PIN(76, "PE12"), PINCTRL_PIN(77, "PE13"),
	PINCTRL_PIN(78, "PE14"), PINCTRL_PIN(79, "PE15"),
	PINCTRL_PIN(80, "PF0"), PINCTRL_PIN(81, "PF1"),
	PINCTRL_PIN(82, "PF2"), PINCTRL_PIN(83, "PF3"),
	PINCTRL_PIN(84, "PF4"), PINCTRL_PIN(85, "PF5"),
	PINCTRL_PIN(86, "PF6"), PINCTRL_PIN(87, "PF7"),
	PINCTRL_PIN(88, "PF8"), PINCTRL_PIN(89, "PF9"),
	PINCTRL_PIN(90, "PF10"), PINCTRL_PIN(91, "PF11"),
	PINCTRL_PIN(92, "PF12"), PINCTRL_PIN(93, "PF13"),
	PINCTRL_PIN(94, "PF14"), PINCTRL_PIN(95, "PF15"),
	PINCTRL_PIN(96, "PG0"), PINCTRL_PIN(97, "PG1"),
	PINCTRL_PIN(98, "PG2"), PINCTRL_PIN(99, "PG3"),
	PINCTRL_PIN(100, "PG4"), PINCTRL_PIN(101, "PG5"),
	PINCTRL_PIN(102, "PG6"), PINCTRL_PIN(103, "PG7"),
	PINCTRL_PIN(104, "PG8"), PINCTRL_PIN(105, "PG9"),
	PINCTRL_PIN(106, "PG10"), PINCTRL_PIN(107, "PG11"),
	PINCTRL_PIN(108, "PG12"), PINCTRL_PIN(109, "PG13"),
	PINCTRL_PIN(110, "PG14"), PINCTRL_PIN(111, "PG15"),
};

/* Per-peripheral pin lists (GPIO numbers owned by each function). */
static const unsigned uart0_pins[] = {
	GPIO_PD7, GPIO_PD8,
};

static const unsigned uart0_ctsrts_pins[] = {
	GPIO_PD9, GPIO_PD10,
};

static const unsigned uart1_pins[] = {
	GPIO_PG15, GPIO_PG14,
};

static const unsigned uart1_ctsrts_pins[] = {
	GPIO_PG10, GPIO_PG13,
};

static const unsigned rsi0_pins[] = {
	GPIO_PG3, GPIO_PG2, GPIO_PG0, GPIO_PE15, GPIO_PG5, GPIO_PG6,
};

static const unsigned eth0_pins[] = {
	GPIO_PC6, GPIO_PC7, GPIO_PC2, GPIO_PC0, GPIO_PC3, GPIO_PC1,
	GPIO_PB13, GPIO_PD6, GPIO_PC5, GPIO_PC4, GPIO_PB14, GPIO_PB15,
};

static const unsigned eth1_pins[] = {
	GPIO_PE10, GPIO_PE11, GPIO_PG3, GPIO_PG0, GPIO_PG2, GPIO_PE15,
	GPIO_PG5, GPIO_PE12, GPIO_PE13, GPIO_PE14, GPIO_PG6, GPIO_PC9,
};

static const unsigned spi0_pins[] = {
	GPIO_PD4, GPIO_PD2, GPIO_PD3,
};

static const unsigned spi1_pins[] = {
	GPIO_PD5, GPIO_PD14, GPIO_PD13,
};

/* TWI pins are dedicated on BF60x, so the GPIO lists are empty. */
static const unsigned twi0_pins[] = {
};

static const unsigned twi1_pins[] = {
};

static const unsigned rotary_pins[] = {
	GPIO_PG7, GPIO_PG11, GPIO_PG12,
};

static const unsigned can0_pins[] = {
	GPIO_PG1, GPIO_PG4,
};

static const unsigned smc0_pins[] = {
	GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5,
	GPIO_PA6, GPIO_PA7, GPIO_PA8, GPIO_PA9, GPIO_PB2, GPIO_PA10,
	GPIO_PA11, GPIO_PB3, GPIO_PA12, GPIO_PA13, GPIO_PA14, GPIO_PA15,
	GPIO_PB6, GPIO_PB7, GPIO_PB8, GPIO_PB10, GPIO_PB11, GPIO_PB0,
};

static const unsigned sport0_pins[] = {
	GPIO_PB5, GPIO_PB4, GPIO_PB9, GPIO_PB8, GPIO_PB7, GPIO_PB11,
};

static const unsigned sport1_pins[] = {
	GPIO_PE2, GPIO_PE5, GPIO_PD15, GPIO_PE4, GPIO_PE3, GPIO_PE1,
};

static const unsigned sport2_pins[] = {
	GPIO_PG4, GPIO_PG1, GPIO_PG9, GPIO_PG10, GPIO_PG7, GPIO_PB12,
};

static const unsigned ppi0_8b_pins[] = {
	GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5,
	GPIO_PF6, GPIO_PF7, GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PE6,
	GPIO_PE7, GPIO_PE8, GPIO_PE9,
};

static const unsigned ppi0_16b_pins[] = {
	GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5,
	GPIO_PF6, GPIO_PF7, GPIO_PF9, GPIO_PF10, GPIO_PF11, GPIO_PF12,
	GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PE6, GPIO_PE7, GPIO_PE8,
	GPIO_PE9,
};

static const unsigned ppi0_24b_pins[] = {
	GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3, GPIO_PF4, GPIO_PF5,
	GPIO_PF6, GPIO_PF7, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11,
	GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15, GPIO_PE0, GPIO_PE1,
	GPIO_PE2, GPIO_PE3, GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7,
	GPIO_PE8, GPIO_PE9, GPIO_PD12, GPIO_PD15,
};

static const unsigned ppi1_8b_pins[] = {
	GPIO_PC0, GPIO_PC1, GPIO_PC2, GPIO_PC3, GPIO_PC4, GPIO_PC5,
	GPIO_PC6, GPIO_PC7, GPIO_PC8, GPIO_PB13, GPIO_PB14, GPIO_PB15,
	GPIO_PD6,
};

static const unsigned ppi1_16b_pins[] = {
	GPIO_PC0, GPIO_PC1, GPIO_PC2, GPIO_PC3, GPIO_PC4, GPIO_PC5,
	GPIO_PC6, GPIO_PC7, GPIO_PC9, GPIO_PC10, GPIO_PC11, GPIO_PC12,
	GPIO_PC13, GPIO_PC14, GPIO_PC15, GPIO_PB13, GPIO_PB14, GPIO_PB15,
	GPIO_PD6,
};

static const unsigned ppi2_8b_pins[] = {
	GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5,
	GPIO_PA6, GPIO_PA7, GPIO_PB0, GPIO_PB1, GPIO_PB2, GPIO_PB3,
};

static const unsigned ppi2_16b_pins[] = {
	GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3, GPIO_PA4, GPIO_PA5,
	GPIO_PA6, GPIO_PA7, GPIO_PA8, GPIO_PA9, GPIO_PA10, GPIO_PA11,
	GPIO_PA12, GPIO_PA13, GPIO_PA14, GPIO_PA15, GPIO_PB0, GPIO_PB1,
	GPIO_PB2,
};

static const unsigned lp0_pins[] = {
	GPIO_PB0, GPIO_PB1, GPIO_PA0, GPIO_PA1, GPIO_PA2, GPIO_PA3,
	GPIO_PA4, GPIO_PA5, GPIO_PA6, GPIO_PA7,
};

static const unsigned lp1_pins[] = {
	GPIO_PB3, GPIO_PB2, GPIO_PA8, GPIO_PA9, GPIO_PA10, GPIO_PA11,
	GPIO_PA12, GPIO_PA13, GPIO_PA14, GPIO_PA15,
};

static const unsigned lp2_pins[] = {
	GPIO_PE6, GPIO_PE7, GPIO_PF0, GPIO_PF1, GPIO_PF2, GPIO_PF3,
	GPIO_PF4, GPIO_PF5, GPIO_PF6, GPIO_PF7,
};

static const unsigned lp3_pins[] = {
	GPIO_PE9, GPIO_PE8, GPIO_PF8, GPIO_PF9, GPIO_PF10, GPIO_PF11,
	GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15,
};

/* Peripheral-mux request lists (0-terminated P_* entries for portmux). */
static const unsigned short uart0_mux[] = {
	P_UART0_TX, P_UART0_RX,
	0
};

static const unsigned short uart0_ctsrts_mux[] = {
	P_UART0_RTS, P_UART0_CTS,
	0
};

static const unsigned short uart1_mux[] = {
	P_UART1_TX, P_UART1_RX,
	0
};

static const unsigned short uart1_ctsrts_mux[] = {
	P_UART1_RTS, P_UART1_CTS,
	0
};

static const unsigned short rsi0_mux[] = {
	P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3,
	P_RSI_CMD, P_RSI_CLK,
	0
};

static const unsigned short eth0_mux[] = P_RMII0;
static const unsigned short eth1_mux[] = P_RMII1;

static const unsigned short spi0_mux[] = {
	P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI,
	0
};

static const unsigned short spi1_mux[] = {
	P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI,
	0
};

static const unsigned short twi0_mux[] = {
	P_TWI0_SCL, P_TWI0_SDA,
	0
};

static const unsigned short twi1_mux[] = {
	P_TWI1_SCL, P_TWI1_SDA,
	0
};

static const unsigned short rotary_mux[] = {
	P_CNT_CUD, P_CNT_CDG, P_CNT_CZM,
	0
};

static const unsigned short sport0_mux[] = {
	P_SPORT0_ACLK, P_SPORT0_AFS, P_SPORT0_AD0, P_SPORT0_BCLK,
	P_SPORT0_BFS, P_SPORT0_BD0,
	0,
};

static const unsigned short sport1_mux[] = {
	P_SPORT1_ACLK, P_SPORT1_AFS, P_SPORT1_AD0, P_SPORT1_BCLK,
	P_SPORT1_BFS, P_SPORT1_BD0,
	0,
};

static const unsigned short sport2_mux[] = {
	P_SPORT2_ACLK, P_SPORT2_AFS, P_SPORT2_AD0, P_SPORT2_BCLK,
	P_SPORT2_BFS, P_SPORT2_BD0,
	0,
};

static const unsigned short can0_mux[] = {
	P_CAN0_RX, P_CAN0_TX,
	0
};

static const unsigned short smc0_mux[] = {
	P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
	P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
	P_A22, P_A23, P_A24, P_A25, P_NORCK,
	0,
};

static const unsigned short ppi0_8b_mux[] = {
	P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
	P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
	P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
	0,
};

static const unsigned short ppi0_16b_mux[] = {
	P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
	P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
	P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
	P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
	P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
	0,
};

static const unsigned short ppi0_24b_mux[] = {
	P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
	P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
	P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
	P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
	P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19,
	P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23,
	P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
	0,
};

static const unsigned short ppi1_8b_mux[] = {
	P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
	P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
	P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
	0,
};

static const unsigned short ppi1_16b_mux[] = {
	P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
	P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
	P_PPI1_D8, P_PPI1_D9, P_PPI1_D10, P_PPI1_D11,
	P_PPI1_D12, P_PPI1_D13, P_PPI1_D14, P_PPI1_D15,
	P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
	0,
};

static const unsigned short ppi2_8b_mux[] = {
	P_PPI2_D0, P_PPI2_D1, P_PPI2_D2, P_PPI2_D3,
	P_PPI2_D4, P_PPI2_D5, P_PPI2_D6, P_PPI2_D7,
	P_PPI2_CLK, P_PPI2_FS1, P_PPI2_FS2,
	0,
};

static const unsigned short ppi2_16b_mux[] = {
	P_PPI2_D0, P_PPI2_D1, P_PPI2_D2, P_PPI2_D3,
	P_PPI2_D4, P_PPI2_D5, P_PPI2_D6, P_PPI2_D7,
	P_PPI2_D8, P_PPI2_D9, P_PPI2_D10, P_PPI2_D11,
	P_PPI2_D12, P_PPI2_D13, P_PPI2_D14, P_PPI2_D15,
	P_PPI2_CLK, P_PPI2_FS1, P_PPI2_FS2,
	0,
};

static const unsigned short lp0_mux[] = {
	P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
	P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
	0
};

static const unsigned short lp1_mux[] = {
	P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
	P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
	0
};

static const unsigned short lp2_mux[] = {
	P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
	P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
	0
};

static const unsigned short lp3_mux[] = {
	P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
	P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
	0
};

/* Pin groups: name + pin list + matching portmux request list. */
static const struct adi_pin_group adi_pin_groups[] = {
	ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
	ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins, uart0_ctsrts_mux),
	ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
	ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
	ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
	ADI_PIN_GROUP("eth0grp", eth0_pins, eth0_mux),
	ADI_PIN_GROUP("eth1grp", eth1_pins, eth1_mux),
	ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
	ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
	ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
	ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
	ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
	ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
	ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
	ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
	ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
	ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
	ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
	ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
	ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
	ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
	ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
	ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
	ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins, ppi2_16b_mux),
	ADI_PIN_GROUP("lp0grp", lp0_pins, lp0_mux),
	ADI_PIN_GROUP("lp1grp", lp1_pins, lp1_mux),
	ADI_PIN_GROUP("lp2grp", lp2_pins, lp2_mux),
	ADI_PIN_GROUP("lp3grp", lp3_pins, lp3_mux),
};

/* Group-name lists consumed by each pinmux function below. */
static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart0ctsrtsgrp[] = { "uart0ctsrtsgrp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
static const char * const rsi0grp[] = { "rsi0grp" };
static const char * const eth0grp[] = { "eth0grp" };
static const char * const eth1grp[] = { "eth1grp" };
static const char * const spi0grp[] = { "spi0grp" };
static const char * const spi1grp[] = { "spi1grp" };
static const char * const twi0grp[] = { "twi0grp" };
static const char * const twi1grp[] = { "twi1grp" };
static const char * const rotarygrp[] = { "rotarygrp" };
static const char * const can0grp[] = { "can0grp" };
static const char * const smc0grp[] = { "smc0grp" };
static const char * const sport0grp[] = { "sport0grp" };
static const char * const sport1grp[] = { "sport1grp" };
static const char * const sport2grp[] = { "sport2grp" };
static const char * const ppi0grp[] = { "ppi0_8bgrp",
					"ppi0_16bgrp",
					"ppi0_24bgrp" };
static const char * const ppi1grp[] = { "ppi1_8bgrp",
					"ppi1_16bgrp" };
static const char * const ppi2grp[] = { "ppi2_8bgrp",
					"ppi2_16bgrp" };
static const char * const lp0grp[] = { "lp0grp" };
static const char * const lp1grp[] = { "lp1grp" };
static const char * const lp2grp[] = { "lp2grp" };
static const char * const lp3grp[] = { "lp3grp" };

static const struct adi_pmx_func adi_pmx_functions[] = {
	ADI_PMX_FUNCTION("uart0", uart0grp),
	ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp),
	ADI_PMX_FUNCTION("uart1", uart1grp),
	ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
	ADI_PMX_FUNCTION("rsi0", rsi0grp),
	ADI_PMX_FUNCTION("eth0", eth0grp),
	ADI_PMX_FUNCTION("eth1", eth1grp),
	ADI_PMX_FUNCTION("spi0", spi0grp),
	ADI_PMX_FUNCTION("spi1", spi1grp),
	ADI_PMX_FUNCTION("twi0", twi0grp),
	ADI_PMX_FUNCTION("twi1", twi1grp),
	ADI_PMX_FUNCTION("rotary", rotarygrp),
	ADI_PMX_FUNCTION("can0", can0grp),
	ADI_PMX_FUNCTION("smc0", smc0grp),
	ADI_PMX_FUNCTION("sport0", sport0grp),
	ADI_PMX_FUNCTION("sport1", sport1grp),
	ADI_PMX_FUNCTION("sport2", sport2grp),
	ADI_PMX_FUNCTION("ppi0", ppi0grp),
	ADI_PMX_FUNCTION("ppi1", ppi1grp),
	ADI_PMX_FUNCTION("ppi2", ppi2grp),
	ADI_PMX_FUNCTION("lp0", lp0grp),
	ADI_PMX_FUNCTION("lp1", lp1grp),
	ADI_PMX_FUNCTION("lp2", lp2grp),
	ADI_PMX_FUNCTION("lp3", lp3grp),
};

/* SoC description handed to the generic ADI GPIO2 pinctrl core. */
static const struct adi_pinctrl_soc_data adi_bf60x_soc = {
	.functions = adi_pmx_functions,
	.nfunctions = ARRAY_SIZE(adi_pmx_functions),
	.groups = adi_pin_groups,
	.ngroups = ARRAY_SIZE(adi_pin_groups),
	.pins = adi_pads,
	.npins = ARRAY_SIZE(adi_pads),
};

/**
 * adi_pinctrl_soc_init - publish the BF60x SoC pinctrl tables
 * @soc: out-parameter filled with a pointer to the static SoC data
 */
void adi_pinctrl_soc_init(const struct adi_pinctrl_soc_data **soc)
{
	*soc = &adi_bf60x_soc;
}
gpl-2.0
MoKee/android_kernel_motorola_victara_retcn
sound/soc/msm/msm-pcm-q6.c
1918
22051
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include <linux/dma-mapping.h> #include "msm-pcm-q6.h" #include "msm-pcm-routing.h" static struct audio_locks the_locks; struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; }; #define PLAYBACK_NUM_PERIODS 8 #define PLAYBACK_PERIOD_SIZE 2048 #define CAPTURE_NUM_PERIODS 16 #define CAPTURE_PERIOD_SIZE 320 static struct snd_pcm_hardware msm_pcm_hardware_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 4, .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE, .period_bytes_min = CAPTURE_PERIOD_SIZE, .period_bytes_max = CAPTURE_PERIOD_SIZE, .periods_min = CAPTURE_NUM_PERIODS, .periods_max = CAPTURE_NUM_PERIODS, .fifo_size = 0, }; static struct snd_pcm_hardware msm_pcm_hardware_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | 
SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = PLAYBACK_NUM_PERIODS * PLAYBACK_PERIOD_SIZE, .period_bytes_min = PLAYBACK_PERIOD_SIZE, .period_bytes_max = PLAYBACK_PERIOD_SIZE, .periods_min = PLAYBACK_NUM_PERIODS, .periods_max = PLAYBACK_NUM_PERIODS, .fifo_size = 0, }; /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2]; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event, void *priv_data) { struct msm_audio *prtd = priv_data; BUG_ON(!prtd); pr_debug("%s: event %x\n", __func__, event); switch (event) { case MSM_PCM_RT_EVT_BUF_RECFG: q6asm_cmd(prtd->audio_client, CMD_PAUSE); q6asm_cmd(prtd->audio_client, CMD_FLUSH); q6asm_run(prtd->audio_client, 0, 0, 0); default: break; } } static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct msm_audio *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; uint32_t *ptrmem = (uint32_t *)payload; int i = 0; uint32_t idx = 0; uint32_t size = 0; pr_debug("%s\n", __func__); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE: { pr_debug("ASM_DATA_EVENT_WRITE_DONE\n"); pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem); prtd->pcm_irq_pos += prtd->pcm_count; if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); atomic_inc(&prtd->out_count); wake_up(&the_locks.write_wait); if (!atomic_read(&prtd->start)) break; if (!prtd->mmap_flag) break; if (q6asm_is_cpu_buf_avail_nolock(IN, prtd->audio_client, &size, &idx)) 
{ pr_debug("%s:writing %d bytes of buffer to dsp 2\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } break; } case ASM_DATA_CMDRSP_EOS: pr_debug("ASM_DATA_CMDRSP_EOS\n"); prtd->cmd_ack = 1; wake_up(&the_locks.eos_wait); break; case ASM_DATA_EVENT_READ_DONE: { pr_debug("ASM_DATA_EVENT_READ_DONE\n"); pr_debug("token = 0x%08x\n", token); for (i = 0; i < 8; i++, ++ptrmem) pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem); in_frame_info[token][0] = payload[2]; in_frame_info[token][1] = payload[3]; /* assume data size = 0 during flushing */ if (in_frame_info[token][0]) { prtd->pcm_irq_pos += prtd->pcm_count; pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos); if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); if (atomic_read(&prtd->in_count) <= prtd->periods) atomic_inc(&prtd->in_count); wake_up(&the_locks.read_wait); if (prtd->mmap_flag && q6asm_is_cpu_buf_avail_nolock(OUT, prtd->audio_client, &size, &idx)) q6asm_read_nolock(prtd->audio_client); } else { pr_debug("%s: reclaim flushed buf in_count %x\n", __func__, atomic_read(&prtd->in_count)); atomic_inc(&prtd->in_count); if (atomic_read(&prtd->in_count) == prtd->periods) { pr_info("%s: reclaimed all bufs\n", __func__); if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); wake_up(&the_locks.read_wait); } } break; } case APR_BASIC_RSP_RESULT: { switch (payload[0]) { case ASM_SESSION_CMD_RUN: if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) { atomic_set(&prtd->start, 1); break; } if (prtd->mmap_flag) { pr_debug("%s:writing %d bytes" " of buffer to dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } else { while (atomic_read(&prtd->out_needed)) { pr_debug("%s:writing %d bytes" " of buffer to dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); atomic_dec(&prtd->out_needed); wake_up(&the_locks.write_wait); }; } 
atomic_set(&prtd->start, 1); break; default: break; } } break; default: pr_debug("Not Supported Event opcode[0x%x]\n", opcode); break; } } static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; ret = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate, runtime->channels); if (ret < 0) pr_info("%s: CMD Format block failed\n", __func__); atomic_set(&prtd->out_count, runtime->periods); prtd->enabled = 1; prtd->cmd_ack = 0; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret = 0; int i = 0; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; pr_debug("Samp_rate = %d\n", prtd->samp_rate); pr_debug("Channel = %d\n", prtd->channel_mode); if (prtd->channel_mode > 2) { ret = q6asm_enc_cfg_blk_multi_ch_pcm(prtd->audio_client, prtd->samp_rate, prtd->channel_mode); } else { ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client, prtd->samp_rate, prtd->channel_mode); } if (ret < 0) pr_debug("%s: cmd cfg pcm was block failed", __func__); for (i = 0; i < runtime->periods; i++) q6asm_read(prtd->audio_client); prtd->periods = runtime->periods; prtd->enabled = 1; return ret; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { 
int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("%s: Trigger start\n", __func__); q6asm_run_nowait(prtd->audio_client, 0, 0, 0); break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); atomic_set(&prtd->start, 0); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) break; prtd->cmd_ack = 0; q6asm_cmd_nowait(prtd->audio_client, CMD_EOS); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n"); q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE); atomic_set(&prtd->start, 0); break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd; int ret = 0; pr_debug("%s\n", __func__); prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL); if (prtd == NULL) { pr_err("Failed to allocate memory for msm_audio\n"); return -ENOMEM; } prtd->substream = substream; prtd->audio_client = q6asm_audio_client_alloc( (app_cb)event_handler, prtd); if (!prtd->audio_client) { pr_info("%s: Could not allocate memory\n", __func__); kfree(prtd); return -ENOMEM; } prtd->audio_client->perf_mode = false; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw = msm_pcm_hardware_playback; ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm out open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session); prtd->session_id = prtd->audio_client->session; msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id, prtd->audio_client->perf_mode, prtd->session_id, substream->stream); prtd->cmd_ack = 
1; } /* Capture path */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw = msm_pcm_hardware_capture; } ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_info("snd_pcm_hw_constraint_list failed\n"); /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) pr_info("snd_pcm_hw_constraint_integer failed\n"); prtd->dsp_cnt = 0; runtime->private_data = prtd; return 0; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer = 0; char *bufptr = NULL; void *data = NULL; uint32_t idx = 0; uint32_t size = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; fbytes = frames_to_bytes(runtime, frames); pr_debug("%s: prtd->out_count = %d\n", __func__, atomic_read(&prtd->out_count)); ret = wait_event_timeout(the_locks.write_wait, (atomic_read(&prtd->out_count)), 5 * HZ); if (!ret) { pr_err("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->out_count)) { pr_err("%s: pcm stopped out_count 0\n", __func__); return 0; } data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx); bufptr = data; if (bufptr) { pr_debug("%s:fbytes =%d: xfer=%d size=%d\n", __func__, fbytes, xfer, size); xfer = fbytes; if (copy_from_user(bufptr, buf, xfer)) { ret = -EFAULT; goto fail; } buf += xfer; fbytes -= xfer; pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer); if (atomic_read(&prtd->start)) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, xfer); ret = q6asm_write(prtd->audio_client, xfer, 0, 0, NO_TIMESTAMP); if (ret < 0) { ret = -EFAULT; goto fail; } } else atomic_inc(&prtd->out_needed); atomic_dec(&prtd->out_count); } fail: return ret; } static int msm_pcm_playback_close(struct 
snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = 0; int ret = 0; pr_debug("%s\n", __func__); dir = IN; ret = wait_event_timeout(the_locks.eos_wait, prtd->cmd_ack, 5 * HZ); if (!ret) pr_err("%s: CMD_EOS failed\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_PLAYBACK); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer; char *bufptr; void *data = NULL; static uint32_t idx; static uint32_t size; uint32_t offset = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = substream->runtime->private_data; pr_debug("%s\n", __func__); fbytes = frames_to_bytes(runtime, frames); pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr); pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr); pr_debug("avail_min %d\n", (int)runtime->control->avail_min); ret = wait_event_timeout(the_locks.read_wait, (atomic_read(&prtd->in_count)), 5 * HZ); if (!ret) { pr_debug("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->in_count)) { pr_debug("%s: pcm stopped in_count 0\n", __func__); return 0; } pr_debug("Checking if valid buffer is available...%08x\n", (unsigned int) data); data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx); bufptr = data; pr_debug("Size = %d\n", size); pr_debug("fbytes = %d\n", fbytes); pr_debug("idx = %d\n", idx); if (bufptr) { xfer = fbytes; if (xfer > size) xfer = size; offset = in_frame_info[idx][1]; pr_debug("Offset value = %d\n", offset); if (copy_to_user(buf, bufptr+offset, 
xfer)) { pr_err("Failed to copy buf to user\n"); ret = -EFAULT; goto fail; } fbytes -= xfer; size -= xfer; in_frame_info[idx][1] += xfer; pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n", __func__, fbytes, size, xfer); pr_debug(" Sending next buffer to dsp\n"); memset(&in_frame_info[idx], 0, sizeof(uint32_t) * 2); atomic_dec(&prtd->in_count); ret = q6asm_read(prtd->audio_client); if (ret < 0) { pr_err("q6asm read failed\n"); ret = -EFAULT; goto fail; } } else pr_err("No valid buffer\n"); pr_debug("Returning from capture_copy... %d\n", ret); fail: return ret; } static int msm_pcm_capture_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = OUT; pr_debug("%s\n", __func__); if (prtd->audio_client) { q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); q6asm_audio_client_free(prtd->audio_client); } msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_CAPTURE); kfree(prtd); return 0; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_close(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_close(substream); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if 
(substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); return ret; } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; if (prtd->pcm_irq_pos >= prtd->pcm_size) prtd->pcm_irq_pos = 0; pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos); return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { int result = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; pr_debug("%s\n", __func__); prtd->mmap_flag = 1; if (runtime->dma_addr && runtime->dma_bytes) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, runtime->dma_addr >> PAGE_SHIFT, runtime->dma_bytes, vma->vm_page_prot); } else { pr_err("Physical address or size of buf is NULL"); return -EINVAL; } return result; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct audio_buffer *buf; int dir, ret; int format = FORMAT_LINEAR_PCM; struct msm_pcm_routing_evt event; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dir = IN; else dir = OUT; /*capture path*/ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { if (params_channels(params) > 2) format = FORMAT_MULTI_CHANNEL_LINEAR_PCM; pr_debug("%s format = :0x%x\n", __func__, format); ret = q6asm_open_read(prtd->audio_client, format); if (ret < 0) { pr_err("%s: q6asm_open_read failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); prtd->audio_client = NULL; return -ENOMEM; } pr_debug("%s: session ID %d\n", __func__, 
prtd->audio_client->session); prtd->session_id = prtd->audio_client->session; event.event_func = msm_pcm_route_event_handler; event.priv_data = (void *) prtd; msm_pcm_routing_reg_phy_stream_v2(soc_prtd->dai_link->be_id, prtd->audio_client->perf_mode, prtd->session_id, substream->stream, event); } ret = q6asm_audio_client_buf_alloc_contiguous(dir, prtd->audio_client, runtime->hw.period_bytes_min, runtime->hw.periods_max); if (ret < 0) { pr_err("Audio Start: Buffer Allocation failed \ rc = %d\n", ret); return -ENOMEM; } buf = prtd->audio_client->port[dir].buf; if (buf == NULL || buf[0].data == NULL) return -ENOMEM; pr_debug("%s:buf = %p\n", __func__, buf); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = buf[0].data; dma_buf->addr = buf[0].phys; dma_buf->bytes = runtime->hw.buffer_bytes_max; if (!dma_buf->area) return -ENOMEM; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .ioctl = snd_pcm_lib_ioctl, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, .mmap = msm_pcm_mmap, }; static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; int ret = 0; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); return ret; } static struct snd_soc_platform_driver msm_soc_platform = { .ops = &msm_pcm_ops, .pcm_new = msm_asoc_pcm_new, }; static __devinit int msm_pcm_probe(struct platform_device *pdev) { pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &msm_soc_platform); } static int msm_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver msm_pcm_driver = { .driver = { .name = "msm-pcm-dsp", 
.owner = THIS_MODULE, }, .probe = msm_pcm_probe, .remove = __devexit_p(msm_pcm_remove), }; static int __init msm_soc_platform_init(void) { init_waitqueue_head(&the_locks.enable_wait); init_waitqueue_head(&the_locks.eos_wait); init_waitqueue_head(&the_locks.write_wait); init_waitqueue_head(&the_locks.read_wait); return platform_driver_register(&msm_pcm_driver); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { platform_driver_unregister(&msm_pcm_driver); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
dennes544/aosp_kernel_lge_hammerhead_dennes544
arch/arm/mach-ep93xx/ts72xx.c
4734
6484
/* * arch/arm/mach-ep93xx/ts72xx.c * Technologic Systems TS72xx SBC support. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/m48t86.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <mach/hardware.h> #include <mach/ts72xx.h> #include <asm/hardware/vic.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include "soc.h" static struct map_desc ts72xx_io_desc[] __initdata = { { .virtual = TS72XX_MODEL_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_MODEL_PHYS_BASE), .length = TS72XX_MODEL_SIZE, .type = MT_DEVICE, }, { .virtual = TS72XX_OPTIONS_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE), .length = TS72XX_OPTIONS_SIZE, .type = MT_DEVICE, }, { .virtual = TS72XX_OPTIONS2_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE), .length = TS72XX_OPTIONS2_SIZE, .type = MT_DEVICE, }, { .virtual = TS72XX_RTC_INDEX_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_RTC_INDEX_PHYS_BASE), .length = TS72XX_RTC_INDEX_SIZE, .type = MT_DEVICE, }, { .virtual = TS72XX_RTC_DATA_VIRT_BASE, .pfn = __phys_to_pfn(TS72XX_RTC_DATA_PHYS_BASE), .length = TS72XX_RTC_DATA_SIZE, .type = MT_DEVICE, } }; static void __init ts72xx_map_io(void) { ep93xx_map_io(); iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc)); } /************************************************************************* * NAND flash *************************************************************************/ #define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */ #define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */ static void 
ts72xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; if (ctrl & NAND_CTRL_CHANGE) { void __iomem *addr = chip->IO_ADDR_R; unsigned char bits; addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE); bits = __raw_readb(addr) & ~0x07; bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */ bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */ bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */ __raw_writeb(bits, addr); } if (cmd != NAND_CMD_NONE) __raw_writeb(cmd, chip->IO_ADDR_W); } static int ts72xx_nand_device_ready(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; void __iomem *addr = chip->IO_ADDR_R; addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE); return !!(__raw_readb(addr) & 0x20); } static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL }; #define TS72XX_BOOTROM_PART_SIZE (SZ_16K) #define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) static struct mtd_partition ts72xx_nand_parts[] = { { .name = "TS-BOOTROM", .offset = 0, .size = TS72XX_BOOTROM_PART_SIZE, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "Linux", .offset = MTDPART_OFS_RETAIN, .size = TS72XX_REDBOOT_PART_SIZE, /* leave so much for last partition */ }, { .name = "RedBoot", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, }; static struct platform_nand_data ts72xx_nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .chip_delay = 15, .part_probe_types = ts72xx_nand_part_probes, .partitions = ts72xx_nand_parts, .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts), }, .ctrl = { .cmd_ctrl = ts72xx_nand_hwcontrol, .dev_ready = ts72xx_nand_device_ready, }, }; static struct resource ts72xx_nand_resource[] = { { .start = 0, /* filled in later */ .end = 0, /* filled in later */ .flags = IORESOURCE_MEM, }, }; static struct platform_device ts72xx_nand_flash = { .name = "gen_nand", .id = -1, .dev.platform_data = &ts72xx_nand_data, .resource = ts72xx_nand_resource, .num_resources = 
ARRAY_SIZE(ts72xx_nand_resource), }; static void __init ts72xx_register_flash(void) { /* * TS7200 has NOR flash all other TS72xx board have NAND flash. */ if (board_is_ts7200()) { ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); } else { resource_size_t start; if (is_ts9420_installed()) start = EP93XX_CS7_PHYS_BASE; else start = EP93XX_CS6_PHYS_BASE; ts72xx_nand_resource[0].start = start; ts72xx_nand_resource[0].end = start + SZ_16M - 1; platform_device_register(&ts72xx_nand_flash); } } static unsigned char ts72xx_rtc_readbyte(unsigned long addr) { __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE); } static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr) { __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE); } static struct m48t86_ops ts72xx_rtc_ops = { .readbyte = ts72xx_rtc_readbyte, .writebyte = ts72xx_rtc_writebyte, }; static struct platform_device ts72xx_rtc_device = { .name = "rtc-m48t86", .id = -1, .dev = { .platform_data = &ts72xx_rtc_ops, }, .num_resources = 0, }; static struct resource ts72xx_wdt_resources[] = { { .start = TS72XX_WDT_CONTROL_PHYS_BASE, .end = TS72XX_WDT_CONTROL_PHYS_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = TS72XX_WDT_FEED_PHYS_BASE, .end = TS72XX_WDT_FEED_PHYS_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ts72xx_wdt_device = { .name = "ts72xx-wdt", .id = -1, .num_resources = ARRAY_SIZE(ts72xx_wdt_resources), .resource = ts72xx_wdt_resources, }; static struct ep93xx_eth_data __initdata ts72xx_eth_data = { .phy_id = 1, }; static void __init ts72xx_init_machine(void) { ep93xx_init_devices(); ts72xx_register_flash(); platform_device_register(&ts72xx_rtc_device); platform_device_register(&ts72xx_wdt_device); ep93xx_register_eth(&ts72xx_eth_data, 1); } MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 
0x100, .map_io = ts72xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = ts72xx_init_machine, .restart = ep93xx_restart, MACHINE_END
gpl-2.0
lollipop-og/F93_LGE975_KK_Kernel
lib/hexdump.c
4734
7158
/* * lib/hexdump.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/types.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/export.h> const char hex_asc[] = "0123456789abcdef"; EXPORT_SYMBOL(hex_asc); /** * hex_to_bin - convert a hex digit to its real value * @ch: ascii character represents hex digit * * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad * input. */ int hex_to_bin(char ch) { if ((ch >= '0') && (ch <= '9')) return ch - '0'; ch = tolower(ch); if ((ch >= 'a') && (ch <= 'f')) return ch - 'a' + 10; return -1; } EXPORT_SYMBOL(hex_to_bin); /** * hex2bin - convert an ascii hexadecimal string to its binary representation * @dst: binary result * @src: ascii hexadecimal string * @count: result length * * Return 0 on success, -1 in case of bad input. */ int hex2bin(u8 *dst, const char *src, size_t count) { while (count--) { int hi = hex_to_bin(*src++); int lo = hex_to_bin(*src++); if ((hi < 0) || (lo < 0)) return -1; *dst++ = (hi << 4) | lo; } return 0; } EXPORT_SYMBOL(hex2bin); /** * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory * @buf: data blob to dump * @len: number of bytes in the @buf * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @linebuf: where to put the converted data * @linebuflen: total size of @linebuf, including space for terminating NUL * @ascii: include ASCII after the hex output * * hex_dump_to_buffer() works on one "line" of output at a time, i.e., * 16 or 32 bytes of input data converted to hex + ASCII output. * * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data * to a hex + ASCII dump at the supplied memory location. 
* The converted output is always NUL-terminated. * * E.g.: * hex_dump_to_buffer(frame->data, frame->len, 16, 1, * linebuf, sizeof(linebuf), true); * * example output buffer: * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO */ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii) { const u8 *ptr = buf; u8 ch; int j, lx = 0; int ascii_column; if (rowsize != 16 && rowsize != 32) rowsize = 16; if (!len) goto nil; if (len > rowsize) /* limit to one line at a time */ len = rowsize; if ((len % groupsize) != 0) /* no mixed size output */ groupsize = 1; switch (groupsize) { case 8: { const u64 *ptr8 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%16.16llx", j ? " " : "", (unsigned long long)*(ptr8 + j)); ascii_column = 17 * ngroups + 2; break; } case 4: { const u32 *ptr4 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%8.8x", j ? " " : "", *(ptr4 + j)); ascii_column = 9 * ngroups + 2; break; } case 2: { const u16 *ptr2 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%4.4x", j ? " " : "", *(ptr2 + j)); ascii_column = 5 * ngroups + 2; break; } default: for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ch = ptr[j]; linebuf[lx++] = hex_asc_hi(ch); linebuf[lx++] = hex_asc_lo(ch); linebuf[lx++] = ' '; } if (j) lx--; ascii_column = 3 * rowsize + 2; break; } if (!ascii) goto nil; while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) linebuf[lx++] = ' '; for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { ch = ptr[j]; linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; } nil: linebuf[lx++] = '\0'; } EXPORT_SYMBOL(hex_dump_to_buffer); #ifdef CONFIG_PRINTK /** * print_hex_dump - print a text hex dump to syslog for a binary blob of data * @level: kernel log level (e.g. 
KERN_DEBUG) * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @buf: data blob to dump * @len: number of bytes in the @buf * @ascii: include ASCII after the hex output * * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump * to the kernel log at the specified kernel log level, with an optional * leading prefix. * * print_hex_dump() works on one "line" of output at a time, i.e., * 16 or 32 bytes of input data converted to hex + ASCII output. * print_hex_dump() iterates over the entire input @buf, breaking it into * "line size" chunks to format and print. * * E.g.: * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, * 16, 1, frame->data, frame->len, true); * * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. 
*/ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { const u8 *ptr = buf; int i, linelen, remaining = len; unsigned char linebuf[32 * 3 + 2 + 32 + 1]; if (rowsize != 16 && rowsize != 32) rowsize = 16; for (i = 0; i < len; i += rowsize) { linelen = min(remaining, rowsize); remaining -= rowsize; hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, linebuf, sizeof(linebuf), ascii); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: printk("%s%s%p: %s\n", level, prefix_str, ptr + i, linebuf); break; case DUMP_PREFIX_OFFSET: printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); break; default: printk("%s%s%s\n", level, prefix_str, linebuf); break; } } } EXPORT_SYMBOL(print_hex_dump); /** * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @buf: data blob to dump * @len: number of bytes in the @buf * * Calls print_hex_dump(), with log level of KERN_DEBUG, * rowsize of 16, groupsize of 1, and ASCII output included. */ void print_hex_dump_bytes(const char *prefix_str, int prefix_type, const void *buf, size_t len) { print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, buf, len, true); } EXPORT_SYMBOL(print_hex_dump_bytes); #endif
gpl-2.0