repo_name
string
path
string
copies
string
size
string
content
string
license
string
EPDCenter/android_kernel_rockchip_mk908
sound/core/seq/oss/seq_oss.c
3098
7459
/* * OSS compatible sequencer driver * * registration of device and proc * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/initval.h> #include "seq_oss_device.h" #include "seq_oss_synth.h" /* * module option */ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("OSS-compatible sequencer module"); MODULE_LICENSE("GPL"); /* Takashi says this is really only for sound-service-0-, but this is OK. 
*/ MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_SEQUENCER); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_MUSIC); #ifdef SNDRV_SEQ_OSS_DEBUG module_param(seq_oss_debug, int, 0644); MODULE_PARM_DESC(seq_oss_debug, "debug option"); int seq_oss_debug = 0; #endif /* * prototypes */ static int register_device(void); static void unregister_device(void); #ifdef CONFIG_PROC_FS static int register_proc(void); static void unregister_proc(void); #else static inline int register_proc(void) { return 0; } static inline void unregister_proc(void) {} #endif static int odev_open(struct inode *inode, struct file *file); static int odev_release(struct inode *inode, struct file *file); static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset); static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset); static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static unsigned int odev_poll(struct file *file, poll_table * wait); /* * module interface */ static int __init alsa_seq_oss_init(void) { int rc; static struct snd_seq_dev_ops ops = { snd_seq_oss_synth_register, snd_seq_oss_synth_unregister, }; snd_seq_autoload_lock(); if ((rc = register_device()) < 0) goto error; if ((rc = register_proc()) < 0) { unregister_device(); goto error; } if ((rc = snd_seq_oss_create_client()) < 0) { unregister_proc(); unregister_device(); goto error; } if ((rc = snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OSS, &ops, sizeof(struct snd_seq_oss_reg))) < 0) { snd_seq_oss_delete_client(); unregister_proc(); unregister_device(); goto error; } /* success */ snd_seq_oss_synth_init(); error: snd_seq_autoload_unlock(); return rc; } static void __exit alsa_seq_oss_exit(void) { snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OSS); snd_seq_oss_delete_client(); unregister_proc(); unregister_device(); } module_init(alsa_seq_oss_init) module_exit(alsa_seq_oss_exit) /* * ALSA minor device interface */ static 
DEFINE_MUTEX(register_mutex); static int odev_open(struct inode *inode, struct file *file) { int level, rc; if (iminor(inode) == SNDRV_MINOR_OSS_MUSIC) level = SNDRV_SEQ_OSS_MODE_MUSIC; else level = SNDRV_SEQ_OSS_MODE_SYNTH; mutex_lock(&register_mutex); rc = snd_seq_oss_open(file, level); mutex_unlock(&register_mutex); return rc; } static int odev_release(struct inode *inode, struct file *file) { struct seq_oss_devinfo *dp; if ((dp = file->private_data) == NULL) return 0; snd_seq_oss_drain_write(dp); mutex_lock(&register_mutex); snd_seq_oss_release(dp); mutex_unlock(&register_mutex); return 0; } static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_read(dp, buf, count); } static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_write(dp, buf, count, file); } static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_ioctl(dp, cmd, arg); } #ifdef CONFIG_COMPAT #define odev_ioctl_compat odev_ioctl #else #define odev_ioctl_compat NULL #endif static unsigned int odev_poll(struct file *file, poll_table * wait) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_poll(dp, file, wait); } /* * registration of sequencer minor device */ static const struct file_operations seq_oss_f_ops = { .owner = THIS_MODULE, .read = odev_read, .write = odev_write, .open = odev_open, .release = odev_release, .poll = odev_poll, .unlocked_ioctl = odev_ioctl, .compat_ioctl = odev_ioctl_compat, .llseek = noop_llseek, }; static int __init register_device(void) { int rc; mutex_lock(&register_mutex); if ((rc = 
snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0, &seq_oss_f_ops, NULL, SNDRV_SEQ_OSS_DEVNAME)) < 0) { snd_printk(KERN_ERR "can't register device seq\n"); mutex_unlock(&register_mutex); return rc; } if ((rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC, NULL, 0, &seq_oss_f_ops, NULL, SNDRV_SEQ_OSS_DEVNAME)) < 0) { snd_printk(KERN_ERR "can't register device music\n"); snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0); mutex_unlock(&register_mutex); return rc; } debug_printk(("device registered\n")); mutex_unlock(&register_mutex); return 0; } static void unregister_device(void) { mutex_lock(&register_mutex); debug_printk(("device unregistered\n")); if (snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC, NULL, 0) < 0) snd_printk(KERN_ERR "error unregister device music\n"); if (snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0) < 0) snd_printk(KERN_ERR "error unregister device seq\n"); mutex_unlock(&register_mutex); } /* * /proc interface */ #ifdef CONFIG_PROC_FS static struct snd_info_entry *info_entry; static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buf) { mutex_lock(&register_mutex); snd_iprintf(buf, "OSS sequencer emulation version %s\n", SNDRV_SEQ_OSS_VERSION_STR); snd_seq_oss_system_info_read(buf); snd_seq_oss_synth_info_read(buf); snd_seq_oss_midi_info_read(buf); mutex_unlock(&register_mutex); } static int __init register_proc(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, SNDRV_SEQ_OSS_PROCNAME, snd_seq_root); if (entry == NULL) return -ENOMEM; entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = NULL; entry->c.text.read = info_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } info_entry = entry; return 0; } static void unregister_proc(void) { snd_info_free_entry(info_entry); info_entry = NULL; } #endif /* CONFIG_PROC_FS */
gpl-2.0
TheNotOnly/android_kernel_lge_jagnm_kk
sound/soc/omap/omap-pcm.c
4378
12271
/* * omap-pcm.c -- ALSA PCM interface for the OMAP SoC * * Copyright (C) 2008 Nokia Corporation * * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com> * Peter Ujfalusi <peter.ujfalusi@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <plat/dma.h> #include "omap-pcm.h" static const struct snd_pcm_hardware omap_pcm_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, .period_bytes_min = 32, .period_bytes_max = 64 * 1024, .periods_min = 2, .periods_max = 255, .buffer_bytes_max = 128 * 1024, }; struct omap_runtime_data { spinlock_t lock; struct omap_pcm_dma_data *dma_data; int dma_ch; int period_index; }; static void omap_pcm_dma_irq(int ch, u16 stat, void *data) { struct snd_pcm_substream *substream = data; struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; unsigned long flags; if ((cpu_is_omap1510())) { /* * OMAP1510 doesn't fully support DMA progress counter * and there is no software emulation implemented yet, * so have to maintain our own 
progress counters * that can be used by omap_pcm_pointer() instead. */ spin_lock_irqsave(&prtd->lock, flags); if ((stat == OMAP_DMA_LAST_IRQ) && (prtd->period_index == runtime->periods - 1)) { /* we are in sync, do nothing */ spin_unlock_irqrestore(&prtd->lock, flags); return; } if (prtd->period_index >= 0) { if (stat & OMAP_DMA_BLOCK_IRQ) { /* end of buffer reached, loop back */ prtd->period_index = 0; } else if (stat & OMAP_DMA_LAST_IRQ) { /* update the counter for the last period */ prtd->period_index = runtime->periods - 1; } else if (++prtd->period_index >= runtime->periods) { /* end of buffer missed? loop back */ prtd->period_index = 0; } } spin_unlock_irqrestore(&prtd->lock, flags); } snd_pcm_period_elapsed(substream); } /* this may get called several times by oss emulation */ static int omap_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data; int err = 0; dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!dma_data) return 0; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); runtime->dma_bytes = params_buffer_bytes(params); if (prtd->dma_data) return 0; prtd->dma_data = dma_data; err = omap_request_dma(dma_data->dma_req, dma_data->name, omap_pcm_dma_irq, substream, &prtd->dma_ch); if (!err) { /* * Link channel with itself so DMA doesn't need any * reprogramming while looping the buffer */ omap_dma_link_lch(prtd->dma_ch, prtd->dma_ch); } return err; } static int omap_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; if (prtd->dma_data == NULL) return 0; omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch); omap_free_dma(prtd->dma_ch); prtd->dma_data = NULL; snd_pcm_set_runtime_buffer(substream, NULL); return 0; } static int omap_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data = prtd->dma_data; struct omap_dma_channel_params dma_params; int bytes; /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!prtd->dma_data) return 0; memset(&dma_params, 0, sizeof(dma_params)); dma_params.data_type = dma_data->data_type; dma_params.trigger = dma_data->dma_req; dma_params.sync_mode = dma_data->sync_mode; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC; dma_params.src_start = runtime->dma_addr; dma_params.dst_start = dma_data->port_addr; dma_params.dst_port = OMAP_DMA_PORT_MPUI; dma_params.dst_fi = dma_data->packet_size; } else { dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = dma_data->port_addr; dma_params.dst_start = runtime->dma_addr; dma_params.src_port = OMAP_DMA_PORT_MPUI; dma_params.src_fi = dma_data->packet_size; } /* * Set DMA transfer frame size equal to ALSA period size and frame * count as no. of ALSA periods. Then with DMA frame interrupt enabled, * we can transfer the whole ALSA buffer with single DMA transfer but * still can get an interrupt at each period bounary */ bytes = snd_pcm_lib_period_bytes(substream); dma_params.elem_count = bytes >> dma_data->data_type; dma_params.frame_count = runtime->periods; omap_set_dma_params(prtd->dma_ch, &dma_params); if ((cpu_is_omap1510())) omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ | OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ); else if (!substream->runtime->no_period_wakeup) omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ); else { /* * No period wakeup: * we need to disable BLOCK_IRQ, which is enabled by the omap * dma core at request dma time. 
*/ omap_disable_dma_irq(prtd->dma_ch, OMAP_DMA_BLOCK_IRQ); } if (!(cpu_class_is_omap1())) { omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); } return 0; } static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data = prtd->dma_data; unsigned long flags; int ret = 0; spin_lock_irqsave(&prtd->lock, flags); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->period_index = 0; /* Configure McBSP internal buffer usage */ if (dma_data->set_threshold) dma_data->set_threshold(substream); omap_start_dma(prtd->dma_ch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->period_index = -1; omap_stop_dma(prtd->dma_ch); break; default: ret = -EINVAL; } spin_unlock_irqrestore(&prtd->lock, flags); return ret; } static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; dma_addr_t ptr; snd_pcm_uframes_t offset; if (cpu_is_omap1510()) { offset = prtd->period_index * runtime->period_size; } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { ptr = omap_get_dma_dst_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); } else { ptr = omap_get_dma_src_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); } if (offset >= runtime->buffer_size) offset = 0; return offset; } static int omap_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd; int ret; snd_soc_set_runtime_hwparams(substream, &omap_pcm_hardware); /* Ensure that buffer size is a multiple of period 
size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto out; prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); if (prtd == NULL) { ret = -ENOMEM; goto out; } spin_lock_init(&prtd->lock); runtime->private_data = prtd; out: return ret; } static int omap_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; kfree(runtime->private_data); return 0; } static int omap_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); } static struct snd_pcm_ops omap_pcm_ops = { .open = omap_pcm_open, .close = omap_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = omap_pcm_hw_params, .hw_free = omap_pcm_hw_free, .prepare = omap_pcm_prepare, .trigger = omap_pcm_trigger, .pointer = omap_pcm_pointer, .mmap = omap_pcm_mmap, }; static u64 omap_pcm_dmamask = DMA_BIT_MASK(64); static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = omap_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) return -ENOMEM; buf->bytes = size; return 0; } static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_writecombine(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } } static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct 
snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret = 0; if (!card->dev->dma_mask) card->dev->dma_mask = &omap_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(64); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = omap_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) goto out; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = omap_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) goto out; } out: /* free preallocated buffers in case of error */ if (ret) omap_pcm_free_dma_buffers(pcm); return ret; } static struct snd_soc_platform_driver omap_soc_platform = { .ops = &omap_pcm_ops, .pcm_new = omap_pcm_new, .pcm_free = omap_pcm_free_dma_buffers, }; static __devinit int omap_pcm_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &omap_soc_platform); } static int __devexit omap_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver omap_pcm_driver = { .driver = { .name = "omap-pcm-audio", .owner = THIS_MODULE, }, .probe = omap_pcm_probe, .remove = __devexit_p(omap_pcm_remove), }; module_platform_driver(omap_pcm_driver); MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); MODULE_DESCRIPTION("OMAP PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
TheNameIsNigel/android_kernel_carbon_msm8928
arch/sh/boards/mach-se/7724/irq.c
7450
3684
/* * linux/arch/sh/boards/se/7724/irq.c * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on linux/arch/sh/boards/se/7722/irq.c * Copyright (C) 2007 Nobuhiro Iwamatsu * * Hitachi UL SolutionEngine 7724 Support. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <asm/io.h> #include <mach-se/mach/se7724.h> struct fpga_irq { unsigned long sraddr; unsigned long mraddr; unsigned short mask; unsigned int base; }; static unsigned int fpga2irq(unsigned int irq) { if (irq >= IRQ0_BASE && irq <= IRQ0_END) return IRQ0_IRQ; else if (irq >= IRQ1_BASE && irq <= IRQ1_END) return IRQ1_IRQ; else return IRQ2_IRQ; } static struct fpga_irq get_fpga_irq(unsigned int irq) { struct fpga_irq set; switch (irq) { case IRQ0_IRQ: set.sraddr = IRQ0_SR; set.mraddr = IRQ0_MR; set.mask = IRQ0_MASK; set.base = IRQ0_BASE; break; case IRQ1_IRQ: set.sraddr = IRQ1_SR; set.mraddr = IRQ1_MR; set.mask = IRQ1_MASK; set.base = IRQ1_BASE; break; default: set.sraddr = IRQ2_SR; set.mraddr = IRQ2_MR; set.mask = IRQ2_MASK; set.base = IRQ2_BASE; break; } return set; } static void disable_se7724_irq(struct irq_data *data) { unsigned int irq = data->irq; struct fpga_irq set = get_fpga_irq(fpga2irq(irq)); unsigned int bit = irq - set.base; __raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr); } static void enable_se7724_irq(struct irq_data *data) { unsigned int irq = data->irq; struct fpga_irq set = get_fpga_irq(fpga2irq(irq)); unsigned int bit = irq - set.base; __raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr); } static struct irq_chip se7724_irq_chip __read_mostly = { .name = "SE7724-FPGA", .irq_mask = disable_se7724_irq, .irq_unmask = enable_se7724_irq, }; static void se7724_irq_demux(unsigned 
int irq, struct irq_desc *desc) { struct fpga_irq set = get_fpga_irq(irq); unsigned short intv = __raw_readw(set.sraddr); unsigned int ext_irq = set.base; intv &= set.mask; for (; intv; intv >>= 1, ext_irq++) { if (!(intv & 1)) continue; generic_handle_irq(ext_irq); } } /* * Initialize IRQ setting */ void __init init_se7724_IRQ(void) { int i, nid = cpu_to_node(boot_cpu_data); __raw_writew(0xffff, IRQ0_MR); /* mask all */ __raw_writew(0xffff, IRQ1_MR); /* mask all */ __raw_writew(0xffff, IRQ2_MR); /* mask all */ __raw_writew(0x0000, IRQ0_SR); /* clear irq */ __raw_writew(0x0000, IRQ1_SR); /* clear irq */ __raw_writew(0x0000, IRQ2_SR); /* clear irq */ __raw_writew(0x002a, IRQ_MODE); /* set irq type */ for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) { int irq, wanted; wanted = SE7724_FPGA_IRQ_BASE + i; irq = create_irq_nr(wanted, nid); if (unlikely(irq == 0)) { pr_err("%s: failed hooking irq %d for FPGA\n", __func__, wanted); return; } if (unlikely(irq != wanted)) { pr_err("%s: got irq %d but wanted %d, bailing.\n", __func__, irq, wanted); destroy_irq(irq); return; } irq_set_chip_and_handler_name(irq, &se7724_irq_chip, handle_level_irq, "level"); } irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux); irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux); irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux); irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW); }
gpl-2.0
kernel-hut/android_kernel_xiaomi_cancro
arch/unicore32/kernel/early_printk.c
7706
1301
/* * linux/arch/unicore32/kernel/early_printk.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/console.h> #include <linux/init.h> #include <linux/string.h> #include <mach/ocd.h> /* On-Chip-Debugger functions */ static void early_ocd_write(struct console *con, const char *s, unsigned n) { while (*s && n-- > 0) { if (*s == '\n') ocd_putc((int)'\r'); ocd_putc((int)*s); s++; } } static struct console early_ocd_console = { .name = "earlyocd", .write = early_ocd_write, .flags = CON_PRINTBUFFER, .index = -1, }; /* Direct interface for emergencies */ static struct console *early_console = &early_ocd_console; static int __initdata keep_early; static int __init setup_early_printk(char *buf) { if (!buf) return 0; if (strstr(buf, "keep")) keep_early = 1; if (!strncmp(buf, "ocd", 3)) early_console = &early_ocd_console; if (keep_early) early_console->flags &= ~CON_BOOT; else early_console->flags |= CON_BOOT; register_console(early_console); return 0; } early_param("earlyprintk", setup_early_printk);
gpl-2.0
ipaccess/fsm92xx-kernel-sources
arch/x86/kernel/cpu/vmware.c
7962
4182
/* * VMware Detection code. * * Copyright (C) 2008, VMware, Inc. * Author : Alok N Kataria <akataria@vmware.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/dmi.h> #include <linux/module.h> #include <asm/div64.h> #include <asm/x86_init.h> #include <asm/hypervisor.h> #define CPUID_VMWARE_INFO_LEAF 0x40000000 #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 #define VMWARE_HYPERVISOR_PORT 0x5658 #define VMWARE_PORT_CMD_GETVERSION 10 #define VMWARE_PORT_CMD_GETHZ 45 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ __asm__("inl (%%dx)" : \ "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ "0"(VMWARE_HYPERVISOR_MAGIC), \ "1"(VMWARE_PORT_CMD_##cmd), \ "2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \ "memory"); static inline int __vmware_platform(void) { uint32_t eax, ebx, ecx, edx; VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx); return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC; } static unsigned long vmware_get_tsc_khz(void) { uint64_t tsc_hz, lpj; uint32_t eax, ebx, ecx, edx; VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); tsc_hz = eax | (((uint64_t)ebx) << 32); do_div(tsc_hz, 1000); BUG_ON(tsc_hz >> 32); printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n", (unsigned long) tsc_hz / 1000, (unsigned long) tsc_hz % 1000); if (!preset_lpj) { lpj = ((u64)tsc_hz * 1000); 
do_div(lpj, HZ); preset_lpj = lpj; } return tsc_hz; } static void __init vmware_platform_setup(void) { uint32_t eax, ebx, ecx, edx; VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); if (ebx != UINT_MAX) x86_platform.calibrate_tsc = vmware_get_tsc_khz; else printk(KERN_WARNING "Failed to get TSC freq from the hypervisor\n"); } /* * While checking the dmi string information, just checking the product * serial key should be enough, as this will always have a VMware * specific string when running under VMware hypervisor. */ static bool __init vmware_platform(void) { if (cpu_has_hypervisor) { unsigned int eax; unsigned int hyper_vendor_id[3]; cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], &hyper_vendor_id[1], &hyper_vendor_id[2]); if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) return true; } else if (dmi_available && dmi_name_in_serial("VMware") && __vmware_platform()) return true; return false; } /* * VMware hypervisor takes care of exporting a reliable TSC to the guest. * Still, due to timing difference when running on virtual cpus, the TSC can * be marked as unstable in some cases. For example, the TSC sync check at * bootup can fail due to a marginal offset between vcpus' TSCs (though the * TSCs do not drift from each other). Also, the ACPI PM timer clocksource * is not suitable as a watchdog when running on a hypervisor because the * kernel may miss a wrap of the counter if the vcpu is descheduled for a * long time. To skip these checks at runtime we set these capability bits, * so that the kernel could just trust the hypervisor with providing a * reliable virtual TSC that is suitable for timekeeping. 
*/ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); } const __refconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, .set_cpu_features = vmware_set_cpu_features, .init_platform = vmware_platform_setup, }; EXPORT_SYMBOL(x86_hyper_vmware);
gpl-2.0
Eliminater74/LGD851_G3_L_Kernel_v20E
sound/drivers/opl3/opl3_lib.c
7962
14114
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz>, * Hannu Savolainen 1993-1996, * Rob Hooft * * Routines for control of AdLib FM cards (OPL2/OPL3/OPL4 chips) * * Most if code is ported from OSS/Lite. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <sound/opl3.h> #include <asm/io.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/ioport.h> #include <sound/minors.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Hannu Savolainen 1993-1996, Rob Hooft"); MODULE_DESCRIPTION("Routines for control of AdLib FM cards (OPL2/OPL3/OPL4 chips)"); MODULE_LICENSE("GPL"); extern char snd_opl3_regmap[MAX_OPL2_VOICES][4]; static void snd_opl2_command(struct snd_opl3 * opl3, unsigned short cmd, unsigned char val) { unsigned long flags; unsigned long port; /* * The original 2-OP synth requires a quite long delay * after writing to a register. */ port = (cmd & OPL3_RIGHT) ? 
opl3->r_port : opl3->l_port; spin_lock_irqsave(&opl3->reg_lock, flags); outb((unsigned char) cmd, port); udelay(10); outb((unsigned char) val, port + 1); udelay(30); spin_unlock_irqrestore(&opl3->reg_lock, flags); } static void snd_opl3_command(struct snd_opl3 * opl3, unsigned short cmd, unsigned char val) { unsigned long flags; unsigned long port; /* * The OPL-3 survives with just two INBs * after writing to a register. */ port = (cmd & OPL3_RIGHT) ? opl3->r_port : opl3->l_port; spin_lock_irqsave(&opl3->reg_lock, flags); outb((unsigned char) cmd, port); inb(opl3->l_port); inb(opl3->l_port); outb((unsigned char) val, port + 1); inb(opl3->l_port); inb(opl3->l_port); spin_unlock_irqrestore(&opl3->reg_lock, flags); } static int snd_opl3_detect(struct snd_opl3 * opl3) { /* * This function returns 1 if the FM chip is present at the given I/O port * The detection algorithm plays with the timer built in the FM chip and * looks for a change in the status register. * * Note! The timers of the FM chip are not connected to AdLib (and compatible) * boards. * * Note2! The chip is initialized if detected. 
*/ unsigned char stat1, stat2, signature; /* Reset timers 1 and 2 */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, OPL3_TIMER1_MASK | OPL3_TIMER2_MASK); /* Reset the IRQ of the FM chip */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, OPL3_IRQ_RESET); signature = stat1 = inb(opl3->l_port); /* Status register */ if ((stat1 & 0xe0) != 0x00) { /* Should be 0x00 */ snd_printd("OPL3: stat1 = 0x%x\n", stat1); return -ENODEV; } /* Set timer1 to 0xff */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER1, 0xff); /* Unmask and start timer 1 */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, OPL3_TIMER2_MASK | OPL3_TIMER1_START); /* Now we have to delay at least 80us */ udelay(200); /* Read status after timers have expired */ stat2 = inb(opl3->l_port); /* Stop the timers */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, OPL3_TIMER1_MASK | OPL3_TIMER2_MASK); /* Reset the IRQ of the FM chip */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, OPL3_IRQ_RESET); if ((stat2 & 0xe0) != 0xc0) { /* There is no YM3812 */ snd_printd("OPL3: stat2 = 0x%x\n", stat2); return -ENODEV; } /* If the toplevel code knows exactly the type of chip, don't try to detect it. */ if (opl3->hardware != OPL3_HW_AUTO) return 0; /* There is a FM chip on this address. Detect the type (OPL2 to OPL4) */ if (signature == 0x06) { /* OPL2 */ opl3->hardware = OPL3_HW_OPL2; } else { /* * If we had an OPL4 chip, opl3->hardware would have been set * by the OPL4 driver; so we can assume OPL3 here. 
*/ if (snd_BUG_ON(!opl3->r_port)) return -ENODEV; opl3->hardware = OPL3_HW_OPL3; } return 0; } /* * AdLib timers */ /* * Timer 1 - 80us */ static int snd_opl3_timer1_start(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; unsigned int ticks; struct snd_opl3 *opl3; opl3 = snd_timer_chip(timer); spin_lock_irqsave(&opl3->timer_lock, flags); ticks = timer->sticks; tmp = (opl3->timer_enable | OPL3_TIMER1_START) & ~OPL3_TIMER1_MASK; opl3->timer_enable = tmp; opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER1, 256 - ticks); /* timer 1 count */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, tmp); /* enable timer 1 IRQ */ spin_unlock_irqrestore(&opl3->timer_lock, flags); return 0; } static int snd_opl3_timer1_stop(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; struct snd_opl3 *opl3; opl3 = snd_timer_chip(timer); spin_lock_irqsave(&opl3->timer_lock, flags); tmp = (opl3->timer_enable | OPL3_TIMER1_MASK) & ~OPL3_TIMER1_START; opl3->timer_enable = tmp; opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, tmp); /* disable timer #1 */ spin_unlock_irqrestore(&opl3->timer_lock, flags); return 0; } /* * Timer 2 - 320us */ static int snd_opl3_timer2_start(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; unsigned int ticks; struct snd_opl3 *opl3; opl3 = snd_timer_chip(timer); spin_lock_irqsave(&opl3->timer_lock, flags); ticks = timer->sticks; tmp = (opl3->timer_enable | OPL3_TIMER2_START) & ~OPL3_TIMER2_MASK; opl3->timer_enable = tmp; opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER2, 256 - ticks); /* timer 1 count */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, tmp); /* enable timer 1 IRQ */ spin_unlock_irqrestore(&opl3->timer_lock, flags); return 0; } static int snd_opl3_timer2_stop(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; struct snd_opl3 *opl3; opl3 = snd_timer_chip(timer); spin_lock_irqsave(&opl3->timer_lock, flags); tmp = (opl3->timer_enable | OPL3_TIMER2_MASK) & 
~OPL3_TIMER2_START; opl3->timer_enable = tmp; opl3->command(opl3, OPL3_LEFT | OPL3_REG_TIMER_CONTROL, tmp); /* disable timer #1 */ spin_unlock_irqrestore(&opl3->timer_lock, flags); return 0; } /* */ static struct snd_timer_hardware snd_opl3_timer1 = { .flags = SNDRV_TIMER_HW_STOP, .resolution = 80000, .ticks = 256, .start = snd_opl3_timer1_start, .stop = snd_opl3_timer1_stop, }; static struct snd_timer_hardware snd_opl3_timer2 = { .flags = SNDRV_TIMER_HW_STOP, .resolution = 320000, .ticks = 256, .start = snd_opl3_timer2_start, .stop = snd_opl3_timer2_stop, }; static int snd_opl3_timer1_init(struct snd_opl3 * opl3, int timer_no) { struct snd_timer *timer = NULL; struct snd_timer_id tid; int err; tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = opl3->card->number; tid.device = timer_no; tid.subdevice = 0; if ((err = snd_timer_new(opl3->card, "AdLib timer #1", &tid, &timer)) >= 0) { strcpy(timer->name, "AdLib timer #1"); timer->private_data = opl3; timer->hw = snd_opl3_timer1; } opl3->timer1 = timer; return err; } static int snd_opl3_timer2_init(struct snd_opl3 * opl3, int timer_no) { struct snd_timer *timer = NULL; struct snd_timer_id tid; int err; tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = opl3->card->number; tid.device = timer_no; tid.subdevice = 0; if ((err = snd_timer_new(opl3->card, "AdLib timer #2", &tid, &timer)) >= 0) { strcpy(timer->name, "AdLib timer #2"); timer->private_data = opl3; timer->hw = snd_opl3_timer2; } opl3->timer2 = timer; return err; } /* */ void snd_opl3_interrupt(struct snd_hwdep * hw) { unsigned char status; struct snd_opl3 *opl3; struct snd_timer *timer; if (hw == NULL) return; opl3 = hw->private_data; status = inb(opl3->l_port); #if 0 snd_printk(KERN_DEBUG "AdLib IRQ status = 0x%x\n", status); #endif if (!(status & 0x80)) return; if (status & 0x40) { timer = opl3->timer1; snd_timer_interrupt(timer, timer->sticks); } if (status & 0x20) { timer = 
opl3->timer2; snd_timer_interrupt(timer, timer->sticks); } } EXPORT_SYMBOL(snd_opl3_interrupt); /* */ static int snd_opl3_free(struct snd_opl3 *opl3) { if (snd_BUG_ON(!opl3)) return -ENXIO; if (opl3->private_free) opl3->private_free(opl3); snd_opl3_clear_patches(opl3); release_and_free_resource(opl3->res_l_port); release_and_free_resource(opl3->res_r_port); kfree(opl3); return 0; } static int snd_opl3_dev_free(struct snd_device *device) { struct snd_opl3 *opl3 = device->device_data; return snd_opl3_free(opl3); } int snd_opl3_new(struct snd_card *card, unsigned short hardware, struct snd_opl3 **ropl3) { static struct snd_device_ops ops = { .dev_free = snd_opl3_dev_free, }; struct snd_opl3 *opl3; int err; *ropl3 = NULL; opl3 = kzalloc(sizeof(*opl3), GFP_KERNEL); if (opl3 == NULL) { snd_printk(KERN_ERR "opl3: cannot allocate\n"); return -ENOMEM; } opl3->card = card; opl3->hardware = hardware; spin_lock_init(&opl3->reg_lock); spin_lock_init(&opl3->timer_lock); if ((err = snd_device_new(card, SNDRV_DEV_CODEC, opl3, &ops)) < 0) { snd_opl3_free(opl3); return err; } *ropl3 = opl3; return 0; } EXPORT_SYMBOL(snd_opl3_new); int snd_opl3_init(struct snd_opl3 *opl3) { if (! 
opl3->command) { printk(KERN_ERR "snd_opl3_init: command not defined!\n"); return -EINVAL; } opl3->command(opl3, OPL3_LEFT | OPL3_REG_TEST, OPL3_ENABLE_WAVE_SELECT); /* Melodic mode */ opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, 0x00); switch (opl3->hardware & OPL3_HW_MASK) { case OPL3_HW_OPL2: opl3->max_voices = MAX_OPL2_VOICES; break; case OPL3_HW_OPL3: case OPL3_HW_OPL4: opl3->max_voices = MAX_OPL3_VOICES; /* Enter OPL3 mode */ opl3->command(opl3, OPL3_RIGHT | OPL3_REG_MODE, OPL3_OPL3_ENABLE); } return 0; } EXPORT_SYMBOL(snd_opl3_init); int snd_opl3_create(struct snd_card *card, unsigned long l_port, unsigned long r_port, unsigned short hardware, int integrated, struct snd_opl3 ** ropl3) { struct snd_opl3 *opl3; int err; *ropl3 = NULL; if ((err = snd_opl3_new(card, hardware, &opl3)) < 0) return err; if (! integrated) { if ((opl3->res_l_port = request_region(l_port, 2, "OPL2/3 (left)")) == NULL) { snd_printk(KERN_ERR "opl3: can't grab left port 0x%lx\n", l_port); snd_device_free(card, opl3); return -EBUSY; } if (r_port != 0 && (opl3->res_r_port = request_region(r_port, 2, "OPL2/3 (right)")) == NULL) { snd_printk(KERN_ERR "opl3: can't grab right port 0x%lx\n", r_port); snd_device_free(card, opl3); return -EBUSY; } } opl3->l_port = l_port; opl3->r_port = r_port; switch (opl3->hardware) { /* some hardware doesn't support timers */ case OPL3_HW_OPL3_SV: case OPL3_HW_OPL3_CS: case OPL3_HW_OPL3_FM801: opl3->command = &snd_opl3_command; break; default: opl3->command = &snd_opl2_command; if ((err = snd_opl3_detect(opl3)) < 0) { snd_printd("OPL2/3 chip not detected at 0x%lx/0x%lx\n", opl3->l_port, opl3->r_port); snd_device_free(card, opl3); return err; } /* detect routine returns correct hardware type */ switch (opl3->hardware & OPL3_HW_MASK) { case OPL3_HW_OPL3: case OPL3_HW_OPL4: opl3->command = &snd_opl3_command; } } snd_opl3_init(opl3); *ropl3 = opl3; return 0; } EXPORT_SYMBOL(snd_opl3_create); int snd_opl3_timer_new(struct snd_opl3 * opl3, int timer1_dev, 
int timer2_dev) { int err; if (timer1_dev >= 0) if ((err = snd_opl3_timer1_init(opl3, timer1_dev)) < 0) return err; if (timer2_dev >= 0) { if ((err = snd_opl3_timer2_init(opl3, timer2_dev)) < 0) { snd_device_free(opl3->card, opl3->timer1); opl3->timer1 = NULL; return err; } } return 0; } EXPORT_SYMBOL(snd_opl3_timer_new); int snd_opl3_hwdep_new(struct snd_opl3 * opl3, int device, int seq_device, struct snd_hwdep ** rhwdep) { struct snd_hwdep *hw; struct snd_card *card = opl3->card; int err; if (rhwdep) *rhwdep = NULL; /* create hardware dependent device (direct FM) */ if ((err = snd_hwdep_new(card, "OPL2/OPL3", device, &hw)) < 0) { snd_device_free(card, opl3); return err; } hw->private_data = opl3; hw->exclusive = 1; #ifdef CONFIG_SND_OSSEMUL if (device == 0) { hw->oss_type = SNDRV_OSS_DEVICE_TYPE_DMFM; sprintf(hw->oss_dev, "dmfm%i", card->number); } #endif strcpy(hw->name, hw->id); switch (opl3->hardware & OPL3_HW_MASK) { case OPL3_HW_OPL2: strcpy(hw->name, "OPL2 FM"); hw->iface = SNDRV_HWDEP_IFACE_OPL2; break; case OPL3_HW_OPL3: strcpy(hw->name, "OPL3 FM"); hw->iface = SNDRV_HWDEP_IFACE_OPL3; break; case OPL3_HW_OPL4: strcpy(hw->name, "OPL4 FM"); hw->iface = SNDRV_HWDEP_IFACE_OPL4; break; } /* operators - only ioctl */ hw->ops.open = snd_opl3_open; hw->ops.ioctl = snd_opl3_ioctl; hw->ops.write = snd_opl3_write; hw->ops.release = snd_opl3_release; opl3->hwdep = hw; opl3->seq_dev_num = seq_device; #if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE)) if (snd_seq_device_new(card, seq_device, SNDRV_SEQ_DEV_ID_OPL3, sizeof(struct snd_opl3 *), &opl3->seq_dev) >= 0) { strcpy(opl3->seq_dev->name, hw->name); *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(opl3->seq_dev) = opl3; } #endif if (rhwdep) *rhwdep = hw; return 0; } EXPORT_SYMBOL(snd_opl3_hwdep_new); /* * INIT part */ static int __init alsa_opl3_init(void) { return 0; } static void __exit alsa_opl3_exit(void) { } module_init(alsa_opl3_init) module_exit(alsa_opl3_exit)
gpl-2.0
faux123/xperia_8974
drivers/net/wireless/wl1251/init.c
11034
9169
/* * This file is part of wl1251 * * Copyright (C) 2009 Nokia Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "init.h" #include "wl12xx_80211.h" #include "acx.h" #include "cmd.h" #include "reg.h" int wl1251_hw_init_hwenc_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_feature_cfg(wl); if (ret < 0) { wl1251_warning("couldn't set feature config"); return ret; } ret = wl1251_acx_default_key(wl, wl->default_key); if (ret < 0) { wl1251_warning("couldn't set default key"); return ret; } return 0; } int wl1251_hw_init_templates_config(struct wl1251 *wl) { int ret; u8 partial_vbm[PARTIAL_VBM_MAX]; /* send empty templates for fw memory reservation */ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL, sizeof(struct wl12xx_probe_req_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL, sizeof(struct wl12xx_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL, sizeof(struct wl12xx_ps_poll_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL, sizeof (struct wl12xx_qos_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL, sizeof (struct wl12xx_probe_resp_template)); if (ret < 0) return ret; ret = 
wl1251_cmd_template_set(wl, CMD_BEACON, NULL, sizeof (struct wl12xx_beacon_template)); if (ret < 0) return ret; /* tim templates, first reserve space then allocate an empty one */ memset(partial_vbm, 0, PARTIAL_VBM_MAX); ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0); if (ret < 0) return ret; ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0); if (ret < 0) return ret; return 0; } int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter) { int ret; ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); if (ret < 0) return ret; ret = wl1251_acx_rx_config(wl, config, filter); if (ret < 0) return ret; return 0; } int wl1251_hw_init_phy_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_pd_threshold(wl); if (ret < 0) return ret; ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME); if (ret < 0) return ret; ret = wl1251_acx_group_address_tbl(wl); if (ret < 0) return ret; ret = wl1251_acx_service_period_timeout(wl); if (ret < 0) return ret; ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_filter(struct wl1251 *wl) { int ret; /* disable beacon filtering at this stage */ ret = wl1251_acx_beacon_filter_opt(wl, false); if (ret < 0) return ret; ret = wl1251_acx_beacon_filter_table(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_pta(struct wl1251 *wl) { int ret; ret = wl1251_acx_sg_enable(wl); if (ret < 0) return ret; ret = wl1251_acx_sg_cfg(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_energy_detection(struct wl1251 *wl) { int ret; ret = wl1251_acx_cca_threshold(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl) { int ret; ret = wl1251_acx_bcn_dtim_options(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_power_auth(struct wl1251 *wl) { return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); } int wl1251_hw_init_mem_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_mem_cfg(wl); if 
(ret < 0) return ret; wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map), GFP_KERNEL); if (!wl->target_mem_map) { wl1251_error("couldn't allocate target memory map"); return -ENOMEM; } /* we now ask for the firmware built memory map */ ret = wl1251_acx_mem_map(wl, wl->target_mem_map, sizeof(struct wl1251_acx_mem_map)); if (ret < 0) { wl1251_error("couldn't retrieve firmware memory map"); kfree(wl->target_mem_map); wl->target_mem_map = NULL; return ret; } return 0; } static int wl1251_hw_init_txq_fill(u8 qid, struct acx_tx_queue_qos_config *config, u32 num_blocks) { config->qid = qid; switch (qid) { case QOS_AC_BE: config->high_threshold = (QOS_TX_HIGH_BE_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BE_DEF * num_blocks) / 100; break; case QOS_AC_BK: config->high_threshold = (QOS_TX_HIGH_BK_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BK_DEF * num_blocks) / 100; break; case QOS_AC_VI: config->high_threshold = (QOS_TX_HIGH_VI_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VI_DEF * num_blocks) / 100; break; case QOS_AC_VO: config->high_threshold = (QOS_TX_HIGH_VO_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VO_DEF * num_blocks) / 100; break; default: wl1251_error("Invalid TX queue id: %d", qid); return -EINVAL; } return 0; } static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl) { struct acx_tx_queue_qos_config *config; struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map; int ret, i; wl1251_debug(DEBUG_ACX, "acx tx queue config"); config = kzalloc(sizeof(*config), GFP_KERNEL); if (!config) { ret = -ENOMEM; goto out; } for (i = 0; i < MAX_NUM_OF_AC; i++) { ret = wl1251_hw_init_txq_fill(i, config, wl_mem_map->num_tx_mem_blocks); if (ret < 0) goto out; ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG, config, sizeof(*config)); if (ret < 0) goto out; } wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE); wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, 
TXOP_BK); wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI); wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO); out: kfree(config); return ret; } static int wl1251_hw_init_data_path_config(struct wl1251 *wl) { int ret; /* asking for the data path parameters */ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), GFP_KERNEL); if (!wl->data_path) { wl1251_error("Couldnt allocate data path parameters"); return -ENOMEM; } ret = wl1251_acx_data_path_params(wl, wl->data_path); if (ret < 0) { kfree(wl->data_path); wl->data_path = NULL; return ret; } return 0; } int wl1251_hw_init(struct wl1251 *wl) { struct wl1251_acx_mem_map *wl_mem_map; int ret; ret = wl1251_hw_init_hwenc_config(wl); if (ret < 0) return ret; /* Template settings */ ret = wl1251_hw_init_templates_config(wl); if (ret < 0) return ret; /* Default memory configuration */ ret = wl1251_hw_init_mem_config(wl); if (ret < 0) return ret; /* Default data path configuration */ ret = wl1251_hw_init_data_path_config(wl); if (ret < 0) goto out_free_memmap; /* RX config */ ret = wl1251_hw_init_rx_config(wl, RX_CFG_PROMISCUOUS | RX_CFG_TSF, RX_FILTER_OPTION_DEF); /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, RX_FILTER_OPTION_FILTER_ALL); */ if (ret < 0) goto out_free_data_path; /* TX queues config */ ret = wl1251_hw_init_tx_queue_config(wl); if (ret < 0) goto out_free_data_path; /* PHY layer config */ ret = wl1251_hw_init_phy_config(wl); if (ret < 0) goto out_free_data_path; /* Initialize connection monitoring thresholds */ ret = wl1251_acx_conn_monit_params(wl); if (ret < 0) goto out_free_data_path; /* Beacon filtering */ ret = wl1251_hw_init_beacon_filter(wl); if (ret < 0) goto out_free_data_path; /* Bluetooth WLAN coexistence */ ret = wl1251_hw_init_pta(wl); if (ret < 0) goto out_free_data_path; /* Energy detection */ ret = wl1251_hw_init_energy_detection(wl); if (ret < 0) goto out_free_data_path; /* Beacons and boradcast settings */ ret = wl1251_hw_init_beacon_broadcast(wl); 
if (ret < 0) goto out_free_data_path; /* Enable data path */ ret = wl1251_cmd_data_path(wl, wl->channel, 1); if (ret < 0) goto out_free_data_path; /* Default power state */ ret = wl1251_hw_init_power_auth(wl); if (ret < 0) goto out_free_data_path; wl_mem_map = wl->target_mem_map; wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x", wl_mem_map->num_tx_mem_blocks, wl->data_path->tx_control_addr, wl_mem_map->num_rx_mem_blocks, wl->data_path->rx_control_addr); return 0; out_free_data_path: kfree(wl->data_path); out_free_memmap: kfree(wl->target_mem_map); return ret; }
gpl-2.0
carlocaione/geniatech-kernel
drivers/amlogic/dvb/ite9173/H_tuner.c
27
1893
/** * @(#)Afatech_SAMBA.cpp * * ========================================================== * Version: 2.0 * Date: 2009.06.15 * ========================================================== * * ========================================================== * History: * * Date Author Description * ---------------------------------------------------------- * * 2009.06.15 M.-C. Ho new tuner * ========================================================== * * Copyright 2009 Afatech, Inc. All rights reserved. * */ //#include <stdio.h> #include "type.h" #include "error.h" #include "user.h" #include "register.h" #define __SAMBADEMOD_H__ #include "standard.h" #include "tuner.h" #include "samba.h" #include "Afa_Samba_Script.h" Demodulator* Afatech_SAMBA_demodulator; Dword SAMBA_open ( IN Demodulator* demodulator ) { Dword error = Error_NO_ERROR; Afatech_SAMBA_demodulator = demodulator; error = samba_init(); return (error); } Dword SAMBA_close ( IN Demodulator* demodulator ) { return (Error_NO_ERROR); } Dword SAMBA_set ( IN Demodulator* demodulator, IN Word bandwidth, IN Dword frequency ) { Dword error = Error_NO_ERROR; Afatech_SAMBA_demodulator = demodulator; error = samba_setfreq((unsigned int)bandwidth, (unsigned int)frequency); return (error); } TunerDescription tunerDescription= { SAMBA_open, SAMBA_close, SAMBA_set, SAMBA_scripts, SAMBA_scriptSets, SAMBA_ADDRESS, /** tuner i2c address */ 2, /** length of tuner register address */ 0, /** tuner if */ False, /** spectrum inverse */ 0x70, /** tuner id */ }; Dword SAMBA_supportLNA ( IN Demodulator* demodulator, IN Byte supporttype ) { Dword error = Error_INVALID_DEV_TYPE; return error; }
gpl-2.0
prohaska7/mariadb-server
storage/mroonga/vendor/groonga/lib/string.c
27
11193
/* -*- c-basic-offset: 2 -*- */ /* Copyright(C) 2009-2012 Brazil This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "grn.h" #include <string.h> #include "grn_string.h" #include "grn_normalizer.h" #include "grn_str.h" #include "grn_util.h" #include <groonga/tokenizer.h> static grn_string * grn_fake_string_open(grn_ctx *ctx, grn_string *string) { /* TODO: support GRN_STRING_REMOVE_BLANK flag and ctypes */ grn_string *nstr = string; const char *str; unsigned int str_len; str = nstr->original; str_len = nstr->original_length_in_bytes; if (!(nstr->normalized = GRN_MALLOC(str_len + 1))) { ERR(GRN_NO_MEMORY_AVAILABLE, "[strinig][fake] failed to allocate normalized text space"); grn_string_close(ctx, (grn_obj *)nstr); return NULL; } if (nstr->flags & GRN_STRING_REMOVE_TOKENIZED_DELIMITER && ctx->encoding == GRN_ENC_UTF8) { int char_length; const char *source_current = str; const char *source_end = str + str_len; char *destination = nstr->normalized; unsigned int destination_length = 0; while ((char_length = grn_charlen(ctx, source_current, source_end)) > 0) { if (!grn_tokenizer_is_tokenized_delimiter(ctx, source_current, char_length, ctx->encoding)) { grn_memcpy(destination, source_current, char_length); destination += char_length; destination_length += char_length; } source_current += char_length; } nstr->normalized[destination_length] = '\0'; nstr->normalized_length_in_bytes = 
destination_length; } else { grn_memcpy(nstr->normalized, str, str_len); nstr->normalized[str_len] = '\0'; nstr->normalized_length_in_bytes = str_len; } if (nstr->flags & GRN_STRING_WITH_CHECKS) { int16_t f = 0; unsigned char c; size_t i; if (!(nstr->checks = (int16_t *) GRN_MALLOC(sizeof(int16_t) * str_len))) { grn_string_close(ctx, (grn_obj *)nstr); ERR(GRN_NO_MEMORY_AVAILABLE, "[strinig][fake] failed to allocate checks space"); return NULL; } switch (nstr->encoding) { case GRN_ENC_EUC_JP: for (i = 0; i < str_len; i++) { if (!f) { c = (unsigned char) str[i]; f = ((c >= 0xa1U && c <= 0xfeU) || c == 0x8eU ? 2 : (c == 0x8fU ? 3 : 1) ); nstr->checks[i] = f; } else { nstr->checks[i] = 0; } f--; } break; case GRN_ENC_SJIS: for (i = 0; i < str_len; i++) { if (!f) { c = (unsigned char) str[i]; f = (c >= 0x81U && ((c <= 0x9fU) || (c >= 0xe0U && c <= 0xfcU)) ? 2 : 1); nstr->checks[i] = f; } else { nstr->checks[i] = 0; } f--; } break; case GRN_ENC_UTF8: for (i = 0; i < str_len; i++) { if (!f) { c = (unsigned char) str[i]; f = (c & 0x80U ? (c & 0x20U ? (c & 0x10U ? 
4 : 3) : 2) : 1); nstr->checks[i] = f; } else { nstr->checks[i] = 0; } f--; } break; default: for (i = 0; i < str_len; i++) { nstr->checks[i] = 1; } break; } } return nstr; } grn_obj * grn_string_open_(grn_ctx *ctx, const char *str, unsigned int str_len, grn_obj *normalizer, int flags, grn_encoding encoding) { grn_string *string; grn_obj *obj; grn_bool is_normalizer_auto; if (!str || !str_len) { return NULL; } is_normalizer_auto = (normalizer == GRN_NORMALIZER_AUTO); if (is_normalizer_auto) { normalizer = grn_ctx_get(ctx, GRN_NORMALIZER_AUTO_NAME, -1); if (!normalizer) { ERR(GRN_INVALID_ARGUMENT, "[string][open] NormalizerAuto normalizer isn't available"); return NULL; } } string = GRN_MALLOCN(grn_string, 1); if (!string) { if (is_normalizer_auto) { grn_obj_unlink(ctx, normalizer); } GRN_LOG(ctx, GRN_LOG_ALERT, "[string][open] failed to allocate memory"); return NULL; } obj = (grn_obj *)string; GRN_OBJ_INIT(obj, GRN_STRING, GRN_OBJ_ALLOCATED, GRN_ID_NIL); string->original = str; string->original_length_in_bytes = str_len; string->normalized = NULL; string->normalized_length_in_bytes = 0; string->n_characters = 0; string->checks = NULL; string->ctypes = NULL; string->encoding = encoding; string->flags = flags; if (!normalizer) { return (grn_obj *)grn_fake_string_open(ctx, string); } grn_normalizer_normalize(ctx, normalizer, (grn_obj *)string); if (ctx->rc) { grn_obj_close(ctx, obj); obj = NULL; } if (is_normalizer_auto) { grn_obj_unlink(ctx, normalizer); } return obj; } grn_obj * grn_string_open(grn_ctx *ctx, const char *str, unsigned int str_len, grn_obj *normalizer, int flags) { return grn_string_open_(ctx, str, str_len, normalizer, flags, ctx->encoding); } grn_rc grn_string_get_original(grn_ctx *ctx, grn_obj *string, const char **original, unsigned int *length_in_bytes) { grn_rc rc; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { if (original) { *original = string_->original; } if (length_in_bytes) { *length_in_bytes = 
string_->original_length_in_bytes; } rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } GRN_API_RETURN(rc); } int grn_string_get_flags(grn_ctx *ctx, grn_obj *string) { int flags = 0; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { flags = string_->flags; } GRN_API_RETURN(flags); } grn_rc grn_string_get_normalized(grn_ctx *ctx, grn_obj *string, const char **normalized, unsigned int *length_in_bytes, unsigned int *n_characters) { grn_rc rc; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { if (normalized) { *normalized = string_->normalized; } if (length_in_bytes) { *length_in_bytes = string_->normalized_length_in_bytes; } if (n_characters) { *n_characters = string_->n_characters; } rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } GRN_API_RETURN(rc); } grn_rc grn_string_set_normalized(grn_ctx *ctx, grn_obj *string, char *normalized, unsigned int length_in_bytes, unsigned int n_characters) { grn_rc rc; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { if (string_->normalized) { GRN_FREE(string_->normalized); } string_->normalized = normalized; string_->normalized_length_in_bytes = length_in_bytes; string_->n_characters = n_characters; rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } GRN_API_RETURN(rc); } const short * grn_string_get_checks(grn_ctx *ctx, grn_obj *string) { int16_t *checks = NULL; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { checks = string_->checks; } else { checks = NULL; } GRN_API_RETURN(checks); } grn_rc grn_string_set_checks(grn_ctx *ctx, grn_obj *string, short *checks) { grn_rc rc; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { if (string_->checks) { GRN_FREE(string_->checks); } string_->checks = checks; rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } GRN_API_RETURN(rc); } const unsigned char * grn_string_get_types(grn_ctx *ctx, grn_obj *string) { unsigned char *types = NULL; grn_string 
*string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { types = string_->ctypes; } else { types = NULL; } GRN_API_RETURN(types); } grn_rc grn_string_set_types(grn_ctx *ctx, grn_obj *string, unsigned char *types) { grn_rc rc; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { if (string_->ctypes) { GRN_FREE(string_->ctypes); } string_->ctypes = types; rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } GRN_API_RETURN(rc); } grn_encoding grn_string_get_encoding(grn_ctx *ctx, grn_obj *string) { grn_encoding encoding = GRN_ENC_NONE; grn_string *string_ = (grn_string *)string; GRN_API_ENTER; if (string_) { encoding = string_->encoding; } GRN_API_RETURN(encoding); } grn_rc grn_string_inspect(grn_ctx *ctx, grn_obj *buffer, grn_obj *string) { grn_string *string_ = (grn_string *)string; GRN_TEXT_PUTS(ctx, buffer, "#<string:"); GRN_TEXT_PUTS(ctx, buffer, " original:<"); GRN_TEXT_PUT(ctx, buffer, string_->original, string_->original_length_in_bytes); GRN_TEXT_PUTS(ctx, buffer, ">"); GRN_TEXT_PUTS(ctx, buffer, "("); grn_text_itoa(ctx, buffer, string_->original_length_in_bytes); GRN_TEXT_PUTS(ctx, buffer, ")"); GRN_TEXT_PUTS(ctx, buffer, " normalized:<"); GRN_TEXT_PUT(ctx, buffer, string_->normalized, string_->normalized_length_in_bytes); GRN_TEXT_PUTS(ctx, buffer, ">"); GRN_TEXT_PUTS(ctx, buffer, "("); grn_text_itoa(ctx, buffer, string_->normalized_length_in_bytes); GRN_TEXT_PUTS(ctx, buffer, ")"); GRN_TEXT_PUTS(ctx, buffer, " n_characters:"); grn_text_itoa(ctx, buffer, string_->n_characters); GRN_TEXT_PUTS(ctx, buffer, " encoding:"); grn_inspect_encoding(ctx, buffer, string_->encoding); GRN_TEXT_PUTS(ctx, buffer, " flags:"); if (string_->flags & GRN_STRING_REMOVE_BLANK) { GRN_TEXT_PUTS(ctx, buffer, "REMOVE_BLANK|"); } if (string_->flags & GRN_STRING_WITH_TYPES) { GRN_TEXT_PUTS(ctx, buffer, "WITH_TYPES|"); } if (string_->flags & GRN_STRING_WITH_CHECKS) { GRN_TEXT_PUTS(ctx, buffer, "WITH_CHECKS|"); } if (string_->flags & 
GRN_STRING_REMOVE_TOKENIZED_DELIMITER) { GRN_TEXT_PUTS(ctx, buffer, "REMOVE_TOKENIZED_DELIMITER|"); } if (GRN_TEXT_VALUE(buffer)[GRN_TEXT_LEN(buffer) - 1] == '|') { grn_bulk_truncate(ctx, buffer, GRN_TEXT_LEN(buffer) - 1); } GRN_TEXT_PUTS(ctx, buffer, ">"); return GRN_SUCCESS; } grn_rc grn_string_close(grn_ctx *ctx, grn_obj *string) { grn_rc rc; grn_string *string_ = (grn_string *)string; if (string_) { if (string_->normalized) { GRN_FREE(string_->normalized); } if (string_->ctypes) { GRN_FREE(string_->ctypes); } if (string_->checks) { GRN_FREE(string_->checks); } GRN_FREE(string); rc = GRN_SUCCESS; } else { rc = GRN_INVALID_ARGUMENT; } return rc; }
gpl-2.0
fishbaoz/coreboot
src/vendorcode/amd/cimx/sb900/IoLib.c
27
2842
/*;******************************************************************************** ; ; Copyright (c) 2011, Advanced Micro Devices, Inc. ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are met: ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the distribution. ; * Neither the name of Advanced Micro Devices, Inc. nor the names of ; its contributors may be used to endorse or promote products derived ; from this software without specific prior written permission. ; ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ; DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY ; DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
; ;*********************************************************************************/ #include "SbPlatform.h" #include "cbtypes.h" VOID ReadIO ( IN UINT16 Address, IN UINT8 OpFlag, IN VOID* Value ) { OpFlag = OpFlag & 0x7f; switch ( OpFlag ) { case AccWidthUint8: *(UINT8*)Value = ReadIo8 (Address); break; case AccWidthUint16: *(UINT16*)Value = ReadIo16 (Address); break; case AccWidthUint32: *(UINT32*)Value = ReadIo32 (Address); break; default: break; } } VOID WriteIO ( IN UINT16 Address, IN UINT8 OpFlag, IN VOID* Value ) { OpFlag = OpFlag & 0x7f; switch ( OpFlag ) { case AccWidthUint8: WriteIo8 (Address, *(UINT8*)Value); break; case AccWidthUint16: WriteIo16 (Address, *(UINT16*)Value); break; case AccWidthUint32: WriteIo32 (Address, *(UINT32*)Value); break; default: break; } } VOID RWIO ( IN UINT16 Address, IN UINT8 OpFlag, IN UINT32 Mask, IN UINT32 Data ) { UINT32 Result; ReadIO (Address, OpFlag, &Result); Result = (Result & Mask) | Data; WriteIO (Address, OpFlag, &Result); }
gpl-2.0
rminnich/linux
sound/soc/codecs/cs35l35.c
27
47358
/* * cs35l35.c -- CS35L35 ALSA SoC audio driver * * Copyright 2017 Cirrus Logic, Inc. * * Author: Brian Austin <brian.austin@cirrus.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/version.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/gpio/consumer.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/regmap.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <linux/gpio.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/cs35l35.h> #include <linux/of_irq.h> #include <linux/completion.h> #include "cs35l35.h" /* * Some fields take zero as a valid value so use a high bit flag that won't * get written to the device to mark those. 
*/ #define CS35L35_VALID_PDATA 0x80000000 static const struct reg_default cs35l35_reg[] = { {CS35L35_PWRCTL1, 0x01}, {CS35L35_PWRCTL2, 0x11}, {CS35L35_PWRCTL3, 0x00}, {CS35L35_CLK_CTL1, 0x04}, {CS35L35_CLK_CTL2, 0x12}, {CS35L35_CLK_CTL3, 0xCF}, {CS35L35_SP_FMT_CTL1, 0x20}, {CS35L35_SP_FMT_CTL2, 0x00}, {CS35L35_SP_FMT_CTL3, 0x02}, {CS35L35_MAG_COMP_CTL, 0x00}, {CS35L35_AMP_INP_DRV_CTL, 0x01}, {CS35L35_AMP_DIG_VOL_CTL, 0x12}, {CS35L35_AMP_DIG_VOL, 0x00}, {CS35L35_ADV_DIG_VOL, 0x00}, {CS35L35_PROTECT_CTL, 0x06}, {CS35L35_AMP_GAIN_AUD_CTL, 0x13}, {CS35L35_AMP_GAIN_PDM_CTL, 0x00}, {CS35L35_AMP_GAIN_ADV_CTL, 0x00}, {CS35L35_GPI_CTL, 0x00}, {CS35L35_BST_CVTR_V_CTL, 0x00}, {CS35L35_BST_PEAK_I, 0x07}, {CS35L35_BST_RAMP_CTL, 0x85}, {CS35L35_BST_CONV_COEF_1, 0x24}, {CS35L35_BST_CONV_COEF_2, 0x24}, {CS35L35_BST_CONV_SLOPE_COMP, 0x4E}, {CS35L35_BST_CONV_SW_FREQ, 0x04}, {CS35L35_CLASS_H_CTL, 0x0B}, {CS35L35_CLASS_H_HEADRM_CTL, 0x0B}, {CS35L35_CLASS_H_RELEASE_RATE, 0x08}, {CS35L35_CLASS_H_FET_DRIVE_CTL, 0x41}, {CS35L35_CLASS_H_VP_CTL, 0xC5}, {CS35L35_VPBR_CTL, 0x0A}, {CS35L35_VPBR_VOL_CTL, 0x90}, {CS35L35_VPBR_TIMING_CTL, 0x6A}, {CS35L35_VPBR_MODE_VOL_CTL, 0x00}, {CS35L35_SPKR_MON_CTL, 0xC0}, {CS35L35_IMON_SCALE_CTL, 0x30}, {CS35L35_AUDIN_RXLOC_CTL, 0x00}, {CS35L35_ADVIN_RXLOC_CTL, 0x80}, {CS35L35_VMON_TXLOC_CTL, 0x00}, {CS35L35_IMON_TXLOC_CTL, 0x80}, {CS35L35_VPMON_TXLOC_CTL, 0x04}, {CS35L35_VBSTMON_TXLOC_CTL, 0x84}, {CS35L35_VPBR_STATUS_TXLOC_CTL, 0x04}, {CS35L35_ZERO_FILL_LOC_CTL, 0x00}, {CS35L35_AUDIN_DEPTH_CTL, 0x0F}, {CS35L35_SPKMON_DEPTH_CTL, 0x0F}, {CS35L35_SUPMON_DEPTH_CTL, 0x0F}, {CS35L35_ZEROFILL_DEPTH_CTL, 0x00}, {CS35L35_MULT_DEV_SYNCH1, 0x02}, {CS35L35_MULT_DEV_SYNCH2, 0x80}, {CS35L35_PROT_RELEASE_CTL, 0x00}, {CS35L35_DIAG_MODE_REG_LOCK, 0x00}, {CS35L35_DIAG_MODE_CTL_1, 0x40}, {CS35L35_DIAG_MODE_CTL_2, 0x00}, {CS35L35_INT_MASK_1, 0xFF}, {CS35L35_INT_MASK_2, 0xFF}, {CS35L35_INT_MASK_3, 0xFF}, {CS35L35_INT_MASK_4, 0xFF}, }; static bool cs35l35_volatile_register(struct 
device *dev, unsigned int reg) { switch (reg) { case CS35L35_INT_STATUS_1: case CS35L35_INT_STATUS_2: case CS35L35_INT_STATUS_3: case CS35L35_INT_STATUS_4: case CS35L35_PLL_STATUS: case CS35L35_OTP_TRIM_STATUS: return true; default: return false; } } static bool cs35l35_readable_register(struct device *dev, unsigned int reg) { switch (reg) { case CS35L35_DEVID_AB ... CS35L35_PWRCTL3: case CS35L35_CLK_CTL1 ... CS35L35_SP_FMT_CTL3: case CS35L35_MAG_COMP_CTL ... CS35L35_AMP_GAIN_AUD_CTL: case CS35L35_AMP_GAIN_PDM_CTL ... CS35L35_BST_PEAK_I: case CS35L35_BST_RAMP_CTL ... CS35L35_BST_CONV_SW_FREQ: case CS35L35_CLASS_H_CTL ... CS35L35_CLASS_H_VP_CTL: case CS35L35_CLASS_H_STATUS: case CS35L35_VPBR_CTL ... CS35L35_VPBR_MODE_VOL_CTL: case CS35L35_VPBR_ATTEN_STATUS: case CS35L35_SPKR_MON_CTL: case CS35L35_IMON_SCALE_CTL ... CS35L35_ZEROFILL_DEPTH_CTL: case CS35L35_MULT_DEV_SYNCH1 ... CS35L35_PROT_RELEASE_CTL: case CS35L35_DIAG_MODE_REG_LOCK ... CS35L35_DIAG_MODE_CTL_2: case CS35L35_INT_MASK_1 ... 
CS35L35_PLL_STATUS: case CS35L35_OTP_TRIM_STATUS: return true; default: return false; } } static bool cs35l35_precious_register(struct device *dev, unsigned int reg) { switch (reg) { case CS35L35_INT_STATUS_1: case CS35L35_INT_STATUS_2: case CS35L35_INT_STATUS_3: case CS35L35_INT_STATUS_4: case CS35L35_PLL_STATUS: case CS35L35_OTP_TRIM_STATUS: return true; default: return false; } } static void cs35l35_reset(struct cs35l35_private *cs35l35) { gpiod_set_value_cansleep(cs35l35->reset_gpio, 0); usleep_range(2000, 2100); gpiod_set_value_cansleep(cs35l35->reset_gpio, 1); usleep_range(1000, 1100); } static int cs35l35_wait_for_pdn(struct cs35l35_private *cs35l35) { int ret; if (cs35l35->pdata.ext_bst) { usleep_range(5000, 5500); return 0; } reinit_completion(&cs35l35->pdn_done); ret = wait_for_completion_timeout(&cs35l35->pdn_done, msecs_to_jiffies(100)); if (ret == 0) { dev_err(cs35l35->dev, "PDN_DONE did not complete\n"); return -ETIMEDOUT; } return 0; } static int cs35l35_sdin_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); int ret = 0; switch (event) { case SND_SOC_DAPM_PRE_PMU: regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_MCLK_DIS_MASK, 0 << CS35L35_MCLK_DIS_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_DISCHG_FILT_MASK, 0 << CS35L35_DISCHG_FILT_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_PDN_ALL_MASK, 0); break; case SND_SOC_DAPM_POST_PMD: regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_DISCHG_FILT_MASK, 1 << CS35L35_DISCHG_FILT_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_PDN_ALL_MASK, 1); /* Already muted, so disable volume ramp for faster shutdown */ regmap_update_bits(cs35l35->regmap, CS35L35_AMP_DIG_VOL_CTL, CS35L35_AMP_DIGSFT_MASK, 0); ret = cs35l35_wait_for_pdn(cs35l35); 
regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_MCLK_DIS_MASK, 1 << CS35L35_MCLK_DIS_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_AMP_DIG_VOL_CTL, CS35L35_AMP_DIGSFT_MASK, 1 << CS35L35_AMP_DIGSFT_SHIFT); break; default: dev_err(codec->dev, "Invalid event = 0x%x\n", event); ret = -EINVAL; } return ret; } static int cs35l35_main_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); unsigned int reg[4]; int i; switch (event) { case SND_SOC_DAPM_PRE_PMU: if (cs35l35->pdata.bst_pdn_fet_on) regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 0 << CS35L35_PDN_BST_FETON_SHIFT); else regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 0 << CS35L35_PDN_BST_FETOFF_SHIFT); break; case SND_SOC_DAPM_POST_PMU: usleep_range(5000, 5100); /* If in PDM mode we must use VP for Voltage control */ if (cs35l35->pdm_mode) regmap_update_bits(cs35l35->regmap, CS35L35_BST_CVTR_V_CTL, CS35L35_BST_CTL_MASK, 0 << CS35L35_BST_CTL_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL, CS35L35_AMP_MUTE_MASK, 0); for (i = 0; i < 2; i++) regmap_bulk_read(cs35l35->regmap, CS35L35_INT_STATUS_1, &reg, ARRAY_SIZE(reg)); break; case SND_SOC_DAPM_PRE_PMD: regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL, CS35L35_AMP_MUTE_MASK, 1 << CS35L35_AMP_MUTE_SHIFT); if (cs35l35->pdata.bst_pdn_fet_on) regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 1 << CS35L35_PDN_BST_FETON_SHIFT); else regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 1 << CS35L35_PDN_BST_FETOFF_SHIFT); break; case SND_SOC_DAPM_POST_PMD: usleep_range(5000, 5100); /* * If PDM mode we should switch back to pdata value * for Voltage control when we go down */ if (cs35l35->pdm_mode) regmap_update_bits(cs35l35->regmap, CS35L35_BST_CVTR_V_CTL, 
CS35L35_BST_CTL_MASK, cs35l35->pdata.bst_vctl << CS35L35_BST_CTL_SHIFT); break; default: dev_err(codec->dev, "Invalid event = 0x%x\n", event); } return 0; } static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 50, 0); static const struct snd_kcontrol_new cs35l35_aud_controls[] = { SOC_SINGLE_SX_TLV("Digital Audio Volume", CS35L35_AMP_DIG_VOL, 0, 0x34, 0xE4, dig_vol_tlv), SOC_SINGLE_TLV("Analog Audio Volume", CS35L35_AMP_GAIN_AUD_CTL, 0, 19, 0, amp_gain_tlv), SOC_SINGLE_TLV("PDM Volume", CS35L35_AMP_GAIN_PDM_CTL, 0, 19, 0, amp_gain_tlv), }; static const struct snd_kcontrol_new cs35l35_adv_controls[] = { SOC_SINGLE_SX_TLV("Digital Advisory Volume", CS35L35_ADV_DIG_VOL, 0, 0x34, 0xE4, dig_vol_tlv), SOC_SINGLE_TLV("Analog Advisory Volume", CS35L35_AMP_GAIN_ADV_CTL, 0, 19, 0, amp_gain_tlv), }; static const struct snd_soc_dapm_widget cs35l35_dapm_widgets[] = { SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L35_PWRCTL3, 1, 1, cs35l35_sdin_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, CS35L35_PWRCTL3, 2, 1), SND_SOC_DAPM_OUTPUT("SPK"), SND_SOC_DAPM_INPUT("VP"), SND_SOC_DAPM_INPUT("VBST"), SND_SOC_DAPM_INPUT("ISENSE"), SND_SOC_DAPM_INPUT("VSENSE"), SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L35_PWRCTL2, 7, 1), SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L35_PWRCTL2, 6, 1), SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L35_PWRCTL3, 3, 1), SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L35_PWRCTL3, 4, 1), SND_SOC_DAPM_ADC("CLASS H", NULL, CS35L35_PWRCTL2, 5, 1), SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L35_PWRCTL2, 0, 1, NULL, 0, cs35l35_main_amp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), }; static const struct snd_soc_dapm_route cs35l35_audio_map[] = { {"VPMON ADC", NULL, "VP"}, {"VBSTMON ADC", NULL, "VBST"}, {"IMON ADC", NULL, "ISENSE"}, {"VMON ADC", NULL, "VSENSE"}, {"SDOUT", NULL, "IMON ADC"}, {"SDOUT", NULL, "VMON ADC"}, {"SDOUT", NULL, "VBSTMON 
ADC"}, {"SDOUT", NULL, "VPMON ADC"}, {"AMP Capture", NULL, "SDOUT"}, {"SDIN", NULL, "AMP Playback"}, {"CLASS H", NULL, "SDIN"}, {"Main AMP", NULL, "CLASS H"}, {"SPK", NULL, "Main AMP"}, }; static int cs35l35_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_MS_MASK, 1 << CS35L35_MS_SHIFT); cs35l35->slave_mode = false; break; case SND_SOC_DAIFMT_CBS_CFS: regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_MS_MASK, 0 << CS35L35_MS_SHIFT); cs35l35->slave_mode = true; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: cs35l35->i2s_mode = true; cs35l35->pdm_mode = false; break; case SND_SOC_DAIFMT_PDM: cs35l35->pdm_mode = true; cs35l35->i2s_mode = false; break; default: return -EINVAL; } return 0; } struct cs35l35_sysclk_config { int sysclk; int srate; u8 clk_cfg; }; static struct cs35l35_sysclk_config cs35l35_clk_ctl[] = { /* SYSCLK, Sample Rate, Serial Port Cfg */ {5644800, 44100, 0x00}, {5644800, 88200, 0x40}, {6144000, 48000, 0x10}, {6144000, 96000, 0x50}, {11289600, 44100, 0x01}, {11289600, 88200, 0x41}, {11289600, 176400, 0x81}, {12000000, 44100, 0x03}, {12000000, 48000, 0x13}, {12000000, 88200, 0x43}, {12000000, 96000, 0x53}, {12000000, 176400, 0x83}, {12000000, 192000, 0x93}, {12288000, 48000, 0x11}, {12288000, 96000, 0x51}, {12288000, 192000, 0x91}, {13000000, 44100, 0x07}, {13000000, 48000, 0x17}, {13000000, 88200, 0x47}, {13000000, 96000, 0x57}, {13000000, 176400, 0x87}, {13000000, 192000, 0x97}, {22579200, 44100, 0x02}, {22579200, 88200, 0x42}, {22579200, 176400, 0x82}, {24000000, 44100, 0x0B}, {24000000, 48000, 0x1B}, {24000000, 88200, 0x4B}, {24000000, 96000, 0x5B}, {24000000, 176400, 0x8B}, {24000000, 192000, 0x9B}, {24576000, 48000, 
0x12}, {24576000, 96000, 0x52}, {24576000, 192000, 0x92}, {26000000, 44100, 0x0F}, {26000000, 48000, 0x1F}, {26000000, 88200, 0x4F}, {26000000, 96000, 0x5F}, {26000000, 176400, 0x8F}, {26000000, 192000, 0x9F}, }; static int cs35l35_get_clk_config(int sysclk, int srate) { int i; for (i = 0; i < ARRAY_SIZE(cs35l35_clk_ctl); i++) { if (cs35l35_clk_ctl[i].sysclk == sysclk && cs35l35_clk_ctl[i].srate == srate) return cs35l35_clk_ctl[i].clk_cfg; } return -EINVAL; } static int cs35l35_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); struct classh_cfg *classh = &cs35l35->pdata.classh_algo; int srate = params_rate(params); int ret = 0; u8 sp_sclks; int audin_format; int errata_chk; int clk_ctl = cs35l35_get_clk_config(cs35l35->sysclk, srate); if (clk_ctl < 0) { dev_err(codec->dev, "Invalid CLK:Rate %d:%d\n", cs35l35->sysclk, srate); return -EINVAL; } ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL2, CS35L35_CLK_CTL2_MASK, clk_ctl); if (ret != 0) { dev_err(codec->dev, "Failed to set port config %d\n", ret); return ret; } /* * Rev A0 Errata * When configured for the weak-drive detection path (CH_WKFET_DIS = 0) * the Class H algorithm does not enable weak-drive operation for * nonzero values of CH_WKFET_DELAY if SP_RATE = 01 or 10 */ errata_chk = clk_ctl & CS35L35_SP_RATE_MASK; if (classh->classh_wk_fet_disable == 0x00 && (errata_chk == 0x01 || errata_chk == 0x03)) { ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_FET_DRIVE_CTL, CS35L35_CH_WKFET_DEL_MASK, 0 << CS35L35_CH_WKFET_DEL_SHIFT); if (ret != 0) { dev_err(codec->dev, "Failed to set fet config %d\n", ret); return ret; } } /* * You can pull more Monitor data from the SDOUT pin than going to SDIN * Just make sure your SCLK is fast enough to fill the frame */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch 
(params_width(params)) { case 8: audin_format = CS35L35_SDIN_DEPTH_8; break; case 16: audin_format = CS35L35_SDIN_DEPTH_16; break; case 24: audin_format = CS35L35_SDIN_DEPTH_24; break; default: dev_err(codec->dev, "Unsupported Width %d\n", params_width(params)); return -EINVAL; } regmap_update_bits(cs35l35->regmap, CS35L35_AUDIN_DEPTH_CTL, CS35L35_AUDIN_DEPTH_MASK, audin_format << CS35L35_AUDIN_DEPTH_SHIFT); if (cs35l35->pdata.stereo) { regmap_update_bits(cs35l35->regmap, CS35L35_AUDIN_DEPTH_CTL, CS35L35_ADVIN_DEPTH_MASK, audin_format << CS35L35_ADVIN_DEPTH_SHIFT); } } if (cs35l35->i2s_mode) { /* We have to take the SCLK to derive num sclks * to configure the CLOCK_CTL3 register correctly */ if ((cs35l35->sclk / srate) % 4) { dev_err(codec->dev, "Unsupported sclk/fs ratio %d:%d\n", cs35l35->sclk, srate); return -EINVAL; } sp_sclks = ((cs35l35->sclk / srate) / 4) - 1; /* Only certain ratios are supported in I2S Slave Mode */ if (cs35l35->slave_mode) { switch (sp_sclks) { case CS35L35_SP_SCLKS_32FS: case CS35L35_SP_SCLKS_48FS: case CS35L35_SP_SCLKS_64FS: break; default: dev_err(codec->dev, "ratio not supported\n"); return -EINVAL; } } else { /* Only certain ratios supported in I2S MASTER Mode */ switch (sp_sclks) { case CS35L35_SP_SCLKS_32FS: case CS35L35_SP_SCLKS_64FS: break; default: dev_err(codec->dev, "ratio not supported\n"); return -EINVAL; } } ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL3, CS35L35_SP_SCLKS_MASK, sp_sclks << CS35L35_SP_SCLKS_SHIFT); if (ret != 0) { dev_err(codec->dev, "Failed to set fsclk %d\n", ret); return ret; } } return ret; } static const unsigned int cs35l35_src_rates[] = { 44100, 48000, 88200, 96000, 176400, 192000 }; static const struct snd_pcm_hw_constraint_list cs35l35_constraints = { .count = ARRAY_SIZE(cs35l35_src_rates), .list = cs35l35_src_rates, }; static int cs35l35_pcm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct cs35l35_private *cs35l35 = 
snd_soc_codec_get_drvdata(codec); if (!substream->runtime) return 0; snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &cs35l35_constraints); regmap_update_bits(cs35l35->regmap, CS35L35_AMP_INP_DRV_CTL, CS35L35_PDM_MODE_MASK, 0 << CS35L35_PDM_MODE_SHIFT); return 0; } static const unsigned int cs35l35_pdm_rates[] = { 44100, 48000, 88200, 96000 }; static const struct snd_pcm_hw_constraint_list cs35l35_pdm_constraints = { .count = ARRAY_SIZE(cs35l35_pdm_rates), .list = cs35l35_pdm_rates, }; static int cs35l35_pdm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); if (!substream->runtime) return 0; snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &cs35l35_pdm_constraints); regmap_update_bits(cs35l35->regmap, CS35L35_AMP_INP_DRV_CTL, CS35L35_PDM_MODE_MASK, 1 << CS35L35_PDM_MODE_SHIFT); return 0; } static int cs35l35_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = dai->codec; struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); /* Need the SCLK Frequency regardless of sysclk source for I2S */ cs35l35->sclk = freq; return 0; } static const struct snd_soc_dai_ops cs35l35_ops = { .startup = cs35l35_pcm_startup, .set_fmt = cs35l35_set_dai_fmt, .hw_params = cs35l35_hw_params, .set_sysclk = cs35l35_dai_set_sysclk, }; static const struct snd_soc_dai_ops cs35l35_pdm_ops = { .startup = cs35l35_pdm_startup, .set_fmt = cs35l35_set_dai_fmt, .hw_params = cs35l35_hw_params, }; static struct snd_soc_dai_driver cs35l35_dai[] = { { .name = "cs35l35-pcm", .id = 0, .playback = { .stream_name = "AMP Playback", .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_KNOT, .formats = CS35L35_FORMATS, }, .capture = { .stream_name = "AMP Capture", .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_KNOT, .formats = 
CS35L35_FORMATS, }, .ops = &cs35l35_ops, .symmetric_rates = 1, }, { .name = "cs35l35-pdm", .id = 1, .playback = { .stream_name = "PDM Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_KNOT, .formats = CS35L35_FORMATS, }, .ops = &cs35l35_pdm_ops, }, }; static int cs35l35_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id, int source, unsigned int freq, int dir) { struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); int clksrc; int ret = 0; switch (clk_id) { case 0: clksrc = CS35L35_CLK_SOURCE_MCLK; break; case 1: clksrc = CS35L35_CLK_SOURCE_SCLK; break; case 2: clksrc = CS35L35_CLK_SOURCE_PDM; break; default: dev_err(codec->dev, "Invalid CLK Source\n"); return -EINVAL; } switch (freq) { case 5644800: case 6144000: case 11289600: case 12000000: case 12288000: case 13000000: case 22579200: case 24000000: case 24576000: case 26000000: cs35l35->sysclk = freq; break; default: dev_err(codec->dev, "Invalid CLK Frequency Input : %d\n", freq); return -EINVAL; } ret = regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_CLK_SOURCE_MASK, clksrc << CS35L35_CLK_SOURCE_SHIFT); if (ret != 0) { dev_err(codec->dev, "Failed to set sysclk %d\n", ret); return ret; } return ret; } static int cs35l35_boost_inductor(struct cs35l35_private *cs35l35, int inductor) { struct regmap *regmap = cs35l35->regmap; unsigned int bst_ipk = 0; /* * Digital Boost Converter Configuration for feedback, * ramping, switching frequency, and estimation block seeding. 
*/ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ, CS35L35_BST_CONV_SWFREQ_MASK, 0x00); regmap_read(regmap, CS35L35_BST_PEAK_I, &bst_ipk); bst_ipk &= CS35L35_BST_IPK_MASK; switch (inductor) { case 1000: /* 1 uH */ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x24); regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x24); regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ, CS35L35_BST_CONV_LBST_MASK, 0x00); if (bst_ipk < 0x04) regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B); else regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x4E); break; case 1200: /* 1.2 uH */ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x20); regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x20); regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ, CS35L35_BST_CONV_LBST_MASK, 0x01); if (bst_ipk < 0x04) regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B); else regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x47); break; case 1500: /* 1.5uH */ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x20); regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x20); regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ, CS35L35_BST_CONV_LBST_MASK, 0x02); if (bst_ipk < 0x04) regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B); else regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x3C); break; case 2200: /* 2.2uH */ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x19); regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x25); regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ, CS35L35_BST_CONV_LBST_MASK, 0x03); if (bst_ipk < 0x04) regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B); else regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x23); break; default: dev_err(cs35l35->dev, "Invalid Inductor Value %d uH\n", inductor); return -EINVAL; } return 0; } static int cs35l35_codec_probe(struct snd_soc_codec *codec) { struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec); struct classh_cfg *classh = &cs35l35->pdata.classh_algo; struct monitor_cfg *monitor_config = 
&cs35l35->pdata.mon_cfg; int ret; /* Set Platform Data */ if (cs35l35->pdata.bst_vctl) regmap_update_bits(cs35l35->regmap, CS35L35_BST_CVTR_V_CTL, CS35L35_BST_CTL_MASK, cs35l35->pdata.bst_vctl); if (cs35l35->pdata.bst_ipk) regmap_update_bits(cs35l35->regmap, CS35L35_BST_PEAK_I, CS35L35_BST_IPK_MASK, cs35l35->pdata.bst_ipk << CS35L35_BST_IPK_SHIFT); ret = cs35l35_boost_inductor(cs35l35, cs35l35->pdata.boost_ind); if (ret) return ret; if (cs35l35->pdata.gain_zc) regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL, CS35L35_AMP_GAIN_ZC_MASK, cs35l35->pdata.gain_zc << CS35L35_AMP_GAIN_ZC_SHIFT); if (cs35l35->pdata.aud_channel) regmap_update_bits(cs35l35->regmap, CS35L35_AUDIN_RXLOC_CTL, CS35L35_AUD_IN_LR_MASK, cs35l35->pdata.aud_channel << CS35L35_AUD_IN_LR_SHIFT); if (cs35l35->pdata.stereo) { regmap_update_bits(cs35l35->regmap, CS35L35_ADVIN_RXLOC_CTL, CS35L35_ADV_IN_LR_MASK, cs35l35->pdata.adv_channel << CS35L35_ADV_IN_LR_SHIFT); if (cs35l35->pdata.shared_bst) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_CTL, CS35L35_CH_STEREO_MASK, 1 << CS35L35_CH_STEREO_SHIFT); ret = snd_soc_add_codec_controls(codec, cs35l35_adv_controls, ARRAY_SIZE(cs35l35_adv_controls)); if (ret) return ret; } if (cs35l35->pdata.sp_drv_str) regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1, CS35L35_SP_DRV_MASK, cs35l35->pdata.sp_drv_str << CS35L35_SP_DRV_SHIFT); if (cs35l35->pdata.sp_drv_unused) regmap_update_bits(cs35l35->regmap, CS35L35_SP_FMT_CTL3, CS35L35_SP_I2S_DRV_MASK, cs35l35->pdata.sp_drv_unused << CS35L35_SP_I2S_DRV_SHIFT); if (classh->classh_algo_enable) { if (classh->classh_bst_override) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_CTL, CS35L35_CH_BST_OVR_MASK, classh->classh_bst_override << CS35L35_CH_BST_OVR_SHIFT); if (classh->classh_bst_max_limit) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_CTL, CS35L35_CH_BST_LIM_MASK, classh->classh_bst_max_limit << CS35L35_CH_BST_LIM_SHIFT); if (classh->classh_mem_depth) regmap_update_bits(cs35l35->regmap, 
CS35L35_CLASS_H_CTL, CS35L35_CH_MEM_DEPTH_MASK, classh->classh_mem_depth << CS35L35_CH_MEM_DEPTH_SHIFT); if (classh->classh_headroom) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_HEADRM_CTL, CS35L35_CH_HDRM_CTL_MASK, classh->classh_headroom << CS35L35_CH_HDRM_CTL_SHIFT); if (classh->classh_release_rate) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_RELEASE_RATE, CS35L35_CH_REL_RATE_MASK, classh->classh_release_rate << CS35L35_CH_REL_RATE_SHIFT); if (classh->classh_wk_fet_disable) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_FET_DRIVE_CTL, CS35L35_CH_WKFET_DIS_MASK, classh->classh_wk_fet_disable << CS35L35_CH_WKFET_DIS_SHIFT); if (classh->classh_wk_fet_delay) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_FET_DRIVE_CTL, CS35L35_CH_WKFET_DEL_MASK, classh->classh_wk_fet_delay << CS35L35_CH_WKFET_DEL_SHIFT); if (classh->classh_wk_fet_thld) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_FET_DRIVE_CTL, CS35L35_CH_WKFET_THLD_MASK, classh->classh_wk_fet_thld << CS35L35_CH_WKFET_THLD_SHIFT); if (classh->classh_vpch_auto) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_VP_CTL, CS35L35_CH_VP_AUTO_MASK, classh->classh_vpch_auto << CS35L35_CH_VP_AUTO_SHIFT); if (classh->classh_vpch_rate) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_VP_CTL, CS35L35_CH_VP_RATE_MASK, classh->classh_vpch_rate << CS35L35_CH_VP_RATE_SHIFT); if (classh->classh_vpch_man) regmap_update_bits(cs35l35->regmap, CS35L35_CLASS_H_VP_CTL, CS35L35_CH_VP_MAN_MASK, classh->classh_vpch_man << CS35L35_CH_VP_MAN_SHIFT); } if (monitor_config->is_present) { if (monitor_config->vmon_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SPKMON_DEPTH_CTL, CS35L35_VMON_DEPTH_MASK, monitor_config->vmon_dpth << CS35L35_VMON_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VMON_TXLOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->vmon_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VMON_TXLOC_CTL, CS35L35_MON_FRM_MASK, monitor_config->vmon_frm << 
CS35L35_MON_FRM_SHIFT); } if (monitor_config->imon_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SPKMON_DEPTH_CTL, CS35L35_IMON_DEPTH_MASK, monitor_config->imon_dpth << CS35L35_IMON_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_IMON_TXLOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->imon_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_IMON_TXLOC_CTL, CS35L35_MON_FRM_MASK, monitor_config->imon_frm << CS35L35_MON_FRM_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_IMON_SCALE_CTL, CS35L35_IMON_SCALE_MASK, monitor_config->imon_scale << CS35L35_IMON_SCALE_SHIFT); } if (monitor_config->vpmon_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SUPMON_DEPTH_CTL, CS35L35_VPMON_DEPTH_MASK, monitor_config->vpmon_dpth << CS35L35_VPMON_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VPMON_TXLOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->vpmon_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VPMON_TXLOC_CTL, CS35L35_MON_FRM_MASK, monitor_config->vpmon_frm << CS35L35_MON_FRM_SHIFT); } if (monitor_config->vbstmon_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SUPMON_DEPTH_CTL, CS35L35_VBSTMON_DEPTH_MASK, monitor_config->vpmon_dpth << CS35L35_VBSTMON_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VBSTMON_TXLOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->vbstmon_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VBSTMON_TXLOC_CTL, CS35L35_MON_FRM_MASK, monitor_config->vbstmon_frm << CS35L35_MON_FRM_SHIFT); } if (monitor_config->vpbrstat_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SUPMON_DEPTH_CTL, CS35L35_VPBRSTAT_DEPTH_MASK, monitor_config->vpbrstat_dpth << CS35L35_VPBRSTAT_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VPBR_STATUS_TXLOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->vpbrstat_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_VPBR_STATUS_TXLOC_CTL, CS35L35_MON_FRM_MASK, 
monitor_config->vpbrstat_frm << CS35L35_MON_FRM_SHIFT); } if (monitor_config->zerofill_specs) { regmap_update_bits(cs35l35->regmap, CS35L35_SUPMON_DEPTH_CTL, CS35L35_ZEROFILL_DEPTH_MASK, monitor_config->zerofill_dpth << CS35L35_ZEROFILL_DEPTH_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_ZERO_FILL_LOC_CTL, CS35L35_MON_TXLOC_MASK, monitor_config->zerofill_loc << CS35L35_MON_TXLOC_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_ZERO_FILL_LOC_CTL, CS35L35_MON_FRM_MASK, monitor_config->zerofill_frm << CS35L35_MON_FRM_SHIFT); } } return 0; } static struct snd_soc_codec_driver soc_codec_dev_cs35l35 = { .probe = cs35l35_codec_probe, .set_sysclk = cs35l35_codec_set_sysclk, .component_driver = { .dapm_widgets = cs35l35_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(cs35l35_dapm_widgets), .dapm_routes = cs35l35_audio_map, .num_dapm_routes = ARRAY_SIZE(cs35l35_audio_map), .controls = cs35l35_aud_controls, .num_controls = ARRAY_SIZE(cs35l35_aud_controls), }, }; static struct regmap_config cs35l35_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = CS35L35_MAX_REGISTER, .reg_defaults = cs35l35_reg, .num_reg_defaults = ARRAY_SIZE(cs35l35_reg), .volatile_reg = cs35l35_volatile_register, .readable_reg = cs35l35_readable_register, .precious_reg = cs35l35_precious_register, .cache_type = REGCACHE_RBTREE, }; static irqreturn_t cs35l35_irq(int irq, void *data) { struct cs35l35_private *cs35l35 = data; unsigned int sticky1, sticky2, sticky3, sticky4; unsigned int mask1, mask2, mask3, mask4, current1; /* ack the irq by reading all status registers */ regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_4, &sticky4); regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_3, &sticky3); regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_2, &sticky2); regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_1, &sticky1); regmap_read(cs35l35->regmap, CS35L35_INT_MASK_4, &mask4); regmap_read(cs35l35->regmap, CS35L35_INT_MASK_3, &mask3); regmap_read(cs35l35->regmap, CS35L35_INT_MASK_2, &mask2); 
regmap_read(cs35l35->regmap, CS35L35_INT_MASK_1, &mask1); /* Check to see if unmasked bits are active */ if (!(sticky1 & ~mask1) && !(sticky2 & ~mask2) && !(sticky3 & ~mask3) && !(sticky4 & ~mask4)) return IRQ_NONE; if (sticky2 & CS35L35_PDN_DONE) complete(&cs35l35->pdn_done); /* read the current values */ regmap_read(cs35l35->regmap, CS35L35_INT_STATUS_1, &current1); /* handle the interrupts */ if (sticky1 & CS35L35_CAL_ERR) { dev_crit(cs35l35->dev, "Calibration Error\n"); /* error is no longer asserted; safe to reset */ if (!(current1 & CS35L35_CAL_ERR)) { pr_debug("%s : Cal error release\n", __func__); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_CAL_ERR_RLS, 0); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_CAL_ERR_RLS, CS35L35_CAL_ERR_RLS); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_CAL_ERR_RLS, 0); } } if (sticky1 & CS35L35_AMP_SHORT) { dev_crit(cs35l35->dev, "AMP Short Error\n"); /* error is no longer asserted; safe to reset */ if (!(current1 & CS35L35_AMP_SHORT)) { dev_dbg(cs35l35->dev, "Amp short error release\n"); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_SHORT_RLS, 0); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_SHORT_RLS, CS35L35_SHORT_RLS); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_SHORT_RLS, 0); } } if (sticky1 & CS35L35_OTW) { dev_warn(cs35l35->dev, "Over temperature warning\n"); /* error is no longer asserted; safe to reset */ if (!(current1 & CS35L35_OTW)) { dev_dbg(cs35l35->dev, "Over temperature warn release\n"); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTW_RLS, 0); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTW_RLS, CS35L35_OTW_RLS); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTW_RLS, 0); } } if (sticky1 & CS35L35_OTE) { dev_crit(cs35l35->dev, "Over temperature error\n"); /* error is no longer asserted; safe 
to reset */ if (!(current1 & CS35L35_OTE)) { dev_dbg(cs35l35->dev, "Over temperature error release\n"); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTE_RLS, 0); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTE_RLS, CS35L35_OTE_RLS); regmap_update_bits(cs35l35->regmap, CS35L35_PROT_RELEASE_CTL, CS35L35_OTE_RLS, 0); } } if (sticky3 & CS35L35_BST_HIGH) { dev_crit(cs35l35->dev, "VBST error: powering off!\n"); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_AMP, CS35L35_PDN_AMP); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_PDN_ALL, CS35L35_PDN_ALL); } if (sticky3 & CS35L35_LBST_SHORT) { dev_crit(cs35l35->dev, "LBST error: powering off!\n"); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_AMP, CS35L35_PDN_AMP); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL1, CS35L35_PDN_ALL, CS35L35_PDN_ALL); } if (sticky2 & CS35L35_VPBR_ERR) dev_dbg(cs35l35->dev, "Error: Reactive Brownout\n"); if (sticky4 & CS35L35_VMON_OVFL) dev_dbg(cs35l35->dev, "Error: VMON overflow\n"); if (sticky4 & CS35L35_IMON_OVFL) dev_dbg(cs35l35->dev, "Error: IMON overflow\n"); return IRQ_HANDLED; } static int cs35l35_handle_of_data(struct i2c_client *i2c_client, struct cs35l35_platform_data *pdata) { struct device_node *np = i2c_client->dev.of_node; struct device_node *classh, *signal_format; struct classh_cfg *classh_config = &pdata->classh_algo; struct monitor_cfg *monitor_config = &pdata->mon_cfg; unsigned int val32 = 0; u8 monitor_array[4]; const int imon_array_size = ARRAY_SIZE(monitor_array); const int mon_array_size = imon_array_size - 1; int ret = 0; if (!np) return 0; pdata->bst_pdn_fet_on = of_property_read_bool(np, "cirrus,boost-pdn-fet-on"); ret = of_property_read_u32(np, "cirrus,boost-ctl-millivolt", &val32); if (ret >= 0) { if (val32 < 2600 || val32 > 9000) { dev_err(&i2c_client->dev, "Invalid Boost Voltage %d mV\n", val32); return -EINVAL; } pdata->bst_vctl = ((val32 - 2600) / 100) + 1; } 
ret = of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val32); if (ret >= 0) { if (val32 < 1680 || val32 > 4480) { dev_err(&i2c_client->dev, "Invalid Boost Peak Current %u mA\n", val32); return -EINVAL; } pdata->bst_ipk = ((val32 - 1680) / 110) | CS35L35_VALID_PDATA; } ret = of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val32); if (ret >= 0) { pdata->boost_ind = val32; } else { dev_err(&i2c_client->dev, "Inductor not specified.\n"); return -EINVAL; } if (of_property_read_u32(np, "cirrus,sp-drv-strength", &val32) >= 0) pdata->sp_drv_str = val32; if (of_property_read_u32(np, "cirrus,sp-drv-unused", &val32) >= 0) pdata->sp_drv_unused = val32 | CS35L35_VALID_PDATA; pdata->stereo = of_property_read_bool(np, "cirrus,stereo-config"); if (pdata->stereo) { ret = of_property_read_u32(np, "cirrus,audio-channel", &val32); if (ret >= 0) pdata->aud_channel = val32; ret = of_property_read_u32(np, "cirrus,advisory-channel", &val32); if (ret >= 0) pdata->adv_channel = val32; pdata->shared_bst = of_property_read_bool(np, "cirrus,shared-boost"); } pdata->ext_bst = of_property_read_bool(np, "cirrus,external-boost"); pdata->gain_zc = of_property_read_bool(np, "cirrus,amp-gain-zc"); classh = of_get_child_by_name(np, "cirrus,classh-internal-algo"); classh_config->classh_algo_enable = classh ? 
true : false; if (classh_config->classh_algo_enable) { classh_config->classh_bst_override = of_property_read_bool(np, "cirrus,classh-bst-overide"); ret = of_property_read_u32(classh, "cirrus,classh-bst-max-limit", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_bst_max_limit = val32; } ret = of_property_read_u32(classh, "cirrus,classh-bst-max-limit", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_bst_max_limit = val32; } ret = of_property_read_u32(classh, "cirrus,classh-mem-depth", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_mem_depth = val32; } ret = of_property_read_u32(classh, "cirrus,classh-release-rate", &val32); if (ret >= 0) classh_config->classh_release_rate = val32; ret = of_property_read_u32(classh, "cirrus,classh-headroom", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_headroom = val32; } ret = of_property_read_u32(classh, "cirrus,classh-wk-fet-disable", &val32); if (ret >= 0) classh_config->classh_wk_fet_disable = val32; ret = of_property_read_u32(classh, "cirrus,classh-wk-fet-delay", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_wk_fet_delay = val32; } ret = of_property_read_u32(classh, "cirrus,classh-wk-fet-thld", &val32); if (ret >= 0) classh_config->classh_wk_fet_thld = val32; ret = of_property_read_u32(classh, "cirrus,classh-vpch-auto", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_vpch_auto = val32; } ret = of_property_read_u32(classh, "cirrus,classh-vpch-rate", &val32); if (ret >= 0) { val32 |= CS35L35_VALID_PDATA; classh_config->classh_vpch_rate = val32; } ret = of_property_read_u32(classh, "cirrus,classh-vpch-man", &val32); if (ret >= 0) classh_config->classh_vpch_man = val32; } of_node_put(classh); /* frame depth location */ signal_format = of_get_child_by_name(np, "cirrus,monitor-signal-format"); monitor_config->is_present = signal_format ? 
true : false; if (monitor_config->is_present) { ret = of_property_read_u8_array(signal_format, "cirrus,imon", monitor_array, imon_array_size); if (!ret) { monitor_config->imon_specs = true; monitor_config->imon_dpth = monitor_array[0]; monitor_config->imon_loc = monitor_array[1]; monitor_config->imon_frm = monitor_array[2]; monitor_config->imon_scale = monitor_array[3]; } ret = of_property_read_u8_array(signal_format, "cirrus,vmon", monitor_array, mon_array_size); if (!ret) { monitor_config->vmon_specs = true; monitor_config->vmon_dpth = monitor_array[0]; monitor_config->vmon_loc = monitor_array[1]; monitor_config->vmon_frm = monitor_array[2]; } ret = of_property_read_u8_array(signal_format, "cirrus,vpmon", monitor_array, mon_array_size); if (!ret) { monitor_config->vpmon_specs = true; monitor_config->vpmon_dpth = monitor_array[0]; monitor_config->vpmon_loc = monitor_array[1]; monitor_config->vpmon_frm = monitor_array[2]; } ret = of_property_read_u8_array(signal_format, "cirrus,vbstmon", monitor_array, mon_array_size); if (!ret) { monitor_config->vbstmon_specs = true; monitor_config->vbstmon_dpth = monitor_array[0]; monitor_config->vbstmon_loc = monitor_array[1]; monitor_config->vbstmon_frm = monitor_array[2]; } ret = of_property_read_u8_array(signal_format, "cirrus,vpbrstat", monitor_array, mon_array_size); if (!ret) { monitor_config->vpbrstat_specs = true; monitor_config->vpbrstat_dpth = monitor_array[0]; monitor_config->vpbrstat_loc = monitor_array[1]; monitor_config->vpbrstat_frm = monitor_array[2]; } ret = of_property_read_u8_array(signal_format, "cirrus,zerofill", monitor_array, mon_array_size); if (!ret) { monitor_config->zerofill_specs = true; monitor_config->zerofill_dpth = monitor_array[0]; monitor_config->zerofill_loc = monitor_array[1]; monitor_config->zerofill_frm = monitor_array[2]; } } of_node_put(signal_format); return 0; } /* Errata Rev A0 */ static const struct reg_sequence cs35l35_errata_patch[] = { { 0x7F, 0x99 }, { 0x00, 0x99 }, { 0x52, 0x22 }, 
{ 0x04, 0x14 }, { 0x6D, 0x44 }, { 0x24, 0x10 }, { 0x58, 0xC4 }, { 0x00, 0x98 }, { 0x18, 0x08 }, { 0x00, 0x00 }, { 0x7F, 0x00 }, }; static int cs35l35_i2c_probe(struct i2c_client *i2c_client, const struct i2c_device_id *id) { struct cs35l35_private *cs35l35; struct device *dev = &i2c_client->dev; struct cs35l35_platform_data *pdata = dev_get_platdata(dev); int i; int ret; unsigned int devid = 0; unsigned int reg; cs35l35 = devm_kzalloc(dev, sizeof(struct cs35l35_private), GFP_KERNEL); if (!cs35l35) return -ENOMEM; cs35l35->dev = dev; i2c_set_clientdata(i2c_client, cs35l35); cs35l35->regmap = devm_regmap_init_i2c(i2c_client, &cs35l35_regmap); if (IS_ERR(cs35l35->regmap)) { ret = PTR_ERR(cs35l35->regmap); dev_err(dev, "regmap_init() failed: %d\n", ret); goto err; } for (i = 0; i < ARRAY_SIZE(cs35l35_supplies); i++) cs35l35->supplies[i].supply = cs35l35_supplies[i]; cs35l35->num_supplies = ARRAY_SIZE(cs35l35_supplies); ret = devm_regulator_bulk_get(dev, cs35l35->num_supplies, cs35l35->supplies); if (ret != 0) { dev_err(dev, "Failed to request core supplies: %d\n", ret); return ret; } if (pdata) { cs35l35->pdata = *pdata; } else { pdata = devm_kzalloc(dev, sizeof(struct cs35l35_platform_data), GFP_KERNEL); if (!pdata) return -ENOMEM; if (i2c_client->dev.of_node) { ret = cs35l35_handle_of_data(i2c_client, pdata); if (ret != 0) return ret; } cs35l35->pdata = *pdata; } ret = regulator_bulk_enable(cs35l35->num_supplies, cs35l35->supplies); if (ret != 0) { dev_err(dev, "Failed to enable core supplies: %d\n", ret); return ret; } /* returning NULL can be valid if in stereo mode */ cs35l35->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(cs35l35->reset_gpio)) { ret = PTR_ERR(cs35l35->reset_gpio); cs35l35->reset_gpio = NULL; if (ret == -EBUSY) { dev_info(dev, "Reset line busy, assuming shared reset\n"); } else { dev_err(dev, "Failed to get reset GPIO: %d\n", ret); goto err; } } cs35l35_reset(cs35l35); init_completion(&cs35l35->pdn_done); ret = 
devm_request_threaded_irq(dev, i2c_client->irq, NULL, cs35l35_irq, IRQF_ONESHOT | IRQF_TRIGGER_LOW | IRQF_SHARED, "cs35l35", cs35l35); if (ret != 0) { dev_err(dev, "Failed to request IRQ: %d\n", ret); goto err; } /* initialize codec */ ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_AB, &reg); devid = (reg & 0xFF) << 12; ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_CD, &reg); devid |= (reg & 0xFF) << 4; ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_E, &reg); devid |= (reg & 0xF0) >> 4; if (devid != CS35L35_CHIP_ID) { dev_err(dev, "CS35L35 Device ID (%X). Expected ID %X\n", devid, CS35L35_CHIP_ID); ret = -ENODEV; goto err; } ret = regmap_read(cs35l35->regmap, CS35L35_REV_ID, &reg); if (ret < 0) { dev_err(dev, "Get Revision ID failed: %d\n", ret); goto err; } ret = regmap_register_patch(cs35l35->regmap, cs35l35_errata_patch, ARRAY_SIZE(cs35l35_errata_patch)); if (ret < 0) { dev_err(dev, "Failed to apply errata patch: %d\n", ret); goto err; } dev_info(dev, "Cirrus Logic CS35L35 (%x), Revision: %02X\n", devid, reg & 0xFF); /* Set the INT Masks for critical errors */ regmap_write(cs35l35->regmap, CS35L35_INT_MASK_1, CS35L35_INT1_CRIT_MASK); regmap_write(cs35l35->regmap, CS35L35_INT_MASK_2, CS35L35_INT2_CRIT_MASK); regmap_write(cs35l35->regmap, CS35L35_INT_MASK_3, CS35L35_INT3_CRIT_MASK); regmap_write(cs35l35->regmap, CS35L35_INT_MASK_4, CS35L35_INT4_CRIT_MASK); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PWR2_PDN_MASK, CS35L35_PWR2_PDN_MASK); if (cs35l35->pdata.bst_pdn_fet_on) regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 1 << CS35L35_PDN_BST_FETON_SHIFT); else regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL2, CS35L35_PDN_BST_MASK, 1 << CS35L35_PDN_BST_FETOFF_SHIFT); regmap_update_bits(cs35l35->regmap, CS35L35_PWRCTL3, CS35L35_PWR3_PDN_MASK, CS35L35_PWR3_PDN_MASK); regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL, CS35L35_AMP_MUTE_MASK, 1 << CS35L35_AMP_MUTE_SHIFT); ret = snd_soc_register_codec(dev, 
&soc_codec_dev_cs35l35, cs35l35_dai, ARRAY_SIZE(cs35l35_dai)); if (ret < 0) { dev_err(dev, "Failed to register codec: %d\n", ret); goto err; } return 0; err: regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies); gpiod_set_value_cansleep(cs35l35->reset_gpio, 0); return ret; } static int cs35l35_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct of_device_id cs35l35_of_match[] = { {.compatible = "cirrus,cs35l35"}, {}, }; MODULE_DEVICE_TABLE(of, cs35l35_of_match); static const struct i2c_device_id cs35l35_id[] = { {"cs35l35", 0}, {} }; MODULE_DEVICE_TABLE(i2c, cs35l35_id); static struct i2c_driver cs35l35_i2c_driver = { .driver = { .name = "cs35l35", .of_match_table = cs35l35_of_match, }, .id_table = cs35l35_id, .probe = cs35l35_i2c_probe, .remove = cs35l35_i2c_remove, }; module_i2c_driver(cs35l35_i2c_driver); MODULE_DESCRIPTION("ASoC CS35L35 driver"); MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>"); MODULE_LICENSE("GPL");
gpl-2.0
sysprogs/openocd
src/flash/nand/ecc_kw.c
27
4632
/*
 * Reed-Solomon ECC handling for the Marvell Kirkwood SOC
 * Copyright (C) 2009 Marvell Semiconductor, Inc.
 *
 * Authors: Lennert Buytenhek <buytenh@wantstofly.org>
 *          Nicolas Pitre <nico@fluxnic.net>
 *
 * This file is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "core.h"

/*****************************************************************************
 * Arithmetic in GF(2^10) ("F") modulo x^10 + x^3 + 1.
 *
 * For multiplication, a discrete log/exponent table is used, with
 * primitive element x (F is a primitive field, so x is primitive).
 */
#define MODPOLY 0x409		/* x^10 + x^3 + 1 in binary */

/*
 * Maps an integer a [0..1022] to a polynomial b = gf_exp[a] in
 * GF(2^10) mod x^10 + x^3 + 1 such that b = x ^ a.  There's two
 * identical copies of this array back-to-back so that we can save
 * the mod 1023 operation when doing a GF multiplication.
 */
static uint16_t gf_exp[1023 + 1023];

/*
 * Maps a polynomial b in GF(2^10) mod x^10 + x^3 + 1 to an index
 * a = gf_log[b] in [0..1022] such that b = x ^ a.
 */
static uint16_t gf_log[1024];

/*
 * Fill gf_exp[] and gf_log[] by repeated multiplication by the
 * primitive element x, reducing by MODPOLY whenever bit 10 is set.
 * gf_log[0] is never written: 0 has no discrete logarithm, and the
 * caller only indexes gf_log[] with a non-zero symbol (r7 != 0).
 */
static void gf_build_log_exp_table(void)
{
	int i;
	int p_i;

	/*
	 * p_i = x ^ i
	 *
	 * Initialise to 1 for i = 0.
	 */
	p_i = 1;
	for (i = 0; i < 1023; i++) {
		gf_exp[i] = p_i;
		gf_exp[i + 1023] = p_i;	/* second copy: saves the mod 1023 */
		gf_log[p_i] = i;

		/*
		 * p_i = p_i * x
		 */
		p_i <<= 1;
		if (p_i & (1 << 10))
			p_i ^= MODPOLY;
	}
}

/*****************************************************************************
 * Reed-Solomon code
 *
 * This implements a (1023,1015) Reed-Solomon ECC code over GF(2^10)
 * mod x^10 + x^3 + 1, shortened to (520,512).  The ECC data consists
 * of 8 10-bit symbols, or 10 8-bit bytes.
 *
 * Given 512 bytes of data, computes 10 bytes of ECC.
 *
 * This is done by converting the 512 bytes to 512 10-bit symbols
 * (elements of F), interpreting those symbols as a polynomial in F[X]
 * by taking symbol 0 as the coefficient of X^8 and symbol 511 as the
 * coefficient of X^519, and calculating the residue of that polynomial
 * divided by the generator polynomial, which gives us the 8 ECC symbols
 * as the remainder.  Finally, we convert the 8 10-bit ECC symbols to 10
 * 8-bit bytes.
 *
 * The generator polynomial is hardcoded, as that is faster, but it
 * can be computed by taking the primitive element a = x (in F), and
 * constructing a polynomial in F[X] with roots a, a^2, a^3, ..., a^8
 * by multiplying the minimal polynomials for those roots (which are
 * just 'x - a^i' for each i).
 *
 * Note: due to unfortunate circumstances, the bootrom in the Kirkwood SOC
 * expects the ECC to be computed backward, i.e. from the last byte down
 * to the first one.
 */

/*
 * nand_calculate_ecc_kw - compute the 10 Kirkwood ECC bytes for one
 * 512-byte data block.
 *
 * @nand: NAND device handle (unused by this routine).
 * @data: input buffer; bytes data[0..511] are read.
 * @ecc:  output buffer; bytes ecc[0..9] are written.
 *
 * Always returns 0.
 *
 * r7..r0 hold the eight 10-bit remainder symbols (in 'unsigned int's;
 * only the low 10 bits of each are meaningful by construction of the
 * GF tables).
 *
 * NOTE(review): the lazy one-time init of the GF tables via
 * 'tables_initialized' is not guarded by a lock — presumably callers
 * are single-threaded here; confirm before using concurrently.
 */
int nand_calculate_ecc_kw(struct nand_device *nand, const uint8_t *data,
			  uint8_t *ecc)
{
	unsigned int r7, r6, r5, r4, r3, r2, r1, r0;
	int i;
	static int tables_initialized;

	if (!tables_initialized) {
		gf_build_log_exp_table();
		tables_initialized = 1;
	}

	/*
	 * Load bytes 504..511 of the data into r.
	 */
	r0 = data[504];
	r1 = data[505];
	r2 = data[506];
	r3 = data[507];
	r4 = data[508];
	r5 = data[509];
	r6 = data[510];
	r7 = data[511];

	/*
	 * Shift bytes 503..0 (in that order) into r0, followed
	 * by eight zero bytes, while reducing the polynomial by the
	 * generator polynomial in every step.
	 */
	for (i = 503; i >= -8; i--) {
		unsigned int d;

		d = 0;
		if (i >= 0)
			d = data[i];

		if (r7) {
			/*
			 * Multiply the generator coefficients by r7 via the
			 * log/exp tables: t[k] = x^(log(r7) + k), where the
			 * hex offsets are the hardcoded logs of the
			 * generator polynomial's coefficients.
			 */
			uint16_t *t = gf_exp + gf_log[r7];

			r7 = r6 ^ t[0x21c];
			r6 = r5 ^ t[0x181];
			r5 = r4 ^ t[0x18e];
			r4 = r3 ^ t[0x25f];
			r3 = r2 ^ t[0x197];
			r2 = r1 ^ t[0x193];
			r1 = r0 ^ t[0x237];
			r0 = d ^ t[0x024];
		} else {
			/* Leading symbol is zero: plain shift, no reduction. */
			r7 = r6;
			r6 = r5;
			r5 = r4;
			r4 = r3;
			r3 = r2;
			r2 = r1;
			r1 = r0;
			r0 = d;
		}
	}

	/* Pack the eight 10-bit symbols into ten little-endian-ish bytes. */
	ecc[0] = r0;
	ecc[1] = (r0 >> 8) | (r1 << 2);
	ecc[2] = (r1 >> 6) | (r2 << 4);
	ecc[3] = (r2 >> 4) | (r3 << 6);
	ecc[4] = (r3 >> 2);
	ecc[5] = r4;
	ecc[6] = (r4 >> 8) | (r5 << 2);
	ecc[7] = (r5 >> 6) | (r6 << 4);
	ecc[8] = (r6 >> 4) | (r7 << 6);
	ecc[9] = (r7 >> 2);

	return 0;
}
gpl-2.0
nitinkamble/x32-glibc
stdio-common/errnobug.c
27
1540
/* Regression test for reported old bug that errno is clobbered by the
   first successful output to a stream on an unseekable object.
   Copyright (C) 1995, 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int pipefd[2];
  FILE *stream;

  /* The bug only showed on a stream that cannot seek, so write through
     the write end of a pipe.  */
  if (pipe (pipefd) != 0)
    {
      perror ("pipe");
      return 1;
    }

  stream = fdopen (pipefd[1], "w");
  if (stream == NULL)
    {
      perror ("fdopen");
      return 1;
    }

  /* Clear errno, perform one successful write, then verify that errno
     was left untouched.  */
  errno = 0;

  if (fputs ("fnord", stream) == EOF)
    {
      perror ("fputs");
      return 1;
    }

  if (errno != 0)
    {
      perror ("errno gratuitously set -- TEST FAILED");
      return 1;
    }

  puts ("Test succeeded.");
  return 0;
}
gpl-2.0
TeamApexQ/android_kernel_samsung_d2
net/core/fib_rules.c
539
18998
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

/* Local uid helpers: INVALID_UID marks "no uid range configured". */
#define INVALID_UID ((uid_t) -1)
#define uid_valid(uid) ((uid) != -1)
#define uid_lte(a, b) ((a) <= (b))
#define uid_eq(a, b) ((a) == (b))
#define uid_gte(a, b) ((a) >= (b))

/*
 * Allocate a default FR_ACT_TO_TBL rule for @table with priority @pref
 * and append it to @ops->rules_list.  Used at init time, before the
 * list is reachable by readers, hence no locking.  Returns 0 or -ENOMEM.
 */
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->uid_start = INVALID_UID;	/* no uid range by default */
	r->uid_end = INVALID_UID;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

/*
 * Suggest a priority for a new rule: one less than the second rule in
 * the list (if it has a non-zero pref), otherwise 0.
 */
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

/*
 * Find the fib_rules_ops registered for @family in @net, taking a
 * reference on its owning module.  Returns NULL if not found or if the
 * module reference could not be taken.  Pair with rules_ops_put().
 */
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/* Drop the module reference taken by lookup_rules_ops() (NULL-safe). */
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

/* Invoke the per-family route cache flush hook, if one is provided. */
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

/*
 * Validate @ops (mandatory callbacks, minimum rule size) and link it
 * into @ops->fro_net's rules_ops list.  Returns 0, -EINVAL on a bad
 * template, or -EEXIST if the family is already registered.
 */
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

/*
 * Duplicate the template @tmpl, bind it to @net and register it.
 * Returns the new ops or an ERR_PTR on failure; the caller owns the
 * returned ops until fib_rules_unregister().
 */
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

/* Unlink and drop every rule still attached to @ops. */
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

/* RCU callback: release the net reference and free the ops. */
static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

/*
 * Unregister @ops: unlink it and its rules under rules_mod_lock, then
 * defer the actual free past a grace period so concurrent RCU readers
 * (e.g. fib_rules_lookup) stay safe.
 */
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

/* Extract a uid from a netlink attribute (stored as u32 on the wire). */
static inline uid_t fib_nl_uid(struct nlattr *nla)
{
	return nla_get_u32(nla);
}
/* Emit a uid as a u32 netlink attribute (counterpart of fib_nl_uid). */
static int nla_put_uid(struct sk_buff *skb, int idx, uid_t uid)
{
	return nla_put_u32(skb, idx, uid);
}

/*
 * Match the flow's uid against the rule's [uid_start, uid_end] range.
 * A rule with both ends INVALID_UID matches every flow.
 */
static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
{
	return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
	       (uid_gte(fl->flowi_uid, rule->uid_start) &&
		uid_lte(fl->flowi_uid, rule->uid_end));
}

/*
 * Check the generic selectors (iif, oif, fwmark, uid range) and then
 * the per-family ops->match() hook.  FIB_RULE_INVERT flips the result.
 */
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	/* mark matches if the masked bits agree */
	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (!fib_uid_range_match(fl, rule))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

/*
 * Walk the rule list under RCU and run ops->action() on each matching
 * rule until one returns something other than -EAGAIN.  FR_ACT_GOTO
 * jumps directly to its resolved target rule (unresolved targets are
 * skipped); FR_ACT_NOP rules are skipped.  On success the matched rule
 * is stored in arg->rule with a reference taken unless the caller set
 * FIB_LOOKUP_NOREF.  Returns 0 (via the action), a negative error from
 * the action, or -ESRCH if no rule matched.
 */
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				/* unresolved goto: fall through to next rule */
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			/* refcnt can be zero if the rule is being deleted;
			 * in that case abandon the walk and report -ESRCH. */
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

/*
 * Sanity-check a RTM_NEWRULE/RTM_DELRULE message: if a src/dst prefix
 * length is given, the matching address attribute must be present, of
 * the family's address size, and the prefix must fit in it.
 */
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
/*
 * RTM_NEWRULE handler: parse and validate the netlink request, build a
 * new fib_rule, insert it into the per-ops list ordered by priority,
 * resolve pending goto targets pointing at it, and notify listeners.
 */
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	/* Interface matches are kept by name; ifindex -1 marks a name
	 * that is not currently attached to a device. */
	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		/* Try to resolve the goto target right away. */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	/* UID start and end must either both be valid or both unspecified. */
	rule->uid_start = rule->uid_end = INVALID_UID;
	if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
		if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
			rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
			rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
		}
		if (!uid_valid(rule->uid_start) ||
		    !uid_valid(rule->uid_end) ||
		    !uid_lte(rule->uid_start, rule->uid_end))
			goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	/* Insert ordered by ascending priority. */
	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

/*
 * RTM_DELRULE handler: find the first rule matching every attribute
 * supplied in the request, unlink it, fix up goto bookkeeping, notify
 * listeners and drop the rule's reference.
 */
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		/* Every attribute present in the request must match. */
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_UID_START] &&
		    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
			continue;

		if (tb[FRA_UID_END] &&
		    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

/*
 * Upper bound on the netlink message payload needed to dump @rule,
 * including the per-family extra reported by ops->nlmsg_payload().
 */
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size(4) /* FRA_UID_START */
			 + nla_total_size(4); /* FRA_UID_END */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

/*
 * Serialize @rule into @skb as one netlink message of @type.  Returns
 * the message length or -EMSGSIZE if @skb ran out of room (the partial
 * message is cancelled in that case).
 */
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (uid_valid(rule->uid_start))
		nla_put_uid(skb, FRA_UID_START, rule->uid_start);

	if (uid_valid(rule->uid_end))
		nla_put_uid(skb, FRA_UID_END, rule->uid_end);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Dump all rules of one family into @skb, resuming at cb->args[1].
 * Consumes the ops reference via rules_ops_put().
 */
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;	/* resume point within this family */
	rules_ops_put(ops);

	return skb->len;
}

/*
 * RTM_GETRULE dump handler: dump a single family if one was requested,
 * otherwise iterate over all registered families (cb->args[0] tracks
 * the family resume point, cb->args[1] the rule resume point).
 */
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/*
 * Broadcast a RTM_NEWRULE/RTM_DELRULE notification for @rule to the
 * ops' netlink group; on failure report the error on the socket.
 */
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

/* Re-bind rules whose interface name matches a newly available @dev. */
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

/* Detach rules bound to @dev when it goes away (ifindex back to -1). */
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

/*
 * Netdevice notifier: keep the rules' cached ifindexes in sync with
 * device registration, renaming and unregistration.  Runs under RTNL.
 */
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

/* Per-namespace init: empty ops list plus its protecting lock. */
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

/*
 * Module init: register the rtnetlink handlers, the pernet subsystem
 * and the netdevice notifier, unwinding in reverse order on failure.
 */
static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);
gpl-2.0
GolovanovSrg/au-linux-kernel-spring-2016
linux/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
795
22237
/* * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * * Copyright (c) 2014 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifdef CONFIG_MSM_OCMEM # include <mach/ocmem.h> #endif #include "a3xx_gpu.h" #define A3XX_INT0_MASK \ (A3XX_INT0_RBBM_AHB_ERROR | \ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \ A3XX_INT0_CP_T0_PACKET_IN_IB | \ A3XX_INT0_CP_OPCODE_ERROR | \ A3XX_INT0_CP_RESERVED_BIT_ERROR | \ A3XX_INT0_CP_HW_FAULT | \ A3XX_INT0_CP_IB1_INT | \ A3XX_INT0_CP_IB2_INT | \ A3XX_INT0_CP_RB_INT | \ A3XX_INT0_CP_REG_PROTECT_FAULT | \ A3XX_INT0_CP_AHB_ERROR_HALT | \ A3XX_INT0_UCHE_OOB_ACCESS) extern bool hang_debug; static void a3xx_dump(struct msm_gpu *gpu); static void a3xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb; OUT_PKT3(ring, CP_ME_INIT, 17); OUT_RING(ring, 0x000003f7); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000080); OUT_RING(ring, 0x00000100); OUT_RING(ring, 0x00000180); OUT_RING(ring, 0x00006600); OUT_RING(ring, 0x00000150); OUT_RING(ring, 0x0000014e); OUT_RING(ring, 0x00000154); OUT_RING(ring, 0x00000001); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); gpu->funcs->flush(gpu); gpu->funcs->idle(gpu); } static int a3xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct 
a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); uint32_t *ptr, len; int i, ret; DBG("%s", gpu->name); if (adreno_is_a305(adreno_gpu)) { /* Set up 16 deep read/write request queues: */ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); /* Enable WR-REQ: */ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); /* Set up round robin arbitration between both AXI ports: */ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); /* Set up AOOO: */ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); } else if (adreno_is_a306(adreno_gpu)) { gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a); gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a); } else if (adreno_is_a320(adreno_gpu)) { /* Set up 16 deep read/write request queues: */ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); /* Enable WR-REQ: */ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); /* Set up round robin arbitration between both AXI ports: */ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); /* Set up AOOO: */ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); /* Enable 1K sort: */ 
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); } else if (adreno_is_a330v2(adreno_gpu)) { /* * Most of the VBIF registers on 8974v2 have the correct * values at power on, so we won't modify those if we don't * need to */ /* Enable 1k sort: */ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); /* Enable WR-REQ: */ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); } else if (adreno_is_a330(adreno_gpu)) { /* Set up 16 deep read/write request queues: */ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818); /* Enable WR-REQ: */ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); /* Set up round robin arbitration between both AXI ports: */ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); /* Set up AOOO: */ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f); /* Enable 1K sort: */ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); /* Disable VBIF clock gating. 
This is to enable AXI running * higher frequency than GPU: */ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001); } else { BUG(); } /* Make all blocks contribute to the GPU BUSY perf counter: */ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff); /* Tune the hystersis counters for SP and CP idle detection: */ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10); gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10); /* Enable the RBBM error reporting bits. This lets us get * useful information on failure: */ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001); /* Enable AHB error reporting: */ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff); /* Turn on the power counters: */ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000); /* Turn on hang detection - this spews a lot of useful information * into the RBBM registers on a hang: */ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff); /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); /* Enable Clock gating: */ if (adreno_is_a306(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); else if (adreno_is_a320(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); else if (adreno_is_a330v2(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); else if (adreno_is_a330(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff); if (adreno_is_a330v2(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455); else if (adreno_is_a330(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); /* Set the OCMEM base address for A330, etc */ if (a3xx_gpu->ocmem_hdl) { gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, (unsigned int)(a3xx_gpu->ocmem_base >> 14)); } /* Turn on performance counters: */ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); /* Enable the perfcntrs that we use.. 
*/ for (i = 0; i < gpu->num_perfcntrs; i++) { const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i]; gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val); } gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK); ret = adreno_hw_init(gpu); if (ret) return ret; /* setup access protection: */ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); /* RBBM registers */ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040); gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080); gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc); gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108); gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140); gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400); /* CP registers */ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700); gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8); gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0); gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178); gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180); /* RB registers */ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300); /* VBIF registers */ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000); /* NOTE: PM4/micro-engine firmware registers look to be the same * for a2xx and a3xx.. we could possibly push that part down to * adreno_gpu base class. Or push both PM4 and PFP but * parameterize the pfp ucode addr/data registers.. 
*/ /* Load PM4: */ ptr = (uint32_t *)(adreno_gpu->pm4->data); len = adreno_gpu->pm4->size / 4; DBG("loading PM4 ucode version: %x", ptr[1]); gpu_write(gpu, REG_AXXX_CP_DEBUG, AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE); gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0); for (i = 1; i < len; i++) gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]); /* Load PFP: */ ptr = (uint32_t *)(adreno_gpu->pfp->data); len = adreno_gpu->pfp->size / 4; DBG("loading PFP ucode version: %x", ptr[5]); gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); for (i = 1; i < len; i++) gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) || adreno_is_a320(adreno_gpu)) { gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); } else if (adreno_is_a330(adreno_gpu)) { /* NOTE: this (value take from downstream android driver) * includes some bits outside of the known bitfields. 
But * A330 has this "MERCIU queue" thing too, which might * explain a new bitfield or reshuffling: */ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008); } /* clear ME_HALT to start micro engine */ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); a3xx_me_init(gpu); return 0; } static void a3xx_recover(struct msm_gpu *gpu) { adreno_dump_info(gpu); /* dump registers before resetting gpu, if enabled: */ if (hang_debug) a3xx_dump(gpu); gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); adreno_recover(gpu); } static void a3xx_destroy(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); DBG("%s", gpu->name); adreno_gpu_cleanup(adreno_gpu); #ifdef CONFIG_MSM_OCMEM if (a3xx_gpu->ocmem_base) ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); #endif kfree(a3xx_gpu); } static void a3xx_idle(struct msm_gpu *gpu) { /* wait for ringbuffer to drain: */ adreno_idle(gpu); /* then wait for GPU to finish: */ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) & A3XX_RBBM_STATUS_GPU_BUSY))) DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); /* TODO maybe we need to reset GPU here to recover from hang? 
*/ } static irqreturn_t a3xx_irq(struct msm_gpu *gpu) { uint32_t status; status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS); DBG("%s: %08x", gpu->name, status); // TODO gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status); msm_gpu_retire(gpu); return IRQ_HANDLED; } static const unsigned int a3xx_registers[] = { 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027, 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c, 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5, 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1, 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd, 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff, 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f, 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f, 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e, 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f, 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7, 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65, 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7, 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09, 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069, 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075, 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109, 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115, 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0, 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e, 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8, 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7, 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356, 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 
0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749, 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f, ~0 /* sentinel */ }; #ifdef CONFIG_DEBUG_FS static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) { gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A3XX_RBBM_STATUS)); gpu->funcs->pm_suspend(gpu); adreno_show(gpu, m); } #endif /* would be nice to not have to duplicate the _show() stuff with printk(): */ static void a3xx_dump(struct msm_gpu *gpu) { printk("status: %08x\n", gpu_read(gpu, REG_A3XX_RBBM_STATUS)); adreno_dump(gpu); } /* Register offset defines for A3XX */ static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG), REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA), REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA, REG_A3XX_CP_PFP_UCODE_DATA), REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR, REG_A3XX_CP_PFP_UCODE_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL), REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL), 
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE), REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ), REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE), REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ), REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0), REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR), REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK), REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA), REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA), REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2), REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR), REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA), REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT), REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS, REG_A3XX_CP_PROTECT_STATUS), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL, REG_A3XX_RBBM_PERFCTR_CTL), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0, REG_A3XX_RBBM_PERFCTR_LOAD_CMD0), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1, REG_A3XX_RBBM_PERFCTR_LOAD_CMD1), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO, REG_A3XX_RBBM_PERFCTR_PWR_1_LO), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS, REG_A3XX_RBBM_INT_0_STATUS), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS, REG_A3XX_RBBM_AHB_ERROR_STATUS), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD, 
REG_A3XX_RBBM_INT_CLEAR_CMD), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL), REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL, REG_A3XX_VPC_VPC_DEBUG_RAM_SEL), REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ, REG_A3XX_VPC_VPC_DEBUG_RAM_READ), REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS, REG_A3XX_VSC_SIZE_ADDRESS), REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0), REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX), REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG, REG_A3XX_SP_VS_PVT_MEM_ADDR_REG), REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG, REG_A3XX_SP_FS_PVT_MEM_ADDR_REG), REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG, REG_A3XX_SP_VS_OBJ_START_REG), REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG, REG_A3XX_SP_FS_OBJ_START_REG), REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2, REG_A3XX_RBBM_PM_OVERRIDE2), REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2), REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT, REG_A3XX_SQ_GPR_MANAGEMENT), REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT, REG_A3XX_SQ_INST_STORE_MANAGMENT), REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD, REG_A3XX_RBBM_SW_RESET_CMD), REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO, REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO), REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI, REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI), }; static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, .hw_init = a3xx_hw_init, .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a3xx_recover, .last_fence = adreno_last_fence, .submit = adreno_submit, .flush = adreno_flush, .idle = a3xx_idle, .irq = 
a3xx_irq, .destroy = a3xx_destroy, #ifdef CONFIG_DEBUG_FS .show = a3xx_show, #endif }, }; static const struct msm_gpu_perfcntr perfcntrs[] = { { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO, SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" }, { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO, SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" }, }; struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) { struct a3xx_gpu *a3xx_gpu = NULL; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; int ret; if (!pdev) { dev_err(dev->dev, "no a3xx device\n"); ret = -ENXIO; goto fail; } a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL); if (!a3xx_gpu) { ret = -ENOMEM; goto fail; } adreno_gpu = &a3xx_gpu->base; gpu = &adreno_gpu->base; a3xx_gpu->pdev = pdev; gpu->perfcntrs = perfcntrs; gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); adreno_gpu->registers = a3xx_registers; adreno_gpu->reg_offsets = a3xx_register_offsets; ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); if (ret) goto fail; /* if needed, allocate gmem: */ if (adreno_is_a330(adreno_gpu)) { #ifdef CONFIG_MSM_OCMEM /* TODO this is different/missing upstream: */ struct ocmem_buf *ocmem_hdl = ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); a3xx_gpu->ocmem_hdl = ocmem_hdl; a3xx_gpu->ocmem_base = ocmem_hdl->addr; adreno_gpu->gmem = ocmem_hdl->len; DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, a3xx_gpu->ocmem_base); #endif } if (!gpu->mmu) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. For now just bail out and * limp along with just modesetting. If it turns out * to not be possible to restrict access, then we must * implement a cmdstream validator. 
*/ dev_err(dev->dev, "No memory protection without IOMMU\n"); ret = -ENXIO; goto fail; } return gpu; fail: if (a3xx_gpu) a3xx_destroy(&a3xx_gpu->base.base); return ERR_PTR(ret); }
gpl-2.0
tenfar/pyramid-gb-kernel
drivers/mfd/wm8400-core.c
1051
13080
/* * Core driver for WM8400. * * Copyright 2008 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * */ #include <linux/bug.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/mfd/core.h> #include <linux/mfd/wm8400-private.h> #include <linux/mfd/wm8400-audio.h> #include <linux/slab.h> static struct { u16 readable; /* Mask of readable bits */ u16 writable; /* Mask of writable bits */ u16 vol; /* Mask of volatile bits */ int is_codec; /* Register controlled by codec reset */ u16 default_val; /* Value on reset */ } reg_data[] = { { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */ { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */ { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */ { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */ { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4 */ { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5 */ { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6 */ { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7 */ { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8 */ { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9 */ { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */ { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */ { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */ { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */ { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */ { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */ { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */ { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */ { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */ { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */ { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */ { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */ { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */ { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* 
R23 */ { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */ { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */ { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */ { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */ { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */ { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */ { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */ { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */ { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */ { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */ { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */ { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */ { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */ { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */ { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */ { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */ { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */ { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */ { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */ { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */ { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */ { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */ { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */ { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */ { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */ { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */ { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */ { 0x0000, 0x0000, 0x0000, 0, 
0x0000 }, /* R64 */ { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */ { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */ { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */ { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */ { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */ { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */ { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */ { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */ { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */ { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */ { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */ { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */ { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */ { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */ { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */ { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */ { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */ }; static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest) { int i, ret = 0; BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); /* If there are any volatile reads then read back the entire block */ for (i = reg; i < reg + num_regs; i++) if (reg_data[i].vol) { ret = wm8400->read_dev(wm8400->io_data, reg, num_regs, dest); if (ret != 0) return ret; for (i = 0; i < num_regs; i++) dest[i] = be16_to_cpu(dest[i]); return 0; } /* Otherwise use the cache */ memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16)); return 0; } static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *src) { int ret, i; BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); for (i = 0; i < num_regs; i++) { BUG_ON(!reg_data[reg + i].writable); wm8400->reg_cache[reg + i] = src[i]; src[i] = cpu_to_be16(src[i]); } /* Do the actual I/O */ ret = wm8400->write_dev(wm8400->io_data, reg, num_regs, src); if (ret != 0) return -EIO; return 0; } /** * wm8400_reg_read - Single 
register read * * @wm8400: Pointer to wm8400 control structure * @reg: Register to read * * @return Read value */ u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg) { u16 val; mutex_lock(&wm8400->io_lock); wm8400_read(wm8400, reg, 1, &val); mutex_unlock(&wm8400->io_lock); return val; } EXPORT_SYMBOL_GPL(wm8400_reg_read); int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data) { int ret; mutex_lock(&wm8400->io_lock); ret = wm8400_read(wm8400, reg, count, data); mutex_unlock(&wm8400->io_lock); return ret; } EXPORT_SYMBOL_GPL(wm8400_block_read); /** * wm8400_set_bits - Bitmask write * * @wm8400: Pointer to wm8400 control structure * @reg: Register to access * @mask: Mask of bits to change * @val: Value to set for masked bits */ int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val) { u16 tmp; int ret; mutex_lock(&wm8400->io_lock); ret = wm8400_read(wm8400, reg, 1, &tmp); tmp = (tmp & ~mask) | val; if (ret == 0) ret = wm8400_write(wm8400, reg, 1, &tmp); mutex_unlock(&wm8400->io_lock); return ret; } EXPORT_SYMBOL_GPL(wm8400_set_bits); /** * wm8400_reset_codec_reg_cache - Reset cached codec registers to * their default values. */ void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400) { int i; mutex_lock(&wm8400->io_lock); /* Reset all codec registers to their initial value */ for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++) if (reg_data[i].is_codec) wm8400->reg_cache[i] = reg_data[i].default_val; mutex_unlock(&wm8400->io_lock); } EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache); static int wm8400_register_codec(struct wm8400 *wm8400) { struct mfd_cell cell = { .name = "wm8400-codec", .driver_data = wm8400, }; return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0); } /* * wm8400_init - Generic initialisation * * The WM8400 can be configured as either an I2C or SPI device. Probe * functions for each bus set up the accessors then call into this to * set up the device itself. 
*/ static int wm8400_init(struct wm8400 *wm8400, struct wm8400_platform_data *pdata) { u16 reg; int ret, i; mutex_init(&wm8400->io_lock); dev_set_drvdata(wm8400->dev, wm8400); /* Check that this is actually a WM8400 */ ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg); if (ret != 0) { dev_err(wm8400->dev, "Chip ID register read failed\n"); return -EIO; } if (be16_to_cpu(reg) != reg_data[WM8400_RESET_ID].default_val) { dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", be16_to_cpu(reg)); return -ENODEV; } /* We don't know what state the hardware is in and since this * is a PMIC we can't reset it safely so initialise the register * cache from the hardware. */ ret = wm8400->read_dev(wm8400->io_data, 0, ARRAY_SIZE(wm8400->reg_cache), wm8400->reg_cache); if (ret != 0) { dev_err(wm8400->dev, "Register cache read failed\n"); return -EIO; } for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++) wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]); /* If the codec is in reset use hard coded values */ if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA)) for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++) if (reg_data[i].is_codec) wm8400->reg_cache[i] = reg_data[i].default_val; ret = wm8400_read(wm8400, WM8400_ID, 1, &reg); if (ret != 0) { dev_err(wm8400->dev, "ID register read failed: %d\n", ret); return ret; } reg = (reg & WM8400_CHIP_REV_MASK) >> WM8400_CHIP_REV_SHIFT; dev_info(wm8400->dev, "WM8400 revision %x\n", reg); ret = wm8400_register_codec(wm8400); if (ret != 0) { dev_err(wm8400->dev, "Failed to register codec\n"); goto err_children; } if (pdata && pdata->platform_init) { ret = pdata->platform_init(wm8400->dev); if (ret != 0) { dev_err(wm8400->dev, "Platform init failed: %d\n", ret); goto err_children; } } else dev_warn(wm8400->dev, "No platform initialisation supplied\n"); return 0; err_children: mfd_remove_devices(wm8400->dev); return ret; } static void wm8400_release(struct wm8400 *wm8400) { 
mfd_remove_devices(wm8400->dev); } #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static int wm8400_i2c_read(void *io_data, char reg, int count, u16 *dest) { struct i2c_client *i2c = io_data; struct i2c_msg xfer[2]; int ret; /* Write register */ xfer[0].addr = i2c->addr; xfer[0].flags = 0; xfer[0].len = 1; xfer[0].buf = &reg; /* Read data */ xfer[1].addr = i2c->addr; xfer[1].flags = I2C_M_RD; xfer[1].len = count * sizeof(u16); xfer[1].buf = (u8 *)dest; ret = i2c_transfer(i2c->adapter, xfer, 2); if (ret == 2) ret = 0; else if (ret >= 0) ret = -EIO; return ret; } static int wm8400_i2c_write(void *io_data, char reg, int count, const u16 *src) { struct i2c_client *i2c = io_data; u8 *msg; int ret; /* We add 1 byte for device register - ideally I2C would gather. */ msg = kmalloc((count * sizeof(u16)) + 1, GFP_KERNEL); if (msg == NULL) return -ENOMEM; msg[0] = reg; memcpy(&msg[1], src, count * sizeof(u16)); ret = i2c_master_send(i2c, msg, (count * sizeof(u16)) + 1); if (ret == (count * 2) + 1) ret = 0; else if (ret >= 0) ret = -EIO; kfree(msg); return ret; } static int wm8400_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8400 *wm8400; int ret; wm8400 = kzalloc(sizeof(struct wm8400), GFP_KERNEL); if (wm8400 == NULL) { ret = -ENOMEM; goto err; } wm8400->io_data = i2c; wm8400->read_dev = wm8400_i2c_read; wm8400->write_dev = wm8400_i2c_write; wm8400->dev = &i2c->dev; i2c_set_clientdata(i2c, wm8400); ret = wm8400_init(wm8400, i2c->dev.platform_data); if (ret != 0) goto struct_err; return 0; struct_err: kfree(wm8400); err: return ret; } static int wm8400_i2c_remove(struct i2c_client *i2c) { struct wm8400 *wm8400 = i2c_get_clientdata(i2c); wm8400_release(wm8400); kfree(wm8400); return 0; } static const struct i2c_device_id wm8400_i2c_id[] = { { "wm8400", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8400_i2c_id); static struct i2c_driver wm8400_i2c_driver = { .driver = { .name = "WM8400", .owner = THIS_MODULE, }, .probe = wm8400_i2c_probe, .remove 
= wm8400_i2c_remove, .id_table = wm8400_i2c_id, }; #endif static int __init wm8400_module_init(void) { int ret = -ENODEV; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8400_i2c_driver); if (ret != 0) pr_err("Failed to register I2C driver: %d\n", ret); #endif return ret; } subsys_initcall(wm8400_module_init); static void __exit wm8400_module_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8400_i2c_driver); #endif } module_exit(wm8400_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
gpl-2.0
jerdog/sony_xperia_z
arch/m68k/mm/mcfmmu.c
4635
5140
/* * Based upon linux/arch/m68k/mm/sun3mmu.c * Based upon linux/arch/ppc/mm/mmu_context.c * * Implementations of mm routines specific to the Coldfire MMU. * * Copyright (c) 2008 Freescale Semiconductor, Inc. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/string.h> #include <linux/bootmem.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/mcf_pgalloc.h> #include <asm/tlbflush.h> #define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END)) mm_context_t next_mmu_context; unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; atomic_t nr_free_contexts; struct mm_struct *context_mm[LAST_CONTEXT+1]; extern unsigned long num_pages; void free_initmem(void) { } /* * ColdFire paging_init derived from sun3. */ void __init paging_init(void) { pgd_t *pg_dir; pte_t *pg_table; unsigned long address, size; unsigned long next_pgtable, bootmem_end; unsigned long zones_size[MAX_NR_ZONES]; enum zone_type zone; int i; empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE); memset((void *) empty_zero_page, 0, PAGE_SIZE); pg_dir = swapper_pg_dir; memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); size = num_pages * sizeof(pte_t); size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); next_pgtable = (unsigned long) alloc_bootmem_pages(size); bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; address = PAGE_OFFSET; while (address < (unsigned long)high_memory) { pg_table = (pte_t *) next_pgtable; next_pgtable += PTRS_PER_PTE * sizeof(pte_t); pgd_val(*pg_dir) = (unsigned long) pg_table; pg_dir++; /* now change pg_table to kernel virtual addresses */ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) { pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); if (address >= (unsigned long) high_memory) pte_val(pte) = 0; set_pte(pg_table, pte); address += PAGE_SIZE; } } current->mm = NULL; for (zone = 0; zone < MAX_NR_ZONES; 
zone++) zones_size[zone] = 0x0; zones_size[ZONE_DMA] = num_pages; free_area_init(zones_size); } int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word) { unsigned long flags, mmuar, mmutr; struct mm_struct *mm; pgd_t *pgd; pmd_t *pmd; pte_t *pte; int asid; local_irq_save(flags); mmuar = (dtlb) ? mmu_read(MMUAR) : regs->pc + (extension_word * sizeof(long)); mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; if (!mm) { local_irq_restore(flags); return -1; } pgd = pgd_offset(mm, mmuar); if (pgd_none(*pgd)) { local_irq_restore(flags); return -1; } pmd = pmd_offset(pgd, mmuar); if (pmd_none(*pmd)) { local_irq_restore(flags); return -1; } pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar) : pte_offset_map(pmd, mmuar); if (pte_none(*pte) || !pte_present(*pte)) { local_irq_restore(flags); return -1; } if (write) { if (!pte_write(*pte)) { local_irq_restore(flags); return -1; } set_pte(pte, pte_mkdirty(*pte)); } set_pte(pte, pte_mkyoung(*pte)); asid = mm->context & 0xff; if (!pte_dirty(*pte) && !KMAPAREA(mmuar)) set_pte(pte, pte_wrprotect(*pte)); mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V; if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE)) mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT; mmu_write(MMUTR, mmutr); mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) | ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X); if (dtlb) mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA); else mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA); local_irq_restore(flags); return 0; } /* * Initialize the context management stuff. * The following was taken from arch/ppc/mmu_context.c */ void __init mmu_context_init(void) { /* * Some processors have too few contexts to reserve one for * init_mm, and require using context 0 for a normal task. * Other processors reserve the use of context zero for the kernel. * This code assumes FIRST_CONTEXT < 32. 
*/ context_map[0] = (1 << FIRST_CONTEXT) - 1; next_mmu_context = FIRST_CONTEXT; atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); } /* * Steal a context from a task that has one at the moment. * This is only used on 8xx and 4xx and we presently assume that * they don't do SMP. If they do then thicfpgalloc.hs will have to check * whether the MM we steal is in use. * We also assume that this is only used on systems that don't * use an MMU hash table - this is true for 8xx and 4xx. * This isn't an LRU system, it just frees up each context in * turn (sort-of pseudo-random replacement :). This would be the * place to implement an LRU scheme if anyone was motivated to do it. * -- paulus */ void steal_context(void) { struct mm_struct *mm; /* * free up context `next_mmu_context' * if we shouldn't free context 0, don't... */ if (next_mmu_context < FIRST_CONTEXT) next_mmu_context = FIRST_CONTEXT; mm = context_mm[next_mmu_context]; flush_tlb_mm(mm); destroy_context(mm); }
gpl-2.0
nychitman1/android_kernel_samsung_klte
arch/powerpc/platforms/embedded6xx/wii.c
7451
5641
/* * arch/powerpc/platforms/embedded6xx/wii.c * * Nintendo Wii board-specific support * Copyright (C) 2008-2009 The GameCube Linux Team * Copyright (C) 2008,2009 Albert Herranz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * */ #define DRV_MODULE_NAME "wii" #define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <mm/mmu_decl.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> #include "flipper-pic.h" #include "hlwd-pic.h" #include "usbgecko_udbg.h" /* control block */ #define HW_CTRL_COMPATIBLE "nintendo,hollywood-control" #define HW_CTRL_RESETS 0x94 #define HW_CTRL_RESETS_SYS (1<<0) /* gpio */ #define HW_GPIO_COMPATIBLE "nintendo,hollywood-gpio" #define HW_GPIO_BASE(idx) (idx * 0x20) #define HW_GPIO_OUT(idx) (HW_GPIO_BASE(idx) + 0) #define HW_GPIO_DIR(idx) (HW_GPIO_BASE(idx) + 4) #define HW_GPIO_SHUTDOWN (1<<1) #define HW_GPIO_SLOT_LED (1<<5) #define HW_GPIO_SENSOR_BAR (1<<8) static void __iomem *hw_ctrl; static void __iomem *hw_gpio; unsigned long wii_hole_start; unsigned long wii_hole_size; static int __init page_aligned(unsigned long x) { return !(x & (PAGE_SIZE-1)); } void __init wii_memory_fixups(void) { struct memblock_region *p = memblock.memory.regions; /* * This is part of a workaround to allow the use of two * discontinuous RAM ranges on the Wii, even if this is * currently unsupported on 32-bit PowerPC Linux. * * We coalesce the two memory ranges of the Wii into a * single range, then create a reservation for the "hole" * between both ranges. 
*/ BUG_ON(memblock.memory.cnt != 2); BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); /* trim unaligned tail */ memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE), (phys_addr_t)ULLONG_MAX); /* determine hole, add & reserve them */ wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE); wii_hole_size = p[1].base - wii_hole_start; memblock_add(wii_hole_start, wii_hole_size); memblock_reserve(wii_hole_start, wii_hole_size); BUG_ON(memblock.memory.cnt != 1); __memblock_dump_all(); /* allow ioremapping the address space in the hole */ __allow_ioremap_reserved = 1; } unsigned long __init wii_mmu_mapin_mem2(unsigned long top) { unsigned long delta, size, bl; unsigned long max_size = (256<<20); /* MEM2 64MB@0x10000000 */ delta = wii_hole_start + wii_hole_size; size = top - delta; for (bl = 128<<10; bl < max_size; bl <<= 1) { if (bl * 2 > size) break; } setbat(4, PAGE_OFFSET+delta, delta, bl, PAGE_KERNEL_X); return delta + bl; } static void wii_spin(void) { local_irq_disable(); for (;;) cpu_relax(); } static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible) { void __iomem *hw_regs = NULL; struct device_node *np; struct resource res; int error = -ENODEV; np = of_find_compatible_node(NULL, NULL, compatible); if (!np) { pr_err("no compatible node found for %s\n", compatible); goto out; } error = of_address_to_resource(np, 0, &res); if (error) { pr_err("no valid reg found for %s\n", np->name); goto out_put; } hw_regs = ioremap(res.start, resource_size(&res)); if (hw_regs) { pr_info("%s at 0x%08x mapped to 0x%p\n", name, res.start, hw_regs); } out_put: of_node_put(np); out: return hw_regs; } static void __init wii_setup_arch(void) { hw_ctrl = wii_ioremap_hw_regs("hw_ctrl", HW_CTRL_COMPATIBLE); hw_gpio = wii_ioremap_hw_regs("hw_gpio", HW_GPIO_COMPATIBLE); if (hw_gpio) { /* turn off the front blue led and IR light */ clrbits32(hw_gpio + HW_GPIO_OUT(0), HW_GPIO_SLOT_LED | HW_GPIO_SENSOR_BAR); } } static void wii_restart(char *cmd) { 
local_irq_disable(); if (hw_ctrl) { /* clear the system reset pin to cause a reset */ clrbits32(hw_ctrl + HW_CTRL_RESETS, HW_CTRL_RESETS_SYS); } wii_spin(); } static void wii_power_off(void) { local_irq_disable(); if (hw_gpio) { /* make sure that the poweroff GPIO is configured as output */ setbits32(hw_gpio + HW_GPIO_DIR(1), HW_GPIO_SHUTDOWN); /* drive the poweroff GPIO high */ setbits32(hw_gpio + HW_GPIO_OUT(1), HW_GPIO_SHUTDOWN); } wii_spin(); } static void wii_halt(void) { if (ppc_md.restart) ppc_md.restart(NULL); wii_spin(); } static void __init wii_init_early(void) { ug_udbg_init(); } static void __init wii_pic_probe(void) { flipper_pic_probe(); hlwd_pic_probe(); } static int __init wii_probe(void) { unsigned long dt_root; dt_root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii")) return 0; return 1; } static void wii_shutdown(void) { hlwd_quiesce(); flipper_quiesce(); } define_machine(wii) { .name = "wii", .probe = wii_probe, .init_early = wii_init_early, .setup_arch = wii_setup_arch, .restart = wii_restart, .power_off = wii_power_off, .halt = wii_halt, .init_IRQ = wii_pic_probe, .get_irq = flipper_pic_get_irq, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .machine_shutdown = wii_shutdown, }; static struct of_device_id wii_of_bus[] = { { .compatible = "nintendo,hollywood", }, { }, }; static int __init wii_device_probe(void) { if (!machine_is(wii)) return 0; of_platform_bus_probe(NULL, wii_of_bus, NULL); return 0; } device_initcall(wii_device_probe);
gpl-2.0
rellla/linux-sunxi
drivers/staging/vt6655/power.c
7963
10879
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: power.c * * Purpose: Handles 802.11 power management functions * * Author: Lyndon Chen * * Date: July 17, 2002 * * Functions: * PSvEnablePowerSaving - Enable Power Saving Mode * PSvDiasblePowerSaving - Disable Power Saving Mode * PSbConsiderPowerDown - Decide if we can Power Down * PSvSendPSPOLL - Send PS-POLL packet * PSbSendNullPacket - Send Null packet * PSbIsNextTBTTWakeUp - Decide if we need to wake up at next Beacon * * Revision History: * */ #include "ttype.h" #include "mac.h" #include "device.h" #include "wmgr.h" #include "power.h" #include "wcmd.h" #include "rxtx.h" #include "card.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel =MSG_LEVEL_INFO; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Routine Description: * Enable hw power saving functions * * Return Value: * None. 
* -*/ void PSvEnablePowerSaving( void *hDeviceContext, unsigned short wListenInterval ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned short wAID = pMgmt->wCurrAID | BIT14 | BIT15; // set period of power up before TBTT VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT); if (pDevice->eOPMode != OP_MODE_ADHOC) { // set AID VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID); } else { // set ATIM Window MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow); } // Set AutoSleep MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); // Set HWUTSF MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); if (wListenInterval >= 2) { // clear always listen beacon MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); //pDevice->wCFG &= ~CFG_ALB; // first time set listen next beacon MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN); pMgmt->wCountToWakeUp = wListenInterval; } else { // always listen beacon MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); //pDevice->wCFG |= CFG_ALB; pMgmt->wCountToWakeUp = 0; } // enable power saving hw function MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN); pDevice->bEnablePSMode = true; if (pDevice->eOPMode == OP_MODE_ADHOC) { // bMgrPrepareBeaconToSend((void *)pDevice, pMgmt); } // We don't send null pkt in ad hoc mode since beacon will handle this. else if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) { PSbSendNullPacket(pDevice); } pDevice->bPWBitOn = true; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS:Power Saving Mode Enable... \n"); return; } /*+ * * Routine Description: * Disable hw power saving functions * * Return Value: * None. 
* -*/ void PSvDisablePowerSaving( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; // PSMgmtObject pMgmt = pDevice->pMgmt; // disable power saving hw function MACbPSWakeup(pDevice->PortOffset); //clear AutoSleep MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); //clear HWUTSF MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); // set always listen beacon MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); pDevice->bEnablePSMode = false; if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) { PSbSendNullPacket(pDevice); } pDevice->bPWBitOn = false; return; } /*+ * * Routine Description: * Consider to power down when no more packets to tx or rx. * * Return Value: * true, if power down success * false, if fail -*/ bool PSbConsiderPowerDown( void *hDeviceContext, bool bCheckRxDMA, bool bCheckCountToWakeUp ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uIdx; // check if already in Doze mode if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) return true; if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) { // check if in TIM wake period if (pMgmt->bInTIMWake) return false; } // check scan state if (pDevice->bCmdRunning) return false; // Froce PSEN on MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN); // check if all TD are empty, for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) { if (pDevice->iTDUsed[uIdx] != 0) return false; } // check if rx isr is clear if (bCheckRxDMA && ((pDevice->dwIsr& ISR_RXDMA0) != 0) && ((pDevice->dwIsr & ISR_RXDMA1) != 0)){ return false; } if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) { if (bCheckCountToWakeUp && (pMgmt->wCountToWakeUp == 0 || pMgmt->wCountToWakeUp == 1)) { return false; } } // no Tx, no Rx isr, now go to Doze MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Go to Doze ZZZZZZZZZZZZZZZ\n"); return true; } /*+ * * Routine Description: * Send PS-POLL 
packet * * Return Value: * None. * -*/ void PSvSendPSPOLL( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; PSTxMgmtPacket pTxPacket = NULL; memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN); pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool; pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket)); pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PSPOLL) | WLAN_SET_FC_PWRMGT(0) )); pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15; memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN); memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN; pTxPacket->cbPayloadLen = 0; // send the frame if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n"); } else { // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet success..\n"); }; return; } /*+ * * Routine Description: * Send NULL packet to AP for notification power state of STA * * Return Value: * None. 
* -*/ bool PSbSendNullPacket( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSTxMgmtPacket pTxPacket = NULL; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uIdx; if (pDevice->bLinkPass == false) { return false; } #ifdef TxInSleep if ((pDevice->bEnablePSMode == false) && (pDevice->fTxDataInSleep == false)){ return false; } #else if (pDevice->bEnablePSMode == false) { return false; } #endif if (pDevice->bEnablePSMode) { for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) { if (pDevice->iTDUsed[uIdx] != 0) return false; } } memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN); pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool; pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket)); if (pDevice->bEnablePSMode) { pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) | WLAN_SET_FC_PWRMGT(1) )); } else { pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) | WLAN_SET_FC_PWRMGT(0) )); } if(pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) { pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_TODS(1)); } memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN); memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN; pTxPacket->cbPayloadLen = 0; // send the frame if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n"); return false; } else { // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet success....\n"); } return true ; } /*+ * * Routine Description: * Check if Next TBTT must wake up * * Return Value: * None. 
* -*/ bool PSbIsNextTBTTWakeUp( void *hDeviceContext ) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = pDevice->pMgmt; bool bWakeUp = false; if (pMgmt->wListenInterval >= 2) { if (pMgmt->wCountToWakeUp == 0) { pMgmt->wCountToWakeUp = pMgmt->wListenInterval; } pMgmt->wCountToWakeUp --; if (pMgmt->wCountToWakeUp == 1) { // Turn on wake up to listen next beacon MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN); bWakeUp = true; } } return bWakeUp; }
gpl-2.0
AICP/kernel_oppo_msm8974
arch/avr32/kernel/signal.c
8987
7612
/* * Copyright (C) 2004-2006 Atmel Corporation * * Based on linux/arch/sh/kernel/signal.c * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 1991, 1992 Linus Torvalds * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/freezer.h> #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/ucontext.h> #include <asm/syscalls.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) { return do_sigaltstack(uss, uoss, regs->sp); } struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned long retcode; }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; #define COPY(x) err |= __get_user(regs->x, &sc->x) COPY(sr); COPY(pc); COPY(lr); COPY(sp); COPY(r12); COPY(r11); COPY(r10); COPY(r9); COPY(r8); COPY(r7); COPY(r6); COPY(r5); COPY(r4); COPY(r3); COPY(r2); COPY(r1); COPY(r0); #undef COPY /* * Don't allow anyone to pretend they're running in supervisor * mode or something... 
*/ err |= !valid_user_regs(regs); return err; } asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; frame = (struct rt_sigframe __user *)regs->sp; pr_debug("SIG return: frame = %p\n", frame); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) goto badframe; pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n", regs->pc, regs->lr, regs->sp); return regs->r12; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { int err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->x) COPY(sr); COPY(pc); COPY(lr); COPY(sp); COPY(r12); COPY(r11); COPY(r10); COPY(r9); COPY(r8); COPY(r7); COPY(r6); COPY(r5); COPY(r4); COPY(r3); COPY(r2); COPY(r1); COPY(r0); #undef COPY return err; } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) { unsigned long sp = regs->sp; if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user *)((sp - framesize) & ~3); } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); err = -EFAULT; if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto out; /* * Set up the return code: * * mov r8, __NR_rt_sigreturn * scall * * Note: This will blow up since we're using a non-executable * stack. Better use SA_RESTORER. 
*/ #if __NR_rt_sigreturn > 127 # error __NR_rt_sigreturn must be < 127 to fit in a short mov #endif err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20), &frame->retcode); err |= copy_siginfo_to_user(&frame->info, info); /* Set up the ucontext */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto out; regs->r12 = sig; regs->r11 = (unsigned long) &frame->info; regs->r10 = (unsigned long) &frame->uc; regs->sp = (unsigned long) frame; if (ka->sa.sa_flags & SA_RESTORER) regs->lr = (unsigned long)ka->sa.sa_restorer; else { printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n", current->comm, current->pid); regs->lr = (unsigned long) &frame->retcode; } pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n", current->comm, current->pid, sig, regs->sp, regs->pc, ka->sa.sa_handler, regs->lr); regs->pc = (unsigned long) ka->sa.sa_handler; out: return err; } static inline void setup_syscall_restart(struct pt_regs *regs) { if (regs->r12 == -ERESTART_RESTARTBLOCK) regs->r8 = __NR_restart_syscall; else regs->r12 = regs->r12_orig; regs->pc -= 2; } static inline void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, int syscall) { int ret; /* * Set up the stack frame */ ret = setup_rt_frame(sig, ka, info, oldset, regs); /* * Check that the resulting registers are sane */ ret |= !valid_user_regs(regs); /* * Block the signal if we were unsuccessful. 
*/ if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } if (ret == 0) return; force_sigsegv(sig, current); } /* * Note that 'init' is a special process: it doesn't get signals it * doesn't want to handle. Thus you cannot kill init even with a * SIGKILL even by mistake. */ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) { siginfo_t info; int signr; struct k_sigaction ka; /* * We want the common case to go fast, which is why we may in * certain cases get here from kernel mode. Just return * without doing anything if so. */ if (!user_mode(regs)) return 0; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else if (!oldset) oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (syscall) { switch (regs->r12) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: if (signr > 0) { regs->r12 = -EINTR; break; } /* fall through */ case -ERESTARTSYS: if (signr > 0 && !(ka.sa.sa_flags & SA_RESTART)) { regs->r12 = -EINTR; break; } /* fall through */ case -ERESTARTNOINTR: setup_syscall_restart(regs); } } if (signr == 0) { /* No signal to deliver -- put the saved sigmask back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } return 0; } handle_signal(signr, &ka, &info, oldset, regs, syscall); return 1; } asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) { int syscall = 0; if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) syscall = 1; if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs, &current->blocked, syscall); if (ti->flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if 
(current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
XePeleato/android_ALE-L21_kernel
fs/omfs/bitmap.c
12827
4107
#include <linux/kernel.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <asm/div64.h> #include "omfs.h" unsigned long omfs_count_free(struct super_block *sb) { unsigned int i; unsigned long sum = 0; struct omfs_sb_info *sbi = OMFS_SB(sb); int nbits = sb->s_blocksize * 8; for (i = 0; i < sbi->s_imap_size; i++) sum += nbits - bitmap_weight(sbi->s_imap[i], nbits); return sum; } /* * Counts the run of zero bits starting at bit up to max. * It handles the case where a run might spill over a buffer. * Called with bitmap lock. */ static int count_run(unsigned long **addr, int nbits, int addrlen, int bit, int max) { int count = 0; int x; for (; addrlen > 0; addrlen--, addr++) { x = find_next_bit(*addr, nbits, bit); count += x - bit; if (x < nbits || count > max) return min(count, max); bit = 0; } return min(count, max); } /* * Sets or clears the run of count bits starting with bit. * Called with bitmap lock. */ static int set_run(struct super_block *sb, int map, int nbits, int bit, int count, int set) { int i; int err; struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); err = -ENOMEM; bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; for (i = 0; i < count; i++, bit++) { if (bit >= nbits) { bit = 0; map++; mark_buffer_dirty(bh); brelse(bh); bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; } if (set) { set_bit(bit, sbi->s_imap[map]); set_bit(bit, (unsigned long *)bh->b_data); } else { clear_bit(bit, sbi->s_imap[map]); clear_bit(bit, (unsigned long *)bh->b_data); } } mark_buffer_dirty(bh); brelse(bh); err = 0; out: return err; } /* * Tries to allocate exactly one block. Returns true if successful. 
*/ int omfs_allocate_block(struct super_block *sb, u64 block) { struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; unsigned int map, bit; int ret = 0; u64 tmp; tmp = block; bit = do_div(tmp, bits_per_entry); map = tmp; mutex_lock(&sbi->s_bitmap_lock); if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map])) goto out; if (sbi->s_bitmap_ino > 0) { bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; set_bit(bit, (unsigned long *)bh->b_data); mark_buffer_dirty(bh); brelse(bh); } ret = 1; out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Tries to allocate a set of blocks. The request size depends on the * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file * blocks, we try to allocate sbi->s_clustersize, but can always get away * with just one block. */ int omfs_allocate_range(struct super_block *sb, int min_request, int max_request, u64 *return_block, int *return_size) { struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; int ret = 0; int i, run, bit; mutex_lock(&sbi->s_bitmap_lock); for (i = 0; i < sbi->s_imap_size; i++) { bit = 0; while (bit < bits_per_entry) { bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry, bit); if (bit == bits_per_entry) break; run = count_run(&sbi->s_imap[i], bits_per_entry, sbi->s_imap_size-i, bit, max_request); if (run >= min_request) goto found; bit += run; } } ret = -ENOSPC; goto out; found: *return_block = i * bits_per_entry + bit; *return_size = run; ret = set_run(sb, i, bits_per_entry, bit, run, 1); out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Clears count bits starting at a given block. 
*/ int omfs_clear_range(struct super_block *sb, u64 block, int count) { struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; u64 tmp; unsigned int map, bit; int ret; tmp = block; bit = do_div(tmp, bits_per_entry); map = tmp; if (map >= sbi->s_imap_size) return 0; mutex_lock(&sbi->s_bitmap_lock); ret = set_run(sb, map, bits_per_entry, bit, count, 0); mutex_unlock(&sbi->s_bitmap_lock); return ret; }
gpl-2.0
SimonSickle/android_kernel_htc_primou
fs/omfs/bitmap.c
12827
4107
#include <linux/kernel.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <asm/div64.h> #include "omfs.h" unsigned long omfs_count_free(struct super_block *sb) { unsigned int i; unsigned long sum = 0; struct omfs_sb_info *sbi = OMFS_SB(sb); int nbits = sb->s_blocksize * 8; for (i = 0; i < sbi->s_imap_size; i++) sum += nbits - bitmap_weight(sbi->s_imap[i], nbits); return sum; } /* * Counts the run of zero bits starting at bit up to max. * It handles the case where a run might spill over a buffer. * Called with bitmap lock. */ static int count_run(unsigned long **addr, int nbits, int addrlen, int bit, int max) { int count = 0; int x; for (; addrlen > 0; addrlen--, addr++) { x = find_next_bit(*addr, nbits, bit); count += x - bit; if (x < nbits || count > max) return min(count, max); bit = 0; } return min(count, max); } /* * Sets or clears the run of count bits starting with bit. * Called with bitmap lock. */ static int set_run(struct super_block *sb, int map, int nbits, int bit, int count, int set) { int i; int err; struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); err = -ENOMEM; bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; for (i = 0; i < count; i++, bit++) { if (bit >= nbits) { bit = 0; map++; mark_buffer_dirty(bh); brelse(bh); bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; } if (set) { set_bit(bit, sbi->s_imap[map]); set_bit(bit, (unsigned long *)bh->b_data); } else { clear_bit(bit, sbi->s_imap[map]); clear_bit(bit, (unsigned long *)bh->b_data); } } mark_buffer_dirty(bh); brelse(bh); err = 0; out: return err; } /* * Tries to allocate exactly one block. Returns true if successful. 
*/ int omfs_allocate_block(struct super_block *sb, u64 block) { struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; unsigned int map, bit; int ret = 0; u64 tmp; tmp = block; bit = do_div(tmp, bits_per_entry); map = tmp; mutex_lock(&sbi->s_bitmap_lock); if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map])) goto out; if (sbi->s_bitmap_ino > 0) { bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; set_bit(bit, (unsigned long *)bh->b_data); mark_buffer_dirty(bh); brelse(bh); } ret = 1; out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Tries to allocate a set of blocks. The request size depends on the * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file * blocks, we try to allocate sbi->s_clustersize, but can always get away * with just one block. */ int omfs_allocate_range(struct super_block *sb, int min_request, int max_request, u64 *return_block, int *return_size) { struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; int ret = 0; int i, run, bit; mutex_lock(&sbi->s_bitmap_lock); for (i = 0; i < sbi->s_imap_size; i++) { bit = 0; while (bit < bits_per_entry) { bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry, bit); if (bit == bits_per_entry) break; run = count_run(&sbi->s_imap[i], bits_per_entry, sbi->s_imap_size-i, bit, max_request); if (run >= min_request) goto found; bit += run; } } ret = -ENOSPC; goto out; found: *return_block = i * bits_per_entry + bit; *return_size = run; ret = set_run(sb, i, bits_per_entry, bit, run, 1); out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Clears count bits starting at a given block. 
*/ int omfs_clear_range(struct super_block *sb, u64 block, int count) { struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; u64 tmp; unsigned int map, bit; int ret; tmp = block; bit = do_div(tmp, bits_per_entry); map = tmp; if (map >= sbi->s_imap_size) return 0; mutex_lock(&sbi->s_bitmap_lock); ret = set_run(sb, map, bits_per_entry, bit, count, 0); mutex_unlock(&sbi->s_bitmap_lock); return ret; }
gpl-2.0
lozohcum/android_kernel_sony_msm7x27a-legacy-TeamBlur
arch/parisc/math-emu/fcnvuf.c
14107
8168
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvuf.c $Revision: 1.1 $ * * Purpose: * Fixed point to Floating-point Converts * * External Interfaces: * dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /************************************************************************ * Fixed point to Floating-point Converts * ************************************************************************/ /* * Convert Single Unsigned Fixed to Single Floating-point format */ int sgl_to_sgl_fcnvuf( unsigned int *srcptr, unsigned int *nullptr, sgl_floating_point *dstptr, unsigned int *status) { register unsigned int src, result = 0; register int dst_exponent; src = *srcptr; /* Check for zero */ if (src == 0) { Sgl_setzero(result); *dstptr = result; return(NOEXCEPTION); } /* * Generate exponent and normalized 
mantissa */ dst_exponent = 16; /* initialize for normalization */ /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(src,dst_exponent); /* left justify source, with msb at bit position 0 */ src <<= dst_exponent+1; Sgl_set_mantissa(result, src >> SGL_EXP_LENGTH); Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent); /* check for inexact */ if (Suint_isinexact_to_sgl(src)) { switch (Rounding_mode()) { case ROUNDPLUS: Sgl_increment(result); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Sgl_roundnearest_from_suint(src,result); break; } if (Is_inexacttrap_enabled()) { *dstptr = result; return(INEXACTEXCEPTION); } else Set_inexactflag(); } *dstptr = result; return(NOEXCEPTION); } /* * Single Unsigned Fixed to Double Floating-point */ int sgl_to_dbl_fcnvuf( unsigned int *srcptr, unsigned int *nullptr, dbl_floating_point *dstptr, unsigned int *status) { register int dst_exponent; register unsigned int src, resultp1 = 0, resultp2 = 0; src = *srcptr; /* Check for zero */ if (src == 0) { Dbl_setzero(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
*/ Find_ms_one_bit(src,dst_exponent); /* left justify source, with msb at bit position 0 */ src <<= dst_exponent+1; Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH); Dbl_set_mantissap2(resultp2, src << (32-DBL_EXP_LENGTH)); Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Double Unsigned Fixed to Single Floating-point */ int dbl_to_sgl_fcnvuf( dbl_unsigned *srcptr, unsigned int *nullptr, sgl_floating_point *dstptr, unsigned int *status) { int dst_exponent; unsigned int srcp1, srcp2, result = 0; Duint_copyfromptr(srcptr,srcp1,srcp2); /* Check for zero */ if (srcp1 == 0 && srcp2 == 0) { Sgl_setzero(result); *dstptr = result; return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ if (srcp1 == 0) { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(srcp2,dst_exponent); /* left justify source, with msb at bit position 0 */ srcp1 = srcp2 << dst_exponent+1; srcp2 = 0; /* * since msb set is in second word, need to * adjust bit position count */ dst_exponent += 32; } else { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
* */ Find_ms_one_bit(srcp1,dst_exponent); /* left justify source, with msb at bit position 0 */ if (dst_exponent >= 0) { Variable_shift_double(srcp1,srcp2,(31-dst_exponent), srcp1); srcp2 <<= dst_exponent+1; } } Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH); Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent); /* check for inexact */ if (Duint_isinexact_to_sgl(srcp1,srcp2)) { switch (Rounding_mode()) { case ROUNDPLUS: Sgl_increment(result); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Sgl_roundnearest_from_duint(srcp1,srcp2,result); break; } if (Is_inexacttrap_enabled()) { *dstptr = result; return(INEXACTEXCEPTION); } else Set_inexactflag(); } *dstptr = result; return(NOEXCEPTION); } /* * Double Unsigned Fixed to Double Floating-point */ int dbl_to_dbl_fcnvuf( dbl_unsigned *srcptr, unsigned int *nullptr, dbl_floating_point *dstptr, unsigned int *status) { register int dst_exponent; register unsigned int srcp1, srcp2, resultp1 = 0, resultp2 = 0; Duint_copyfromptr(srcptr,srcp1,srcp2); /* Check for zero */ if (srcp1 == 0 && srcp2 ==0) { Dbl_setzero(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ if (srcp1 == 0) { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(srcp2,dst_exponent); /* left justify source, with msb at bit position 0 */ srcp1 = srcp2 << dst_exponent+1; srcp2 = 0; /* * since msb set is in second word, need to * adjust bit position count */ dst_exponent += 32; } else { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
*/ Find_ms_one_bit(srcp1,dst_exponent); /* left justify source, with msb at bit position 0 */ if (dst_exponent >= 0) { Variable_shift_double(srcp1,srcp2,(31-dst_exponent), srcp1); srcp2 <<= dst_exponent+1; } } Dbl_set_mantissap1(resultp1, srcp1 >> DBL_EXP_LENGTH); Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH,resultp2); Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent); /* check for inexact */ if (Duint_isinexact_to_dbl(srcp2)) { switch (Rounding_mode()) { case ROUNDPLUS: Dbl_increment(resultp1,resultp2); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Dbl_roundnearest_from_duint(srcp2,resultp1, resultp2); break; } if (Is_inexacttrap_enabled()) { Dbl_copytoptr(resultp1,resultp2,dstptr); return(INEXACTEXCEPTION); } else Set_inexactflag(); } Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); }
gpl-2.0
engine95/navel-855
fs/dlm/util.c
14875
4610
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "rcom.h"
#include "util.h"

/*
 * Fixed on-the-wire values for the higher errnos, which differ between
 * architectures (see to_dlm_errno/from_dlm_errno below).
 */
#define DLM_ERRNO_EDEADLK		35
#define DLM_ERRNO_EBADR			53
#define DLM_ERRNO_EBADSLT		57
#define DLM_ERRNO_EPROTO		71
#define DLM_ERRNO_EOPNOTSUPP		95
#define DLM_ERRNO_ETIMEDOUT	       110
#define DLM_ERRNO_EINPROGRESS	       115

/* Convert a message header to little-endian wire byte order, in place. */
static void header_out(struct dlm_header *hd)
{
	hd->h_version		= cpu_to_le32(hd->h_version);
	hd->h_lockspace		= cpu_to_le32(hd->h_lockspace);
	hd->h_nodeid		= cpu_to_le32(hd->h_nodeid);
	hd->h_length		= cpu_to_le16(hd->h_length);
}

/* Convert a received message header to host byte order, in place. */
static void header_in(struct dlm_header *hd)
{
	hd->h_version		= le32_to_cpu(hd->h_version);
	hd->h_lockspace		= le32_to_cpu(hd->h_lockspace);
	hd->h_nodeid		= le32_to_cpu(hd->h_nodeid);
	hd->h_length		= le16_to_cpu(hd->h_length);
}

/* higher errno values are inconsistent across architectures, so select
   one set of values for on the wire */

static int to_dlm_errno(int err)
{
	switch (err) {
	case -EDEADLK:
		return -DLM_ERRNO_EDEADLK;
	case -EBADR:
		return -DLM_ERRNO_EBADR;
	case -EBADSLT:
		return -DLM_ERRNO_EBADSLT;
	case -EPROTO:
		return -DLM_ERRNO_EPROTO;
	case -EOPNOTSUPP:
		return -DLM_ERRNO_EOPNOTSUPP;
	case -ETIMEDOUT:
		return -DLM_ERRNO_ETIMEDOUT;
	case -EINPROGRESS:
		return -DLM_ERRNO_EINPROGRESS;
	}
	/* other errnos are passed through unchanged */
	return err;
}

static int from_dlm_errno(int err)
{
	switch (err) {
	case -DLM_ERRNO_EDEADLK:
		return -EDEADLK;
	case -DLM_ERRNO_EBADR:
		return -EBADR;
	case -DLM_ERRNO_EBADSLT:
		return -EBADSLT;
	case -DLM_ERRNO_EPROTO:
		return -EPROTO;
	case -DLM_ERRNO_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case -DLM_ERRNO_ETIMEDOUT:
		return -ETIMEDOUT;
	case -DLM_ERRNO_EINPROGRESS:
		return -EINPROGRESS;
	}
	/* other errnos are passed through unchanged */
	return err;
}

/* Convert an outgoing dlm_message to wire (little-endian) byte order. */
void dlm_message_out(struct dlm_message *ms)
{
	header_out(&ms->m_header);

	ms->m_type		= cpu_to_le32(ms->m_type);
	ms->m_nodeid		= cpu_to_le32(ms->m_nodeid);
	ms->m_pid		= cpu_to_le32(ms->m_pid);
	ms->m_lkid		= cpu_to_le32(ms->m_lkid);
	ms->m_remid		= cpu_to_le32(ms->m_remid);
	ms->m_parent_lkid	= cpu_to_le32(ms->m_parent_lkid);
	ms->m_parent_remid	= cpu_to_le32(ms->m_parent_remid);
	ms->m_exflags		= cpu_to_le32(ms->m_exflags);
	ms->m_sbflags		= cpu_to_le32(ms->m_sbflags);
	ms->m_flags		= cpu_to_le32(ms->m_flags);
	ms->m_lvbseq		= cpu_to_le32(ms->m_lvbseq);
	ms->m_hash		= cpu_to_le32(ms->m_hash);
	ms->m_status		= cpu_to_le32(ms->m_status);
	ms->m_grmode		= cpu_to_le32(ms->m_grmode);
	ms->m_rqmode		= cpu_to_le32(ms->m_rqmode);
	ms->m_bastmode		= cpu_to_le32(ms->m_bastmode);
	ms->m_asts		= cpu_to_le32(ms->m_asts);
	/* result is also mapped to the arch-independent errno set */
	ms->m_result		= cpu_to_le32(to_dlm_errno(ms->m_result));
}

/* Convert a received dlm_message to host byte order. */
void dlm_message_in(struct dlm_message *ms)
{
	header_in(&ms->m_header);

	ms->m_type		= le32_to_cpu(ms->m_type);
	ms->m_nodeid		= le32_to_cpu(ms->m_nodeid);
	ms->m_pid		= le32_to_cpu(ms->m_pid);
	ms->m_lkid		= le32_to_cpu(ms->m_lkid);
	ms->m_remid		= le32_to_cpu(ms->m_remid);
	ms->m_parent_lkid	= le32_to_cpu(ms->m_parent_lkid);
	ms->m_parent_remid	= le32_to_cpu(ms->m_parent_remid);
	ms->m_exflags		= le32_to_cpu(ms->m_exflags);
	ms->m_sbflags		= le32_to_cpu(ms->m_sbflags);
	ms->m_flags		= le32_to_cpu(ms->m_flags);
	ms->m_lvbseq		= le32_to_cpu(ms->m_lvbseq);
	ms->m_hash		= le32_to_cpu(ms->m_hash);
	ms->m_status		= le32_to_cpu(ms->m_status);
	ms->m_grmode		= le32_to_cpu(ms->m_grmode);
	ms->m_rqmode		= le32_to_cpu(ms->m_rqmode);
	ms->m_bastmode		= le32_to_cpu(ms->m_bastmode);
	ms->m_asts		= le32_to_cpu(ms->m_asts);
	/* map the wire errno back to the local architecture's value */
	ms->m_result		= from_dlm_errno(le32_to_cpu(ms->m_result));
}

/* Convert an outgoing recovery (rcom) message to wire byte order. */
void dlm_rcom_out(struct dlm_rcom *rc)
{
	header_out(&rc->rc_header);

	rc->rc_type		= cpu_to_le32(rc->rc_type);
	rc->rc_result		= cpu_to_le32(rc->rc_result);
	rc->rc_id		= cpu_to_le64(rc->rc_id);
	rc->rc_seq		= cpu_to_le64(rc->rc_seq);
	rc->rc_seq_reply	= cpu_to_le64(rc->rc_seq_reply);
}

/* Convert a received recovery (rcom) message to host byte order.
   NOTE(review): rc_result is not errno-translated here, unlike
   m_result above — presumably rcom results use their own values;
   confirm against rcom.c. */
void dlm_rcom_in(struct dlm_rcom *rc)
{
	header_in(&rc->rc_header);

	rc->rc_type		= le32_to_cpu(rc->rc_type);
	rc->rc_result		= le32_to_cpu(rc->rc_result);
	rc->rc_id		= le64_to_cpu(rc->rc_id);
	rc->rc_seq		= le64_to_cpu(rc->rc_seq);
	rc->rc_seq_reply	= le64_to_cpu(rc->rc_seq_reply);
}
gpl-2.0
metan-ucw/ltp
testcases/kernel/security/tomoyo/tomoyo_new_test.c
28
22313
/******************************************************************************/ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See */ /* the GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* */ /******************************************************************************/ /* * tomoyo_new_test.c * * Testing program for security/tomoyo/ * * Copyright (C) 2005-2010 NTT DATA CORPORATION */ #include "include.h" static int result; static int error; static void show_result(const char *test, int should_success) { error = errno; printf("%s : ", test); if (should_success) { if (error == 0) printf("OK (%d)\n", result); else printf("FAILED: %s\n", strerror(error)); } else { if (error == 0) printf("BUG: Didn't fail (%d)\n", result); else if (error == EPERM) printf("OK: permission denied\n"); else printf("FAILED: %s\n", strerror(error)); } } static void test_read_etc_fstab(void) { result = open("/etc/fstab", O_RDONLY); } static void test_write_dev_null(void) { result = open("/dev/null", O_WRONLY); } static void cleanup_file_open(void) { if (result != EOF) close(result); } static void test_mkdir_testdir(void) { result = mkdir("/tmp/testdir", 0755); } static void cleanup_mkdir_testdir(void) { rmdir("/tmp/testdir"); } static void setup_mkdir_testdir(void) { mkdir("/tmp/testdir", 0755); } static void test_rmdir_testdir(void) { result = 
rmdir("/tmp/testdir"); } static void setup_execute_bin_true(void) { fprintf(domain_fp, "%s /bin/true\n", self_domain); fprintf(domain_fp, "use_profile 0\n"); fprintf(domain_fp, "select pid=%u\n", pid); } static void cleanup_execute_bin_true(void) { wait(NULL); fprintf(domain_fp, "delete %s /bin/true\n", self_domain); fprintf(domain_fp, "select pid=%u\n", pid); } static void test_execute_bin_true(void) { char *argv[] = { "/bin/true", NULL }; char *envp[] = { "HOME=/", NULL }; int pipe_fd[2] = { EOF, EOF }; if (pipe(pipe_fd) == -1) err(1, "pipe"); switch (fork()) { case 0: execve("/bin/true", argv, envp); error = errno; if (write(pipe_fd[1], &error, sizeof(error)) == -1) err(1, "write"); _exit(0); break; case -1: error = ENOMEM; break; } close(pipe_fd[1]); (void)read(pipe_fd[0], &error, sizeof(error)); close(pipe_fd[0]); result = error ? EOF : 0; errno = error; } static void test_chmod_dev_null(void) { result = chmod("/dev/null", 0666); } static void test_chown_dev_null(void) { result = chown("/dev/null", 0, -1); } static void test_chgrp_dev_null(void) { result = chown("/dev/null", -1, 0); } static void test_ioctl_dev_null(void) { int fd = open("/dev/null", O_RDWR); errno = 0; result = ioctl(fd, 0x5451, NULL); error = errno; close(fd); errno = error; } static void setup_chmod_group(void) { write_exception_policy("path_group CHMOD_TARGET /dev/null", 0); write_exception_policy("number_group CHMOD_MODES 0666", 0); } static void cleanup_chmod_group(void) { write_exception_policy("path_group CHMOD_TARGET /dev/null", 1); write_exception_policy("number_group CHMOD_MODES 0666", 1); } static void setup_chown_group(void) { write_exception_policy("path_group CHOWN_TARGET /dev/\\*", 0); write_exception_policy("number_group CHOWN_IDS 0x0-0xFFFE", 0); } static void cleanup_chown_group(void) { write_exception_policy("path_group CHOWN_TARGET /dev/\\*", 1); write_exception_policy("number_group CHOWN_IDS 0x0-0xFFFE", 1); } static void setup_ioctl_group(void) { 
write_exception_policy("path_group IOCTL_TARGET /dev/\\*", 0); write_exception_policy("number_group IOCTL_NUMBERS 0x5450-0x5452", 0); } static void cleanup_ioctl_group(void) { write_exception_policy("path_group IOCTL_TARGET /dev/\\*", 1); write_exception_policy("number_group IOCTL_NUMBERS 0x5450-0x5452", 1); } static void setup_open_group(void) { write_exception_policy("path_group READABLE /etc/\\*", 0); write_exception_policy("number_group READABLE_IDS 0-0xFFF", 0); } static void cleanup_open_group(void) { cleanup_file_open(); write_exception_policy("path_group READABLE /etc/\\*", 1); write_exception_policy("number_group READABLE_IDS 0-0xFFF", 1); } static void test_file_open_0(void) { result = open("/tmp/testfile0", O_RDONLY, 0600); } static void test_file_open_1(void) { result = open("/tmp/testfile1", O_CREAT | O_RDONLY, 0600); } static void test_file_open_2(void) { result = open("/tmp/testfile2", O_TRUNC | O_RDONLY, 0600); } static void test_file_open_3(void) { result = open("/tmp/testfile3", O_TRUNC | O_CREAT | O_RDONLY, 0600); } static void test_file_open_4(void) { result = open("/tmp/testfile4", O_APPEND | O_RDONLY, 0600); } static void test_file_open_5(void) { result = open("/tmp/testfile5", O_APPEND | O_CREAT | O_RDONLY, 0600); } static void test_file_open_6(void) { result = open("/tmp/testfile6", O_APPEND | O_TRUNC | O_RDONLY, 0600); } static void test_file_open_7(void) { result = open("/tmp/testfile7", O_APPEND | O_TRUNC | O_CREAT | O_RDONLY, 0600); } static void test_file_open_8(void) { result = open("/tmp/testfile8", O_WRONLY, 0600); } static void test_file_open_9(void) { result = open("/tmp/testfile9", O_CREAT | O_WRONLY, 0600); } static void test_file_open_10(void) { result = open("/tmp/testfile10", O_TRUNC | O_WRONLY, 0600); } static void test_file_open_11(void) { result = open("/tmp/testfile11", O_TRUNC | O_CREAT | O_WRONLY, 0600); } static void test_file_open_12(void) { result = open("/tmp/testfile12", O_APPEND | O_WRONLY, 0600); } static void 
test_file_open_13(void) { result = open("/tmp/testfile13", O_APPEND | O_CREAT | O_WRONLY, 0600); } static void test_file_open_14(void) { result = open("/tmp/testfile14", O_APPEND | O_TRUNC | O_WRONLY, 0600); } static void test_file_open_15(void) { result = open("/tmp/testfile15", O_APPEND | O_TRUNC | O_CREAT | O_WRONLY, 0600); } static void test_file_open_16(void) { result = open("/tmp/testfile16", O_RDWR, 0600); } static void test_file_open_17(void) { result = open("/tmp/testfile17", O_CREAT | O_RDWR, 0600); } static void test_file_open_18(void) { result = open("/tmp/testfile18", O_TRUNC | O_RDWR, 0600); } static void test_file_open_19(void) { result = open("/tmp/testfile19", O_TRUNC | O_CREAT | O_RDWR, 0600); } static void test_file_open_20(void) { result = open("/tmp/testfile20", O_APPEND | O_RDWR, 0600); } static void test_file_open_21(void) { result = open("/tmp/testfile21", O_APPEND | O_CREAT | O_RDWR, 0600); } static void test_file_open_22(void) { result = open("/tmp/testfile22", O_APPEND | O_TRUNC | O_RDWR, 0600); } static void test_file_open_23(void) { result = open("/tmp/testfile23", O_APPEND | O_TRUNC | O_CREAT | O_RDWR, 0600); } static void setup_test_file(void) { int i; char buffer[32]; buffer[31] = '\0'; for (i = 0; i < 24; i += 2) { snprintf(buffer, sizeof(buffer) - 1, "/tmp/testfile%u", i); close(open(buffer, O_WRONLY | O_CREAT, 0600)); } write_exception_policy("deny_rewrite /tmp/testfile\\$", 0); } static void setup_test_file_truncate(void) { setup_test_file(); write_domain_policy("allow_truncate /tmp/testfile\\$", 0); set_profile(3, "file::truncate"); } static void setup_all_test_file(void) { int i; char buffer[32]; buffer[31] = '\0'; for (i = 0; i < 24; i++) { snprintf(buffer, sizeof(buffer) - 1, "/tmp/testfile%u", i); close(open(buffer, O_WRONLY | O_CREAT, 0600)); } write_exception_policy("deny_rewrite /tmp/testfile\\$", 0); } static void setup_all_test_file_truncate(void) { setup_all_test_file(); write_domain_policy("allow_truncate 
/tmp/testfile\\$", 0); set_profile(3, "file::truncate"); } static void cleanup_test_file(void) { int i; char buffer[32]; buffer[31] = '\0'; for (i = 0; i < 24; i++) { snprintf(buffer, sizeof(buffer) - 1, "/tmp/testfile%u", i); unlink(buffer); } write_exception_policy("deny_rewrite /tmp/testfile\\$", 1); cleanup_file_open(); } static void cleanup_test_file_truncate(void) { cleanup_test_file(); write_domain_policy("allow_truncate /tmp/testfile\\$", 1); set_profile(0, "file::truncate"); } static struct test_struct { void (*do_setup) (void); void (*do_test) (void); void (*do_cleanup) (void); const char *name; const char *policy; } tests[] = { { NULL, test_read_etc_fstab, cleanup_file_open, "file::open", "allow_read /etc/fstab"}, { NULL, test_read_etc_fstab, cleanup_file_open, "file::open", "allow_read /etc/fstab"}, { NULL, test_read_etc_fstab, cleanup_file_open, "file::open", "allow_read /etc/fstab"}, { setup_open_group, test_read_etc_fstab, cleanup_open_group, "file::open", "allow_read @READABLE"}, { NULL, test_write_dev_null, cleanup_file_open, "file::open", "allow_write /dev/null"}, { NULL, test_write_dev_null, cleanup_file_open, "file::open", "allow_write /dev/null"}, { NULL, test_write_dev_null, cleanup_file_open, "file::open", "allow_write /dev/null"}, { cleanup_mkdir_testdir, test_mkdir_testdir, cleanup_mkdir_testdir, "file::mkdir", "allow_mkdir /tmp/testdir/ 0755"}, { cleanup_mkdir_testdir, test_mkdir_testdir, cleanup_mkdir_testdir, "file::mkdir", "allow_mkdir /tmp/testdir/ 0755"}, { cleanup_mkdir_testdir, test_mkdir_testdir, cleanup_mkdir_testdir, "file::mkdir", "allow_mkdir /tmp/testdir/ 0755"}, { setup_mkdir_testdir, test_rmdir_testdir, cleanup_mkdir_testdir, "file::rmdir", "allow_rmdir /tmp/testdir/"}, { setup_mkdir_testdir, test_rmdir_testdir, cleanup_mkdir_testdir, "file::rmdir", "allow_rmdir /tmp/testdir/"}, { setup_mkdir_testdir, test_rmdir_testdir, cleanup_mkdir_testdir, "file::rmdir", "allow_rmdir /tmp/testdir/"}, { setup_execute_bin_true, 
test_execute_bin_true, cleanup_execute_bin_true, "file::execute", "allow_execute /bin/true"}, { setup_execute_bin_true, test_execute_bin_true, cleanup_execute_bin_true, "file::execute", "allow_execute /bin/true"}, { setup_execute_bin_true, test_execute_bin_true, cleanup_execute_bin_true, "file::execute", "allow_execute /bin/true"}, { NULL, test_chmod_dev_null, NULL, "file::chmod", "allow_chmod /dev/null 0666"}, { NULL, test_chown_dev_null, NULL, "file::chown", "allow_chown /dev/null 0"}, { NULL, test_chgrp_dev_null, NULL, "file::chgrp", "allow_chgrp /dev/null 0"}, { NULL, test_ioctl_dev_null, NULL, "file::ioctl", "allow_ioctl /dev/null 0x5451"}, { setup_chmod_group, test_chmod_dev_null, cleanup_chmod_group, "file::chmod", "allow_chmod @CHMOD_TARGET @CHMOD_MODES"}, { setup_chown_group, test_chown_dev_null, cleanup_chown_group, "file::chown", "allow_chown @CHOWN_TARGET @CHOWN_IDS"}, { setup_chown_group, test_chgrp_dev_null, cleanup_chown_group, "file::chgrp", "allow_chgrp @CHOWN_TARGET @CHOWN_IDS"}, { setup_ioctl_group, test_ioctl_dev_null, cleanup_ioctl_group, "file::ioctl", "allow_ioctl @IOCTL_TARGET @IOCTL_NUMBERS"}, { setup_test_file, test_file_open_0, cleanup_test_file, "file::open", "allow_read /tmp/testfile0"}, { setup_test_file, test_file_open_1, cleanup_test_file, "file::open", "allow_read /tmp/testfile1"}, { setup_test_file, test_file_open_1, cleanup_test_file, "file::create", "allow_create /tmp/testfile1 0600"}, { setup_test_file, test_file_open_2, cleanup_test_file, "file::open", "allow_read /tmp/testfile2"}, { setup_test_file, test_file_open_2, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile2"}, { setup_test_file_truncate, test_file_open_2, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile2"}, { setup_test_file, test_file_open_3, cleanup_test_file, "file::open", "allow_read /tmp/testfile3"}, { setup_test_file, test_file_open_3, cleanup_test_file, "file::create", "allow_create /tmp/testfile3 0600"}, { 
setup_test_file, test_file_open_4, cleanup_test_file, "file::open", "allow_read /tmp/testfile4"}, { setup_test_file, test_file_open_5, cleanup_test_file, "file::open", "allow_read /tmp/testfile5"}, { setup_test_file, test_file_open_5, cleanup_test_file, "file::create", "allow_create /tmp/testfile5 0600"}, { setup_test_file, test_file_open_6, cleanup_test_file, "file::open", "allow_read /tmp/testfile6"}, { setup_test_file, test_file_open_6, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile6"}, { setup_test_file_truncate, test_file_open_6, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile6"}, { setup_test_file, test_file_open_7, cleanup_test_file, "file::open", "allow_read /tmp/testfile7"}, { setup_test_file, test_file_open_7, cleanup_test_file, "file::create", "allow_create /tmp/testfile7 0600"}, { setup_test_file, test_file_open_8, cleanup_test_file, "file::open", "allow_write /tmp/testfile8"}, { setup_test_file, test_file_open_8, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile8"}, { setup_test_file, test_file_open_9, cleanup_test_file, "file::open", "allow_write /tmp/testfile9"}, { setup_test_file, test_file_open_9, cleanup_test_file, "file::create", "allow_create /tmp/testfile9 0600"}, { setup_test_file, test_file_open_9, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile9"}, { setup_test_file, test_file_open_10, cleanup_test_file, "file::open", "allow_write /tmp/testfile10"}, { setup_test_file, test_file_open_10, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile10"}, { setup_test_file, test_file_open_10, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile10"}, { setup_test_file, test_file_open_11, cleanup_test_file, "file::open", "allow_write /tmp/testfile11"}, { setup_test_file, test_file_open_11, cleanup_test_file, "file::create", "allow_create /tmp/testfile11 0600"}, { setup_test_file, test_file_open_11, cleanup_test_file, "file::rewrite", "allow_rewrite 
/tmp/testfile11"}, { setup_test_file, test_file_open_12, cleanup_test_file, "file::open", "allow_write /tmp/testfile12"}, { setup_test_file, test_file_open_13, cleanup_test_file, "file::open", "allow_write /tmp/testfile13"}, { setup_test_file, test_file_open_13, cleanup_test_file, "file::create", "allow_create /tmp/testfile13 0600"}, { setup_test_file, test_file_open_14, cleanup_test_file, "file::open", "allow_write /tmp/testfile14"}, { setup_test_file, test_file_open_14, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile14"}, { setup_test_file_truncate, test_file_open_14, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile14"}, { setup_test_file, test_file_open_15, cleanup_test_file, "file::open", "allow_write /tmp/testfile15"}, { setup_test_file, test_file_open_15, cleanup_test_file, "file::create", "allow_create /tmp/testfile15 0600"}, { setup_test_file, test_file_open_16, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile16"}, { setup_test_file, test_file_open_16, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile16"}, { setup_test_file, test_file_open_17, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile17"}, { setup_test_file, test_file_open_17, cleanup_test_file, "file::create", "allow_create /tmp/testfile17 0600"}, { setup_test_file, test_file_open_17, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile17"}, { setup_test_file, test_file_open_18, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile18"}, { setup_test_file, test_file_open_18, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile18"}, { setup_test_file, test_file_open_18, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile18"}, { setup_test_file, test_file_open_19, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile19"}, { setup_test_file, test_file_open_19, cleanup_test_file, "file::create", "allow_create /tmp/testfile19 0600"}, { setup_test_file, 
test_file_open_19, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile19"}, { setup_test_file, test_file_open_20, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile20"}, { setup_test_file, test_file_open_21, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile21"}, { setup_test_file, test_file_open_21, cleanup_test_file, "file::create", "allow_create /tmp/testfile21 0600"}, { setup_test_file, test_file_open_22, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile22"}, { setup_test_file, test_file_open_22, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile22"}, { setup_test_file_truncate, test_file_open_22, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile22"}, { setup_test_file, test_file_open_23, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile23"}, { setup_test_file, test_file_open_23, cleanup_test_file, "file::create", "allow_create /tmp/testfile23 0600"}, { setup_all_test_file, test_file_open_0, cleanup_test_file, "file::open", "allow_read /tmp/testfile0"}, { setup_all_test_file, test_file_open_2, cleanup_test_file, "file::open", "allow_read /tmp/testfile2"}, { setup_all_test_file, test_file_open_2, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile2"}, { setup_all_test_file_truncate, test_file_open_2, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile2"}, { setup_all_test_file, test_file_open_4, cleanup_test_file, "file::open", "allow_read /tmp/testfile4"}, { setup_all_test_file, test_file_open_6, cleanup_test_file, "file::open", "allow_read /tmp/testfile6"}, { setup_all_test_file, test_file_open_6, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile6"}, { setup_all_test_file_truncate, test_file_open_6, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile6"}, { setup_all_test_file, test_file_open_8, cleanup_test_file, "file::open", "allow_write /tmp/testfile8"}, { 
setup_all_test_file, test_file_open_8, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile8"}, { setup_all_test_file, test_file_open_10, cleanup_test_file, "file::open", "allow_write /tmp/testfile10"}, { setup_all_test_file, test_file_open_10, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile10"}, { setup_all_test_file, test_file_open_10, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile10"}, { setup_all_test_file, test_file_open_12, cleanup_test_file, "file::open", "allow_write /tmp/testfile12"}, { setup_all_test_file, test_file_open_14, cleanup_test_file, "file::open", "allow_write /tmp/testfile14"}, { setup_all_test_file, test_file_open_14, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile14"}, { setup_all_test_file_truncate, test_file_open_14, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile14"}, { setup_all_test_file, test_file_open_16, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile16"}, { setup_all_test_file, test_file_open_16, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile16"}, { setup_all_test_file, test_file_open_18, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile18"}, { setup_all_test_file, test_file_open_18, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile18"}, { setup_all_test_file, test_file_open_18, cleanup_test_file, "file::rewrite", "allow_rewrite /tmp/testfile18"}, { setup_all_test_file, test_file_open_20, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile20"}, { setup_all_test_file, test_file_open_22, cleanup_test_file, "file::open", "allow_read/write /tmp/testfile22"}, { setup_all_test_file, test_file_open_22, cleanup_test_file, "file::truncate", "allow_truncate /tmp/testfile22"}, { setup_all_test_file_truncate, test_file_open_22, cleanup_test_file_truncate, "file::rewrite", "allow_rewrite /tmp/testfile22"}, { NULL} }; int main(int argc, char *argv[]) { int i; 
tomoyo_test_init(); for (i = 0; tests[i].do_test; i++) { int trial; for (trial = 0; trial < 2; trial++) { int should_fail; for (should_fail = 0; should_fail < 2; should_fail++) { if (tests[i].do_setup) tests[i].do_setup(); if (!should_fail) write_domain_policy(tests[i].policy, 0); set_profile(3, tests[i].name); tests[i].do_test(); show_result(tests[i].policy, !should_fail); set_profile(0, tests[i].name); if (tests[i].do_cleanup) tests[i].do_cleanup(); if (!should_fail) write_domain_policy(tests[i].policy, 1); } } } for (i = 0; tests[i].do_test; i++) { int mode; for (mode = 0; mode < 4; mode++) { if (tests[i].do_setup) tests[i].do_setup(); set_profile(mode, tests[i].name); tests[i].do_test(); show_result(tests[i].name, 1); set_profile(0, tests[i].name); if (tests[i].do_cleanup) tests[i].do_cleanup(); } } fprintf(domain_fp, "delete %s\n", self_domain); return 0; }
gpl-2.0
qilongyun/ltp
testcases/open_posix_testsuite/conformance/interfaces/pthread_exit/4-1.c
28
6216
/* * Copyright (c) 2004, Bull S.A.. All rights reserved. * Created by: Sebastien Decugis * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * This sample test aims to check the following assertion: * * No atexit() registered routine shall be called because of pthread_exit(). * The steps are: * * -> Create threads with different attributes (but all must be joinable) * -> inside the thread, * -> register a function with atexit() * -> call pthread_exit. * -> In the main thread, we join the thread and check the function did not execute. 
*/ /* We are testing conformance to IEEE Std 1003.1, 2003 Edition */ #define _POSIX_C_SOURCE 200112L /* Some routines are part of the XSI Extensions */ #ifndef WITHOUT_XOPEN #define _XOPEN_SOURCE 600 #endif /********************************************************************************************/ /****************************** standard includes *****************************************/ /********************************************************************************************/ #include <pthread.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sched.h> #include <semaphore.h> #include <errno.h> #include <assert.h> /********************************************************************************************/ /****************************** Test framework *****************************************/ /********************************************************************************************/ #include "../testfrmw/testfrmw.h" #include "../testfrmw/testfrmw.c" /* This header is responsible for defining the following macros: * UNRESOLVED(ret, descr); * where descr is a description of the error and ret is an int (error code for example) * FAILED(descr); * where descr is a short text saying why the test has failed. * PASSED(); * No parameter. * * Both three macros shall terminate the calling process. * The testcase shall not terminate in any other maneer. * * The other file defines the functions * void output_init() * void output(char * string, ...) * * Those may be used to output information. 
*/ /********************************************************************************************/ /********************************** Configuration ******************************************/ /********************************************************************************************/ #ifndef VERBOSE #define VERBOSE 1 #endif /********************************************************************************************/ /*********************************** Test cases *****************************************/ /********************************************************************************************/ #include "../testfrmw/threads_scenarii.c" /* This file will define the following objects: * scenarii: array of struct __scenario type. * NSCENAR : macro giving the total # of scenarii * scenar_init(): function to call before use the scenarii array. * scenar_fini(): function to call after end of use of the scenarii array. */ /********************************************************************************************/ /*********************************** Real Test *****************************************/ /********************************************************************************************/ int global = 0; /* atexit() routines */ void at1(void) { global +=1; } void at2(void) { global +=2; } /* Thread routine */ void *threaded(void *arg) { int ret = 0; /* Note that this funtion will be registered once again for each scenario. 
POSIX requires the ability to register at least 32 functions so it should not be an issue in our case, as long as we don't get more than 32 scenarii (with joinable threads) */ ret = atexit(at2); if (ret != 0) { UNRESOLVED(ret, "Failed to register an atexit() routine"); } pthread_exit(NULL + 1); FAILED("pthread_exit() did not terminate the thread"); return NULL; } int main(void) { int ret = 0; void *rval; pthread_t child; int i; output_init(); scenar_init(); for (i = 0; i < NSCENAR; i++) { if (scenarii[i].detached == 0) { #if VERBOSE > 0 output("-----\n"); output("Starting test with scenario (%i): %s\n", i, scenarii[i].descr); #endif ret = pthread_create(&child, &scenarii[i].ta, threaded, NULL); switch (scenarii[i].result) { case 0: /* Operation was expected to succeed */ if (ret != 0) { UNRESOLVED(ret, "Failed to create this thread"); } break; case 1: /* Operation was expected to fail */ if (ret == 0) { UNRESOLVED(-1, "An error was expected but the thread creation succeeded"); } break; case 2: /* We did not know the expected result */ default: #if VERBOSE > 0 if (ret == 0) { output ("Thread has been created successfully for this scenario\n"); } else { output ("Thread creation failed with the error: %s\n", strerror(ret)); } #endif } if (ret == 0) { /* The new thread is running */ ret = pthread_join(child, &rval); if (ret != 0) { UNRESOLVED(ret, "Unable to join a thread"); } if (rval != (NULL + 1)) { FAILED ("pthread_join() did not retrieve the pthread_exit() param"); } if (global !=0) { FAILED ("The function registered with atexit() executed"); } } } } scenar_fini(); #if VERBOSE > 0 output("-----\n"); output("All test data destroyed\n"); output("Test PASSED\n"); #endif PASSED; }
gpl-2.0
treetrees/PandariaEmu-5.3.0
dep/acelite/ace/OS_NS_stropts.cpp
540
6562
// $Id: OS_NS_stropts.cpp 91286 2010-08-05 09:04:31Z johnnyw $

// Portable wrappers around the WinSock-2 WSAIoctl() socket-control call.
// On platforms without WinSock 2 both overloads compile to "not
// supported" stubs via ACE_NOTSUP_RETURN.

#include "ace/OS_NS_stropts.h"

#if !defined (ACE_HAS_INLINED_OSCALLS)
# include "ace/OS_NS_stropts.inl"
#endif /* ACE_HAS_INLINED_OSCALLS */

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

// Generic ioctl: forwards all buffer/overlapped arguments straight to
// WSAIoctl() and maps SOCKET_ERROR into the ACE error convention via
// ACE_SOCKCALL_RETURN.
int
ACE_OS::ioctl (ACE_HANDLE socket,
               unsigned long io_control_code,
               void *in_buffer_p,
               unsigned long in_buffer,
               void *out_buffer_p,
               unsigned long out_buffer,
               unsigned long *bytes_returned,
               ACE_OVERLAPPED *overlapped,
               ACE_OVERLAPPED_COMPLETION_FUNC func)
{
# if defined (ACE_HAS_WINSOCK2) && (ACE_HAS_WINSOCK2 != 0)
  ACE_SOCKCALL_RETURN (::WSAIoctl ((ACE_SOCKET) socket,
                                   io_control_code,
                                   in_buffer_p,
                                   in_buffer,
                                   out_buffer_p,
                                   out_buffer,
                                   bytes_returned,
                                   (WSAOVERLAPPED *) overlapped,
                                   func),
                       int, SOCKET_ERROR);
# else
  ACE_UNUSED_ARG (socket);
  ACE_UNUSED_ARG (io_control_code);
  ACE_UNUSED_ARG (in_buffer_p);
  ACE_UNUSED_ARG (in_buffer);
  ACE_UNUSED_ARG (out_buffer_p);
  ACE_UNUSED_ARG (out_buffer);
  ACE_UNUSED_ARG (bytes_returned);
  ACE_UNUSED_ARG (overlapped);
  ACE_UNUSED_ARG (func);
  ACE_NOTSUP_RETURN (-1);
# endif /* ACE_HAS_WINSOCK2 */
}

// QoS-aware ioctl.  For SIO_SET_QOS the ACE_QoS object is flattened into
// a native QOS structure and written; for every other control code the
// routine performs a size query followed by a read into a heap buffer
// and copies the resulting flowspecs back into ace_qos.
int
ACE_OS::ioctl (ACE_HANDLE socket,
               unsigned long io_control_code,
               ACE_QoS &ace_qos,
               unsigned long *bytes_returned,
               void *buffer_p,
               unsigned long buffer,
               ACE_OVERLAPPED *overlapped,
               ACE_OVERLAPPED_COMPLETION_FUNC func)
{
# if defined (ACE_HAS_WINSOCK2) && (ACE_HAS_WINSOCK2 != 0)
  QOS qos;
  unsigned long qos_len = sizeof (QOS);

  if (io_control_code == SIO_SET_QOS)
    {
      qos.SendingFlowspec = *(ace_qos.sending_flowspec ());
      qos.ReceivingFlowspec = *(ace_qos.receiving_flowspec ());
      qos.ProviderSpecific = (WSABUF) ace_qos.provider_specific ();

      // Total length includes any trailing provider-specific payload.
      qos_len += ace_qos.provider_specific ().iov_len;

      ACE_SOCKCALL_RETURN (::WSAIoctl ((ACE_SOCKET) socket,
                                       io_control_code,
                                       &qos,
                                       qos_len,
                                       buffer_p,
                                       buffer,
                                       bytes_returned,
                                       (WSAOVERLAPPED *) overlapped,
                                       func),
                           int, SOCKET_ERROR);
    }
  else
    {
      unsigned long dwBufferLen = 0;

      // Query for the buffer size.
      int result = ::WSAIoctl ((ACE_SOCKET) socket,
                               io_control_code,
                               0,
                               0,
                               &dwBufferLen,
                               sizeof (dwBufferLen),
                               bytes_returned,
                               0,
                               0);

      if (result == SOCKET_ERROR)
        {
          unsigned long dwErr = ::WSAGetLastError ();

          // WSAENOBUFS is the expected "tell me the size" outcome; any
          // other error (including WSAEWOULDBLOCK) is propagated.
          if (dwErr == WSAEWOULDBLOCK)
            {
              errno = dwErr;
              return -1;
            }
          else
            if (dwErr != WSAENOBUFS)
              {
                errno = dwErr;
                return -1;
              }
        }

      char *qos_buf = 0;

      // NOTE(review): qos_buf is never delete[]d on either the error
      // return below or the success path — this looks like a memory
      // leak; confirm whether ace_qos takes ownership of any part of
      // the buffer before freeing it here.
      ACE_NEW_RETURN (qos_buf,
                      char [dwBufferLen],
                      -1);

      QOS *qos = reinterpret_cast<QOS*> (qos_buf);

      result = ::WSAIoctl ((ACE_SOCKET) socket,
                           io_control_code,
                           0,
                           0,
                           qos,
                           dwBufferLen,
                           bytes_returned,
                           0,
                           0);

      if (result == SOCKET_ERROR)
        return result;

      // Unpack the native flowspecs.  Fields beyond the basic set are
      // only present when GQOS support is compiled in.
      ACE_Flow_Spec sending_flowspec (qos->SendingFlowspec.TokenRate,
                                      qos->SendingFlowspec.TokenBucketSize,
                                      qos->SendingFlowspec.PeakBandwidth,
                                      qos->SendingFlowspec.Latency,
                                      qos->SendingFlowspec.DelayVariation,
# if defined(ACE_HAS_WINSOCK2_GQOS)
                                      qos->SendingFlowspec.ServiceType,
                                      qos->SendingFlowspec.MaxSduSize,
                                      qos->SendingFlowspec.MinimumPolicedSize,
# else /* ACE_HAS_WINSOCK2_GQOS */
                                      0,
                                      0,
                                      0,
# endif /* ACE_HAS_WINSOCK2_GQOS */
                                      0,
                                      0);

      ACE_Flow_Spec receiving_flowspec (qos->ReceivingFlowspec.TokenRate,
                                        qos->ReceivingFlowspec.TokenBucketSize,
                                        qos->ReceivingFlowspec.PeakBandwidth,
                                        qos->ReceivingFlowspec.Latency,
                                        qos->ReceivingFlowspec.DelayVariation,
# if defined(ACE_HAS_WINSOCK2_GQOS)
                                        qos->ReceivingFlowspec.ServiceType,
                                        qos->ReceivingFlowspec.MaxSduSize,
                                        qos->ReceivingFlowspec.MinimumPolicedSize,
# else /* ACE_HAS_WINSOCK2_GQOS */
                                        0,
                                        0,
                                        0,
# endif /* ACE_HAS_WINSOCK2_GQOS */
                                        0,
                                        0);

      // NOTE(review): sending_flowspec / receiving_flowspec are stack
      // locals whose addresses are handed to ace_qos — presumably
      // ACE_QoS copies the pointed-to data; if it merely stores the
      // pointers they dangle as soon as this function returns.  Verify
      // against the ACE_QoS implementation.
      ace_qos.sending_flowspec (&sending_flowspec);
      ace_qos.receiving_flowspec (&receiving_flowspec);
      ace_qos.provider_specific (*((struct iovec *) (&qos->ProviderSpecific)));

      return result;
    }
# else
  ACE_UNUSED_ARG (socket);
  ACE_UNUSED_ARG (io_control_code);
  ACE_UNUSED_ARG (ace_qos);
  ACE_UNUSED_ARG (bytes_returned);
  ACE_UNUSED_ARG (buffer_p);
  ACE_UNUSED_ARG (buffer);
  ACE_UNUSED_ARG (overlapped);
  ACE_UNUSED_ARG (func);
  ACE_NOTSUP_RETURN (-1);
# endif /* ACE_HAS_WINSOCK2 */
}

ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
JoseDuque/linux.4.1.7
arch/arm/mm/pgd.c
540
3494
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

/*
 * Top-level page-directory allocation differs by configuration: with
 * LPAE the pgd is a small kmalloc'ed table; classic ARM needs a 16K
 * (order-2) page allocation.
 */
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif

/*
 * need to get a 16k page for level 1
 */
/*
 * Allocate and initialise a new page directory for @mm: zero the user
 * portion, copy the kernel/IO entries from the init pgd, and (when the
 * vectors page lives at virtual address 0) pre-populate the first two
 * PTEs with the machine vectors.  Returns NULL on any allocation
 * failure, unwinding partial allocations via the goto-cleanup chain.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* Make the copied entries visible to the hardware walker. */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* Copy the two vector-page PTEs from the init tables. */
		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}

/*
 * Free a page directory previously created by pgd_alloc(): tear down
 * the vectors-page pte/pmd/pud chain rooted at entry 0 (accounting for
 * the mm's page-table counters), then on LPAE also free any remaining
 * non-swapper pmd tables, and finally release the pgd itself.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	atomic_long_dec(&mm->nr_ptes);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries shared with swapper_pg_dir are not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}
gpl-2.0
gauravdatir/linux
drivers/mtd/nand/xway_nand.c
540
5202
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 *  Copyright © 2012 John Crispin <blogic@openwrt.org>
 */

/*
 * Plat-NAND glue driver for the Lantiq XWAY SoC External Bus Unit (EBU).
 * The NAND chip is driven through EBU chip-select registers; commands
 * and addresses are distinguished by bits encoded into the write
 * address (ALE/CLE lines are wired to address bits).
 */

#include <linux/mtd/nand.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>

#include <lantiq_soc.h>

/* nand registers */
#define EBU_ADDSEL1		0x24
#define EBU_NAND_CON		0xB0
#define EBU_NAND_WAIT		0xB4
#define EBU_NAND_ECC0		0xB8
#define EBU_NAND_ECC_AC		0xBC

/* nand commands */
#define NAND_CMD_ALE		(1 << 2)
#define NAND_CMD_CLE		(1 << 3)
#define NAND_CMD_CS		(1 << 4)
#define NAND_WRITE_CMD_RESET	0xff
#define NAND_WRITE_CMD		(NAND_CMD_CS | NAND_CMD_CLE)
#define NAND_WRITE_ADDR		(NAND_CMD_CS | NAND_CMD_ALE)
#define NAND_WRITE_DATA		(NAND_CMD_CS)
#define NAND_READ_DATA		(NAND_CMD_CS)
#define NAND_WAIT_WR_C		(1 << 3)
#define NAND_WAIT_RD		(0x1)

/* we need to tell the ebu which addr we mapped the nand to */
#define ADDSEL1_MASK(x)		(x << 4)
#define ADDSEL1_REGEN		1

/* we need to tell the EBU that we have nand attached and set it up properly */
#define BUSCON1_SETUP		(1 << 22)
#define BUSCON1_BCGEN_RES	(0x3 << 12)
#define BUSCON1_WAITWRC2	(2 << 8)
#define BUSCON1_WAITRDC2	(2 << 6)
#define BUSCON1_HOLDC1		(1 << 4)
#define BUSCON1_RECOVC1		(1 << 2)
#define BUSCON1_CMULT4		1

#define NAND_CON_CE		(1 << 20)
#define NAND_CON_OUT_CS1	(1 << 10)
#define NAND_CON_IN_CS1		(1 << 8)
#define NAND_CON_PRE_P		(1 << 7)
#define NAND_CON_WP_P		(1 << 6)
#define NAND_CON_SE_P		(1 << 5)
#define NAND_CON_CS_P		(1 << 4)
#define NAND_CON_CSMUX		(1 << 1)
#define NAND_CON_NANDM		1

/*
 * Issue a RESET (0xff) command to the chip by writing through the
 * command-latch address, then busy-wait (under ebu_lock) until the EBU
 * signals write completion.
 */
static void xway_reset_chip(struct nand_chip *chip)
{
	unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
	unsigned long flags;

	/* Select the command-latch variant of the mapped address. */
	nandaddr &= ~NAND_WRITE_ADDR;
	nandaddr |= NAND_WRITE_CMD;

	/* finish with a reset */
	spin_lock_irqsave(&ebu_lock, flags);
	writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
		;
	spin_unlock_irqrestore(&ebu_lock, flags);
}

/*
 * Assert (chip == 0) or de-assert (chip == -1) NAND mode and the chip
 * enable bit in the EBU NAND controller.  Note the enable/disable bit
 * order is deliberately mirrored.
 */
static void xway_select_chip(struct mtd_info *mtd, int chip)
{

	switch (chip) {
	case -1:
		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
		break;
	case 0:
		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
		break;
	default:
		BUG();
	}
}

/*
 * nand_chip cmd_ctrl hook: on NAND_CTRL_CHANGE, re-encode IO_ADDR_W so
 * subsequent writes hit the command latch (CLE) or address latch (ALE);
 * then, if a command byte was supplied, write it and poll for write
 * completion under ebu_lock.
 */
static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;
	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
	unsigned long flags;

	if (ctrl & NAND_CTRL_CHANGE) {
		nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
		if (ctrl & NAND_CLE)
			nandaddr |= NAND_WRITE_CMD;
		else
			nandaddr |= NAND_WRITE_ADDR;
		this->IO_ADDR_W = (void __iomem *) nandaddr;
	}

	if (cmd != NAND_CMD_NONE) {
		spin_lock_irqsave(&ebu_lock, flags);
		writeb(cmd, this->IO_ADDR_W);
		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
			;
		spin_unlock_irqrestore(&ebu_lock, flags);
	}
}

/* Ready/busy status: the EBU wait register's RD bit mirrors R/B#. */
static int xway_dev_ready(struct mtd_info *mtd)
{
	return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
}

/* Read one data byte from the chip through the data-read address. */
static unsigned char xway_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ebu_lock, flags);
	ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
	spin_unlock_irqrestore(&ebu_lock, flags);

	return ret;
}

/*
 * Probe callback: program the EBU address-select, bus-config and NAND
 * control registers for the mapped NAND base address, honouring the
 * optional "lantiq,cs" chip-select from the device tree, and finish
 * with a chip reset.
 */
static int xway_nand_probe(struct platform_device *pdev)
{
	struct nand_chip *this = platform_get_drvdata(pdev);
	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
	const __be32 *cs = of_get_property(pdev->dev.of_node,
					"lantiq,cs", NULL);
	u32 cs_flag = 0;

	/* load our CS from the DT. Either we find a valid 1 or default to 0 */
	if (cs && (*cs == 1))
		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;

	/* setup the EBU to run in NAND mode on our base addr */
	ltq_ebu_w32(CPHYSADDR(nandaddr)
		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);

	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
		| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
		| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
		| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
		| cs_flag, EBU_NAND_CON);

	/* finish with a reset */
	xway_reset_chip(this);

	return 0;
}

/* Platform-NAND description handed to the generic plat_nand driver. */
static struct platform_nand_data xway_nand_data = {
	.chip = {
		.nr_chips		= 1,
		.chip_delay		= 30,
	},
	.ctrl = {
		.probe		= xway_nand_probe,
		.cmd_ctrl	= xway_cmd_ctrl,
		.dev_ready	= xway_dev_ready,
		.select_chip	= xway_select_chip,
		.read_byte	= xway_read_byte,
	}
};

/*
 * Try to find the node inside the DT. If it is available attach out
 * platform_nand_data
 */
static int __init xway_register_nand(void)
{
	struct device_node *node;
	struct platform_device *pdev;

	node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
	if (!node)
		return -ENOENT;
	pdev = of_find_device_by_node(node);
	if (!pdev)
		return -EINVAL;
	pdev->dev.platform_data = &xway_nand_data;
	of_node_put(node);
	return 0;
}

subsys_initcall(xway_register_nand);
gpl-2.0
TheWhisp/android_kernel_samsung_msm7x27
drivers/staging/rt2860/common/ee_efuse.c
1052
9708
/*
 *************************************************************************
 * Ralink Tech Inc.
 * 5F., No.36, Taiyuan St., Jhubei City,
 * Hsinchu County 302,
 * Taiwan, R.O.C.
 *
 * (c) Copyright 2002-2007, Ralink Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *************************************************************************

	Module Name:
	ee_efuse.c

	Abstract:
	eFuse-based EEPROM emulation for RT30xx adapters: logical and
	physical reads through the EFUSE_CTRL register, plus free-block
	accounting over the eFuse usage map.

	Revision History:
	Who			When			What
	--------	----------		----------------------------------------------
*/

#include "../rt_config.h"

#define EFUSE_USAGE_MAP_START	0x2d0
#define EFUSE_USAGE_MAP_END	0x2fc
#define EFUSE_USAGE_MAP_SIZE	45

#define EFUSE_EEPROM_DEFULT_FILE	"RT30xxEEPROM.bin"
#define MAX_EEPROM_BIN_FILE_SIZE	1024

#define EFUSE_TAG	0x2fe

/* Bit-field view of the EFUSE_CTRL (0x580) hardware register. */
typedef union _EFUSE_CTRL_STRUC {
	struct {
		u32 EFSROM_AOUT:6;
		u32 EFSROM_MODE:2;
		u32 EFSROM_LDO_OFF_TIME:6;
		u32 EFSROM_LDO_ON_TIME:2;
		u32 EFSROM_AIN:10;
		u32 RESERVED:4;
		u32 EFSROM_KICK:1;
		u32 SEL_EFUSE:1;
	} field;
	u32 word;
} EFUSE_CTRL_STRUC, *PEFUSE_CTRL_STRUC;

/*
========================================================================

	Routine Description:
		Logical eFuse read: kick off a mode-0 read for the 16-byte
		block containing Offset, poll until the hardware clears
		EFSROM_KICK, then copy Length bytes into pData.  If the
		logical address is not mapped (AOUT == 0x3f) the output is
		filled with 0xffff instead.

	Arguments:
		pAd - adapter; Offset - logical byte offset; Length - bytes
		to read; pData - destination (u16-aligned).

	Return Value:
		The EFSROM_AOUT field from the completed read.

	Note:

========================================================================
*/
u8 eFuseReadRegisters(struct rt_rtmp_adapter *pAd,
		      u16 Offset, u16 Length, u16 * pData)
{
	EFUSE_CTRL_STRUC eFuseCtrlStruc;
	int i;
	u16 efuseDataOffset;
	u32 data;

	RTMP_IO_READ32(pAd, EFUSE_CTRL, &eFuseCtrlStruc.word);

	/*Step0. Write 10-bit of address to EFSROM_AIN (0x580, bit25:bit16). The address must be 16-byte alignment. */
	/*Use the eeprom logical address and covert to address to block number */
	eFuseCtrlStruc.field.EFSROM_AIN = Offset & 0xfff0;

	/*Step1. Write EFSROM_MODE (0x580, bit7:bit6) to 0. */
	eFuseCtrlStruc.field.EFSROM_MODE = 0;

	/*Step2. Write EFSROM_KICK (0x580, bit30) to 1 to kick-off physical read procedure. */
	eFuseCtrlStruc.field.EFSROM_KICK = 1;

	NdisMoveMemory(&data, &eFuseCtrlStruc, 4);
	RTMP_IO_WRITE32(pAd, EFUSE_CTRL, data);

	/*Step3. Polling EFSROM_KICK(0x580, bit30) until it become 0 again. */
	/* Bounded poll: up to 500 iterations with a 2us delay each. */
	i = 0;
	while (i < 500) {
		/*rtmp.HwMemoryReadDword(EFUSE_CTRL, (DWORD *) &eFuseCtrlStruc, 4); */
		RTMP_IO_READ32(pAd, EFUSE_CTRL, &eFuseCtrlStruc.word);

		if (eFuseCtrlStruc.field.EFSROM_KICK == 0) {
			break;
		}
		RTMPusecDelay(2);
		i++;
	}

	/*if EFSROM_AOUT is not found in physical address, write 0xffff */
	if (eFuseCtrlStruc.field.EFSROM_AOUT == 0x3f) {
		/* NOTE(review): the stride here writes pData[0], pData[2],
		 * ... pData[Length-2], skipping odd indices and touching
		 * indices beyond Length/2 entries.  pData[i] looks like the
		 * intent; harmless for the usual Length==2 calls, but
		 * confirm before relying on larger reads. */
		for (i = 0; i < Length / 2; i++)
			*(pData + 2 * i) = 0xffff;
	} else {
		/*Step4. Read 16-byte of data from EFUSE_DATA0-3 (0x590-0x59C) */
		efuseDataOffset = EFUSE_DATA3 - (Offset & 0xC);
		/*data hold 4 bytes data. */
		/*In RTMP_IO_READ32 will automatically execute 32-bytes swapping */
		RTMP_IO_READ32(pAd, efuseDataOffset, &data);
		/*Decide the upper 2 bytes or the bottom 2 bytes. */
		/* Little-endian		S	|	S	Big-endian */
		/* addr	3	2	1	0	|	0	1	2	3 */
		/* Ori-V	D	C	B	A	|	A	B	C	D */
		/*After swapping */
		/*		D	C	B	A	|	D	C	B	A */
		/*Return 2-bytes */
		/*The return byte statrs from S. Therefore, the little-endian will return BA, the Big-endian will return DC. */
		/*For returning the bottom 2 bytes, the Big-endian should shift right 2-bytes. */
		data = data >> (8 * (Offset & 0x3));

		NdisMoveMemory(pData, &data, Length);
	}

	return (u8)eFuseCtrlStruc.field.EFSROM_AOUT;
}

/*
========================================================================

	Routine Description:
		Physical eFuse read: same kick/poll protocol as
		eFuseReadRegisters but in mode 1 (physical view), with no
		not-found handling — the raw block data is always copied.

	Arguments:
		pAd - adapter; Offset - physical byte offset; Length - bytes
		to read; pData - destination (u16-aligned).

	Return Value:

	Note:

========================================================================
*/
void eFusePhysicalReadRegisters(struct rt_rtmp_adapter *pAd,
				u16 Offset, u16 Length, u16 * pData)
{
	EFUSE_CTRL_STRUC eFuseCtrlStruc;
	int i;
	u16 efuseDataOffset;
	u32 data;

	RTMP_IO_READ32(pAd, EFUSE_CTRL, &eFuseCtrlStruc.word);

	/*Step0. Write 10-bit of address to EFSROM_AIN (0x580, bit25:bit16). The address must be 16-byte alignment. */
	eFuseCtrlStruc.field.EFSROM_AIN = Offset & 0xfff0;

	/*Step1. Write EFSROM_MODE (0x580, bit7:bit6) to 1. */
	/*Read in physical view */
	eFuseCtrlStruc.field.EFSROM_MODE = 1;

	/*Step2. Write EFSROM_KICK (0x580, bit30) to 1 to kick-off physical read procedure. */
	eFuseCtrlStruc.field.EFSROM_KICK = 1;

	NdisMoveMemory(&data, &eFuseCtrlStruc, 4);
	RTMP_IO_WRITE32(pAd, EFUSE_CTRL, data);

	/*Step3. Polling EFSROM_KICK(0x580, bit30) until it become 0 again. */
	i = 0;
	while (i < 500) {
		RTMP_IO_READ32(pAd, EFUSE_CTRL, &eFuseCtrlStruc.word);
		if (eFuseCtrlStruc.field.EFSROM_KICK == 0)
			break;
		RTMPusecDelay(2);
		i++;
	}

	/*Step4. Read 16-byte of data from EFUSE_DATA0-3 (0x59C-0x590) */
	/*Because the size of each EFUSE_DATA is 4 Bytes, the size of address of each is 2 bits. */
	/*The previous 2 bits is the EFUSE_DATA number, the last 2 bits is used to decide which bytes */
	/*Decide which EFUSE_DATA to read */
	/*590:F E D C */
	/*594:B A 9 8 */
	/*598:7 6 5 4 */
	/*59C:3 2 1 0 */
	efuseDataOffset = EFUSE_DATA3 - (Offset & 0xC);

	RTMP_IO_READ32(pAd, efuseDataOffset, &data);

	data = data >> (8 * (Offset & 0x3));

	NdisMoveMemory(pData, &data, Length);
}

/*
========================================================================

	Routine Description:
		Read Length bytes of physical eFuse content starting at the
		offset given in lpInBuffer[0], two bytes at a time, into
		lpOutBuffer.

	Arguments:
		lpInBuffer - [0]=addr, [1]=length; lpOutBuffer - destination.

	Return Value:

	Note:

========================================================================
*/
static void eFuseReadPhysical(struct rt_rtmp_adapter *pAd,
			      u16 *lpInBuffer, unsigned long nInBufferSize,
			      u16 *lpOutBuffer, unsigned long nOutBufferSize)
{
	u16 *pInBuf = (u16 *) lpInBuffer;
	u16 *pOutBuf = (u16 *) lpOutBuffer;

	u16 Offset = pInBuf[0];	/*addr */
	u16 Length = pInBuf[1];	/*length */
	int i;

	for (i = 0; i < Length; i += 2) {
		eFusePhysicalReadRegisters(pAd, Offset + i, 2,
					   &pOutBuf[i / 2]);
	}
}

/*
========================================================================

	Routine Description:
		Scan the eFuse usage map and print the number of still-free
		blocks.  Each map word tracks two blocks (low byte / high
		byte); a zero byte marks the first unused block.

	Arguments:

	Return Value:
		TRUE on success, FALSE when the adapter is not in eFuse mode.

	Note:

========================================================================
*/
int set_eFuseGetFreeBlockCount_Proc(struct rt_rtmp_adapter *pAd, char *arg)
{
	u16 i;
	u16 LogicalAddress;
	u16 efusefreenum = 0;
	if (!pAd->bUseEfuse)
		return FALSE;
	for (i = EFUSE_USAGE_MAP_START; i <= EFUSE_USAGE_MAP_END; i += 2) {
		eFusePhysicalReadRegisters(pAd, i, 2, &LogicalAddress);
		if ((LogicalAddress & 0xff) == 0) {
			efusefreenum = (u8)(EFUSE_USAGE_MAP_END - i + 1);
			break;
		} else if (((LogicalAddress >> 8) & 0xff) == 0) {
			efusefreenum = (u8)(EFUSE_USAGE_MAP_END - i);
			break;
		}

		if (i == EFUSE_USAGE_MAP_END)
			efusefreenum = 0;
	}
	printk("efuseFreeNumber is %d\n", efusefreenum);
	return TRUE;
}

/* Dump the whole physical eFuse content, 8 words per "Block" line. */
int set_eFusedump_Proc(struct rt_rtmp_adapter *pAd, char *arg)
{
	u16 InBuf[3];
	int i = 0;
	if (!pAd->bUseEfuse)
		return FALSE;
	for (i = 0; i < EFUSE_USAGE_MAP_END / 2; i++) {
		InBuf[0] = 2 * i;	/* physical address */
		InBuf[1] = 2;		/* read 2 bytes */
		InBuf[2] = 0x0;		/* receives the data */

		eFuseReadPhysical(pAd, &InBuf[0], 4, &InBuf[2], 2);
		if (i % 4 == 0)
			printk("\nBlock %x:", i / 8);
		printk("%04x ", InBuf[2]);
	}
	return TRUE;
}

/* EEPROM-style 16-bit read via the logical eFuse mapping. */
int rtmp_ee_efuse_read16(struct rt_rtmp_adapter *pAd,
			 u16 Offset, u16 * pValue)
{
	eFuseReadRegisters(pAd, Offset, 2, pValue);
	return (*pValue);
}

/* On RT30xx parts, read the eFuse tag byte into pAd->EFuseTag. */
int RtmpEfuseSupportCheck(struct rt_rtmp_adapter *pAd)
{
	u16 value;

	if (IS_RT30xx(pAd)) {
		eFusePhysicalReadRegisters(pAd, EFUSE_TAG, 2, &value);
		pAd->EFuseTag = (value & 0xff);
	}
	return 0;
}

/*
 * Same usage-map scan as set_eFuseGetFreeBlockCount_Proc, but returns
 * the count through *EfuseFreeBlock instead of printing it.
 */
void eFuseGetFreeBlockCount(struct rt_rtmp_adapter *pAd, u32 *EfuseFreeBlock)
{
	u16 i;
	u16 LogicalAddress;
	if (!pAd->bUseEfuse) {
		DBGPRINT(RT_DEBUG_TRACE,
			 ("eFuseGetFreeBlockCount Only supports efuse Mode\n"));
		return;
	}
	for (i = EFUSE_USAGE_MAP_START; i <= EFUSE_USAGE_MAP_END; i += 2) {
		eFusePhysicalReadRegisters(pAd, i, 2, &LogicalAddress);
		if ((LogicalAddress & 0xff) == 0) {
			*EfuseFreeBlock = (u8)(EFUSE_USAGE_MAP_END - i + 1);
			break;
		} else if (((LogicalAddress >> 8) & 0xff) == 0) {
			*EfuseFreeBlock = (u8)(EFUSE_USAGE_MAP_END - i);
			break;
		}

		if (i == EFUSE_USAGE_MAP_END)
			*EfuseFreeBlock = 0;
	}
	DBGPRINT(RT_DEBUG_TRACE,
		 ("eFuseGetFreeBlockCount is 0x%x\n", *EfuseFreeBlock));
}

/* Log the eFuse geometry and the current free-block count at init. */
int eFuse_init(struct rt_rtmp_adapter *pAd)
{
	u32 EfuseFreeBlock = 0;
	DBGPRINT(RT_DEBUG_ERROR,
		 ("NVM is Efuse and its size =%x[%x-%x] \n",
		  EFUSE_USAGE_MAP_SIZE, EFUSE_USAGE_MAP_START,
		  EFUSE_USAGE_MAP_END));
	eFuseGetFreeBlockCount(pAd, &EfuseFreeBlock);

	return 0;
}
gpl-2.0
Ken-Liu/OpenScrKernel_For_XC210
drivers/net/a2065.c
1052
20676
/* * Amiga Linux/68k A2065 Ethernet Driver * * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org> * * Fixes and tips by: * - Janos Farkas (CHEXUM@sparta.banki.hu) * - Jes Degn Soerensen (jds@kom.auc.dk) * - Matt Domsch (Matt_Domsch@dell.com) * * ---------------------------------------------------------------------------- * * This program is based on * * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver * (C) Copyright 1995 by Geert Uytterhoeven, * Peter De Schrijver * * lance.c: An AMD LANCE ethernet driver for linux. * Written 1993-94 by Donald Becker. * * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller * Advanced Micro Devices * Publication #16907, Rev. B, Amendment/0, May 1994 * * ---------------------------------------------------------------------------- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. * * ---------------------------------------------------------------------------- * * The A2065 is a Zorro-II board made by Commodore/Ameristar. 
It contains: * * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with * both 10BASE-2 (thin coax) and AUI (DB-15) connectors */ #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/zorro.h> #include <linux/bitops.h> #include <asm/irq.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include "a2065.h" /* * Transmit/Receive Ring Definitions */ #define LANCE_LOG_TX_BUFFERS (2) #define LANCE_LOG_RX_BUFFERS (4) #define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS) #define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS) #define TX_RING_MOD_MASK (TX_RING_SIZE-1) #define RX_RING_MOD_MASK (RX_RING_SIZE-1) #define PKT_BUF_SIZE (1544) #define RX_BUFF_SIZE PKT_BUF_SIZE #define TX_BUFF_SIZE PKT_BUF_SIZE /* * Layout of the Lance's RAM Buffer */ struct lance_init_block { unsigned short mode; /* Pre-set mode (reg. 15) */ unsigned char phys_addr[6]; /* Physical ethernet address */ unsigned filter[2]; /* Multicast filter. */ /* Receive and transmit ring base, along with extra bits. */ unsigned short rx_ptr; /* receive descriptor addr */ unsigned short rx_len; /* receive len and high addr */ unsigned short tx_ptr; /* transmit descriptor addr */ unsigned short tx_len; /* transmit len and high addr */ /* The Tx and Rx ring entries must aligned on 8-byte boundaries. 
*/ struct lance_rx_desc brx_ring[RX_RING_SIZE]; struct lance_tx_desc btx_ring[TX_RING_SIZE]; char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; }; /* * Private Device Data */ struct lance_private { char *name; volatile struct lance_regs *ll; volatile struct lance_init_block *init_block; /* Hosts view */ volatile struct lance_init_block *lance_init_block; /* Lance view */ int rx_new, tx_new; int rx_old, tx_old; int lance_log_rx_bufs, lance_log_tx_bufs; int rx_ring_mod_mask, tx_ring_mod_mask; int tpe; /* cable-selection is TPE */ int auto_select; /* cable-selection by carrier */ unsigned short busmaster_regval; #ifdef CONFIG_SUNLANCE struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */ int burst_sizes; /* ledma SBus burst sizes */ #endif struct timer_list multicast_timer; }; #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\ lp->tx_old - lp->tx_new-1) #define LANCE_ADDR(x) ((int)(x) & ~0xff000000) /* Load the CSR registers */ static void load_csrs (struct lance_private *lp) { volatile struct lance_regs *ll = lp->ll; volatile struct lance_init_block *aib = lp->lance_init_block; int leptr; leptr = LANCE_ADDR (aib); ll->rap = LE_CSR1; ll->rdp = (leptr & 0xFFFF); ll->rap = LE_CSR2; ll->rdp = leptr >> 16; ll->rap = LE_CSR3; ll->rdp = lp->busmaster_regval; /* Point back to csr0 */ ll->rap = LE_CSR0; } #define ZERO 0 /* Setup the Lance Rx and Tx rings */ static void lance_init_ring (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ int leptr; int i; aib = lp->lance_init_block; /* Lock out other processes while setting up hardware */ netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; ib->mode = 0; /* Copy the ethernet address to the lance init block * Note that on the sparc you need to swap the 
ethernet address. */ ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; if (ZERO) printk(KERN_DEBUG "TX rings:\n"); /* Setup the Tx ring entries */ for (i = 0; i <= (1<<lp->lance_log_tx_bufs); i++) { leptr = LANCE_ADDR(&aib->tx_buf[i][0]); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; if (i < 3 && ZERO) printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr); } /* Setup the Rx ring entries */ if (ZERO) printk(KERN_DEBUG "RX rings:\n"); for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) { leptr = LANCE_ADDR(&aib->rx_buf[i][0]); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; if (i < 3 && ZERO) printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(&aib->brx_ring); ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); ib->rx_ptr = leptr; if (ZERO) printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr); /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(&aib->btx_ring); ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); ib->tx_ptr = leptr; if (ZERO) printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr); /* Clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; } static int init_restart_lance (struct lance_private *lp) { volatile struct lance_regs *ll = lp->ll; int i; ll->rap = LE_CSR0; ll->rdp = LE_C0_INIT; /* Wait for the lance to complete initialization */ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++) barrier(); if ((i == 100) || (ll->rdp & LE_C0_ERR)) { printk(KERN_ERR "LANCE unopened after 
%d ticks, csr0=%4.4x.\n", i, ll->rdp); return -EIO; } /* Clear IDON by writing a "1", enable interrupts and start lance */ ll->rdp = LE_C0_IDON; ll->rdp = LE_C0_INEA | LE_C0_STRT; return 0; } static int lance_rx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_regs *ll = lp->ll; volatile struct lance_rx_desc *rd; unsigned char bits; #ifdef TEST_HITS int i; printk(KERN_DEBUG "["); for (i = 0; i < RX_RING_SIZE; i++) { if (i == lp->rx_new) printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X"); else printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1"); } printk ("]\n"); #endif ll->rdp = LE_C0_RINT|LE_C0_INEA; for (rd = &ib->brx_ring [lp->rx_new]; !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [lp->rx_new]) { /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { int len = (rd->mblength & 0xfff) - 4; struct sk_buff *skb = dev_alloc_skb (len+2); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze, " "deferring packet.\n", dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; return 0; } skb_reserve (skb, 2); /* 16 byte align */ skb_put (skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), len); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = 
LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; } return 0; } static int lance_tx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_regs *ll = lp->ll; volatile struct lance_tx_desc *td; int i, j; int status; /* csr0 is 2f3 */ ll->rdp = LE_C0_TINT | LE_C0_INEA; /* csr0 is 73 */ j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { td = &ib->btx_ring [i]; /* If we hit a packet not owned by us, stop */ if (td->tmd1_bits & LE_T1_OWN) break; if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_ERR "%s: Carrier Lost, " "trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); /* Stop the lance */ ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, " "restarting\n", dev->name); /* Stop the lance */ ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits &= ~(LE_T1_POK); /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. 
*/ if (td->tmd1_bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; } lp->tx_old = j; ll->rdp = LE_C0_TINT | LE_C0_INEA; return 0; } static irqreturn_t lance_interrupt (int irq, void *dev_id) { struct net_device *dev; struct lance_private *lp; volatile struct lance_regs *ll; int csr0; dev = (struct net_device *) dev_id; lp = netdev_priv(dev); ll = lp->ll; ll->rap = LE_CSR0; /* LANCE Controller Status */ csr0 = ll->rdp; if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */ return IRQ_NONE; /* been generated by the Lance. */ /* Acknowledge all the interrupt sources ASAP */ ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT| LE_C0_INIT); if ((csr0 & LE_C0_ERR)) { /* Clear the error condition */ ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA; } if (csr0 & LE_C0_RINT) lance_rx (dev); if (csr0 & LE_C0_TINT) lance_tx (dev); /* Log misc errors. */ if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & LE_C0_MERR) { printk(KERN_ERR "%s: Bus master arbitration failure, status " "%4.4x.\n", dev->name, csr0); /* Restart the chip. 
*/ ll->rdp = LE_C0_STRT; } if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0) netif_wake_queue(dev); ll->rap = LE_CSR0; ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR| LE_C0_IDON|LE_C0_INEA; return IRQ_HANDLED; } static int lance_open (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; int ret; /* Stop the Lance */ ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; /* Install the Interrupt handler */ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; load_csrs (lp); lance_init_ring (dev); netif_start_queue(dev); return init_restart_lance (lp); } static int lance_close (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; netif_stop_queue(dev); del_timer_sync(&lp->multicast_timer); /* Stop the card */ ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; free_irq(IRQ_AMIGA_PORTS, dev); return 0; } static inline int lance_reset (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; int status; /* Stop the lance */ ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; load_csrs (lp); lance_init_ring (dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_start_queue(dev); status = init_restart_lance (lp); #ifdef DEBUG_DRIVER printk(KERN_DEBUG "Lance restart=%d\n", status); #endif return status; } static void lance_tx_timeout(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", dev->name, ll->rdp); lance_reset(dev); netif_wake_queue(dev); } static netdev_tx_t lance_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; volatile struct lance_init_block *ib = lp->init_block; int entry, skblen; int status = NETDEV_TX_OK; unsigned long flags; if 
(skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; skblen = max_t(unsigned, skb->len, ETH_ZLEN); local_irq_save(flags); if (!TX_BUFFS_AVAIL){ local_irq_restore(flags); return NETDEV_TX_LOCKED; } #ifdef DEBUG_DRIVER /* dump the packet */ print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data, 64, true); #endif entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-skblen) | 0xf000; ib->btx_ring [entry].misc = 0; skb_copy_from_linear_data(skb, (void *)&ib->tx_buf [entry][0], skblen); /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; dev->stats.tx_bytes += skblen; if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); /* Kick the lance: transmit now */ ll->rdp = LE_C0_INEA | LE_C0_TDMD; dev_kfree_skb (skb); local_irq_restore(flags); return status; } /* taken from the depca driver */ static void lance_load_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; struct netdev_hw_addr *ha; char *addrs; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI){ ib->filter [0] = 0xffffffff; ib->filter [1] = 0xffffffff; return; } /* clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) continue; crc = ether_crc_le(6, addrs); crc = crc >> 26; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } static void lance_set_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_regs *ll = lp->ll; if (!netif_running(dev)) return; if (lp->tx_old != lp->tx_new) { mod_timer(&lp->multicast_timer, jiffies + 4); netif_wake_queue(dev); return; } netif_stop_queue(dev); ll->rap = LE_CSR0; ll->rdp = LE_C0_STOP; lance_init_ring (dev); if (dev->flags & IFF_PROMISC) { ib->mode |= LE_MO_PROM; } else { ib->mode &= ~LE_MO_PROM; lance_load_multicast (dev); } load_csrs (lp); init_restart_lance (lp); netif_wake_queue(dev); } static int __devinit a2065_init_one(struct zorro_dev *z, const struct zorro_device_id *ent); static void __devexit a2065_remove_one(struct zorro_dev *z); static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { { ZORRO_PROD_CBM_A2065_1 }, { ZORRO_PROD_CBM_A2065_2 }, { ZORRO_PROD_AMERISTAR_A2065 }, { 0 } }; MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); static struct zorro_driver a2065_driver = { .name = "a2065", .id_table = a2065_zorro_tbl, .probe = a2065_init_one, .remove = __devexit_p(a2065_remove_one), }; static const struct net_device_ops lance_netdev_ops = { .ndo_open = lance_open, .ndo_stop = lance_close, .ndo_start_xmit = lance_start_xmit, .ndo_tx_timeout = lance_tx_timeout, .ndo_set_multicast_list = lance_set_multicast, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static int __devinit a2065_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { struct net_device *dev; struct lance_private *priv; unsigned long board, base_addr, mem_start; struct resource *r1, *r2; int err; board = z->resource.start; base_addr = board+A2065_LANCE; mem_start = board+A2065_RAM; r1 = request_mem_region(base_addr, sizeof(struct lance_regs), "Am7990"); if (!r1) 
return -EBUSY; r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); if (!r2) { release_resource(r1); return -EBUSY; } dev = alloc_etherdev(sizeof(struct lance_private)); if (dev == NULL) { release_resource(r1); release_resource(r2); return -ENOMEM; } priv = netdev_priv(dev); r1->name = dev->name; r2->name = dev->name; dev->dev_addr[0] = 0x00; if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ dev->dev_addr[1] = 0x80; dev->dev_addr[2] = 0x10; } else { /* Ameristar */ dev->dev_addr[1] = 0x00; dev->dev_addr[2] = 0x9f; } dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff; dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff; dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; dev->base_addr = ZTWO_VADDR(base_addr); dev->mem_start = ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start+A2065_RAM_SIZE; priv->ll = (volatile struct lance_regs *)dev->base_addr; priv->init_block = (struct lance_init_block *)dev->mem_start; priv->lance_init_block = (struct lance_init_block *)A2065_RAM; priv->auto_select = 0; priv->busmaster_regval = LE_C3_BSWP; priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; priv->rx_ring_mod_mask = RX_RING_MOD_MASK; priv->tx_ring_mod_mask = TX_RING_MOD_MASK; dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; dev->dma = 0; init_timer(&priv->multicast_timer); priv->multicast_timer.data = (unsigned long) dev; priv->multicast_timer.function = (void (*)(unsigned long)) &lance_set_multicast; err = register_netdev(dev); if (err) { release_resource(r1); release_resource(r2); free_netdev(dev); return err; } zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address " "%pM\n", dev->name, board, dev->dev_addr); return 0; } static void __devexit a2065_remove_one(struct zorro_dev *z) { struct net_device *dev = zorro_get_drvdata(z); unregister_netdev(dev); release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct lance_regs)); 
release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE); free_netdev(dev); } static int __init a2065_init_module(void) { return zorro_register_driver(&a2065_driver); } static void __exit a2065_cleanup_module(void) { zorro_unregister_driver(&a2065_driver); } module_init(a2065_init_module); module_exit(a2065_cleanup_module); MODULE_LICENSE("GPL");
gpl-2.0
losfair/MiracleKernel
arch/arm/mach-pxa/vpac270.c
1820
17749
/* * Hardware definitions for Voipac PXA270 * * Copyright (C) 2010 * Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/gpio.h> #include <linux/usb/gpio_vbus.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/onenand.h> #include <linux/dm9000.h> #include <linux/ucb1400.h> #include <linux/ata_platform.h> #include <linux/regulator/max1586.h> #include <linux/i2c/pxa-i2c.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa27x.h> #include <mach/audio.h> #include <mach/vpac270.h> #include <mach/mmc.h> #include <mach/pxafb.h> #include <mach/ohci.h> #include <mach/pxa27x-udc.h> #include <mach/udc.h> #include <mach/pata_pxa.h> #include "generic.h" #include "devices.h" /****************************************************************************** * Pin configuration ******************************************************************************/ static unsigned long vpac270_pin_config[] __initdata = { /* MMC */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, GPIO53_GPIO, /* SD detect */ GPIO52_GPIO, /* SD r/o switch */ /* GPIO KEYS */ GPIO1_GPIO, /* USER BTN */ /* LEDs */ GPIO15_GPIO, /* orange led */ /* FFUART */ GPIO34_FFUART_RXD, GPIO39_FFUART_TXD, GPIO27_FFUART_RTS, GPIO100_FFUART_CTS, GPIO33_FFUART_DSR, GPIO40_FFUART_DTR, GPIO10_FFUART_DCD, GPIO38_FFUART_RI, /* LCD */ GPIO58_LCD_LDD_0, GPIO59_LCD_LDD_1, GPIO60_LCD_LDD_2, GPIO61_LCD_LDD_3, GPIO62_LCD_LDD_4, GPIO63_LCD_LDD_5, GPIO64_LCD_LDD_6, GPIO65_LCD_LDD_7, GPIO66_LCD_LDD_8, GPIO67_LCD_LDD_9, GPIO68_LCD_LDD_10, GPIO69_LCD_LDD_11, GPIO70_LCD_LDD_12, 
GPIO71_LCD_LDD_13, GPIO72_LCD_LDD_14, GPIO73_LCD_LDD_15, GPIO86_LCD_LDD_16, GPIO87_LCD_LDD_17, GPIO74_LCD_FCLK, GPIO75_LCD_LCLK, GPIO76_LCD_PCLK, GPIO77_LCD_BIAS, /* PCMCIA */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, GPIO54_nPCE_2, GPIO55_nPREG, GPIO57_nIOIS16, GPIO56_nPWAIT, GPIO104_PSKTSEL, GPIO84_GPIO, /* PCMCIA CD */ GPIO35_GPIO, /* PCMCIA RDY */ GPIO107_GPIO, /* PCMCIA PPEN */ GPIO11_GPIO, /* PCMCIA RESET */ GPIO17_GPIO, /* CF CD */ GPIO12_GPIO, /* CF RDY */ GPIO16_GPIO, /* CF RESET */ /* UHC */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, GPIO119_USBH2_PWR, GPIO120_USBH2_PEN, /* UDC */ GPIO41_GPIO, /* Ethernet */ GPIO114_GPIO, /* IRQ */ /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, GPIO95_AC97_nRESET, GPIO98_AC97_SYSCLK, GPIO113_GPIO, /* TS IRQ */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* IDE */ GPIO36_GPIO, /* IDE IRQ */ GPIO80_DREQ_1, }; /****************************************************************************** * NOR Flash ******************************************************************************/ #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition vpac270_nor_partitions[] = { { .name = "Flash", .offset = 0x00000000, .size = MTDPART_SIZ_FULL, } }; static struct physmap_flash_data vpac270_flash_data[] = { { .width = 2, /* bankwidth in bytes */ .parts = vpac270_nor_partitions, .nr_parts = ARRAY_SIZE(vpac270_nor_partitions) } }; static struct resource vpac270_flash_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device vpac270_flash = { .name = "physmap-flash", .id = 0, .resource = &vpac270_flash_resource, .num_resources = 1, .dev = { .platform_data = vpac270_flash_data, }, }; static void __init vpac270_nor_init(void) { platform_device_register(&vpac270_flash); } #else static inline void vpac270_nor_init(void) {} #endif 
/****************************************************************************** * OneNAND Flash ******************************************************************************/ #if defined(CONFIG_MTD_ONENAND) || defined(CONFIG_MTD_ONENAND_MODULE) static struct mtd_partition vpac270_onenand_partitions[] = { { .name = "Flash", .offset = 0x00000000, .size = MTDPART_SIZ_FULL, } }; static struct onenand_platform_data vpac270_onenand_info = { .parts = vpac270_onenand_partitions, .nr_parts = ARRAY_SIZE(vpac270_onenand_partitions), }; static struct resource vpac270_onenand_resources[] = { [0] = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_1M, .flags = IORESOURCE_MEM, }, }; static struct platform_device vpac270_onenand = { .name = "onenand-flash", .id = -1, .resource = vpac270_onenand_resources, .num_resources = ARRAY_SIZE(vpac270_onenand_resources), .dev = { .platform_data = &vpac270_onenand_info, }, }; static void __init vpac270_onenand_init(void) { platform_device_register(&vpac270_onenand); } #else static void __init vpac270_onenand_init(void) {} #endif /****************************************************************************** * SD/MMC card controller ******************************************************************************/ #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) static struct pxamci_platform_data vpac270_mci_platform_data = { .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .gpio_power = -1, .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, .detect_delay_ms = 200, }; static void __init vpac270_mmc_init(void) { pxa_set_mci_info(&vpac270_mci_platform_data); } #else static inline void vpac270_mmc_init(void) {} #endif /****************************************************************************** * GPIO keys ******************************************************************************/ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button 
vpac270_pxa_buttons[] = { {KEY_POWER, GPIO1_VPAC270_USER_BTN, 0, "USER BTN"}, }; static struct gpio_keys_platform_data vpac270_pxa_keys_data = { .buttons = vpac270_pxa_buttons, .nbuttons = ARRAY_SIZE(vpac270_pxa_buttons), }; static struct platform_device vpac270_pxa_keys = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &vpac270_pxa_keys_data, }, }; static void __init vpac270_keys_init(void) { platform_device_register(&vpac270_pxa_keys); } #else static inline void vpac270_keys_init(void) {} #endif /****************************************************************************** * LED ******************************************************************************/ #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) struct gpio_led vpac270_gpio_leds[] = { { .name = "vpac270:orange:user", .default_trigger = "none", .gpio = GPIO15_VPAC270_LED_ORANGE, .active_low = 1, } }; static struct gpio_led_platform_data vpac270_gpio_led_info = { .leds = vpac270_gpio_leds, .num_leds = ARRAY_SIZE(vpac270_gpio_leds), }; static struct platform_device vpac270_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &vpac270_gpio_led_info, } }; static void __init vpac270_leds_init(void) { platform_device_register(&vpac270_leds); } #else static inline void vpac270_leds_init(void) {} #endif /****************************************************************************** * USB Host ******************************************************************************/ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int vpac270_ohci_init(struct device *dev) { UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE; return 0; } static struct pxaohci_platform_data vpac270_ohci_info = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | ENABLE_PORT2 | POWER_CONTROL_LOW | POWER_SENSE_LOW, .init = vpac270_ohci_init, }; static void __init vpac270_uhc_init(void) { pxa_set_ohci_info(&vpac270_ohci_info); } #else static inline void 
vpac270_uhc_init(void) {} #endif /****************************************************************************** * USB Gadget ******************************************************************************/ #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = { .gpio_vbus = GPIO41_VPAC270_UDC_DETECT, .gpio_pullup = -1, }; static struct platform_device vpac270_gpio_vbus = { .name = "gpio-vbus", .id = -1, .dev = { .platform_data = &vpac270_gpio_vbus_info, }, }; static void vpac270_udc_command(int cmd) { if (cmd == PXA2XX_UDC_CMD_CONNECT) UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE; else if (cmd == PXA2XX_UDC_CMD_DISCONNECT) UP2OCR = UP2OCR_HXOE; } static struct pxa2xx_udc_mach_info vpac270_udc_info __initdata = { .udc_command = vpac270_udc_command, .gpio_pullup = -1, }; static void __init vpac270_udc_init(void) { pxa_set_udc_info(&vpac270_udc_info); platform_device_register(&vpac270_gpio_vbus); } #else static inline void vpac270_udc_init(void) {} #endif /****************************************************************************** * Ethernet ******************************************************************************/ #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) static struct resource vpac270_dm9000_resources[] = { [0] = { .start = PXA_CS2_PHYS + 0x300, .end = PXA_CS2_PHYS + 0x303, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_CS2_PHYS + 0x304, .end = PXA_CS2_PHYS + 0x343, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ), .end = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct dm9000_plat_data vpac270_dm9000_platdata = { .flags = DM9000_PLATF_32BITONLY, }; static struct platform_device vpac270_dm9000_device = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(vpac270_dm9000_resources), .resource = vpac270_dm9000_resources, .dev = { .platform_data = &vpac270_dm9000_platdata, } }; static void 
__init vpac270_eth_init(void) { platform_device_register(&vpac270_dm9000_device); } #else static inline void vpac270_eth_init(void) {} #endif /****************************************************************************** * Audio and Touchscreen ******************************************************************************/ #if defined(CONFIG_TOUCHSCREEN_UCB1400) || \ defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE) static pxa2xx_audio_ops_t vpac270_ac97_pdata = { .reset_gpio = 95, }; static struct ucb1400_pdata vpac270_ucb1400_pdata = { .irq = IRQ_GPIO(GPIO113_VPAC270_TS_IRQ), }; static struct platform_device vpac270_ucb1400_device = { .name = "ucb1400_core", .id = -1, .dev = { .platform_data = &vpac270_ucb1400_pdata, }, }; static void __init vpac270_ts_init(void) { pxa_set_ac97_info(&vpac270_ac97_pdata); platform_device_register(&vpac270_ucb1400_device); } #else static inline void vpac270_ts_init(void) {} #endif /****************************************************************************** * RTC ******************************************************************************/ #if defined(CONFIG_RTC_DRV_DS1307) || defined(CONFIG_RTC_DRV_DS1307_MODULE) static struct i2c_board_info __initdata vpac270_i2c_devs[] = { { I2C_BOARD_INFO("ds1339", 0x68), }, }; static void __init vpac270_rtc_init(void) { i2c_register_board_info(0, ARRAY_AND_SIZE(vpac270_i2c_devs)); } #else static inline void vpac270_rtc_init(void) {} #endif /****************************************************************************** * Framebuffer ******************************************************************************/ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info vpac270_lcd_modes[] = { { .pixclock = 57692, .xres = 640, .yres = 480, .bpp = 32, .depth = 18, .left_margin = 144, .right_margin = 32, .upper_margin = 13, .lower_margin = 30, .hsync_len = 32, .vsync_len = 2, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, { /* CRT 640x480 */ .pixclock = 
35000, .xres = 640, .yres = 480, .bpp = 16, .depth = 16, .left_margin = 96, .right_margin = 48, .upper_margin = 33, .lower_margin = 10, .hsync_len = 48, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, { /* CRT 800x600 H=30kHz V=48HZ */ .pixclock = 25000, .xres = 800, .yres = 600, .bpp = 16, .depth = 16, .left_margin = 50, .right_margin = 1, .upper_margin = 21, .lower_margin = 12, .hsync_len = 8, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, { /* CRT 1024x768 H=40kHz V=50Hz */ .pixclock = 15000, .xres = 1024, .yres = 768, .bpp = 16, .depth = 16, .left_margin = 220, .right_margin = 8, .upper_margin = 33, .lower_margin = 2, .hsync_len = 48, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, } }; static struct pxafb_mach_info vpac270_lcd_screen = { .modes = vpac270_lcd_modes, .num_modes = ARRAY_SIZE(vpac270_lcd_modes), .lcd_conn = LCD_COLOR_TFT_18BPP, }; static void vpac270_lcd_power(int on, struct fb_var_screeninfo *info) { gpio_set_value(GPIO81_VPAC270_BKL_ON, on); } static void __init vpac270_lcd_init(void) { int ret; ret = gpio_request(GPIO81_VPAC270_BKL_ON, "BKL-ON"); if (ret) { pr_err("Requesting BKL-ON GPIO failed!\n"); goto err; } ret = gpio_direction_output(GPIO81_VPAC270_BKL_ON, 1); if (ret) { pr_err("Setting BKL-ON GPIO direction failed!\n"); goto err2; } vpac270_lcd_screen.pxafb_lcd_power = vpac270_lcd_power; pxa_set_fb_info(NULL, &vpac270_lcd_screen); return; err2: gpio_free(GPIO81_VPAC270_BKL_ON); err: return; } #else static inline void vpac270_lcd_init(void) {} #endif /****************************************************************************** * PATA IDE ******************************************************************************/ #if defined(CONFIG_PATA_PXA) || defined(CONFIG_PATA_PXA_MODULE) static struct pata_pxa_pdata vpac270_pata_pdata = { .reg_shift = 1, .dma_dreq = 1, .irq_flags = IRQF_TRIGGER_RISING, }; static struct resource vpac270_ide_resources[] = { [0] = { /* I/O 
Base address */ .start = PXA_CS3_PHYS + 0x120, .end = PXA_CS3_PHYS + 0x13f, .flags = IORESOURCE_MEM }, [1] = { /* CTL Base address */ .start = PXA_CS3_PHYS + 0x15c, .end = PXA_CS3_PHYS + 0x15f, .flags = IORESOURCE_MEM }, [2] = { /* DMA Base address */ .start = PXA_CS3_PHYS + 0x20, .end = PXA_CS3_PHYS + 0x2f, .flags = IORESOURCE_DMA }, [3] = { /* IDE IRQ pin */ .start = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ), .end = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ), .flags = IORESOURCE_IRQ } }; static struct platform_device vpac270_ide_device = { .name = "pata_pxa", .num_resources = ARRAY_SIZE(vpac270_ide_resources), .resource = vpac270_ide_resources, .dev = { .platform_data = &vpac270_pata_pdata, .coherent_dma_mask = 0xffffffff, } }; static void __init vpac270_ide_init(void) { platform_device_register(&vpac270_ide_device); } #else static inline void vpac270_ide_init(void) {} #endif /****************************************************************************** * Core power regulator ******************************************************************************/ #if defined(CONFIG_REGULATOR_MAX1586) || \ defined(CONFIG_REGULATOR_MAX1586_MODULE) static struct regulator_consumer_supply vpac270_max1587a_consumers[] = { { .supply = "vcc_core", } }; static struct regulator_init_data vpac270_max1587a_v3_info = { .constraints = { .name = "vcc_core range", .min_uV = 900000, .max_uV = 1705000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .consumer_supplies = vpac270_max1587a_consumers, .num_consumer_supplies = ARRAY_SIZE(vpac270_max1587a_consumers), }; static struct max1586_subdev_data vpac270_max1587a_subdevs[] = { { .name = "vcc_core", .id = MAX1586_V3, .platform_data = &vpac270_max1587a_v3_info, } }; static struct max1586_platform_data vpac270_max1587a_info = { .subdevs = vpac270_max1587a_subdevs, .num_subdevs = ARRAY_SIZE(vpac270_max1587a_subdevs), .v3_gain = MAX1586_GAIN_R24_3k32, /* 730..1550 mV */ }; static struct i2c_board_info __initdata vpac270_pi2c_board_info[] = 
{ { I2C_BOARD_INFO("max1586", 0x14), .platform_data = &vpac270_max1587a_info, }, }; static void __init vpac270_pmic_init(void) { i2c_register_board_info(1, ARRAY_AND_SIZE(vpac270_pi2c_board_info)); } #else static inline void vpac270_pmic_init(void) {} #endif /****************************************************************************** * Machine init ******************************************************************************/ static void __init vpac270_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(vpac270_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); pxa_set_i2c_info(NULL); pxa27x_set_i2c_power_info(NULL); vpac270_pmic_init(); vpac270_lcd_init(); vpac270_mmc_init(); vpac270_nor_init(); vpac270_onenand_init(); vpac270_leds_init(); vpac270_keys_init(); vpac270_uhc_init(); vpac270_udc_init(); vpac270_eth_init(); vpac270_ts_init(); vpac270_rtc_init(); vpac270_ide_init(); } MACHINE_START(VPAC270, "Voipac PXA270") .boot_params = 0xa0000100, .map_io = pxa27x_map_io, .init_irq = pxa27x_init_irq, .timer = &pxa_timer, .init_machine = vpac270_init MACHINE_END
gpl-2.0
agat63/E4GT_ICS_kernel
arch/arm/mach-pxa/colibri-pxa320.c
1820
6451
/* * arch/arm/mach-pxa/colibri-pxa320.c * * Support for Toradex PXA320/310 based Colibri module * * Daniel Mack <daniel@caiaq.de> * Matthias Meier <matthias.j.meier@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/usb/gpio_vbus.h> #include <asm/mach-types.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <mach/pxa3xx-regs.h> #include <mach/mfp-pxa320.h> #include <mach/colibri.h> #include <mach/pxafb.h> #include <mach/ohci.h> #include <mach/audio.h> #include <mach/pxa27x-udc.h> #include <mach/udc.h> #include "generic.h" #include "devices.h" #ifdef CONFIG_MACH_COLIBRI_EVALBOARD static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = { /* MMC */ GPIO22_MMC1_CLK, GPIO23_MMC1_CMD, GPIO18_MMC1_DAT0, GPIO19_MMC1_DAT1, GPIO20_MMC1_DAT2, GPIO21_MMC1_DAT3, GPIO28_GPIO, /* SD detect */ /* UART 1 configuration (may be set by bootloader) */ GPIO99_UART1_CTS, GPIO104_UART1_RTS, GPIO97_UART1_RXD, GPIO98_UART1_TXD, GPIO101_UART1_DTR, GPIO103_UART1_DSR, GPIO100_UART1_DCD, GPIO102_UART1_RI, /* UART 2 configuration */ GPIO109_UART2_CTS, GPIO112_UART2_RTS, GPIO110_UART2_RXD, GPIO111_UART2_TXD, /* UART 3 configuration */ GPIO30_UART3_RXD, GPIO31_UART3_TXD, /* UHC */ GPIO2_2_USBH_PEN, GPIO3_2_USBH_PWR, /* I2C */ GPIO32_I2C_SCL, GPIO33_I2C_SDA, /* PCMCIA */ MFP_CFG(GPIO59, AF7), /* PRST ; AF7 to tristate */ MFP_CFG(GPIO61, AF7), /* PCE1 ; AF7 to tristate */ MFP_CFG(GPIO60, AF7), /* PCE2 ; AF7 to tristate */ MFP_CFG(GPIO62, AF7), /* PCD ; AF7 to tristate */ MFP_CFG(GPIO56, AF7), /* PSKTSEL ; AF7 to tristate */ GPIO27_GPIO, /* RDnWR ; input/tristate */ GPIO50_GPIO, /* PREG ; input/tristate */ GPIO2_RDY, GPIO5_NPIOR, GPIO6_NPIOW, GPIO7_NPIOS16, 
GPIO8_NPWAIT, GPIO29_GPIO, /* PRDY (READY GPIO) */ GPIO57_GPIO, /* PPEN (POWER GPIO) */ GPIO81_GPIO, /* PCD (DETECT GPIO) */ GPIO77_GPIO, /* PRST (RESET GPIO) */ GPIO53_GPIO, /* PBVD1 */ GPIO79_GPIO, /* PBVD2 */ GPIO54_GPIO, /* POE */ }; #else static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = {}; #endif #if defined(CONFIG_AX88796) #define COLIBRI_ETH_IRQ_GPIO mfp_to_gpio(GPIO36_GPIO) /* * Asix AX88796 Ethernet */ static struct ax_plat_data colibri_asix_platdata = { .flags = 0, /* defined later */ .wordlength = 2, }; static struct resource colibri_asix_resource[] = { [0] = { .start = PXA3xx_CS2_PHYS, .end = PXA3xx_CS2_PHYS + (0x20 * 2) - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO), .end = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, } }; static struct platform_device asix_device = { .name = "ax88796", .id = 0, .num_resources = ARRAY_SIZE(colibri_asix_resource), .resource = colibri_asix_resource, .dev = { .platform_data = &colibri_asix_platdata } }; static mfp_cfg_t colibri_pxa320_eth_pin_config[] __initdata = { GPIO3_nCS2, /* AX88796 chip select */ GPIO36_GPIO | MFP_PULL_HIGH /* AX88796 IRQ */ }; static void __init colibri_pxa320_init_eth(void) { colibri_pxa3xx_init_eth(&colibri_asix_platdata); pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_eth_pin_config)); platform_device_register(&asix_device); } #else static inline void __init colibri_pxa320_init_eth(void) {} #endif /* CONFIG_AX88796 */ #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), .gpio_pullup = -1, }; static struct platform_device colibri_pxa320_gpio_vbus = { .name = "gpio-vbus", .id = -1, .dev = { .platform_data = &colibri_pxa320_gpio_vbus_info, }, }; static void colibri_pxa320_udc_command(int cmd) { if (cmd == PXA2XX_UDC_CMD_CONNECT) UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE; else if (cmd == 
PXA2XX_UDC_CMD_DISCONNECT) UP2OCR = UP2OCR_HXOE; } static struct pxa2xx_udc_mach_info colibri_pxa320_udc_info __initdata = { .udc_command = colibri_pxa320_udc_command, .gpio_pullup = -1, }; static void __init colibri_pxa320_init_udc(void) { pxa_set_udc_info(&colibri_pxa320_udc_info); platform_device_register(&colibri_pxa320_gpio_vbus); } #else static inline void colibri_pxa320_init_udc(void) {} #endif #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static mfp_cfg_t colibri_pxa320_lcd_pin_config[] __initdata = { GPIO6_2_LCD_LDD_0, GPIO7_2_LCD_LDD_1, GPIO8_2_LCD_LDD_2, GPIO9_2_LCD_LDD_3, GPIO10_2_LCD_LDD_4, GPIO11_2_LCD_LDD_5, GPIO12_2_LCD_LDD_6, GPIO13_2_LCD_LDD_7, GPIO63_LCD_LDD_8, GPIO64_LCD_LDD_9, GPIO65_LCD_LDD_10, GPIO66_LCD_LDD_11, GPIO67_LCD_LDD_12, GPIO68_LCD_LDD_13, GPIO69_LCD_LDD_14, GPIO70_LCD_LDD_15, GPIO71_LCD_LDD_16, GPIO72_LCD_LDD_17, GPIO73_LCD_CS_N, GPIO74_LCD_VSYNC, GPIO14_2_LCD_FCLK, GPIO15_2_LCD_LCLK, GPIO16_2_LCD_PCLK, GPIO17_2_LCD_BIAS, }; static void __init colibri_pxa320_init_lcd(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_lcd_pin_config)); } #else static inline void colibri_pxa320_init_lcd(void) {} #endif #if defined(CONFIG_SND_AC97_CODEC) || \ defined(CONFIG_SND_AC97_CODEC_MODULE) static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = { GPIO34_AC97_SYSCLK, GPIO35_AC97_SDATA_IN_0, GPIO37_AC97_SDATA_OUT, GPIO38_AC97_SYNC, GPIO39_AC97_BITCLK, GPIO40_AC97_nACRESET }; static inline void __init colibri_pxa320_init_ac97(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_ac97_pin_config)); pxa_set_ac97_info(NULL); } #else static inline void colibri_pxa320_init_ac97(void) {} #endif void __init colibri_pxa320_init(void) { colibri_pxa320_init_eth(); colibri_pxa3xx_init_nand(); colibri_pxa320_init_lcd(); colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO)); colibri_pxa320_init_ac97(); colibri_pxa320_init_udc(); /* Evalboard init */ pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_evalboard_pin_config)); 
colibri_evalboard_init(); } MACHINE_START(COLIBRI320, "Toradex Colibri PXA320") .boot_params = COLIBRI_SDRAM_BASE + 0x100, .init_machine = colibri_pxa320_init, .map_io = pxa3xx_map_io, .init_irq = pxa3xx_init_irq, .timer = &pxa_timer, MACHINE_END
gpl-2.0
The-Sickness/G920T-MM
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
2076
43974
/* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ #include "qlcnic_sriov.h" #include "qlcnic.h" #include <linux/types.h> #define QLCNIC_SRIOV_VF_MAX_MAC 1 #define QLC_VF_MIN_TX_RATE 100 #define QLC_VF_MAX_TX_RATE 9999 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); struct qlcnic_sriov_cmd_handler { int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); }; struct qlcnic_sriov_fw_cmd_handler { u32 cmd; int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); }; static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info, u16 vport_id) { struct qlcnic_cmd_args cmd; int err; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO)) return -ENOMEM; cmd.req.arg[1] = (vport_id << 16) | 0x1; cmd.req.arg[2] = npar_info->bit_offsets; cmd.req.arg[2] |= npar_info->min_tx_bw << 16; cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16); cmd.req.arg[4] = npar_info->max_tx_mac_filters; cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16; cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters | (npar_info->max_rx_ip_addr << 16); cmd.req.arg[6] = npar_info->max_rx_lro_flow | (npar_info->max_rx_status_rings << 16); cmd.req.arg[7] = npar_info->max_rx_buf_rings | (npar_info->max_rx_ques << 16); cmd.req.arg[8] = npar_info->max_tx_vlan_keys; cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16; cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to set vport info, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, struct qlcnic_info *info, u16 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_resources *res = &sriov->ff_max; u32 temp, num_vf_macs, num_vfs, max; int ret = -EIO, vpid, id; struct 
qlcnic_vport *vp; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); if (vpid < 0) return -EINVAL; num_vfs = sriov->num_vfs; max = num_vfs + 1; info->bit_offsets = 0xffff; info->max_tx_ques = res->num_tx_queues / max; info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; if (adapter->ahw->pci_func == func) { temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs); info->max_rx_ucast_mac_filters = temp; temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs); info->max_tx_mac_filters = temp; info->min_tx_bw = 0; info->max_tx_bw = MAX_BW; } else { id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) return id; vp = sriov->vf_info[id].vp; info->min_tx_bw = vp->min_tx_bw; info->max_tx_bw = vp->max_tx_bw; info->max_rx_ucast_mac_filters = num_vf_macs; info->max_tx_mac_filters = num_vf_macs; } info->max_rx_ip_addr = res->num_destip / max; info->max_rx_status_rings = res->num_rx_status_rings / max; info->max_rx_buf_rings = res->num_rx_buf_rings / max; info->max_rx_ques = res->num_rx_queues / max; info->max_rx_lro_flow = res->num_lro_flows_supported / max; info->max_tx_vlan_keys = res->num_txvlan_keys; info->max_local_ipv6_addrs = res->max_local_ipv6_addrs; info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs; ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid); if (ret) return ret; return 0; } static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter, struct qlcnic_info *info) { struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max; ff_max->num_tx_mac_filters = info->max_tx_mac_filters; ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters; ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters; ff_max->num_txvlan_keys = info->max_tx_vlan_keys; ff_max->num_rx_queues = info->max_rx_ques; ff_max->num_tx_queues = info->max_tx_ques; ff_max->num_lro_flows_supported = info->max_rx_lro_flow; ff_max->num_destip = info->max_rx_ip_addr; 
ff_max->num_rx_buf_rings = info->max_rx_buf_rings; ff_max->num_rx_status_rings = info->max_rx_status_rings; ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs; ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs; } static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info) { int err; struct qlcnic_cmd_args cmd; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO)) return -ENOMEM; cmd.req.arg[1] = 0x2; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to get PF info, err=%d\n", err); goto out; } npar_info->total_pf = cmd.rsp.arg[2] & 0xff; npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff; npar_info->max_vports = MSW(cmd.rsp.arg[2]); npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]); npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]); npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]); npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]); npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]); npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]); npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]); npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]); npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]); npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]); npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]); npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]); qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info); dev_info(&adapter->pdev->dev, "\n\ttotal_pf: %d,\n" "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n" "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n", npar_info->total_pf, npar_info->total_rss_engines, npar_info->max_vports, npar_info->max_tx_ques, npar_info->max_tx_mac_filters, 
npar_info->max_rx_mcast_mac_filters, npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, npar_info->max_rx_buf_rings, npar_info->max_rx_ques, npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, npar_info->max_remote_ipv6_addrs); out: qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; int index; if (adapter->ahw->pci_func == func) { sriov->vp_handle = 0; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index < 0) return; vp = sriov->vf_info[index].vp; vp->handle = 0; } } static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter, u16 vport_handle, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; int index; if (adapter->ahw->pci_func == func) { sriov->vp_handle = vport_handle; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index < 0) return; vp = sriov->vf_info[index].vp; vp->handle = vport_handle; } } static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; int index; if (adapter->ahw->pci_func == func) { return sriov->vp_handle; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index >= 0) { vf_info = &sriov->vf_info[index]; return vf_info->vp->handle; } } return -EINVAL; } static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter, u8 flag, u16 func) { struct qlcnic_cmd_args cmd; int ret; int vpid; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT)) return -ENOMEM; if (flag) { cmd.req.arg[3] = func << 8; } else { vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); if (vpid < 0) { ret = -EINVAL; goto out; } cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1; } ret = qlcnic_issue_cmd(adapter, &cmd); 
if (ret) { dev_err(&adapter->pdev->dev, "Failed %s vport, err %d for func 0x%x\n", (flag ? "enable" : "disable"), ret, func); goto out; } if (flag) { vpid = cmd.rsp.arg[2] & 0xffff; qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func); } else { qlcnic_sriov_pf_reset_vport_handle(adapter, func); } out: qlcnic_free_mbx_args(&cmd); return ret; } static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter, u8 enable) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = 0x4; if (enable) cmd.req.arg[1] |= BIT_16; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to configure VLAN filtering, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter, u8 func, u8 enable) { struct qlcnic_cmd_args cmd; int err = -EIO; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH)) return -ENOMEM; cmd.req.arg[0] |= (3 << 29); cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1; if (enable) cmd.req.arg[1] |= BIT_0; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to enable sriov eswitch%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_back_channel *bc = &sriov->bc; int i; for (i = 0; i < sriov->num_vfs; i++) cancel_work_sync(&sriov->vf_info[i].flr_work); destroy_workqueue(bc->bc_flr_wq); } static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter) { struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; struct workqueue_struct *wq; wq = create_singlethread_workqueue("qlcnic-flr"); if (wq == NULL) { dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n"); return -ENOMEM; } bc->bc_flr_wq = wq; return 0; } void 
qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) { u8 func = adapter->ahw->pci_func; if (!qlcnic_sriov_enable_check(adapter)) return; qlcnic_sriov_pf_del_flr_queue(adapter); qlcnic_sriov_cfg_bc_intr(adapter, 0); qlcnic_sriov_pf_config_vport(adapter, 0, func); qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); __qlcnic_sriov_cleanup(adapter); adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); } void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) { if (!qlcnic_sriov_pf_check(adapter)) return; if (!qlcnic_sriov_enable_check(adapter)) return; pci_disable_sriov(adapter->pdev); netdev_info(adapter->netdev, "SR-IOV is disabled successfully on port %d\n", adapter->portnum); } static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_sriov_pf_disable(adapter); qlcnic_sriov_pf_cleanup(adapter); /* After disabling SRIOV re-init the driver in default mode configure opmode based on op_mode of function */ if (qlcnic_83xx_configure_opmode(adapter)) return -EIO; if (netif_running(netdev)) __qlcnic_up(adapter, netdev); return 0; } static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_info nic_info, pf_info, vp_info; int err; u8 func = ahw->pci_func; if (!qlcnic_sriov_enable_check(adapter)) return 0; err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1); if (err) return err; err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1); if (err) goto disable_vlan_filtering; err = qlcnic_sriov_pf_config_vport(adapter, 1, func); if (err) goto disable_eswitch; err = qlcnic_sriov_get_pf_info(adapter, &pf_info); if (err) goto delete_vport; err = qlcnic_get_nic_info(adapter, &nic_info, func); if (err) goto delete_vport; err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func); if (err) goto 
delete_vport; err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) goto delete_vport; ahw->physical_port = (u8) nic_info.phys_port; ahw->switch_mode = nic_info.switch_mode; ahw->max_mtu = nic_info.max_mtu; ahw->capabilities = nic_info.capabilities; ahw->nic_mode = QLC_83XX_SRIOV_MODE; return err; delete_vport: qlcnic_sriov_pf_config_vport(adapter, 0, func); disable_eswitch: qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); disable_vlan_filtering: qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); return err; } static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) { int err; if (!qlcnic_sriov_enable_check(adapter)) return 0; err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) qlcnic_sriov_pf_cleanup(adapter); return err; } static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) { int err = 0; set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; err = qlcnic_sriov_init(adapter, num_vfs); if (err) goto clear_op_mode; err = qlcnic_sriov_pf_create_flr_queue(adapter); if (err) goto sriov_cleanup; err = qlcnic_sriov_pf_init(adapter); if (err) goto del_flr_queue; err = qlcnic_sriov_pf_enable(adapter, num_vfs); return err; del_flr_queue: qlcnic_sriov_pf_del_flr_queue(adapter); sriov_cleanup: __qlcnic_sriov_cleanup(adapter); clear_op_mode: clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; return err; } static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) { struct net_device *netdev = adapter->netdev; int err; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { netdev_err(netdev, "SR-IOV cannot be enabled, when legacy interrupts are enabled\n"); return -EIO; } if (netif_running(netdev)) __qlcnic_down(adapter, netdev); err = __qlcnic_pci_sriov_enable(adapter, num_vfs); if (err) { netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", adapter->portnum); err = -EIO; if (qlcnic_83xx_configure_opmode(adapter)) goto error; 
} else { netdev_info(netdev, "SR-IOV is enabled successfully on port %d\n", adapter->portnum); /* Return number of vfs enabled */ err = num_vfs; } if (netif_running(netdev)) __qlcnic_up(adapter, netdev); error: return err; } int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { struct qlcnic_adapter *adapter = pci_get_drvdata(dev); int err; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; if (num_vfs == 0) err = qlcnic_pci_sriov_disable(adapter); else err = qlcnic_pci_sriov_enable(adapter, num_vfs); clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; } static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_cmd_args cmd; struct qlcnic_vport *vp; int err, id; id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) return id; vp = adapter->ahw->sriov->vf_info[id].vp; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = 0x3 | func << 16; if (vp->vlan_mode == QLC_PVID_MODE) { cmd.req.arg[2] |= BIT_6; cmd.req.arg[3] |= vp->vlan << 8; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter, u16 func) { struct qlcnic_info defvp_info; int err; err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func); if (err) return -EIO; err = qlcnic_sriov_set_vf_acl(adapter, func); if (err) return err; return 0; } static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; u16 func = vf->pci_func; cmd->rsp.arg[0] = trans->req_hdr->cmd_op; cmd->rsp.arg[0] |= (1 << 16); if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { err = qlcnic_sriov_pf_config_vport(adapter, 1, func); if (!err) { err = 
qlcnic_sriov_set_vf_vport_info(adapter, func); if (err) qlcnic_sriov_pf_config_vport(adapter, 0, func); } } else { err = qlcnic_sriov_pf_config_vport(adapter, 0, func); } if (err) goto err_out; cmd->rsp.arg[0] |= (1 << 25); if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else clear_bit(QLC_BC_VF_STATE, &vf->state); return err; err_out: cmd->rsp.arg[0] |= (2 << 25); return err; } static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter, struct qlcnic_vport *vp, u16 func, u16 vlan, u8 op) { struct qlcnic_cmd_args cmd; struct qlcnic_macvlan_mbx mv; u8 *addr; int err; u32 *buf; int vpid; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN)) return -ENOMEM; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); if (vpid < 0) { err = -EINVAL; goto out; } if (vlan) op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); cmd.req.arg[1] = op | (1 << 8) | (3 << 6); cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; addr = vp->mac; mv.vlan = vlan; mv.mac_addr0 = addr[0]; mv.mac_addr1 = addr[1]; mv.mac_addr2 = addr[2]; mv.mac_addr3 = addr[3]; mv.mac_addr4 = addr[4]; mv.mac_addr5 = addr[5]; buf = &cmd.req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "MAC-VLAN %s to CAM failed, err=%d.\n", ((op == 1) ? 
"add " : "delete "), err); out: qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; return 0; } static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_rcv_mbx_out *mbx_out; int err; u16 vlan; err = qlcnic_sriov_validate_create_rx_ctx(cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[6] = vf->vp->handle; err = qlcnic_issue_cmd(adapter, cmd); vlan = vf->vp->vlan; if (!err) { mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1]; vf->rx_ctx_id = mbx_out->ctx_id; qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func, vlan, QLCNIC_MAC_ADD); } else { vf->rx_ctx_id = 0; } return err; } static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; u8 type, *mac; type = cmd->req.arg[1]; switch (type) { case QLCNIC_SET_STATION_MAC: case QLCNIC_SET_FAC_DEF_MAC: cmd->rsp.arg[0] = (2 << 25); break; case QLCNIC_GET_CURRENT_MAC: cmd->rsp.arg[0] = (1 << 25); mac = vf->vp->mac; cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00); cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) | ((mac[3]) << 16 & 0xff0000) | ((mac[2]) << 24 & 0xff000000); } return 0; } static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; return 0; } static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_tx_mbx_out *mbx_out; int err; err = qlcnic_sriov_validate_create_tx_ctx(cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[5] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, 
cmd); if (!err) { mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2]; vf->tx_ctx_id = mbx_out->ctx_id; } else { vf->tx_ctx_id = 0; } return err; } static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; u16 vlan; err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } vlan = vf->vp->vlan; qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func, vlan, QLCNIC_MAC_DEL); cmd->req.arg[1] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, cmd); if (!err) vf->rx_ctx_id = 0; return err; } static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[1] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, cmd); if (!err) vf->tx_ctx_id = 0; return err; } static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_lro(vf, cmd); if 
(err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err = -EIO; u8 op; op = cmd->req.arg[1] & 0xff; cmd->req.arg[1] |= vf->vp->handle << 16; cmd->req.arg[1] |= BIT_31; err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func) return -EINVAL; if (!(cmd->req.arg[1] & BIT_16)) return -EINVAL; if ((cmd->req.arg[1] & 0xff) != 0x1) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] != vf->rx_ctx_id) return -EINVAL; if (cmd->req.arg[2] > adapter->ahw->max_mtu) return -EINVAL; return 0; } static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_mtu(adapter, vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] & BIT_31) { if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func) return -EINVAL; } else { cmd->req.arg[1] |= vf->vp->handle << 16; } return 0; } static int 
qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_get_nic_info(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_rss(vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; u16 ctx_id, pkts, time; ctx_id = cmd->req.arg[1] >> 16; pkts = cmd->req.arg[2] & 0xffff; time = cmd->req.arg[2] >> 16; if (ctx_id != vf->rx_ctx_id) return -EINVAL; if (pkts > coal->rx_packets) return -EINVAL; if (time < coal->rx_time_us) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_macvlan_mbx *macvlan; struct qlcnic_vport *vp = vf->vp; u8 op, new_op; if (!(cmd->req.arg[1] & BIT_8)) return -EINVAL; cmd->req.arg[1] |= (vf->vp->handle << 16); 
cmd->req.arg[1] |= BIT_31; macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2]; if (!(macvlan->mac_addr0 & BIT_0)) { dev_err(&adapter->pdev->dev, "MAC address change is not allowed from VF %d", vf->pci_func); return -EINVAL; } if (vp->vlan_mode == QLC_PVID_MODE) { op = cmd->req.arg[1] & 0x7; cmd->req.arg[1] &= ~0x7; new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; cmd->req.arg[3] |= vp->vlan << 16; cmd->req.arg[1] |= new_op; } return 0; } static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_linkevent(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; cmd->req.arg[1] |= vf->vp->handle << 16; cmd->req.arg[1] |= BIT_31; err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_vport *vp = vf->vp; u8 cmd_op, mode = vp->vlan_mode; cmd_op = trans->req_hdr->cmd_op; cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 
<< 16 | 1 << 25; switch (mode) { case QLC_GUEST_VLAN_MODE: cmd->rsp.arg[1] = mode | 1 << 8; cmd->rsp.arg[2] = 1 << 16; break; case QLC_PVID_MODE: cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16; break; } return 0; } static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf) { struct qlcnic_vport *vp = vf->vp; if (!vp->vlan) return -EINVAL; if (!vf->rx_ctx_id) { vp->vlan = 0; return 0; } qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func, vp->vlan, QLCNIC_MAC_DEL); vp->vlan = 0; qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func, 0, QLCNIC_MAC_ADD); return 0; } static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_vport *vp = vf->vp; int err = -EIO; if (vp->vlan) return err; if (!vf->rx_ctx_id) { vp->vlan = cmd->req.arg[1] >> 16; return 0; } err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func, 0, QLCNIC_MAC_DEL); if (err) return err; vp->vlan = cmd->req.arg[1] >> 16; err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func, vp->vlan, QLCNIC_MAC_ADD); if (err) { qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func, 0, QLCNIC_MAC_ADD); vp->vlan = 0; } return err; } static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_vport *vp = vf->vp; int err = -EIO; u8 op; if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) { cmd->rsp.arg[0] |= 2 << 25; return err; } op = cmd->req.arg[1] & 0xf; if (op) err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd); else err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf); cmd->rsp.arg[0] |= err ? 
2 << 25 : 1 << 25; return err; } static const int qlcnic_pf_passthru_supp_cmds[] = { QLCNIC_CMD_GET_STATISTICS, QLCNIC_CMD_GET_PORT_CONFIG, QLCNIC_CMD_GET_LINK_STATUS, }; static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd}, [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd}, [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd}, [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd}, }; static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = { {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd}, {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd}, {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd}, {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd}, {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd}, {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd}, {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd}, {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd}, {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd}, {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd}, {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd}, {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd}, {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd}, {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd}, {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd}, }; void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { u8 size, cmd_op; cmd_op = trans->req_hdr->cmd_op; if (trans->req_hdr->op_type == QLC_BC_CMD) { size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr); if (cmd_op < size) { qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd); return; } } else { int i; size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr); for (i = 0; i < size; i++) { if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) { 
qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd); return; } } size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds); for (i = 0; i < size; i++) { if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) { qlcnic_issue_cmd(adapter, cmd); return; } } } cmd->rsp.arg[0] |= (0x9 << 25); } void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid; } void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { int vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf) { struct qlcnic_cmd_args cmd; int vpid; if (!vf->rx_ctx_id) return; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) return; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); if (vpid >= 0) { 
cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to delete Tx ctx in firmware for func 0x%x\n", vf->pci_func); else vf->rx_ctx_id = 0; } qlcnic_free_mbx_args(&cmd); } static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf) { struct qlcnic_cmd_args cmd; int vpid; if (!vf->tx_ctx_id) return; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) return; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); if (vpid >= 0) { cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to delete Tx ctx in firmware for func 0x%x\n", vf->pci_func); else vf->tx_ctx_id = 0; } qlcnic_free_mbx_args(&cmd); } static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, struct qlcnic_bc_trans *trans) { struct qlcnic_trans_list *t_list = &vf->rcv_act; unsigned long flag; spin_lock_irqsave(&t_list->lock, flag); __qlcnic_sriov_add_act_list(sriov, vf, trans); spin_unlock_irqrestore(&t_list->lock, flag); return 0; } static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf) { struct qlcnic_adapter *adapter = vf->adapter; qlcnic_sriov_cleanup_list(&vf->rcv_pend); cancel_work_sync(&vf->trans_work); qlcnic_sriov_cleanup_list(&vf->rcv_act); if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { qlcnic_sriov_del_tx_ctx(adapter, vf); qlcnic_sriov_del_rx_ctx(adapter, vf); } qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func); clear_bit(QLC_BC_VF_FLR, &vf->state); if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf, vf->flr_trans); clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state); vf->flr_trans = NULL; } } static void qlcnic_sriov_pf_process_flr(struct work_struct *work) { struct qlcnic_vf_info *vf; vf = container_of(work, struct qlcnic_vf_info, flr_work); __qlcnic_sriov_process_flr(vf); 
return; } static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, work_func_t func) { if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state)) return; INIT_WORK(&vf->flr_work, func); queue_work(sriov->bc.bc_flr_wq, &vf->flr_work); } static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; set_bit(QLC_BC_VF_FLR, &vf->state); clear_bit(QLC_BC_VF_STATE, &vf->state); set_bit(QLC_BC_VF_SOFT_FLR, &vf->state); vf->flr_trans = trans; qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); netdev_info(adapter->netdev, "Software FLR for PCI func %d\n", vf->pci_func); } bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf) { struct qlcnic_bc_hdr *hdr = trans->req_hdr; if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && (hdr->op_type == QLC_BC_CMD) && test_bit(QLC_BC_VF_STATE, &vf->state)) { qlcnic_sriov_handle_soft_flr(adapter, trans, vf); return true; } return false; } void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf) { struct net_device *dev = vf->adapter->netdev; if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { clear_bit(QLC_BC_VF_FLR, &vf->state); return; } if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) { netdev_info(dev, "FLR for PCI func %d in progress\n", vf->pci_func); return; } qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); } void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_sriov *sriov = ahw->sriov; struct qlcnic_vf_info *vf; u16 num_vfs = sriov->num_vfs; int i; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; vf->rx_ctx_id = 0; vf->tx_ctx_id = 0; cancel_work_sync(&vf->flr_work); __qlcnic_sriov_process_flr(vf); 
clear_bit(QLC_BC_VF_STATE, &vf->state); } qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func); QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8); } int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err; if (!qlcnic_sriov_enable_check(adapter)) return 0; ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; err = qlcnic_sriov_pf_init(adapter); if (err) return err; dev_info(&adapter->pdev->dev, "%s: op_mode %d\n", __func__, ahw->op_mode); return err; } int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; int i, num_vfs = sriov->num_vfs; struct qlcnic_vf_info *vf_info; u8 *curr_mac; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (!is_valid_ether_addr(mac) || vf >= num_vfs) return -EINVAL; if (!compare_ether_addr(adapter->mac_addr, mac)) { netdev_err(netdev, "MAC address is already in use by the PF\n"); return -EINVAL; } for (i = 0; i < num_vfs; i++) { vf_info = &sriov->vf_info[i]; if (!compare_ether_addr(vf_info->vp->mac, mac)) { netdev_err(netdev, "MAC address is already in use by VF %d\n", i); return -EINVAL; } } vf_info = &sriov->vf_info[vf]; curr_mac = vf_info->vp->mac; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { netdev_err(netdev, "MAC address change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", vf); return -EOPNOTSUPP; } memcpy(curr_mac, mac, netdev->addr_len); netdev_info(netdev, "MAC Address %pM is configured for VF %d\n", mac, vf); return 0; } int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; struct qlcnic_info nic_info; struct qlcnic_vport *vp; u16 vpid; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs) return -EINVAL; if (tx_rate >= 10000 || tx_rate < 100) { netdev_err(netdev, "Invalid Tx rate, allowed range is [%d - %d]", QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE); return -EINVAL; } if (tx_rate == 0) tx_rate = 10000; vf_info = &sriov->vf_info[vf]; vp = vf_info->vp; vpid = vp->handle; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) return -EIO; nic_info.max_tx_bw = tx_rate / 100; nic_info.bit_offsets = BIT_0; if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) return -EIO; } vp->max_tx_bw = tx_rate / 100; netdev_info(netdev, "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", tx_rate, vp->max_tx_bw, vf); return 0; } int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; struct qlcnic_vport *vp; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs || qos > 7) return -EINVAL; if (vlan > MAX_VLAN_ID) { netdev_err(netdev, "Invalid VLAN ID, allowed range is [0 - %d]\n", MAX_VLAN_ID); return -EINVAL; } vf_info = &sriov->vf_info[vf]; vp = vf_info->vp; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { netdev_err(netdev, "VLAN change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", vf); return -EOPNOTSUPP; } switch (vlan) { case 4095: vp->vlan_mode = QLC_GUEST_VLAN_MODE; break; case 0: vp->vlan_mode = QLC_NO_VLAN_MODE; vp->vlan = 0; vp->qos = 0; break; default: vp->vlan_mode = QLC_PVID_MODE; vp->vlan = vlan; vp->qos = qos; } netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n", vlan, qos, vf); return 0; } int qlcnic_sriov_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs) return -EINVAL; vp = sriov->vf_info[vf].vp; memcpy(&ivi->mac, vp->mac, ETH_ALEN); ivi->vlan = vp->vlan; ivi->qos = vp->qos; if (vp->max_tx_bw == MAX_BW) ivi->tx_rate = 0; else ivi->tx_rate = vp->max_tx_bw * 100; ivi->vf = vf; return 0; }
gpl-2.0
nics21212/android_kernel_samsung_msm8660-common
arch/arm/mach-s3c2416/irq.c
2844
6332
/* linux/arch/arm/mach-s3c2416/irq.c * * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>, * as part of OpenInkpot project * Copyright (c) 2009 Promwad Innovation Company * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/sysdev.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/regs-irq.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/irq.h> #define INTMSK(start, end) ((1 << ((end) + 1 - (start))) - 1) static inline void s3c2416_irq_demux(unsigned int irq, unsigned int len) { unsigned int subsrc, submsk; unsigned int end; /* read the current pending interrupts, and the mask * for what it is available */ subsrc = __raw_readl(S3C2410_SUBSRCPND); submsk = __raw_readl(S3C2410_INTSUBMSK); subsrc &= ~submsk; subsrc >>= (irq - S3C2410_IRQSUB(0)); subsrc &= (1 << len)-1; end = len + irq; for (; irq < end && subsrc; irq++) { if (subsrc & 1) generic_handle_irq(irq); subsrc >>= 1; } } /* WDT/AC97 sub interrupts */ static void s3c2416_irq_demux_wdtac97(unsigned int irq, struct irq_desc *desc) { s3c2416_irq_demux(IRQ_S3C2443_WDT, 4); } #define INTMSK_WDTAC97 (1UL 
<< (IRQ_WDT - IRQ_EINT0)) #define SUBMSK_WDTAC97 INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97) static void s3c2416_irq_wdtac97_mask(struct irq_data *data) { s3c_irqsub_mask(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97); } static void s3c2416_irq_wdtac97_unmask(struct irq_data *data) { s3c_irqsub_unmask(data->irq, INTMSK_WDTAC97); } static void s3c2416_irq_wdtac97_ack(struct irq_data *data) { s3c_irqsub_maskack(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97); } static struct irq_chip s3c2416_irq_wdtac97 = { .irq_mask = s3c2416_irq_wdtac97_mask, .irq_unmask = s3c2416_irq_wdtac97_unmask, .irq_ack = s3c2416_irq_wdtac97_ack, }; /* LCD sub interrupts */ static void s3c2416_irq_demux_lcd(unsigned int irq, struct irq_desc *desc) { s3c2416_irq_demux(IRQ_S3C2443_LCD1, 4); } #define INTMSK_LCD (1UL << (IRQ_LCD - IRQ_EINT0)) #define SUBMSK_LCD INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4) static void s3c2416_irq_lcd_mask(struct irq_data *data) { s3c_irqsub_mask(data->irq, INTMSK_LCD, SUBMSK_LCD); } static void s3c2416_irq_lcd_unmask(struct irq_data *data) { s3c_irqsub_unmask(data->irq, INTMSK_LCD); } static void s3c2416_irq_lcd_ack(struct irq_data *data) { s3c_irqsub_maskack(data->irq, INTMSK_LCD, SUBMSK_LCD); } static struct irq_chip s3c2416_irq_lcd = { .irq_mask = s3c2416_irq_lcd_mask, .irq_unmask = s3c2416_irq_lcd_unmask, .irq_ack = s3c2416_irq_lcd_ack, }; /* DMA sub interrupts */ static void s3c2416_irq_demux_dma(unsigned int irq, struct irq_desc *desc) { s3c2416_irq_demux(IRQ_S3C2443_DMA0, 6); } #define INTMSK_DMA (1UL << (IRQ_S3C2443_DMA - IRQ_EINT0)) #define SUBMSK_DMA INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5) static void s3c2416_irq_dma_mask(struct irq_data *data) { s3c_irqsub_mask(data->irq, INTMSK_DMA, SUBMSK_DMA); } static void s3c2416_irq_dma_unmask(struct irq_data *data) { s3c_irqsub_unmask(data->irq, INTMSK_DMA); } static void s3c2416_irq_dma_ack(struct irq_data *data) { s3c_irqsub_maskack(data->irq, INTMSK_DMA, SUBMSK_DMA); } static struct irq_chip s3c2416_irq_dma = { 
.irq_mask = s3c2416_irq_dma_mask, .irq_unmask = s3c2416_irq_dma_unmask, .irq_ack = s3c2416_irq_dma_ack, }; /* UART3 sub interrupts */ static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc) { s3c2416_irq_demux(IRQ_S3C2443_RX3, 3); } #define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0)) #define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0))) static void s3c2416_irq_uart3_mask(struct irq_data *data) { s3c_irqsub_mask(data->irq, INTMSK_UART3, SUBMSK_UART3); } static void s3c2416_irq_uart3_unmask(struct irq_data *data) { s3c_irqsub_unmask(data->irq, INTMSK_UART3); } static void s3c2416_irq_uart3_ack(struct irq_data *data) { s3c_irqsub_maskack(data->irq, INTMSK_UART3, SUBMSK_UART3); } static struct irq_chip s3c2416_irq_uart3 = { .irq_mask = s3c2416_irq_uart3_mask, .irq_unmask = s3c2416_irq_uart3_unmask, .irq_ack = s3c2416_irq_uart3_ack, }; /* IRQ initialisation code */ static int __init s3c2416_add_sub(unsigned int base, void (*demux)(unsigned int, struct irq_desc *), struct irq_chip *chip, unsigned int start, unsigned int end) { unsigned int irqno; irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq); irq_set_chained_handler(base, demux); for (irqno = start; irqno <= end; irqno++) { irq_set_chip_and_handler(irqno, chip, handle_level_irq); set_irq_flags(irqno, IRQF_VALID); } return 0; } static int __init s3c2416_irq_add(struct sys_device *sysdev) { printk(KERN_INFO "S3C2416: IRQ Support\n"); s3c2416_add_sub(IRQ_LCD, s3c2416_irq_demux_lcd, &s3c2416_irq_lcd, IRQ_S3C2443_LCD2, IRQ_S3C2443_LCD4); s3c2416_add_sub(IRQ_S3C2443_DMA, s3c2416_irq_demux_dma, &s3c2416_irq_dma, IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5); s3c2416_add_sub(IRQ_S3C2443_UART3, s3c2416_irq_demux_uart3, &s3c2416_irq_uart3, IRQ_S3C2443_RX3, IRQ_S3C2443_ERR3); s3c2416_add_sub(IRQ_WDT, s3c2416_irq_demux_wdtac97, &s3c2416_irq_wdtac97, IRQ_S3C2443_WDT, IRQ_S3C2443_AC97); return 0; } static struct sysdev_driver s3c2416_irq_driver = { .add = s3c2416_irq_add, }; 
static int __init s3c2416_irq_init(void) { return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_irq_driver); } arch_initcall(s3c2416_irq_init);
gpl-2.0
oliliango/linux-cedarview_gfx
drivers/isdn/sc/init.c
5148
13352
/* * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/slab.h> #include "includes.h" #include "hardware.h" #include "card.h" MODULE_DESCRIPTION("ISDN4Linux: Driver for Spellcaster card"); MODULE_AUTHOR("Spellcaster Telecommunications Inc."); MODULE_LICENSE("GPL"); board *sc_adapter[MAX_CARDS]; int cinst; static char devname[] = "scX"; static const char version[] = "2.0b1"; static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCommute/BRI" }; /* insmod set parameters */ static unsigned int io[] = {0, 0, 0, 0}; static unsigned char irq[] = {0, 0, 0, 0}; static unsigned long ram[] = {0, 0, 0, 0}; static bool do_reset = 0; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(ram, int, NULL, 0); module_param(do_reset, bool, 0); static int identify_board(unsigned long, unsigned int); static int __init sc_init(void) { int b = -1; int i, j; int status = -ENODEV; unsigned long memsize = 0; unsigned long features = 0; isdn_if *interface; unsigned char channels; unsigned char pgport; unsigned long magic; int model; int last_base = IOBASE_MIN; int probe_exhasted = 0; #ifdef MODULE pr_info("SpellCaster ISA ISDN Adapter Driver rev. %s Loaded\n", version); #else pr_info("SpellCaster ISA ISDN Adapter Driver rev. %s\n", version); #endif pr_info("Copyright (C) 1996 SpellCaster Telecommunications Inc.\n"); while (b++ < MAX_CARDS - 1) { pr_debug("Probing for adapter #%d\n", b); /* * Initialize reusable variables */ model = -1; magic = 0; channels = 0; pgport = 0; /* * See if we should probe for IO base */ pr_debug("I/O Base for board %d is 0x%x, %s probe\n", b, io[b], io[b] == 0 ? 
"will" : "won't"); if (io[b]) { /* * No, I/O Base has been provided */ for (i = 0; i < MAX_IO_REGS - 1; i++) { if (!request_region(io[b] + i * 0x400, 1, "sc test")) { pr_debug("request_region for 0x%x failed\n", io[b] + i * 0x400); io[b] = 0; break; } else release_region(io[b] + i * 0x400, 1); } /* * Confirm the I/O Address with a test */ if (io[b] == 0) { pr_debug("I/O Address invalid.\n"); continue; } outb(0x18, io[b] + 0x400 * EXP_PAGE0); if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { pr_debug("I/O Base 0x%x fails test\n", io[b] + 0x400 * EXP_PAGE0); continue; } } else { /* * Yes, probe for I/O Base */ if (probe_exhasted) { pr_debug("All probe addresses exhasted, skipping\n"); continue; } pr_debug("Probing for I/O...\n"); for (i = last_base; i <= IOBASE_MAX; i += IOBASE_OFFSET) { int found_io = 1; if (i == IOBASE_MAX) { probe_exhasted = 1; /* No more addresses to probe */ pr_debug("End of Probes\n"); } last_base = i + IOBASE_OFFSET; pr_debug(" checking 0x%x...", i); for (j = 0; j < MAX_IO_REGS - 1; j++) { if (!request_region(i + j * 0x400, 1, "sc test")) { pr_debug("Failed\n"); found_io = 0; break; } else release_region(i + j * 0x400, 1); } if (found_io) { io[b] = i; outb(0x18, io[b] + 0x400 * EXP_PAGE0); if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { pr_debug("Failed by test\n"); continue; } pr_debug("Passed\n"); break; } } if (probe_exhasted) { continue; } } /* * See if we should probe for shared RAM */ if (do_reset) { pr_debug("Doing a SAFE probe reset\n"); outb(0xFF, io[b] + RESET_OFFSET); msleep_interruptible(10000); } pr_debug("RAM Base for board %d is 0x%lx, %s probe\n", b, ram[b], ram[b] == 0 ? 
"will" : "won't"); if (ram[b]) { /* * No, the RAM base has been provided * Just look for a signature and ID the * board model */ if (request_region(ram[b], SRAM_PAGESIZE, "sc test")) { pr_debug("request_region for RAM base 0x%lx succeeded\n", ram[b]); model = identify_board(ram[b], io[b]); release_region(ram[b], SRAM_PAGESIZE); } } else { /* * Yes, probe for free RAM and look for * a signature and id the board model */ for (i = SRAM_MIN; i < SRAM_MAX; i += SRAM_PAGESIZE) { pr_debug("Checking RAM address 0x%x...\n", i); if (request_region(i, SRAM_PAGESIZE, "sc test")) { pr_debug(" request_region succeeded\n"); model = identify_board(i, io[b]); release_region(i, SRAM_PAGESIZE); if (model >= 0) { pr_debug(" Identified a %s\n", boardname[model]); ram[b] = i; break; } pr_debug(" Unidentifed or inaccessible\n"); continue; } pr_debug(" request failed\n"); } } /* * See if we found free RAM and the board model */ if (!ram[b] || model < 0) { /* * Nope, there was no place in RAM for the * board, or it couldn't be identified */ pr_debug("Failed to find an adapter at 0x%lx\n", ram[b]); continue; } /* * Set the board's magic number, memory size and page register */ switch (model) { case PRI_BOARD: channels = 23; magic = 0x20000; memsize = 0x100000; features = PRI_FEATURES; break; case BRI_BOARD: case POTS_BOARD: channels = 2; magic = 0x60000; memsize = 0x10000; features = BRI_FEATURES; break; } switch (ram[b] >> 12 & 0x0F) { case 0x0: pr_debug("RAM Page register set to EXP_PAGE0\n"); pgport = EXP_PAGE0; break; case 0x4: pr_debug("RAM Page register set to EXP_PAGE1\n"); pgport = EXP_PAGE1; break; case 0x8: pr_debug("RAM Page register set to EXP_PAGE2\n"); pgport = EXP_PAGE2; break; case 0xC: pr_debug("RAM Page register set to EXP_PAGE3\n"); pgport = EXP_PAGE3; break; default: pr_debug("RAM base address doesn't fall on 16K boundary\n"); continue; } pr_debug("current IRQ: %d b: %d\n", irq[b], b); /* * Make sure we got an IRQ */ if (!irq[b]) { /* * No interrupt could be used */ 
pr_debug("Failed to acquire an IRQ line\n"); continue; } /* * Horray! We found a board, Make sure we can register * it with ISDN4Linux */ interface = kzalloc(sizeof(isdn_if), GFP_KERNEL); if (interface == NULL) { /* * Oops, can't malloc isdn_if */ continue; } interface->owner = THIS_MODULE; interface->hl_hdrlen = 0; interface->channels = channels; interface->maxbufsize = BUFFER_SIZE; interface->features = features; interface->writebuf_skb = sndpkt; interface->writecmd = NULL; interface->command = command; strcpy(interface->id, devname); interface->id[2] = '0' + cinst; /* * Allocate the board structure */ sc_adapter[cinst] = kzalloc(sizeof(board), GFP_KERNEL); if (sc_adapter[cinst] == NULL) { /* * Oops, can't alloc memory for the board */ kfree(interface); continue; } spin_lock_init(&sc_adapter[cinst]->lock); if (!register_isdn(interface)) { /* * Oops, couldn't register for some reason */ kfree(interface); kfree(sc_adapter[cinst]); continue; } sc_adapter[cinst]->card = interface; sc_adapter[cinst]->driverId = interface->channels; strcpy(sc_adapter[cinst]->devicename, interface->id); sc_adapter[cinst]->nChannels = channels; sc_adapter[cinst]->ramsize = memsize; sc_adapter[cinst]->shmem_magic = magic; sc_adapter[cinst]->shmem_pgport = pgport; sc_adapter[cinst]->StartOnReset = 1; /* * Allocate channels status structures */ sc_adapter[cinst]->channel = kzalloc(sizeof(bchan) * channels, GFP_KERNEL); if (sc_adapter[cinst]->channel == NULL) { /* * Oops, can't alloc memory for the channels */ indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */ kfree(interface); kfree(sc_adapter[cinst]); continue; } /* * Lock down the hardware resources */ sc_adapter[cinst]->interrupt = irq[b]; if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler, IRQF_DISABLED, interface->id, (void *)(unsigned long) cinst)) { kfree(sc_adapter[cinst]->channel); indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */ kfree(interface); kfree(sc_adapter[cinst]); continue; } 
sc_adapter[cinst]->iobase = io[b]; for (i = 0; i < MAX_IO_REGS - 1; i++) { sc_adapter[cinst]->ioport[i] = io[b] + i * 0x400; request_region(sc_adapter[cinst]->ioport[i], 1, interface->id); pr_debug("Requesting I/O Port %#x\n", sc_adapter[cinst]->ioport[i]); } sc_adapter[cinst]->ioport[IRQ_SELECT] = io[b] + 0x2; request_region(sc_adapter[cinst]->ioport[IRQ_SELECT], 1, interface->id); pr_debug("Requesting I/O Port %#x\n", sc_adapter[cinst]->ioport[IRQ_SELECT]); sc_adapter[cinst]->rambase = ram[b]; request_region(sc_adapter[cinst]->rambase, SRAM_PAGESIZE, interface->id); pr_info(" %s (%d) - %s %d channels IRQ %d, I/O Base 0x%x, RAM Base 0x%lx\n", sc_adapter[cinst]->devicename, sc_adapter[cinst]->driverId, boardname[model], channels, irq[b], io[b], ram[b]); /* * reset the adapter to put things in motion */ reset(cinst); cinst++; status = 0; } if (status) pr_info("Failed to find any adapters, driver unloaded\n"); return status; } static void __exit sc_exit(void) { int i, j; for (i = 0; i < cinst; i++) { pr_debug("Cleaning up after adapter %d\n", i); /* * kill the timers */ del_timer(&(sc_adapter[i]->reset_timer)); del_timer(&(sc_adapter[i]->stat_timer)); /* * Tell I4L we're toast */ indicate_status(i, ISDN_STAT_STOP, 0, NULL); indicate_status(i, ISDN_STAT_UNLOAD, 0, NULL); /* * Release shared RAM */ release_region(sc_adapter[i]->rambase, SRAM_PAGESIZE); /* * Release the IRQ */ free_irq(sc_adapter[i]->interrupt, NULL); /* * Reset for a clean start */ outb(0xFF, sc_adapter[i]->ioport[SFT_RESET]); /* * Release the I/O Port regions */ for (j = 0; j < MAX_IO_REGS - 1; j++) { release_region(sc_adapter[i]->ioport[j], 1); pr_debug("Releasing I/O Port %#x\n", sc_adapter[i]->ioport[j]); } release_region(sc_adapter[i]->ioport[IRQ_SELECT], 1); pr_debug("Releasing I/O Port %#x\n", sc_adapter[i]->ioport[IRQ_SELECT]); /* * Release any memory we alloced */ kfree(sc_adapter[i]->channel); kfree(sc_adapter[i]->card); kfree(sc_adapter[i]); } pr_info("SpellCaster ISA ISDN Adapter Driver 
Unloaded.\n"); } static int identify_board(unsigned long rambase, unsigned int iobase) { unsigned int pgport; unsigned long sig; DualPortMemory *dpm; RspMessage rcvmsg; ReqMessage sndmsg; HWConfig_pl hwci; int x; pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n", rambase, iobase); /* * Enable the base pointer */ outb(rambase >> 12, iobase + 0x2c00); switch (rambase >> 12 & 0x0F) { case 0x0: pgport = iobase + PG0_OFFSET; pr_debug("Page Register offset is 0x%x\n", PG0_OFFSET); break; case 0x4: pgport = iobase + PG1_OFFSET; pr_debug("Page Register offset is 0x%x\n", PG1_OFFSET); break; case 0x8: pgport = iobase + PG2_OFFSET; pr_debug("Page Register offset is 0x%x\n", PG2_OFFSET); break; case 0xC: pgport = iobase + PG3_OFFSET; pr_debug("Page Register offset is 0x%x\n", PG3_OFFSET); break; default: pr_debug("Invalid rambase 0x%lx\n", rambase); return -1; } /* * Try to identify a PRI card */ outb(PRI_BASEPG_VAL, pgport); msleep_interruptible(1000); sig = readl(rambase + SIG_OFFSET); pr_debug("Looking for a signature, got 0x%lx\n", sig); if (sig == SIGNATURE) return PRI_BOARD; /* * Try to identify a PRI card */ outb(BRI_BASEPG_VAL, pgport); msleep_interruptible(1000); sig = readl(rambase + SIG_OFFSET); pr_debug("Looking for a signature, got 0x%lx\n", sig); if (sig == SIGNATURE) return BRI_BOARD; return -1; /* * Try to spot a card */ sig = readl(rambase + SIG_OFFSET); pr_debug("Looking for a signature, got 0x%lx\n", sig); if (sig != SIGNATURE) return -1; dpm = (DualPortMemory *) rambase; memset(&sndmsg, 0, MSG_LEN); sndmsg.msg_byte_cnt = 3; sndmsg.type = cmReqType1; sndmsg.class = cmReqClass0; sndmsg.code = cmReqHWConfig; memcpy_toio(&(dpm->req_queue[dpm->req_head++]), &sndmsg, MSG_LEN); outb(0, iobase + 0x400); pr_debug("Sent HWConfig message\n"); /* * Wait for the response */ x = 0; while ((inb(iobase + FIFOSTAT_OFFSET) & RF_HAS_DATA) && x < 100) { schedule_timeout_interruptible(1); x++; } if (x == 100) { pr_debug("Timeout waiting for response\n"); return -1; } 
memcpy_fromio(&rcvmsg, &(dpm->rsp_queue[dpm->rsp_tail]), MSG_LEN); pr_debug("Got HWConfig response, status = 0x%x\n", rcvmsg.rsp_status); memcpy(&hwci, &(rcvmsg.msg_data.HWCresponse), sizeof(HWConfig_pl)); pr_debug("Hardware Config: Interface: %s, RAM Size: %ld, Serial: %s\n" " Part: %s, Rev: %s\n", hwci.st_u_sense ? "S/T" : "U", hwci.ram_size, hwci.serial_no, hwci.part_no, hwci.rev_no); if (!strncmp(PRI_PARTNO, hwci.part_no, 6)) return PRI_BOARD; if (!strncmp(BRI_PARTNO, hwci.part_no, 6)) return BRI_BOARD; return -1; } module_init(sc_init); module_exit(sc_exit);
gpl-2.0
jcadduono/nethunter_kernel_klte
mm/sparse-vmemmap.c
5404
5999
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}

/* Optional contiguous buffer that vmemmap_alloc_block_buf() carves up. */
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	struct page *page;

	/* If the main allocator is up use that, fallback to bootmem. */
	if (!slab_is_available())
		return __earlyonly_bootmem_alloc(node, size, size,
						 __pa(MAX_DMA_ADDRESS));

	if (node_state(node, N_HIGH_MEMORY))
		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
					get_order(size));
	else
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				   get_order(size));
	return page ? page_address(page) : NULL;
}

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *chunk;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* Carve the next naturally-aligned chunk out of the buffer;
	 * fall back to a fresh block when the buffer is exhausted. */
	chunk = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (chunk + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = chunk + size;
	return chunk;
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	/* Warn when the backing page lives on a distant NUMA node. */
	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	if (pte_none(*pte)) {
		pte_t entry;
		void *block = vmemmap_alloc_block_buf(PAGE_SIZE, node);

		if (!block)
			return NULL;
		entry = pfn_pte(__pa(block) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	if (pmd_none(*pmd)) {
		void *block = vmemmap_alloc_block(PAGE_SIZE, node);

		if (!block)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, block);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);

	if (pud_none(*pud)) {
		void *block = vmemmap_alloc_block(PAGE_SIZE, node);

		if (!block)
			return NULL;
		pud_populate(&init_mm, pud, block);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);

	if (pgd_none(*pgd)) {
		void *block = vmemmap_alloc_block(PAGE_SIZE, node);

		if (!block)
			return NULL;
		pgd_populate(&init_mm, pgd, block);
	}
	return pgd;
}

/*
 * Instantiate the vmemmap for [start_page, start_page + size) one base
 * page at a time, walking down pgd -> pud -> pmd -> pte and allocating
 * any missing level.  Returns 0 on success, -ENOMEM on allocation failure.
 */
int __meminit vmemmap_populate_basepages(struct page *start_page,
					 unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}
	return 0;
}

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);

	return error ? NULL : map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	/* One PMD-aligned slab per section; try to grab them all at once
	 * so per-section allocations come out of a single buffer. */
	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE,
						      __pa(MAX_DMA_ADDRESS));
	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* need to free left buf */
		free_bootmem(__pa(vmemmap_buf),
			     vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
gpl-2.0
YagiAsuka/sample
drivers/isdn/divert/divert_procfs.c
7708
9035
/* $Id: divert_procfs.c,v 1.11.6.2 2001/09/23 22:24:36 kai Exp $ * * Filesystem handling for the diversion supplementary services. * * Copyright 1998 by Werner Cornelius (werner@isdn4linux.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #else #include <linux/fs.h> #endif #include <linux/sched.h> #include <linux/isdnif.h> #include <net/net_namespace.h> #include <linux/mutex.h> #include "isdn_divert.h" /*********************************/ /* Variables for interface queue */ /*********************************/ ulong if_used = 0; /* number of interface users */ static DEFINE_MUTEX(isdn_divert_mutex); static struct divert_info *divert_info_head = NULL; /* head of queue */ static struct divert_info *divert_info_tail = NULL; /* pointer to last entry */ static DEFINE_SPINLOCK(divert_info_lock);/* lock for queue */ static wait_queue_head_t rd_queue; /*********************************/ /* put an info buffer into queue */ /*********************************/ void put_info_buffer(char *cp) { struct divert_info *ib; unsigned long flags; if (if_used <= 0) return; if (!cp) return; if (!*cp) return; if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) return; /* no memory */ strcpy(ib->info_start, cp); /* set output string */ ib->next = NULL; spin_lock_irqsave(&divert_info_lock, flags); ib->usage_cnt = if_used; if (!divert_info_head) divert_info_head = ib; /* new head */ else divert_info_tail->next = ib; /* follows existing messages */ divert_info_tail = ib; /* new tail */ /* delete old entrys */ while (divert_info_head->next) { if ((divert_info_head->usage_cnt <= 0) && (divert_info_head->next->usage_cnt <= 0)) { ib = divert_info_head; divert_info_head = divert_info_head->next; kfree(ib); } else break; } /* divert_info_head->next */ 
spin_unlock_irqrestore(&divert_info_lock, flags); wake_up_interruptible(&(rd_queue)); } /* put_info_buffer */ #ifdef CONFIG_PROC_FS /**********************************/ /* deflection device read routine */ /**********************************/ static ssize_t isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct divert_info *inf; int len; if (!*((struct divert_info **) file->private_data)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&(rd_queue)); } if (!(inf = *((struct divert_info **) file->private_data))) return (0); inf->usage_cnt--; /* new usage count */ file->private_data = &inf->next; /* next structure */ if ((len = strlen(inf->info_start)) <= count) { if (copy_to_user(buf, inf->info_start, len)) return -EFAULT; *off += len; return (len); } return (0); } /* isdn_divert_read */ /**********************************/ /* deflection device write routine */ /**********************************/ static ssize_t isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { return (-ENODEV); } /* isdn_divert_write */ /***************************************/ /* select routines for various kernels */ /***************************************/ static unsigned int isdn_divert_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; poll_wait(file, &(rd_queue), wait); /* mask = POLLOUT | POLLWRNORM; */ if (*((struct divert_info **) file->private_data)) { mask |= POLLIN | POLLRDNORM; } return mask; } /* isdn_divert_poll */ /****************/ /* Open routine */ /****************/ static int isdn_divert_open(struct inode *ino, struct file *filep) { unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used++; if (divert_info_head) filep->private_data = &(divert_info_tail->next); else filep->private_data = &divert_info_head; spin_unlock_irqrestore(&divert_info_lock, flags); /* start_divert(); */ return nonseekable_open(ino, filep); } /* isdn_divert_open */ 
/*******************/ /* close routine */ /*******************/ static int isdn_divert_close(struct inode *ino, struct file *filep) { struct divert_info *inf; unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used--; inf = *((struct divert_info **) filep->private_data); while (inf) { inf->usage_cnt--; inf = inf->next; } if (if_used <= 0) while (divert_info_head) { inf = divert_info_head; divert_info_head = divert_info_head->next; kfree(inf); } spin_unlock_irqrestore(&divert_info_lock, flags); return (0); } /* isdn_divert_close */ /*********/ /* IOCTL */ /*********/ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) { divert_ioctl dioctl; int i; unsigned long flags; divert_rule *rulep; char *cp; if (copy_from_user(&dioctl, (void __user *) arg, sizeof(dioctl))) return -EFAULT; switch (cmd) { case IIOCGETVER: dioctl.drv_version = DIVERT_IIOC_VERSION; /* set version */ break; case IIOCGETDRV: if ((dioctl.getid.drvid = divert_if.name_to_drv(dioctl.getid.drvnam)) < 0) return (-EINVAL); break; case IIOCGETNAM: cp = divert_if.drv_to_name(dioctl.getid.drvid); if (!cp) return (-EINVAL); if (!*cp) return (-EINVAL); strcpy(dioctl.getid.drvnam, cp); break; case IIOCGETRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); dioctl.getsetrule.rule = *rulep; /* copy data */ break; case IIOCMODRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); spin_lock_irqsave(&divert_lock, flags); *rulep = dioctl.getsetrule.rule; /* copy data */ spin_unlock_irqrestore(&divert_lock, flags); return (0); /* no copy required */ break; case IIOCINSRULE: return (insertrule(dioctl.getsetrule.ruleidx, &dioctl.getsetrule.rule)); break; case IIOCDELRULE: return (deleterule(dioctl.getsetrule.ruleidx)); break; case IIOCDODFACT: return (deflect_extern_action(dioctl.fwd_ctrl.subcmd, dioctl.fwd_ctrl.callid, dioctl.fwd_ctrl.to_nr)); case IIOCDOCFACT: case IIOCDOCFDIS: case IIOCDOCFINT: if 
(!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) return (-EINVAL); /* invalid driver */ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == sizeof(dioctl.cf_ctrl.msn)) return -EINVAL; if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == sizeof(dioctl.cf_ctrl.fwd_nr)) return -EINVAL; if ((i = cf_command(dioctl.cf_ctrl.drvid, (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, dioctl.cf_ctrl.cfproc, dioctl.cf_ctrl.msn, dioctl.cf_ctrl.service, dioctl.cf_ctrl.fwd_nr, &dioctl.cf_ctrl.procid))) return (i); break; default: return (-EINVAL); } /* switch cmd */ return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0; } /* isdn_divert_ioctl */ static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg) { long ret; mutex_lock(&isdn_divert_mutex); ret = isdn_divert_ioctl_unlocked(file, cmd, arg); mutex_unlock(&isdn_divert_mutex); return ret; } static const struct file_operations isdn_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = isdn_divert_read, .write = isdn_divert_write, .poll = isdn_divert_poll, .unlocked_ioctl = isdn_divert_ioctl, .open = isdn_divert_open, .release = isdn_divert_close, }; /****************************/ /* isdn subdir in /proc/net */ /****************************/ static struct proc_dir_entry *isdn_proc_entry = NULL; static struct proc_dir_entry *isdn_divert_entry = NULL; #endif /* CONFIG_PROC_FS */ /***************************************************************************/ /* divert_dev_init must be called before the proc filesystem may be used */ /***************************************************************************/ int divert_dev_init(void) { init_waitqueue_head(&rd_queue); #ifdef CONFIG_PROC_FS isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); if (!isdn_proc_entry) return (-1); isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO, isdn_proc_entry, &isdn_fops); if (!isdn_divert_entry) { remove_proc_entry("isdn", init_net.proc_net); return (-1); } #endif 
/* CONFIG_PROC_FS */ return (0); } /* divert_dev_init */ /***************************************************************************/ /* divert_dev_deinit must be called before leaving isdn when included as */ /* a module. */ /***************************************************************************/ int divert_dev_deinit(void) { #ifdef CONFIG_PROC_FS remove_proc_entry("divert", isdn_proc_entry); remove_proc_entry("isdn", init_net.proc_net); #endif /* CONFIG_PROC_FS */ return (0); } /* divert_dev_deinit */
gpl-2.0
barracuda7/android_kernel_samsung_exynos5420
arch/mips/powertv/ioremap.c
7964
4733
/* * ioremap.c * * Support for mapping between dma_addr_t values a phys_addr_t values. * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: David VomLehn <dvomlehn@cisco.com> * * Description: Defines the platform resources for the SA settop. * * NOTE: The bootloader allocates persistent memory at an address which is * 16 MiB below the end of the highest address in KSEG0. All fixed * address memory reservations must avoid this region. */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/mach-powertv/ioremap.h> /* * Define the sizes of and masks for grains in physical and DMA space. The * values are the same but the types are not. 
*/ #define IOR_PHYS_GRAIN ((phys_addr_t) 1 << IOR_LSBITS) #define IOR_PHYS_GRAIN_MASK (IOR_PHYS_GRAIN - 1) #define IOR_DMA_GRAIN ((dma_addr_t) 1 << IOR_LSBITS) #define IOR_DMA_GRAIN_MASK (IOR_DMA_GRAIN - 1) /* * Values that, when accessed by an index derived from a phys_addr_t and * added to phys_addr_t value, yield a DMA address */ struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA]; EXPORT_SYMBOL(_ior_phys_to_dma); /* * Values that, when accessed by an index derived from a dma_addr_t and * added to that dma_addr_t value, yield a physical address */ struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS]; EXPORT_SYMBOL(_ior_dma_to_phys); /** * setup_dma_to_phys - set up conversion from DMA to physical addresses * @dma_idx: Top IOR_LSBITS bits of the DMA address, i.e. an index * into the array _dma_to_phys. * @delta: Value that, when added to the DMA address, will yield the * physical address * @s: Number of bytes in the section of memory with the given delta * between DMA and physical addresses. */ static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s) { int dma_idx, first_idx, last_idx; phys_addr_t first, last; /* * Calculate the first and last indices, rounding the first up and * the second down. */ first = dma & ~IOR_DMA_GRAIN_MASK; last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK; first_idx = first >> IOR_LSBITS; /* Convert to indices */ last_idx = last >> IOR_LSBITS; for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++) _ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT; } /** * setup_phys_to_dma - set up conversion from DMA to physical addresses * @phys_idx: Top IOR_LSBITS bits of the DMA address, i.e. an index * into the array _phys_to_dma. * @delta: Value that, when added to the DMA address, will yield the * physical address * @s: Number of bytes in the section of memory with the given delta * between DMA and physical addresses. 
*/ static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s) { int phys_idx, first_idx, last_idx; phys_addr_t first, last; /* * Calculate the first and last indices, rounding the first up and * the second down. */ first = phys & ~IOR_PHYS_GRAIN_MASK; last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK; first_idx = first >> IOR_LSBITS; /* Convert to indices */ last_idx = last >> IOR_LSBITS; for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++) _ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT; } /** * ioremap_add_map - add to the physical and DMA address conversion arrays * @phys: Process's view of the address of the start of the memory chunk * @dma: DMA address of the start of the memory chunk * @size: Size, in bytes, of the chunk of memory * * NOTE: It might be obvious, but the assumption is that all @size bytes have * the same offset between the physical address and the DMA address. */ void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size) { if (size == 0) return; if ((dma & IOR_DMA_GRAIN_MASK) != 0 || (phys & IOR_PHYS_GRAIN_MASK) != 0 || (size & IOR_PHYS_GRAIN_MASK) != 0) pr_crit("Memory allocation must be in chunks of 0x%x bytes\n", IOR_PHYS_GRAIN); setup_dma_to_phys(dma, phys - dma, size); setup_phys_to_dma(phys, dma - phys, size); }
gpl-2.0
sachinthomaspj/android_kernel_htc_pico
arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
8732
64294
#include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7722.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, 
PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN, PTJ1_IN, PTJ0_IN, PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN, PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN, PTR2_IN, PTS4_IN, PTS2_IN, PTS1_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLDOWN_BEGIN, PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD, PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD, PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD, PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD, PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD, PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD, PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD, PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD, PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD, PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD, PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD, PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD, PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD, PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD, PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD, PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD, PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD, PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD, PINMUX_INPUT_PULLDOWN_END, 
PINMUX_INPUT_PULLUP_BEGIN, PTC7_IN_PU, PTC5_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTQ0_IN_PU, PTR2_IN_PU, PTX6_IN_PU, PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA5_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT, PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT, PTS3_OUT, PTS2_OUT, PTS0_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PINMUX_OUTPUT_END, PINMUX_MARK_BEGIN, SCIF0_TXD_MARK, SCIF0_RXD_MARK, SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK, SCIF1_TXD_MARK, SCIF1_RXD_MARK, SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK, SCIF2_TXD_MARK, SCIF2_RXD_MARK, SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK, SIOTXD_MARK, SIORXD_MARK, SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK, SIOSCK_MARK, 
SIOMCK_MARK, VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK, VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK, VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK, VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK, VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK, VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK, VIO_HD2_MARK, VIO_CLK2_MARK, LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK, LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK, LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK, LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK, LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK, LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK, LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK, LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK, LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK, LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK, LCDCS2_MARK, IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK, HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK, HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK, HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK, HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK, HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK, IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK, IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK, SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK, SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK, SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK, SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK, AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, DACK_MARK, DREQ0_MARK, DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK, DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK, DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK, DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK, DV_D3_MARK, DV_D2_MARK, 
DV_D1_MARK, DV_D0_MARK, STATUS0_MARK, PDSTATUS_MARK, SIOF0_MCK_MARK, SIOF0_SCK_MARK, SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK, SIOF0_TXD_MARK, SIOF0_RXD_MARK, SIOF1_MCK_MARK, SIOF1_SCK_MARK, SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK, SIOF1_TXD_MARK, SIOF1_RXD_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK, IRDA_IN_MARK, IRDA_OUT_MARK, TPUTO_MARK, FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK, NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK, FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK, KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK, KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK, KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK, PINMUX_MARK_END, PINMUX_FUNCTION_BEGIN, VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4, VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK, HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48, IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4, SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK, A25, A24, A23, A22, IRQ5, IRQ4_BS, PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR, SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD, AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0, LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS, LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC, STATUS0, PDSTATUS, IRQ1, IRQ0, SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC, SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0, LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12, LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8, LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4, LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0, HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56, SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN, SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0, LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2, SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD, SCIF0_SCK_TPUTO, 
SCIF0_RXD, SCIF0_TXD, FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE, NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8, FRB_VIO_CLK2, FCE_VIO_HD2, NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11, VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK, VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD, VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS, CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20, LCDD19_DV_CLKI, LCDD18_DV_CLK, KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0, KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6, PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2, PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD, PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV, PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8, HIZA14_KEYSC, HIZA14_HIZ, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, 
HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, HIZB4_SIUA, HIZB4_HIZ, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ, HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, PINMUX_FUNCTION_END, }; static pinmux_enum_t pinmux_data[] = { /* PTA */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD), /* PTB */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), /* PTC */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU), PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT), /* PTD */ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU), PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU), PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU), PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU), PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU), PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU), PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU), PINMUX_DATA(PTD0_DATA, PTD0_OUT), /* PTE */ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD), PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD), 
PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD), PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD), PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD), PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD), /* PTF */ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD), PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD), PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD), PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD), PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD), PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD), PINMUX_DATA(PTF0_DATA, PTF0_OUT), /* PTG */ PINMUX_DATA(PTG4_DATA, PTG4_OUT), PINMUX_DATA(PTG3_DATA, PTG3_OUT), PINMUX_DATA(PTG2_DATA, PTG2_OUT), PINMUX_DATA(PTG1_DATA, PTG1_OUT), PINMUX_DATA(PTG0_DATA, PTG0_OUT), /* PTH */ PINMUX_DATA(PTH7_DATA, PTH7_OUT), PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD), PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD), PINMUX_DATA(PTH4_DATA, PTH4_OUT), PINMUX_DATA(PTH3_DATA, PTH3_OUT), PINMUX_DATA(PTH2_DATA, PTH2_OUT), PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD), PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD), /* PTJ */ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT), PINMUX_DATA(PTJ6_DATA, PTJ6_OUT), PINMUX_DATA(PTJ5_DATA, PTJ5_OUT), PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU), PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU), /* PTK */ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD), PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD), PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD), PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD), PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD), PINMUX_DATA(PTK1_DATA, PTK1_OUT), PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD), /* PTL */ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD), PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD), PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD), PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD), PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, 
PTL3_IN_PD), PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD), PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD), PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD), /* PTM */ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD), PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD), PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD), PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD), PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD), PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD), PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD), PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD), /* PTN */ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN), PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN), PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN), PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN), PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN), PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN), PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN), PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN), /* PTQ */ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT), PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD), PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD), PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD), PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD), PINMUX_DATA(PTQ1_DATA, PTQ1_OUT), PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU), /* PTR */ PINMUX_DATA(PTR4_DATA, PTR4_OUT), PINMUX_DATA(PTR3_DATA, PTR3_OUT), PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU), PINMUX_DATA(PTR1_DATA, PTR1_OUT), PINMUX_DATA(PTR0_DATA, PTR0_OUT), /* PTS */ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD), PINMUX_DATA(PTS3_DATA, PTS3_OUT), PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD), PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD), PINMUX_DATA(PTS0_DATA, PTS0_OUT), /* PTT */ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD), PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD), PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD), PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD), PINMUX_DATA(PTT0_DATA, PTT0_OUT), /* PTU 
*/ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD), PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD), PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD), PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD), PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD), /* PTV */ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD), PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD), PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD), PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD), PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD), /* PTW */ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD), PINMUX_DATA(PTW5_DATA, PTW5_OUT), PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD), PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD), PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD), PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD), PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD), /* PTX */ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD), PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD), PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD), PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD), PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD), PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD), PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD), /* PTY */ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU), PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU), PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU), PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU), PINMUX_DATA(PTY1_DATA, PTY1_OUT), PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU), /* PTZ */ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU), PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU), PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU), PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU), PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU), /* SCIF0 */ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD), PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD), PINMUX_DATA(SCIF0_RTS_MARK, 
PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD), PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD), PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO), /* SCIF1 */ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD), PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD), PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS), PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS), PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK), /* SCIF2 */ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD), PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD), PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS), PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS), PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK), /* SIO */ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD), PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR), PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT), PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6), /* CEU */ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14), PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8), PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK), PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD), PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD), PINMUX_DATA(VIO_D4_MARK, VIO_D4), PINMUX_DATA(VIO_D3_MARK, VIO_D3), PINMUX_DATA(VIO_D2_MARK, VIO_D2), 
PINMUX_DATA(VIO_D1_MARK, VIO_D1), PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK), PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS), PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS), PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD), PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS), PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS), PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK), PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD), PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FCE_VIO_HD2), PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FRB_VIO_CLK2), /* LCDC */ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23), PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22), PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21), PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20), PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI), PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK), PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD17_DV_HSYNC), PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD16_DV_VSYNC), PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15), PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14), PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13), PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12), PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11), PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10), PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9), PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8), PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7), PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6), 
PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5), PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4), PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3), PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2), PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1), PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0), PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK), /* Main LCD */ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2), PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN), /* Main LCD - RGB Mode */ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS), PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS), /* Main LCD - SYS Mode */ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS), PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS), PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD), /* Sub LCD - SYS Mode */ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2), PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK), PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2), /* BSC */ PINMUX_DATA(IOIS16_MARK, IOIS16), PINMUX_DATA(A25_MARK, A25), PINMUX_DATA(A24_MARK, A24), PINMUX_DATA(A23_MARK, A23), PINMUX_DATA(A22_MARK, A22), PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS), PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2), PINMUX_DATA(WAIT_MARK, WAIT), PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B), /* SBSC */ 
PINMUX_DATA(HPD63_MARK, HPD63), PINMUX_DATA(HPD62_MARK, HPD62), PINMUX_DATA(HPD61_MARK, HPD61), PINMUX_DATA(HPD60_MARK, HPD60), PINMUX_DATA(HPD59_MARK, HPD59), PINMUX_DATA(HPD58_MARK, HPD58), PINMUX_DATA(HPD57_MARK, HPD57), PINMUX_DATA(HPD56_MARK, HPD56), PINMUX_DATA(HPD55_MARK, HPD55), PINMUX_DATA(HPD54_MARK, HPD54), PINMUX_DATA(HPD53_MARK, HPD53), PINMUX_DATA(HPD52_MARK, HPD52), PINMUX_DATA(HPD51_MARK, HPD51), PINMUX_DATA(HPD50_MARK, HPD50), PINMUX_DATA(HPD49_MARK, HPD49), PINMUX_DATA(HPD48_MARK, HPD48), PINMUX_DATA(HPDQM7_MARK, HPDQM7), PINMUX_DATA(HPDQM6_MARK, HPDQM6), PINMUX_DATA(HPDQM5_MARK, HPDQM5), PINMUX_DATA(HPDQM4_MARK, HPDQM4), /* IRQ */ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0), PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1), PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2), PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3, HIZC11_IRQ3, PTQ0), PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS), PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5), PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7), /* SDHI */ PINMUX_DATA(SDHICD_MARK, SDHICD), PINMUX_DATA(SDHIWP_MARK, SDHIWP), PINMUX_DATA(SDHID3_MARK, SDHID3), PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2), PINMUX_DATA(SDHID1_MARK, SDHID1), PINMUX_DATA(SDHID0_MARK, SDHID0), PINMUX_DATA(SDHICMD_MARK, SDHICMD), PINMUX_DATA(SDHICLK_MARK, SDHICLK), /* SIU - Port A */ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK), PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD), PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0), PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, 
HIZB4_SIUA, PTK0), /* SIU - Port B */ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT), PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD), PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR), PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6), PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6), /* AUD */ PINMUX_DATA(AUDSYNC_MARK, AUDSYNC), PINMUX_DATA(AUDATA3_MARK, AUDATA3), PINMUX_DATA(AUDATA2_MARK, AUDATA2), PINMUX_DATA(AUDATA1_MARK, AUDATA1), PINMUX_DATA(AUDATA0_MARK, AUDATA0), /* DMAC */ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK), PINMUX_DATA(DREQ0_MARK, DREQ0), /* VOU */ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI), PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK), PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC), PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC), PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15), PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14), PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13), PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12), PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11), PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10), PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9), PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8), PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7), PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6), PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5), PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4), PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3), PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2), PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1), PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0), /* CPG */ PINMUX_DATA(STATUS0_MARK, STATUS0), PINMUX_DATA(PDSTATUS_MARK, PDSTATUS), /* SIOF0 */ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0), PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, 
SIOF0_SCK_TS_SCK), PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC), PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST), PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_SIOF0_TXD, PTQ1), PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_SIOF0_RXD, PTQ2), /* SIOF1 */ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIOF1_MCK, PTK0), PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK), PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD), /* SIM */ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0), PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1), PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST), /* TSIF */ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2), PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK), PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC), /* IRDA */ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2), PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_IRDA_OUT, PTQ1), /* TPU */ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO), /* FLCTL */ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2), PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14), PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(NAF0_MARK, PSE0_NAF0, 
HIZB0_VIO, NAF0_VIO_D8), PINMUX_DATA(FCDE_MARK, FCDE), PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(FSC_MARK, FSC), PINMUX_DATA(FWE_MARK, FWE), PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2), /* KEYSC */ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1), PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2), PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3), PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7), PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0), PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1), PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2), PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3), PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6), PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5), }; static struct pinmux_gpio pinmux_gpios[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE7, 
PTE7_DATA), PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA), PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA), PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), PINMUX_GPIO(GPIO_PTL2, PTL2_DATA), PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTN */ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA), 
PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), /* PTQ */ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* PTW */ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), /* PTX */ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), /* PTY */ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), 
PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), /* PTZ */ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA), PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), /* SCIF0 */ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), /* SCIF1 */ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), /* SCIF2 */ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK), /* SIO */ PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK), PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK), PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK), PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK), PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK), PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK), PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK), /* CEU */ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK), PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK), PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK), PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK), PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK), PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK), PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK), PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK), PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK), PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK), PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK), PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK), PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK), 
PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK), PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK), PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK), PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK), PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK), PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK), PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK), PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK), PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK), PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK), PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK), PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK), PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK), PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK), PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK), PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK), PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK), PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK), PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK), PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK), PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK), PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK), PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK), PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK), PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK), PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK), PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK), PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK), PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK), PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK), PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK), PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK), PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK), PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK), PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK), PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK), PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK), /* Main LCD */ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK), PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK), /* Main LCD - RGB Mode */ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK), PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK), PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK), /* Main LCD - SYS Mode */ 
PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK), PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK), PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK), PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK), /* Sub LCD - SYS Mode */ PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK), PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK), PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK), PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK), PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK), /* BSC */ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_BS, BS_MARK), PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK), PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK), PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK), /* SBSC */ PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK), PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK), PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK), PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK), PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK), PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK), PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK), PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK), PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK), PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK), PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK), PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK), PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK), PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK), PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK), PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK), PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK), PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK), PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK), PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK), /* IRQ */ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK), PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK), PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK), PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK), PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK), PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK), PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK), PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK), /* SDHI */ PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK), PINMUX_GPIO(GPIO_FN_SDHIWP, 
SDHIWP_MARK), PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK), PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK), PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK), PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK), PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK), PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK), /* SIU - Port A */ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK), PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK), PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK), PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK), PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK), PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK), PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK), PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK), /* SIU - Port B */ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK), PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK), PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK), PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK), PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK), PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK), PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK), PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK), /* AUD */ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), /* DMAC */ PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), /* VOU */ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK), PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK), PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK), PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK), PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK), PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK), PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK), PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK), PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK), PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK), PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK), PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK), PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK), PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK), 
PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK), PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK), PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK), PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK), PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK), /* CPG */ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK), /* SIOF0 */ PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK), /* SIOF1 */ PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK), /* SIM */ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK), PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK), PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK), /* TSIF */ PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK), PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK), PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK), PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK), /* IRDA */ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK), PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK), /* TPU */ PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK), /* FLCTL */ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK), PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK), PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK), PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK), PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK), PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK), PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK), PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK), PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK), PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK), PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK), PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK), PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), 
PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), /* KEYSC */ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK), PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK), PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK), PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK), PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) { VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN, VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN, VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN, VIO_D4, 0, PTA4_IN_PD, PTA4_IN, VIO_D3, 0, PTA3_IN_PD, PTA3_IN, VIO_D2, 0, PTA2_IN_PD, PTA2_IN, VIO_D1, 0, PTA1_IN_PD, PTA1_IN, VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN } }, { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) { HPD55, PTB7_OUT, 0, PTB7_IN, HPD54, PTB6_OUT, 0, PTB6_IN, HPD53, PTB5_OUT, 0, PTB5_IN, HPD52, PTB4_OUT, 0, PTB4_IN, HPD51, PTB3_OUT, 0, PTB3_IN, HPD50, PTB2_OUT, 0, PTB2_IN, HPD49, PTB1_OUT, 0, PTB1_IN, HPD48, PTB0_OUT, 0, PTB0_IN } }, { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) { 0, 0, PTC7_IN_PU, PTC7_IN, 0, 0, 0, 0, IOIS16, 0, PTC5_IN_PU, PTC5_IN, HPDQM7, PTC4_OUT, 0, PTC4_IN, HPDQM6, PTC3_OUT, 0, PTC3_IN, HPDQM5, PTC2_OUT, 0, PTC2_IN, 0, 0, 0, 0, HPDQM4, PTC0_OUT, 0, PTC0_IN } }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) { SDHICD, 0, PTD7_IN_PU, PTD7_IN, SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN, SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN, IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN, SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN, SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN, SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN, SDHICLK, PTD0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) { A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN, A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN, A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN, A22, 
PTE4_OUT, PTE4_IN_PD, PTE4_IN, 0, 0, 0, 0, 0, 0, 0, 0, IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN, IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN } }, { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) { 0, 0, 0, 0, PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN, SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN, SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN, SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN, SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN, SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN, SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, AUDSYNC, PTG4_OUT, 0, 0, AUDATA3, PTG3_OUT, 0, 0, AUDATA2, PTG2_OUT, 0, 0, AUDATA1, PTG1_OUT, 0, 0, AUDATA0, PTG0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) { LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0, LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN, LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN, LCDDISP_LCDRS, PTH4_OUT, 0, 0, LCDHSYN_LCDCS, PTH3_OUT, 0, 0, LCDDON_LCDDON2, PTH2_OUT, 0, 0, LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN, LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN } }, { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) { STATUS0, PTJ7_OUT, 0, 0, 0, PTJ6_OUT, 0, 0, PDSTATUS, PTJ5_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN, IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN } }, { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) { 0, 0, 0, 0, SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN, SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN, SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN, SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN, SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN, SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0, PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN } }, { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) { LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN, LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN, LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN, LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN, LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN, LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, 
PTL2_IN, LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN, LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN } }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) { LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN, LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN, LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN, LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN, LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN, LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN, LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN, LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN } }, { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) { HPD63, PTN7_OUT, 0, PTN7_IN, HPD62, PTN6_OUT, 0, PTN6_IN, HPD61, PTN5_OUT, 0, PTN5_IN, HPD60, PTN4_OUT, 0, PTN4_IN, HPD59, PTN3_OUT, 0, PTN3_IN, HPD58, PTN2_OUT, 0, PTN2_IN, HPD57, PTN1_OUT, 0, PTN1_IN, HPD56, PTN0_OUT, 0, PTN0_IN } }, { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) { 0, 0, 0, 0, SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0, SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN, SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN, SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN, PTQ2, 0, PTQ2_IN_PD, PTQ2_IN, PTQ1, PTQ1_OUT, 0, 0, PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN } }, { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, LCDRD, PTR4_OUT, 0, 0, CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0, WAIT, 0, PTR2_IN_PU, PTR2_IN, LCDDCK_LCDWR, PTR1_OUT, 0, 0, LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN, SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0, SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN, SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN, SCIF0_TXD, PTS0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN, FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN, FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN, DREQ0, 0, PTT1_IN_PD, PTT1_IN, FCDE, PTT0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF2_VIO_D10, PTU4_OUT, 
PTU4_IN_PD, PTU4_IN, NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN, NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN, FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN, FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN } }, { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN, NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN, NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN, NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN, NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN } }, { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) { 0, 0, 0, 0, VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN, VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0, VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN, VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN, VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN, VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN, VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN } }, { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) { 0, 0, 0, 0, CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN, LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN, LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN, LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN, LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN, LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN, LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN } }, { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN, KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN, KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN, KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN, KEYOUT1, PTY1_OUT, 0, 0, KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN } }, { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN, KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN, KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN, KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN, KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) { PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, 0, 0, 0, 0, 0, 0, 0, 0, PSA9_IRQ4, PSA9_BS, 0, 0, 0, 0, 0, 0, 0, 0, PSA4_IRQ2, 
PSA4_SDHID2, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) { PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD } }, { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) { PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSC0_NAF, PSC0_VIO } }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) { 0, 0, 0, 0, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, 0, 0, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, 0, 0, PSD0_LCDD19_LCDD0, PSD0_DV } }, { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) { PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8 } }, { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) { 0, 0, HIZA14_KEYSC, HIZA14_HIZ, 0, 0, 0, 0, 0, 0, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, HIZB4_SIUA, HIZB4_HIZ, 0, 0, 0, 0, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ } }, { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) { HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xa4050120, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) { PTC7_DATA, 0, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, 0, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) { PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, 0, 0, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) { 0, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) { 0, 0, 0, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) { PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0, 0, 0, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) { 0, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) { PTL7_DATA, PTL6_DATA, PTL5_DATA, 
PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) { PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) { 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) { 0, 0, 0, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) { 0, 0, 0, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) { 0, 0, 0, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) { 0, 0, 0, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) { 0, 0, 0, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) { 0, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) { 0, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) { 0, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) { 0, 0, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; static struct pinmux_info sh7722_pinmux_info = { .name = "sh7722_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, 
PINMUX_FUNCTION_END }, .first_gpio = GPIO_PTA7, .last_gpio = GPIO_FN_KEYOUT5_IN5, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7722_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
drivers/misc/mediatek/trustzone/kree_mem.c
29
9068
#include <trustzone/kree/mem.h> #include <trustzone/kree/system.h> #include <trustzone/tz_cross/ta_mem.h> #include <linux/mm.h> #define DBG_KREE_MEM // notiec: handle type is the same static inline TZ_RESULT _allocFunc (uint32_t cmd, KREE_SESSION_HANDLE session, uint32_t *mem_handle, uint32_t alignment, uint32_t size, char *dbg) { MTEEC_PARAM p[4]; TZ_RESULT ret; if ((session == 0) || (mem_handle == NULL) || (size == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } p[0].value.a = alignment; p[1].value.a = size; ret = KREE_TeeServiceCall(session, cmd, TZ_ParamTypes3( TZPT_VALUE_INPUT, TZPT_VALUE_INPUT, TZPT_VALUE_OUTPUT), p); if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] %s Error: %d\n", dbg, ret); #endif return ret; } *mem_handle = (KREE_SECUREMEM_HANDLE) p[2].value.a; return TZ_RESULT_SUCCESS; } static inline TZ_RESULT _handleOpFunc (uint32_t cmd, KREE_SESSION_HANDLE session, uint32_t mem_handle, char *dbg) { MTEEC_PARAM p[4]; TZ_RESULT ret; if ((session == 0) || (mem_handle == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } p[0].value.a = (uint32_t) mem_handle; ret = KREE_TeeServiceCall(session, cmd, TZ_ParamTypes1( TZPT_VALUE_INPUT), p); if (ret < 0) { #ifdef DBG_KREE_MEM printk("[kree] %s Error: %d\n", dbg, ret); #endif return ret; } return TZ_RESULT_SUCCESS; } static inline TZ_RESULT _handleOpFunc_1 (uint32_t cmd, KREE_SESSION_HANDLE session, uint32_t mem_handle, uint32_t *count, char *dbg) { MTEEC_PARAM p[4]; TZ_RESULT ret; if ((session == 0) || (mem_handle == 0) || (count == NULL)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } p[0].value.a = (uint32_t) mem_handle; ret = KREE_TeeServiceCall(session, cmd, TZ_ParamTypes2( TZPT_VALUE_INPUT, TZPT_VALUE_OUTPUT), p); if (ret < 0) { #ifdef DBG_KREE_MEM printk("[kree] %s Error: %d\n", dbg, ret); #endif *count = 0; return ret; } *count = p[1].value.a; return TZ_RESULT_SUCCESS; } TZ_RESULT kree_register_sharedmem (KREE_SESSION_HANDLE session, KREE_SHAREDMEM_HANDLE *mem_handle, void *start, uint32_t size, 
void *map_p) { MTEEC_PARAM p[4]; TZ_RESULT ret; p[0].value.a = (unsigned long)start; p[0].value.b = (unsigned long)start >> 32; p[1].value.a = size; p[2].mem.buffer = map_p; if(map_p != NULL) p[2].mem.size = ((*(uint32_t *)map_p)+1)*sizeof(uint32_t); else p[2].mem.size = 0; ret = KREE_TeeServiceCall(session, TZCMD_MEM_SHAREDMEM_REG, TZ_ParamTypes4(TZPT_VALUE_INPUT, TZPT_VALUE_INPUT, TZPT_MEM_INPUT, TZPT_VALUE_OUTPUT), p); if (ret != TZ_RESULT_SUCCESS) { *mem_handle = 0; return ret; } *mem_handle = p[3].value.a; return TZ_RESULT_SUCCESS; } TZ_RESULT kree_unregister_sharedmem (KREE_SESSION_HANDLE session, KREE_SHAREDMEM_HANDLE mem_handle) { MTEEC_PARAM p[4]; TZ_RESULT ret; p[0].value.a = (uint32_t) mem_handle; ret = KREE_TeeServiceCall(session, TZCMD_MEM_SHAREDMEM_UNREG, TZ_ParamTypes1(TZPT_VALUE_INPUT), p); if (ret != TZ_RESULT_SUCCESS) { return ret; } return TZ_RESULT_SUCCESS; } /* APIs */ TZ_RESULT KREE_RegisterSharedmem (KREE_SESSION_HANDLE session, KREE_SHAREDMEM_HANDLE *shm_handle, KREE_SHAREDMEM_PARAM *param) { TZ_RESULT ret; if ((session == 0) || (shm_handle == NULL) || (param->buffer == NULL) || (param->size == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } // only for kmalloc if ((param->buffer >= (void *)PAGE_OFFSET) && (param->buffer < high_memory)) { ret = kree_register_sharedmem (session, shm_handle, param->buffer, param->size, 0); // set 0 for no remap... 
if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] KREE_RegisterSharedmem Error: %d\n", ret); #endif return ret; } } else { printk("[kree] KREE_RegisterSharedmem Error: support kmalloc only!!!\n"); return TZ_RESULT_ERROR_NOT_IMPLEMENTED; } return TZ_RESULT_SUCCESS; } TZ_RESULT KREE_UnregisterSharedmem (KREE_SESSION_HANDLE session, KREE_SHAREDMEM_HANDLE shm_handle) { TZ_RESULT ret; if ((session == 0) || (shm_handle == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } ret = kree_unregister_sharedmem (session, shm_handle); if (ret < 0) { #ifdef DBG_KREE_MEM printk("[kree] KREE_UnregisterSharedmem Error: %d\n", ret); #endif return ret; } return TZ_RESULT_SUCCESS; } TZ_RESULT KREE_AllocSecuremem (KREE_SESSION_HANDLE session, KREE_SECUREMEM_HANDLE *mem_handle, uint32_t alignment, uint32_t size) { TZ_RESULT ret; ret = _allocFunc (TZCMD_MEM_SECUREMEM_ALLOC, session, mem_handle, alignment, size, "KREE_AllocSecuremem"); return ret; } TZ_RESULT KREE_ReferenceSecuremem (KREE_SESSION_HANDLE session, KREE_SECUREMEM_HANDLE mem_handle) { TZ_RESULT ret; ret = _handleOpFunc (TZCMD_MEM_SECUREMEM_REF, session, mem_handle, "KREE_ReferenceSecuremem"); return ret; } TZ_RESULT KREE_UnreferenceSecuremem (KREE_SESSION_HANDLE session, KREE_SECUREMEM_HANDLE mem_handle) { TZ_RESULT ret; uint32_t count = 0; ret = _handleOpFunc_1 (TZCMD_MEM_SECUREMEM_UNREF, session, mem_handle, &count, "KREE_UnreferenceSecuremem"); #ifdef DBG_KREE_MEM printk ("KREE_UnreferenceSecuremem: count = 0x%x\n", count); #endif return ret; } TZ_RESULT KREE_AllocSecurechunkmem (KREE_SESSION_HANDLE session, KREE_SECUREMEM_HANDLE *cm_handle, uint32_t alignment, uint32_t size) { TZ_RESULT ret; ret = _allocFunc (TZCMD_MEM_SECURECM_ALLOC, session, cm_handle, alignment, size, "KREE_AllocSecurechunkmem"); return ret; } TZ_RESULT KREE_ReferenceSecurechunkmem (KREE_SESSION_HANDLE session, KREE_SECURECM_HANDLE cm_handle) { TZ_RESULT ret; ret = _handleOpFunc (TZCMD_MEM_SECURECM_REF, session, cm_handle, 
"KREE_ReferenceSecurechunkmem"); return ret; } TZ_RESULT KREE_UnreferenceSecurechunkmem (KREE_SESSION_HANDLE session, KREE_SECURECM_HANDLE cm_handle) { TZ_RESULT ret; uint32_t count = 0; ret = _handleOpFunc_1 (TZCMD_MEM_SECURECM_UNREF, session, cm_handle, &count, "KREE_UnreferenceSecurechunkmem"); #ifdef DBG_KREE_MEM printk ("KREE_UnreferenceSecurechunkmem: count = 0x%x\n", count); #endif return ret; } TZ_RESULT KREE_ReadSecurechunkmem (KREE_SESSION_HANDLE session, uint32_t offset, uint32_t size, void *buffer) { MTEEC_PARAM p[4]; TZ_RESULT ret; if ((session == 0) || (size == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } p[0].value.a = offset; p[1].value.a = size; p[2].mem.buffer = buffer; p[2].mem.size = size; // fix me!!!! ret = KREE_TeeServiceCall(session, TZCMD_MEM_SECURECM_READ, TZ_ParamTypes3(TZPT_VALUE_INPUT, TZPT_VALUE_INPUT, TZPT_MEM_OUTPUT), p); if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] KREE_ReadSecurechunkmem Error: %d\n", ret); #endif return ret; } return TZ_RESULT_SUCCESS; } TZ_RESULT KREE_WriteSecurechunkmem (KREE_SESSION_HANDLE session, uint32_t offset, uint32_t size, void *buffer) { MTEEC_PARAM p[4]; TZ_RESULT ret; if ((session == 0) || (size == 0)) { return TZ_RESULT_ERROR_BAD_PARAMETERS; } p[0].value.a = offset; p[1].value.a = size; p[2].mem.buffer = buffer; p[2].mem.size = size; // fix me!!!! 
ret = KREE_TeeServiceCall(session, TZCMD_MEM_SECURECM_WRITE, TZ_ParamTypes3(TZPT_VALUE_INPUT, TZPT_VALUE_INPUT, TZPT_MEM_INPUT), p); if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] KREE_WriteSecurechunkmem Error: %d\n", ret); #endif return ret; } return TZ_RESULT_SUCCESS; } TZ_RESULT KREE_GetSecurechunkReleaseSize (KREE_SESSION_HANDLE session, uint32_t *size) { MTEEC_PARAM p[4]; TZ_RESULT ret; ret = KREE_TeeServiceCall(session, TZCMD_MEM_SECURECM_RSIZE, TZ_ParamTypes1(TZPT_VALUE_OUTPUT), p); if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] KREE_GetSecurechunkReleaseSize Error: %d\n", ret); #endif return ret; } *size = p[0].value.a; return TZ_RESULT_SUCCESS; } TZ_RESULT KREE_GetTEETotalSize (KREE_SESSION_HANDLE session, uint32_t *size) { MTEEC_PARAM p[4]; TZ_RESULT ret; ret = KREE_TeeServiceCall(session, TZCMD_MEM_TOTAL_SIZE, TZ_ParamTypes1(TZPT_VALUE_OUTPUT), p); if (ret != TZ_RESULT_SUCCESS) { #ifdef DBG_KREE_MEM printk("[kree] KREE_GetTEETotalSize Error: %d\n", ret); #endif return ret; } *size = p[0].value.a; return TZ_RESULT_SUCCESS; }
gpl-2.0
RWTH-OS/linux
drivers/net/wireless/marvell/mwifiex/cmdevt.c
29
50878
/* * Marvell Wireless LAN device driver: commands and events * * Copyright (C) 2011-2014, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11ac.h" /* * This function initializes a command node. * * The actual allocation of the node is not done by this function. It only * initiates a node by filling it with default parameters. Similarly, * allocation of the different buffers used (IOCTL buffer, data buffer) are * not done by this function either. */ static void mwifiex_init_cmd_node(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node, u32 cmd_oid, void *data_buf, bool sync) { cmd_node->priv = priv; cmd_node->cmd_oid = cmd_oid; if (sync) { cmd_node->wait_q_enabled = true; cmd_node->cmd_wait_q_woken = false; cmd_node->condition = &cmd_node->cmd_wait_q_woken; } cmd_node->data_buf = data_buf; cmd_node->cmd_skb = cmd_node->skb; } /* * This function returns a command node from the free queue depending upon * availability. 
*/ static struct cmd_ctrl_node * mwifiex_get_cmd_node(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node; unsigned long flags; spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); if (list_empty(&adapter->cmd_free_q)) { mwifiex_dbg(adapter, ERROR, "GET_CMD_NODE: cmd node not available\n"); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return NULL; } cmd_node = list_first_entry(&adapter->cmd_free_q, struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return cmd_node; } /* * This function cleans up a command node. * * The function resets the fields including the buffer pointers. * This function does not try to free the buffers. They must be * freed before calling this function. * * This function will however call the receive completion callback * in case a response buffer is still available before resetting * the pointer. */ static void mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { cmd_node->cmd_oid = 0; cmd_node->cmd_flag = 0; cmd_node->data_buf = NULL; cmd_node->wait_q_enabled = false; if (cmd_node->cmd_skb) skb_trim(cmd_node->cmd_skb, 0); if (cmd_node->resp_skb) { adapter->if_ops.cmdrsp_complete(adapter, cmd_node->resp_skb); cmd_node->resp_skb = NULL; } } /* * This function returns a command to the command free queue. * * The function also calls the completion callback if required, before * cleaning the command node and re-inserting it into the free queue. 
*/ static void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { unsigned long flags; if (!cmd_node) return; if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); /* Clean the node */ mwifiex_clean_cmd_node(adapter, cmd_node); /* Insert node into cmd_free_q */ spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); list_add_tail(&cmd_node->list, &adapter->cmd_free_q); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); } /* This function reuses a command node. */ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data; mwifiex_insert_cmd_to_free_q(adapter, cmd_node); atomic_dec(&adapter->cmd_pending); mwifiex_dbg(adapter, CMD, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", le16_to_cpu(host_cmd->command), atomic_read(&adapter->cmd_pending)); } /* * This function sends a host command to the firmware. * * The function copies the host command into the driver command * buffer, which will be transferred to the firmware later by the * main thread. */ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct mwifiex_ds_misc_cmd *pcmd_ptr) { /* Copy the HOST command to command buffer */ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len); mwifiex_dbg(priv->adapter, CMD, "cmd: host cmd size = %d\n", pcmd_ptr->len); return 0; } /* * This function downloads a command to the firmware. * * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. Afterwards, it logs the command ID and action for debugging * and sets up the command timeout timer. 
*/ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node) { struct mwifiex_adapter *adapter = priv->adapter; int ret; struct host_cmd_ds_command *host_cmd; uint16_t cmd_code; uint16_t cmd_size; unsigned long flags; __le32 tmp; if (!adapter || !cmd_node) return -1; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); /* Sanity test */ if (host_cmd == NULL || host_cmd->size == 0) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: host_cmd is null\t" "or cmd size is 0, not sending\n"); if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, cmd_node); return -1; } cmd_code = le16_to_cpu(host_cmd->command); cmd_size = le16_to_cpu(host_cmd->size); if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && cmd_code != HostCmd_CMD_FUNC_INIT) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); mwifiex_recycle_cmd_node(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); return -1; } /* Set command sequence number */ adapter->seq_num++; host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, cmd_node->priv->bss_num, cmd_node->priv->bss_type)); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = cmd_node; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); /* Adjust skb length */ if (cmd_node->cmd_skb->len > cmd_size) /* * cmd_size is less than sizeof(struct host_cmd_ds_command). * Trim off the unused portion. */ skb_trim(cmd_node->cmd_skb, cmd_size); else if (cmd_node->cmd_skb->len < cmd_size) /* * cmd_size is larger than sizeof(struct host_cmd_ds_command) * because we have appended custom IE TLV. Increase skb length * accordingly. 
*/ skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, get_unaligned_le16((u8 *)host_cmd + S_DS_GEN), cmd_size, le16_to_cpu(host_cmd->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); if (adapter->iface_type == MWIFIEX_USB) { tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN); adapter->cmd_sent = true; ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); if (ret == -EBUSY) cmd_node->cmd_skb = NULL; } else { skb_push(cmd_node->cmd_skb, adapter->intf_hdr_len); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, adapter->intf_hdr_len); } if (ret == -1) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: host to card failed\n"); if (adapter->iface_type == MWIFIEX_USB) adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); adapter->dbg.num_cmd_host_to_card_failure++; return -1; } /* Save the last command id and action to debug log */ adapter->dbg.last_cmd_index = (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code; adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] = get_unaligned_le16((u8 *)host_cmd + S_DS_GEN); /* Clear BSS_NO_BITS from HostCmd */ cmd_code &= HostCmd_CMD_ID_MASK; /* Setup the timer after transmit command */ mod_timer(&adapter->cmd_timer, jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); return 0; } /* * This function downloads a sleep confirm command to the firmware. 
* * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. * * No responses are needed for sleep confirm command. */ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) { int ret; struct mwifiex_private *priv; struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) adapter->sleep_cfm->data; struct sk_buff *sleep_cfm_tmp; __le32 tmp; priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); adapter->seq_num++; sleep_cfm_buf->seq_num = cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, priv->bss_num, priv->bss_type))); mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", le16_to_cpu(sleep_cfm_buf->command), le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), le16_to_cpu(sleep_cfm_buf->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf, le16_to_cpu(sleep_cfm_buf->size)); if (adapter->iface_type == MWIFIEX_USB) { sleep_cfm_tmp = dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN); memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN, adapter->sleep_cfm->data, sizeof(struct mwifiex_opt_sleep_confirm)); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, sleep_cfm_tmp, NULL); if (ret != -EBUSY) dev_kfree_skb_any(sleep_cfm_tmp); } else { skb_push(adapter->sleep_cfm, adapter->intf_hdr_len); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, adapter->sleep_cfm, NULL); skb_pull(adapter->sleep_cfm, adapter->intf_hdr_len); } if (ret == -1) { mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n"); adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; return -1; } if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl)) /* Response is not needed for sleep 
confirm command */ adapter->ps_state = PS_STATE_SLEEP; else adapter->ps_state = PS_STATE_SLEEP_CFM; if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && (adapter->is_hs_configured && !adapter->sleep_period.period)) { adapter->pm_wakeup_card_req = true; mwifiex_hs_activated_event(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), true); } return ret; } /* * This function allocates the command buffers and links them to * the command free queue. * * The driver uses a pre allocated number of command buffers, which * are created at driver initializations and freed at driver cleanup. * Every command needs to obtain a command buffer from this pool before * it can be issued. The command free queue lists the command buffers * currently free to use, while the command pending queue lists the * command buffers already in use and awaiting handling. Command buffers * are returned to the free queue after use. */ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Allocate and initialize struct cmd_ctrl_node */ cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER, sizeof(struct cmd_ctrl_node), GFP_KERNEL); if (!cmd_array) return -ENOMEM; adapter->cmd_pool = cmd_array; /* Allocate and initialize command buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { mwifiex_dbg(adapter, ERROR, "unable to allocate command buffer\n"); return -ENOMEM; } } for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]); return 0; } /* * This function frees the command buffers. * * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. 
*/ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Need to check if cmd pool is allocated or not */ if (!adapter->cmd_pool) { mwifiex_dbg(adapter, FATAL, "info: FREE_CMD_BUF: cmd_pool is null\n"); return 0; } cmd_array = adapter->cmd_pool; /* Release shared memory buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { if (cmd_array[i].skb) { mwifiex_dbg(adapter, CMD, "cmd: free cmd buffer %d\n", i); dev_kfree_skb_any(cmd_array[i].skb); } if (!cmd_array[i].resp_skb) continue; if (adapter->iface_type == MWIFIEX_USB) adapter->if_ops.cmdrsp_complete(adapter, cmd_array[i].resp_skb); else dev_kfree_skb_any(cmd_array[i].resp_skb); } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { mwifiex_dbg(adapter, CMD, "cmd: free cmd pool\n"); kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } return 0; } /* * This function handles events generated by firmware. * * Event body of events received from firmware are not used (though they are * saved), only the event ID is used. Some events are re-invoked by * the driver, with a new event body. * * After processing, the function calls the completion callback * for cleanup. 
*/ int mwifiex_process_event(struct mwifiex_adapter *adapter) { int ret, i; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); struct sk_buff *skb = adapter->event_skb; u32 eventcause; struct mwifiex_rxinfo *rx_info; if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; if (priv && mwifiex_is_11h_active(priv)) { adapter->event_cause |= ((priv->bss_num & 0xff) << 16) | ((priv->bss_type & 0xff) << 24); break; } } } eventcause = adapter->event_cause; /* Save the last event to debug log */ adapter->dbg.last_event_index = (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM; adapter->dbg.last_event[adapter->dbg.last_event_index] = (u16) eventcause; /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause), EVENT_GET_BSS_TYPE(eventcause)); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Clear BSS_NO_BITS from event */ eventcause &= EVENT_ID_MASK; adapter->event_cause = eventcause; if (skb) { rx_info = MWIFIEX_SKB_RXCB(skb); memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len); } mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) ret = mwifiex_process_uap_event(priv); else ret = mwifiex_process_sta_event(priv); adapter->event_cause = 0; adapter->event_skb = NULL; adapter->if_ops.event_complete(adapter, skb); return ret; } /* * This function prepares a command and send it to the firmware. 
* * Preparation includes - * - Sanity tests to make sure the card is still present or the FW * is not reset * - Getting a new command node from the command free queue * - Initializing the command node for default parameters * - Fill up the non-default parameters and buffer pointers * - Add the command to pending queue */ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync) { int ret; struct mwifiex_adapter *adapter = priv->adapter; struct cmd_ctrl_node *cmd_node; struct host_cmd_ds_command *cmd_ptr; if (!adapter) { pr_err("PREP_CMD: adapter is NULL\n"); return -1; } if (adapter->is_suspended) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: device in suspended state\n"); return -1; } if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: host entering sleep state\n"); return -1; } if (adapter->surprise_removed) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: card is removed\n"); return -1; } if (adapter->is_cmd_timedout) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: FW is in bad state\n"); return -1; } if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { if (cmd_no != HostCmd_CMD_FUNC_INIT) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: FW in reset state\n"); return -1; } } /* We don't expect commands in manufacturing mode. 
They are cooked * in application and ready to download buffer is passed to the driver */ if (adapter->mfg_mode && cmd_no) { dev_dbg(adapter->dev, "Ignoring commands in manufacturing mode\n"); return -1; } /* Get a new command node */ cmd_node = mwifiex_get_cmd_node(adapter); if (!cmd_node) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: no free cmd node\n"); return -1; } /* Initialize the command node */ mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync); if (!cmd_node->cmd_skb) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: no free cmd buf\n"); return -1; } skb_put_zero(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)); cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->result = 0; /* Prepare command */ if (cmd_no) { switch (cmd_no) { case HostCmd_CMD_UAP_SYS_CONFIG: case HostCmd_CMD_UAP_BSS_START: case HostCmd_CMD_UAP_BSS_STOP: case HostCmd_CMD_UAP_STA_DEAUTH: case HOST_CMD_APCMD_SYS_RESET: case HOST_CMD_APCMD_STA_LIST: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; default: ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; } } else { ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf); cmd_node->cmd_flag |= CMD_F_HOSTCMD; } /* Return error, since the command preparation failed */ if (ret) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: cmd %#x preparation failed\n", cmd_no); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); return -1; } /* Send command */ if (cmd_no == HostCmd_CMD_802_11_SCAN || cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); } return ret; } /* * This function queues a command to the command pending queue. 
* * This in effect adds the command to the command list to be executed. * Exit PS command is handled specially, by placing it always to the * front of the command queue. */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node, u32 add_tail) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n"); return; } command = le16_to_cpu(host_cmd->command); /* Exit_PS command needs to be queued in the header always. */ if (command == HostCmd_CMD_802_11_PS_MODE_ENH) { struct host_cmd_ds_802_11_ps_mode_enh *pm = &host_cmd->params.psmode_enh; if ((le16_to_cpu(pm->action) == DIS_PS) || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) { if (adapter->ps_state != PS_STATE_AWAKE) add_tail = false; } } spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); if (add_tail) list_add_tail(&cmd_node->list, &adapter->cmd_pending_q); else list_add(&cmd_node->list, &adapter->cmd_pending_q); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); atomic_inc(&adapter->cmd_pending); mwifiex_dbg(adapter, CMD, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", command, atomic_read(&adapter->cmd_pending)); } /* * This function executes the next command in command pending queue. * * This function will fail if a command is already in processing stage, * otherwise it will dequeue the first command from the command pending * queue and send to the firmware. * * If the device is currently in host sleep mode, any commands, except the * host sleep configuration command will de-activate the host sleep. For PS * mode, the function will put the firmware back to sleep if applicable. 
*/ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; struct cmd_ctrl_node *cmd_node; int ret = 0; struct host_cmd_ds_command *host_cmd; unsigned long cmd_flags; unsigned long cmd_pending_q_flags; /* Check if already in processing */ if (adapter->curr_cmd) { mwifiex_dbg(adapter, FATAL, "EXEC_NEXT_CMD: cmd in processing\n"); return -1; } spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); /* Check if any command is pending */ spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); if (list_empty(&adapter->cmd_pending_q)) { spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return 0; } cmd_node = list_first_entry(&adapter->cmd_pending_q, struct cmd_ctrl_node, list); host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); priv = cmd_node->priv; if (adapter->ps_state != PS_STATE_AWAKE) { mwifiex_dbg(adapter, ERROR, "%s: cannot send cmd in sleep state,\t" "this should not happen\n", __func__); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return ret; } list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Any command sent to the firmware when host is in sleep * mode should de-configure host sleep. We should skip the * host sleep configuration command itself though */ if (priv && (host_cmd->command != cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) { if (adapter->hs_activated) { adapter->is_hs_configured = false; mwifiex_hs_activated_event(priv, false); } } return ret; } /* * This function handles the command response. 
* * After processing, the function cleans the command node and puts * it back to the command free queue. */ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) { struct host_cmd_ds_command *resp; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); int ret = 0; uint16_t orig_cmdresp_no; uint16_t cmdresp_no; uint16_t cmdresp_result; unsigned long flags; /* Now we got response from FW, cancel the command timer */ del_timer_sync(&adapter->cmd_timer); if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) { resp = (struct host_cmd_ds_command *) adapter->upld_buf; mwifiex_dbg(adapter, ERROR, "CMD_RESP: NULL curr_cmd, %#x\n", le16_to_cpu(resp->command)); return -1; } adapter->is_cmd_timedout = 0; resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { /* Copy original response back to response buffer */ struct mwifiex_ds_misc_cmd *hostcmd; uint16_t size = le16_to_cpu(resp->size); mwifiex_dbg(adapter, INFO, "info: host cmd resp size = %d\n", size); size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER); if (adapter->curr_cmd->data_buf) { hostcmd = adapter->curr_cmd->data_buf; hostcmd->len = size; memcpy(hostcmd->cmd, resp, size); } } orig_cmdresp_no = le16_to_cpu(resp->command); /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)), HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num))); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Clear RET_BIT from HostCmd */ resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK); cmdresp_no = le16_to_cpu(resp->command); cmdresp_result = le16_to_cpu(resp->result); /* Save the last command response to debug log */ adapter->dbg.last_cmd_resp_index = (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] = orig_cmdresp_no; mwifiex_dbg(adapter, CMD, "cmd: CMD_RESP: 0x%x, 
result %d, len %d, seqno 0x%x\n", orig_cmdresp_no, cmdresp_result, le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp, le16_to_cpu(resp->size)); if (!(orig_cmdresp_no & HostCmd_RET_BIT)) { mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n"); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); return -1; } if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD; if ((cmdresp_result == HostCmd_RESULT_OK) && (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH)) ret = mwifiex_ret_802_11_hs_cfg(priv, resp); } else { /* handle response */ ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp); } /* Check init command response */ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) { if (ret) { mwifiex_dbg(adapter, ERROR, "%s: cmd %#x failed during\t" "initialization\n", __func__, cmdresp_no); mwifiex_init_fw_complete(adapter); return -1; } else if (adapter->last_init_cmd == cmdresp_no) adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE; } if (adapter->curr_cmd) { if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = ret; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); } return ret; } /* * This function handles the timeout of command sending. * * It will re-send the same command again. 
 */
/*
 * Timer callback invoked when a command sent to firmware got no response
 * in time.
 *
 * NOTE(review): despite the "It will re-send the same command again"
 * wording in the header comment above, this function does not re-send
 * anything — it dumps diagnostic state, fails any waiter with -ETIMEDOUT,
 * and asks the interface driver for a firmware dump / card reset.
 *
 * @function_context: the struct mwifiex_adapter pointer, cast to the
 *                    timer callback's unsigned long argument.
 */
void
mwifiex_cmd_timeout_func(unsigned long function_context)
{
	struct mwifiex_adapter *adapter =
		(struct mwifiex_adapter *) function_context;
	struct cmd_ctrl_node *cmd_node;

	adapter->is_cmd_timedout = 1;
	if (!adapter->curr_cmd) {
		mwifiex_dbg(adapter, ERROR,
			    "cmd: empty curr_cmd\n");
		return;
	}
	cmd_node = adapter->curr_cmd;
	if (cmd_node) {
		/* Record which command/action timed out for debugfs. */
		adapter->dbg.timeout_cmd_id =
			adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
		adapter->dbg.timeout_cmd_act =
			adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
		mwifiex_dbg(adapter, MSG,
			    "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
			    adapter->dbg.timeout_cmd_id,
			    adapter->dbg.timeout_cmd_act);

		mwifiex_dbg(adapter, MSG,
			    "num_data_h2c_failure = %d\n",
			    adapter->dbg.num_tx_host_to_card_failure);
		mwifiex_dbg(adapter, MSG,
			    "num_cmd_h2c_failure = %d\n",
			    adapter->dbg.num_cmd_host_to_card_failure);

		mwifiex_dbg(adapter, MSG,
			    "is_cmd_timedout = %d\n",
			    adapter->is_cmd_timedout);
		mwifiex_dbg(adapter, MSG,
			    "num_tx_timeout = %d\n",
			    adapter->dbg.num_tx_timeout);

		mwifiex_dbg(adapter, MSG,
			    "last_cmd_index = %d\n",
			    adapter->dbg.last_cmd_index);
		mwifiex_dbg(adapter, MSG,
			    "last_cmd_id: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_id),
			    adapter->dbg.last_cmd_id);
		mwifiex_dbg(adapter, MSG,
			    "last_cmd_act: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_act),
			    adapter->dbg.last_cmd_act);

		mwifiex_dbg(adapter, MSG,
			    "last_cmd_resp_index = %d\n",
			    adapter->dbg.last_cmd_resp_index);
		mwifiex_dbg(adapter, MSG,
			    "last_cmd_resp_id: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_resp_id),
			    adapter->dbg.last_cmd_resp_id);

		mwifiex_dbg(adapter, MSG,
			    "last_event_index = %d\n",
			    adapter->dbg.last_event_index);
		mwifiex_dbg(adapter, MSG,
			    "last_event: %*ph\n",
			    (int)sizeof(adapter->dbg.last_event),
			    adapter->dbg.last_event);

		mwifiex_dbg(adapter, MSG,
			    "data_sent=%d cmd_sent=%d\n",
			    adapter->data_sent, adapter->cmd_sent);

		mwifiex_dbg(adapter, MSG,
			    "ps_mode=%d ps_state=%d\n",
			    adapter->ps_mode, adapter->ps_state);

		/* Fail the waiter and cancel the in-flight command. */
		if (cmd_node->wait_q_enabled) {
			adapter->cmd_wait_q.status = -ETIMEDOUT;
			mwifiex_cancel_pending_ioctl(adapter);
		}
	}
	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
		mwifiex_init_fw_complete(adapter);
		return;
	}

	/* Ask the bus/interface layer for post-mortem help, if supported. */
	if (adapter->if_ops.device_dump)
		adapter->if_ops.device_dump(adapter);

	if (adapter->if_ops.card_reset)
		adapter->if_ops.card_reset(adapter);
}

/*
 * Flush every command queued on the scan pending queue: each node is
 * unlinked, its waiter flag cleared, and the node returned to the free
 * queue.  Runs under scan_pending_q_lock.
 */
void
mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
{
	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
	unsigned long flags;

	/* Cancel all pending scan command */
	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
	list_for_each_entry_safe(cmd_node, tmp_node,
				 &adapter->scan_pending_q, list) {
		list_del(&cmd_node->list);
		cmd_node->wait_q_enabled = false;
		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
	}
	spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
}

/*
 * This function cancels all the pending commands.
 *
 * The current command, all commands in command pending queue and all scan
 * commands in scan pending queue are cancelled. All the completion callbacks
 * are called with failure status to ensure cleanup.
 */
void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
	unsigned long flags, cmd_flags;

	spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
	/* Cancel current cmd */
	if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
		adapter->cmd_wait_q.status = -1;
		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
		adapter->curr_cmd->wait_q_enabled = false;
		/* no recycle probably wait for response */
	}
	/* Cancel all pending command */
	spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
	list_for_each_entry_safe(cmd_node, tmp_node,
				 &adapter->cmd_pending_q, list) {
		list_del(&cmd_node->list);
		/* Fail any waiter before recycling the node. */
		if (cmd_node->wait_q_enabled)
			adapter->cmd_wait_q.status = -1;
		mwifiex_recycle_cmd_node(adapter, cmd_node);
	}
	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);

	/* Scan pending queue is flushed separately. */
	mwifiex_cancel_scan(adapter);
}

/*
 * This function cancels all pending commands that matches with
 * the given IOCTL request.
 *
 * Both the current command buffer and the pending command queue are
 * searched for matching IOCTL request. The completion callback of
 * the matched command is called with failure status to ensure cleanup.
 * In case of scan commands, all pending commands in scan pending queue
 * are cancelled.
 */
void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
	struct cmd_ctrl_node *cmd_node = NULL;
	unsigned long cmd_flags;

	if ((adapter->curr_cmd) &&
	    (adapter->curr_cmd->wait_q_enabled)) {
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
		cmd_node = adapter->curr_cmd;
		/* setting curr_cmd to NULL is quite dangerous, because
		 * mwifiex_process_cmdresp checks curr_cmd to be != NULL
		 * at the beginning then relies on it and dereferences
		 * it at will
		 * this probably works since mwifiex_cmd_timeout_func
		 * is the only caller of this function and responses
		 * at that point
		 */
		adapter->curr_cmd = NULL;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);

		mwifiex_recycle_cmd_node(adapter, cmd_node);
	}

	mwifiex_cancel_scan(adapter);
}

/*
 * This function sends the sleep confirm command to firmware, if
 * possible.
 *
 * The sleep confirm command cannot be issued if command response,
 * data response or event response is awaiting handling, or if we
 * are in the middle of sending a command, or expecting a command
 * response.
 */
void
mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
{
	/* Only confirm sleep when the adapter is fully idle. */
	if (!adapter->cmd_sent && !atomic_read(&adapter->tx_hw_pending) &&
	    !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
		mwifiex_dnld_sleep_confirm_cmd(adapter);
	else
		/* Flags: D=data sent, T=tx pending, C=cmd active, R=rx seen */
		mwifiex_dbg(adapter, CMD,
			    "cmd: Delay Sleep Confirm (%s%s%s%s)\n",
			    (adapter->cmd_sent) ? "D" : "",
			    atomic_read(&adapter->tx_hw_pending) ? "T" : "",
			    (adapter->curr_cmd) ? "C" : "",
			    (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}

/*
 * This function sends a Host Sleep activated event to applications.
 *
 * This event is generated by the driver, with a blank event body.
 */
void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
	if (activated) {
		if (priv->adapter->is_hs_configured) {
			priv->adapter->hs_activated = true;
			mwifiex_update_rxreor_flags(priv->adapter,
						    RXREOR_FORCE_NO_DROP);
			mwifiex_dbg(priv->adapter, EVENT,
				    "event: hs_activated\n");
			priv->adapter->hs_activate_wait_q_woken = true;
			/* Wake any thread blocked waiting for HS activation. */
			wake_up_interruptible(
				&priv->adapter->hs_activate_wait_q);
		} else {
			mwifiex_dbg(priv->adapter, EVENT,
				    "event: HS not configured\n");
		}
	} else {
		mwifiex_dbg(priv->adapter, EVENT,
			    "event: hs_deactivated\n");
		priv->adapter->hs_activated = false;
	}
}

/*
 * This function handles the command response of a Host Sleep configuration
 * command.
 *
 * Handling includes changing the header fields into CPU format
 * and setting the current host sleep activation status in driver.
 *
 * In case host sleep status change, the function generates an event to
 * notify the applications.
 */
int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
			      struct host_cmd_ds_command *resp)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg =
		&resp->params.opt_hs_cfg;
	/* NOTE(review): conditions is read from the hs_config union member
	 * even on the HS_ACTIVATE path, where that member may not be the
	 * one the firmware filled in — confirm against the firmware spec. */
	uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);

	if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
	    adapter->iface_type != MWIFIEX_USB) {
		/* Non-USB interfaces signal activation right away. */
		mwifiex_hs_activated_event(priv, true);
		return 0;
	} else {
		mwifiex_dbg(adapter, CMD,
			    "cmd: CMD_RESP: HS_CFG cmd reply\t"
			    " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
			    resp->result, conditions,
			    phs_cfg->params.hs_config.gpio,
			    phs_cfg->params.hs_config.gap);
	}
	if (conditions != HS_CFG_CANCEL) {
		adapter->is_hs_configured = true;
		if (adapter->iface_type == MWIFIEX_USB)
			mwifiex_hs_activated_event(priv, true);
	} else {
		adapter->is_hs_configured = false;
		if (adapter->hs_activated)
			mwifiex_hs_activated_event(priv, false);
	}

	return 0;
}

/*
 * This function wakes up the adapter and generates a Host Sleep
 * cancel event on receiving the power up interrupt.
 */
void
mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
{
	mwifiex_dbg(adapter, INFO,
		    "info: %s: auto cancelling host sleep\t"
		    "since there is interrupt from the firmware\n",
		    __func__);

	adapter->if_ops.wakeup(adapter);
	adapter->hs_activated = false;
	adapter->is_hs_configured = false;
	adapter->is_suspended = false;
	/* Notify applications that host sleep has been cancelled. */
	mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
						    MWIFIEX_BSS_ROLE_ANY),
				   false);
}
EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);

/*
 * This function handles the command response of a sleep confirm command.
 *
 * The function sets the card state to SLEEP if the response indicates
 * success.
 *
 * @pbuf:     raw response buffer, interpreted as host_cmd_ds_command.
 * @upld_len: uploaded length; zero means an empty/invalid response.
 */
void
mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
				   u8 *pbuf, u32 upld_len)
{
	struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
	struct mwifiex_private *priv =
		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	uint16_t result = le16_to_cpu(cmd->result);
	uint16_t command = le16_to_cpu(cmd->command);
	uint16_t seq_num = le16_to_cpu(cmd->seq_num);

	if (!upld_len) {
		mwifiex_dbg(adapter, ERROR, "%s: cmd size is 0\n", __func__);
		return;
	}

	mwifiex_dbg(adapter, CMD,
		    "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
		    command, result, le16_to_cpu(cmd->size), seq_num);

	/* Get BSS number and corresponding priv */
	priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
				      HostCmd_GET_BSS_TYPE(seq_num));
	if (!priv)
		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	/* Update sequence number */
	seq_num = HostCmd_GET_SEQ_NO(seq_num);
	/* Clear RET_BIT from HostCmd */
	command &= HostCmd_CMD_ID_MASK;

	if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
			    __func__, command, result);
		return;
	}

	if (result) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: sleep confirm cmd failed\n",
			    __func__);
		adapter->pm_wakeup_card_req = false;
		adapter->ps_state = PS_STATE_AWAKE;
		return;
	}
	adapter->pm_wakeup_card_req = true;
	if (adapter->is_hs_configured)
		mwifiex_hs_activated_event(mwifiex_get_priv
					   (adapter, MWIFIEX_BSS_ROLE_ANY),
					   true);
	adapter->ps_state = PS_STATE_SLEEP;
	/* Write the normalized (masked) id and seqno back into the buffer. */
	cmd->command = cpu_to_le16(command);
	cmd->seq_num = cpu_to_le16(seq_num);
}
EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);

/*
 * This function prepares an enhanced power mode command.
 *
 * This function can be used to disable power save or to configure
 * power save with auto PS or STA PS or auto deep sleep.
 *
 * Preparation includes -
 *      - Setting command ID, action and proper size
 *      - Setting Power Save bitmap, PS parameters TLV, PS mode TLV,
 *        auto deep sleep TLV (as required)
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *cmd,
			       u16 cmd_action, uint16_t ps_bitmap,
			       struct mwifiex_ds_auto_ds *auto_ds)
{
	struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
		&cmd->params.psmode_enh;
	u8 *tlv;
	u16 cmd_size = 0;

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
	if (cmd_action == DIS_AUTO_PS) {
		psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
					sizeof(psmode_enh->params.ps_bitmap));
	} else if (cmd_action == GET_PS) {
		psmode_enh->action = cpu_to_le16(GET_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
					sizeof(psmode_enh->params.ps_bitmap));
	} else if (cmd_action == EN_AUTO_PS) {
		psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd_size = S_DS_GEN + sizeof(psmode_enh->action) +
			   sizeof(psmode_enh->params.ps_bitmap);
		/* TLVs are appended immediately after the fixed part. */
		tlv = (u8 *) cmd + cmd_size;
		if (ps_bitmap & BITMAP_STA_PS) {
			struct mwifiex_adapter *adapter = priv->adapter;
			struct mwifiex_ie_types_ps_param *ps_tlv =
				(struct mwifiex_ie_types_ps_param *) tlv;
			struct mwifiex_ps_param *ps_mode = &ps_tlv->param;

			ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM);
			ps_tlv->header.len =
				cpu_to_le16(sizeof(*ps_tlv) -
					    sizeof(struct mwifiex_ie_types_header));
			cmd_size += sizeof(*ps_tlv);
			tlv += sizeof(*ps_tlv);
			mwifiex_dbg(priv->adapter, CMD,
				    "cmd: PS Command: Enter PS\n");
			ps_mode->null_pkt_interval =
				cpu_to_le16(adapter->null_pkt_interval);
			ps_mode->multiple_dtims =
				cpu_to_le16(adapter->multiple_dtim);
			ps_mode->bcn_miss_timeout =
				cpu_to_le16(adapter->bcn_miss_time_out);
			ps_mode->local_listen_interval =
				cpu_to_le16(adapter->local_listen_interval);
			ps_mode->adhoc_wake_period =
				cpu_to_le16(adapter->adhoc_awake_period);
			ps_mode->delay_to_ps =
				cpu_to_le16(adapter->delay_to_ps);
			ps_mode->mode = cpu_to_le16(adapter->enhanced_ps_mode);
		}
		if (ps_bitmap & BITMAP_AUTO_DS) {
			struct mwifiex_ie_types_auto_ds_param *auto_ds_tlv =
				(struct mwifiex_ie_types_auto_ds_param *) tlv;
			u16 idletime = 0;

			auto_ds_tlv->header.type =
				cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM);
			auto_ds_tlv->header.len =
				cpu_to_le16(sizeof(*auto_ds_tlv) -
					    sizeof(struct mwifiex_ie_types_header));
			cmd_size += sizeof(*auto_ds_tlv);
			tlv += sizeof(*auto_ds_tlv);
			/* idletime 0 (no auto_ds given) = firmware default. */
			if (auto_ds)
				idletime = auto_ds->idle_time;
			mwifiex_dbg(priv->adapter, CMD,
				    "cmd: PS Command: Enter Auto Deep Sleep\n");
			auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
		}
		cmd->size = cpu_to_le16(cmd_size);
	}
	return 0;
}

/*
 * This function handles the command response of an enhanced power mode
 * command.
 *
 * Handling includes changing the header fields into CPU format
 * and setting the current enhanced power mode in driver.
 */
int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp,
			       struct mwifiex_ds_pm_cfg *pm_cfg)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
		&resp->params.psmode_enh;
	uint16_t action = le16_to_cpu(ps_mode->action);
	uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap);
	/* NOTE(review): auto_ps_bitmap reads the very same field as
	 * ps_bitmap above; the two locals are always equal here. */
	uint16_t auto_ps_bitmap =
		le16_to_cpu(ps_mode->params.ps_bitmap);

	mwifiex_dbg(adapter, INFO,
		    "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
		    __func__, resp->result, action);
	if (action == EN_AUTO_PS) {
		if (auto_ps_bitmap & BITMAP_AUTO_DS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Enabled auto deep sleep\n");
			priv->adapter->is_deep_sleep = true;
		}
		if (auto_ps_bitmap & BITMAP_STA_PS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Enabled STA power save\n");
			if (adapter->sleep_period.period)
				mwifiex_dbg(adapter, CMD,
					    "cmd: set to uapsd/pps mode\n");
		}
	} else if (action == DIS_AUTO_PS) {
		if (ps_bitmap & BITMAP_AUTO_DS) {
			priv->adapter->is_deep_sleep = false;
			mwifiex_dbg(adapter, CMD,
				    "cmd: Disabled auto deep sleep\n");
		}
		if (ps_bitmap & BITMAP_STA_PS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Disabled STA power save\n");
			if (adapter->sleep_period.period) {
				/* Leaving uapsd/pps mode: clear tx gating. */
				adapter->delay_null_pkt = false;
				adapter->tx_lock_flag = false;
				adapter->pps_uapsd_mode = false;
			}
		}
	} else if (action == GET_PS) {
		if (ps_bitmap & BITMAP_STA_PS)
			adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
		else
			adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;

		mwifiex_dbg(adapter, CMD,
			    "cmd: ps_bitmap=%#x\n", ps_bitmap);

		if (pm_cfg) {
			/* This section is for get power save mode */
			if (ps_bitmap & BITMAP_STA_PS)
				pm_cfg->param.ps_mode = 1;
			else
				pm_cfg->param.ps_mode = 0;
		}
	}
	return 0;
}

/*
 * This function prepares command to get hardware specifications.
 *
 * Preparation includes -
 *      - Setting command ID, action and proper size
 *      - Setting permanent address parameter
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
			    struct host_cmd_ds_command *cmd)
{
	struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec;

	cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC);
	cmd->size =
		cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN);
	memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN);

	return 0;
}

/*
 * This function handles the command response of get hardware
 * specifications.
 *
 * Handling includes changing the header fields into CPU format
 * and saving/updating the following parameters in driver -
 *      - Firmware capability information
 *      - Firmware band settings
 *      - Ad-hoc start band and channel
 *      - Ad-hoc 11n activation status
 *      - Firmware release number
 *      - Number of antennas
 *      - Hardware address
 *      - Hardware interface version
 *      - Firmware version
 *      - Region code
 *      - 11n capabilities
 *      - MCS support fields
 *      - MP end port
 */
int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
			    struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_ie_types_header *tlv;
	struct hw_spec_api_rev *api_rev;
	u16 resp_size, api_id;
	int i, left_len, parsed_len = 0;

	adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);

	if (IS_SUPPORT_MULTI_BANDS(adapter))
		adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter);
	else
		adapter->fw_bands = BAND_B;

	adapter->config_bands = adapter->fw_bands;

	/* Derive the ad-hoc start band/channel from the supported bands. */
	if (adapter->fw_bands & BAND_A) {
		if (adapter->fw_bands & BAND_GN) {
			adapter->config_bands |= BAND_AN;
			adapter->fw_bands |= BAND_AN;
		}
		if (adapter->fw_bands & BAND_AN) {
			adapter->adhoc_start_band = BAND_A | BAND_AN;
			adapter->adhoc_11n_enabled = true;
		} else {
			adapter->adhoc_start_band = BAND_A;
		}
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A;
	} else if (adapter->fw_bands & BAND_GN) {
		adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
		adapter->adhoc_11n_enabled = true;
	} else if (adapter->fw_bands & BAND_G) {
		adapter->adhoc_start_band = BAND_G | BAND_B;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
	} else if (adapter->fw_bands & BAND_B) {
		adapter->adhoc_start_band = BAND_B;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
	}

	adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
	/* Default API version lives in bits 16..23 of the release number;
	 * may be overridden by a FW_API_VER_ID TLV below. */
	adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
	adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);

	if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
		adapter->is_hw_11ac_capable = true;

		/* Copy 11AC cap */
		adapter->hw_dot_11ac_dev_cap =
					le32_to_cpu(hw_spec->dot_11ac_dev_cap);
		adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
					& ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
		adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
					& ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;

		/* Copy 11AC mcs */
		adapter->hw_dot_11ac_mcs_support =
				le32_to_cpu(hw_spec->dot_11ac_mcs_support);
		adapter->usr_dot_11ac_mcs_support =
					adapter->hw_dot_11ac_mcs_support;
	} else {
		adapter->is_hw_11ac_capable = false;
	}

	resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
	if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
		/* we have variable HW SPEC information */
		left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
		while (left_len > sizeof(struct mwifiex_ie_types_header)) {
			tlv = (void *)&hw_spec->tlvs + parsed_len;
			switch (le16_to_cpu(tlv->type)) {
			case TLV_TYPE_API_REV:
				api_rev = (struct hw_spec_api_rev *)tlv;
				api_id = le16_to_cpu(api_rev->api_id);
				switch (api_id) {
				case KEY_API_VER_ID:
					adapter->key_api_major_ver =
							api_rev->major_ver;
					adapter->key_api_minor_ver =
							api_rev->minor_ver;
					mwifiex_dbg(adapter, INFO,
						    "key_api v%d.%d\n",
						    adapter->key_api_major_ver,
						    adapter->key_api_minor_ver);
					break;
				case FW_API_VER_ID:
					adapter->fw_api_ver =
							api_rev->major_ver;
					mwifiex_dbg(adapter, INFO,
						    "Firmware api version %d\n",
						    adapter->fw_api_ver);
					break;
				default:
					mwifiex_dbg(adapter, FATAL,
						    "Unknown api_id: %d\n",
						    api_id);
					break;
				}
				break;
			default:
				mwifiex_dbg(adapter, FATAL,
					    "Unknown GET_HW_SPEC TLV type: %#x\n",
					    le16_to_cpu(tlv->type));
				break;
			}
			parsed_len += le16_to_cpu(tlv->len) +
				      sizeof(struct mwifiex_ie_types_header);
			left_len -= le16_to_cpu(tlv->len) +
				    sizeof(struct mwifiex_ie_types_header);
		}
	}

	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: fw_release_number- %#x\n",
		    adapter->fw_release_number);
	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: permanent addr: %pM\n",
		    hw_spec->permanent_addr);
	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
		    le16_to_cpu(hw_spec->hw_if_version),
		    le16_to_cpu(hw_spec->version));

	ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
	adapter->region_code = le16_to_cpu(hw_spec->region_code);

	for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
		/* Use the region code to search for the index */
		if (adapter->region_code == region_code_index[i])
			break;

	/* If it's unidentified region code, use the default (world) */
	if (i >= MWIFIEX_MAX_REGION_CODE) {
		adapter->region_code = 0x00;
		mwifiex_dbg(adapter, WARN,
			    "cmd: unknown region code, use default (USA)\n");
	}

	adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
	adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
	adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;

	if (adapter->if_ops.update_mp_end_port)
		adapter->if_ops.update_mp_end_port(adapter,
					le16_to_cpu(hw_spec->mp_end_port));

	if (adapter->fw_api_ver == MWIFIEX_FW_V15)
		adapter->scan_chan_gap_enabled = true;

	return 0;
}

/* This function handles the command response of hs wakeup reason
 * command.  It simply copies the firmware-reported wakeup reason out
 * to the caller-supplied structure.
 */
int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
			      struct host_cmd_ds_command *resp,
			      struct host_cmd_ds_wakeup_reason *wakeup_reason)
{
	wakeup_reason->wakeup_reason =
		resp->params.hs_wakeup_reason.wakeup_reason;

	return 0;
}
gpl-2.0
garwedgess/LuPuS-STOCK-ICS-Xperia2011
drivers/media/video/saa5246a.c
541
30399
/* * Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from * Philips. * * Only capturing of Teletext pages is tested. The videotext chips also have a * TV output but my hardware doesn't use it. For this reason this driver does * not support changing any TV display settings. * * Copyright (C) 2004 Michael Geng <linux@MichaelGeng.de> * * Derived from * * saa5249 driver * Copyright (C) 1998 Richard Guenther * <richard.guenther@student.uni-tuebingen.de> * * with changes by * Alan Cox <alan@lxorguk.ukuu.org.uk> * * and * * vtx.c * Copyright (C) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/videotext.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-i2c-drv.h> MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>"); MODULE_DESCRIPTION("Philips SAA5246A, SAA5281 Teletext decoder driver"); MODULE_LICENSE("GPL"); #define MAJOR_VERSION 1 /* driver major version number */ #define MINOR_VERSION 8 /* driver minor version number */ /* Number of DAUs = number of pages that can be searched at the same time. */ #define NUM_DAUS 4 #define NUM_ROWS_PER_PAGE 40 /* first column is 0 (not 1) */ #define POS_TIME_START 32 #define POS_TIME_END 39 #define POS_HEADER_START 7 #define POS_HEADER_END 31 /* Returns 'true' if the part of the videotext page described with req contains (at least parts of) the time field */ #define REQ_CONTAINS_TIME(p_req) \ ((p_req)->start <= POS_TIME_END && \ (p_req)->end >= POS_TIME_START) /* Returns 'true' if the part of the videotext page described with req contains (at least parts of) the page header */ #define REQ_CONTAINS_HEADER(p_req) \ ((p_req)->start <= POS_HEADER_END && \ (p_req)->end >= POS_HEADER_START) /*****************************************************************************/ /* Mode register numbers of the SAA5246A */ /*****************************************************************************/ #define SAA5246A_REGISTER_R0 0 #define SAA5246A_REGISTER_R1 1 #define SAA5246A_REGISTER_R2 2 #define SAA5246A_REGISTER_R3 3 #define SAA5246A_REGISTER_R4 4 #define SAA5246A_REGISTER_R5 5 #define SAA5246A_REGISTER_R6 6 #define SAA5246A_REGISTER_R7 7 #define SAA5246A_REGISTER_R8 8 #define SAA5246A_REGISTER_R9 9 #define SAA5246A_REGISTER_R10 10 #define SAA5246A_REGISTER_R11 11 #define SAA5246A_REGISTER_R11B 11 /* SAA5246A mode registers often autoincrement to the next register. 
Therefore we use variable argument lists. The following macro indicates the end of a command list. */ #define COMMAND_END (-1) /*****************************************************************************/ /* Contents of the mode registers of the SAA5246A */ /*****************************************************************************/ /* Register R0 (Advanced Control) */ #define R0_SELECT_R11 0x00 #define R0_SELECT_R11B 0x01 #define R0_PLL_TIME_CONSTANT_LONG 0x00 #define R0_PLL_TIME_CONSTANT_SHORT 0x02 #define R0_ENABLE_nODD_EVEN_OUTPUT 0x00 #define R0_DISABLE_nODD_EVEN_OUTPUT 0x04 #define R0_ENABLE_HDR_POLL 0x00 #define R0_DISABLE_HDR_POLL 0x10 #define R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x00 #define R0_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x20 #define R0_NO_FREE_RUN_PLL 0x00 #define R0_FREE_RUN_PLL 0x40 #define R0_NO_AUTOMATIC_FASTEXT_PROMPT 0x00 #define R0_AUTOMATIC_FASTEXT_PROMPT 0x80 /* Register R1 (Mode) */ #define R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES 0x00 #define R1_NON_INTERLACED_312_313_LINES 0x01 #define R1_NON_INTERLACED_312_312_LINES 0x02 #define R1_FFB_LEADING_EDGE_IN_FIRST_BROAD_PULSE 0x03 #define R1_FFB_LEADING_EDGE_IN_SECOND_BROAD_PULSE 0x07 #define R1_DEW 0x00 #define R1_FULL_FIELD 0x08 #define R1_EXTENDED_PACKET_DISABLE 0x00 #define R1_EXTENDED_PACKET_ENABLE 0x10 #define R1_DAUS_ALL_ON 0x00 #define R1_DAUS_ALL_OFF 0x20 #define R1_7_BITS_PLUS_PARITY 0x00 #define R1_8_BITS_NO_PARITY 0x40 #define R1_VCS_TO_SCS 0x00 #define R1_NO_VCS_TO_SCS 0x80 /* Register R2 (Page request address) */ #define R2_IN_R3_SELECT_PAGE_HUNDREDS 0x00 #define R2_IN_R3_SELECT_PAGE_TENS 0x01 #define R2_IN_R3_SELECT_PAGE_UNITS 0x02 #define R2_IN_R3_SELECT_HOURS_TENS 0x03 #define R2_IN_R3_SELECT_HOURS_UNITS 0x04 #define R2_IN_R3_SELECT_MINUTES_TENS 0x05 #define R2_IN_R3_SELECT_MINUTES_UNITS 0x06 #define R2_DAU_0 0x00 #define R2_DAU_1 0x10 #define R2_DAU_2 0x20 #define R2_DAU_3 0x30 #define R2_BANK_0 0x00 #define R2_BANK 1 0x40 #define 
R2_HAMMING_CHECK_ON 0x80 #define R2_HAMMING_CHECK_OFF 0x00 /* Register R3 (Page request data) */ #define R3_PAGE_HUNDREDS_0 0x00 #define R3_PAGE_HUNDREDS_1 0x01 #define R3_PAGE_HUNDREDS_2 0x02 #define R3_PAGE_HUNDREDS_3 0x03 #define R3_PAGE_HUNDREDS_4 0x04 #define R3_PAGE_HUNDREDS_5 0x05 #define R3_PAGE_HUNDREDS_6 0x06 #define R3_PAGE_HUNDREDS_7 0x07 #define R3_HOLD_PAGE 0x00 #define R3_UPDATE_PAGE 0x08 #define R3_PAGE_HUNDREDS_DO_NOT_CARE 0x00 #define R3_PAGE_HUNDREDS_DO_CARE 0x10 #define R3_PAGE_TENS_DO_NOT_CARE 0x00 #define R3_PAGE_TENS_DO_CARE 0x10 #define R3_PAGE_UNITS_DO_NOT_CARE 0x00 #define R3_PAGE_UNITS_DO_CARE 0x10 #define R3_HOURS_TENS_DO_NOT_CARE 0x00 #define R3_HOURS_TENS_DO_CARE 0x10 #define R3_HOURS_UNITS_DO_NOT_CARE 0x00 #define R3_HOURS_UNITS_DO_CARE 0x10 #define R3_MINUTES_TENS_DO_NOT_CARE 0x00 #define R3_MINUTES_TENS_DO_CARE 0x10 #define R3_MINUTES_UNITS_DO_NOT_CARE 0x00 #define R3_MINUTES_UNITS_DO_CARE 0x10 /* Register R4 (Display chapter) */ #define R4_DISPLAY_PAGE_0 0x00 #define R4_DISPLAY_PAGE_1 0x01 #define R4_DISPLAY_PAGE_2 0x02 #define R4_DISPLAY_PAGE_3 0x03 #define R4_DISPLAY_PAGE_4 0x04 #define R4_DISPLAY_PAGE_5 0x05 #define R4_DISPLAY_PAGE_6 0x06 #define R4_DISPLAY_PAGE_7 0x07 /* Register R5 (Normal display control) */ #define R5_PICTURE_INSIDE_BOXING_OFF 0x00 #define R5_PICTURE_INSIDE_BOXING_ON 0x01 #define R5_PICTURE_OUTSIDE_BOXING_OFF 0x00 #define R5_PICTURE_OUTSIDE_BOXING_ON 0x02 #define R5_TEXT_INSIDE_BOXING_OFF 0x00 #define R5_TEXT_INSIDE_BOXING_ON 0x04 #define R5_TEXT_OUTSIDE_BOXING_OFF 0x00 #define R5_TEXT_OUTSIDE_BOXING_ON 0x08 #define R5_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00 #define R5_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10 #define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00 #define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20 #define R5_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00 #define R5_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40 #define R5_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00 #define 
R5_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80 /* Register R6 (Newsflash display) */ #define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_ON 0x01 #define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_ON 0x02 #define R6_NEWSFLASH_TEXT_INSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_TEXT_INSIDE_BOXING_ON 0x04 #define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_ON 0x08 #define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10 #define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20 #define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40 #define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00 #define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80 /* Register R7 (Display mode) */ #define R7_BOX_OFF_ROW_0 0x00 #define R7_BOX_ON_ROW_0 0x01 #define R7_BOX_OFF_ROW_1_TO_23 0x00 #define R7_BOX_ON_ROW_1_TO_23 0x02 #define R7_BOX_OFF_ROW_24 0x00 #define R7_BOX_ON_ROW_24 0x04 #define R7_SINGLE_HEIGHT 0x00 #define R7_DOUBLE_HEIGHT 0x08 #define R7_TOP_HALF 0x00 #define R7_BOTTOM_HALF 0x10 #define R7_REVEAL_OFF 0x00 #define R7_REVEAL_ON 0x20 #define R7_CURSER_OFF 0x00 #define R7_CURSER_ON 0x40 #define R7_STATUS_BOTTOM 0x00 #define R7_STATUS_TOP 0x80 /* Register R8 (Active chapter) */ #define R8_ACTIVE_CHAPTER_0 0x00 #define R8_ACTIVE_CHAPTER_1 0x01 #define R8_ACTIVE_CHAPTER_2 0x02 #define R8_ACTIVE_CHAPTER_3 0x03 #define R8_ACTIVE_CHAPTER_4 0x04 #define R8_ACTIVE_CHAPTER_5 0x05 #define R8_ACTIVE_CHAPTER_6 0x06 #define R8_ACTIVE_CHAPTER_7 0x07 #define R8_CLEAR_MEMORY 0x08 #define R8_DO_NOT_CLEAR_MEMORY 0x00 /* Register R9 (Curser row) */ #define R9_CURSER_ROW_0 0x00 #define R9_CURSER_ROW_1 0x01 #define R9_CURSER_ROW_2 0x02 #define R9_CURSER_ROW_25 0x19 /* 
Register R10 (Curser column) */ #define R10_CURSER_COLUMN_0 0x00 #define R10_CURSER_COLUMN_6 0x06 #define R10_CURSER_COLUMN_8 0x08 /*****************************************************************************/ /* Row 25 control data in column 0 to 9 */ /*****************************************************************************/ #define ROW25_COLUMN0_PAGE_UNITS 0x0F #define ROW25_COLUMN1_PAGE_TENS 0x0F #define ROW25_COLUMN2_MINUTES_UNITS 0x0F #define ROW25_COLUMN3_MINUTES_TENS 0x07 #define ROW25_COLUMN3_DELETE_PAGE 0x08 #define ROW25_COLUMN4_HOUR_UNITS 0x0F #define ROW25_COLUMN5_HOUR_TENS 0x03 #define ROW25_COLUMN5_INSERT_HEADLINE 0x04 #define ROW25_COLUMN5_INSERT_SUBTITLE 0x08 #define ROW25_COLUMN6_SUPPRESS_HEADER 0x01 #define ROW25_COLUMN6_UPDATE_PAGE 0x02 #define ROW25_COLUMN6_INTERRUPTED_SEQUENCE 0x04 #define ROW25_COLUMN6_SUPPRESS_DISPLAY 0x08 #define ROW25_COLUMN7_SERIAL_MODE 0x01 #define ROW25_COLUMN7_CHARACTER_SET 0x0E #define ROW25_COLUMN8_PAGE_HUNDREDS 0x07 #define ROW25_COLUMN8_PAGE_NOT_FOUND 0x10 #define ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR 0x20 #define ROW25_COLUMN0_TO_7_HAMMING_ERROR 0x10 /*****************************************************************************/ /* Helper macros for extracting page, hour and minute digits */ /*****************************************************************************/ /* BYTE_POS 0 is at row 0, column 0, BYTE_POS 1 is at row 0, column 1, BYTE_POS 40 is at row 1, column 0, (with NUM_ROWS_PER_PAGE = 40) BYTE_POS 41 is at row 1, column 1, (with NUM_ROWS_PER_PAGE = 40), ... */ #define ROW(BYTE_POS) (BYTE_POS / NUM_ROWS_PER_PAGE) #define COLUMN(BYTE_POS) (BYTE_POS % NUM_ROWS_PER_PAGE) /*****************************************************************************/ /* Helper macros for extracting page, hour and minute digits */ /*****************************************************************************/ /* Macros for extracting hundreds, tens and units of a page number which must be in the range 0 ... 0x799. 
   Note that page is coded in hexadecimal, i.e. 0x123 means page 123.
   page 0x.. means page 8.. */
#define HUNDREDS_OF_PAGE(page) (((page) / 0x100) & 0x7)
#define TENS_OF_PAGE(page)     (((page) / 0x10)  & 0xF)
#define UNITS_OF_PAGE(page)      ((page)         & 0xF)

/* Macros for extracting tens and units of a hour information which
   must be in the range 0 ... 0x24.
   Note that hour is coded in hexadecimal, i.e. 0x12 means 12 hours */
#define TENS_OF_HOUR(hour)  ((hour) / 0x10)
#define UNITS_OF_HOUR(hour) ((hour) & 0xF)

/* Macros for extracting tens and units of a minute information which
   must be in the range 0 ... 0x59.
   Note that minute is coded in hexadecimal, i.e. 0x12 means 12 minutes */
#define TENS_OF_MINUTE(minute)  ((minute) / 0x10)
#define UNITS_OF_MINUTE(minute) ((minute) & 0xF)

#define HOUR_MAX   0x23
#define MINUTE_MAX 0x59
#define PAGE_MAX   0x8FF

/* Per-chip driver state, embedded around the v4l2 subdevice. */
struct saa5246a_device
{
	struct v4l2_subdev sd;
	struct video_device *vdev;
	u8     pgbuf[NUM_DAUS][VTX_VIRTUALSIZE]; /* one page buffer per DAU */
	int    is_searching[NUM_DAUS];           /* DAU currently hunting a page */
	unsigned long in_use;                    /* exclusive-open flag */
	struct mutex lock;
};

static inline struct saa5246a_device *to_dev(struct v4l2_subdev *sd)
{
	return container_of(sd, struct saa5246a_device, sd);
}

static struct video_device saa_template;	/* Declared near bottom */

/*
 *	I2C interfaces
 */

/* Send 'count' data bytes to chip register 'reg' in a single I2C transfer
 * (register number followed by the payload).  Returns 0 on success, -1 on
 * short or failed transfer.
 * NOTE(review): buf is a fixed 64-byte stack buffer and count is not
 * bounds-checked; all current callers pass small argument lists, but a
 * count > 63 would overflow — worth a guard.
 */
static int i2c_sendbuf(struct saa5246a_device *t, int reg, int count, u8 *data)
{
	struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
	char buf[64];

	buf[0] = reg;
	memcpy(buf+1, data, count);

	if (i2c_master_send(client, buf, count + 1) == count + 1)
		return 0;
	return -1;
}

/* Variadic convenience wrapper around i2c_sendbuf(): collects int
 * arguments up to the COMMAND_END (-1) sentinel; the first argument is
 * the starting register, the rest are data bytes.
 */
static int i2c_senddata(struct saa5246a_device *t, ...)
{
	unsigned char buf[64];
	int v;
	int ct = 0;
	va_list argp;
	va_start(argp, t);

	while ((v = va_arg(argp, int)) != -1)
		buf[ct++] = v;
	va_end(argp);
	return i2c_sendbuf(t, buf[0], ct-1, buf+1);
}

/* Get count number of bytes from I²C-device at address adr, store them in buf.
 * Start & stop handshaking is done by this routine, ack will be sent after the
 * last byte to inhibit further sending of data.
Returns -1 if the I2C device didn't send an acknowledge, 0 otherwise */
static int i2c_getdata(struct saa5246a_device *t, int count, u8 *buf)
{
	struct i2c_client *client = v4l2_get_subdevdata(&t->sd);

	if (i2c_master_recv(client, buf, count) != count)
		return -1;
	return 0;
}

/* When a page is found then the not FOUND bit in one of the status registers
 * of the SAA5264A chip is cleared. Unfortunately this bit is not set
 * automatically when a new page is requested. Instead this function must be
 * called after a page has been requested.
 *
 * Return value: 0 if successful
 */
static int saa5246a_clear_found_bit(struct saa5246a_device *t,
	unsigned char dau_no)
{
	unsigned char row_25_column_8;

	/* Read row 25 / column 8 of this DAU's page memory... */
	if (i2c_senddata(t, SAA5246A_REGISTER_R8,
			dau_no | R8_DO_NOT_CLEAR_MEMORY,
			R9_CURSER_ROW_25,
			R10_CURSER_COLUMN_8,
			COMMAND_END) ||
		i2c_getdata(t, 1, &row_25_column_8))
	{
		return -EIO;
	}
	/* ...and write it back with the PAGE_NOT_FOUND bit forced on. */
	row_25_column_8 |= ROW25_COLUMN8_PAGE_NOT_FOUND;
	if (i2c_senddata(t, SAA5246A_REGISTER_R8,
			dau_no | R8_DO_NOT_CLEAR_MEMORY,
			R9_CURSER_ROW_25,
			R10_CURSER_COLUMN_8,
			row_25_column_8,
			COMMAND_END))
	{
		return -EIO;
	}
	return 0;
}

/* Requests one videotext page as described in req. The fields of req are
 * checked and an error is returned if something is invalid.
 *
 * Return value: 0 if successful
 */
static int saa5246a_request_page(struct saa5246a_device *t, vtx_pagereq_t *req)
{
	/* Validate every field the pagemask marks as "do care". */
	if (req->pagemask < 0 || req->pagemask >= PGMASK_MAX)
		return -EINVAL;
	if (req->pagemask & PGMASK_PAGE)
		if (req->page < 0 || req->page > PAGE_MAX)
			return -EINVAL;
	if (req->pagemask & PGMASK_HOUR)
		if (req->hour < 0 || req->hour > HOUR_MAX)
			return -EINVAL;
	if (req->pagemask & PGMASK_MINUTE)
		if (req->minute < 0 || req->minute > MINUTE_MAX)
			return -EINVAL;
	if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
		return -EINVAL;

	/* Program the DAU's page/hour/minute match registers while holding
	 * the page, then re-send the hundreds digit with UPDATE_PAGE to
	 * actually start the acquisition. */
	if (i2c_senddata(t, SAA5246A_REGISTER_R2,
			R2_IN_R3_SELECT_PAGE_HUNDREDS |
			req->pgbuf << 4 | R2_BANK_0 | R2_HAMMING_CHECK_OFF,
			HUNDREDS_OF_PAGE(req->page) |
			R3_HOLD_PAGE |
			(req->pagemask & PG_HUND ?
				R3_PAGE_HUNDREDS_DO_CARE :
				R3_PAGE_HUNDREDS_DO_NOT_CARE),
			TENS_OF_PAGE(req->page) |
			(req->pagemask & PG_TEN ?
				R3_PAGE_TENS_DO_CARE :
				R3_PAGE_TENS_DO_NOT_CARE),
			UNITS_OF_PAGE(req->page) |
			(req->pagemask & PG_UNIT ?
				R3_PAGE_UNITS_DO_CARE :
				R3_PAGE_UNITS_DO_NOT_CARE),
			TENS_OF_HOUR(req->hour) |
			(req->pagemask & HR_TEN ?
				R3_HOURS_TENS_DO_CARE :
				R3_HOURS_TENS_DO_NOT_CARE),
			UNITS_OF_HOUR(req->hour) |
			(req->pagemask & HR_UNIT ?
				R3_HOURS_UNITS_DO_CARE :
				R3_HOURS_UNITS_DO_NOT_CARE),
			TENS_OF_MINUTE(req->minute) |
			(req->pagemask & MIN_TEN ?
				R3_MINUTES_TENS_DO_CARE :
				R3_MINUTES_TENS_DO_NOT_CARE),
			UNITS_OF_MINUTE(req->minute) |
			(req->pagemask & MIN_UNIT ?
				R3_MINUTES_UNITS_DO_CARE :
				R3_MINUTES_UNITS_DO_NOT_CARE),
			COMMAND_END) ||
		i2c_senddata(t, SAA5246A_REGISTER_R2,
			R2_IN_R3_SELECT_PAGE_HUNDREDS |
			req->pgbuf << 4 | R2_BANK_0 | R2_HAMMING_CHECK_OFF,
			HUNDREDS_OF_PAGE(req->page) |
			R3_UPDATE_PAGE |
			(req->pagemask & PG_HUND ?
				R3_PAGE_HUNDREDS_DO_CARE :
				R3_PAGE_HUNDREDS_DO_NOT_CARE),
			COMMAND_END))
	{
		return -EIO;
	}
	t->is_searching[req->pgbuf] = true;
	return 0;
}

/* This routine decodes the page number from the infobits contained in
 * line 25.
 *
 * Parameters:
 *	infobits: must be bits 0 to 9 of column 25
 *
 * Return value: page number coded in hexadecimal, i. e.
page 123 is coded 0x123 */ static inline int saa5246a_extract_pagenum_from_infobits( unsigned char infobits[10]) { int page_hundreds, page_tens, page_units; page_units = infobits[0] & ROW25_COLUMN0_PAGE_UNITS; page_tens = infobits[1] & ROW25_COLUMN1_PAGE_TENS; page_hundreds = infobits[8] & ROW25_COLUMN8_PAGE_HUNDREDS; /* page 0x.. means page 8.. */ if (page_hundreds == 0) page_hundreds = 8; return((page_hundreds << 8) | (page_tens << 4) | page_units); } /* Decodes the hour from the infobits contained in line 25. * * Parameters: * infobits: must be bits 0 to 9 of column 25 * * Return: hour coded in hexadecimal, i. e. 12h is coded 0x12 */ static inline int saa5246a_extract_hour_from_infobits( unsigned char infobits[10]) { int hour_tens, hour_units; hour_units = infobits[4] & ROW25_COLUMN4_HOUR_UNITS; hour_tens = infobits[5] & ROW25_COLUMN5_HOUR_TENS; return((hour_tens << 4) | hour_units); } /* Decodes the minutes from the infobits contained in line 25. * * Parameters: * infobits: must be bits 0 to 9 of column 25 * * Return: minutes coded in hexadecimal, i. e. 10min is coded 0x10 */ static inline int saa5246a_extract_minutes_from_infobits( unsigned char infobits[10]) { int minutes_tens, minutes_units; minutes_units = infobits[2] & ROW25_COLUMN2_MINUTES_UNITS; minutes_tens = infobits[3] & ROW25_COLUMN3_MINUTES_TENS; return((minutes_tens << 4) | minutes_units); } /* Reads the status bits contained in the first 10 columns of the first line * and extracts the information into info. 
*
 * Return value: 0 if successful */
static inline int saa5246a_get_status(struct saa5246a_device *t,
	vtx_pageinfo_t *info, unsigned char dau_no)
{
	unsigned char infobits[10];
	int column;

	if (dau_no >= NUM_DAUS)
		return -EINVAL;

	/* Fetch row 25, columns 0..9 of this DAU's page memory. */
	if (i2c_senddata(t, SAA5246A_REGISTER_R8,
			dau_no | R8_DO_NOT_CLEAR_MEMORY,
			R9_CURSER_ROW_25,
			R10_CURSER_COLUMN_0,
			COMMAND_END) ||
		i2c_getdata(t, 10, infobits))
	{
		return -EIO;
	}

	/* Decode the control data into the userspace-visible page info. */
	info->pagenum = saa5246a_extract_pagenum_from_infobits(infobits);
	info->hour = saa5246a_extract_hour_from_infobits(infobits);
	info->minute = saa5246a_extract_minutes_from_infobits(infobits);
	info->charset = ((infobits[7] & ROW25_COLUMN7_CHARACTER_SET) >> 1);
	info->delete = !!(infobits[3] & ROW25_COLUMN3_DELETE_PAGE);
	info->headline = !!(infobits[5] & ROW25_COLUMN5_INSERT_HEADLINE);
	info->subtitle = !!(infobits[5] & ROW25_COLUMN5_INSERT_SUBTITLE);
	info->supp_header = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_HEADER);
	info->update = !!(infobits[6] & ROW25_COLUMN6_UPDATE_PAGE);
	info->inter_seq = !!(infobits[6] & ROW25_COLUMN6_INTERRUPTED_SEQUENCE);
	info->dis_disp = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_DISPLAY);
	info->serial = !!(infobits[7] & ROW25_COLUMN7_SERIAL_MODE);
	info->notfound = !!(infobits[8] & ROW25_COLUMN8_PAGE_NOT_FOUND);
	info->pblf = !!(infobits[9] & ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR);

	/* Any hamming error in columns 0..7 invalidates the decoded fields. */
	info->hamming = 0;
	for (column = 0; column <= 7; column++) {
		if (infobits[column] & ROW25_COLUMN0_TO_7_HAMMING_ERROR) {
			info->hamming = 1;
			break;
		}
	}

	/* A cleanly received, found page ends the search for this DAU. */
	if (!info->hamming && !info->notfound)
		t->is_searching[dau_no] = false;
	return 0;
}

/* Reads 1 videotext page buffer of the SAA5246A.
 *
 * req is used both as input and as output. It contains information which part
 * must be read. The videotext page is copied into req->buffer.
*
 * Return value: 0 if successful */
static inline int saa5246a_get_page(struct saa5246a_device *t,
	vtx_pagereq_t *req)
{
	int start, end, size;
	char *buf;
	int err;

	/* Sanity-check the requested byte range and buffer number. */
	if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS ||
	    req->start < 0 || req->start > req->end || req->end >= VTX_PAGESIZE)
		return -EINVAL;

	/* Bounce buffer: chip data is read here, then copied to user space. */
	buf = kmalloc(VTX_PAGESIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read "normal" part of page */
	err = -EIO;
	end = min(req->end, VTX_PAGESIZE - 1);
	if (i2c_senddata(t, SAA5246A_REGISTER_R8,
			req->pgbuf | R8_DO_NOT_CLEAR_MEMORY,
			ROW(req->start), COLUMN(req->start), COMMAND_END))
		goto out;
	if (i2c_getdata(t, end - req->start + 1, buf))
		goto out;
	err = -EFAULT;
	if (copy_to_user(req->buffer, buf, end - req->start + 1))
		goto out;

	/* Always get the time from buffer 4, since this stupid SAA5246A only
	 * updates the currently displayed buffer... */
	if (REQ_CONTAINS_TIME(req)) {
		start = max(req->start, POS_TIME_START);
		end = min(req->end, POS_TIME_END);
		size = end - start + 1;
		err = -EINVAL;
		if (size < 0)
			goto out;
		err = -EIO;
		if (i2c_senddata(t, SAA5246A_REGISTER_R8,
				R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
				R9_CURSER_ROW_0, start, COMMAND_END))
			goto out;
		if (i2c_getdata(t, size, buf))
			goto out;
		err = -EFAULT;
		if (copy_to_user(req->buffer + start - req->start, buf, size))
			goto out;
	}
	/* Insert the header from buffer 4 only, if acquisition circuit is
	 * still searching for a page */
	if (REQ_CONTAINS_HEADER(req) && t->is_searching[req->pgbuf]) {
		start = max(req->start, POS_HEADER_START);
		end = min(req->end, POS_HEADER_END);
		size = end - start + 1;
		err = -EINVAL;
		if (size < 0)
			goto out;
		err = -EIO;
		if (i2c_senddata(t, SAA5246A_REGISTER_R8,
				R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
				R9_CURSER_ROW_0, start, COMMAND_END))
			goto out;
		if (i2c_getdata(t, end - start + 1, buf))
			goto out;
		err = -EFAULT;
		if (copy_to_user(req->buffer + start - req->start, buf, size))
			goto out;
	}
	err = 0;
out:
	/* Single exit point frees the bounce buffer on every path. */
	kfree(buf);
	return err;
}

/* Stops the acquisition circuit given in dau_no.
The page buffer associated
 * with this acquisition circuit will no more be updated. The other daus are
 * not affected.
 *
 * Return value: 0 if successful */
static inline int saa5246a_stop_dau(struct saa5246a_device *t,
	unsigned char dau_no)
{
	if (dau_no >= NUM_DAUS)
		return -EINVAL;
	/* HOLD_PAGE with a "don't care" hundreds digit freezes this DAU. */
	if (i2c_senddata(t, SAA5246A_REGISTER_R2,
			R2_IN_R3_SELECT_PAGE_HUNDREDS |
			dau_no << 4 |
			R2_BANK_0 |
			R2_HAMMING_CHECK_OFF,
			R3_PAGE_HUNDREDS_0 |
			R3_HOLD_PAGE |
			R3_PAGE_HUNDREDS_DO_NOT_CARE,
			COMMAND_END))
	{
		return -EIO;
	}
	t->is_searching[dau_no] = false;
	return 0;
}

/* Handles ioctls defined in videotext.h
 *
 * Returns 0 if successful
 */
static long do_saa5246a_ioctl(struct file *file,
				unsigned int cmd, void *arg)
{
	struct saa5246a_device *t = video_drvdata(file);

	switch(cmd) {
	case VTXIOCGETINFO:
	{
		vtx_info_t *info = arg;

		info->version_major = MAJOR_VERSION;
		info->version_minor = MINOR_VERSION;
		info->numpages = NUM_DAUS;
		return 0;
	}
	case VTXIOCCLRPAGE:
	{
		vtx_pagereq_t *req = arg;

		if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
			return -EINVAL;
		/* Blank the cached copy of this page buffer. */
		memset(t->pgbuf[req->pgbuf], ' ', sizeof(t->pgbuf[0]));
		return 0;
	}
	case VTXIOCCLRFOUND:
	{
		vtx_pagereq_t *req = arg;

		if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
			return -EINVAL;
		return(saa5246a_clear_found_bit(t, req->pgbuf));
	}
	case VTXIOCPAGEREQ:
	{
		vtx_pagereq_t *req = arg;

		return(saa5246a_request_page(t, req));
	}
	case VTXIOCGETSTAT:
	{
		vtx_pagereq_t *req = arg;
		vtx_pageinfo_t info;
		int rval;

		if ((rval = saa5246a_get_status(t, &info, req->pgbuf)))
			return rval;
		if(copy_to_user(req->buffer, &info, sizeof(vtx_pageinfo_t)))
			return -EFAULT;
		return 0;
	}
	case VTXIOCGETPAGE:
	{
		vtx_pagereq_t *req = arg;

		return(saa5246a_get_page(t, req));
	}
	case VTXIOCSTOPDAU:
	{
		vtx_pagereq_t *req = arg;

		return(saa5246a_stop_dau(t, req->pgbuf));
	}
	/* These commands are accepted but intentionally do nothing here. */
	case VTXIOCPUTPAGE:
	case VTXIOCSETDISP:
	case VTXIOCPUTSTAT:
		return 0;
	case VTXIOCCLRCACHE:
	{
		return 0;
	}
	case VTXIOCSETVIRT:
	{
		/* I do not know what "virtual mode" means */
		return 0;
	}
	}
	return -EINVAL;
}

/*
 * Translates old vtx IOCTLs
to new ones
 *
 * This keeps new kernel versions compatible with old userspace programs.
 */
static inline unsigned int vtx_fix_command(unsigned int cmd)
{
	switch (cmd) {
	case VTXIOCGETINFO_OLD:
		cmd = VTXIOCGETINFO;
		break;
	case VTXIOCCLRPAGE_OLD:
		cmd = VTXIOCCLRPAGE;
		break;
	case VTXIOCCLRFOUND_OLD:
		cmd = VTXIOCCLRFOUND;
		break;
	case VTXIOCPAGEREQ_OLD:
		cmd = VTXIOCPAGEREQ;
		break;
	case VTXIOCGETSTAT_OLD:
		cmd = VTXIOCGETSTAT;
		break;
	case VTXIOCGETPAGE_OLD:
		cmd = VTXIOCGETPAGE;
		break;
	case VTXIOCSTOPDAU_OLD:
		cmd = VTXIOCSTOPDAU;
		break;
	case VTXIOCPUTPAGE_OLD:
		cmd = VTXIOCPUTPAGE;
		break;
	case VTXIOCSETDISP_OLD:
		cmd = VTXIOCSETDISP;
		break;
	case VTXIOCPUTSTAT_OLD:
		cmd = VTXIOCPUTSTAT;
		break;
	case VTXIOCCLRCACHE_OLD:
		cmd = VTXIOCCLRCACHE;
		break;
	case VTXIOCSETVIRT_OLD:
		cmd = VTXIOCSETVIRT;
		break;
	}
	return cmd;
}

/*
 *	Handle the locking
 */
static long saa5246a_ioctl(struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	struct saa5246a_device *t = video_drvdata(file);
	long err;

	cmd = vtx_fix_command(cmd);
	/* t->lock serializes all ioctl work against this device. */
	mutex_lock(&t->lock);
	err = video_usercopy(file, cmd, arg, do_saa5246a_ioctl);
	mutex_unlock(&t->lock);
	return err;
}

/* Single-open device node: initializes the chip on first open. */
static int saa5246a_open(struct file *file)
{
	struct saa5246a_device *t = video_drvdata(file);

	/* Only one user at a time (bit 0 of in_use is the busy flag). */
	if (test_and_set_bit(0, &t->in_use))
		return -EBUSY;

	if (i2c_senddata(t, SAA5246A_REGISTER_R0,
			R0_SELECT_R11 |
			R0_PLL_TIME_CONSTANT_LONG |
			R0_ENABLE_nODD_EVEN_OUTPUT |
			R0_ENABLE_HDR_POLL |
			R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED |
			R0_NO_FREE_RUN_PLL |
			R0_NO_AUTOMATIC_FASTEXT_PROMPT,
			R1_NON_INTERLACED_312_312_LINES |
			R1_DEW |
			R1_EXTENDED_PACKET_DISABLE |
			R1_DAUS_ALL_ON |
			R1_8_BITS_NO_PARITY |
			R1_VCS_TO_SCS,
			COMMAND_END) ||
		i2c_senddata(t, SAA5246A_REGISTER_R4,
			/* We do not care much for the TV display but
			 * nevertheless we need the currently displayed page
			 * later because only on that page the time is
			 * updated. */
			R4_DISPLAY_PAGE_4,
			COMMAND_END))
	{
		clear_bit(0, &t->in_use);
		return -EIO;
	}
	return 0;
}

static int saa5246a_release(struct file *file)
{
	struct saa5246a_device *t = video_drvdata(file);

	/* Stop all acquisition circuits. */
	i2c_senddata(t, SAA5246A_REGISTER_R1,
		R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES |
		R1_DEW |
		R1_EXTENDED_PACKET_DISABLE |
		R1_DAUS_ALL_OFF |
		R1_8_BITS_NO_PARITY |
		R1_VCS_TO_SCS,
		COMMAND_END);
	clear_bit(0, &t->in_use);
	return 0;
}

static const struct v4l2_file_operations saa_fops = {
	.owner	 = THIS_MODULE,
	.open	 = saa5246a_open,
	.release = saa5246a_release,
	.ioctl	 = saa5246a_ioctl,
};

/* Template copied into each allocated video_device in probe(). */
static struct video_device saa_template = {
	.name	 = "saa5246a",
	.fops	 = &saa_fops,
	.release = video_device_release,
	.minor	 = -1,
};

static int saa5246a_g_chip_ident(struct v4l2_subdev *sd,
		struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip,
			V4L2_IDENT_SAA5246A, 0);
}

static const struct v4l2_subdev_core_ops saa5246a_core_ops = {
	.g_chip_ident = saa5246a_g_chip_ident,
};

static const struct v4l2_subdev_ops saa5246a_ops = {
	.core = &saa5246a_core_ops,
};

/* Allocate driver state, register the v4l2 subdev and the /dev node. */
static int saa5246a_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	int pgbuf;
	int err;
	struct saa5246a_device *t;
	struct v4l2_subdev *sd;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);
	v4l_info(client, "VideoText version %d.%d\n",
			MAJOR_VERSION, MINOR_VERSION);
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return -ENOMEM;
	sd = &t->sd;
	v4l2_i2c_subdev_init(sd, client, &saa5246a_ops);
	mutex_init(&t->lock);

	/* Now create a video4linux device */
	t->vdev = video_device_alloc();
	if (t->vdev == NULL) {
		kfree(t);
		return -ENOMEM;
	}
	memcpy(t->vdev, &saa_template, sizeof(*t->vdev));

	/* Start with blank page buffers and no searches in progress. */
	for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
		memset(t->pgbuf[pgbuf], ' ', sizeof(t->pgbuf[0]));
		t->is_searching[pgbuf] = false;
	}
	video_set_drvdata(t->vdev, t);

	/* Register it */
	err = video_register_device(t->vdev, VFL_TYPE_VTX, -1);
	if (err < 0) {
		video_device_release(t->vdev);
		kfree(t);
		return err;
	}
	return 0;
}

static int saa5246a_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct saa5246a_device *t = to_dev(sd);

	video_unregister_device(t->vdev);
	v4l2_device_unregister_subdev(sd);
	kfree(t);
	return 0;
}

static const struct i2c_device_id saa5246a_id[] = {
	{ "saa5246a", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, saa5246a_id);

static struct v4l2_i2c_driver_data v4l2_i2c_data = {
	.name = "saa5246a",
	.probe = saa5246a_probe,
	.remove = saa5246a_remove,
	.id_table = saa5246a_id,
};
gpl-2.0
pawitp/android_kernel_samsung_i9082
arch/arm/mach-pxa/palmtc.c
1821
13701
/*
 * linux/arch/arm/mach-pxa/palmtc.c
 *
 * Support for the Palm Tungsten|C
 *
 * Author:	Marek Vasut <marek.vasut@gmail.com>
 *
 * Based on work of:
 *		Petr Blaha <p3t3@centrum.cz>
 *		Chetan S. Kumar <shivakumar.chetan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/input.h>
#include <linux/pwm_backlight.h>
#include <linux/gpio.h>
#include <linux/input/matrix_keypad.h>
#include <linux/ucb1400.h>
#include <linux/power_supply.h>
#include <linux/gpio_keys.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/audio.h>
#include <mach/palmtc.h>
#include <mach/mmc.h>
#include <mach/pxafb.h>
#include <mach/mfp-pxa25x.h>
#include <mach/irda.h>
#include <mach/udc.h>
#include <mach/pxa2xx-regs.h>

#include "generic.h"
#include "devices.h"

/******************************************************************************
 * Pin configuration
 ******************************************************************************/
static unsigned long palmtc_pin_config[] __initdata = {
	/* MMC */
	GPIO6_MMC_CLK,
	GPIO8_MMC_CS0,
	GPIO12_GPIO,	/* detect */
	GPIO32_GPIO,	/* power */
	GPIO54_GPIO,	/* r/o switch */

	/* PCMCIA */
	/* Fix: GPIO52_nPCE_1 and GPIO53_nPCE_2 were listed twice in this
	 * table; the redundant second pair has been removed. */
	GPIO52_nPCE_1,
	GPIO53_nPCE_2,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO49_nPWE,
	GPIO48_nPOE,
	GPIO57_nIOIS16,
	GPIO56_nPWAIT,

	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,

	/* IrDA */
	GPIO45_GPIO,	/* ir disable */
	GPIO46_FICP_RXD,
	GPIO47_FICP_TXD,

	/* PWM */
	GPIO17_PWM1_OUT,

	/* USB */
	GPIO4_GPIO,	/* detect */
	GPIO36_GPIO,	/* pullup */

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,

	/* MATRIX KEYPAD */
	GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH,	/* in 0 */
	GPIO9_GPIO | WAKEUP_ON_EDGE_BOTH,	/* in 1 */
	GPIO10_GPIO | WAKEUP_ON_EDGE_BOTH,	/* in 2 */
	GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH,	/* in 3 */
	GPIO18_GPIO | MFP_LPM_DRIVE_LOW,	/* out 0 */
	GPIO19_GPIO | MFP_LPM_DRIVE_LOW,	/* out 1 */
	GPIO20_GPIO | MFP_LPM_DRIVE_LOW,	/* out 2 */
	GPIO21_GPIO | MFP_LPM_DRIVE_LOW,	/* out 3 */
	GPIO22_GPIO | MFP_LPM_DRIVE_LOW,	/* out 4 */
	GPIO23_GPIO | MFP_LPM_DRIVE_LOW,	/* out 5 */
	GPIO24_GPIO | MFP_LPM_DRIVE_LOW,	/* out 6 */
	GPIO25_GPIO | MFP_LPM_DRIVE_LOW,	/* out 7 */
	GPIO26_GPIO | MFP_LPM_DRIVE_LOW,	/* out 8 */
	GPIO27_GPIO | MFP_LPM_DRIVE_LOW,	/* out 9 */
	GPIO79_GPIO | MFP_LPM_DRIVE_LOW,	/* out 10 */
	GPIO80_GPIO | MFP_LPM_DRIVE_LOW,	/* out 11 */

	/* PXA GPIO KEYS */
	GPIO7_GPIO | WAKEUP_ON_EDGE_BOTH,	/* hotsync button on cradle */

	/* MISC */
	GPIO1_RST,	/* reset */
	GPIO2_GPIO,	/* earphone detect */
	GPIO16_GPIO,	/* backlight switch */
};

/******************************************************************************
 * SD/MMC card controller
 ******************************************************************************/
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data palmtc_mci_platform_data = {
	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_power		= GPIO_NR_PALMTC_SD_POWER,
	.gpio_card_ro		= GPIO_NR_PALMTC_SD_READONLY,
	.gpio_card_detect	= GPIO_NR_PALMTC_SD_DETECT_N,
	.detect_delay_ms	= 200,
};

static void __init palmtc_mmc_init(void)
{
	pxa_set_mci_info(&palmtc_mci_platform_data);
}
#else
static inline void palmtc_mmc_init(void) {}
#endif

/******************************************************************************
 * GPIO keys
 ******************************************************************************/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button palmtc_pxa_buttons[] = {
	{KEY_F8, GPIO_NR_PALMTC_HOTSYNC_BUTTON, 1, "HotSync Button", EV_KEY, 1},
};

static struct gpio_keys_platform_data palmtc_pxa_keys_data = {
	.buttons	= palmtc_pxa_buttons,
	.nbuttons	= ARRAY_SIZE(palmtc_pxa_buttons),
};

static struct platform_device palmtc_pxa_keys = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= {
		.platform_data = &palmtc_pxa_keys_data,
	},
};

static void __init palmtc_keys_init(void)
{
	platform_device_register(&palmtc_pxa_keys);
}
#else
static inline void palmtc_keys_init(void) {}
#endif

/******************************************************************************
 * Backlight
 ******************************************************************************/
#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
static int palmtc_backlight_init(struct device *dev)
{
	int ret;

	ret = gpio_request(GPIO_NR_PALMTC_BL_POWER, "BL POWER");
	if (ret)
		goto err;
	ret = gpio_direction_output(GPIO_NR_PALMTC_BL_POWER, 1);
	if (ret)
		goto err2;

	return 0;
err2:
	gpio_free(GPIO_NR_PALMTC_BL_POWER);
err:
	return ret;
}

static int palmtc_backlight_notify(struct device *dev, int brightness)
{
	/* backlight is on when GPIO16 AF0 is high */
	gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness);
	return brightness;
}

static void palmtc_backlight_exit(struct device *dev)
{
	gpio_free(GPIO_NR_PALMTC_BL_POWER);
}

static struct platform_pwm_backlight_data palmtc_backlight_data = {
	.pwm_id		= 1,
	.max_brightness	= PALMTC_MAX_INTENSITY,
	.dft_brightness	= PALMTC_MAX_INTENSITY,
	.pwm_period_ns	= PALMTC_PERIOD_NS,
	.init		= palmtc_backlight_init,
	.notify		= palmtc_backlight_notify,
	.exit		= palmtc_backlight_exit,
};

static struct platform_device palmtc_backlight = {
	.name	= "pwm-backlight",
	.dev	= {
		.parent		= &pxa25x_device_pwm1.dev,
		.platform_data	= &palmtc_backlight_data,
	},
};

static void __init palmtc_pwm_init(void)
{
	platform_device_register(&palmtc_backlight);
}
#else
static inline void palmtc_pwm_init(void) {}
#endif

/******************************************************************************
 * IrDA
 ******************************************************************************/
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
static struct pxaficp_platform_data palmtc_ficp_platform_data = {
	.gpio_pwdown		= GPIO_NR_PALMTC_IR_DISABLE,
	.transceiver_cap	= IR_SIRMODE | IR_OFF,
};

static void __init palmtc_irda_init(void)
{
	pxa_set_ficp_info(&palmtc_ficp_platform_data);
}
#else
static inline void palmtc_irda_init(void) {}
#endif

/******************************************************************************
 * Keyboard
 ******************************************************************************/
#if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE)
static const uint32_t palmtc_matrix_keys[] = {
	KEY(0, 0, KEY_F1),
	KEY(0, 1, KEY_X),
	KEY(0, 2, KEY_POWER),
	KEY(0, 3, KEY_TAB),
	KEY(0, 4, KEY_A),
	KEY(0, 5, KEY_Q),
	KEY(0, 6, KEY_LEFTSHIFT),
	KEY(0, 7, KEY_Z),
	KEY(0, 8, KEY_S),
	KEY(0, 9, KEY_W),
	KEY(0, 10, KEY_E),
	KEY(0, 11, KEY_UP),

	KEY(1, 0, KEY_F2),
	KEY(1, 1, KEY_DOWN),
	KEY(1, 3, KEY_D),
	KEY(1, 4, KEY_C),
	KEY(1, 5, KEY_F),
	KEY(1, 6, KEY_R),
	KEY(1, 7, KEY_SPACE),
	KEY(1, 8, KEY_V),
	KEY(1, 9, KEY_G),
	KEY(1, 10, KEY_T),
	KEY(1, 11, KEY_LEFT),

	KEY(2, 0, KEY_F3),
	KEY(2, 1, KEY_LEFTCTRL),
	KEY(2, 3, KEY_H),
	KEY(2, 4, KEY_Y),
	KEY(2, 5, KEY_N),
	KEY(2, 6, KEY_J),
	KEY(2, 7, KEY_U),
	KEY(2, 8, KEY_M),
	KEY(2, 9, KEY_K),
	KEY(2, 10, KEY_I),
	KEY(2, 11, KEY_RIGHT),

	KEY(3, 0, KEY_F4),
	KEY(3, 1, KEY_ENTER),
	KEY(3, 3, KEY_DOT),
	KEY(3, 4, KEY_L),
	KEY(3, 5, KEY_O),
	KEY(3, 6, KEY_LEFTALT),
	KEY(3, 7, KEY_ENTER),
	KEY(3, 8, KEY_BACKSPACE),
	KEY(3, 9, KEY_P),
	KEY(3, 10, KEY_B),
	KEY(3, 11, KEY_FN),
};

/* Made static: only referenced by palmtc_keypad_platform_data below. */
static const struct matrix_keymap_data palmtc_keymap_data = {
	.keymap		= palmtc_matrix_keys,
	.keymap_size	= ARRAY_SIZE(palmtc_matrix_keys),
};

static const unsigned int palmtc_keypad_row_gpios[] = {
	0, 9, 10, 11
};

static const unsigned int palmtc_keypad_col_gpios[] = {
	18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 79, 80
};

static struct matrix_keypad_platform_data palmtc_keypad_platform_data = {
	.keymap_data		= &palmtc_keymap_data,
	.row_gpios		= palmtc_keypad_row_gpios,
	.num_row_gpios		= ARRAY_SIZE(palmtc_keypad_row_gpios),
	.col_gpios		= palmtc_keypad_col_gpios,
	.num_col_gpios		= ARRAY_SIZE(palmtc_keypad_col_gpios),
	.active_low		= 1,

	.debounce_ms		= 20,
	.col_scan_delay_us	= 5,
};

static struct platform_device palmtc_keyboard = {
	.name	= "matrix-keypad",
	.id	= -1,
	.dev	= {
		.platform_data = &palmtc_keypad_platform_data,
	},
};

static void __init palmtc_mkp_init(void)
{
	platform_device_register(&palmtc_keyboard);
}
#else
static inline void palmtc_mkp_init(void) {}
#endif

/******************************************************************************
 * UDC
 ******************************************************************************/
#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
static struct gpio_vbus_mach_info palmtc_udc_info = {
	.gpio_vbus		= GPIO_NR_PALMTC_USB_DETECT_N,
	.gpio_vbus_inverted	= 1,
	.gpio_pullup		= GPIO_NR_PALMTC_USB_POWER,
};

static struct platform_device palmtc_gpio_vbus = {
	.name	= "gpio-vbus",
	.id	= -1,
	.dev	= {
		.platform_data = &palmtc_udc_info,
	},
};

static void __init palmtc_udc_init(void)
{
	platform_device_register(&palmtc_gpio_vbus);
}	/* Fix: stray ';' after the function body removed. */
#else
static inline void palmtc_udc_init(void) {}
#endif

/******************************************************************************
 * Touchscreen / Battery / GPIO-extender
 ******************************************************************************/
#if defined(CONFIG_TOUCHSCREEN_UCB1400) || \
	defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE)
static struct platform_device palmtc_ucb1400_device = {
	.name	= "ucb1400_core",
	.id	= -1,
};

static void __init palmtc_ts_init(void)
{
	pxa_set_ac97_info(NULL);
	platform_device_register(&palmtc_ucb1400_device);
}
#else
static inline void palmtc_ts_init(void) {}
#endif

/******************************************************************************
 * LEDs
 ******************************************************************************/
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
/* Made static: only referenced by palmtc_gpio_led_info below. */
static struct gpio_led palmtc_gpio_leds[] = {
	{
		.name			= "palmtc:green:user",
		.default_trigger	= "none",
		.gpio			= GPIO_NR_PALMTC_LED_POWER,
		.active_low		= 1,
	}, {
		.name			= "palmtc:vibra:vibra",
		.default_trigger	= "none",
		.gpio			= GPIO_NR_PALMTC_VIBRA_POWER,
		.active_low		= 1,
	}
};

static struct gpio_led_platform_data palmtc_gpio_led_info = {
	.leds		= palmtc_gpio_leds,
	.num_leds	= ARRAY_SIZE(palmtc_gpio_leds),
};

static struct platform_device palmtc_leds = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &palmtc_gpio_led_info,
	}
};

static void __init palmtc_leds_init(void)
{
	platform_device_register(&palmtc_leds);
}
#else
static inline void palmtc_leds_init(void) {}
#endif

/******************************************************************************
 * NOR Flash
 ******************************************************************************/
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct resource palmtc_flash_resource = {
	.start	= PXA_CS0_PHYS,
	.end	= PXA_CS0_PHYS + SZ_16M - 1,
	.flags	= IORESOURCE_MEM,
};

static struct mtd_partition palmtc_flash_parts[] = {
	{
		.name	= "U-Boot Bootloader",
		.offset	= 0x0,
		.size	= 0x40000,
	},
	{
		.name	= "Linux Kernel",
		.offset	= 0x40000,
		.size	= 0x2c0000,
	},
	{
		.name	= "Filesystem",
		.offset	= 0x300000,
		.size	= 0xcc0000,
	},
	{
		.name	= "U-Boot Environment",
		.offset	= 0xfc0000,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data palmtc_flash_data = {
	.width		= 4,
	.parts		= palmtc_flash_parts,
	.nr_parts	= ARRAY_SIZE(palmtc_flash_parts),
};

static struct platform_device palmtc_flash = {
	.name		= "physmap-flash",
	.id		= -1,
	.resource	= &palmtc_flash_resource,
	.num_resources	= 1,
	.dev	= {
		.platform_data	= &palmtc_flash_data,
	},
};

static void __init palmtc_nor_init(void)
{
	platform_device_register(&palmtc_flash);
}
#else
static inline void palmtc_nor_init(void) {}
#endif

/******************************************************************************
 * Framebuffer
 ******************************************************************************/
#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
static struct pxafb_mode_info palmtc_lcd_modes[] = {
	{
		.pixclock	= 115384,
		.xres		= 320,
		.yres		= 320,
		.bpp		= 16,

		.left_margin	= 27,
		.right_margin	= 7,
		.upper_margin	= 7,
		.lower_margin	= 8,

		.hsync_len	= 6,
		.vsync_len	= 1,
	},
};

static struct pxafb_mach_info palmtc_lcd_screen = {
	.modes		= palmtc_lcd_modes,
	.num_modes	= ARRAY_SIZE(palmtc_lcd_modes),
	.lcd_conn	= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
};

static void __init palmtc_lcd_init(void)
{
	pxa_set_fb_info(NULL, &palmtc_lcd_screen);
}
#else
static inline void palmtc_lcd_init(void) {}
#endif

/******************************************************************************
 * Machine init
 ******************************************************************************/
static void __init palmtc_init(void)
{
	pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtc_pin_config));

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);
	pxa_set_hwuart_info(NULL);

	palmtc_mmc_init();
	palmtc_keys_init();
	palmtc_pwm_init();
	palmtc_irda_init();
	palmtc_mkp_init();
	palmtc_udc_init();
	palmtc_ts_init();
	palmtc_nor_init();
	palmtc_lcd_init();
	palmtc_leds_init();
}	/* Fix: stray ';' after the function body removed. */

MACHINE_START(PALMTC, "Palm Tungsten|C")
	.boot_params	= 0xa0000100,
	.map_io		= pxa25x_map_io,
	.init_irq	= pxa25x_init_irq,
	.timer		= &pxa_timer,
	.init_machine	= palmtc_init
MACHINE_END
gpl-2.0
nowster/linux-ubnt-e200
sound/usb/caiaq/device.c
1821
15636
/* * caiaq.c: ALSA driver for caiaq/NativeInstruments devices * * Copyright (c) 2007 Daniel Mack <daniel@caiaq.de> * Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/usb.h> #include <sound/initval.h> #include <sound/core.h> #include <sound/pcm.h> #include "device.h" #include "audio.h" #include "midi.h" #include "control.h" #include "input.h" MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("caiaq USB audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Native Instruments,RigKontrol2}," "{Native Instruments,RigKontrol3}," "{Native Instruments,Kore Controller}," "{Native Instruments,Kore Controller 2}," "{Native Instruments,Audio Kontrol 1}," "{Native Instruments,Audio 2 DJ}," "{Native Instruments,Audio 4 DJ}," "{Native Instruments,Audio 8 DJ}," "{Native Instruments,Traktor Audio 2}," "{Native Instruments,Session I/O}," "{Native Instruments,GuitarRig mobile}," "{Native Instruments,Traktor Kontrol X1}," "{Native Instruments,Traktor Kontrol S4}," "{Native Instruments,Maschine Controller}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = 
SNDRV_DEFAULT_STR; /* Id for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the caiaq sound device"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the caiaq soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable the caiaq soundcard."); enum { SAMPLERATE_44100 = 0, SAMPLERATE_48000 = 1, SAMPLERATE_96000 = 2, SAMPLERATE_192000 = 3, SAMPLERATE_88200 = 4, SAMPLERATE_INVALID = 0xff }; enum { DEPTH_NONE = 0, DEPTH_16 = 1, DEPTH_24 = 2, DEPTH_32 = 3 }; static struct usb_device_id snd_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL3 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AK1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO8DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_SESSIONIO }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_GUITARRIGMOBILE }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO4DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO2DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = 
USB_PID_TRAKTORKONTROLX1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORKONTROLS4 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORAUDIO2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_MASCHINECONTROLLER }, { /* terminator */ } }; static void usb_ep1_command_reply_dispatch (struct urb* urb) { int ret; struct device *dev = &urb->dev->dev; struct snd_usb_caiaqdev *cdev = urb->context; unsigned char *buf = urb->transfer_buffer; if (urb->status || !cdev) { dev_warn(dev, "received EP1 urb->status = %i\n", urb->status); return; } switch(buf[0]) { case EP1_CMD_GET_DEVICE_INFO: memcpy(&cdev->spec, buf+1, sizeof(struct caiaq_device_spec)); cdev->spec.fw_version = le16_to_cpu(cdev->spec.fw_version); dev_dbg(dev, "device spec (firmware %d): audio: %d in, %d out, " "MIDI: %d in, %d out, data alignment %d\n", cdev->spec.fw_version, cdev->spec.num_analog_audio_in, cdev->spec.num_analog_audio_out, cdev->spec.num_midi_in, cdev->spec.num_midi_out, cdev->spec.data_alignment); cdev->spec_received++; wake_up(&cdev->ep1_wait_queue); break; case EP1_CMD_AUDIO_PARAMS: cdev->audio_parm_answer = buf[1]; wake_up(&cdev->ep1_wait_queue); break; case EP1_CMD_MIDI_READ: snd_usb_caiaq_midi_handle_input(cdev, buf[1], buf + 3, buf[2]); break; case EP1_CMD_READ_IO: if (cdev->chip.usb_id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ)) { if (urb->actual_length > sizeof(cdev->control_state)) urb->actual_length = sizeof(cdev->control_state); memcpy(cdev->control_state, buf + 1, urb->actual_length); wake_up(&cdev->ep1_wait_queue); break; } #ifdef CONFIG_SND_USB_CAIAQ_INPUT case EP1_CMD_READ_ERP: case EP1_CMD_READ_ANALOG: snd_usb_caiaq_input_dispatch(cdev, buf, urb->actual_length); #endif break; } cdev->ep1_in_urb.actual_length = 0; ret = usb_submit_urb(&cdev->ep1_in_urb, GFP_ATOMIC); if (ret < 0) 
dev_err(dev, "unable to submit urb. OOM!?\n"); } int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev, unsigned char command, const unsigned char *buffer, int len) { int actual_len; struct usb_device *usb_dev = cdev->chip.dev; if (!usb_dev) return -EIO; if (len > EP1_BUFSIZE - 1) len = EP1_BUFSIZE - 1; if (buffer && len > 0) memcpy(cdev->ep1_out_buf+1, buffer, len); cdev->ep1_out_buf[0] = command; return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1), cdev->ep1_out_buf, len+1, &actual_len, 200); } int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev, unsigned char command, unsigned char bank, const unsigned char *buffer, int len) { int actual_len; struct usb_device *usb_dev = cdev->chip.dev; if (!usb_dev) return -EIO; if (len > EP1_BUFSIZE - 2) len = EP1_BUFSIZE - 2; if (buffer && len > 0) memcpy(cdev->ep1_out_buf+2, buffer, len); cdev->ep1_out_buf[0] = command; cdev->ep1_out_buf[1] = bank; return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1), cdev->ep1_out_buf, len+2, &actual_len, 200); } int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *cdev, int rate, int depth, int bpp) { int ret; char tmp[5]; struct device *dev = caiaqdev_to_dev(cdev); switch (rate) { case 44100: tmp[0] = SAMPLERATE_44100; break; case 48000: tmp[0] = SAMPLERATE_48000; break; case 88200: tmp[0] = SAMPLERATE_88200; break; case 96000: tmp[0] = SAMPLERATE_96000; break; case 192000: tmp[0] = SAMPLERATE_192000; break; default: return -EINVAL; } switch (depth) { case 16: tmp[1] = DEPTH_16; break; case 24: tmp[1] = DEPTH_24; break; default: return -EINVAL; } tmp[2] = bpp & 0xff; tmp[3] = bpp >> 8; tmp[4] = 1; /* packets per microframe */ dev_dbg(dev, "setting audio params: %d Hz, %d bits, %d bpp\n", rate, depth, bpp); cdev->audio_parm_answer = -1; ret = snd_usb_caiaq_send_command(cdev, EP1_CMD_AUDIO_PARAMS, tmp, sizeof(tmp)); if (ret) return ret; if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->audio_parm_answer >= 0, HZ)) return -EPIPE; if 
(cdev->audio_parm_answer != 1) dev_dbg(dev, "unable to set the device's audio params\n"); else cdev->bpp = bpp; return cdev->audio_parm_answer == 1 ? 0 : -EINVAL; } int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *cdev, int digital, int analog, int erp) { char tmp[3] = { digital, analog, erp }; return snd_usb_caiaq_send_command(cdev, EP1_CMD_AUTO_MSG, tmp, sizeof(tmp)); } static void setup_card(struct snd_usb_caiaqdev *cdev) { int ret; char val[4]; struct device *dev = caiaqdev_to_dev(cdev); /* device-specific startup specials */ switch (cdev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2): /* RigKontrol2 - display centered dash ('-') */ val[0] = 0x00; val[1] = 0x00; val[2] = 0x01; snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 3); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3): /* RigKontrol2 - display two centered dashes ('--') */ val[0] = 0x00; val[1] = 0x40; val[2] = 0x40; val[3] = 0x00; snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 4); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1): /* Audio Kontrol 1 - make USB-LED stop blinking */ val[0] = 0x00; snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, val, 1); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): /* Audio 8 DJ - trigger read of current settings */ cdev->control_state[0] = 0xff; snd_usb_caiaq_set_auto_msg(cdev, 1, 0, 0); snd_usb_caiaq_send_command(cdev, EP1_CMD_READ_IO, NULL, 0); if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->control_state[0] != 0xff, HZ)) return; /* fix up some defaults */ if ((cdev->control_state[1] != 2) || (cdev->control_state[2] != 3) || (cdev->control_state[4] != 2)) { cdev->control_state[1] = 2; cdev->control_state[2] = 3; cdev->control_state[4] = 2; snd_usb_caiaq_send_command(cdev, EP1_CMD_WRITE_IO, cdev->control_state, 6); } break; } if (cdev->spec.num_analog_audio_out + cdev->spec.num_analog_audio_in + cdev->spec.num_digital_audio_out + cdev->spec.num_digital_audio_in > 
0) { ret = snd_usb_caiaq_audio_init(cdev); if (ret < 0) dev_err(dev, "Unable to set up audio system (ret=%d)\n", ret); } if (cdev->spec.num_midi_in + cdev->spec.num_midi_out > 0) { ret = snd_usb_caiaq_midi_init(cdev); if (ret < 0) dev_err(dev, "Unable to set up MIDI system (ret=%d)\n", ret); } #ifdef CONFIG_SND_USB_CAIAQ_INPUT ret = snd_usb_caiaq_input_init(cdev); if (ret < 0) dev_err(dev, "Unable to set up input system (ret=%d)\n", ret); #endif /* finally, register the card and all its sub-instances */ ret = snd_card_register(cdev->chip.card); if (ret < 0) { dev_err(dev, "snd_card_register() returned %d\n", ret); snd_card_free(cdev->chip.card); } ret = snd_usb_caiaq_control_init(cdev); if (ret < 0) dev_err(dev, "Unable to set up control system (ret=%d)\n", ret); } static int create_card(struct usb_device *usb_dev, struct usb_interface *intf, struct snd_card **cardp) { int devnum; int err; struct snd_card *card; struct snd_usb_caiaqdev *cdev; for (devnum = 0; devnum < SNDRV_CARDS; devnum++) if (enable[devnum]) break; if (devnum >= SNDRV_CARDS) return -ENODEV; err = snd_card_new(&intf->dev, index[devnum], id[devnum], THIS_MODULE, sizeof(struct snd_usb_caiaqdev), &card); if (err < 0) return err; cdev = caiaqdev(card); cdev->chip.dev = usb_dev; cdev->chip.card = card; cdev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct)); spin_lock_init(&cdev->spinlock); *cardp = card; return 0; } static int init_card(struct snd_usb_caiaqdev *cdev) { char *c, usbpath[32]; struct usb_device *usb_dev = cdev->chip.dev; struct snd_card *card = cdev->chip.card; struct device *dev = caiaqdev_to_dev(cdev); int err, len; if (usb_set_interface(usb_dev, 0, 1) != 0) { dev_err(dev, "can't set alt interface.\n"); return -EIO; } usb_init_urb(&cdev->ep1_in_urb); usb_init_urb(&cdev->midi_out_urb); usb_fill_bulk_urb(&cdev->ep1_in_urb, usb_dev, usb_rcvbulkpipe(usb_dev, 0x1), cdev->ep1_in_buf, EP1_BUFSIZE, usb_ep1_command_reply_dispatch, cdev); 
usb_fill_bulk_urb(&cdev->midi_out_urb, usb_dev, usb_sndbulkpipe(usb_dev, 0x1), cdev->midi_out_buf, EP1_BUFSIZE, snd_usb_caiaq_midi_output_done, cdev); init_waitqueue_head(&cdev->ep1_wait_queue); init_waitqueue_head(&cdev->prepare_wait_queue); if (usb_submit_urb(&cdev->ep1_in_urb, GFP_KERNEL) != 0) return -EIO; err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); if (err) return err; if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) return -ENODEV; usb_string(usb_dev, usb_dev->descriptor.iManufacturer, cdev->vendor_name, CAIAQ_USB_STR_LEN); usb_string(usb_dev, usb_dev->descriptor.iProduct, cdev->product_name, CAIAQ_USB_STR_LEN); strlcpy(card->driver, MODNAME, sizeof(card->driver)); strlcpy(card->shortname, cdev->product_name, sizeof(card->shortname)); strlcpy(card->mixername, cdev->product_name, sizeof(card->mixername)); /* if the id was not passed as module option, fill it with a shortened * version of the product string which does not contain any * whitespaces */ if (*card->id == '\0') { char id[sizeof(card->id)]; memset(id, 0, sizeof(id)); for (c = card->shortname, len = 0; *c && len < sizeof(card->id); c++) if (*c != ' ') id[len++] = *c; snd_card_set_id(card, id); } usb_make_path(usb_dev, usbpath, sizeof(usbpath)); snprintf(card->longname, sizeof(card->longname), "%s %s (%s)", cdev->vendor_name, cdev->product_name, usbpath); setup_card(cdev); return 0; } static int snd_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct snd_card *card = NULL; struct usb_device *usb_dev = interface_to_usbdev(intf); ret = create_card(usb_dev, intf, &card); if (ret < 0) return ret; usb_set_intfdata(intf, card); ret = init_card(caiaqdev(card)); if (ret < 0) { dev_err(&usb_dev->dev, "unable to init card! 
(ret=%d)\n", ret); snd_card_free(card); return ret; } return 0; } static void snd_disconnect(struct usb_interface *intf) { struct snd_card *card = usb_get_intfdata(intf); struct device *dev = intf->usb_dev; struct snd_usb_caiaqdev *cdev; if (!card) return; cdev = caiaqdev(card); dev_dbg(dev, "%s(%p)\n", __func__, intf); snd_card_disconnect(card); #ifdef CONFIG_SND_USB_CAIAQ_INPUT snd_usb_caiaq_input_free(cdev); #endif snd_usb_caiaq_audio_free(cdev); usb_kill_urb(&cdev->ep1_in_urb); usb_kill_urb(&cdev->midi_out_urb); snd_card_free(card); usb_reset_device(interface_to_usbdev(intf)); } MODULE_DEVICE_TABLE(usb, snd_usb_id_table); static struct usb_driver snd_usb_driver = { .name = MODNAME, .probe = snd_probe, .disconnect = snd_disconnect, .id_table = snd_usb_id_table, }; module_usb_driver(snd_usb_driver);
gpl-2.0
ztemt/A476_V1B_5.1_kernel
fs/xfs/xfs_iops.c
2077
30807
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_acl.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" #include "xfs_inode_item.h" #include "xfs_trace.h" #include "xfs_icache.h" #include <linux/capability.h> #include <linux/xattr.h> #include <linux/namei.h> #include <linux/posix_acl.h> #include <linux/security.h> #include <linux/fiemap.h> #include <linux/slab.h> static int xfs_initxattrs( struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct xfs_inode *ip = XFS_I(inode); int error = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { error = xfs_attr_set(ip, xattr->name, xattr->value, xattr->value_len, ATTR_SECURE); if (error < 0) break; } return error; } /* * Hook in SELinux. 
This is not quite correct yet, what we really need * here (as we do for default ACLs) is a mechanism by which creation of * these attrs can be journalled at inode creation time (along with the * inode, of course, such that log replay can't cause these to be lost). */ STATIC int xfs_init_security( struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &xfs_initxattrs, NULL); } static void xfs_dentry_to_name( struct xfs_name *namep, struct dentry *dentry) { namep->name = dentry->d_name.name; namep->len = dentry->d_name.len; } STATIC void xfs_cleanup_inode( struct inode *dir, struct inode *inode, struct dentry *dentry) { struct xfs_name teardown; /* Oh, the horror. * If we can't add the ACL or we fail in * xfs_init_security we must back out. * ENOSPC can hit here, among other things. */ xfs_dentry_to_name(&teardown, dentry); xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); iput(inode); } STATIC int xfs_vn_mknod( struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct xfs_inode *ip = NULL; struct posix_acl *default_acl = NULL; struct xfs_name name; int error; /* * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. 
*/ if (S_ISCHR(mode) || S_ISBLK(mode)) { if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) return -EINVAL; rdev = sysv_encode_dev(rdev); } else { rdev = 0; } if (IS_POSIXACL(dir)) { default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(default_acl)) return PTR_ERR(default_acl); if (!default_acl) mode &= ~current_umask(); } xfs_dentry_to_name(&name, dentry); error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); if (unlikely(error)) goto out_free_acl; inode = VFS_I(ip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; if (default_acl) { error = -xfs_inherit_acl(inode, default_acl); default_acl = NULL; if (unlikely(error)) goto out_cleanup_inode; } d_instantiate(dentry, inode); return -error; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out_free_acl: posix_acl_release(default_acl); return -error; } STATIC int xfs_vn_create( struct inode *dir, struct dentry *dentry, umode_t mode, bool flags) { return xfs_vn_mknod(dir, dentry, mode, 0); } STATIC int xfs_vn_mkdir( struct inode *dir, struct dentry *dentry, umode_t mode) { return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * xfs_vn_lookup( struct inode *dir, struct dentry *dentry, unsigned int flags) { struct xfs_inode *cip; struct xfs_name name; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&name, dentry); error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); d_add(dentry, NULL); return NULL; } return d_splice_alias(VFS_I(cip), dentry); } STATIC struct dentry * xfs_vn_ci_lookup( struct inode *dir, struct dentry *dentry, unsigned int flags) { struct xfs_inode *ip; struct xfs_name xname; struct xfs_name ci_name; struct qstr dname; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&xname, dentry); error = xfs_lookup(XFS_I(dir), &xname, &ip, 
&ci_name); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); /* * call d_add(dentry, NULL) here when d_drop_negative_children * is called in xfs_vn_mknod (ie. allow negative dentries * with CI filesystems). */ return NULL; } /* if exact match, just splice and exit */ if (!ci_name.name) return d_splice_alias(VFS_I(ip), dentry); /* else case-insensitive match... */ dname.name = ci_name.name; dname.len = ci_name.len; dentry = d_add_ci(dentry, VFS_I(ip), &dname); kmem_free(ci_name.name); return dentry; } STATIC int xfs_vn_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = xfs_link(XFS_I(dir), XFS_I(inode), &name); if (unlikely(error)) return -error; ihold(inode); d_instantiate(dentry, inode); return 0; } STATIC int xfs_vn_unlink( struct inode *dir, struct dentry *dentry) { struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); if (error) return error; /* * With unlink, the VFS makes the dentry "negative": no inode, * but still hashed. This is incompatible with case-insensitive * mode, so invalidate (unhash) the dentry in CI-mode. */ if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) d_invalidate(dentry); return 0; } STATIC int xfs_vn_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct xfs_inode *cip = NULL; struct xfs_name name; int error; umode_t mode; mode = S_IFLNK | (irix_symlink_mode ? 
0777 & ~current_umask() : S_IRWXUGO); xfs_dentry_to_name(&name, dentry); error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); if (unlikely(error)) goto out; inode = VFS_I(cip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; d_instantiate(dentry, inode); return 0; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out: return -error; } STATIC int xfs_vn_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, struct dentry *ndentry) { struct inode *new_inode = ndentry->d_inode; struct xfs_name oname; struct xfs_name nname; xfs_dentry_to_name(&oname, odentry); xfs_dentry_to_name(&nname, ndentry); return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), XFS_I(ndir), &nname, new_inode ? XFS_I(new_inode) : NULL); } /* * careful here - this function can get called recursively, so * we need to be very careful about how much stack we use. * uio is kmalloced for this reason... */ STATIC void * xfs_vn_follow_link( struct dentry *dentry, struct nameidata *nd) { char *link; int error = -ENOMEM; link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); if (!link) goto out_err; error = -xfs_readlink(XFS_I(dentry->d_inode), link); if (unlikely(error)) goto out_kfree; nd_set_link(nd, link); return NULL; out_kfree: kfree(link); out_err: nd_set_link(nd, ERR_PTR(error)); return NULL; } STATIC void xfs_vn_put_link( struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); if (!IS_ERR(s)) kfree(s); } STATIC int xfs_vn_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; trace_xfs_getattr(ip); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); stat->size = XFS_ISIZE(ip); stat->dev = inode->i_sb->s_dev; stat->mode = ip->i_d.di_mode; stat->nlink = ip->i_d.di_nlink; stat->uid = ip->i_d.di_uid; stat->gid = ip->i_d.di_gid; stat->ino = ip->i_ino; stat->atime 
= inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; stat->blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: stat->blksize = BLKDEV_IOSIZE; stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: if (XFS_IS_REALTIME_INODE(ip)) { /* * If the file blocks are being allocated from a * realtime volume, then return the inode's realtime * extent size or the realtime volume's extent size. */ stat->blksize = xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; } else stat->blksize = xfs_preferred_iosize(mp); stat->rdev = 0; break; } return 0; } static void xfs_setattr_mode( struct xfs_trans *tp, struct xfs_inode *ip, struct iattr *iattr) { struct inode *inode = VFS_I(ip); umode_t mode = iattr->ia_mode; ASSERT(tp); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; ip->i_d.di_mode &= S_IFMT; ip->i_d.di_mode |= mode & ~S_IFMT; inode->i_mode &= S_IFMT; inode->i_mode |= mode & ~S_IFMT; } int xfs_setattr_nonsize( struct xfs_inode *ip, struct iattr *iattr, int flags) { xfs_mount_t *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_trans_t *tp; int error; uid_t uid = 0, iuid = 0; gid_t gid = 0, igid = 0; struct xfs_dquot *udqp = NULL, *gdqp = NULL; struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT((mask & ATTR_SIZE) == 0); /* * If disk quotas is on, we make sure that the dquots do exist on disk, * before we start any other transactions. Trying to do this later * is messy. We don't care to take a readlock to look at the ids * in inode here, because we can't hold it across the trans_reserve. 
* If the IDs do change before we take the ilock, we're covered * because the i_*dquot fields will get updated anyway. */ if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { uint qflags = 0; if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { uid = iattr->ia_uid; qflags |= XFS_QMOPT_UQUOTA; } else { uid = ip->i_d.di_uid; } if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { gid = iattr->ia_gid; qflags |= XFS_QMOPT_GQUOTA; } else { gid = ip->i_d.di_gid; } /* * We take a reference when we initialize udqp and gdqp, * so it is important that we never blindly double trip on * the same variable. See xfs_create() for an example. */ ASSERT(udqp == NULL); ASSERT(gdqp == NULL); error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), qflags, &udqp, &gdqp); if (error) return error; } tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); if (error) goto out_dqrele; xfs_ilock(ip, XFS_ILOCK_EXCL); /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * These IDs could have changed since we last looked at them. * But, we're assured that if the ownership did change * while we didn't have the inode locked, inode's dquot(s) * would have changed also. */ iuid = ip->i_d.di_uid; igid = ip->i_d.di_gid; gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; /* * Do a quota reservation only if uid/gid is actually * going to change. */ if (XFS_IS_QUOTA_RUNNING(mp) && ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { ASSERT(tp); error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0); if (error) /* out of quota */ goto out_trans_cancel; } } xfs_trans_ijoin(tp, ip, 0); /* * Change file ownership. Must be the owner or privileged. 
*/ if (mask & (ATTR_UID|ATTR_GID)) { /* * CAP_FSETID overrides the following restrictions: * * The set-user-ID and set-group-ID bits of a file will be * cleared upon successful return from chown() */ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && !capable(CAP_FSETID)) ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); /* * Change the ownerships and register quota modifications * in the transaction. */ if (iuid != uid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { ASSERT(mask & ATTR_UID); ASSERT(udqp); olddquot1 = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp); } ip->i_d.di_uid = uid; inode->i_uid = uid; } if (igid != gid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { ASSERT(!XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp); } ip->i_d.di_gid = gid; inode->i_gid = gid; } } /* * Change file access modes. */ if (mask & ATTR_MODE) xfs_setattr_mode(tp, ip, iattr); /* * Change file access or modified times. */ if (mask & ATTR_ATIME) { inode->i_atime = iattr->ia_atime; ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); /* * Release any dquot(s) the inode had kept before chown. */ xfs_qm_dqrele(olddquot1); xfs_qm_dqrele(olddquot2); xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); if (error) return XFS_ERROR(error); /* * XXX(hch): Updating the ACL entries is not atomic vs the i_mode * update. 
We could avoid this with linked transactions * and passing down the transaction pointer all the way * to attr_set. No previous user of the generic * Posix ACL code seems to care about this issue either. */ if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { error = -xfs_acl_chmod(inode); if (error) return XFS_ERROR(error); } return 0; out_trans_cancel: xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); out_dqrele: xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); return error; } /* * Truncate file. Must have write permission and not be a directory. */ int xfs_setattr_size( struct xfs_inode *ip, struct iattr *iattr, int flags) { struct xfs_mount *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_off_t oldsize, newsize; struct xfs_trans *tp; int error; uint lock_flags = 0; uint commit_flags = 0; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT(S_ISREG(ip->i_d.di_mode)); ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); if (!(flags & XFS_ATTR_NOLOCK)) { lock_flags |= XFS_IOLOCK_EXCL; xfs_ilock(ip, lock_flags); } oldsize = inode->i_size; newsize = iattr->ia_size; /* * Short circuit the truncate case for zero length files. */ if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { if (!(mask & (ATTR_CTIME|ATTR_MTIME))) goto out_unlock; /* * Use the regular setattr path to update the timestamps. */ xfs_iunlock(ip, lock_flags); iattr->ia_valid &= ~ATTR_SIZE; return xfs_setattr_nonsize(ip, iattr, 0); } /* * Make sure that the dquots are attached to the inode. */ error = xfs_qm_dqattach(ip, 0); if (error) goto out_unlock; /* * Now we can make the changes. Before we join the inode to the * transaction, take care of the part of the truncation that must be * done without the inode lock. 
This needs to be done before joining * the inode to the transaction, because the inode cannot be unlocked * once it is a part of the transaction. */ if (newsize > oldsize) { /* * Do the first part of growing a file: zero any data in the * last block that is beyond the old EOF. We need to do this * before the inode is joined to the transaction to modify * i_size. */ error = xfs_zero_eof(ip, newsize, oldsize); if (error) goto out_unlock; } /* * We are going to log the inode size change in this transaction so * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files * problem. * * Only flush from the on disk size to the smaller of the in memory * file size or the new size as that's the range we really care about * here and prevents waiting for other data not within the range we * care about here. */ if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ip->i_d.di_size, newsize); if (error) goto out_unlock; } /* * Wait for all direct I/O to complete. */ inode_dio_wait(inode); error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); if (error) goto out_unlock; tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) goto out_trans_cancel; truncate_setsize(inode, newsize); commit_flags = XFS_TRANS_RELEASE_LOG_RES; lock_flags |= XFS_ILOCK_EXCL; xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); /* * Only change the c/mtime if we are changing the size or we are * explicitly asked to change it. This handles the semantic difference * between truncate() and ftruncate() as implemented in the VFS. 
* * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a * special case where we need to update the times despite not having * these flags set. For all other operations the VFS set these flags * explicitly if it wants a timestamp update. */ if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { iattr->ia_ctime = iattr->ia_mtime = current_fs_time(inode->i_sb); mask |= ATTR_CTIME | ATTR_MTIME; } /* * The first thing we do is set the size to new_size permanently on * disk. This way we don't have to worry about anyone ever being able * to look at the data being freed even in the face of a crash. * What we're getting around here is the case where we free a block, it * is allocated to another file, it is written to, and then we crash. * If the new data gets written to the file but the log buffers * containing the free and reallocation don't, then we'd end up with * garbage in the blocks being freed. As long as we make the new size * permanent before actually freeing any blocks it doesn't matter if * they get written to. */ ip->i_d.di_size = newsize; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (newsize <= oldsize) { error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize); if (error) goto out_trans_abort; /* * Truncated "down", so we're removing references to old data * here - if we delay flushing for a long time, we expose * ourselves unduly to the notorious NULL files problem. So, * we mark this inode and flush it when the file is closed, * and do not wait the usual (long) time for writeout. */ xfs_iflags_set(ip, XFS_ITRUNCATED); /* A truncate down always removes post-EOF blocks. */ xfs_inode_clear_eofblocks_tag(ip); } /* * Change file access modes. 
*/ if (mask & ATTR_MODE) xfs_setattr_mode(tp, ip, iattr); if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: if (lock_flags) xfs_iunlock(ip, lock_flags); return error; out_trans_abort: commit_flags |= XFS_TRANS_ABORT; out_trans_cancel: xfs_trans_cancel(tp, commit_flags); goto out_unlock; } STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { if (iattr->ia_valid & ATTR_SIZE) return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); } STATIC int xfs_vn_update_time( struct inode *inode, struct timespec *now, int flags) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; int error; trace_xfs_update_time(ip); tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); if (error) { xfs_trans_cancel(tp, 0); return -error; } xfs_ilock(ip, XFS_ILOCK_EXCL); if (flags & S_CTIME) { inode->i_ctime = *now; ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec; ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec; } if (flags & S_MTIME) { inode->i_mtime = *now; ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec; ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec; } if (flags & S_ATIME) { inode->i_atime = *now; ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec; ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec; } xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); return -xfs_trans_commit(tp, 0); } #define 
XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) /* * Call fiemap helper to fill in user data. * Returns positive errors to xfs_getbmap. */ STATIC int xfs_fiemap_format( void **arg, struct getbmapx *bmv, int *full) { int error; struct fiemap_extent_info *fieinfo = *arg; u32 fiemap_flags = 0; u64 logical, physical, length; /* Do nothing for a hole */ if (bmv->bmv_block == -1LL) return 0; logical = BBTOB(bmv->bmv_offset); physical = BBTOB(bmv->bmv_block); length = BBTOB(bmv->bmv_length); if (bmv->bmv_oflags & BMV_OF_PREALLOC) fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { fiemap_flags |= FIEMAP_EXTENT_DELALLOC; physical = 0; /* no block yet */ } if (bmv->bmv_oflags & BMV_OF_LAST) fiemap_flags |= FIEMAP_EXTENT_LAST; error = fiemap_fill_next_extent(fieinfo, logical, physical, length, fiemap_flags); if (error > 0) { error = 0; *full = 1; /* user array now full */ } return -error; } STATIC int xfs_vn_fiemap( struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 length) { xfs_inode_t *ip = XFS_I(inode); struct getbmapx bm; int error; error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); if (error) return error; /* Set up bmap header for xfs internal routine */ bm.bmv_offset = BTOBB(start); /* Special case for whole file */ if (length == FIEMAP_MAX_OFFSET) bm.bmv_length = -1LL; else bm.bmv_length = BTOBB(length); /* We add one because in getbmap world count includes the header */ bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM : fieinfo->fi_extents_max + 1; bm.bmv_count = min_t(__s32, bm.bmv_count, (PAGE_SIZE * 16 / sizeof(struct getbmapx))); bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) bm.bmv_iflags |= BMV_IF_ATTRFORK; if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) bm.bmv_iflags |= BMV_IF_DELALLOC; error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); if (error) return -error; return 0; } static const struct inode_operations xfs_inode_operations = { .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .fiemap = xfs_vn_fiemap, .update_time = xfs_vn_update_time, }; static const struct inode_operations xfs_dir_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. */ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .update_time = xfs_vn_update_time, }; static const struct inode_operations xfs_dir_ci_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_ci_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. 
*/ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .update_time = xfs_vn_update_time, }; static const struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = xfs_vn_follow_link, .put_link = xfs_vn_put_link, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .update_time = xfs_vn_update_time, }; STATIC void xfs_diflags_to_iflags( struct inode *inode, struct xfs_inode *ip) { if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else inode->i_flags &= ~S_APPEND; if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) inode->i_flags |= S_SYNC; else inode->i_flags &= ~S_SYNC; if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; } /* * Initialize the Linux inode, set up the operation vectors and * unlock the inode. * * When reading existing inodes from disk this is called directly * from xfs_iget, when creating a new inode it is called from * xfs_ialloc after setting up the inode. * * We are always called with an uninitialised linux inode here. * We need to initialise the necessary fields and take a reference * on it. 
*/ void xfs_setup_inode( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; inode->i_ino = ip->i_ino; inode->i_state = I_NEW; inode_sb_list_add(inode); /* make the inode look hashed for the writeback code */ hlist_add_fake(&inode->i_hash); inode->i_mode = ip->i_d.di_mode; set_nlink(inode, ip->i_d.di_nlink); inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: inode->i_rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: inode->i_rdev = 0; break; } inode->i_generation = ip->i_d.di_gen; i_size_write(inode, ip->i_d.di_size); inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_diflags_to_iflags(inode, ip); switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: inode->i_op = &xfs_symlink_inode_operations; if (!(ip->i_df.if_flags & XFS_IFINLINE)) inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } /* * If there is no attribute fork no ACL can exist on this inode, * and it can't have any file capabilities attached to it either. */ if (!XFS_IFORK_Q(ip)) { inode_has_no_xattr(inode); cache_no_acl(inode); } xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(inode); }
gpl-2.0
LG-V10/CAF_msm_3.18.y
tools/perf/util/sort.c
2077
24004
#include "sort.h" #include "hist.h" regex_t parent_regex; const char default_parent_pattern[] = "^sys_|^do_page_fault"; const char *parent_pattern = default_parent_pattern; const char default_sort_order[] = "comm,dso,symbol"; const char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; int sort__has_sym = 0; int sort__branch_mode = -1; /* -1 = means not set */ enum sort_type sort__first_dimension; LIST_HEAD(hist_entry__sort_list); static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) { int n; va_list ap; va_start(ap, fmt); n = vsnprintf(bf, size, fmt, ap); if (symbol_conf.field_sep && n > 0) { char *sep = bf; while (1) { sep = strchr(sep, *symbol_conf.field_sep); if (sep == NULL) break; *sep = '.'; } } va_end(ap); if (n >= (int)size) return size - 1; return n; } static int64_t cmp_null(void *l, void *r) { if (!l && !r) return 0; else if (!l) return -1; else return 1; } /* --sort pid */ static int64_t sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) { return right->thread->pid - left->thread->pid; } static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%*s:%5d", width - 6, self->thread->comm ?: "", self->thread->pid); } struct sort_entry sort_thread = { .se_header = "Command: Pid", .se_cmp = sort__thread_cmp, .se_snprintf = hist_entry__thread_snprintf, .se_width_idx = HISTC_THREAD, }; /* --sort comm */ static int64_t sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) { return right->thread->pid - left->thread->pid; } static int64_t sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) { char *comm_l = left->thread->comm; char *comm_r = right->thread->comm; if (!comm_l || !comm_r) return cmp_null(comm_l, comm_r); return strcmp(comm_l, comm_r); } static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return 
repsep_snprintf(bf, size, "%*s", width, self->thread->comm); } struct sort_entry sort_comm = { .se_header = "Command", .se_cmp = sort__comm_cmp, .se_collapse = sort__comm_collapse, .se_snprintf = hist_entry__comm_snprintf, .se_width_idx = HISTC_COMM, }; /* --sort dso */ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) { struct dso *dso_l = map_l ? map_l->dso : NULL; struct dso *dso_r = map_r ? map_r->dso : NULL; const char *dso_name_l, *dso_name_r; if (!dso_l || !dso_r) return cmp_null(dso_l, dso_r); if (verbose) { dso_name_l = dso_l->long_name; dso_name_r = dso_r->long_name; } else { dso_name_l = dso_l->short_name; dso_name_r = dso_r->short_name; } return strcmp(dso_name_l, dso_name_r); } static int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { return _sort__dso_cmp(left->ms.map, right->ms.map); } static int _hist_entry__dso_snprintf(struct map *map, char *bf, size_t size, unsigned int width) { if (map && map->dso) { const char *dso_name = !verbose ? 
map->dso->short_name : map->dso->long_name; return repsep_snprintf(bf, size, "%-*s", width, dso_name); } return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); } static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); } struct sort_entry sort_dso = { .se_header = "Shared Object", .se_cmp = sort__dso_cmp, .se_snprintf = hist_entry__dso_snprintf, .se_width_idx = HISTC_DSO, }; /* --sort symbol */ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) { u64 ip_l, ip_r; if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); if (sym_l == sym_r) return 0; ip_l = sym_l->start; ip_r = sym_r->start; return (int64_t)(ip_r - ip_l); } static int64_t sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { if (!left->ms.sym && !right->ms.sym) return right->level - left->level; return _sort__sym_cmp(left->ms.sym, right->ms.sym); } static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, u64 ip, char level, char *bf, size_t size, unsigned int width) { size_t ret = 0; if (verbose) { char o = map ? 
dso__symtab_origin(map->dso) : '!'; ret += repsep_snprintf(bf, size, "%-#*llx %c ", BITS_PER_LONG / 4, ip, o); } ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); if (sym && map) { if (map->type == MAP__VARIABLE) { ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", ip - map->unmap_ip(map, sym->start)); ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); } else { ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, sym->name); } } else { size_t len = BITS_PER_LONG / 4; ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, ip); ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); } return ret; } static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, self->level, bf, size, width); } struct sort_entry sort_sym = { .se_header = "Symbol", .se_cmp = sort__sym_cmp, .se_snprintf = hist_entry__sym_snprintf, .se_width_idx = HISTC_SYMBOL, }; /* --sort srcline */ static int64_t sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) { return (int64_t)(right->ip - left->ip); } static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width __maybe_unused) { FILE *fp = NULL; char cmd[PATH_MAX + 2], *path = self->srcline, *nl; size_t line_len; if (path != NULL) goto out_path; if (!self->ms.map) goto out_ip; if (!strncmp(self->ms.map->dso->long_name, "/tmp/perf-", 10)) goto out_ip; snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64, self->ms.map->dso->long_name, self->ip); fp = popen(cmd, "r"); if (!fp) goto out_ip; if (getline(&path, &line_len, fp) < 0 || !line_len) goto out_ip; self->srcline = strdup(path); if (self->srcline == NULL) goto out_ip; nl = strchr(self->srcline, '\n'); if (nl != NULL) *nl = '\0'; path = self->srcline; out_path: if (fp) pclose(fp); 
return repsep_snprintf(bf, size, "%s", path); out_ip: if (fp) pclose(fp); return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); } struct sort_entry sort_srcline = { .se_header = "Source:Line", .se_cmp = sort__srcline_cmp, .se_snprintf = hist_entry__srcline_snprintf, .se_width_idx = HISTC_SRCLINE, }; /* --sort parent */ static int64_t sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) { struct symbol *sym_l = left->parent; struct symbol *sym_r = right->parent; if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); return strcmp(sym_l->name, sym_r->name); } static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%-*s", width, self->parent ? self->parent->name : "[other]"); } struct sort_entry sort_parent = { .se_header = "Parent symbol", .se_cmp = sort__parent_cmp, .se_snprintf = hist_entry__parent_snprintf, .se_width_idx = HISTC_PARENT, }; /* --sort cpu */ static int64_t sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) { return right->cpu - left->cpu; } static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%*d", width, self->cpu); } struct sort_entry sort_cpu = { .se_header = "CPU", .se_cmp = sort__cpu_cmp, .se_snprintf = hist_entry__cpu_snprintf, .se_width_idx = HISTC_CPU, }; /* sort keys for branch stacks */ static int64_t sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) { return _sort__dso_cmp(left->branch_info->from.map, right->branch_info->from.map); } static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return _hist_entry__dso_snprintf(self->branch_info->from.map, bf, size, width); } static int64_t sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) { return _sort__dso_cmp(left->branch_info->to.map, right->branch_info->to.map); } static int 
hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return _hist_entry__dso_snprintf(self->branch_info->to.map, bf, size, width); } static int64_t sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) { struct addr_map_symbol *from_l = &left->branch_info->from; struct addr_map_symbol *from_r = &right->branch_info->from; if (!from_l->sym && !from_r->sym) return right->level - left->level; return _sort__sym_cmp(from_l->sym, from_r->sym); } static int64_t sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) { struct addr_map_symbol *to_l = &left->branch_info->to; struct addr_map_symbol *to_r = &right->branch_info->to; if (!to_l->sym && !to_r->sym) return right->level - left->level; return _sort__sym_cmp(to_l->sym, to_r->sym); } static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { struct addr_map_symbol *from = &self->branch_info->from; return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, self->level, bf, size, width); } static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { struct addr_map_symbol *to = &self->branch_info->to; return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, self->level, bf, size, width); } struct sort_entry sort_dso_from = { .se_header = "Source Shared Object", .se_cmp = sort__dso_from_cmp, .se_snprintf = hist_entry__dso_from_snprintf, .se_width_idx = HISTC_DSO_FROM, }; struct sort_entry sort_dso_to = { .se_header = "Target Shared Object", .se_cmp = sort__dso_to_cmp, .se_snprintf = hist_entry__dso_to_snprintf, .se_width_idx = HISTC_DSO_TO, }; struct sort_entry sort_sym_from = { .se_header = "Source Symbol", .se_cmp = sort__sym_from_cmp, .se_snprintf = hist_entry__sym_from_snprintf, .se_width_idx = HISTC_SYMBOL_FROM, }; struct sort_entry sort_sym_to = { .se_header = "Target Symbol", .se_cmp = sort__sym_to_cmp, .se_snprintf = 
hist_entry__sym_to_snprintf, .se_width_idx = HISTC_SYMBOL_TO, }; static int64_t sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) { const unsigned char mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; const unsigned char p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; return mp || p; } static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width){ static const char *out = "N/A"; if (self->branch_info->flags.predicted) out = "N"; else if (self->branch_info->flags.mispred) out = "Y"; return repsep_snprintf(bf, size, "%-*s", width, out); } /* --sort daddr_sym */ static int64_t sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) { uint64_t l = 0, r = 0; if (left->mem_info) l = left->mem_info->daddr.addr; if (right->mem_info) r = right->mem_info->daddr.addr; return (int64_t)(r - l); } static int hist_entry__daddr_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { uint64_t addr = 0; struct map *map = NULL; struct symbol *sym = NULL; if (self->mem_info) { addr = self->mem_info->daddr.addr; map = self->mem_info->daddr.map; sym = self->mem_info->daddr.sym; } return _hist_entry__sym_snprintf(map, sym, addr, self->level, bf, size, width); } static int64_t sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) { struct map *map_l = NULL; struct map *map_r = NULL; if (left->mem_info) map_l = left->mem_info->daddr.map; if (right->mem_info) map_r = right->mem_info->daddr.map; return _sort__dso_cmp(map_l, map_r); } static int hist_entry__dso_daddr_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { struct map *map = NULL; if (self->mem_info) map = self->mem_info->daddr.map; return _hist_entry__dso_snprintf(map, bf, size, width); } static int64_t sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) { union perf_mem_data_src data_src_l; union 
perf_mem_data_src data_src_r; if (left->mem_info) data_src_l = left->mem_info->data_src; else data_src_l.mem_lock = PERF_MEM_LOCK_NA; if (right->mem_info) data_src_r = right->mem_info->data_src; else data_src_r.mem_lock = PERF_MEM_LOCK_NA; return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); } static int hist_entry__locked_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { const char *out; u64 mask = PERF_MEM_LOCK_NA; if (self->mem_info) mask = self->mem_info->data_src.mem_lock; if (mask & PERF_MEM_LOCK_NA) out = "N/A"; else if (mask & PERF_MEM_LOCK_LOCKED) out = "Yes"; else out = "No"; return repsep_snprintf(bf, size, "%-*s", width, out); } static int64_t sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) { union perf_mem_data_src data_src_l; union perf_mem_data_src data_src_r; if (left->mem_info) data_src_l = left->mem_info->data_src; else data_src_l.mem_dtlb = PERF_MEM_TLB_NA; if (right->mem_info) data_src_r = right->mem_info->data_src; else data_src_r.mem_dtlb = PERF_MEM_TLB_NA; return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); } static const char * const tlb_access[] = { "N/A", "HIT", "MISS", "L1", "L2", "Walker", "Fault", }; #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *)) static int hist_entry__tlb_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { char out[64]; size_t sz = sizeof(out) - 1; /* -1 for null termination */ size_t l = 0, i; u64 m = PERF_MEM_TLB_NA; u64 hit, miss; out[0] = '\0'; if (self->mem_info) m = self->mem_info->data_src.mem_dtlb; hit = m & PERF_MEM_TLB_HIT; miss = m & PERF_MEM_TLB_MISS; /* already taken care of */ m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS); for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) { if (!(m & 0x1)) continue; if (l) { strcat(out, " or "); l += 4; } strncat(out, tlb_access[i], sz - l); l += strlen(tlb_access[i]); } if (*out == '\0') strcpy(out, "N/A"); if (hit) strncat(out, " hit", sz - l); if (miss) 
strncat(out, " miss", sz - l); return repsep_snprintf(bf, size, "%-*s", width, out); } static int64_t sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) { union perf_mem_data_src data_src_l; union perf_mem_data_src data_src_r; if (left->mem_info) data_src_l = left->mem_info->data_src; else data_src_l.mem_lvl = PERF_MEM_LVL_NA; if (right->mem_info) data_src_r = right->mem_info->data_src; else data_src_r.mem_lvl = PERF_MEM_LVL_NA; return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); } static const char * const mem_lvl[] = { "N/A", "HIT", "MISS", "L1", "LFB", "L2", "L3", "Local RAM", "Remote RAM (1 hop)", "Remote RAM (2 hops)", "Remote Cache (1 hop)", "Remote Cache (2 hops)", "I/O", "Uncached", }; #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *)) static int hist_entry__lvl_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { char out[64]; size_t sz = sizeof(out) - 1; /* -1 for null termination */ size_t i, l = 0; u64 m = PERF_MEM_LVL_NA; u64 hit, miss; if (self->mem_info) m = self->mem_info->data_src.mem_lvl; out[0] = '\0'; hit = m & PERF_MEM_LVL_HIT; miss = m & PERF_MEM_LVL_MISS; /* already taken care of */ m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS); for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) { if (!(m & 0x1)) continue; if (l) { strcat(out, " or "); l += 4; } strncat(out, mem_lvl[i], sz - l); l += strlen(mem_lvl[i]); } if (*out == '\0') strcpy(out, "N/A"); if (hit) strncat(out, " hit", sz - l); if (miss) strncat(out, " miss", sz - l); return repsep_snprintf(bf, size, "%-*s", width, out); } static int64_t sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) { union perf_mem_data_src data_src_l; union perf_mem_data_src data_src_r; if (left->mem_info) data_src_l = left->mem_info->data_src; else data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; if (right->mem_info) data_src_r = right->mem_info->data_src; else data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; return (int64_t)(data_src_r.mem_snoop - 
data_src_l.mem_snoop); } static const char * const snoop_access[] = { "N/A", "None", "Miss", "Hit", "HitM", }; #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *)) static int hist_entry__snoop_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { char out[64]; size_t sz = sizeof(out) - 1; /* -1 for null termination */ size_t i, l = 0; u64 m = PERF_MEM_SNOOP_NA; out[0] = '\0'; if (self->mem_info) m = self->mem_info->data_src.mem_snoop; for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) { if (!(m & 0x1)) continue; if (l) { strcat(out, " or "); l += 4; } strncat(out, snoop_access[i], sz - l); l += strlen(snoop_access[i]); } if (*out == '\0') strcpy(out, "N/A"); return repsep_snprintf(bf, size, "%-*s", width, out); } struct sort_entry sort_mispredict = { .se_header = "Branch Mispredicted", .se_cmp = sort__mispredict_cmp, .se_snprintf = hist_entry__mispredict_snprintf, .se_width_idx = HISTC_MISPREDICT, }; static u64 he_weight(struct hist_entry *he) { return he->stat.nr_events ? 
he->stat.weight / he->stat.nr_events : 0; } static int64_t sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) { return he_weight(left) - he_weight(right); } static int hist_entry__local_weight_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%-*llu", width, he_weight(self)); } struct sort_entry sort_local_weight = { .se_header = "Local Weight", .se_cmp = sort__local_weight_cmp, .se_snprintf = hist_entry__local_weight_snprintf, .se_width_idx = HISTC_LOCAL_WEIGHT, }; static int64_t sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) { return left->stat.weight - right->stat.weight; } static int hist_entry__global_weight_snprintf(struct hist_entry *self, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%-*llu", width, self->stat.weight); } struct sort_entry sort_global_weight = { .se_header = "Weight", .se_cmp = sort__global_weight_cmp, .se_snprintf = hist_entry__global_weight_snprintf, .se_width_idx = HISTC_GLOBAL_WEIGHT, }; struct sort_entry sort_mem_daddr_sym = { .se_header = "Data Symbol", .se_cmp = sort__daddr_cmp, .se_snprintf = hist_entry__daddr_snprintf, .se_width_idx = HISTC_MEM_DADDR_SYMBOL, }; struct sort_entry sort_mem_daddr_dso = { .se_header = "Data Object", .se_cmp = sort__dso_daddr_cmp, .se_snprintf = hist_entry__dso_daddr_snprintf, .se_width_idx = HISTC_MEM_DADDR_SYMBOL, }; struct sort_entry sort_mem_locked = { .se_header = "Locked", .se_cmp = sort__locked_cmp, .se_snprintf = hist_entry__locked_snprintf, .se_width_idx = HISTC_MEM_LOCKED, }; struct sort_entry sort_mem_tlb = { .se_header = "TLB access", .se_cmp = sort__tlb_cmp, .se_snprintf = hist_entry__tlb_snprintf, .se_width_idx = HISTC_MEM_TLB, }; struct sort_entry sort_mem_lvl = { .se_header = "Memory access", .se_cmp = sort__lvl_cmp, .se_snprintf = hist_entry__lvl_snprintf, .se_width_idx = HISTC_MEM_LVL, }; struct sort_entry sort_mem_snoop = { .se_header = 
"Snoop", .se_cmp = sort__snoop_cmp, .se_snprintf = hist_entry__snoop_snprintf, .se_width_idx = HISTC_MEM_SNOOP, }; struct sort_dimension { const char *name; struct sort_entry *entry; int taken; }; #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_PID, "pid", sort_thread), DIM(SORT_COMM, "comm", sort_comm), DIM(SORT_DSO, "dso", sort_dso), DIM(SORT_SYM, "symbol", sort_sym), DIM(SORT_PARENT, "parent", sort_parent), DIM(SORT_CPU, "cpu", sort_cpu), DIM(SORT_SRCLINE, "srcline", sort_srcline), DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), }; #undef DIM #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } static struct sort_dimension bstack_sort_dimensions[] = { DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), DIM(SORT_DSO_TO, "dso_to", sort_dso_to), DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), }; #undef DIM int sort_dimension__add(const char *tok) { unsigned int i; for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { struct sort_dimension *sd = &common_sort_dimensions[i]; if (strncasecmp(tok, sd->name, strlen(tok))) continue; if (sd->entry == &sort_parent) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { char err[BUFSIZ]; regerror(ret, &parent_regex, err, sizeof(err)); pr_err("Invalid regex: %s\n%s", parent_pattern, err); return -EINVAL; } sort__has_parent = 1; } else if (sd->entry == &sort_sym || sd->entry == &sort_sym_from || sd->entry == &sort_sym_to || sd->entry == 
&sort_mem_daddr_sym) { sort__has_sym = 1; } if (sd->taken) return 0; if (sd->entry->se_collapse) sort__need_collapse = 1; if (list_empty(&hist_entry__sort_list)) sort__first_dimension = i; list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; return 0; } for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { struct sort_dimension *sd = &bstack_sort_dimensions[i]; if (strncasecmp(tok, sd->name, strlen(tok))) continue; if (sort__branch_mode != 1) return -EINVAL; if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) sort__has_sym = 1; if (sd->taken) return 0; if (sd->entry->se_collapse) sort__need_collapse = 1; if (list_empty(&hist_entry__sort_list)) sort__first_dimension = i + __SORT_BRANCH_STACK; list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; return 0; } return -ESRCH; } int setup_sorting(void) { char *tmp, *tok, *str = strdup(sort_order); int ret = 0; if (str == NULL) { error("Not enough memory to setup sort keys"); return -ENOMEM; } for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { ret = sort_dimension__add(tok); if (ret == -EINVAL) { error("Invalid --sort key: `%s'", tok); break; } else if (ret == -ESRCH) { error("Unknown --sort key: `%s'", tok); break; } } free(str); return ret; } void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, const char *list_name, FILE *fp) { if (list && strlist__nr_entries(list) == 1) { if (fp != NULL) fprintf(fp, "# %s: %s\n", list_name, strlist__entry(list, 0)->s); self->elide = true; } }
gpl-2.0
fortuna-dev/android_kernel_samsung_fortuna-common
tools/perf/ui/gtk/hists.c
2077
7332
#include "../evlist.h"
#include "../cache.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
#include "../helpline.h"
#include "gtk.h"

#define MAX_COLUMNS 32

/*
 * Print a percentage into @buf, wrapped in the Pango color markup returned
 * by perf_gtk__get_percent_color() (when non-NULL).  Returns the number of
 * characters written.
 */
static int __percent_color_snprintf(char *buf, size_t size, double percent)
{
	int ret = 0;
	const char *markup;

	markup = perf_gtk__get_percent_color(percent);
	if (markup)
		/*
		 * Print the markup through "%s" rather than using it as the
		 * format string itself, so a stray '%' in the markup can
		 * never be misread as a conversion specification.
		 */
		ret += scnprintf(buf, size, "%s", markup);

	ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);

	if (markup)
		ret += scnprintf(buf + ret, size - ret, "</span>");

	return ret;
}

/*
 * Format @he's field (fetched by @get_field) as a colorized percentage of
 * the hists' total period into hpp->buf.  For event groups, additionally
 * emit one column per group member, zero-filling members with no sample.
 * Returns the total number of characters written.
 */
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
			    u64 (*get_field)(struct hist_entry *))
{
	int ret;
	double percent = 0.0;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (hists->stats.total_period)
		percent = 100.0 * get_field(he) / hists->stats.total_period;

	ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = pair->hists->stats.total_period;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				ret += __percent_color_snprintf(hpp->buf + ret,
								hpp->size - ret,
								0.0);
			}

			/* guard a zero total so we never print inf/nan */
			percent = total ? 100.0 * period / total : 0.0;
			ret += __percent_color_snprintf(hpp->buf + ret,
							hpp->size - ret,
							percent);

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			ret += __percent_color_snprintf(hpp->buf + ret,
							hpp->size - ret,
							0.0);
		}
	}

	return ret;
}

#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int
perf_gtk__hpp_color_##_type(struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ return __hpp__color_fmt(hpp, he, he_get_##_field); \ } __HPP_COLOR_PERCENT_FN(overhead, period) __HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) __HPP_COLOR_PERCENT_FN(overhead_us, period_us) __HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) #undef __HPP_COLOR_PERCENT_FN void perf_gtk__init_hpp(void) { perf_hpp__column_enable(PERF_HPP__OVERHEAD); perf_hpp__init(); perf_hpp__format[PERF_HPP__OVERHEAD].color = perf_gtk__hpp_color_overhead; perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = perf_gtk__hpp_color_overhead_sys; perf_hpp__format[PERF_HPP__OVERHEAD_US].color = perf_gtk__hpp_color_overhead_us; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = perf_gtk__hpp_color_overhead_guest_sys; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = perf_gtk__hpp_color_overhead_guest_us; } static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) { struct perf_hpp_fmt *fmt; GType col_types[MAX_COLUMNS]; GtkCellRenderer *renderer; struct sort_entry *se; GtkListStore *store; struct rb_node *nd; GtkWidget *view; int col_idx; int nr_cols; char s[512]; struct perf_hpp hpp = { .buf = s, .size = sizeof(s), .ptr = hists_to_evsel(hists), }; nr_cols = 0; perf_hpp__for_each_format(fmt) col_types[nr_cols++] = G_TYPE_STRING; list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; col_types[nr_cols++] = G_TYPE_STRING; } store = gtk_list_store_newv(nr_cols, col_types); view = gtk_tree_view_new(); renderer = gtk_cell_renderer_text_new(); col_idx = 0; perf_hpp__for_each_format(fmt) { fmt->header(&hpp); gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), -1, ltrim(s), renderer, "markup", col_idx++, NULL); } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), -1, se->se_header, renderer, "text", 
col_idx++, NULL); } gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); g_object_unref(GTK_TREE_MODEL(store)); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); GtkTreeIter iter; if (h->filtered) continue; gtk_list_store_append(store, &iter); col_idx = 0; perf_hpp__for_each_format(fmt) { if (fmt->color) fmt->color(&hpp, h); else fmt->entry(&hpp, h); gtk_list_store_set(store, &iter, col_idx++, s, -1); } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; se->se_snprintf(h, s, ARRAY_SIZE(s), hists__col_len(hists, se->se_width_idx)); gtk_list_store_set(store, &iter, col_idx++, s, -1); } } gtk_container_add(GTK_CONTAINER(window), view); } int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help, struct hist_browser_timer *hbt __maybe_unused) { struct perf_evsel *pos; GtkWidget *vbox; GtkWidget *notebook; GtkWidget *info_bar; GtkWidget *statbar; GtkWidget *window; signal(SIGSEGV, perf_gtk__signal); signal(SIGFPE, perf_gtk__signal); signal(SIGINT, perf_gtk__signal); signal(SIGQUIT, perf_gtk__signal); signal(SIGTERM, perf_gtk__signal); window = gtk_window_new(GTK_WINDOW_TOPLEVEL); gtk_window_set_title(GTK_WINDOW(window), "perf report"); g_signal_connect(window, "delete_event", gtk_main_quit, NULL); pgctx = perf_gtk__activate_context(window); if (!pgctx) return -1; vbox = gtk_vbox_new(FALSE, 0); notebook = gtk_notebook_new(); gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); info_bar = perf_gtk__setup_info_bar(); if (info_bar) gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); statbar = perf_gtk__setup_statusbar(); gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); gtk_container_add(GTK_CONTAINER(window), vbox); list_for_each_entry(pos, &evlist->entries, node) { struct hists *hists = &pos->hists; const char *evname = perf_evsel__name(pos); GtkWidget *scrolled_window; GtkWidget *tab_label; char 
buf[512]; size_t size = sizeof(buf); if (symbol_conf.event_group) { if (!perf_evsel__is_group_leader(pos)) continue; if (pos->nr_members > 1) { perf_evsel__group_desc(pos, buf, size); evname = buf; } } scrolled_window = gtk_scrolled_window_new(NULL, NULL); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); perf_gtk__show_hists(scrolled_window, hists); tab_label = gtk_label_new(evname); gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); } gtk_widget_show_all(window); perf_gtk__resize_window(window); gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); ui_helpline__push(help); gtk_main(); perf_gtk__deactivate_context(&pgctx); return 0; }
gpl-2.0
TeamExodus/kernel_yu_tomato
drivers/block/xsysace.c
2077
33391
/* * Xilinx SystemACE device driver * * Copyright 2007 Secret Lab Technologies Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ /* * The SystemACE chip is designed to configure FPGAs by loading an FPGA * bitstream from a file on a CF card and squirting it into FPGAs connected * to the SystemACE JTAG chain. It also has the advantage of providing an * MPU interface which can be used to control the FPGA configuration process * and to use the attached CF card for general purpose storage. * * This driver is a block device driver for the SystemACE. * * Initialization: * The driver registers itself as a platform_device driver at module * load time. The platform bus will take care of calling the * ace_probe() method for all SystemACE instances in the system. Any * number of SystemACE instances are supported. ace_probe() calls * ace_setup() which initialized all data structures, reads the CF * id structure and registers the device. * * Processing: * Just about all of the heavy lifting in this driver is performed by * a Finite State Machine (FSM). The driver needs to wait on a number * of events; some raised by interrupts, some which need to be polled * for. Describing all of the behaviour in a FSM seems to be the * easiest way to keep the complexity low and make it easy to * understand what the driver is doing. If the block ops or the * request function need to interact with the hardware, then they * simply need to flag the request and kick of FSM processing. * * The FSM itself is atomic-safe code which can be run from any * context. The general process flow is: * 1. obtain the ace->lock spinlock. * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is * cleared. * 3. release the lock. * * Individual states do not sleep in any way. 
If a condition needs to * be waited for then the state much clear the fsm_continue flag and * either schedule the FSM to be run again at a later time, or expect * an interrupt to call the FSM when the desired condition is met. * * In normal operation, the FSM is processed at interrupt context * either when the driver's tasklet is scheduled, or when an irq is * raised by the hardware. The tasklet can be scheduled at any time. * The request method in particular schedules the tasklet when a new * request has been indicated by the block layer. Once started, the * FSM proceeds as far as it can processing the request until it * needs on a hardware event. At this point, it must yield execution. * * A state has two options when yielding execution: * 1. ace_fsm_yield() * - Call if need to poll for event. * - clears the fsm_continue flag to exit the processing loop * - reschedules the tasklet to run again as soon as possible * 2. ace_fsm_yieldirq() * - Call if an irq is expected from the HW * - clears the fsm_continue flag to exit the processing loop * - does not reschedule the tasklet so the FSM will not be processed * again until an irq is received. * After calling a yield function, the state must return control back * to the FSM main loop. * * Additionally, the driver maintains a kernel timer which can process * the FSM. If the FSM gets stalled, typically due to a missed * interrupt, then the kernel timer will expire and the driver can * continue where it left off. * * To Do: * - Add FPGA configuration control interface. 
* - Request major number from lanana */ #undef DEBUG #include <linux/module.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/platform_device.h> #if defined(CONFIG_OF) #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Xilinx SystemACE device driver"); MODULE_LICENSE("GPL"); /* SystemACE register definitions */ #define ACE_BUSMODE (0x00) #define ACE_STATUS (0x04) #define ACE_STATUS_CFGLOCK (0x00000001) #define ACE_STATUS_MPULOCK (0x00000002) #define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */ #define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */ #define ACE_STATUS_CFDETECT (0x00000010) #define ACE_STATUS_DATABUFRDY (0x00000020) #define ACE_STATUS_DATABUFMODE (0x00000040) #define ACE_STATUS_CFGDONE (0x00000080) #define ACE_STATUS_RDYFORCFCMD (0x00000100) #define ACE_STATUS_CFGMODEPIN (0x00000200) #define ACE_STATUS_CFGADDR_MASK (0x0000e000) #define ACE_STATUS_CFBSY (0x00020000) #define ACE_STATUS_CFRDY (0x00040000) #define ACE_STATUS_CFDWF (0x00080000) #define ACE_STATUS_CFDSC (0x00100000) #define ACE_STATUS_CFDRQ (0x00200000) #define ACE_STATUS_CFCORR (0x00400000) #define ACE_STATUS_CFERR (0x00800000) #define ACE_ERROR (0x08) #define ACE_CFGLBA (0x0c) #define ACE_MPULBA (0x10) #define ACE_SECCNTCMD (0x14) #define ACE_SECCNTCMD_RESET (0x0100) #define ACE_SECCNTCMD_IDENTIFY (0x0200) #define ACE_SECCNTCMD_READ_DATA (0x0300) #define ACE_SECCNTCMD_WRITE_DATA (0x0400) #define ACE_SECCNTCMD_ABORT (0x0600) #define ACE_VERSION (0x16) #define ACE_VERSION_REVISION_MASK (0x00FF) #define ACE_VERSION_MINOR_MASK (0x0F00) #define ACE_VERSION_MAJOR_MASK (0xF000) #define ACE_CTRL (0x18) 
#define ACE_CTRL_FORCELOCKREQ (0x0001) #define ACE_CTRL_LOCKREQ (0x0002) #define ACE_CTRL_FORCECFGADDR (0x0004) #define ACE_CTRL_FORCECFGMODE (0x0008) #define ACE_CTRL_CFGMODE (0x0010) #define ACE_CTRL_CFGSTART (0x0020) #define ACE_CTRL_CFGSEL (0x0040) #define ACE_CTRL_CFGRESET (0x0080) #define ACE_CTRL_DATABUFRDYIRQ (0x0100) #define ACE_CTRL_ERRORIRQ (0x0200) #define ACE_CTRL_CFGDONEIRQ (0x0400) #define ACE_CTRL_RESETIRQ (0x0800) #define ACE_CTRL_CFGPROG (0x1000) #define ACE_CTRL_CFGADDR_MASK (0xe000) #define ACE_FATSTAT (0x1c) #define ACE_NUM_MINORS 16 #define ACE_SECTOR_SIZE (512) #define ACE_FIFO_SIZE (32) #define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE) #define ACE_BUS_WIDTH_8 0 #define ACE_BUS_WIDTH_16 1 struct ace_reg_ops; struct ace_device { /* driver state data */ int id; int media_change; int users; struct list_head list; /* finite state machine data */ struct tasklet_struct fsm_tasklet; uint fsm_task; /* Current activity (ACE_TASK_*) */ uint fsm_state; /* Current state (ACE_FSM_STATE_*) */ uint fsm_continue_flag; /* cleared to exit FSM mainloop */ uint fsm_iter_num; struct timer_list stall_timer; /* Transfer state/result, use for both id and block request */ struct request *req; /* request being processed */ void *data_ptr; /* pointer to I/O buffer */ int data_count; /* number of buffers remaining */ int data_result; /* Result of transfer; 0 := success */ int id_req_count; /* count of id requests */ int id_result; struct completion id_completion; /* used when id req finishes */ int in_irq; /* Details of hardware device */ resource_size_t physaddr; void __iomem *baseaddr; int irq; int bus_width; /* 0 := 8 bit; 1 := 16 bit */ struct ace_reg_ops *reg_ops; int lock_count; /* Block device data structures */ spinlock_t lock; struct device *dev; struct request_queue *queue; struct gendisk *gd; /* Inserted CF card parameters */ u16 cf_id[ATA_ID_WORDS]; }; static DEFINE_MUTEX(xsysace_mutex); static int ace_major; /* 
--------------------------------------------------------------------- * Low level register access */ struct ace_reg_ops { u16(*in) (struct ace_device * ace, int reg); void (*out) (struct ace_device * ace, int reg, u16 val); void (*datain) (struct ace_device * ace); void (*dataout) (struct ace_device * ace); }; /* 8 Bit bus width */ static u16 ace_in_8(struct ace_device *ace, int reg) { void __iomem *r = ace->baseaddr + reg; return in_8(r) | (in_8(r + 1) << 8); } static void ace_out_8(struct ace_device *ace, int reg, u16 val) { void __iomem *r = ace->baseaddr + reg; out_8(r, val); out_8(r + 1, val >> 8); } static void ace_datain_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *dst = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) *dst++ = in_8(r++); ace->data_ptr = dst; } static void ace_dataout_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *src = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) out_8(r++, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_8_ops = { .in = ace_in_8, .out = ace_out_8, .datain = ace_datain_8, .dataout = ace_dataout_8, }; /* 16 bit big endian bus attachment */ static u16 ace_in_be16(struct ace_device *ace, int reg) { return in_be16(ace->baseaddr + reg); } static void ace_out_be16(struct ace_device *ace, int reg, u16 val) { out_be16(ace->baseaddr + reg, val); } static void ace_datain_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_le16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_le16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } /* 16 bit little endian bus attachment */ static u16 ace_in_le16(struct ace_device *ace, int reg) { return in_le16(ace->baseaddr + reg); } static void ace_out_le16(struct ace_device *ace, int reg, u16 val) { out_le16(ace->baseaddr + reg, val); } static void 
ace_datain_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_be16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_be16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_be16_ops = { .in = ace_in_be16, .out = ace_out_be16, .datain = ace_datain_be16, .dataout = ace_dataout_be16, }; static struct ace_reg_ops ace_reg_le16_ops = { .in = ace_in_le16, .out = ace_out_le16, .datain = ace_datain_le16, .dataout = ace_dataout_le16, }; static inline u16 ace_in(struct ace_device *ace, int reg) { return ace->reg_ops->in(ace, reg); } static inline u32 ace_in32(struct ace_device *ace, int reg) { return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16); } static inline void ace_out(struct ace_device *ace, int reg, u16 val) { ace->reg_ops->out(ace, reg, val); } static inline void ace_out32(struct ace_device *ace, int reg, u32 val) { ace_out(ace, reg, val); ace_out(ace, reg + 2, val >> 16); } /* --------------------------------------------------------------------- * Debug support functions */ #if defined(DEBUG) static void ace_dump_mem(void *base, int len) { const char *ptr = base; int i, j; for (i = 0; i < len; i += 16) { printk(KERN_INFO "%.8x:", i); for (j = 0; j < 16; j++) { if (!(j % 4)) printk(" "); printk("%.2x", ptr[i + j]); } printk(" "); for (j = 0; j < 16; j++) printk("%c", isprint(ptr[i + j]) ? 
ptr[i + j] : '.'); printk("\n"); } } #else static inline void ace_dump_mem(void *base, int len) { } #endif static void ace_dump_regs(struct ace_device *ace) { dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" " status:%.8x mpu_lba:%.8x busmode:%4x\n" " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD), ace_in(ace, ACE_VERSION), ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_MPULBA), ace_in(ace, ACE_BUSMODE), ace_in32(ace, ACE_ERROR), ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT)); } void ace_fix_driveid(u16 *id) { #if defined(__BIG_ENDIAN) int i; /* All half words have wrong byte order; swap the bytes */ for (i = 0; i < ATA_ID_WORDS; i++, id++) *id = le16_to_cpu(*id); #endif } /* --------------------------------------------------------------------- * Finite State Machine (FSM) implementation */ /* FSM tasks; used to direct state transitions */ #define ACE_TASK_IDLE 0 #define ACE_TASK_IDENTIFY 1 #define ACE_TASK_READ 2 #define ACE_TASK_WRITE 3 #define ACE_FSM_NUM_TASKS 4 /* FSM state definitions */ #define ACE_FSM_STATE_IDLE 0 #define ACE_FSM_STATE_REQ_LOCK 1 #define ACE_FSM_STATE_WAIT_LOCK 2 #define ACE_FSM_STATE_WAIT_CFREADY 3 #define ACE_FSM_STATE_IDENTIFY_PREPARE 4 #define ACE_FSM_STATE_IDENTIFY_TRANSFER 5 #define ACE_FSM_STATE_IDENTIFY_COMPLETE 6 #define ACE_FSM_STATE_REQ_PREPARE 7 #define ACE_FSM_STATE_REQ_TRANSFER 8 #define ACE_FSM_STATE_REQ_COMPLETE 9 #define ACE_FSM_STATE_ERROR 10 #define ACE_FSM_NUM_STATES 11 /* Set flag to exit FSM loop and reschedule tasklet */ static inline void ace_fsm_yield(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yield()\n"); tasklet_schedule(&ace->fsm_tasklet); ace->fsm_continue_flag = 0; } /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */ static inline void ace_fsm_yieldirq(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yieldirq()\n"); if (!ace->irq) /* No IRQ assigned, so need to poll */ tasklet_schedule(&ace->fsm_tasklet); 
ace->fsm_continue_flag = 0; } /* Get the next read/write request; ending requests that we don't handle */ struct request *ace_get_next_request(struct request_queue * q) { struct request *req; while ((req = blk_peek_request(q)) != NULL) { if (req->cmd_type == REQ_TYPE_FS) break; blk_start_request(req); __blk_end_request_all(req, -EIO); } return req; } static void ace_fsm_dostate(struct ace_device *ace) { struct request *req; u32 status; u16 val; int count; #if defined(DEBUG) dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n", ace->fsm_state, ace->id_req_count); #endif /* Verify that there is actually a CF in the slot. If not, then * bail out back to the idle state and wake up all the waiters */ status = ace_in32(ace, ACE_STATUS); if ((status & ACE_STATUS_CFDETECT) == 0) { ace->fsm_state = ACE_FSM_STATE_IDLE; ace->media_change = 1; set_capacity(ace->gd, 0); dev_info(ace->dev, "No CF in slot\n"); /* Drop all in-flight and pending requests */ if (ace->req) { __blk_end_request_all(ace->req, -EIO); ace->req = NULL; } while ((req = blk_fetch_request(ace->queue)) != NULL) __blk_end_request_all(req, -EIO); /* Drop back to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = -EIO; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } } switch (ace->fsm_state) { case ACE_FSM_STATE_IDLE: /* See if there is anything to do */ if (ace->id_req_count || ace_get_next_request(ace->queue)) { ace->fsm_iter_num++; ace->fsm_state = ACE_FSM_STATE_REQ_LOCK; mod_timer(&ace->stall_timer, jiffies + HZ); if (!timer_pending(&ace->stall_timer)) add_timer(&ace->stall_timer); break; } del_timer(&ace->stall_timer); ace->fsm_continue_flag = 0; break; case ACE_FSM_STATE_REQ_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* Already have the lock, jump to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* Request the lock */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ); 
ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK; break; case ACE_FSM_STATE_WAIT_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* got the lock; move to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* wait a bit for the lock */ ace_fsm_yield(ace); break; case ACE_FSM_STATE_WAIT_CFREADY: status = ace_in32(ace, ACE_STATUS); if (!(status & ACE_STATUS_RDYFORCFCMD) || (status & ACE_STATUS_CFBSY)) { /* CF card isn't ready; it needs to be polled */ ace_fsm_yield(ace); break; } /* Device is ready for command; determine what to do next */ if (ace->id_req_count) ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE; else ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE; break; case ACE_FSM_STATE_IDENTIFY_PREPARE: /* Send identify command */ ace->fsm_task = ACE_TASK_IDENTIFY; ace->data_ptr = ace->cf_id; ace->data_count = ACE_BUF_PER_SECTOR; ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* irq handler takes over from this point; wait for the * transfer to complete */ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER; ace_fsm_yieldirq(ace); break; case ACE_FSM_STATE_IDENTIFY_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n", ace->fsm_task, ace->fsm_iter_num, ace->data_count); ace_fsm_yield(ace); break; } if (!(status & ACE_STATUS_DATABUFRDY)) { ace_fsm_yield(ace); break; } /* Transfer the next buffer */ ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* transfer finished; kick state machine */ dev_dbg(ace->dev, "identify finished\n"); ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE; break; case ACE_FSM_STATE_IDENTIFY_COMPLETE: ace_fix_driveid(ace->cf_id); 
ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ if (ace->data_result) { /* Error occurred, disable the disk */ ace->media_change = 1; set_capacity(ace->gd, 0); dev_err(ace->dev, "error fetching CF id (%i)\n", ace->data_result); } else { ace->media_change = 0; /* Record disk parameters */ set_capacity(ace->gd, ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); dev_info(ace->dev, "capacity: %i sectors\n", ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); } /* We're done, drop to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = ace->data_result; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } break; case ACE_FSM_STATE_REQ_PREPARE: req = ace_get_next_request(ace->queue); if (!req) { ace->fsm_state = ACE_FSM_STATE_IDLE; break; } blk_start_request(req); /* Okay, it's a data request, set it up for transfer */ dev_dbg(ace->dev, "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", (unsigned long long)blk_rq_pos(req), blk_rq_sectors(req), blk_rq_cur_sectors(req), rq_data_dir(req)); ace->req = req; ace->data_ptr = req->buffer; ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); count = blk_rq_sectors(req); if (rq_data_dir(req)) { /* Kick off write request */ dev_dbg(ace->dev, "write data\n"); ace->fsm_task = ACE_TASK_WRITE; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_WRITE_DATA); } else { /* Kick off read request */ dev_dbg(ace->dev, "read data\n"); ace->fsm_task = ACE_TASK_READ; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_READ_DATA); } /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* Move to the transfer state. 
The systemace will raise * an interrupt once there is something to do */ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER; if (ace->fsm_task == ACE_TASK_READ) ace_fsm_yieldirq(ace); /* wait for data ready */ break; case ACE_FSM_STATE_REQ_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yield(ace); /* need to poll CFBSY bit */ break; } if (!(status & ACE_STATUS_DATABUFRDY)) { dev_dbg(ace->dev, "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yieldirq(ace); break; } /* Transfer the next buffer */ if (ace->fsm_task == ACE_TASK_WRITE) ace->reg_ops->dataout(ace); else ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* bio finished; is there another one? 
*/ if (__blk_end_request_cur(ace->req, 0)) { /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ ace->data_ptr = ace->req->buffer; ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; } ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE; break; case ACE_FSM_STATE_REQ_COMPLETE: ace->req = NULL; /* Finished request; go to idle state */ ace->fsm_state = ACE_FSM_STATE_IDLE; break; default: ace->fsm_state = ACE_FSM_STATE_IDLE; break; } } static void ace_fsm_tasklet(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; spin_lock_irqsave(&ace->lock, flags); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } static void ace_stall_timer(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; dev_warn(ace->dev, "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n", ace->fsm_state, ace->fsm_task, ace->fsm_iter_num, ace->data_count); spin_lock_irqsave(&ace->lock, flags); /* Rearm the stall timer *before* entering FSM (which may then * delete the timer) */ mod_timer(&ace->stall_timer, jiffies + HZ); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } /* --------------------------------------------------------------------- * Interrupt handling routines */ static int ace_interrupt_checkstate(struct ace_device *ace) { u32 sreg = ace_in32(ace, ACE_STATUS); u16 creg = ace_in(ace, ACE_CTRL); /* Check for error occurrence */ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) && (creg & ACE_CTRL_ERRORIRQ)) { dev_err(ace->dev, "transfer failure\n"); ace_dump_regs(ace); return -EIO; } return 0; } static irqreturn_t ace_interrupt(int irq, void *dev_id) { u16 creg; struct ace_device *ace = dev_id; /* be 
safe and get the lock */ spin_lock(&ace->lock); ace->in_irq = 1; /* clear the interrupt */ creg = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ); ace_out(ace, ACE_CTRL, creg); /* check for IO failures */ if (ace_interrupt_checkstate(ace)) ace->data_result = -EIO; if (ace->fsm_task == 0) { dev_err(ace->dev, "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n", ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD)); dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n", ace->fsm_task, ace->fsm_state, ace->data_count); } /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); /* done with interrupt; drop the lock */ ace->in_irq = 0; spin_unlock(&ace->lock); return IRQ_HANDLED; } /* --------------------------------------------------------------------- * Block ops */ static void ace_request(struct request_queue * q) { struct request *req; struct ace_device *ace; req = ace_get_next_request(q); if (req) { ace = req->rq_disk->private_data; tasklet_schedule(&ace->fsm_tasklet); } } static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing) { struct ace_device *ace = gd->private_data; dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change); return ace->media_change ? 
DISK_EVENT_MEDIA_CHANGE : 0;
}

/*
 * Re-read the CF identify data after a media change.  Queues an identify
 * request for the FSM, kicks the tasklet, and sleeps on id_completion
 * until the FSM signals completion.  Returns the identify result
 * (0 on success, negative errno otherwise).
 */
static int ace_revalidate_disk(struct gendisk *gd)
{
	struct ace_device *ace = gd->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_revalidate_disk()\n");

	if (ace->media_change) {
		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

		/* id_req_count is shared with the FSM; bump it under the lock */
		spin_lock_irqsave(&ace->lock, flags);
		ace->id_req_count++;
		spin_unlock_irqrestore(&ace->lock, flags);

		tasklet_schedule(&ace->fsm_tasklet);
		wait_for_completion(&ace->id_completion);
	}

	dev_dbg(ace->dev, "revalidate complete\n");
	return ace->id_result;
}

/*
 * Block device open: bump the user count and let the block layer check
 * for a media change (which triggers ace_revalidate_disk() if needed).
 */
static int ace_open(struct block_device *bdev, fmode_t mode)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	check_disk_change(bdev);
	mutex_unlock(&xsysace_mutex);

	return 0;
}

/*
 * Block device release: drop the user count and, on last close, clear the
 * MPU lock request bit so the SystemACE config controller can reclaim the
 * CF card.
 */
static void ace_release(struct gendisk *disk, fmode_t mode)
{
	struct ace_device *ace = disk->private_data;
	unsigned long flags;
	u16 val;

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users--;
	if (ace->users == 0) {
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	mutex_unlock(&xsysace_mutex);
}

/* Report CHS geometry straight from the cached ATA identify words. */
static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	u16 *cf_id = ace->cf_id;

	dev_dbg(ace->dev, "ace_getgeo()\n");

	geo->heads = cf_id[ATA_ID_HEADS];
	geo->sectors = cf_id[ATA_ID_SECTORS];
	geo->cylinders = cf_id[ATA_ID_CYLS];

	return 0;
}

/* Block layer entry points for the SystemACE disk. */
static const struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.check_events = ace_check_events,
	.revalidate_disk = ace_revalidate_disk,
	.getgeo = ace_getgeo,
};

/* --------------------------------------------------------------------
 * SystemACE
device setup/teardown code */ static int ace_setup(struct ace_device *ace) { u16 version; u16 val; int rc; dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace); dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n", (unsigned long long)ace->physaddr, ace->irq); spin_lock_init(&ace->lock); init_completion(&ace->id_completion); /* * Map the device */ ace->baseaddr = ioremap(ace->physaddr, 0x80); if (!ace->baseaddr) goto err_ioremap; /* * Initialize the state machine tasklet and stall timer */ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace); setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace); /* * Initialize the request queue */ ace->queue = blk_init_queue(ace_request, &ace->lock); if (ace->queue == NULL) goto err_blk_initq; blk_queue_logical_block_size(ace->queue, 512); /* * Allocate and initialize GD structure */ ace->gd = alloc_disk(ACE_NUM_MINORS); if (!ace->gd) goto err_alloc_disk; ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); /* set bus width */ if (ace->bus_width == ACE_BUS_WIDTH_16) { /* 0x0101 should work regardless of endianess */ ace_out_le16(ace, ACE_BUSMODE, 0x0101); /* read it back to determine endianess */ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001) ace->reg_ops = &ace_reg_le16_ops; else ace->reg_ops = &ace_reg_be16_ops; } else { ace_out_8(ace, ACE_BUSMODE, 0x00); ace->reg_ops = &ace_reg_8_ops; } /* Make sure version register is sane */ version = ace_in(ace, ACE_VERSION); if ((version == 0) || (version == 0xFFFF)) goto err_read; /* Put sysace in a sane state by clearing most control reg bits */ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); /* Now we can hook up the irq handler */ if (ace->irq) { rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace); if (rc) { /* Failure - fall back to polled mode */ 
dev_err(ace->dev, "request_irq failed\n"); ace->irq = 0; } } /* Enable interrupts */ val = ace_in(ace, ACE_CTRL); val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; ace_out(ace, ACE_CTRL, val); /* Print the identification */ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n", (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff); dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n", (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq); ace->media_change = 1; ace_revalidate_disk(ace->gd); /* Make the sysace device 'live' */ add_disk(ace->gd); return 0; err_read: put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); err_blk_initq: iounmap(ace->baseaddr); err_ioremap: dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n", (unsigned long long) ace->physaddr); return -ENOMEM; } static void ace_teardown(struct ace_device *ace) { if (ace->gd) { del_gendisk(ace->gd); put_disk(ace->gd); } if (ace->queue) blk_cleanup_queue(ace->queue); tasklet_kill(&ace->fsm_tasklet); if (ace->irq) free_irq(ace->irq, ace); iounmap(ace->baseaddr); } static int ace_alloc(struct device *dev, int id, resource_size_t physaddr, int irq, int bus_width) { struct ace_device *ace; int rc; dev_dbg(dev, "ace_alloc(%p)\n", dev); if (!physaddr) { rc = -ENODEV; goto err_noreg; } /* Allocate and initialize the ace device structure */ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); if (!ace) { rc = -ENOMEM; goto err_alloc; } ace->dev = dev; ace->id = id; ace->physaddr = physaddr; ace->irq = irq; ace->bus_width = bus_width; /* Call the setup code */ rc = ace_setup(ace); if (rc) goto err_setup; dev_set_drvdata(dev, ace); return 0; err_setup: dev_set_drvdata(dev, NULL); kfree(ace); err_alloc: err_noreg: dev_err(dev, "could not initialize device, err=%i\n", rc); return rc; } static void ace_free(struct device *dev) { struct ace_device *ace = dev_get_drvdata(dev); dev_dbg(dev, "ace_free(%p)\n", dev); if (ace) { ace_teardown(ace); dev_set_drvdata(dev, 
NULL); kfree(ace); } } /* --------------------------------------------------------------------- * Platform Bus Support */ static int ace_probe(struct platform_device *dev) { resource_size_t physaddr = 0; int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */ u32 id = dev->id; int irq = 0; int i; dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); /* device id and bus width */ if (of_property_read_u32(dev->dev.of_node, "port-number", &id)) id = 0; if (of_find_property(dev->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; for (i = 0; i < dev->num_resources; i++) { if (dev->resource[i].flags & IORESOURCE_MEM) physaddr = dev->resource[i].start; if (dev->resource[i].flags & IORESOURCE_IRQ) irq = dev->resource[i].start; } /* Call the bus-independent setup code */ return ace_alloc(&dev->dev, id, physaddr, irq, bus_width); } /* * Platform bus remove() method */ static int ace_remove(struct platform_device *dev) { ace_free(&dev->dev); return 0; } #if defined(CONFIG_OF) /* Match table for of_platform binding */ static const struct of_device_id ace_of_match[] = { { .compatible = "xlnx,opb-sysace-1.00.b", }, { .compatible = "xlnx,opb-sysace-1.00.c", }, { .compatible = "xlnx,xps-sysace-1.00.a", }, { .compatible = "xlnx,sysace", }, {}, }; MODULE_DEVICE_TABLE(of, ace_of_match); #else /* CONFIG_OF */ #define ace_of_match NULL #endif /* CONFIG_OF */ static struct platform_driver ace_platform_driver = { .probe = ace_probe, .remove = ace_remove, .driver = { .owner = THIS_MODULE, .name = "xsysace", .of_match_table = ace_of_match, }, }; /* --------------------------------------------------------------------- * Module init/exit routines */ static int __init ace_init(void) { int rc; ace_major = register_blkdev(ace_major, "xsysace"); if (ace_major <= 0) { rc = -ENOMEM; goto err_blk; } rc = platform_driver_register(&ace_platform_driver); if (rc) goto err_plat; pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major); return 0; err_plat: 
unregister_blkdev(ace_major, "xsysace"); err_blk: printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc); return rc; } module_init(ace_init); static void __exit ace_exit(void) { pr_debug("Unregistering Xilinx SystemACE driver\n"); platform_driver_unregister(&ace_platform_driver); unregister_blkdev(ace_major, "xsysace"); } module_exit(ace_exit);
gpl-2.0
free-z4u/android_kernel_htc_msm7x30
arch/powerpc/kernel/pci_64.c
2333
7665
/* * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> unsigned long pci_probe_only = 1; /* pci_io_base -- the base address from which io bars are offsets. * This is the lowest I/O base address (so bar values are always positive), * and it *must* be the start of ISA space if an ISA bus exists because * ISA drivers use hard coded offsets. If no ISA bus exists nothing * is mapped on the first 64K of IO space */ unsigned long pci_io_base = ISA_IO_BASE; EXPORT_SYMBOL(pci_io_base); static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; printk(KERN_INFO "PCI: Probing PCI hardware\n"); /* For now, override phys_mem_access_prot. If we need it,g * later, we may move that initialization to each ppc_md */ ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; if (pci_probe_only) ppc_pci_flags |= PPC_PCI_PROBE_ONLY; /* On ppc64, we always enable PCI domains and we keep domain 0 * backward compatible in /proc for video cards */ ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; /* Scan all of the recorded PCI controllers. 
*/ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); } /* Call common code to handle resource allocation */ pcibios_resource_survey(); printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); return 0; } subsys_initcall(pcibios_init); #ifdef CONFIG_HOTPLUG int pcibios_unmap_io_space(struct pci_bus *bus) { struct pci_controller *hose; WARN_ON(bus == NULL); /* If this is not a PHB, we only flush the hash table over * the area mapped by this bridge. We don't play with the PTE * mappings since we might have to deal with sub-page alignemnts * so flushing the hash table is the only sane way to make sure * that no hash entries are covering that removed bridge area * while still allowing other busses overlapping those pages * * Note: If we ever support P2P hotplug on Book3E, we'll have * to do an appropriate TLB flush here too */ if (bus->self) { #ifdef CONFIG_PPC_STD_MMU_64 struct resource *res = bus->resource[0]; #endif pr_debug("IO unmapping for PCI-PCI bridge %s\n", pci_name(bus->self)); #ifdef CONFIG_PPC_STD_MMU_64 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, res->end + _IO_BASE + 1); #endif return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); /* Check if we have IOs allocated */ if (hose->io_base_alloc == 0) return 0; pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ vunmap(hose->io_base_alloc); return 0; } EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); #endif /* CONFIG_HOTPLUG */ int __devinit pcibios_map_io_space(struct pci_bus *bus) { struct vm_struct *area; unsigned long phys_page; unsigned long size_page; unsigned long io_virt_offset; struct pci_controller *hose; WARN_ON(bus == NULL); /* If this not a PHB, nothing to do, page tables still exist and * thus HPTEs will be faulted in when needed */ if (bus->self) { pr_debug("IO mapping for PCI-PCI bridge %s\n", 
pci_name(bus->self)); pr_debug(" virt=0x%016llx...0x%016llx\n", bus->resource[0]->start + _IO_BASE, bus->resource[0]->end + _IO_BASE); return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); /* Make sure IO area address is clear */ hose->io_base_alloc = NULL; /* If there's no IO to map on that bus, get away too */ if (hose->pci_io_size == 0 || hose->io_base_phys == 0) return 0; /* Let's allocate some IO space for that guy. We don't pass * VM_IOREMAP because we don't care about alignment tricks that * the core does in that case. Maybe we should due to stupid card * with incomplete address decoding but I'd rather not deal with * those outside of the reserved 64K legacy region. */ area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END); if (area == NULL) return -ENOMEM; hose->io_base_alloc = area->addr; hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); pr_debug(" size=0x%016llx (alloc=0x%016lx)\n", hose->pci_io_size, size_page); /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL) return -ENOMEM; /* Fixup hose IO resource */ io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE; hose->io_resource.start += io_virt_offset; hose->io_resource.end += io_virt_offset; pr_debug(" hose->io_resource=%pR\n", &hose->io_resource); return 0; } EXPORT_SYMBOL_GPL(pcibios_map_io_space); void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) { pcibios_map_io_space(hose->bus); } #define IOBASE_BRIDGE_NUMBER 0 #define IOBASE_MEMORY 1 #define IOBASE_IO 2 #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 long sys_pciconfig_iobase(long which, unsigned long in_bus, unsigned 
long in_devfn) { struct pci_controller* hose; struct list_head *ln; struct pci_bus *bus = NULL; struct device_node *hose_node; /* Argh ! Please forgive me for that hack, but that's the * simplest way to get existing XFree to not lockup on some * G5 machines... So when something asks for bus 0 io base * (bus 0 is HT root), we return the AGP one instead. */ if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) { struct device_node *agp; agp = of_find_compatible_node(NULL, NULL, "u3-agp"); if (agp) in_bus = 0xf0; of_node_put(agp); } /* That syscall isn't quite compatible with PCI domains, but it's * used on pre-domains setup. We return the first match */ for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { bus = pci_bus_b(ln); if (in_bus >= bus->number && in_bus <= bus->subordinate) break; bus = NULL; } if (bus == NULL || bus->dev.of_node == NULL) return -ENODEV; hose_node = bus->dev.of_node; hose = PCI_DN(hose_node)->phb; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return -EINVAL; } return -EOPNOTSUPP; } #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) { struct pci_controller *phb = pci_bus_to_host(bus); return phb->node; } EXPORT_SYMBOL(pcibus_to_node); #endif
gpl-2.0
Thinkware-Device/willow
arch/arm/mach-ixp2000/ixdp2800.c
2589
7401
/* * arch/arm/mach-ixp2000/ixdp2800.c * * IXDP2800 platform support * * Original Author: Jeffrey Daly <jeffrey.daly@intel.com> * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * Copyright (C) 2002 Intel Corp. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/bitops.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/pci.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <asm/mach/flash.h> #include <asm/mach/arch.h> /************************************************************************* * IXDP2800 timer tick *************************************************************************/ static void __init ixdp2800_timer_init(void) { ixp2000_init_time(50000000); } static struct sys_timer ixdp2800_timer = { .init = ixdp2800_timer_init, .offset = ixp2000_gettimeoffset, }; /************************************************************************* * IXDP2800 PCI *************************************************************************/ static void __init ixdp2800_slave_disable_pci_master(void) { *IXP2000_PCI_CMDSTAT &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); } static void __init ixdp2800_master_wait_for_slave(void) { volatile u32 *addr; printk(KERN_INFO "IXDP2800: waiting for slave NPU to configure " "its BAR sizes\n"); addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, PCI_BASE_ADDRESS_1); do 
{ *addr = 0xffffffff; cpu_relax(); } while (*addr != 0xfe000008); addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, PCI_BASE_ADDRESS_2); do { *addr = 0xffffffff; cpu_relax(); } while (*addr != 0xc0000008); /* * Configure the slave's SDRAM BAR by hand. */ *addr = 0x40000008; } static void __init ixdp2800_slave_wait_for_master_enable(void) { printk(KERN_INFO "IXDP2800: waiting for master NPU to enable us\n"); while ((*IXP2000_PCI_CMDSTAT & PCI_COMMAND_MASTER) == 0) cpu_relax(); } void __init ixdp2800_pci_preinit(void) { printk("ixdp2x00_pci_preinit called\n"); *IXP2000_PCI_ADDR_EXT = 0x0001e000; if (!ixdp2x00_master_npu()) ixdp2800_slave_disable_pci_master(); *IXP2000_PCI_SRAM_BASE_ADDR_MASK = (0x2000000 - 1) & ~0x3ffff; *IXP2000_PCI_DRAM_BASE_ADDR_MASK = (0x40000000 - 1) & ~0xfffff; ixp2000_pci_preinit(); if (ixdp2x00_master_npu()) { /* * Wait until the slave set its SRAM/SDRAM BAR sizes * correctly before we proceed to scan and enumerate * the bus. */ ixdp2800_master_wait_for_slave(); /* * We configure the SDRAM BARs by hand because they * are 1G and fall outside of the regular allocated * PCI address space. */ *IXP2000_PCI_SDRAM_BAR = 0x00000008; } else { /* * Wait for the master to complete scanning the bus * and assigning resources before we proceed to scan * the bus ourselves. Set pci=firmware to honor the * master's resource assignment. */ ixdp2800_slave_wait_for_master_enable(); pcibios_setup("firmware"); } } /* * We assign the SDRAM BARs for the two IXP2800 CPUs by hand, outside * of the regular PCI window, because there's only 512M of outbound PCI * memory window on each IXP, while we need 1G for each of the BARs. 
*/ static void __devinit ixp2800_pci_fixup(struct pci_dev *dev) { if (machine_is_ixdp2800()) { dev->resource[2].start = 0; dev->resource[2].end = 0; dev->resource[2].flags = 0; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP2800, ixp2800_pci_fixup); static int __init ixdp2800_pci_setup(int nr, struct pci_sys_data *sys) { sys->mem_offset = 0x00000000; ixp2000_pci_setup(nr, sys); return 1; } static int __init ixdp2800_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { if (ixdp2x00_master_npu()) { /* * Root bus devices. Slave NPU is only one with interrupt. * Everything else, we just return -1 which is invalid. */ if(!dev->bus->self) { if(dev->devfn == IXDP2X00_SLAVE_NPU_DEVFN ) return IRQ_IXDP2800_INGRESS_NPU; return -1; } /* * Bridge behind the PMC slot. */ if(dev->bus->self->devfn == IXDP2X00_PMC_DEVFN && dev->bus->parent->self->devfn == IXDP2X00_P2P_DEVFN && !dev->bus->parent->self->bus->parent) return IRQ_IXDP2800_PMC; /* * Device behind the first bridge */ if(dev->bus->self->devfn == IXDP2X00_P2P_DEVFN) { switch(dev->devfn) { case IXDP2X00_PMC_DEVFN: return IRQ_IXDP2800_PMC; case IXDP2800_MASTER_ENET_DEVFN: return IRQ_IXDP2800_EGRESS_ENET; case IXDP2800_SWITCH_FABRIC_DEVFN: return IRQ_IXDP2800_FABRIC; } } return -1; } else return IRQ_IXP2000_PCIB; /* Slave NIC interrupt */ } static void __init ixdp2800_master_enable_slave(void) { volatile u32 *addr; printk(KERN_INFO "IXDP2800: enabling slave NPU\n"); addr = (volatile u32 *)ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, PCI_COMMAND); *addr |= PCI_COMMAND_MASTER; } static void __init ixdp2800_master_wait_for_slave_bus_scan(void) { volatile u32 *addr; printk(KERN_INFO "IXDP2800: waiting for slave to finish bus scan\n"); addr = (volatile u32 *)ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, PCI_COMMAND); while ((*addr & PCI_COMMAND_MEMORY) == 0) cpu_relax(); } static void __init ixdp2800_slave_signal_bus_scan_completion(void) { printk(KERN_INFO "IXDP2800: bus scan done, 
signaling master\n"); *IXP2000_PCI_CMDSTAT |= PCI_COMMAND_MEMORY; } static void __init ixdp2800_pci_postinit(void) { if (!ixdp2x00_master_npu()) { ixdp2x00_slave_pci_postinit(); ixdp2800_slave_signal_bus_scan_completion(); } } struct __initdata hw_pci ixdp2800_pci __initdata = { .nr_controllers = 1, .setup = ixdp2800_pci_setup, .preinit = ixdp2800_pci_preinit, .postinit = ixdp2800_pci_postinit, .scan = ixp2000_pci_scan_bus, .map_irq = ixdp2800_pci_map_irq, }; int __init ixdp2800_pci_init(void) { if (machine_is_ixdp2800()) { struct pci_dev *dev; pci_common_init(&ixdp2800_pci); if (ixdp2x00_master_npu()) { dev = pci_get_bus_and_slot(1, IXDP2800_SLAVE_ENET_DEVFN); pci_remove_bus_device(dev); pci_dev_put(dev); ixdp2800_master_enable_slave(); ixdp2800_master_wait_for_slave_bus_scan(); } else { dev = pci_get_bus_and_slot(1, IXDP2800_MASTER_ENET_DEVFN); pci_remove_bus_device(dev); pci_dev_put(dev); } } return 0; } subsys_initcall(ixdp2800_pci_init); void __init ixdp2800_init_irq(void) { ixdp2x00_init_irq(IXDP2800_CPLD_INT_STAT, IXDP2800_CPLD_INT_MASK, IXDP2800_NR_IRQS); } MACHINE_START(IXDP2800, "Intel IXDP2800 Development Platform") /* Maintainer: MontaVista Software, Inc. */ .boot_params = 0x00000100, .map_io = ixdp2x00_map_io, .init_irq = ixdp2800_init_irq, .timer = &ixdp2800_timer, .init_machine = ixdp2x00_init_machine, MACHINE_END
gpl-2.0
AndroidDeveloperAlliance/kernel_samsung_smdk4210
net/dccp/timer.c
3101
7678
/* * net/dccp/timer.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/skbuff.h> #include "dccp.h" /* sysctl variables governing numbers of retransmission attempts */ int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; static void dccp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_done(sk); DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); } /* A write timeout has occurred. Process the after effects. */ static int dccp_write_timeout(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { if (icsk->icsk_retransmits != 0) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? : sysctl_dccp_request_retries; } else { if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black hole detection. :-( It is place to make it. It is not made. I do not want to make it. It is disguisting. It does not work in any case. Let me to cite the same draft, which requires for us to implement this: "The one security concern raised by this memo is that ICMP black holes are often caused by over-zealous security administrators who block all ICMP messages. It is vitally important that those who design and deploy security systems understand the impact of strict filtering on upper-layer protocols. 
The safest web site in the world is worthless if most TCP implementations cannot transfer data from it. It would be far nicer to have all of the black holes fixed rather than fixing all of the TCP implementations." Golden words :-). */ dst_negative_advice(sk); } retry_until = sysctl_dccp_retries2; /* * FIXME: see tcp_write_timout and tcp_out_of_resources */ } if (icsk->icsk_retransmits >= retry_until) { /* Has it gone just too far? */ dccp_write_err(sk); return 1; } return 0; } /* * The DCCP retransmit timer. */ static void dccp_retransmit_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); /* * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was * sent, no need to retransmit, this sock is dead. */ if (dccp_write_timeout(sk)) return; /* * We want to know the number of packets retransmitted, not the * total number of retransmissions of clones of original packets. */ if (icsk->icsk_retransmits == 0) DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS); if (dccp_retransmit_skb(sk) != 0) { /* * Retransmission failed because of local congestion, * do not backoff. 
*/ if (--icsk->icsk_retransmits == 0) icsk->icsk_retransmits = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), DCCP_RTO_MAX); return; } icsk->icsk_backoff++; icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, DCCP_RTO_MAX); if (icsk->icsk_retransmits > sysctl_dccp_retries1) __sk_dst_reset(sk); } static void dccp_write_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); int event = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later */ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); goto out; } if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); goto out; } event = icsk->icsk_pending; icsk->icsk_pending = 0; switch (event) { case ICSK_TIME_RETRANS: dccp_retransmit_timer(sk); break; } out: bh_unlock_sock(sk); sock_put(sk); } /* * Timer for listening sockets */ static void dccp_response_timer(struct sock *sk) { inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT, DCCP_RTO_MAX); } static void dccp_keepalive_timer(unsigned long data) { struct sock *sk = (struct sock *)data; /* Only process if socket is not in use. */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ inet_csk_reset_keepalive_timer(sk, HZ / 20); goto out; } if (sk->sk_state == DCCP_LISTEN) { dccp_response_timer(sk); goto out; } out: bh_unlock_sock(sk); sock_put(sk); } /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ static void dccp_delack_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. 
*/ icsk->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); goto out; } if (sk->sk_state == DCCP_CLOSED || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; if (time_after(icsk->icsk_ack.timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); goto out; } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; if (inet_csk_ack_scheduled(sk)) { if (!icsk->icsk_ack.pingpong) { /* Delayed ACK missed: inflate ATO. */ icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); } else { /* Delayed ACK missed: leave pingpong mode and * deflate ATO. */ icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } dccp_send_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: bh_unlock_sock(sk); sock_put(sk); } /** * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface * See the comments above %ccid_dequeueing_decision for supported modes. */ static void dccp_write_xmitlet(unsigned long data) { struct sock *sk = (struct sock *)data; bh_lock_sock(sk); if (sock_owned_by_user(sk)) sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); else dccp_write_xmit(sk); bh_unlock_sock(sk); } static void dccp_write_xmit_timer(unsigned long data) { dccp_write_xmitlet(data); sock_put((struct sock *)data); } void dccp_init_xmit_timers(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, (unsigned long)sk); inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } static ktime_t dccp_timestamp_seed; /** * dccp_timestamp - 10s of microseconds time source * Returns the number of 10s of microseconds since loading DCCP. This is native * DCCP time difference format (RFC 4340, sec. 13). * Please note: This will wrap around about circa every 11.9 hours. 
*/ u32 dccp_timestamp(void) { s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); do_div(delta, 10); return delta; } EXPORT_SYMBOL_GPL(dccp_timestamp); void __init dccp_timestamping_init(void) { dccp_timestamp_seed = ktime_get_real(); }
gpl-2.0
linuxium/rkm-kk
drivers/media/video/saa7164/saa7164-bus.c
3101
14164
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "saa7164.h" /* The message bus to/from the firmware is a ring buffer in PCI address * space. Establish the defaults. */ int saa7164_bus_setup(struct saa7164_dev *dev) { struct tmComResBusInfo *b = &dev->bus; mutex_init(&b->lock); b->Type = TYPE_BUS_PCIe; b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE; b->m_pdwSetRing = (u8 *)(dev->bmmio + ((u32)dev->busdesc.CommandRing)); b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE; b->m_pdwGetRing = (u8 *)(dev->bmmio + ((u32)dev->busdesc.ResponseRing)); b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE; b->m_dwSetWritePos = ((u32)dev->intfdesc.BARLocation) + (2 * sizeof(u64)); b->m_dwSetReadPos = b->m_dwSetWritePos + (1 * sizeof(u32)); b->m_dwGetWritePos = b->m_dwSetWritePos + (2 * sizeof(u32)); b->m_dwGetReadPos = b->m_dwSetWritePos + (3 * sizeof(u32)); return 0; } void saa7164_bus_dump(struct saa7164_dev *dev) { struct tmComResBusInfo *b = &dev->bus; dprintk(DBGLVL_BUS, "Dumping the bus structure:\n"); dprintk(DBGLVL_BUS, " .type = %d\n", b->Type); dprintk(DBGLVL_BUS, " .dev->bmmio = 0x%p\n", dev->bmmio); dprintk(DBGLVL_BUS, " .m_wMaxReqSize = 0x%x\n", b->m_wMaxReqSize); dprintk(DBGLVL_BUS, " .m_pdwSetRing = 0x%p\n", b->m_pdwSetRing); 
dprintk(DBGLVL_BUS, " .m_dwSizeSetRing = 0x%x\n", b->m_dwSizeSetRing); dprintk(DBGLVL_BUS, " .m_pdwGetRing = 0x%p\n", b->m_pdwGetRing); dprintk(DBGLVL_BUS, " .m_dwSizeGetRing = 0x%x\n", b->m_dwSizeGetRing); dprintk(DBGLVL_BUS, " .m_dwSetReadPos = 0x%x (0x%08x)\n", b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos)); dprintk(DBGLVL_BUS, " .m_dwSetWritePos = 0x%x (0x%08x)\n", b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos)); dprintk(DBGLVL_BUS, " .m_dwGetReadPos = 0x%x (0x%08x)\n", b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos)); dprintk(DBGLVL_BUS, " .m_dwGetWritePos = 0x%x (0x%08x)\n", b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos)); } /* Intensionally throw a BUG() if the state of the message bus looks corrupt */ void saa7164_bus_verify(struct saa7164_dev *dev) { struct tmComResBusInfo *b = &dev->bus; int bug = 0; if (saa7164_readl(b->m_dwSetReadPos) > b->m_dwSizeSetRing) bug++; if (saa7164_readl(b->m_dwSetWritePos) > b->m_dwSizeSetRing) bug++; if (saa7164_readl(b->m_dwGetReadPos) > b->m_dwSizeGetRing) bug++; if (saa7164_readl(b->m_dwGetWritePos) > b->m_dwSizeGetRing) bug++; if (bug) { saa_debug = 0xffff; /* Ensure we get the bus dump */ saa7164_bus_dump(dev); saa_debug = 1024; /* Ensure we get the bus dump */ BUG(); } } void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m, void *buf) { dprintk(DBGLVL_BUS, "Dumping msg structure:\n"); dprintk(DBGLVL_BUS, " .id = %d\n", m->id); dprintk(DBGLVL_BUS, " .flags = 0x%x\n", m->flags); dprintk(DBGLVL_BUS, " .size = 0x%x\n", m->size); dprintk(DBGLVL_BUS, " .command = 0x%x\n", m->command); dprintk(DBGLVL_BUS, " .controlselector = 0x%x\n", m->controlselector); dprintk(DBGLVL_BUS, " .seqno = %d\n", m->seqno); if (buf) dprintk(DBGLVL_BUS, " .buffer (ignored)\n"); } /* * Places a command or a response on the bus. 
The implementation does not * know if it is a command or a response it just places the data on the * bus depending on the bus information given in the struct tmComResBusInfo * structure. If the command or response does not fit into the bus ring * buffer it will be refused. * * Return Value: * SAA_OK The function executed successfully. * < 0 One or more members are not initialized. */ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf) { struct tmComResBusInfo *bus = &dev->bus; u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp; u32 new_swp, space_rem; int ret = SAA_ERR_BAD_PARAMETER; if (!msg) { printk(KERN_ERR "%s() !msg\n", __func__); return SAA_ERR_BAD_PARAMETER; } dprintk(DBGLVL_BUS, "%s()\n", __func__); saa7164_bus_verify(dev); msg->size = cpu_to_le16(msg->size); msg->command = cpu_to_le16(msg->command); msg->controlselector = cpu_to_le16(msg->controlselector); if (msg->size > dev->bus.m_wMaxReqSize) { printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n", __func__); return SAA_ERR_BAD_PARAMETER; } if ((msg->size > 0) && (buf == NULL)) { printk(KERN_ERR "%s() Missing message buffer\n", __func__); return SAA_ERR_BAD_PARAMETER; } /* Lock the bus from any other access */ mutex_lock(&bus->lock); bytes_to_write = sizeof(*msg) + msg->size; free_write_space = 0; timeout = SAA_BUS_TIMEOUT; curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos)); /* Deal with ring wrapping issues */ if (curr_srp > curr_swp) /* Deal with the wrapped ring */ free_write_space = curr_srp - curr_swp; else /* The ring has not wrapped yet */ free_write_space = (curr_srp + bus->m_dwSizeSetRing) - curr_swp; dprintk(DBGLVL_BUS, "%s() bytes_to_write = %d\n", __func__, bytes_to_write); dprintk(DBGLVL_BUS, "%s() free_write_space = %d\n", __func__, free_write_space); dprintk(DBGLVL_BUS, "%s() curr_srp = %x\n", __func__, curr_srp); dprintk(DBGLVL_BUS, "%s() curr_swp = %x\n", __func__, 
curr_swp); /* Process the msg and write the content onto the bus */ while (bytes_to_write >= free_write_space) { if (timeout-- == 0) { printk(KERN_ERR "%s() bus timeout\n", __func__); ret = SAA_ERR_NO_RESOURCES; goto out; } /* TODO: Review this delay, efficient? */ /* Wait, allowing the hardware fetch time */ mdelay(1); /* Check the space usage again */ curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); /* Deal with ring wrapping issues */ if (curr_srp > curr_swp) /* Deal with the wrapped ring */ free_write_space = curr_srp - curr_swp; else /* Read didn't wrap around the buffer */ free_write_space = (curr_srp + bus->m_dwSizeSetRing) - curr_swp; } /* Calculate the new write position */ new_swp = curr_swp + bytes_to_write; dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp); dprintk(DBGLVL_BUS, "%s() bus->m_dwSizeSetRing = %x\n", __func__, bus->m_dwSizeSetRing); /* Mental Note: line 462 tmmhComResBusPCIe.cpp */ /* Check if we're going to wrap again */ if (new_swp > bus->m_dwSizeSetRing) { /* Ring wraps */ new_swp -= bus->m_dwSizeSetRing; space_rem = bus->m_dwSizeSetRing - curr_swp; dprintk(DBGLVL_BUS, "%s() space_rem = %x\n", __func__, space_rem); dprintk(DBGLVL_BUS, "%s() sizeof(*msg) = %d\n", __func__, (u32)sizeof(*msg)); if (space_rem < sizeof(*msg)) { dprintk(DBGLVL_BUS, "%s() tr4\n", __func__); /* Split the msg into pieces as the ring wraps */ memcpy(bus->m_pdwSetRing + curr_swp, msg, space_rem); memcpy(bus->m_pdwSetRing, (u8 *)msg + space_rem, sizeof(*msg) - space_rem); memcpy(bus->m_pdwSetRing + sizeof(*msg) - space_rem, buf, msg->size); } else if (space_rem == sizeof(*msg)) { dprintk(DBGLVL_BUS, "%s() tr5\n", __func__); /* Additional data at the beginning of the ring */ memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); memcpy(bus->m_pdwSetRing, buf, msg->size); } else { /* Additional data wraps around the ring */ memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); if (msg->size > 0) { memcpy(bus->m_pdwSetRing + curr_swp + 
sizeof(*msg), buf, space_rem - sizeof(*msg)); memcpy(bus->m_pdwSetRing, (u8 *)buf + space_rem - sizeof(*msg), bytes_to_write - space_rem); } } } /* (new_swp > bus->m_dwSizeSetRing) */ else { dprintk(DBGLVL_BUS, "%s() tr6\n", __func__); /* The ring buffer doesn't wrap, two simple copies */ memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); memcpy(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, msg->size); } dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp); /* Update the bus write position */ saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp)); ret = SAA_OK; out: saa7164_bus_dump(dev); mutex_unlock(&bus->lock); saa7164_bus_verify(dev); return ret; } /* * Receive a command or a response from the bus. The implementation does not * know if it is a command or a response it simply dequeues the data, * depending on the bus information given in the struct tmComResBusInfo * structure. * * Return Value: * 0 The function executed successfully. * < 0 One or more members are not initialized. */ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf, int peekonly) { struct tmComResBusInfo *bus = &dev->bus; u32 bytes_to_read, write_distance, curr_grp, curr_gwp, new_grp, buf_size, space_rem; struct tmComResInfo msg_tmp; int ret = SAA_ERR_BAD_PARAMETER; saa7164_bus_verify(dev); if (msg == NULL) return ret; if (msg->size > dev->bus.m_wMaxReqSize) { printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n", __func__); return ret; } if ((peekonly == 0) && (msg->size > 0) && (buf == NULL)) { printk(KERN_ERR "%s() Missing msg buf, size should be %d bytes\n", __func__, msg->size); return ret; } mutex_lock(&bus->lock); /* Peek the bus to see if a msg exists, if it's not what we're expecting * then return cleanly else read the message from the bus. 
*/ curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos)); curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos)); if (curr_gwp == curr_grp) { ret = SAA_ERR_EMPTY; goto out; } bytes_to_read = sizeof(*msg); /* Calculate write distance to current read position */ write_distance = 0; if (curr_gwp >= curr_grp) /* Write doesn't wrap around the ring */ write_distance = curr_gwp - curr_grp; else /* Write wraps around the ring */ write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp; if (bytes_to_read > write_distance) { printk(KERN_ERR "%s() No message/response found\n", __func__); ret = SAA_ERR_INVALID_COMMAND; goto out; } /* Calculate the new read position */ new_grp = curr_grp + bytes_to_read; if (new_grp > bus->m_dwSizeGetRing) { /* Ring wraps */ new_grp -= bus->m_dwSizeGetRing; space_rem = bus->m_dwSizeGetRing - curr_grp; memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem); memcpy((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing, bytes_to_read - space_rem); } else { /* No wrapping */ memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read); } /* No need to update the read positions, because this was a peek */ /* If the caller specifically want to peek, return */ if (peekonly) { memcpy(msg, &msg_tmp, sizeof(*msg)); goto peekout; } /* Check if the command/response matches what is expected */ if ((msg_tmp.id != msg->id) || (msg_tmp.command != msg->command) || (msg_tmp.controlselector != msg->controlselector) || (msg_tmp.seqno != msg->seqno) || (msg_tmp.size != msg->size)) { printk(KERN_ERR "%s() Unexpected msg miss-match\n", __func__); saa7164_bus_dumpmsg(dev, msg, buf); saa7164_bus_dumpmsg(dev, &msg_tmp, NULL); ret = SAA_ERR_INVALID_COMMAND; goto out; } /* Get the actual command and response from the bus */ buf_size = msg->size; bytes_to_read = sizeof(*msg) + msg->size; /* Calculate write distance to current read position */ write_distance = 0; if (curr_gwp >= curr_grp) /* Write doesn't wrap around the ring */ write_distance = curr_gwp - 
curr_grp; else /* Write wraps around the ring */ write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp; if (bytes_to_read > write_distance) { printk(KERN_ERR "%s() Invalid bus state, missing msg " "or mangled ring, faulty H/W / bad code?\n", __func__); ret = SAA_ERR_INVALID_COMMAND; goto out; } /* Calculate the new read position */ new_grp = curr_grp + bytes_to_read; if (new_grp > bus->m_dwSizeGetRing) { /* Ring wraps */ new_grp -= bus->m_dwSizeGetRing; space_rem = bus->m_dwSizeGetRing - curr_grp; if (space_rem < sizeof(*msg)) { /* msg wraps around the ring */ memcpy(msg, bus->m_pdwGetRing + curr_grp, space_rem); memcpy((u8 *)msg + space_rem, bus->m_pdwGetRing, sizeof(*msg) - space_rem); if (buf) memcpy(buf, bus->m_pdwGetRing + sizeof(*msg) - space_rem, buf_size); } else if (space_rem == sizeof(*msg)) { memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy(buf, bus->m_pdwGetRing, buf_size); } else { /* Additional data wraps around the ring */ memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) { memcpy(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), space_rem - sizeof(*msg)); memcpy(buf + space_rem - sizeof(*msg), bus->m_pdwGetRing, bytes_to_read - space_rem); } } } else { /* No wrapping */ memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), buf_size); } /* Update the read positions, adjusting the ring */ saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp)); peekout: msg->size = le16_to_cpu(msg->size); msg->command = le16_to_cpu(msg->command); msg->controlselector = le16_to_cpu(msg->controlselector); ret = SAA_OK; out: mutex_unlock(&bus->lock); saa7164_bus_verify(dev); return ret; }
gpl-2.0
hei1125/Nova_Kernel
net/dccp/timer.c
3101
7678
/* * net/dccp/timer.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/skbuff.h> #include "dccp.h" /* sysctl variables governing numbers of retransmission attempts */ int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; static void dccp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_done(sk); DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); } /* A write timeout has occurred. Process the after effects. */ static int dccp_write_timeout(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { if (icsk->icsk_retransmits != 0) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? : sysctl_dccp_request_retries; } else { if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black hole detection. :-( It is place to make it. It is not made. I do not want to make it. It is disguisting. It does not work in any case. Let me to cite the same draft, which requires for us to implement this: "The one security concern raised by this memo is that ICMP black holes are often caused by over-zealous security administrators who block all ICMP messages. It is vitally important that those who design and deploy security systems understand the impact of strict filtering on upper-layer protocols. 
The safest web site in the world is worthless if most TCP implementations cannot transfer data from it. It would be far nicer to have all of the black holes fixed rather than fixing all of the TCP implementations." Golden words :-). */ dst_negative_advice(sk); } retry_until = sysctl_dccp_retries2; /* * FIXME: see tcp_write_timout and tcp_out_of_resources */ } if (icsk->icsk_retransmits >= retry_until) { /* Has it gone just too far? */ dccp_write_err(sk); return 1; } return 0; } /* * The DCCP retransmit timer. */ static void dccp_retransmit_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); /* * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was * sent, no need to retransmit, this sock is dead. */ if (dccp_write_timeout(sk)) return; /* * We want to know the number of packets retransmitted, not the * total number of retransmissions of clones of original packets. */ if (icsk->icsk_retransmits == 0) DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS); if (dccp_retransmit_skb(sk) != 0) { /* * Retransmission failed because of local congestion, * do not backoff. 
*/ if (--icsk->icsk_retransmits == 0) icsk->icsk_retransmits = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), DCCP_RTO_MAX); return; } icsk->icsk_backoff++; icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, DCCP_RTO_MAX); if (icsk->icsk_retransmits > sysctl_dccp_retries1) __sk_dst_reset(sk); } static void dccp_write_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); int event = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later */ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); goto out; } if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); goto out; } event = icsk->icsk_pending; icsk->icsk_pending = 0; switch (event) { case ICSK_TIME_RETRANS: dccp_retransmit_timer(sk); break; } out: bh_unlock_sock(sk); sock_put(sk); } /* * Timer for listening sockets */ static void dccp_response_timer(struct sock *sk) { inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT, DCCP_RTO_MAX); } static void dccp_keepalive_timer(unsigned long data) { struct sock *sk = (struct sock *)data; /* Only process if socket is not in use. */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ inet_csk_reset_keepalive_timer(sk, HZ / 20); goto out; } if (sk->sk_state == DCCP_LISTEN) { dccp_response_timer(sk); goto out; } out: bh_unlock_sock(sk); sock_put(sk); } /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ static void dccp_delack_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. 
*/ icsk->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); goto out; } if (sk->sk_state == DCCP_CLOSED || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; if (time_after(icsk->icsk_ack.timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); goto out; } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; if (inet_csk_ack_scheduled(sk)) { if (!icsk->icsk_ack.pingpong) { /* Delayed ACK missed: inflate ATO. */ icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); } else { /* Delayed ACK missed: leave pingpong mode and * deflate ATO. */ icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } dccp_send_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: bh_unlock_sock(sk); sock_put(sk); } /** * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface * See the comments above %ccid_dequeueing_decision for supported modes. */ static void dccp_write_xmitlet(unsigned long data) { struct sock *sk = (struct sock *)data; bh_lock_sock(sk); if (sock_owned_by_user(sk)) sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); else dccp_write_xmit(sk); bh_unlock_sock(sk); } static void dccp_write_xmit_timer(unsigned long data) { dccp_write_xmitlet(data); sock_put((struct sock *)data); } void dccp_init_xmit_timers(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, (unsigned long)sk); inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } static ktime_t dccp_timestamp_seed; /** * dccp_timestamp - 10s of microseconds time source * Returns the number of 10s of microseconds since loading DCCP. This is native * DCCP time difference format (RFC 4340, sec. 13). * Please note: This will wrap around about circa every 11.9 hours. 
*/ u32 dccp_timestamp(void) { s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); do_div(delta, 10); return delta; } EXPORT_SYMBOL_GPL(dccp_timestamp); void __init dccp_timestamping_init(void) { dccp_timestamp_seed = ktime_get_real(); }
gpl-2.0
klothius/htc_pyramid_kernel_30
net/dccp/timer.c
3101
7678
/* * net/dccp/timer.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/skbuff.h> #include "dccp.h" /* sysctl variables governing numbers of retransmission attempts */ int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; static void dccp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_done(sk); DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); } /* A write timeout has occurred. Process the after effects. */ static int dccp_write_timeout(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { if (icsk->icsk_retransmits != 0) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? : sysctl_dccp_request_retries; } else { if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black hole detection. :-( It is place to make it. It is not made. I do not want to make it. It is disguisting. It does not work in any case. Let me to cite the same draft, which requires for us to implement this: "The one security concern raised by this memo is that ICMP black holes are often caused by over-zealous security administrators who block all ICMP messages. It is vitally important that those who design and deploy security systems understand the impact of strict filtering on upper-layer protocols. 
The safest web site in the world is worthless if most TCP implementations cannot transfer data from it. It would be far nicer to have all of the black holes fixed rather than fixing all of the TCP implementations." Golden words :-). */ dst_negative_advice(sk); } retry_until = sysctl_dccp_retries2; /* * FIXME: see tcp_write_timout and tcp_out_of_resources */ } if (icsk->icsk_retransmits >= retry_until) { /* Has it gone just too far? */ dccp_write_err(sk); return 1; } return 0; } /* * The DCCP retransmit timer. */ static void dccp_retransmit_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); /* * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was * sent, no need to retransmit, this sock is dead. */ if (dccp_write_timeout(sk)) return; /* * We want to know the number of packets retransmitted, not the * total number of retransmissions of clones of original packets. */ if (icsk->icsk_retransmits == 0) DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS); if (dccp_retransmit_skb(sk) != 0) { /* * Retransmission failed because of local congestion, * do not backoff. 
*/ if (--icsk->icsk_retransmits == 0) icsk->icsk_retransmits = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), DCCP_RTO_MAX); return; } icsk->icsk_backoff++; icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, DCCP_RTO_MAX); if (icsk->icsk_retransmits > sysctl_dccp_retries1) __sk_dst_reset(sk); } static void dccp_write_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); int event = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later */ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); goto out; } if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); goto out; } event = icsk->icsk_pending; icsk->icsk_pending = 0; switch (event) { case ICSK_TIME_RETRANS: dccp_retransmit_timer(sk); break; } out: bh_unlock_sock(sk); sock_put(sk); } /* * Timer for listening sockets */ static void dccp_response_timer(struct sock *sk) { inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT, DCCP_RTO_MAX); } static void dccp_keepalive_timer(unsigned long data) { struct sock *sk = (struct sock *)data; /* Only process if socket is not in use. */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ inet_csk_reset_keepalive_timer(sk, HZ / 20); goto out; } if (sk->sk_state == DCCP_LISTEN) { dccp_response_timer(sk); goto out; } out: bh_unlock_sock(sk); sock_put(sk); } /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ static void dccp_delack_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. 
*/ icsk->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); goto out; } if (sk->sk_state == DCCP_CLOSED || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; if (time_after(icsk->icsk_ack.timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); goto out; } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; if (inet_csk_ack_scheduled(sk)) { if (!icsk->icsk_ack.pingpong) { /* Delayed ACK missed: inflate ATO. */ icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); } else { /* Delayed ACK missed: leave pingpong mode and * deflate ATO. */ icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } dccp_send_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: bh_unlock_sock(sk); sock_put(sk); } /** * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface * See the comments above %ccid_dequeueing_decision for supported modes. */ static void dccp_write_xmitlet(unsigned long data) { struct sock *sk = (struct sock *)data; bh_lock_sock(sk); if (sock_owned_by_user(sk)) sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); else dccp_write_xmit(sk); bh_unlock_sock(sk); } static void dccp_write_xmit_timer(unsigned long data) { dccp_write_xmitlet(data); sock_put((struct sock *)data); } void dccp_init_xmit_timers(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, (unsigned long)sk); inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } static ktime_t dccp_timestamp_seed; /** * dccp_timestamp - 10s of microseconds time source * Returns the number of 10s of microseconds since loading DCCP. This is native * DCCP time difference format (RFC 4340, sec. 13). * Please note: This will wrap around about circa every 11.9 hours. 
*/ u32 dccp_timestamp(void) { s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); do_div(delta, 10); return delta; } EXPORT_SYMBOL_GPL(dccp_timestamp); void __init dccp_timestamping_init(void) { dccp_timestamp_seed = ktime_get_real(); }
gpl-2.0
thomhastings/linux-3.14
fs/ocfs2/export.c
3357
6341
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * export.c * * Functions to facilitate NFS exporting * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dir.h" #include "dlmglue.h" #include "dcache.h" #include "export.h" #include "inode.h" #include "buffer_head_io.h" #include "suballoc.h" #include "ocfs2_trace.h" struct ocfs2_inode_handle { u64 ih_blkno; u32 ih_generation; }; static struct dentry *ocfs2_get_dentry(struct super_block *sb, struct ocfs2_inode_handle *handle) { struct inode *inode; struct ocfs2_super *osb = OCFS2_SB(sb); u64 blkno = handle->ih_blkno; int status, set; struct dentry *result; trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno); if (blkno == 0) { result = ERR_PTR(-ESTALE); goto bail; } inode = ocfs2_ilookup(sb, blkno); /* * If the inode exists in memory, we only need to check it's * generation number */ if (inode) goto check_gen; /* * This will synchronize us against ocfs2_delete_inode() on * all nodes */ status = ocfs2_nfs_sync_lock(osb, 1); if (status < 0) { mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); goto check_err; } status = 
ocfs2_test_inode_bit(osb, blkno, &set); trace_ocfs2_get_dentry_test_bit(status, set); if (status < 0) { if (status == -EINVAL) { /* * The blkno NFS gave us doesn't even show up * as an inode, we return -ESTALE to be * nice */ status = -ESTALE; } else mlog(ML_ERROR, "test inode bit failed %d\n", status); goto unlock_nfs_sync; } /* If the inode allocator bit is clear, this inode must be stale */ if (!set) { status = -ESTALE; goto unlock_nfs_sync; } inode = ocfs2_iget(osb, blkno, 0, 0); unlock_nfs_sync: ocfs2_nfs_sync_unlock(osb, 1); check_err: if (status < 0) { if (status == -ESTALE) { trace_ocfs2_get_dentry_stale((unsigned long long)blkno, handle->ih_generation); } result = ERR_PTR(status); goto bail; } if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); result = (void *)inode; goto bail; } check_gen: if (handle->ih_generation != inode->i_generation) { iput(inode); trace_ocfs2_get_dentry_generation((unsigned long long)blkno, handle->ih_generation, inode->i_generation); result = ERR_PTR(-ESTALE); goto bail; } result = d_obtain_alias(inode); if (IS_ERR(result)) mlog_errno(PTR_ERR(result)); bail: trace_ocfs2_get_dentry_end(result); return result; } static struct dentry *ocfs2_get_parent(struct dentry *child) { int status; u64 blkno; struct dentry *parent; struct inode *dir = child->d_inode; trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); status = ocfs2_inode_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); goto bail; } status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); if (status < 0) { parent = ERR_PTR(-ENOENT); goto bail_unlock; } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); bail_unlock: ocfs2_inode_unlock(dir, 0); bail: trace_ocfs2_get_parent_end(parent); return parent; } static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len, struct inode *parent) { int len = *max_len; int type = 1; u64 blkno; 
u32 generation; __le32 *fh = (__force __le32 *) fh_in; #ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION #error "You go ahead and fix that mess, then. Somehow" trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, dentry->d_name.name, fh, len, connectable); #endif if (parent && (len < 6)) { *max_len = 6; type = FILEID_INVALID; goto bail; } else if (len < 3) { *max_len = 3; type = FILEID_INVALID; goto bail; } blkno = OCFS2_I(inode)->ip_blkno; generation = inode->i_generation; trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation); len = 3; fh[0] = cpu_to_le32((u32)(blkno >> 32)); fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[2] = cpu_to_le32(generation); if (parent) { blkno = OCFS2_I(parent)->ip_blkno; generation = parent->i_generation; fh[3] = cpu_to_le32((u32)(blkno >> 32)); fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[5] = cpu_to_le32(generation); len = 6; type = 2; trace_ocfs2_encode_fh_parent((unsigned long long)blkno, generation); } *max_len = len; bail: trace_ocfs2_encode_fh_type(type); return type; } static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle handle; if (fh_len < 3 || fh_type > 2) return NULL; handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32; handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]); handle.ih_generation = le32_to_cpu(fid->raw[2]); return ocfs2_get_dentry(sb, &handle); } static struct dentry *ocfs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle parent; if (fh_type != 2 || fh_len < 6) return NULL; parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32; parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]); parent.ih_generation = le32_to_cpu(fid->raw[5]); return ocfs2_get_dentry(sb, &parent); } const struct export_operations ocfs2_export_ops = { .encode_fh = ocfs2_encode_fh, .fh_to_dentry = ocfs2_fh_to_dentry, .fh_to_parent = ocfs2_fh_to_parent, .get_parent = ocfs2_get_parent, 
};
gpl-2.0
UberCM/kernel_asus_flo
drivers/usb/storage/alauda.c
4637
34365
/* * Driver for Alauda-based card readers * * Current development and maintenance by: * (c) 2005 Daniel Drake <dsd@gentoo.org> * * The 'Alauda' is a chip manufacturered by RATOC for OEM use. * * Alauda implements a vendor-specific command set to access two media reader * ports (XD, SmartMedia). This driver converts SCSI commands to the commands * which are accepted by these devices. * * The driver was developed through reverse-engineering, with the help of the * sddr09 driver which has many similarities, and with some help from the * (very old) vendor-supplied GPL sma03 driver. * * For protocol info, see http://alauda.sourceforge.net * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Alauda-based card readers"); MODULE_AUTHOR("Daniel Drake <dsd@gentoo.org>"); MODULE_LICENSE("GPL"); /* * Status bytes */ #define ALAUDA_STATUS_ERROR 0x01 #define ALAUDA_STATUS_READY 0x40 /* * Control opcodes (for request field) */ #define ALAUDA_GET_XD_MEDIA_STATUS 0x08 #define ALAUDA_GET_SM_MEDIA_STATUS 0x98 #define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a #define ALAUDA_ACK_SM_MEDIA_CHANGE 0x9a #define ALAUDA_GET_XD_MEDIA_SIG 0x86 #define ALAUDA_GET_SM_MEDIA_SIG 0x96 /* * Bulk command identity (byte 0) */ #define ALAUDA_BULK_CMD 0x40 /* * Bulk opcodes (byte 1) */ #define ALAUDA_BULK_GET_REDU_DATA 0x85 #define ALAUDA_BULK_READ_BLOCK 0x94 #define ALAUDA_BULK_ERASE_BLOCK 0xa3 #define ALAUDA_BULK_WRITE_BLOCK 0xb4 #define ALAUDA_BULK_GET_STATUS2 0xb7 #define ALAUDA_BULK_RESET_MEDIA 0xe0 /* * Port to operate on (byte 8) */ #define ALAUDA_PORT_XD 0x00 #define ALAUDA_PORT_SM 0x01 /* * LBA and PBA are unsigned ints. Special values. 
 */

/* Special values stored in the block maps: */
#define UNDEF 0xffff		/* block unmapped / never written */
#define SPARE 0xfffe
#define UNUSABLE 0xfffd		/* block failed a sanity check; never use */

/* Per-slot (xD or SmartMedia) geometry and block-mapping state. */
struct alauda_media_info {
	unsigned long capacity;		/* total media size in bytes */
	unsigned int pagesize;		/* page size in bytes */
	unsigned int blocksize;	/* number of pages per block */
	unsigned int uzonesize;	/* number of usable blocks per zone */
	unsigned int zonesize;		/* number of blocks per zone */
	unsigned int blockmask;	/* mask to get page from address */

	unsigned char pageshift;
	unsigned char blockshift;
	unsigned char zoneshift;

	u16 **lba_to_pba;		/* logical to physical block map */
	u16 **pba_to_lba;		/* physical to logical block map */
};

/* Per-device state; lives in us->extra and is freed by the destructor. */
struct alauda_info {
	struct alauda_media_info port[2];	/* one entry per media slot */
	int wr_ep;			/* endpoint to write data out of */

	unsigned char sense_key;	/* saved sense data for REQUEST_SENSE */
	unsigned long sense_asc;	/* additional sense code */
	unsigned long sense_ascq;	/* additional sense code qualifier */
};

#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)

/* The SCSI LUN selects which of the two media slots we talk to. */
#define MEDIA_PORT(us) us->srb->device->lun
#define MEDIA_INFO(us) ((struct alauda_info *)us->extra)->port[MEDIA_PORT(us)]

/* Split a physical block address into the fields the bulk commands expect. */
#define PBA_LO(pba) ((pba & 0xF) << 5)
#define PBA_HI(pba) (pba >> 3)
#define PBA_ZONE(pba) (pba >> 11)

static int init_alauda(struct us_data *us);

/*
 * The table of devices
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

static struct usb_device_id alauda_usb_ids[] = {
#	include "unusual_alauda.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, alauda_usb_ids);

#undef UNUSUAL_DEV

/*
 * The flags table
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name, \
	.productName = product_name, \
	.useProtocol = use_protocol, \
	.useTransport = use_transport, \
	.initFunction = init_function, \
}

static struct us_unusual_dev alauda_unusual_dev_list[] = {
#	include "unusual_alauda.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV

/*
 * Media handling
 */

struct alauda_card_info {
	unsigned char id;		/* id byte */
	unsigned char chipshift;	/* 1<<cs bytes total capacity */
	unsigned char pageshift;	/* 1<<ps bytes in a page */
	unsigned char blockshift;	/* 1<<bs pages per block */
	unsigned char zoneshift;	/* 1<<zs blocks per zone */
};

static struct alauda_card_info alauda_card_ids[] = {
	/* NAND flash */
	{ 0x6e, 20, 8, 4, 8},	/* 1 MB */
	{ 0xe8, 20, 8, 4, 8},	/* 1 MB */
	{ 0xec, 20, 8, 4, 8},	/* 1 MB */
	{ 0x64, 21, 8, 4, 9},	/* 2 MB */
	{ 0xea, 21, 8, 4, 9},	/* 2 MB */
	{ 0x6b, 22, 9, 4, 9},	/* 4 MB */
	{ 0xe3, 22, 9, 4, 9},	/* 4 MB */
	{ 0xe5, 22, 9, 4, 9},	/* 4 MB */
	{ 0xe6, 23, 9, 4, 10},	/* 8 MB */
	{ 0x73, 24, 9, 5, 10},	/* 16 MB */
	{ 0x75, 25, 9, 5, 10},	/* 32 MB */
	{ 0x76, 26, 9, 5, 10},	/* 64 MB */
	{ 0x79, 27, 9, 5, 10},	/* 128 MB */
	{ 0x71, 28, 9, 5, 10},	/* 256 MB */

	/* MASK ROM */
	{ 0x5d, 21, 9, 4, 8},	/* 2 MB */
	{ 0xd5, 22, 9, 4, 9},	/* 4 MB */
	{ 0xd6, 23, 9, 4, 10},	/* 8 MB */
	{ 0x57, 24, 9, 4, 11},	/* 16 MB */
	{ 0x58, 25, 9, 4, 12},	/* 32 MB */
	{ 0,}
};

/* Look up the geometry entry for a media id byte; NULL if unknown. */
static struct alauda_card_info *alauda_card_find_id(unsigned char id)
{
	int i;

	for (i = 0; alauda_card_ids[i].id != 0; i++)
		if (alauda_card_ids[i].id == id)
			return &(alauda_card_ids[i]);
	return NULL;
}

/*
 * ECC computation.
 */

/* Lookup tables filled once by nand_init_ecc(). */
static unsigned char parity[256];
static unsigned char ecc2[256];

/* Precompute the byte-parity table and the third ECC byte table. */
static void nand_init_ecc(void)
{
	int i, j, a;

	parity[0] = 0;
	for (i = 1; i < 256; i++)
		parity[i] = (parity[i&(i-1)] ^ 1);

	for (i = 0; i < 256; i++) {
		a = 0;
		for (j = 0; j < 8; j++) {
			if (i & (1<<j)) {
				if ((j & 1) == 0)
					a ^= 0x04;
				if ((j & 2) == 0)
					a ^= 0x10;
				if ((j & 4) == 0)
					a ^= 0x40;
			}
		}
		ecc2[i] = ~(a ^ (a<<1) ^ (parity[i] ? 0xa8 : 0));
	}
}

/* compute 3-byte ecc on 256 bytes */
static void nand_compute_ecc(unsigned char *data, unsigned char *ecc)
{
	int i, j, a;
	unsigned char par, bit, bits[8];

	par = 0;
	for (j = 0; j < 8; j++)
		bits[j] = 0;

	/* collect 16 checksum bits */
	for (i = 0; i < 256; i++) {
		par ^= data[i];
		bit = parity[data[i]];
		for (j = 0; j < 8; j++)
			if ((i & (1<<j)) == 0)
				bits[j] ^= bit;
	}

	/* put 4+4+4 = 12 bits in the ecc */
	a = (bits[3] << 6) + (bits[2] << 4) + (bits[1] << 2) + bits[0];
	ecc[0] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

	a = (bits[7] << 6) + (bits[6] << 4) + (bits[5] << 2) + bits[4];
	ecc[1] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

	ecc[2] = ecc2[par];
}

/* Nonzero iff the stored ECC matches the freshly computed one. */
static int nand_compare_ecc(unsigned char *data, unsigned char *ecc)
{
	return (data[0] == ecc[0] && data[1] == ecc[1] && data[2] == ecc[2]);
}

static void nand_store_ecc(unsigned char *data, unsigned char *ecc)
{
	memcpy(data, ecc, 3);
}

/*
 * Alauda driver
 */

/*
 * Forget our PBA <---> LBA mappings for a particular port
 */
static void alauda_free_maps (struct alauda_media_info *media_info)
{
	unsigned int shift = media_info->zoneshift
		+ media_info->blockshift + media_info->pageshift;
	unsigned int num_zones = media_info->capacity >> shift;
	unsigned int i;

	if (media_info->lba_to_pba != NULL)
		for (i = 0; i < num_zones; i++) {
			kfree(media_info->lba_to_pba[i]);
			media_info->lba_to_pba[i] = NULL;
		}

	if (media_info->pba_to_lba != NULL)
		for (i = 0; i < num_zones; i++) {
			kfree(media_info->pba_to_lba[i]);
			media_info->pba_to_lba[i] = NULL;
		}
}

/*
 * Returns 2 bytes of status data
 * The first byte describes media status, and second byte describes door status
 */
static int alauda_get_media_status(struct us_data *us, unsigned char *data)
{
	int rc;
	unsigned char command;

	if (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		command = ALAUDA_GET_XD_MEDIA_STATUS;
	else
		command = ALAUDA_GET_SM_MEDIA_STATUS;

	rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
		command, 0xc0, 0, 1, data, 2);

	US_DEBUGP("alauda_get_media_status: Media status %02X %02X\n",
		data[0], data[1]);

	return rc;
}

/*
 * Clears the "media was changed" bit so that we know when it changes again
 * in the future.
 */
static int alauda_ack_media(struct us_data *us)
{
	unsigned char command;

	if (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		command = ALAUDA_ACK_XD_MEDIA_CHANGE;
	else
		command = ALAUDA_ACK_SM_MEDIA_CHANGE;

	return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
		command, 0x40, 0, 1, NULL, 0);
}

/*
 * Retrieves a 4-byte media signature, which indicates manufacturer, capacity,
 * and some other details.
 */
static int alauda_get_media_signature(struct us_data *us, unsigned char *data)
{
	unsigned char command;

	if (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		command = ALAUDA_GET_XD_MEDIA_SIG;
	else
		command = ALAUDA_GET_SM_MEDIA_SIG;

	return usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
		command, 0xc0, 0, 0, data, 4);
}

/*
 * Resets the media status (but not the whole device?)
 */
static int alauda_reset_media(struct us_data *us)
{
	unsigned char *command = us->iobuf;

	memset(command, 0, 9);
	command[0] = ALAUDA_BULK_CMD;
	command[1] = ALAUDA_BULK_RESET_MEDIA;
	command[8] = MEDIA_PORT(us);

	return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
}

/*
 * Examines the media and deduces capacity, etc.
*/ static int alauda_init_media(struct us_data *us) { unsigned char *data = us->iobuf; int ready = 0; struct alauda_card_info *media_info; unsigned int num_zones; while (ready == 0) { msleep(20); if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (data[0] & 0x10) ready = 1; } US_DEBUGP("alauda_init_media: We are ready for action!\n"); if (alauda_ack_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; msleep(10); if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (data[0] != 0x14) { US_DEBUGP("alauda_init_media: Media not ready after ack\n"); return USB_STOR_TRANSPORT_ERROR; } if (alauda_get_media_signature(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; US_DEBUGP("alauda_init_media: Media signature: %02X %02X %02X %02X\n", data[0], data[1], data[2], data[3]); media_info = alauda_card_find_id(data[1]); if (media_info == NULL) { printk(KERN_WARNING "alauda_init_media: Unrecognised media signature: " "%02X %02X %02X %02X\n", data[0], data[1], data[2], data[3]); return USB_STOR_TRANSPORT_ERROR; } MEDIA_INFO(us).capacity = 1 << media_info->chipshift; US_DEBUGP("Found media with capacity: %ldMB\n", MEDIA_INFO(us).capacity >> 20); MEDIA_INFO(us).pageshift = media_info->pageshift; MEDIA_INFO(us).blockshift = media_info->blockshift; MEDIA_INFO(us).zoneshift = media_info->zoneshift; MEDIA_INFO(us).pagesize = 1 << media_info->pageshift; MEDIA_INFO(us).blocksize = 1 << media_info->blockshift; MEDIA_INFO(us).zonesize = 1 << media_info->zoneshift; MEDIA_INFO(us).uzonesize = ((1 << media_info->zoneshift) / 128) * 125; MEDIA_INFO(us).blockmask = MEDIA_INFO(us).blocksize - 1; num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); if 
(alauda_reset_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } /* * Examines the media status and does the right thing when the media has gone, * appeared, or changed. */ static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; unsigned char status[2]; int rc; rc = alauda_get_media_status(us, status); /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) || ((status[1] & 0x01) == 0)) { US_DEBUGP("alauda_check_media: No media, or door open\n"); alauda_free_maps(&MEDIA_INFO(us)); info->sense_key = 0x02; info->sense_asc = 0x3A; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } /* Check for media change */ if (status[0] & 0x08) { US_DEBUGP("alauda_check_media: Media change detected\n"); alauda_free_maps(&MEDIA_INFO(us)); alauda_init_media(us); info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } return USB_STOR_TRANSPORT_GOOD; } /* * Checks the status from the 2nd status register * Returns 3 bytes of status data, only the first is known */ static int alauda_check_status2(struct us_data *us) { int rc; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_GET_STATUS2, 0, 0, 0, 0, 3, 0, MEDIA_PORT(us) }; unsigned char data[3]; rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; rc = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, 3, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; US_DEBUGP("alauda_check_status2: %02X %02X %02X\n", data[0], data[1], data[2]); if (data[0] & ALAUDA_STATUS_ERROR) return USB_STOR_XFER_ERROR; return USB_STOR_XFER_GOOD; } /* * Gets the redundancy data for the first page of a PBA * Returns 16 bytes. 
 */
static int alauda_get_redu_data(struct us_data *us, u16 pba,
		unsigned char *data)
{
	int rc;
	unsigned char command[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_GET_REDU_DATA,
		PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba), 0, 0,
		MEDIA_PORT(us)
	};

	rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	/* caller supplies a 16-byte buffer for the redundancy area */
	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		data, 16, NULL);
}

/*
 * Finds the first unused PBA in a zone
 * Returns the absolute PBA of an unused PBA, or 0 if none found.
 */
static u16 alauda_find_unused_pba(struct alauda_media_info *info,
		unsigned int zone)
{
	u16 *pba_to_lba = info->pba_to_lba[zone];
	unsigned int i;

	for (i = 0; i < info->zonesize; i++)
		if (pba_to_lba[i] == UNDEF)
			return (zone << info->zoneshift) + i;

	return 0;
}

/*
 * Reads the redundancy data for all PBA's in a zone
 * Produces lba <--> pba mappings
 */
static int alauda_read_map(struct us_data *us, unsigned int zone)
{
	unsigned char *data = us->iobuf;
	int result;
	int i, j;
	unsigned int zonesize = MEDIA_INFO(us).zonesize;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	unsigned int lba_offset, lba_real, blocknum;
	unsigned int zone_base_lba = zone * uzonesize;
	unsigned int zone_base_pba = zone * zonesize;
	u16 *lba_to_pba = kcalloc(zonesize, sizeof(u16), GFP_NOIO);
	u16 *pba_to_lba = kcalloc(zonesize, sizeof(u16), GFP_NOIO);

	if (lba_to_pba == NULL || pba_to_lba == NULL) {
		result = USB_STOR_TRANSPORT_ERROR;
		goto error;
	}

	US_DEBUGP("alauda_read_map: Mapping blocks for zone %d\n", zone);

	/* 1024 PBA's per zone */
	for (i = 0; i < zonesize; i++)
		lba_to_pba[i] = pba_to_lba[i] = UNDEF;

	/* classify each block from its 16-byte redundancy area */
	for (i = 0; i < zonesize; i++) {
		blocknum = zone_base_pba + i;

		result = alauda_get_redu_data(us, blocknum, data);
		if (result != USB_STOR_XFER_GOOD) {
			result = USB_STOR_TRANSPORT_ERROR;
			goto error;
		}

		/* special PBAs have control field 0^16 */
		for (j = 0; j < 16; j++)
			if (data[j] != 0)
				goto nonz;
		pba_to_lba[i] = UNUSABLE;
		US_DEBUGP("alauda_read_map: PBA %d has no logical mapping\n",
			blocknum);
		continue;

	nonz:
		/* unwritten PBAs have control field FF^16 */
		for (j = 0; j < 16; j++)
			if (data[j] != 0xff)
				goto nonff;
		continue;

	nonff:
		/* normal PBAs start with six FFs */
		if (j < 6) {
			US_DEBUGP("alauda_read_map: PBA %d has no logical mapping: "
			       "reserved area = %02X%02X%02X%02X "
			       "data status %02X block status %02X\n",
			       blocknum, data[0], data[1], data[2], data[3],
			       data[4], data[5]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		if ((data[6] >> 4) != 0x01) {
			US_DEBUGP("alauda_read_map: PBA %d has invalid address "
			       "field %02X%02X/%02X%02X\n",
			       blocknum, data[6], data[7],
			       data[11], data[12]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		/* check even parity */
		if (parity[data[6] ^ data[7]]) {
			printk(KERN_WARNING
			       "alauda_read_map: Bad parity in LBA for block %d"
			       " (%02X %02X)\n", i, data[6], data[7]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		lba_offset = short_pack(data[7], data[6]);
		lba_offset = (lba_offset & 0x07FF) >> 1;
		lba_real = lba_offset + zone_base_lba;

		/*
		 * Every 1024 physical blocks ("zone"), the LBA numbers
		 * go back to zero, but are within a higher block of LBA's.
		 * Also, there is a maximum of 1000 LBA's per zone.
		 * In other words, in PBA 1024-2047 you will find LBA 0-999
		 * which are really LBA 1000-1999. This allows for 24 bad
		 * or special physical blocks per zone.
		 */

		if (lba_offset >= uzonesize) {
			printk(KERN_WARNING
			       "alauda_read_map: Bad low LBA %d for block %d\n",
			       lba_real, blocknum);
			continue;
		}

		if (lba_to_pba[lba_offset] != UNDEF) {
			printk(KERN_WARNING
			       "alauda_read_map: "
			       "LBA %d seen for PBA %d and %d\n",
			       lba_real, lba_to_pba[lba_offset], blocknum);
			continue;
		}

		pba_to_lba[i] = lba_real;
		lba_to_pba[lba_offset] = blocknum;
		continue;
	}

	/* hand ownership of both maps to MEDIA_INFO on success */
	MEDIA_INFO(us).lba_to_pba[zone] = lba_to_pba;
	MEDIA_INFO(us).pba_to_lba[zone] = pba_to_lba;
	result = 0;
	goto out;

error:
	kfree(lba_to_pba);
	kfree(pba_to_lba);
out:
	return result;
}

/*
 * Checks to see whether we have already mapped a certain zone
 * If we haven't, the map is generated
 */
static void alauda_ensure_map_for_zone(struct us_data *us, unsigned int zone)
{
	if (MEDIA_INFO(us).lba_to_pba[zone] == NULL
		|| MEDIA_INFO(us).pba_to_lba[zone] == NULL)
		alauda_read_map(us, zone);
}

/*
 * Erases an entire block
 */
static int alauda_erase_block(struct us_data *us, u16 pba)
{
	int rc;
	unsigned char command[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, MEDIA_PORT(us)
	};
	unsigned char buf[2];

	US_DEBUGP("alauda_erase_block: Erasing PBA %d\n", pba);

	rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	rc = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		buf, 2, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	US_DEBUGP("alauda_erase_block: Erase result: %02X %02X\n",
		buf[0], buf[1]);
	return rc;
}

/*
 * Reads data from a certain offset page inside a PBA, including interleaved
 * redundancy data. Returns (pagesize+64)*pages bytes in data.
 */
static int alauda_read_block_raw(struct us_data *us, u16 pba,
		unsigned int page, unsigned int pages, unsigned char *data)
{
	int rc;
	unsigned char command[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_READ_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us)
	};

	US_DEBUGP("alauda_read_block: pba %d page %d count %d\n",
		pba, page, pages);

	rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL);
}

/*
 * Reads data from a certain offset page inside a PBA, excluding redundancy
 * data. Returns pagesize*pages bytes in data. Note that data must be big enough
 * to hold (pagesize+64)*pages bytes of data, but you can ignore those 'extra'
 * trailing bytes outside this function.
 */
static int alauda_read_block(struct us_data *us, u16 pba,
		unsigned int page, unsigned int pages, unsigned char *data)
{
	int i, rc;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;

	rc = alauda_read_block_raw(us, pba, page, pages, data);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	/* Cut out the redundancy data: compact pages in place */
	for (i = 0; i < pages; i++) {
		int dest_offset = i * pagesize;
		int src_offset = i * (pagesize + 64);
		memmove(data + dest_offset, data + src_offset, pagesize);
	}

	return rc;
}

/*
 * Writes an entire block of data and checks status after write.
 * Redundancy data must be already included in data. Data should be
 * (pagesize+64)*blocksize bytes in length.
 */
static int alauda_write_block(struct us_data *us, u16 pba, unsigned char *data)
{
	int rc;
	struct alauda_info *info = (struct alauda_info *) us->extra;
	unsigned char command[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba), 32, 0, MEDIA_PORT(us)
	};

	US_DEBUGP("alauda_write_block: pba %d\n", pba);

	rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	/* payload goes out through the dedicated data-write endpoint */
	rc = usb_stor_bulk_transfer_buf(us, info->wr_ep, data,
		(MEDIA_INFO(us).pagesize + 64) * MEDIA_INFO(us).blocksize,
		NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	return alauda_check_status2(us);
}

/*
 * Write some data to a specific LBA.
 */
static int alauda_write_lba(struct us_data *us, u16 lba,
		unsigned int page, unsigned int pages,
		unsigned char *ptr, unsigned char *blockbuffer)
{
	u16 pba, lbap, new_pba;
	unsigned char *bptr, *cptr, *xptr;
	unsigned char ecc[3];
	int i, result;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	unsigned int zonesize = MEDIA_INFO(us).zonesize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int lba_offset = lba % uzonesize;
	unsigned int new_pba_offset;
	unsigned int zone = lba / uzonesize;

	alauda_ensure_map_for_zone(us, zone);

	pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
	if (pba == 1) {
		/* Maybe it is impossible to write to PBA 1.
		   Fake success, but don't do anything.
		 */
		printk(KERN_WARNING "alauda_write_lba: avoid writing to pba 1\n");
		return USB_STOR_TRANSPORT_GOOD;
	}

	new_pba = alauda_find_unused_pba(&MEDIA_INFO(us), zone);
	if (!new_pba) {
		printk(KERN_WARNING
		       "alauda_write_lba: Out of unused blocks\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* read old contents */
	if (pba != UNDEF) {
		result = alauda_read_block_raw(us, pba, 0,
			blocksize, blockbuffer);
		if (result != USB_STOR_XFER_GOOD)
			return result;
	} else {
		memset(blockbuffer, 0, blocksize * (pagesize + 64));
	}

	/* build the logical-address field, keeping even parity */
	lbap = (lba_offset << 1) | 0x1000;
	if (parity[MSB_of(lbap) ^ LSB_of(lbap)])
		lbap ^= 1;

	/* check old contents and fill lba */
	for (i = 0; i < blocksize; i++) {
		bptr = blockbuffer + (i * (pagesize + 64));
		cptr = bptr + pagesize;
		nand_compute_ecc(bptr, ecc);
		if (!nand_compare_ecc(cptr+13, ecc)) {
			US_DEBUGP("Warning: bad ecc in page %d- of pba %d\n",
				  i, pba);
			nand_store_ecc(cptr+13, ecc);
		}
		nand_compute_ecc(bptr + (pagesize / 2), ecc);
		if (!nand_compare_ecc(cptr+8, ecc)) {
			US_DEBUGP("Warning: bad ecc in page %d+ of pba %d\n",
				  i, pba);
			nand_store_ecc(cptr+8, ecc);
		}
		cptr[6] = cptr[11] = MSB_of(lbap);
		cptr[7] = cptr[12] = LSB_of(lbap);
	}

	/* copy in new stuff and compute ECC */
	xptr = ptr;
	for (i = page; i < page+pages; i++) {
		bptr = blockbuffer + (i * (pagesize + 64));
		cptr = bptr + pagesize;
		memcpy(bptr, xptr, pagesize);
		xptr += pagesize;
		nand_compute_ecc(bptr, ecc);
		nand_store_ecc(cptr+13, ecc);
		nand_compute_ecc(bptr + (pagesize / 2), ecc);
		nand_store_ecc(cptr+8, ecc);
	}

	result = alauda_write_block(us, new_pba, blockbuffer);
	if (result != USB_STOR_XFER_GOOD)
		return result;

	/* update both maps before retiring the old block */
	new_pba_offset = new_pba - (zone * zonesize);
	MEDIA_INFO(us).pba_to_lba[zone][new_pba_offset] = lba;
	MEDIA_INFO(us).lba_to_pba[zone][lba_offset] = new_pba;
	US_DEBUGP("alauda_write_lba: Remapped LBA %d to PBA %d\n",
		lba, new_pba);

	if (pba != UNDEF) {
		unsigned int pba_offset = pba - (zone * zonesize);
		result = alauda_erase_block(us, pba);
		if (result != USB_STOR_XFER_GOOD)
			return result;
		MEDIA_INFO(us).pba_to_lba[zone][pba_offset] = UNDEF;
	}

	return USB_STOR_TRANSPORT_GOOD;
}

/*
 * Read data from a specific sector address
 */
static int alauda_read_data(struct us_data *us, unsigned long address,
		unsigned int sectors)
{
	unsigned char *buffer;
	u16 lba, max_lba;
	unsigned int page, len, offset;
	unsigned int blockshift = MEDIA_INFO(us).blockshift;
	unsigned int pageshift = MEDIA_INFO(us).pageshift;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	struct scatterlist *sg;
	int result;

	/*
	 * Since we only read in one block at a time, we have to create
	 * a bounce buffer and move the data a piece at a time between the
	 * bounce buffer and the actual transfer buffer.
	 * We make this buffer big enough to hold temporary redundancy data,
	 * which we use when reading the data blocks.
	 */
	len = min(sectors, blocksize) * (pagesize + 64);
	buffer = kmalloc(len, GFP_NOIO);
	if (buffer == NULL) {
		printk(KERN_WARNING "alauda_read_data: Out of memory\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Figure out the initial LBA and page */
	lba = address >> blockshift;
	page = (address & MEDIA_INFO(us).blockmask);
	max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift);

	result = USB_STOR_TRANSPORT_GOOD;
	offset = 0;
	sg = NULL;

	while (sectors > 0) {
		unsigned int zone = lba / uzonesize; /* integer division */
		unsigned int lba_offset = lba - (zone * uzonesize);
		unsigned int pages;
		u16 pba;
		alauda_ensure_map_for_zone(us, zone);

		/* Not overflowing capacity? */
		if (lba >= max_lba) {
			US_DEBUGP("Error: Requested lba %u exceeds "
				  "maximum %u\n", lba, max_lba);
			result = USB_STOR_TRANSPORT_ERROR;
			break;
		}

		/* Find number of pages we can read in this block */
		pages = min(sectors, blocksize - page);
		len = pages << pageshift;

		/* Find where this lba lives on disk */
		pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];

		if (pba == UNDEF) {	/* this lba was never written */
			US_DEBUGP("Read %d zero pages (LBA %d) page %d\n",
				  pages, lba, page);

			/* This is not really an error. It just means
			   that the block has never been written.
			   Instead of returning USB_STOR_TRANSPORT_ERROR
			   it is better to return all zero data. */

			memset(buffer, 0, len);
		} else {
			US_DEBUGP("Read %d pages, from PBA %d"
				  " (LBA %d) page %d\n",
				  pages, pba, lba, page);

			result = alauda_read_block(us, pba, page, pages,
				buffer);
			if (result != USB_STOR_TRANSPORT_GOOD)
				break;
		}

		/* Store the data in the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, TO_XFER_BUF);

		page = 0;
		lba++;
		sectors -= pages;
	}

	kfree(buffer);
	return result;
}

/*
 * Write data to a specific sector address
 */
static int alauda_write_data(struct us_data *us, unsigned long address,
		unsigned int sectors)
{
	unsigned char *buffer, *blockbuffer;
	unsigned int page, len, offset;
	unsigned int blockshift = MEDIA_INFO(us).blockshift;
	unsigned int pageshift = MEDIA_INFO(us).pageshift;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	struct scatterlist *sg;
	u16 lba, max_lba;
	int result;

	/*
	 * Since we don't write the user data directly to the device,
	 * we have to create a bounce buffer and move the data a piece
	 * at a time between the bounce buffer and the actual transfer buffer.
	 */
	len = min(sectors, blocksize) * pagesize;
	buffer = kmalloc(len, GFP_NOIO);
	if (buffer == NULL) {
		printk(KERN_WARNING "alauda_write_data: Out of memory\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/*
	 * We also need a temporary block buffer, where we read in the old data,
	 * overwrite parts with the new data, and manipulate the redundancy data
	 */
	blockbuffer = kmalloc((pagesize + 64) * blocksize, GFP_NOIO);
	if (blockbuffer == NULL) {
		printk(KERN_WARNING "alauda_write_data: Out of memory\n");
		kfree(buffer);
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Figure out the initial LBA and page */
	lba = address >> blockshift;
	page = (address & MEDIA_INFO(us).blockmask);
	max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift);

	result = USB_STOR_TRANSPORT_GOOD;
	offset = 0;
	sg = NULL;

	while (sectors > 0) {
		/* Write as many sectors as possible in this block */
		unsigned int pages = min(sectors, blocksize - page);
		len = pages << pageshift;

		/* Not overflowing capacity? */
		if (lba >= max_lba) {
			US_DEBUGP("alauda_write_data: Requested lba %u exceeds "
				  "maximum %u\n", lba, max_lba);
			result = USB_STOR_TRANSPORT_ERROR;
			break;
		}

		/* Get the data from the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, FROM_XFER_BUF);

		result = alauda_write_lba(us, lba, page, pages, buffer,
			blockbuffer);
		if (result != USB_STOR_TRANSPORT_GOOD)
			break;

		page = 0;
		lba++;
		sectors -= pages;
	}

	kfree(buffer);
	kfree(blockbuffer);
	return result;
}

/*
 * Our interface with the rest of the world
 */

/* us->extra_destructor: free both ports' maps and their zone tables. */
static void alauda_info_destructor(void *extra)
{
	struct alauda_info *info = (struct alauda_info *) extra;
	int port;

	if (!info)
		return;

	for (port = 0; port < 2; port++) {
		struct alauda_media_info *media_info = &info->port[port];

		alauda_free_maps(media_info);
		kfree(media_info->lba_to_pba);
		kfree(media_info->pba_to_lba);
	}
}

/*
 * Initialize alauda_info struct and find the data-write endpoint
 */
static int init_alauda(struct us_data *us)
{
	struct alauda_info *info;
	struct usb_host_interface *altsetting
= us->pusb_intf->cur_altsetting; nand_init_ecc(); us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO); if (!us->extra) { US_DEBUGP("init_alauda: Gah! Can't allocate storage for" "alauda info struct!\n"); return USB_STOR_TRANSPORT_ERROR; } info = (struct alauda_info *) us->extra; us->extra_destructor = alauda_info_destructor; info->wr_ep = usb_sndbulkpipe(us->pusb_dev, altsetting->endpoint[0].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); return USB_STOR_TRANSPORT_GOOD; } static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us) { int rc; struct alauda_info *info = (struct alauda_info *) us->extra; unsigned char *ptr = us->iobuf; static unsigned char inquiry_response[36] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (srb->cmnd[0] == INQUIRY) { US_DEBUGP("alauda_transport: INQUIRY. " "Returning bogus response.\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == TEST_UNIT_READY) { US_DEBUGP("alauda_transport: TEST_UNIT_READY.\n"); return alauda_check_media(us); } if (srb->cmnd[0] == READ_CAPACITY) { unsigned int num_zones; unsigned long capacity; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); capacity = num_zones * MEDIA_INFO(us).uzonesize * MEDIA_INFO(us).blocksize; /* Report capacity and page size */ ((__be32 *) ptr)[0] = cpu_to_be32(capacity - 1); ((__be32 *) ptr)[1] = cpu_to_be32(512); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_10) { unsigned int page, pages; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; page = short_pack(srb->cmnd[3], srb->cmnd[2]); page <<= 16; page |= short_pack(srb->cmnd[5], srb->cmnd[4]); pages = short_pack(srb->cmnd[8], srb->cmnd[7]); US_DEBUGP("alauda_transport: READ_10: page %d 
pagect %d\n", page, pages); return alauda_read_data(us, page, pages); } if (srb->cmnd[0] == WRITE_10) { unsigned int page, pages; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; page = short_pack(srb->cmnd[3], srb->cmnd[2]); page <<= 16; page |= short_pack(srb->cmnd[5], srb->cmnd[4]); pages = short_pack(srb->cmnd[8], srb->cmnd[7]); US_DEBUGP("alauda_transport: WRITE_10: page %d pagect %d\n", page, pages); return alauda_write_data(us, page, pages); } if (srb->cmnd[0] == REQUEST_SENSE) { US_DEBUGP("alauda_transport: REQUEST_SENSE.\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { /* sure. whatever. not like we can stop the user from popping the media out of the device (no locking doors, etc) */ return USB_STOR_TRANSPORT_GOOD; } US_DEBUGP("alauda_transport: Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static int alauda_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - alauda_usb_ids) + alauda_unusual_dev_list); if (result) return result; us->transport_name = "Alauda Control/Bulk"; us->transport = alauda_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver alauda_driver = { .name = "ums-alauda", .probe = alauda_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = alauda_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(alauda_driver);
gpl-2.0
thune-xiaobai/android_kernel_zte_s2002
sound/pci/cs5535audio/cs5535audio.c
4893
10563
/*
 * Driver for audio on multifunction CS5535/6 companion device
 * Copyright (C) Jaya Kumar
 *
 * Based on Jaroslav Kysela and Takashi Iwai's examples.
 * This work was sponsored by CIS(M) Sdn Bhd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/asoundef.h>
#include "cs5535audio.h"

#define DRIVER_NAME "cs5535audio"

static char *ac97_quirk;
module_param(ac97_quirk, charp, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 board specific workarounds.");

static struct ac97_quirk ac97_quirks[] __devinitdata = {
#if 0 /* Not yet confirmed if all 5536 boards are HP only */
	{
		.subvendor = PCI_VENDOR_ID_AMD,
		.subdevice = PCI_DEVICE_ID_AMD_CS5536_AUDIO,
		.name = "AMD RDK",
		.type = AC97_TUNE_HP_ONLY
	},
#endif
	{}
};

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for " DRIVER_NAME);
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for " DRIVER_NAME);
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " DRIVER_NAME);

static DEFINE_PCI_DEVICE_TABLE(snd_cs5535audio_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_AUDIO) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO) },
	{}
};

MODULE_DEVICE_TABLE(pci, snd_cs5535audio_ids);

/* Busy-wait (up to 'timeout' iterations of udelay(1)) until the codec
 * command register's CMD_NEW bit clears, i.e. the command was accepted. */
static void wait_till_cmd_acked(struct cs5535audio *cs5535au,
				unsigned long timeout)
{
	unsigned int tmp;
	do {
		tmp = cs_readl(cs5535au, ACC_CODEC_CNTL);
		if (!(tmp & CMD_NEW))
			break;
		udelay(1);
	} while (--timeout);
	if (!timeout)
		snd_printk(KERN_ERR "Failure writing to cs5535 codec\n");
}

static unsigned short snd_cs5535audio_codec_read(struct cs5535audio *cs5535au,
						 unsigned short reg)
{
	unsigned int regdata;
	unsigned int timeout;
	unsigned int val;

	regdata = ((unsigned int) reg) << 24;
	regdata |= ACC_CODEC_CNTL_RD_CMD;
	regdata |= CMD_NEW;

	cs_writel(cs5535au, ACC_CODEC_CNTL, regdata);
	wait_till_cmd_acked(cs5535au, 50);

	/* wait for fresh status that echoes the register we asked for */
	timeout = 50;
	do {
		val = cs_readl(cs5535au, ACC_CODEC_STATUS);
		if ((val & STS_NEW) && reg == (val >> 24))
			break;
		udelay(1);
	} while (--timeout);
	if (!timeout)
		snd_printk(KERN_ERR "Failure reading codec reg 0x%x,"
					"Last value=0x%x\n", reg, val);

	return (unsigned short) val;
}

static void snd_cs5535audio_codec_write(struct cs5535audio *cs5535au,
					unsigned short reg, unsigned short val)
{
	unsigned int regdata;

	regdata = ((unsigned int) reg) << 24;
	regdata |= val;
	regdata &= CMD_MASK;
	regdata |= CMD_NEW;
	regdata &= ACC_CODEC_CNTL_WR_CMD;

	cs_writel(cs5535au, ACC_CODEC_CNTL, regdata);
	wait_till_cmd_acked(cs5535au, 50);
}

/* snd_ac97_bus_ops glue: forward to the register-level helpers above */
static void snd_cs5535audio_ac97_codec_write(struct snd_ac97 *ac97,
					     unsigned short reg,
					     unsigned short val)
{
	struct cs5535audio *cs5535au = ac97->private_data;
	snd_cs5535audio_codec_write(cs5535au, reg, val);
}

static unsigned short snd_cs5535audio_ac97_codec_read(struct snd_ac97 *ac97,
						      unsigned short reg)
{
	struct cs5535audio *cs5535au = ac97->private_data;
	return snd_cs5535audio_codec_read(cs5535au, reg);
}

static int __devinit snd_cs5535audio_mixer(struct cs5535audio *cs5535au)
{
	struct snd_card *card = cs5535au->card;
	struct snd_ac97_bus *pbus;
	struct snd_ac97_template ac97;
	int err;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_cs5535audio_ac97_codec_write,
		.read = snd_cs5535audio_ac97_codec_read,
	};

	if ((err = snd_ac97_bus(card, 0, &ops, NULL, &pbus)) < 0)
		return err;

	memset(&ac97, 0, sizeof(ac97));
	ac97.scaps = AC97_SCAP_AUDIO | AC97_SCAP_SKIP_MODEM
			| AC97_SCAP_POWER_SAVE;
	ac97.private_data = cs5535au;
	ac97.pci = cs5535au->pci;

	/* set any OLPC-specific scaps */
	olpc_prequirks(card, &ac97);

	if ((err = snd_ac97_mixer(pbus, &ac97, &cs5535au->ac97)) < 0) {
		snd_printk(KERN_ERR "mixer failed\n");
		return err;
	}

	snd_ac97_tune_hardware(cs5535au->ac97, ac97_quirks, ac97_quirk);

	err = olpc_quirks(card, cs5535au->ac97);
	if (err < 0) {
		snd_printk(KERN_ERR "olpc quirks failed\n");
		return err;
	}

	return 0;
}

/* End-of-period on bus master 0 (playback): notify ALSA. */
static void process_bm0_irq(struct cs5535audio *cs5535au)
{
	u8 bm_stat;
	spin_lock(&cs5535au->reg_lock);
	bm_stat = cs_readb(cs5535au, ACC_BM0_STATUS);
	spin_unlock(&cs5535au->reg_lock);
	if (bm_stat & EOP) {
		struct cs5535audio_dma *dma;
		dma = cs5535au->playback_substream->runtime->private_data;
		snd_pcm_period_elapsed(cs5535au->playback_substream);
	} else {
		snd_printk(KERN_ERR "unexpected bm0 irq src, bm_stat=%x\n",
					bm_stat);
	}
}

/* End-of-period on bus master 1 (capture): notify ALSA. */
static void process_bm1_irq(struct cs5535audio *cs5535au)
{
	u8 bm_stat;
	spin_lock(&cs5535au->reg_lock);
	bm_stat = cs_readb(cs5535au, ACC_BM1_STATUS);
	spin_unlock(&cs5535au->reg_lock);
	if (bm_stat & EOP) {
		struct cs5535audio_dma *dma;
		dma = cs5535au->capture_substream->runtime->private_data;
		snd_pcm_period_elapsed(cs5535au->capture_substream);
	}
}

static irqreturn_t snd_cs5535audio_interrupt(int irq, void *dev_id)
{
	u16 acc_irq_stat;
	unsigned char count;
	struct cs5535audio *cs5535au = dev_id;

	if (cs5535au == NULL)
		return IRQ_NONE;

	acc_irq_stat = cs_readw(cs5535au,
ACC_IRQ_STATUS); if (!acc_irq_stat) return IRQ_NONE; for (count = 0; count < 4; count++) { if (acc_irq_stat & (1 << count)) { switch (count) { case IRQ_STS: cs_readl(cs5535au, ACC_GPIO_STATUS); break; case WU_IRQ_STS: cs_readl(cs5535au, ACC_GPIO_STATUS); break; case BM0_IRQ_STS: process_bm0_irq(cs5535au); break; case BM1_IRQ_STS: process_bm1_irq(cs5535au); break; default: snd_printk(KERN_ERR "Unexpected irq src: " "0x%x\n", acc_irq_stat); break; } } } return IRQ_HANDLED; } static int snd_cs5535audio_free(struct cs5535audio *cs5535au) { synchronize_irq(cs5535au->irq); pci_set_power_state(cs5535au->pci, 3); if (cs5535au->irq >= 0) free_irq(cs5535au->irq, cs5535au); pci_release_regions(cs5535au->pci); pci_disable_device(cs5535au->pci); kfree(cs5535au); return 0; } static int snd_cs5535audio_dev_free(struct snd_device *device) { struct cs5535audio *cs5535au = device->device_data; return snd_cs5535audio_free(cs5535au); } static int __devinit snd_cs5535audio_create(struct snd_card *card, struct pci_dev *pci, struct cs5535audio **rcs5535au) { struct cs5535audio *cs5535au; int err; static struct snd_device_ops ops = { .dev_free = snd_cs5535audio_dev_free, }; *rcs5535au = NULL; if ((err = pci_enable_device(pci)) < 0) return err; if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { printk(KERN_WARNING "unable to get 32bit dma\n"); err = -ENXIO; goto pcifail; } cs5535au = kzalloc(sizeof(*cs5535au), GFP_KERNEL); if (cs5535au == NULL) { err = -ENOMEM; goto pcifail; } spin_lock_init(&cs5535au->reg_lock); cs5535au->card = card; cs5535au->pci = pci; cs5535au->irq = -1; if ((err = pci_request_regions(pci, "CS5535 Audio")) < 0) { kfree(cs5535au); goto pcifail; } cs5535au->port = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_cs5535audio_interrupt, IRQF_SHARED, KBUILD_MODNAME, cs5535au)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); err = -EBUSY; goto sndfail; } cs5535au->irq = pci->irq; 
pci_set_master(pci); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cs5535au, &ops)) < 0) goto sndfail; snd_card_set_dev(card, &pci->dev); *rcs5535au = cs5535au; return 0; sndfail: /* leave the device alive, just kill the snd */ snd_cs5535audio_free(cs5535au); return err; pcifail: pci_disable_device(pci); return err; } static int __devinit snd_cs5535audio_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct cs5535audio *cs5535au; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; if ((err = snd_cs5535audio_create(card, pci, &cs5535au)) < 0) goto probefail_out; card->private_data = cs5535au; if ((err = snd_cs5535audio_mixer(cs5535au)) < 0) goto probefail_out; if ((err = snd_cs5535audio_pcm(cs5535au)) < 0) goto probefail_out; strcpy(card->driver, DRIVER_NAME); strcpy(card->shortname, "CS5535 Audio"); sprintf(card->longname, "%s %s at 0x%lx, irq %i", card->shortname, card->driver, cs5535au->port, cs5535au->irq); if ((err = snd_card_register(card)) < 0) goto probefail_out; pci_set_drvdata(pci, card); dev++; return 0; probefail_out: snd_card_free(card); return err; } static void __devexit snd_cs5535audio_remove(struct pci_dev *pci) { olpc_quirks_cleanup(); snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_cs5535audio_ids, .probe = snd_cs5535audio_probe, .remove = __devexit_p(snd_cs5535audio_remove), #ifdef CONFIG_PM .suspend = snd_cs5535audio_suspend, .resume = snd_cs5535audio_resume, #endif }; static int __init alsa_card_cs5535audio_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_cs5535audio_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_cs5535audio_init) module_exit(alsa_card_cs5535audio_exit) MODULE_AUTHOR("Jaya Kumar"); 
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CS5535 Audio"); MODULE_SUPPORTED_DEVICE("CS5535 Audio");
gpl-2.0
KangDroid/android_kernel_lge_hammerhead
drivers/staging/wlags49_h2/mmd.c
4893
13379
// vim:tw=110:ts=4: /************************************************************************************************************ * * FILE : mmd.c * * DATE : $Date: 2004/07/23 11:57:45 $ $Revision: 1.4 $ * Original: 2004/05/28 14:05:35 Revision: 1.32 Tag: hcf7_t20040602_01 * Original: 2004/05/13 15:31:45 Revision: 1.30 Tag: hcf7_t7_20040513_01 * Original: 2004/04/15 09:24:42 Revision: 1.25 Tag: hcf7_t7_20040415_01 * Original: 2004/04/08 15:18:17 Revision: 1.24 Tag: t7_20040413_01 * Original: 2004/04/01 15:32:55 Revision: 1.22 Tag: t7_20040401_01 * Original: 2004/03/10 15:39:28 Revision: 1.18 Tag: t20040310_01 * Original: 2004/03/03 14:10:12 Revision: 1.16 Tag: t20040304_01 * Original: 2004/03/02 09:27:12 Revision: 1.14 Tag: t20040302_03 * Original: 2004/02/24 13:00:29 Revision: 1.12 Tag: t20040224_01 * Original: 2004/01/30 09:59:33 Revision: 1.11 Tag: t20040219_01 * * AUTHOR : Nico Valster * * DESC : Common routines for HCF, MSF, UIL as well as USF sources * * Note: relative to Asserts, the following can be observed: * Since the IFB is not known inside the routine, the macro HCFASSERT is replaced with MDDASSERT. * Also the line number reported in the assert is raised by FILE_NAME_OFFSET (20000) to discriminate the * MMD Asserts from HCF and DHF asserts. * *************************************************************************************************************** * * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . 
Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* * **************************************************************************************************************/ #include "hcf.h" // Needed as long as we do not really sort out the mess #include "hcfdef.h" // get CNV_LITTLE_TO_SHORT #include "mmd.h" // MoreModularDriver common include file //to distinguish DHF from HCF asserts by means of line number #undef FILE_NAME_OFFSET #define FILE_NAME_OFFSET DHF_FILE_NAME_OFFSET /************************************************************************************************************* * *.MODULE CFG_RANGE_SPEC_STRCT* mmd_check_comp( CFG_RANGES_STRCT *actp, CFG_SUP_RANGE_STRCT *supp ) *.PURPOSE Checks compatibility between an actor and a supplier. * *.ARGUMENTS * actp * supp * *.RETURNS * NULL incompatible * <>NULL pointer to matching CFG_RANGE_SPEC_STRCT substructure in actor-structure matching the supplier * *.NARRATIVE * * Parameters: * actp address of the actor specification * supp address of the supplier specification * * Description: mmd_check_comp is a support routine to check the compatibility between an actor and a * supplier. mmd_check_comp is independent of the endianess of the actp and supp structures. This is * achieved by checking the "bottom" or "role" fields of these structures. Since these fields are restricted * to a limited range, comparing the contents to a value with a known endian-ess gives a clue to their actual * endianess. * *.DIAGRAM *1a: The role-field of the actor structure has a known non-zero, not "byte symmetric" value (namely * COMP_ROLE_ACT or 0x0001), so if and only the contents of this field matches COMP_ROLE_ACT (in Native * Endian format), the actor structure is Native Endian. *2a: Since the role-field of the supplier structure is 0x0000, the test as used for the actor does not work * for a supplier. 
A supplier has always exactly 1 variant,top,bottom record with (officially, but see the * note below) each of these 3 values in the range 1 through 99, so one byte of the word value of variant, * top and bottom words is 0x00 and the other byte is non-zero. Whether the lowest address byte or the * highest address byte is non-zero depends on the Endianess of the LTV. If and only if the word value of * bottom is less than 0x0100, the supplier is Native Endian. * NOTE: the variant field of the supplier structure can not be used for the Endian Detection Algorithm, * because a a zero-valued variant has been used as Controlled Deployment indication in the past. * Note: An actor may have multiple sets of variant,top,bottom records, including dummy sets with variant, * top and bottom fields with a zero-value. As a consequence the endianess of the actor can not be determined * based on its variant,top,bottom values. * * Note: the L and T field of the structures are always in Native Endian format, so you can not draw * conclusions concerning the Endianess of the structure based on these two fields. * *1b/2b * The only purpose of the CFG_RANGE_SPEC_BYTE_STRCT is to give easy access to the non-zero byte of the word * value of variant, top and bottom. The variables sup_endian and act_endian are used for the supplier and * actor structure respectively. These variables must be 0 when the structure has LE format and 1 if the * structure has BE format. This can be phrased as: * the variable is false (i.e 0x0000) if either * (the platform is LE and the LTV is the same as the platform) * or * (the platform is BE and the LTV differs from the platform). * the variable is true (i.e 0x0001) if either * (the platform is BE and the LTV is the same as the platform) * or * (the platform is LE and the LTV differs from the platform). 
* * Alternatively this can be phrased as: * if the platform is LE * if the LTV is LE (i.e the same as the platform), then the variable = 0 * else (the LTV is BE (i.e. different from the platform) ), then the variable = 1 * if the platform is BE * if the LTV is BE (i.e the same as the platform), then the variable = 1 * else (the LTV is LE (i.e. different from the platform) ), then the variable = 0 * * This is implemented as: * #if HCF_BIG_ENDIAN == 0 //platform is LE * sup/act_endian becomes reverse of structure-endianess as determined in 1a/1b * #endif *6: Each of the actor variant-bottom-top records is checked against the (single) supplier variant-bottom-top * range till either an acceptable match is found or all actor records are tried. As explained above, due to * the limited ranges of these values, checking a byte is acceptable and suitable. *8: depending on whether a match was found or not (as reflected by the value of the control variable of the * for loop), the NULL pointer or a pointer to the matching Number/Bottom/Top record of the Actor structure * is returned. * As an additional safety, checking the supplier length protects against invalid Supplier structures, which * may be caused by failing hcf_get_info (in which case the len-field is zero). Note that the contraption * "supp->len != sizeof(CFG_SUP_RANGE_STRCT)/sizeof(hcf_16) - 1" * did turn out not to work for a compiler which padded the structure definition. * * Note: when consulting references like DesignNotes and Architecture specifications there is a confusing use * of the notions number and variant. This resulted in an inconsistent use in the HCF nomenclature as well. * This makes the logic hard to follow and one has to be very much aware of the context when walking through * the code. * NOTE: The Endian Detection Algorithm places limitations on future extensions of the fields, i.e. 
they should * stay within the currently defined boundaries of 1 through 99 (although 1 through 255) would work as well * and there should never be used a zero value for the bottom of a valid supplier. * Note: relative to Asserts, the following can be observed: * 1: Supplier variant 0x0000 has been used for Controlled Deployment * 2: An actor may have one or more variant record specifications with a top of zero and a non-zero bottom * to override the HCF default support of a particular variant by the MSF programmer via hcfcfg.h * 3: An actor range can be specified as all zeros, e.g. as padding in the automatically generated firmware * image files. *.ENDDOC END DOCUMENTATION *************************************************************************************************************/ CFG_RANGE_SPEC_STRCT* mmd_check_comp( CFG_RANGES_STRCT *actp, CFG_SUP_RANGE_STRCT *supp ) { CFG_RANGE_SPEC_BYTE_STRCT *actq = (CFG_RANGE_SPEC_BYTE_STRCT*)actp->var_rec; CFG_RANGE_SPEC_BYTE_STRCT *supq = (CFG_RANGE_SPEC_BYTE_STRCT*)&(supp->variant); hcf_16 i; int act_endian; //actor endian flag int sup_endian; //supplier endian flag act_endian = actp->role == COMP_ROLE_ACT; //true if native endian /* 1a */ sup_endian = supp->bottom < 0x0100; //true if native endian /* 2a */ #if HCF_ASSERT MMDASSERT( supp->len == 6, supp->len ) MMDASSERT( actp->len >= 6 && actp->len%3 == 0, actp->len ) if ( act_endian ) { //native endian MMDASSERT( actp->role == COMP_ROLE_ACT, actp->role ) MMDASSERT( 1 <= actp->id && actp->id <= 99, actp->id ) } else { //non-native endian MMDASSERT( actp->role == CNV_END_SHORT(COMP_ROLE_ACT), actp->role ) MMDASSERT( 1 <= CNV_END_SHORT(actp->id) && CNV_END_SHORT(actp->id) <= 99, actp->id ) } if ( sup_endian ) { //native endian MMDASSERT( supp->role == COMP_ROLE_SUPL, supp->role ) MMDASSERT( 1 <= supp->id && supp->id <= 99, supp->id ) MMDASSERT( 1 <= supp->variant && supp->variant <= 99, supp->variant ) MMDASSERT( 1 <= supp->bottom && supp->bottom <= 99, supp->bottom ) 
MMDASSERT( 1 <= supp->top && supp->top <= 99, supp->top ) MMDASSERT( supp->bottom <= supp->top, supp->bottom << 8 | supp->top ) } else { //non-native endian MMDASSERT( supp->role == CNV_END_SHORT(COMP_ROLE_SUPL), supp->role ) MMDASSERT( 1 <= CNV_END_SHORT(supp->id) && CNV_END_SHORT(supp->id) <= 99, supp->id ) MMDASSERT( 1 <= CNV_END_SHORT(supp->variant) && CNV_END_SHORT(supp->variant) <= 99, supp->variant ) MMDASSERT( 1 <= CNV_END_SHORT(supp->bottom) && CNV_END_SHORT(supp->bottom) <=99, supp->bottom ) MMDASSERT( 1 <= CNV_END_SHORT(supp->top) && CNV_END_SHORT(supp->top) <=99, supp->top ) MMDASSERT( CNV_END_SHORT(supp->bottom) <= CNV_END_SHORT(supp->top), supp->bottom << 8 | supp->top ) } #endif // HCF_ASSERT #if HCF_BIG_ENDIAN == 0 act_endian = !act_endian; /* 1b*/ sup_endian = !sup_endian; /* 2b*/ #endif // HCF_BIG_ENDIAN for ( i = actp->len ; i > 3; actq++, i -= 3 ) { /* 6 */ MMDASSERT( actq->variant[act_endian] <= 99, i<<8 | actq->variant[act_endian] ) MMDASSERT( actq->bottom[act_endian] <= 99 , i<<8 | actq->bottom[act_endian] ) MMDASSERT( actq->top[act_endian] <= 99 , i<<8 | actq->top[act_endian] ) MMDASSERT( actq->bottom[act_endian] <= actq->top[act_endian], i<<8 | actq->bottom[act_endian] ) if ( actq->variant[act_endian] == supq->variant[sup_endian] && actq->bottom[act_endian] <= supq->top[sup_endian] && actq->top[act_endian] >= supq->bottom[sup_endian] ) break; } if ( i <= 3 || supp->len != 6 /*sizeof(CFG_SUP_RANGE_STRCT)/sizeof(hcf_16) - 1 */ ) { actq = NULL; /* 8 */ } #if HCF_ASSERT if ( actq == NULL ) { for ( i = 0; i <= supp->len; i += 2 ) { MMDASSERT( DO_ASSERT, MERGE_2( ((hcf_16*)supp)[i], ((hcf_16*)supp)[i+1] ) ); } for ( i = 0; i <= actp->len; i += 2 ) { MMDASSERT( DO_ASSERT, MERGE_2( ((hcf_16*)actp)[i], ((hcf_16*)actp)[i+1] ) ); } } #endif // HCF_ASSERT return (CFG_RANGE_SPEC_STRCT*)actq; } // mmd_check_comp
gpl-2.0
flar2/m7-GPE-4.4.3
sound/pci/via82xx_modem.c
4893
35335
/* * ALSA modem driver for VIA VT82xx (South Bridge) * * VT82C686A/B/C, VT8233A/C, VT8235 * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * Tjeerd.Mulder <Tjeerd.Mulder@fujitsu-siemens.com> * 2002 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * Changes: * * Sep. 2, 2004 Sasha Khapyorsky <sashak@alsa-project.org> * Modified from original audio driver 'via82xx.c' to support AC97 * modems. 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #if 0 #define POINTER_DEBUG #endif MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("VIA VT82xx modem"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}"); static int index = -2; /* Exclude the first card */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int ac97_clock = 48000; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for VIA 82xx bridge."); module_param(ac97_clock, int, 0444); MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz)."); /* just for backward compatibility */ static bool enable; module_param(enable, bool, 0444); /* * Direct registers */ #define VIAREG(via, x) ((via)->port + VIA_REG_##x) #define VIADEV_REG(viadev, x) ((viadev)->port + VIA_REG_##x) /* common offsets */ #define VIA_REG_OFFSET_STATUS 0x00 /* byte - channel status */ #define VIA_REG_STAT_ACTIVE 0x80 /* RO */ #define VIA_REG_STAT_PAUSED 0x40 /* RO */ #define VIA_REG_STAT_TRIGGER_QUEUED 0x08 /* RO */ #define VIA_REG_STAT_STOPPED 0x04 /* RWC */ #define VIA_REG_STAT_EOL 0x02 /* RWC */ #define VIA_REG_STAT_FLAG 0x01 /* RWC */ #define VIA_REG_OFFSET_CONTROL 0x01 /* byte - channel control */ #define VIA_REG_CTRL_START 0x80 /* WO */ #define VIA_REG_CTRL_TERMINATE 0x40 /* WO */ #define VIA_REG_CTRL_AUTOSTART 0x20 #define VIA_REG_CTRL_PAUSE 0x08 /* RW */ #define VIA_REG_CTRL_INT_STOP 0x04 #define VIA_REG_CTRL_INT_EOL 0x02 #define VIA_REG_CTRL_INT_FLAG 0x01 #define VIA_REG_CTRL_RESET 0x01 /* RW - probably reset? 
undocumented */ #define VIA_REG_CTRL_INT (VIA_REG_CTRL_INT_FLAG | VIA_REG_CTRL_INT_EOL | VIA_REG_CTRL_AUTOSTART) #define VIA_REG_OFFSET_TYPE 0x02 /* byte - channel type (686 only) */ #define VIA_REG_TYPE_AUTOSTART 0x80 /* RW - autostart at EOL */ #define VIA_REG_TYPE_16BIT 0x20 /* RW */ #define VIA_REG_TYPE_STEREO 0x10 /* RW */ #define VIA_REG_TYPE_INT_LLINE 0x00 #define VIA_REG_TYPE_INT_LSAMPLE 0x04 #define VIA_REG_TYPE_INT_LESSONE 0x08 #define VIA_REG_TYPE_INT_MASK 0x0c #define VIA_REG_TYPE_INT_EOL 0x02 #define VIA_REG_TYPE_INT_FLAG 0x01 #define VIA_REG_OFFSET_TABLE_PTR 0x04 /* dword - channel table pointer */ #define VIA_REG_OFFSET_CURR_PTR 0x04 /* dword - channel current pointer */ #define VIA_REG_OFFSET_STOP_IDX 0x08 /* dword - stop index, channel type, sample rate */ #define VIA_REG_OFFSET_CURR_COUNT 0x0c /* dword - channel current count (24 bit) */ #define VIA_REG_OFFSET_CURR_INDEX 0x0f /* byte - channel current index (for via8233 only) */ #define DEFINE_VIA_REGSET(name,val) \ enum {\ VIA_REG_##name##_STATUS = (val),\ VIA_REG_##name##_CONTROL = (val) + 0x01,\ VIA_REG_##name##_TYPE = (val) + 0x02,\ VIA_REG_##name##_TABLE_PTR = (val) + 0x04,\ VIA_REG_##name##_CURR_PTR = (val) + 0x04,\ VIA_REG_##name##_STOP_IDX = (val) + 0x08,\ VIA_REG_##name##_CURR_COUNT = (val) + 0x0c,\ } /* modem block */ DEFINE_VIA_REGSET(MO, 0x40); DEFINE_VIA_REGSET(MI, 0x50); /* AC'97 */ #define VIA_REG_AC97 0x80 /* dword */ #define VIA_REG_AC97_CODEC_ID_MASK (3<<30) #define VIA_REG_AC97_CODEC_ID_SHIFT 30 #define VIA_REG_AC97_CODEC_ID_PRIMARY 0x00 #define VIA_REG_AC97_CODEC_ID_SECONDARY 0x01 #define VIA_REG_AC97_SECONDARY_VALID (1<<27) #define VIA_REG_AC97_PRIMARY_VALID (1<<25) #define VIA_REG_AC97_BUSY (1<<24) #define VIA_REG_AC97_READ (1<<23) #define VIA_REG_AC97_CMD_SHIFT 16 #define VIA_REG_AC97_CMD_MASK 0x7e #define VIA_REG_AC97_DATA_SHIFT 0 #define VIA_REG_AC97_DATA_MASK 0xffff #define VIA_REG_SGD_SHADOW 0x84 /* dword */ #define VIA_REG_SGD_STAT_PB_FLAG (1<<0) #define 
VIA_REG_SGD_STAT_CP_FLAG (1<<1) #define VIA_REG_SGD_STAT_FM_FLAG (1<<2) #define VIA_REG_SGD_STAT_PB_EOL (1<<4) #define VIA_REG_SGD_STAT_CP_EOL (1<<5) #define VIA_REG_SGD_STAT_FM_EOL (1<<6) #define VIA_REG_SGD_STAT_PB_STOP (1<<8) #define VIA_REG_SGD_STAT_CP_STOP (1<<9) #define VIA_REG_SGD_STAT_FM_STOP (1<<10) #define VIA_REG_SGD_STAT_PB_ACTIVE (1<<12) #define VIA_REG_SGD_STAT_CP_ACTIVE (1<<13) #define VIA_REG_SGD_STAT_FM_ACTIVE (1<<14) #define VIA_REG_SGD_STAT_MR_FLAG (1<<16) #define VIA_REG_SGD_STAT_MW_FLAG (1<<17) #define VIA_REG_SGD_STAT_MR_EOL (1<<20) #define VIA_REG_SGD_STAT_MW_EOL (1<<21) #define VIA_REG_SGD_STAT_MR_STOP (1<<24) #define VIA_REG_SGD_STAT_MW_STOP (1<<25) #define VIA_REG_SGD_STAT_MR_ACTIVE (1<<28) #define VIA_REG_SGD_STAT_MW_ACTIVE (1<<29) #define VIA_REG_GPI_STATUS 0x88 #define VIA_REG_GPI_INTR 0x8c #define VIA_TBL_BIT_FLAG 0x40000000 #define VIA_TBL_BIT_EOL 0x80000000 /* pci space */ #define VIA_ACLINK_STAT 0x40 #define VIA_ACLINK_C11_READY 0x20 #define VIA_ACLINK_C10_READY 0x10 #define VIA_ACLINK_C01_READY 0x04 /* secondary codec ready */ #define VIA_ACLINK_LOWPOWER 0x02 /* low-power state */ #define VIA_ACLINK_C00_READY 0x01 /* primary codec ready */ #define VIA_ACLINK_CTRL 0x41 #define VIA_ACLINK_CTRL_ENABLE 0x80 /* 0: disable, 1: enable */ #define VIA_ACLINK_CTRL_RESET 0x40 /* 0: assert, 1: de-assert */ #define VIA_ACLINK_CTRL_SYNC 0x20 /* 0: release SYNC, 1: force SYNC hi */ #define VIA_ACLINK_CTRL_SDO 0x10 /* 0: release SDO, 1: force SDO hi */ #define VIA_ACLINK_CTRL_VRA 0x08 /* 0: disable VRA, 1: enable VRA */ #define VIA_ACLINK_CTRL_PCM 0x04 /* 0: disable PCM, 1: enable PCM */ #define VIA_ACLINK_CTRL_FM 0x02 /* via686 only */ #define VIA_ACLINK_CTRL_SB 0x01 /* via686 only */ #define VIA_ACLINK_CTRL_INIT (VIA_ACLINK_CTRL_ENABLE|\ VIA_ACLINK_CTRL_RESET|\ VIA_ACLINK_CTRL_PCM) #define VIA_FUNC_ENABLE 0x42 #define VIA_FUNC_MIDI_PNP 0x80 /* FIXME: it's 0x40 in the datasheet! */ #define VIA_FUNC_MIDI_IRQMASK 0x40 /* FIXME: not documented! 
*/ #define VIA_FUNC_RX2C_WRITE 0x20 #define VIA_FUNC_SB_FIFO_EMPTY 0x10 #define VIA_FUNC_ENABLE_GAME 0x08 #define VIA_FUNC_ENABLE_FM 0x04 #define VIA_FUNC_ENABLE_MIDI 0x02 #define VIA_FUNC_ENABLE_SB 0x01 #define VIA_PNP_CONTROL 0x43 #define VIA_MC97_CTRL 0x44 #define VIA_MC97_CTRL_ENABLE 0x80 #define VIA_MC97_CTRL_SECONDARY 0x40 #define VIA_MC97_CTRL_INIT (VIA_MC97_CTRL_ENABLE|\ VIA_MC97_CTRL_SECONDARY) /* * pcm stream */ struct snd_via_sg_table { unsigned int offset; unsigned int size; } ; #define VIA_TABLE_SIZE 255 struct viadev { unsigned int reg_offset; unsigned long port; int direction; /* playback = 0, capture = 1 */ struct snd_pcm_substream *substream; int running; unsigned int tbl_entries; /* # descriptors */ struct snd_dma_buffer table; struct snd_via_sg_table *idx_table; /* for recovery from the unexpected pointer */ unsigned int lastpos; unsigned int bufsize; unsigned int bufsize2; }; enum { TYPE_CARD_VIA82XX_MODEM = 1 }; #define VIA_MAX_MODEM_DEVS 2 struct via82xx_modem { int irq; unsigned long port; unsigned int intr_mask; /* SGD_SHADOW mask to check interrupts */ struct pci_dev *pci; struct snd_card *card; unsigned int num_devs; unsigned int playback_devno, capture_devno; struct viadev devs[VIA_MAX_MODEM_DEVS]; struct snd_pcm *pcms[2]; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; unsigned int ac97_clock; unsigned int ac97_secondary; /* secondary AC'97 codec is present */ spinlock_t reg_lock; struct snd_info_entry *proc_entry; }; static DEFINE_PCI_DEVICE_TABLE(snd_via82xx_modem_ids) = { { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_via82xx_modem_ids); /* */ /* * allocate and initialize the descriptor buffers * periods = number of periods * fragsize = period size in bytes */ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream, struct pci_dev *pci, unsigned int periods, unsigned int fragsize) { unsigned int i, idx, ofs, rest; struct via82xx_modem *chip = 
snd_pcm_substream_chip(substream); if (dev->table.area == NULL) { /* the start of each lists must be aligned to 8 bytes, * but the kernel pages are much bigger, so we don't care */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8), &dev->table) < 0) return -ENOMEM; } if (! dev->idx_table) { dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL); if (! dev->idx_table) return -ENOMEM; } /* fill the entries */ idx = 0; ofs = 0; for (i = 0; i < periods; i++) { rest = fragsize; /* fill descriptors for a period. * a period can be split to several descriptors if it's * over page boundary. */ do { unsigned int r; unsigned int flag; unsigned int addr; if (idx >= VIA_TABLE_SIZE) { snd_printk(KERN_ERR "via82xx: too much table size!\n"); return -EINVAL; } addr = snd_pcm_sgbuf_get_addr(substream, ofs); ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr); r = PAGE_SIZE - (ofs % PAGE_SIZE); if (rest < r) r = rest; rest -= r; if (! 
rest) { if (i == periods - 1) flag = VIA_TBL_BIT_EOL; /* buffer boundary */ else flag = VIA_TBL_BIT_FLAG; /* period boundary */ } else flag = 0; /* period continues to the next */ /* printk(KERN_DEBUG "via: tbl %d: at %d size %d " "(rest %d)\n", idx, ofs, r, rest); */ ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag); dev->idx_table[idx].offset = ofs; dev->idx_table[idx].size = r; ofs += r; idx++; } while (rest > 0); } dev->tbl_entries = idx; dev->bufsize = periods * fragsize; dev->bufsize2 = dev->bufsize / 2; return 0; } static int clean_via_table(struct viadev *dev, struct snd_pcm_substream *substream, struct pci_dev *pci) { if (dev->table.area) { snd_dma_free_pages(&dev->table); dev->table.area = NULL; } kfree(dev->idx_table); dev->idx_table = NULL; return 0; } /* * Basic I/O */ static inline unsigned int snd_via82xx_codec_xread(struct via82xx_modem *chip) { return inl(VIAREG(chip, AC97)); } static inline void snd_via82xx_codec_xwrite(struct via82xx_modem *chip, unsigned int val) { outl(val, VIAREG(chip, AC97)); } static int snd_via82xx_codec_ready(struct via82xx_modem *chip, int secondary) { unsigned int timeout = 1000; /* 1ms */ unsigned int val; while (timeout-- > 0) { udelay(1); if (!((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY)) return val & 0xffff; } snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n", secondary, snd_via82xx_codec_xread(chip)); return -EIO; } static int snd_via82xx_codec_valid(struct via82xx_modem *chip, int secondary) { unsigned int timeout = 1000; /* 1ms */ unsigned int val, val1; unsigned int stat = !secondary ? 
VIA_REG_AC97_PRIMARY_VALID : VIA_REG_AC97_SECONDARY_VALID; while (timeout-- > 0) { val = snd_via82xx_codec_xread(chip); val1 = val & (VIA_REG_AC97_BUSY | stat); if (val1 == stat) return val & 0xffff; udelay(1); } return -EIO; } static void snd_via82xx_codec_wait(struct snd_ac97 *ac97) { struct via82xx_modem *chip = ac97->private_data; int err; err = snd_via82xx_codec_ready(chip, ac97->num); /* here we need to wait fairly for long time.. */ msleep(500); } static void snd_via82xx_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct via82xx_modem *chip = ac97->private_data; unsigned int xval; if(reg == AC97_GPIO_STATUS) { outl(val, VIAREG(chip, GPI_STATUS)); return; } xval = !ac97->num ? VIA_REG_AC97_CODEC_ID_PRIMARY : VIA_REG_AC97_CODEC_ID_SECONDARY; xval <<= VIA_REG_AC97_CODEC_ID_SHIFT; xval |= reg << VIA_REG_AC97_CMD_SHIFT; xval |= val << VIA_REG_AC97_DATA_SHIFT; snd_via82xx_codec_xwrite(chip, xval); snd_via82xx_codec_ready(chip, ac97->num); } static unsigned short snd_via82xx_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct via82xx_modem *chip = ac97->private_data; unsigned int xval, val = 0xffff; int again = 0; xval = ac97->num << VIA_REG_AC97_CODEC_ID_SHIFT; xval |= ac97->num ? 
VIA_REG_AC97_SECONDARY_VALID : VIA_REG_AC97_PRIMARY_VALID; xval |= VIA_REG_AC97_READ; xval |= (reg & 0x7f) << VIA_REG_AC97_CMD_SHIFT; while (1) { if (again++ > 3) { snd_printk(KERN_ERR "codec_read: codec %i is not valid [0x%x]\n", ac97->num, snd_via82xx_codec_xread(chip)); return 0xffff; } snd_via82xx_codec_xwrite(chip, xval); udelay (20); if (snd_via82xx_codec_valid(chip, ac97->num) >= 0) { udelay(25); val = snd_via82xx_codec_xread(chip); break; } } return val & 0xffff; } static void snd_via82xx_channel_reset(struct via82xx_modem *chip, struct viadev *viadev) { outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET, VIADEV_REG(viadev, OFFSET_CONTROL)); inb(VIADEV_REG(viadev, OFFSET_CONTROL)); udelay(50); /* disable interrupts */ outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL)); /* clear interrupts */ outb(0x03, VIADEV_REG(viadev, OFFSET_STATUS)); outb(0x00, VIADEV_REG(viadev, OFFSET_TYPE)); /* for via686 */ // outl(0, VIADEV_REG(viadev, OFFSET_CURR_PTR)); viadev->lastpos = 0; } /* * Interrupt handler */ static irqreturn_t snd_via82xx_interrupt(int irq, void *dev_id) { struct via82xx_modem *chip = dev_id; unsigned int status; unsigned int i; status = inl(VIAREG(chip, SGD_SHADOW)); if (! (status & chip->intr_mask)) { return IRQ_NONE; } // _skip_sgd: /* check status for each stream */ spin_lock(&chip->reg_lock); for (i = 0; i < chip->num_devs; i++) { struct viadev *viadev = &chip->devs[i]; unsigned char c_status = inb(VIADEV_REG(viadev, OFFSET_STATUS)); c_status &= (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG|VIA_REG_STAT_STOPPED); if (! 
c_status) continue; if (viadev->substream && viadev->running) { spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(viadev->substream); spin_lock(&chip->reg_lock); } outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */ } spin_unlock(&chip->reg_lock); return IRQ_HANDLED; } /* * PCM callbacks */ /* * trigger callback */ static int snd_via82xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; unsigned char val = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_SUSPEND: val |= VIA_REG_CTRL_START; viadev->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: val = VIA_REG_CTRL_TERMINATE; viadev->running = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: val |= VIA_REG_CTRL_PAUSE; viadev->running = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: viadev->running = 1; break; default: return -EINVAL; } outb(val, VIADEV_REG(viadev, OFFSET_CONTROL)); if (cmd == SNDRV_PCM_TRIGGER_STOP) snd_via82xx_channel_reset(chip, viadev); return 0; } /* * pointer callbacks */ /* * calculate the linear position at the given sg-buffer index and the rest count */ #define check_invalid_pos(viadev,pos) \ ((pos) < viadev->lastpos && ((pos) >= viadev->bufsize2 ||\ viadev->lastpos < viadev->bufsize2)) static inline unsigned int calc_linear_pos(struct viadev *viadev, unsigned int idx, unsigned int count) { unsigned int size, res; size = viadev->idx_table[idx].size; res = viadev->idx_table[idx].offset + size - count; /* check the validity of the calculated position */ if (size < count) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr (size = %d, count = %d)\n", (int)size, (int)count); res = viadev->lastpos; } else if (check_invalid_pos(viadev, res)) { #ifdef POINTER_DEBUG printk(KERN_DEBUG "fail: idx = %i/%i, lastpos = 0x%x, " "bufsize2 = 0x%x, offsize = 0x%x, size = 0x%x, " "count = 0x%x\n", idx, viadev->tbl_entries, viadev->lastpos, 
viadev->bufsize2, viadev->idx_table[idx].offset, viadev->idx_table[idx].size, count); #endif if (count && size < count) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr, " "using last valid pointer\n"); res = viadev->lastpos; } else { if (! count) /* bogus count 0 on the DMA boundary? */ res = viadev->idx_table[idx].offset; else /* count register returns full size * when end of buffer is reached */ res = viadev->idx_table[idx].offset + size; if (check_invalid_pos(viadev, res)) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr (2), " "using last valid pointer\n"); res = viadev->lastpos; } } } viadev->lastpos = res; /* remember the last position */ if (res >= viadev->bufsize) res -= viadev->bufsize; return res; } /* * get the current pointer on via686 */ static snd_pcm_uframes_t snd_via686_pcm_pointer(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; unsigned int idx, ptr, count, res; if (snd_BUG_ON(!viadev->tbl_entries)) return 0; if (!(inb(VIADEV_REG(viadev, OFFSET_STATUS)) & VIA_REG_STAT_ACTIVE)) return 0; spin_lock(&chip->reg_lock); count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT)) & 0xffffff; /* The via686a does not have the current index register, * so we need to calculate the index from CURR_PTR. 
*/ ptr = inl(VIADEV_REG(viadev, OFFSET_CURR_PTR)); if (ptr <= (unsigned int)viadev->table.addr) idx = 0; else /* CURR_PTR holds the address + 8 */ idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) % viadev->tbl_entries; res = calc_linear_pos(viadev, idx, count); spin_unlock(&chip->reg_lock); return bytes_to_frames(substream->runtime, res); } /* * hw_params callback: * allocate the buffer and build up the buffer description table */ static int snd_via82xx_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; err = build_via_table(viadev, substream, chip->pci, params_periods(hw_params), params_period_bytes(hw_params)); if (err < 0) return err; snd_ac97_write(chip->ac97, AC97_LINE1_RATE, params_rate(hw_params)); snd_ac97_write(chip->ac97, AC97_LINE1_LEVEL, 0); return 0; } /* * hw_free callback: * clean up the buffer description table and release the buffer */ static int snd_via82xx_hw_free(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; clean_via_table(viadev, substream, chip->pci); snd_pcm_lib_free_pages(substream); return 0; } /* * set up the table pointer */ static void snd_via82xx_set_table_ptr(struct via82xx_modem *chip, struct viadev *viadev) { snd_via82xx_codec_ready(chip, chip->ac97_secondary); outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR)); udelay(20); snd_via82xx_codec_ready(chip, chip->ac97_secondary); } /* * prepare callback for playback and capture */ static int snd_via82xx_pcm_prepare(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; 
snd_via82xx_channel_reset(chip, viadev); /* this must be set after channel_reset */ snd_via82xx_set_table_ptr(chip, viadev); outb(VIA_REG_TYPE_AUTOSTART|VIA_REG_TYPE_INT_EOL|VIA_REG_TYPE_INT_FLAG, VIADEV_REG(viadev, OFFSET_TYPE)); return 0; } /* * pcm hardware definition, identical for both playback and capture */ static struct snd_pcm_hardware snd_via82xx_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | /* SNDRV_PCM_INFO_RESUME | */ SNDRV_PCM_INFO_PAUSE), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = 128 * 1024, .period_bytes_min = 32, .period_bytes_max = 128 * 1024, .periods_min = 2, .periods_max = VIA_TABLE_SIZE / 2, .fifo_size = 0, }; /* * open callback skeleton */ static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev *viadev, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; int err; static unsigned int rates[] = { 8000, 9600, 12000, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; runtime->hw = snd_via82xx_hw; if ((err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates)) < 0) return err; /* we may remove following constaint when we modify table entries in interrupt */ if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) return err; runtime->private_data = viadev; viadev->substream = substream; return 0; } /* * open callback for playback */ static int snd_via82xx_playback_open(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number]; return 
snd_via82xx_modem_pcm_open(chip, viadev, substream); } /* * open callback for capture */ static int snd_via82xx_capture_open(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = &chip->devs[chip->capture_devno + substream->pcm->device]; return snd_via82xx_modem_pcm_open(chip, viadev, substream); } /* * close callback */ static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream) { struct viadev *viadev = substream->runtime->private_data; viadev->substream = NULL; return 0; } /* via686 playback callbacks */ static struct snd_pcm_ops snd_via686_playback_ops = { .open = snd_via82xx_playback_open, .close = snd_via82xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_via82xx_hw_params, .hw_free = snd_via82xx_hw_free, .prepare = snd_via82xx_pcm_prepare, .trigger = snd_via82xx_pcm_trigger, .pointer = snd_via686_pcm_pointer, .page = snd_pcm_sgbuf_ops_page, }; /* via686 capture callbacks */ static struct snd_pcm_ops snd_via686_capture_ops = { .open = snd_via82xx_capture_open, .close = snd_via82xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_via82xx_hw_params, .hw_free = snd_via82xx_hw_free, .prepare = snd_via82xx_pcm_prepare, .trigger = snd_via82xx_pcm_trigger, .pointer = snd_via686_pcm_pointer, .page = snd_pcm_sgbuf_ops_page, }; static void init_viadev(struct via82xx_modem *chip, int idx, unsigned int reg_offset, int direction) { chip->devs[idx].reg_offset = reg_offset; chip->devs[idx].direction = direction; chip->devs[idx].port = chip->port + reg_offset; } /* * create a pcm instance for via686a/b */ static int __devinit snd_via686_pcm_new(struct via82xx_modem *chip) { struct snd_pcm *pcm; int err; chip->playback_devno = 0; chip->capture_devno = 1; chip->num_devs = 2; chip->intr_mask = 0x330000; /* FLAGS | EOL for MR, MW */ err = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, 
&snd_via686_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via686_capture_ops); pcm->dev_class = SNDRV_PCM_CLASS_MODEM; pcm->private_data = chip; strcpy(pcm->name, chip->card->shortname); chip->pcms[0] = pcm; init_viadev(chip, 0, VIA_REG_MO_STATUS, 0); init_viadev(chip, 1, VIA_REG_MI_STATUS, 1); if ((err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 64*1024, 128*1024)) < 0) return err; return 0; } /* * Mixer part */ static void snd_via82xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct via82xx_modem *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97) { struct via82xx_modem *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_via82xx_mixer_new(struct via82xx_modem *chip) { struct snd_ac97_template ac97; int err; static struct snd_ac97_bus_ops ops = { .write = snd_via82xx_codec_write, .read = snd_via82xx_codec_read, .wait = snd_via82xx_codec_wait, }; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus; chip->ac97_bus->clock = chip->ac97_clock; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_via82xx_mixer_free_ac97; ac97.pci = chip->pci; ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE; ac97.num = chip->ac97_secondary; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0) return err; return 0; } /* * proc interface */ static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct via82xx_modem *chip = entry->private_data; int i; snd_iprintf(buffer, "%s\n\n", chip->card->longname); for (i = 0; i < 0xa0; i += 4) { snd_iprintf(buffer, "%02x: %08x\n", i, inl(chip->port + i)); } } static void __devinit snd_via82xx_proc_init(struct via82xx_modem *chip) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(chip->card, "via82xx", &entry)) snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read); } /* * */ static int snd_via82xx_chip_init(struct via82xx_modem *chip) { unsigned int val; unsigned long end_time; unsigned char pval; pci_read_config_byte(chip->pci, VIA_MC97_CTRL, &pval); if((pval & VIA_MC97_CTRL_INIT) != VIA_MC97_CTRL_INIT) { pci_write_config_byte(chip->pci, 0x44, pval|VIA_MC97_CTRL_INIT); udelay(100); } pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (! (pval & VIA_ACLINK_C00_READY)) { /* codec not ready? */ /* deassert ACLink reset, force SYNC */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_ENABLE | VIA_ACLINK_CTRL_RESET | VIA_ACLINK_CTRL_SYNC); udelay(100); #if 1 /* FIXME: should we do full reset here for all chip models? */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, 0x00); udelay(100); #else /* deassert ACLink reset, force SYNC (warm AC'97 reset) */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_RESET|VIA_ACLINK_CTRL_SYNC); udelay(2); #endif /* ACLink on, deassert ACLink reset, VSR, SGD data out */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } pci_read_config_byte(chip->pci, VIA_ACLINK_CTRL, &pval); if ((pval & VIA_ACLINK_CTRL_INIT) != VIA_ACLINK_CTRL_INIT) { /* ACLink on, deassert ACLink reset, VSR, SGD data out */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } /* wait until codec ready */ end_time = jiffies + msecs_to_jiffies(750); do { pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (pval & VIA_ACLINK_C00_READY) /* primary codec ready */ break; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY) snd_printk(KERN_ERR "AC'97 codec is not ready [0x%x]\n", val); snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << 
VIA_REG_AC97_CODEC_ID_SHIFT)); end_time = jiffies + msecs_to_jiffies(750); snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT)); do { if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) { chip->ac97_secondary = 1; goto __ac97_ok2; } schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); /* This is ok, the most of motherboards have only one codec */ __ac97_ok2: /* route FM trap to IRQ, disable FM trap */ // pci_write_config_byte(chip->pci, VIA_FM_NMI_CTRL, 0); /* disable all GPI interrupts */ outl(0, VIAREG(chip, GPI_INTR)); return 0; } #ifdef CONFIG_PM /* * power management */ static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx_modem *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < 2; i++) snd_pcm_suspend_all(chip->pcms[i]); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); synchronize_irq(chip->irq); snd_ac97_suspend(chip->ac97); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_via82xx_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx_modem *chip = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "via82xx-modem: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_via82xx_chip_init(chip); snd_ac97_resume(chip->ac97); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static int snd_via82xx_free(struct via82xx_modem *chip) { unsigned int i; if (chip->irq < 0) goto 
__end_hw; /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); __end_hw: if (chip->irq >= 0) free_irq(chip->irq, chip); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_via82xx_dev_free(struct snd_device *device) { struct via82xx_modem *chip = device->device_data; return snd_via82xx_free(chip); } static int __devinit snd_via82xx_create(struct snd_card *card, struct pci_dev *pci, int chip_type, int revision, unsigned int ac97_clock, struct via82xx_modem ** r_via) { struct via82xx_modem *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_via82xx_dev_free, }; if ((err = pci_enable_device(pci)) < 0) return err; if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, card->driver)) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->port = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_via82xx_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_via82xx_free(chip); return -EBUSY; } chip->irq = pci->irq; if (ac97_clock >= 8000 && ac97_clock <= 48000) chip->ac97_clock = ac97_clock; synchronize_irq(chip->irq); if ((err = snd_via82xx_chip_init(chip)) < 0) { snd_via82xx_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_via82xx_free(chip); return err; } /* The 8233 ac97 controller does not implement the master bit * in the pci command register. IMHO this is a violation of the PCI spec. * We call pci_set_master here because it does not hurt. 
*/ pci_set_master(pci); snd_card_set_dev(card, &pci->dev); *r_via = chip; return 0; } static int __devinit snd_via82xx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct via82xx_modem *chip; int chip_type = 0, card_type; unsigned int i; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; card_type = pci_id->driver_data; switch (card_type) { case TYPE_CARD_VIA82XX_MODEM: strcpy(card->driver, "VIA82XX-MODEM"); sprintf(card->shortname, "VIA 82XX modem"); break; default: snd_printk(KERN_ERR "invalid card type %d\n", card_type); err = -EINVAL; goto __error; } if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision, ac97_clock, &chip)) < 0) goto __error; card->private_data = chip; if ((err = snd_via82xx_mixer_new(chip)) < 0) goto __error; if ((err = snd_via686_pcm_new(chip)) < 0 ) goto __error; /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, chip->port, chip->irq); snd_via82xx_proc_init(chip); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); return 0; __error: snd_card_free(card); return err; } static void __devexit snd_via82xx_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_via82xx_modem_ids, .probe = snd_via82xx_probe, .remove = __devexit_p(snd_via82xx_remove), #ifdef CONFIG_PM .suspend = snd_via82xx_suspend, .resume = snd_via82xx_resume, #endif }; static int __init alsa_card_via82xx_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_via82xx_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_via82xx_init) module_exit(alsa_card_via82xx_exit)
gpl-2.0
HTCKernels/One-SV-cricket-k2plccl
sound/pci/via82xx_modem.c
4893
35335
/* * ALSA modem driver for VIA VT82xx (South Bridge) * * VT82C686A/B/C, VT8233A/C, VT8235 * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * Tjeerd.Mulder <Tjeerd.Mulder@fujitsu-siemens.com> * 2002 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * Changes: * * Sep. 2, 2004 Sasha Khapyorsky <sashak@alsa-project.org> * Modified from original audio driver 'via82xx.c' to support AC97 * modems. 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #if 0 #define POINTER_DEBUG #endif MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("VIA VT82xx modem"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}"); static int index = -2; /* Exclude the first card */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int ac97_clock = 48000; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for VIA 82xx bridge."); module_param(ac97_clock, int, 0444); MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz)."); /* just for backward compatibility */ static bool enable; module_param(enable, bool, 0444); /* * Direct registers */ #define VIAREG(via, x) ((via)->port + VIA_REG_##x) #define VIADEV_REG(viadev, x) ((viadev)->port + VIA_REG_##x) /* common offsets */ #define VIA_REG_OFFSET_STATUS 0x00 /* byte - channel status */ #define VIA_REG_STAT_ACTIVE 0x80 /* RO */ #define VIA_REG_STAT_PAUSED 0x40 /* RO */ #define VIA_REG_STAT_TRIGGER_QUEUED 0x08 /* RO */ #define VIA_REG_STAT_STOPPED 0x04 /* RWC */ #define VIA_REG_STAT_EOL 0x02 /* RWC */ #define VIA_REG_STAT_FLAG 0x01 /* RWC */ #define VIA_REG_OFFSET_CONTROL 0x01 /* byte - channel control */ #define VIA_REG_CTRL_START 0x80 /* WO */ #define VIA_REG_CTRL_TERMINATE 0x40 /* WO */ #define VIA_REG_CTRL_AUTOSTART 0x20 #define VIA_REG_CTRL_PAUSE 0x08 /* RW */ #define VIA_REG_CTRL_INT_STOP 0x04 #define VIA_REG_CTRL_INT_EOL 0x02 #define VIA_REG_CTRL_INT_FLAG 0x01 #define VIA_REG_CTRL_RESET 0x01 /* RW - probably reset? 
undocumented */ #define VIA_REG_CTRL_INT (VIA_REG_CTRL_INT_FLAG | VIA_REG_CTRL_INT_EOL | VIA_REG_CTRL_AUTOSTART) #define VIA_REG_OFFSET_TYPE 0x02 /* byte - channel type (686 only) */ #define VIA_REG_TYPE_AUTOSTART 0x80 /* RW - autostart at EOL */ #define VIA_REG_TYPE_16BIT 0x20 /* RW */ #define VIA_REG_TYPE_STEREO 0x10 /* RW */ #define VIA_REG_TYPE_INT_LLINE 0x00 #define VIA_REG_TYPE_INT_LSAMPLE 0x04 #define VIA_REG_TYPE_INT_LESSONE 0x08 #define VIA_REG_TYPE_INT_MASK 0x0c #define VIA_REG_TYPE_INT_EOL 0x02 #define VIA_REG_TYPE_INT_FLAG 0x01 #define VIA_REG_OFFSET_TABLE_PTR 0x04 /* dword - channel table pointer */ #define VIA_REG_OFFSET_CURR_PTR 0x04 /* dword - channel current pointer */ #define VIA_REG_OFFSET_STOP_IDX 0x08 /* dword - stop index, channel type, sample rate */ #define VIA_REG_OFFSET_CURR_COUNT 0x0c /* dword - channel current count (24 bit) */ #define VIA_REG_OFFSET_CURR_INDEX 0x0f /* byte - channel current index (for via8233 only) */ #define DEFINE_VIA_REGSET(name,val) \ enum {\ VIA_REG_##name##_STATUS = (val),\ VIA_REG_##name##_CONTROL = (val) + 0x01,\ VIA_REG_##name##_TYPE = (val) + 0x02,\ VIA_REG_##name##_TABLE_PTR = (val) + 0x04,\ VIA_REG_##name##_CURR_PTR = (val) + 0x04,\ VIA_REG_##name##_STOP_IDX = (val) + 0x08,\ VIA_REG_##name##_CURR_COUNT = (val) + 0x0c,\ } /* modem block */ DEFINE_VIA_REGSET(MO, 0x40); DEFINE_VIA_REGSET(MI, 0x50); /* AC'97 */ #define VIA_REG_AC97 0x80 /* dword */ #define VIA_REG_AC97_CODEC_ID_MASK (3<<30) #define VIA_REG_AC97_CODEC_ID_SHIFT 30 #define VIA_REG_AC97_CODEC_ID_PRIMARY 0x00 #define VIA_REG_AC97_CODEC_ID_SECONDARY 0x01 #define VIA_REG_AC97_SECONDARY_VALID (1<<27) #define VIA_REG_AC97_PRIMARY_VALID (1<<25) #define VIA_REG_AC97_BUSY (1<<24) #define VIA_REG_AC97_READ (1<<23) #define VIA_REG_AC97_CMD_SHIFT 16 #define VIA_REG_AC97_CMD_MASK 0x7e #define VIA_REG_AC97_DATA_SHIFT 0 #define VIA_REG_AC97_DATA_MASK 0xffff #define VIA_REG_SGD_SHADOW 0x84 /* dword */ #define VIA_REG_SGD_STAT_PB_FLAG (1<<0) #define 
VIA_REG_SGD_STAT_CP_FLAG (1<<1) #define VIA_REG_SGD_STAT_FM_FLAG (1<<2) #define VIA_REG_SGD_STAT_PB_EOL (1<<4) #define VIA_REG_SGD_STAT_CP_EOL (1<<5) #define VIA_REG_SGD_STAT_FM_EOL (1<<6) #define VIA_REG_SGD_STAT_PB_STOP (1<<8) #define VIA_REG_SGD_STAT_CP_STOP (1<<9) #define VIA_REG_SGD_STAT_FM_STOP (1<<10) #define VIA_REG_SGD_STAT_PB_ACTIVE (1<<12) #define VIA_REG_SGD_STAT_CP_ACTIVE (1<<13) #define VIA_REG_SGD_STAT_FM_ACTIVE (1<<14) #define VIA_REG_SGD_STAT_MR_FLAG (1<<16) #define VIA_REG_SGD_STAT_MW_FLAG (1<<17) #define VIA_REG_SGD_STAT_MR_EOL (1<<20) #define VIA_REG_SGD_STAT_MW_EOL (1<<21) #define VIA_REG_SGD_STAT_MR_STOP (1<<24) #define VIA_REG_SGD_STAT_MW_STOP (1<<25) #define VIA_REG_SGD_STAT_MR_ACTIVE (1<<28) #define VIA_REG_SGD_STAT_MW_ACTIVE (1<<29) #define VIA_REG_GPI_STATUS 0x88 #define VIA_REG_GPI_INTR 0x8c #define VIA_TBL_BIT_FLAG 0x40000000 #define VIA_TBL_BIT_EOL 0x80000000 /* pci space */ #define VIA_ACLINK_STAT 0x40 #define VIA_ACLINK_C11_READY 0x20 #define VIA_ACLINK_C10_READY 0x10 #define VIA_ACLINK_C01_READY 0x04 /* secondary codec ready */ #define VIA_ACLINK_LOWPOWER 0x02 /* low-power state */ #define VIA_ACLINK_C00_READY 0x01 /* primary codec ready */ #define VIA_ACLINK_CTRL 0x41 #define VIA_ACLINK_CTRL_ENABLE 0x80 /* 0: disable, 1: enable */ #define VIA_ACLINK_CTRL_RESET 0x40 /* 0: assert, 1: de-assert */ #define VIA_ACLINK_CTRL_SYNC 0x20 /* 0: release SYNC, 1: force SYNC hi */ #define VIA_ACLINK_CTRL_SDO 0x10 /* 0: release SDO, 1: force SDO hi */ #define VIA_ACLINK_CTRL_VRA 0x08 /* 0: disable VRA, 1: enable VRA */ #define VIA_ACLINK_CTRL_PCM 0x04 /* 0: disable PCM, 1: enable PCM */ #define VIA_ACLINK_CTRL_FM 0x02 /* via686 only */ #define VIA_ACLINK_CTRL_SB 0x01 /* via686 only */ #define VIA_ACLINK_CTRL_INIT (VIA_ACLINK_CTRL_ENABLE|\ VIA_ACLINK_CTRL_RESET|\ VIA_ACLINK_CTRL_PCM) #define VIA_FUNC_ENABLE 0x42 #define VIA_FUNC_MIDI_PNP 0x80 /* FIXME: it's 0x40 in the datasheet! */ #define VIA_FUNC_MIDI_IRQMASK 0x40 /* FIXME: not documented! 
*/ #define VIA_FUNC_RX2C_WRITE 0x20 #define VIA_FUNC_SB_FIFO_EMPTY 0x10 #define VIA_FUNC_ENABLE_GAME 0x08 #define VIA_FUNC_ENABLE_FM 0x04 #define VIA_FUNC_ENABLE_MIDI 0x02 #define VIA_FUNC_ENABLE_SB 0x01 #define VIA_PNP_CONTROL 0x43 #define VIA_MC97_CTRL 0x44 #define VIA_MC97_CTRL_ENABLE 0x80 #define VIA_MC97_CTRL_SECONDARY 0x40 #define VIA_MC97_CTRL_INIT (VIA_MC97_CTRL_ENABLE|\ VIA_MC97_CTRL_SECONDARY) /* * pcm stream */ struct snd_via_sg_table { unsigned int offset; unsigned int size; } ; #define VIA_TABLE_SIZE 255 struct viadev { unsigned int reg_offset; unsigned long port; int direction; /* playback = 0, capture = 1 */ struct snd_pcm_substream *substream; int running; unsigned int tbl_entries; /* # descriptors */ struct snd_dma_buffer table; struct snd_via_sg_table *idx_table; /* for recovery from the unexpected pointer */ unsigned int lastpos; unsigned int bufsize; unsigned int bufsize2; }; enum { TYPE_CARD_VIA82XX_MODEM = 1 }; #define VIA_MAX_MODEM_DEVS 2 struct via82xx_modem { int irq; unsigned long port; unsigned int intr_mask; /* SGD_SHADOW mask to check interrupts */ struct pci_dev *pci; struct snd_card *card; unsigned int num_devs; unsigned int playback_devno, capture_devno; struct viadev devs[VIA_MAX_MODEM_DEVS]; struct snd_pcm *pcms[2]; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; unsigned int ac97_clock; unsigned int ac97_secondary; /* secondary AC'97 codec is present */ spinlock_t reg_lock; struct snd_info_entry *proc_entry; }; static DEFINE_PCI_DEVICE_TABLE(snd_via82xx_modem_ids) = { { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_via82xx_modem_ids); /* */ /* * allocate and initialize the descriptor buffers * periods = number of periods * fragsize = period size in bytes */ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream, struct pci_dev *pci, unsigned int periods, unsigned int fragsize) { unsigned int i, idx, ofs, rest; struct via82xx_modem *chip = 
snd_pcm_substream_chip(substream); if (dev->table.area == NULL) { /* the start of each lists must be aligned to 8 bytes, * but the kernel pages are much bigger, so we don't care */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8), &dev->table) < 0) return -ENOMEM; } if (! dev->idx_table) { dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL); if (! dev->idx_table) return -ENOMEM; } /* fill the entries */ idx = 0; ofs = 0; for (i = 0; i < periods; i++) { rest = fragsize; /* fill descriptors for a period. * a period can be split to several descriptors if it's * over page boundary. */ do { unsigned int r; unsigned int flag; unsigned int addr; if (idx >= VIA_TABLE_SIZE) { snd_printk(KERN_ERR "via82xx: too much table size!\n"); return -EINVAL; } addr = snd_pcm_sgbuf_get_addr(substream, ofs); ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr); r = PAGE_SIZE - (ofs % PAGE_SIZE); if (rest < r) r = rest; rest -= r; if (! 
rest) { if (i == periods - 1) flag = VIA_TBL_BIT_EOL; /* buffer boundary */ else flag = VIA_TBL_BIT_FLAG; /* period boundary */ } else flag = 0; /* period continues to the next */ /* printk(KERN_DEBUG "via: tbl %d: at %d size %d " "(rest %d)\n", idx, ofs, r, rest); */ ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag); dev->idx_table[idx].offset = ofs; dev->idx_table[idx].size = r; ofs += r; idx++; } while (rest > 0); } dev->tbl_entries = idx; dev->bufsize = periods * fragsize; dev->bufsize2 = dev->bufsize / 2; return 0; } static int clean_via_table(struct viadev *dev, struct snd_pcm_substream *substream, struct pci_dev *pci) { if (dev->table.area) { snd_dma_free_pages(&dev->table); dev->table.area = NULL; } kfree(dev->idx_table); dev->idx_table = NULL; return 0; } /* * Basic I/O */ static inline unsigned int snd_via82xx_codec_xread(struct via82xx_modem *chip) { return inl(VIAREG(chip, AC97)); } static inline void snd_via82xx_codec_xwrite(struct via82xx_modem *chip, unsigned int val) { outl(val, VIAREG(chip, AC97)); } static int snd_via82xx_codec_ready(struct via82xx_modem *chip, int secondary) { unsigned int timeout = 1000; /* 1ms */ unsigned int val; while (timeout-- > 0) { udelay(1); if (!((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY)) return val & 0xffff; } snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n", secondary, snd_via82xx_codec_xread(chip)); return -EIO; } static int snd_via82xx_codec_valid(struct via82xx_modem *chip, int secondary) { unsigned int timeout = 1000; /* 1ms */ unsigned int val, val1; unsigned int stat = !secondary ? 
VIA_REG_AC97_PRIMARY_VALID : VIA_REG_AC97_SECONDARY_VALID; while (timeout-- > 0) { val = snd_via82xx_codec_xread(chip); val1 = val & (VIA_REG_AC97_BUSY | stat); if (val1 == stat) return val & 0xffff; udelay(1); } return -EIO; } static void snd_via82xx_codec_wait(struct snd_ac97 *ac97) { struct via82xx_modem *chip = ac97->private_data; int err; err = snd_via82xx_codec_ready(chip, ac97->num); /* here we need to wait fairly for long time.. */ msleep(500); } static void snd_via82xx_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct via82xx_modem *chip = ac97->private_data; unsigned int xval; if(reg == AC97_GPIO_STATUS) { outl(val, VIAREG(chip, GPI_STATUS)); return; } xval = !ac97->num ? VIA_REG_AC97_CODEC_ID_PRIMARY : VIA_REG_AC97_CODEC_ID_SECONDARY; xval <<= VIA_REG_AC97_CODEC_ID_SHIFT; xval |= reg << VIA_REG_AC97_CMD_SHIFT; xval |= val << VIA_REG_AC97_DATA_SHIFT; snd_via82xx_codec_xwrite(chip, xval); snd_via82xx_codec_ready(chip, ac97->num); } static unsigned short snd_via82xx_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct via82xx_modem *chip = ac97->private_data; unsigned int xval, val = 0xffff; int again = 0; xval = ac97->num << VIA_REG_AC97_CODEC_ID_SHIFT; xval |= ac97->num ? 
VIA_REG_AC97_SECONDARY_VALID : VIA_REG_AC97_PRIMARY_VALID; xval |= VIA_REG_AC97_READ; xval |= (reg & 0x7f) << VIA_REG_AC97_CMD_SHIFT; while (1) { if (again++ > 3) { snd_printk(KERN_ERR "codec_read: codec %i is not valid [0x%x]\n", ac97->num, snd_via82xx_codec_xread(chip)); return 0xffff; } snd_via82xx_codec_xwrite(chip, xval); udelay (20); if (snd_via82xx_codec_valid(chip, ac97->num) >= 0) { udelay(25); val = snd_via82xx_codec_xread(chip); break; } } return val & 0xffff; } static void snd_via82xx_channel_reset(struct via82xx_modem *chip, struct viadev *viadev) { outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET, VIADEV_REG(viadev, OFFSET_CONTROL)); inb(VIADEV_REG(viadev, OFFSET_CONTROL)); udelay(50); /* disable interrupts */ outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL)); /* clear interrupts */ outb(0x03, VIADEV_REG(viadev, OFFSET_STATUS)); outb(0x00, VIADEV_REG(viadev, OFFSET_TYPE)); /* for via686 */ // outl(0, VIADEV_REG(viadev, OFFSET_CURR_PTR)); viadev->lastpos = 0; } /* * Interrupt handler */ static irqreturn_t snd_via82xx_interrupt(int irq, void *dev_id) { struct via82xx_modem *chip = dev_id; unsigned int status; unsigned int i; status = inl(VIAREG(chip, SGD_SHADOW)); if (! (status & chip->intr_mask)) { return IRQ_NONE; } // _skip_sgd: /* check status for each stream */ spin_lock(&chip->reg_lock); for (i = 0; i < chip->num_devs; i++) { struct viadev *viadev = &chip->devs[i]; unsigned char c_status = inb(VIADEV_REG(viadev, OFFSET_STATUS)); c_status &= (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG|VIA_REG_STAT_STOPPED); if (! 
c_status) continue; if (viadev->substream && viadev->running) { spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(viadev->substream); spin_lock(&chip->reg_lock); } outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */ } spin_unlock(&chip->reg_lock); return IRQ_HANDLED; } /* * PCM callbacks */ /* * trigger callback */ static int snd_via82xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; unsigned char val = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_SUSPEND: val |= VIA_REG_CTRL_START; viadev->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: val = VIA_REG_CTRL_TERMINATE; viadev->running = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: val |= VIA_REG_CTRL_PAUSE; viadev->running = 0; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: viadev->running = 1; break; default: return -EINVAL; } outb(val, VIADEV_REG(viadev, OFFSET_CONTROL)); if (cmd == SNDRV_PCM_TRIGGER_STOP) snd_via82xx_channel_reset(chip, viadev); return 0; } /* * pointer callbacks */ /* * calculate the linear position at the given sg-buffer index and the rest count */ #define check_invalid_pos(viadev,pos) \ ((pos) < viadev->lastpos && ((pos) >= viadev->bufsize2 ||\ viadev->lastpos < viadev->bufsize2)) static inline unsigned int calc_linear_pos(struct viadev *viadev, unsigned int idx, unsigned int count) { unsigned int size, res; size = viadev->idx_table[idx].size; res = viadev->idx_table[idx].offset + size - count; /* check the validity of the calculated position */ if (size < count) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr (size = %d, count = %d)\n", (int)size, (int)count); res = viadev->lastpos; } else if (check_invalid_pos(viadev, res)) { #ifdef POINTER_DEBUG printk(KERN_DEBUG "fail: idx = %i/%i, lastpos = 0x%x, " "bufsize2 = 0x%x, offsize = 0x%x, size = 0x%x, " "count = 0x%x\n", idx, viadev->tbl_entries, viadev->lastpos, 
viadev->bufsize2, viadev->idx_table[idx].offset, viadev->idx_table[idx].size, count); #endif if (count && size < count) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr, " "using last valid pointer\n"); res = viadev->lastpos; } else { if (! count) /* bogus count 0 on the DMA boundary? */ res = viadev->idx_table[idx].offset; else /* count register returns full size * when end of buffer is reached */ res = viadev->idx_table[idx].offset + size; if (check_invalid_pos(viadev, res)) { snd_printd(KERN_ERR "invalid via82xx_cur_ptr (2), " "using last valid pointer\n"); res = viadev->lastpos; } } } viadev->lastpos = res; /* remember the last position */ if (res >= viadev->bufsize) res -= viadev->bufsize; return res; } /* * get the current pointer on via686 */ static snd_pcm_uframes_t snd_via686_pcm_pointer(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; unsigned int idx, ptr, count, res; if (snd_BUG_ON(!viadev->tbl_entries)) return 0; if (!(inb(VIADEV_REG(viadev, OFFSET_STATUS)) & VIA_REG_STAT_ACTIVE)) return 0; spin_lock(&chip->reg_lock); count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT)) & 0xffffff; /* The via686a does not have the current index register, * so we need to calculate the index from CURR_PTR. 
*/ ptr = inl(VIADEV_REG(viadev, OFFSET_CURR_PTR)); if (ptr <= (unsigned int)viadev->table.addr) idx = 0; else /* CURR_PTR holds the address + 8 */ idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) % viadev->tbl_entries; res = calc_linear_pos(viadev, idx, count); spin_unlock(&chip->reg_lock); return bytes_to_frames(substream->runtime, res); } /* * hw_params callback: * allocate the buffer and build up the buffer description table */ static int snd_via82xx_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; err = build_via_table(viadev, substream, chip->pci, params_periods(hw_params), params_period_bytes(hw_params)); if (err < 0) return err; snd_ac97_write(chip->ac97, AC97_LINE1_RATE, params_rate(hw_params)); snd_ac97_write(chip->ac97, AC97_LINE1_LEVEL, 0); return 0; } /* * hw_free callback: * clean up the buffer description table and release the buffer */ static int snd_via82xx_hw_free(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; clean_via_table(viadev, substream, chip->pci); snd_pcm_lib_free_pages(substream); return 0; } /* * set up the table pointer */ static void snd_via82xx_set_table_ptr(struct via82xx_modem *chip, struct viadev *viadev) { snd_via82xx_codec_ready(chip, chip->ac97_secondary); outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR)); udelay(20); snd_via82xx_codec_ready(chip, chip->ac97_secondary); } /* * prepare callback for playback and capture */ static int snd_via82xx_pcm_prepare(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = substream->runtime->private_data; 
snd_via82xx_channel_reset(chip, viadev); /* this must be set after channel_reset */ snd_via82xx_set_table_ptr(chip, viadev); outb(VIA_REG_TYPE_AUTOSTART|VIA_REG_TYPE_INT_EOL|VIA_REG_TYPE_INT_FLAG, VIADEV_REG(viadev, OFFSET_TYPE)); return 0; } /* * pcm hardware definition, identical for both playback and capture */ static struct snd_pcm_hardware snd_via82xx_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | /* SNDRV_PCM_INFO_RESUME | */ SNDRV_PCM_INFO_PAUSE), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 16000, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = 128 * 1024, .period_bytes_min = 32, .period_bytes_max = 128 * 1024, .periods_min = 2, .periods_max = VIA_TABLE_SIZE / 2, .fifo_size = 0, }; /* * open callback skeleton */ static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev *viadev, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; int err; static unsigned int rates[] = { 8000, 9600, 12000, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; runtime->hw = snd_via82xx_hw; if ((err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates)) < 0) return err; /* we may remove following constaint when we modify table entries in interrupt */ if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) return err; runtime->private_data = viadev; viadev->substream = substream; return 0; } /* * open callback for playback */ static int snd_via82xx_playback_open(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number]; return 
snd_via82xx_modem_pcm_open(chip, viadev, substream); } /* * open callback for capture */ static int snd_via82xx_capture_open(struct snd_pcm_substream *substream) { struct via82xx_modem *chip = snd_pcm_substream_chip(substream); struct viadev *viadev = &chip->devs[chip->capture_devno + substream->pcm->device]; return snd_via82xx_modem_pcm_open(chip, viadev, substream); } /* * close callback */ static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream) { struct viadev *viadev = substream->runtime->private_data; viadev->substream = NULL; return 0; } /* via686 playback callbacks */ static struct snd_pcm_ops snd_via686_playback_ops = { .open = snd_via82xx_playback_open, .close = snd_via82xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_via82xx_hw_params, .hw_free = snd_via82xx_hw_free, .prepare = snd_via82xx_pcm_prepare, .trigger = snd_via82xx_pcm_trigger, .pointer = snd_via686_pcm_pointer, .page = snd_pcm_sgbuf_ops_page, }; /* via686 capture callbacks */ static struct snd_pcm_ops snd_via686_capture_ops = { .open = snd_via82xx_capture_open, .close = snd_via82xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_via82xx_hw_params, .hw_free = snd_via82xx_hw_free, .prepare = snd_via82xx_pcm_prepare, .trigger = snd_via82xx_pcm_trigger, .pointer = snd_via686_pcm_pointer, .page = snd_pcm_sgbuf_ops_page, }; static void init_viadev(struct via82xx_modem *chip, int idx, unsigned int reg_offset, int direction) { chip->devs[idx].reg_offset = reg_offset; chip->devs[idx].direction = direction; chip->devs[idx].port = chip->port + reg_offset; } /* * create a pcm instance for via686a/b */ static int __devinit snd_via686_pcm_new(struct via82xx_modem *chip) { struct snd_pcm *pcm; int err; chip->playback_devno = 0; chip->capture_devno = 1; chip->num_devs = 2; chip->intr_mask = 0x330000; /* FLAGS | EOL for MR, MW */ err = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, 
&snd_via686_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via686_capture_ops); pcm->dev_class = SNDRV_PCM_CLASS_MODEM; pcm->private_data = chip; strcpy(pcm->name, chip->card->shortname); chip->pcms[0] = pcm; init_viadev(chip, 0, VIA_REG_MO_STATUS, 0); init_viadev(chip, 1, VIA_REG_MI_STATUS, 1); if ((err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 64*1024, 128*1024)) < 0) return err; return 0; } /* * Mixer part */ static void snd_via82xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct via82xx_modem *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97) { struct via82xx_modem *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_via82xx_mixer_new(struct via82xx_modem *chip) { struct snd_ac97_template ac97; int err; static struct snd_ac97_bus_ops ops = { .write = snd_via82xx_codec_write, .read = snd_via82xx_codec_read, .wait = snd_via82xx_codec_wait, }; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus; chip->ac97_bus->clock = chip->ac97_clock; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_via82xx_mixer_free_ac97; ac97.pci = chip->pci; ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE; ac97.num = chip->ac97_secondary; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0) return err; return 0; } /* * proc interface */ static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct via82xx_modem *chip = entry->private_data; int i; snd_iprintf(buffer, "%s\n\n", chip->card->longname); for (i = 0; i < 0xa0; i += 4) { snd_iprintf(buffer, "%02x: %08x\n", i, inl(chip->port + i)); } } static void __devinit snd_via82xx_proc_init(struct via82xx_modem *chip) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(chip->card, "via82xx", &entry)) snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read); } /* * */ static int snd_via82xx_chip_init(struct via82xx_modem *chip) { unsigned int val; unsigned long end_time; unsigned char pval; pci_read_config_byte(chip->pci, VIA_MC97_CTRL, &pval); if((pval & VIA_MC97_CTRL_INIT) != VIA_MC97_CTRL_INIT) { pci_write_config_byte(chip->pci, 0x44, pval|VIA_MC97_CTRL_INIT); udelay(100); } pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (! (pval & VIA_ACLINK_C00_READY)) { /* codec not ready? */ /* deassert ACLink reset, force SYNC */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_ENABLE | VIA_ACLINK_CTRL_RESET | VIA_ACLINK_CTRL_SYNC); udelay(100); #if 1 /* FIXME: should we do full reset here for all chip models? */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, 0x00); udelay(100); #else /* deassert ACLink reset, force SYNC (warm AC'97 reset) */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_RESET|VIA_ACLINK_CTRL_SYNC); udelay(2); #endif /* ACLink on, deassert ACLink reset, VSR, SGD data out */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } pci_read_config_byte(chip->pci, VIA_ACLINK_CTRL, &pval); if ((pval & VIA_ACLINK_CTRL_INIT) != VIA_ACLINK_CTRL_INIT) { /* ACLink on, deassert ACLink reset, VSR, SGD data out */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } /* wait until codec ready */ end_time = jiffies + msecs_to_jiffies(750); do { pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (pval & VIA_ACLINK_C00_READY) /* primary codec ready */ break; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY) snd_printk(KERN_ERR "AC'97 codec is not ready [0x%x]\n", val); snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << 
VIA_REG_AC97_CODEC_ID_SHIFT)); end_time = jiffies + msecs_to_jiffies(750); snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT)); do { if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) { chip->ac97_secondary = 1; goto __ac97_ok2; } schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); /* This is ok, the most of motherboards have only one codec */ __ac97_ok2: /* route FM trap to IRQ, disable FM trap */ // pci_write_config_byte(chip->pci, VIA_FM_NMI_CTRL, 0); /* disable all GPI interrupts */ outl(0, VIAREG(chip, GPI_INTR)); return 0; } #ifdef CONFIG_PM /* * power management */ static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx_modem *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < 2; i++) snd_pcm_suspend_all(chip->pcms[i]); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); synchronize_irq(chip->irq); snd_ac97_suspend(chip->ac97); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_via82xx_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx_modem *chip = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "via82xx-modem: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_via82xx_chip_init(chip); snd_ac97_resume(chip->ac97); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static int snd_via82xx_free(struct via82xx_modem *chip) { unsigned int i; if (chip->irq < 0) goto 
__end_hw; /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); __end_hw: if (chip->irq >= 0) free_irq(chip->irq, chip); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_via82xx_dev_free(struct snd_device *device) { struct via82xx_modem *chip = device->device_data; return snd_via82xx_free(chip); } static int __devinit snd_via82xx_create(struct snd_card *card, struct pci_dev *pci, int chip_type, int revision, unsigned int ac97_clock, struct via82xx_modem ** r_via) { struct via82xx_modem *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_via82xx_dev_free, }; if ((err = pci_enable_device(pci)) < 0) return err; if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, card->driver)) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->port = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_via82xx_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_via82xx_free(chip); return -EBUSY; } chip->irq = pci->irq; if (ac97_clock >= 8000 && ac97_clock <= 48000) chip->ac97_clock = ac97_clock; synchronize_irq(chip->irq); if ((err = snd_via82xx_chip_init(chip)) < 0) { snd_via82xx_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_via82xx_free(chip); return err; } /* The 8233 ac97 controller does not implement the master bit * in the pci command register. IMHO this is a violation of the PCI spec. * We call pci_set_master here because it does not hurt. 
*/ pci_set_master(pci); snd_card_set_dev(card, &pci->dev); *r_via = chip; return 0; } static int __devinit snd_via82xx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct via82xx_modem *chip; int chip_type = 0, card_type; unsigned int i; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; card_type = pci_id->driver_data; switch (card_type) { case TYPE_CARD_VIA82XX_MODEM: strcpy(card->driver, "VIA82XX-MODEM"); sprintf(card->shortname, "VIA 82XX modem"); break; default: snd_printk(KERN_ERR "invalid card type %d\n", card_type); err = -EINVAL; goto __error; } if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision, ac97_clock, &chip)) < 0) goto __error; card->private_data = chip; if ((err = snd_via82xx_mixer_new(chip)) < 0) goto __error; if ((err = snd_via686_pcm_new(chip)) < 0 ) goto __error; /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, chip->port, chip->irq); snd_via82xx_proc_init(chip); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); return 0; __error: snd_card_free(card); return err; } static void __devexit snd_via82xx_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_via82xx_modem_ids, .probe = snd_via82xx_probe, .remove = __devexit_p(snd_via82xx_remove), #ifdef CONFIG_PM .suspend = snd_via82xx_suspend, .resume = snd_via82xx_resume, #endif }; static int __init alsa_card_via82xx_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_via82xx_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_via82xx_init) module_exit(alsa_card_via82xx_exit)
gpl-2.0
zparallax/amplitude-kk-tw
drivers/gpu/drm/nouveau/nv04_instmem.c
5917
4440
/* nv04_instmem.c: instance-memory (PRAMIN) management for NV04-class
 * nouveau hardware.  Carves a region out of the end of VRAM, places the
 * fixed hardware tables (RAMHT/RAMRO/RAMFC) inside it, and manages the
 * remainder with a drm_mm allocator for gpuobj storage.
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

/* returns the size of fifo context */
static int
nouveau_fifo_ctx_size(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Per-channel RAMFC entry size in bytes, keyed off chipset
	 * generation: 128 for NV40+, 64 for NV17+, 32 otherwise.
	 */
	if (dev_priv->chipset >= 0x40)
		return 128;
	else
	if (dev_priv->chipset >= 0x17)
		return 64;
	return 32;
}

/* Reserve PRAMIN space, create the shared RAMHT/RAMRO/RAMFC objects and
 * initialise the heap used for subsequent instmem allocations.
 * Returns 0 on success or a negative errno from the first failing step.
 * Note: earlier-created objects are not unwound on failure here;
 * presumably the caller tears down via nv04_instmem_takedown() —
 * TODO confirm against the driver's init error path.
 */
int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	u32 offset, length;
	int ret;

	/* RAMIN always available */
	dev_priv->ramin_available = true;

	/* Reserve space at end of VRAM for PRAMIN */
	if (dev_priv->card_type >= NV_40) {
		/* vs = number of enabled vertex shader units, read from
		 * register 0x001540 bits 15:8 (one bit per unit).
		 */
		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
		u32 rsvd;

		/* estimate grctx size, the magics come from nv40_grctx.c */
		if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
		else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
		else if (nv44_graph_class(dev))	    rsvd = 0x4980 * vs;
		else				    rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= dev_priv->engine.fifo.channels;

		/* pciegart table */
		if (pci_is_pcie(dev->pdev))
			rsvd += 512 * 1024;

		/* object storage */
		rsvd += 512 * 1024;

		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
	} else {
		/* Pre-NV40: fixed 512KiB reservation. */
		dev_priv->ramin_rsvd_vram = 512 * 1024;
	}

	/* Setup shared RAMHT */
	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	/* nouveau_ramht_new() takes its own reference; drop ours
	 * unconditionally before checking the result.
	 */
	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret)
		return ret;

	/* And RAMRO */
	ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
	if (ret)
		return ret;

	/* And RAMFC */
	length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		offset = 0x20000;
		break;
	default:
		offset = 0x11400;
		break;
	}

	ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
	if (ret)
		return ret;

	/* Only allow space after RAMFC to be used for object allocation */
	offset += length;

	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
	 * ("new style" control) the upper 16-bits of 0x2220 points at this
	 * other mysterious table that's clobbering important things.
	 *
	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
	 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
	 */
	if (dev_priv->card_type >= NV_40) {
		if (offset < 0x40000)
			offset = 0x40000;
	}

	/* Everything from 'offset' to the end of the reservation is
	 * general-purpose instmem, handed to the drm_mm allocator.
	 */
	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
			  dev_priv->ramin_rsvd_vram - offset);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Release the objects and heap created by nv04_instmem_init().
 * Safe to call with a partially initialised state: the refs tolerate
 * NULL slots and the heap is only torn down if it was initialised.
 */
void
nv04_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);
}

/* No state to save on NV04-class instmem; suspend is a no-op. */
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}

/* Counterpart to the no-op suspend above. */
void
nv04_instmem_resume(struct drm_device *dev)
{
}

/* Allocate 'size' bytes of instance memory (aligned to 'align') from the
 * RAMIN heap and record the node and its VRAM-instance address in
 * 'gpuobj'.  Returns 0 on success, -ENOMEM if the heap is exhausted.
 * 'chan' is unused here — presumably kept for interface parity with
 * other instmem backends; TODO confirm.
 */
int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_mm_node *ramin = NULL;

	/* drm_mm pre-get/search/get-atomic dance: pre-allocate node
	 * storage outside the spinlock, then search and claim under it.
	 * get_block_atomic may still fail (returning NULL), in which
	 * case we loop and try again with a fresh pre-get.
	 */
	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		ramin = drm_mm_get_block_atomic(ramin, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	gpuobj->node  = ramin;
	gpuobj->vinst = ramin->start;
	return 0;
}

/* Return a gpuobj's instance memory to the RAMIN heap and clear the
 * node pointer (guards against double-free / stale use).
 */
void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	drm_mm_put_block(gpuobj->node);
	gpuobj->node = NULL;
	spin_unlock(&dev_priv->ramin_lock);
}

/* On NV04 the physical instance address equals the virtual one; no
 * mapping step is needed beyond copying it.
 */
int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	gpuobj->pinst = gpuobj->vinst;
	return 0;
}

/* Nothing to undo for the trivial map above. */
void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

/* No flush required on this hardware generation. */
void
nv04_instmem_flush(struct drm_device *dev)
{
}
gpl-2.0
binkybear/AK-OnePone
drivers/sbus/char/bbc_envctrl.c
9501
16169
/* bbc_envctrl.c: UltraSPARC-III environment control driver. * * Copyright (C) 2001, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kthread.h> #include <linux/delay.h> #include <linux/kmod.h> #include <linux/reboot.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/of_device.h> #include <asm/oplib.h> #include "bbc_i2c.h" #include "max1617.h" #undef ENVCTRL_TRACE /* WARNING: Making changes to this driver is very dangerous. * If you misprogram the sensor chips they can * cut the power on you instantly. */ /* Two temperature sensors exist in the SunBLADE-1000 enclosure. * Both are implemented using max1617 i2c devices. Each max1617 * monitors 2 temperatures, one for one of the cpu dies and the other * for the ambient temperature. * * The max1617 is capable of being programmed with power-off * temperature values, one low limit and one high limit. These * can be controlled independently for the cpu or ambient temperature. * If a limit is violated, the power is simply shut off. The frequency * with which the max1617 does temperature sampling can be controlled * as well. * * Three fans exist inside the machine, all three are controlled with * an i2c digital to analog converter. There is a fan directed at the * two processor slots, another for the rest of the enclosure, and the * third is for the power supply. The first two fans may be speed * controlled by changing the voltage fed to them. The third fan may * only be completely off or on. The third fan is meant to only be * disabled/enabled when entering/exiting the lowest power-saving * mode of the machine. * * An environmental control kernel thread periodically monitors all * temperature sensors. Based upon the samples it will adjust the * fan speeds to try and keep the system within a certain temperature * range (the goal being to make the fans as quiet as possible without * allowing the system to get too hot). 
* * If the temperature begins to rise/fall outside of the acceptable * operating range, a periodic warning will be sent to the kernel log. * The fans will be put on full blast to attempt to deal with this * situation. After exceeding the acceptable operating range by a * certain threshold, the kernel thread will shut down the system. * Here, the thread is attempting to shut the machine down cleanly * before the hardware based power-off event is triggered. */ /* These settings are in Celsius. We use these defaults only * if we cannot interrogate the cpu-fru SEEPROM. */ struct temp_limits { s8 high_pwroff, high_shutdown, high_warn; s8 low_warn, low_shutdown, low_pwroff; }; static struct temp_limits cpu_temp_limits[2] = { { 100, 85, 80, 5, -5, -10 }, { 100, 85, 80, 5, -5, -10 }, }; static struct temp_limits amb_temp_limits[2] = { { 65, 55, 40, 5, -5, -10 }, { 65, 55, 40, 5, -5, -10 }, }; static LIST_HEAD(all_temps); static LIST_HEAD(all_fans); #define CPU_FAN_REG 0xf0 #define SYS_FAN_REG 0xf2 #define PSUPPLY_FAN_REG 0xf4 #define FAN_SPEED_MIN 0x0c #define FAN_SPEED_MAX 0x3f #define PSUPPLY_FAN_ON 0x1f #define PSUPPLY_FAN_OFF 0x00 static void set_fan_speeds(struct bbc_fan_control *fp) { /* Put temperatures into range so we don't mis-program * the hardware. */ if (fp->cpu_fan_speed < FAN_SPEED_MIN) fp->cpu_fan_speed = FAN_SPEED_MIN; if (fp->cpu_fan_speed > FAN_SPEED_MAX) fp->cpu_fan_speed = FAN_SPEED_MAX; if (fp->system_fan_speed < FAN_SPEED_MIN) fp->system_fan_speed = FAN_SPEED_MIN; if (fp->system_fan_speed > FAN_SPEED_MAX) fp->system_fan_speed = FAN_SPEED_MAX; #ifdef ENVCTRL_TRACE printk("fan%d: Changed fan speed to cpu(%02x) sys(%02x)\n", fp->index, fp->cpu_fan_speed, fp->system_fan_speed); #endif bbc_i2c_writeb(fp->client, fp->cpu_fan_speed, CPU_FAN_REG); bbc_i2c_writeb(fp->client, fp->system_fan_speed, SYS_FAN_REG); bbc_i2c_writeb(fp->client, (fp->psupply_fan_on ? 
PSUPPLY_FAN_ON : PSUPPLY_FAN_OFF), PSUPPLY_FAN_REG); } static void get_current_temps(struct bbc_cpu_temperature *tp) { tp->prev_amb_temp = tp->curr_amb_temp; bbc_i2c_readb(tp->client, (unsigned char *) &tp->curr_amb_temp, MAX1617_AMB_TEMP); tp->prev_cpu_temp = tp->curr_cpu_temp; bbc_i2c_readb(tp->client, (unsigned char *) &tp->curr_cpu_temp, MAX1617_CPU_TEMP); #ifdef ENVCTRL_TRACE printk("temp%d: cpu(%d C) amb(%d C)\n", tp->index, (int) tp->curr_cpu_temp, (int) tp->curr_amb_temp); #endif } static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp) { static int shutting_down = 0; char *type = "???"; s8 val = -1; if (shutting_down != 0) return; if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_shutdown || tp->curr_amb_temp < amb_temp_limits[tp->index].low_shutdown) { type = "ambient"; val = tp->curr_amb_temp; } else if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_shutdown || tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_shutdown) { type = "CPU"; val = tp->curr_cpu_temp; } printk(KERN_CRIT "temp%d: Outside of safe %s " "operating temperature, %d C.\n", tp->index, type, val); printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n"); shutting_down = 1; if (orderly_poweroff(true) < 0) printk(KERN_CRIT "envctrl: shutdown execution failed\n"); } #define WARN_INTERVAL (30 * HZ) static void analyze_ambient_temp(struct bbc_cpu_temperature *tp, unsigned long *last_warn, int tick) { int ret = 0; if (time_after(jiffies, (*last_warn + WARN_INTERVAL))) { if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_warn) { printk(KERN_WARNING "temp%d: " "Above safe ambient operating temperature, %d C.\n", tp->index, (int) tp->curr_amb_temp); ret = 1; } else if (tp->curr_amb_temp < amb_temp_limits[tp->index].low_warn) { printk(KERN_WARNING "temp%d: " "Below safe ambient operating temperature, %d C.\n", tp->index, (int) tp->curr_amb_temp); ret = 1; } if (ret) *last_warn = jiffies; } else if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_warn || 
tp->curr_amb_temp < amb_temp_limits[tp->index].low_warn) ret = 1; /* Now check the shutdown limits. */ if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_shutdown || tp->curr_amb_temp < amb_temp_limits[tp->index].low_shutdown) { do_envctrl_shutdown(tp); ret = 1; } if (ret) { tp->fan_todo[FAN_AMBIENT] = FAN_FULLBLAST; } else if ((tick & (8 - 1)) == 0) { s8 amb_goal_hi = amb_temp_limits[tp->index].high_warn - 10; s8 amb_goal_lo; amb_goal_lo = amb_goal_hi - 3; /* We do not try to avoid 'too cold' events. Basically we * only try to deal with over-heating and fan noise reduction. */ if (tp->avg_amb_temp < amb_goal_hi) { if (tp->avg_amb_temp >= amb_goal_lo) tp->fan_todo[FAN_AMBIENT] = FAN_SAME; else tp->fan_todo[FAN_AMBIENT] = FAN_SLOWER; } else { tp->fan_todo[FAN_AMBIENT] = FAN_FASTER; } } else { tp->fan_todo[FAN_AMBIENT] = FAN_SAME; } } static void analyze_cpu_temp(struct bbc_cpu_temperature *tp, unsigned long *last_warn, int tick) { int ret = 0; if (time_after(jiffies, (*last_warn + WARN_INTERVAL))) { if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_warn) { printk(KERN_WARNING "temp%d: " "Above safe CPU operating temperature, %d C.\n", tp->index, (int) tp->curr_cpu_temp); ret = 1; } else if (tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_warn) { printk(KERN_WARNING "temp%d: " "Below safe CPU operating temperature, %d C.\n", tp->index, (int) tp->curr_cpu_temp); ret = 1; } if (ret) *last_warn = jiffies; } else if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_warn || tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_warn) ret = 1; /* Now check the shutdown limits. 
*/ if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_shutdown || tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_shutdown) { do_envctrl_shutdown(tp); ret = 1; } if (ret) { tp->fan_todo[FAN_CPU] = FAN_FULLBLAST; } else if ((tick & (8 - 1)) == 0) { s8 cpu_goal_hi = cpu_temp_limits[tp->index].high_warn - 10; s8 cpu_goal_lo; cpu_goal_lo = cpu_goal_hi - 3; /* We do not try to avoid 'too cold' events. Basically we * only try to deal with over-heating and fan noise reduction. */ if (tp->avg_cpu_temp < cpu_goal_hi) { if (tp->avg_cpu_temp >= cpu_goal_lo) tp->fan_todo[FAN_CPU] = FAN_SAME; else tp->fan_todo[FAN_CPU] = FAN_SLOWER; } else { tp->fan_todo[FAN_CPU] = FAN_FASTER; } } else { tp->fan_todo[FAN_CPU] = FAN_SAME; } } static void analyze_temps(struct bbc_cpu_temperature *tp, unsigned long *last_warn) { tp->avg_amb_temp = (s8)((int)((int)tp->avg_amb_temp + (int)tp->curr_amb_temp) / 2); tp->avg_cpu_temp = (s8)((int)((int)tp->avg_cpu_temp + (int)tp->curr_cpu_temp) / 2); analyze_ambient_temp(tp, last_warn, tp->sample_tick); analyze_cpu_temp(tp, last_warn, tp->sample_tick); tp->sample_tick++; } static enum fan_action prioritize_fan_action(int which_fan) { struct bbc_cpu_temperature *tp; enum fan_action decision = FAN_STATE_MAX; /* Basically, prioritize what the temperature sensors * recommend we do, and perform that action on all the * fans. 
*/ list_for_each_entry(tp, &all_temps, glob_list) { if (tp->fan_todo[which_fan] == FAN_FULLBLAST) { decision = FAN_FULLBLAST; break; } if (tp->fan_todo[which_fan] == FAN_SAME && decision != FAN_FASTER) decision = FAN_SAME; else if (tp->fan_todo[which_fan] == FAN_FASTER) decision = FAN_FASTER; else if (decision != FAN_FASTER && decision != FAN_SAME && tp->fan_todo[which_fan] == FAN_SLOWER) decision = FAN_SLOWER; } if (decision == FAN_STATE_MAX) decision = FAN_SAME; return decision; } static int maybe_new_ambient_fan_speed(struct bbc_fan_control *fp) { enum fan_action decision = prioritize_fan_action(FAN_AMBIENT); int ret; if (decision == FAN_SAME) return 0; ret = 1; if (decision == FAN_FULLBLAST) { if (fp->system_fan_speed >= FAN_SPEED_MAX) ret = 0; else fp->system_fan_speed = FAN_SPEED_MAX; } else { if (decision == FAN_FASTER) { if (fp->system_fan_speed >= FAN_SPEED_MAX) ret = 0; else fp->system_fan_speed += 2; } else { int orig_speed = fp->system_fan_speed; if (orig_speed <= FAN_SPEED_MIN || orig_speed <= (fp->cpu_fan_speed - 3)) ret = 0; else fp->system_fan_speed -= 1; } } return ret; } static int maybe_new_cpu_fan_speed(struct bbc_fan_control *fp) { enum fan_action decision = prioritize_fan_action(FAN_CPU); int ret; if (decision == FAN_SAME) return 0; ret = 1; if (decision == FAN_FULLBLAST) { if (fp->cpu_fan_speed >= FAN_SPEED_MAX) ret = 0; else fp->cpu_fan_speed = FAN_SPEED_MAX; } else { if (decision == FAN_FASTER) { if (fp->cpu_fan_speed >= FAN_SPEED_MAX) ret = 0; else { fp->cpu_fan_speed += 2; if (fp->system_fan_speed < (fp->cpu_fan_speed - 3)) fp->system_fan_speed = fp->cpu_fan_speed - 3; } } else { if (fp->cpu_fan_speed <= FAN_SPEED_MIN) ret = 0; else fp->cpu_fan_speed -= 1; } } return ret; } static void maybe_new_fan_speeds(struct bbc_fan_control *fp) { int new; new = maybe_new_ambient_fan_speed(fp); new |= maybe_new_cpu_fan_speed(fp); if (new) set_fan_speeds(fp); } static void fans_full_blast(void) { struct bbc_fan_control *fp; /* Since we will not be 
monitoring things anymore, put * the fans on full blast. */ list_for_each_entry(fp, &all_fans, glob_list) { fp->cpu_fan_speed = FAN_SPEED_MAX; fp->system_fan_speed = FAN_SPEED_MAX; fp->psupply_fan_on = 1; set_fan_speeds(fp); } } #define POLL_INTERVAL (5 * 1000) static unsigned long last_warning_jiffies; static struct task_struct *kenvctrld_task; static int kenvctrld(void *__unused) { printk(KERN_INFO "bbc_envctrl: kenvctrld starting...\n"); last_warning_jiffies = jiffies - WARN_INTERVAL; for (;;) { struct bbc_cpu_temperature *tp; struct bbc_fan_control *fp; msleep_interruptible(POLL_INTERVAL); if (kthread_should_stop()) break; list_for_each_entry(tp, &all_temps, glob_list) { get_current_temps(tp); analyze_temps(tp, &last_warning_jiffies); } list_for_each_entry(fp, &all_fans, glob_list) maybe_new_fan_speeds(fp); } printk(KERN_INFO "bbc_envctrl: kenvctrld exiting...\n"); fans_full_blast(); return 0; } static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op, int temp_idx) { struct bbc_cpu_temperature *tp; tp = kzalloc(sizeof(*tp), GFP_KERNEL); if (!tp) return; tp->client = bbc_i2c_attach(bp, op); if (!tp->client) { kfree(tp); return; } tp->index = temp_idx; list_add(&tp->glob_list, &all_temps); list_add(&tp->bp_list, &bp->temps); /* Tell it to convert once every 5 seconds, clear all cfg * bits. */ bbc_i2c_writeb(tp->client, 0x00, MAX1617_WR_CFG_BYTE); bbc_i2c_writeb(tp->client, 0x02, MAX1617_WR_CVRATE_BYTE); /* Program the hard temperature limits into the chip. 
*/ bbc_i2c_writeb(tp->client, amb_temp_limits[tp->index].high_pwroff, MAX1617_WR_AMB_HIGHLIM); bbc_i2c_writeb(tp->client, amb_temp_limits[tp->index].low_pwroff, MAX1617_WR_AMB_LOWLIM); bbc_i2c_writeb(tp->client, cpu_temp_limits[tp->index].high_pwroff, MAX1617_WR_CPU_HIGHLIM); bbc_i2c_writeb(tp->client, cpu_temp_limits[tp->index].low_pwroff, MAX1617_WR_CPU_LOWLIM); get_current_temps(tp); tp->prev_cpu_temp = tp->avg_cpu_temp = tp->curr_cpu_temp; tp->prev_amb_temp = tp->avg_amb_temp = tp->curr_amb_temp; tp->fan_todo[FAN_AMBIENT] = FAN_SAME; tp->fan_todo[FAN_CPU] = FAN_SAME; } static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op, int fan_idx) { struct bbc_fan_control *fp; fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return; fp->client = bbc_i2c_attach(bp, op); if (!fp->client) { kfree(fp); return; } fp->index = fan_idx; list_add(&fp->glob_list, &all_fans); list_add(&fp->bp_list, &bp->fans); /* The i2c device controlling the fans is write-only. * So the only way to keep track of the current power * level fed to the fans is via software. Choose half * power for cpu/system and 'on' fo the powersupply fan * and set it now. 
*/ fp->psupply_fan_on = 1; fp->cpu_fan_speed = (FAN_SPEED_MAX - FAN_SPEED_MIN) / 2; fp->cpu_fan_speed += FAN_SPEED_MIN; fp->system_fan_speed = (FAN_SPEED_MAX - FAN_SPEED_MIN) / 2; fp->system_fan_speed += FAN_SPEED_MIN; set_fan_speeds(fp); } static void destroy_one_temp(struct bbc_cpu_temperature *tp) { bbc_i2c_detach(tp->client); kfree(tp); } static void destroy_all_temps(struct bbc_i2c_bus *bp) { struct bbc_cpu_temperature *tp, *tpos; list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { list_del(&tp->bp_list); list_del(&tp->glob_list); destroy_one_temp(tp); } } static void destroy_one_fan(struct bbc_fan_control *fp) { bbc_i2c_detach(fp->client); kfree(fp); } static void destroy_all_fans(struct bbc_i2c_bus *bp) { struct bbc_fan_control *fp, *fpos; list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) { list_del(&fp->bp_list); list_del(&fp->glob_list); destroy_one_fan(fp); } } int bbc_envctrl_init(struct bbc_i2c_bus *bp) { struct platform_device *op; int temp_index = 0; int fan_index = 0; int devidx = 0; while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) { if (!strcmp(op->dev.of_node->name, "temperature")) attach_one_temp(bp, op, temp_index++); if (!strcmp(op->dev.of_node->name, "fan-control")) attach_one_fan(bp, op, fan_index++); } if (temp_index != 0 && fan_index != 0) { kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); if (IS_ERR(kenvctrld_task)) { int err = PTR_ERR(kenvctrld_task); kenvctrld_task = NULL; destroy_all_temps(bp); destroy_all_fans(bp); return err; } } return 0; } void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) { if (kenvctrld_task) kthread_stop(kenvctrld_task); destroy_all_temps(bp); destroy_all_fans(bp); }
gpl-2.0
PyYoshi/android_kernel_asus_a500kl
arch/arm/mach-exynos/setup-fimc.c
9757
1056
/* * Copyright (C) 2011 Samsung Electronics Co., Ltd. * * Exynos4 camera interface GPIO configuration. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <plat/gpio-cfg.h> #include <plat/camport.h> int exynos4_fimc_setup_gpio(enum s5p_camport_id id) { u32 gpio8, gpio5; u32 sfn; int ret; switch (id) { case S5P_CAMPORT_A: gpio8 = EXYNOS4_GPJ0(0); /* PCLK, VSYNC, HREF, DATA[0:4] */ gpio5 = EXYNOS4_GPJ1(0); /* DATA[5:7], CLKOUT, FIELD */ sfn = S3C_GPIO_SFN(2); break; case S5P_CAMPORT_B: gpio8 = EXYNOS4_GPE0(0); /* DATA[0:7] */ gpio5 = EXYNOS4_GPE1(0); /* PCLK, VSYNC, HREF, CLKOUT, FIELD */ sfn = S3C_GPIO_SFN(3); break; default: WARN(1, "Wrong camport id: %d\n", id); return -EINVAL; } ret = s3c_gpio_cfgall_range(gpio8, 8, sfn, S3C_GPIO_PULL_UP); if (ret) return ret; return s3c_gpio_cfgall_range(gpio5, 5, sfn, S3C_GPIO_PULL_UP); }
gpl-2.0
aapav01/android_kernel_samsung_j7elte
fs/ocfs2/dlm/dlmconvert.c
10269
15658
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmconvert.c * * underlying calls for lock conversion * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" /* NOTE: __dlmconvert_master is the only function in here that * needs a spinlock held on entry (res->spinlock) and it is the * only one that holds a lock on exit (res->spinlock). * All other functions in here need no locks and drop all of * the locks that they acquire. 
*/ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread); static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type); /* * this is only called directly by dlmlock(), and only when the * local node is the owner of the lockres * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: see __dlmconvert_master */ enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { int call_ast = 0, kick_thread = 0; enum dlm_status status; spin_lock(&res->spinlock); /* we are not in a network handler, this is fine */ __dlm_wait_on_lockres(res); __dlm_lockres_reserve_ast(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); if (status != DLM_NORMAL && status != DLM_NOTQUEUED) dlm_error(status); /* either queue the ast or release it */ if (call_ast) dlm_queue_ast(dlm, lock); else dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); return status; } /* performs lock conversion at the lockres master site * locking: * caller needs: res->spinlock * taken: takes and drops lock->spinlock * held on exit: res->spinlock * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED * call_ast: whether ast should be called for this lock * kick_thread: whether dlm_kick_thread should be called */ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread) { enum dlm_status status = DLM_NORMAL; struct list_head *iter; struct dlm_lock *tmplock=NULL; assert_spin_locked(&res->spinlock); 
mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", lock->ml.type, lock->ml.convert_type, type); spin_lock(&lock->spinlock); /* already converting? */ if (lock->ml.convert_type != LKM_IVMODE) { mlog(ML_ERROR, "attempted to convert a lock with a lock " "conversion pending\n"); status = DLM_DENIED; goto unlock_exit; } /* must be on grant queue to convert */ if (!dlm_lock_on_list(&res->granted, lock)) { mlog(ML_ERROR, "attempted to convert a lock not on grant " "queue\n"); status = DLM_DENIED; goto unlock_exit; } if (flags & LKM_VALBLK) { switch (lock->ml.type) { case LKM_EXMODE: /* EX + LKM_VALBLK + convert == set lvb */ mlog(0, "will set lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_PUT_LVB; break; case LKM_PRMODE: case LKM_NLMODE: /* refetch if new level is not NL */ if (type > LKM_NLMODE) { mlog(0, "will fetch new value into " "lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_GET_LVB; } else { mlog(0, "will NOT fetch new value " "into lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); flags &= ~(LKM_VALBLK); } break; } } /* in-place downconvert? 
*/ if (type <= lock->ml.type) goto grant; /* upconvert from here on */ status = DLM_NORMAL; list_for_each(iter, &res->granted) { tmplock = list_entry(iter, struct dlm_lock, list); if (tmplock == lock) continue; if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; } list_for_each(iter, &res->converting) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; /* existing conversion requests take precedence */ if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) goto switch_queues; } /* fall thru to grant */ grant: mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, res->lockname.name, dlm_lock_mode_name(type)); /* immediately grant the new lock type */ lock->lksb->status = DLM_NORMAL; if (lock->ml.node == dlm->node_num) mlog(0, "doing in-place convert for nonlocal lock\n"); lock->ml.type = type; if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; switch_queues: if (flags & LKM_NOQUEUE) { mlog(0, "failed to convert NOQUEUE lock %.*s from " "%d to %d...\n", res->lockname.len, res->lockname.name, lock->ml.type, type); status = DLM_NOTQUEUED; goto unlock_exit; } mlog(0, "res %.*s, queueing...\n", res->lockname.len, res->lockname.name); lock->ml.convert_type = type; /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); unlock_exit: spin_unlock(&lock->spinlock); if (status == DLM_DENIED) { __dlm_print_one_lock_resource(res); } if (status == DLM_NORMAL) *kick_thread = 1; return status; } void dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* do not alter lock refcount. switching lists. 
*/ list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } /* messages the master site to do lock conversion * locking: * caller needs: none * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS * held on exit: none * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node */ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(0, "bailing out early since res is RECOVERING " "on secondary queue\n"); /* __dlm_print_one_lock_resource(res); */ status = DLM_RECOVERING; goto bail; } /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); if (lock->ml.convert_type != LKM_IVMODE) { __dlm_print_one_lock_resource(res); mlog(ML_ERROR, "converting a remote lock that is already " "converting! (cookie=%u:%llu, conv=%d)\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ml.convert_type); status = DLM_DENIED; goto bail; } res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); lock->convert_pending = 1; lock->ml.convert_type = type; if (flags & LKM_VALBLK) { if (lock->ml.type == LKM_EXMODE) { flags |= LKM_PUT_LVB; lock->lksb->flags |= DLM_LKSB_PUT_LVB; } else { if (lock->ml.convert_type == LKM_NLMODE) flags &= ~LKM_VALBLK; else { flags |= LKM_GET_LVB; lock->lksb->flags |= DLM_LKSB_GET_LVB; } } } spin_unlock(&res->spinlock); /* no locks held here. * need to wait for a reply as to whether it got queued or not. 
*/ status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->convert_pending = 0; /* if it failed, move it back to granted queue */ if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); } bail: spin_unlock(&res->spinlock); /* TODO: should this be a wake_one? */ /* wake up any IN_PROGRESS waiters */ wake_up(&res->wq); return status; } /* sends DLM_CONVERT_LOCK_MSG to master site * locking: * caller needs: none * taken: none * held on exit: none * returns: DLM_NOLOCKMGR, status from remote node */ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { struct dlm_convert_lock convert; int tmpret; enum dlm_status ret; int status = 0; struct kvec vec[2]; size_t veclen = 1; mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); memset(&convert, 0, sizeof(struct dlm_convert_lock)); convert.node_idx = dlm->node_num; convert.requested_type = type; convert.cookie = lock->ml.cookie; convert.namelen = res->lockname.len; convert.flags = cpu_to_be32(flags); memcpy(convert.name, res->lockname.name, convert.namelen); vec[0].iov_len = sizeof(struct dlm_convert_lock); vec[0].iov_base = &convert; if (flags & LKM_PUT_LVB) { /* extra data to send if we are updating lvb */ vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; veclen++; } tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, vec, veclen, res->owner, &status); if (tmpret >= 0) { // successfully sent and received ret = status; // this is already a dlm_status if (ret == DLM_RECOVERING) { mlog(0, "node %u returned DLM_RECOVERING from convert " "message!\n", res->owner); } else if (ret == DLM_MIGRATING) { mlog(0, "node %u returned DLM_MIGRATING from convert " "message!\n", res->owner); } else if (ret == DLM_FORWARD) { mlog(0, "node %u returned DLM_FORWARD from 
convert " "message!\n", res->owner); } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) dlm_error(ret); } else { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key, res->owner); if (dlm_is_host_down(tmpret)) { /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " "from convert message!\n", res->owner); } else { ret = dlm_err_to_dlm_status(tmpret); } } return ret; } /* handler for DLM_CONVERT_LOCK_MSG on master site * locking: * caller needs: none * taken: takes and drop res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, * status from __dlmconvert_master */ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; struct dlm_lock_resource *res = NULL; struct list_head *iter; struct dlm_lock *lock = NULL; struct dlm_lockstatus *lksb; enum dlm_status status = DLM_NORMAL; u32 flags; int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); return DLM_REJECTED; } mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), "Domain %s not fully joined!\n", dlm->name); if (cnv->namelen > DLM_LOCKID_NAME_MAX) { status = DLM_IVBUFLEN; dlm_error(status); goto leave; } flags = be32_to_cpu(cnv->flags); if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == (LKM_PUT_LVB|LKM_GET_LVB)) { mlog(ML_ERROR, "both PUT and GET lvb specified\n"); status = DLM_BADARGS; goto leave; } mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : (flags & LKM_GET_LVB ? 
"get lvb" : "none")); status = DLM_IVLOCKID; res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); if (!res) { dlm_error(status); goto leave; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status != DLM_NORMAL) { spin_unlock(&res->spinlock); dlm_error(status); goto leave; } list_for_each(iter, &res->granted) { lock = list_entry(iter, struct dlm_lock, list); if (lock->ml.cookie == cnv->cookie && lock->ml.node == cnv->node_idx) { dlm_lock_get(lock); break; } lock = NULL; } spin_unlock(&res->spinlock); if (!lock) { status = DLM_IVLOCKID; mlog(ML_ERROR, "did not find lock to convert on grant queue! " "cookie=%u:%llu\n", dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie))); dlm_print_one_lock_resource(res); goto leave; } /* found the lock */ lksb = lock->lksb; /* see if caller needed to get/put lvb */ if (flags & LKM_PUT_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_PUT_LVB; memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); } else if (flags & LKM_GET_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_GET_LVB; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status == DLM_NORMAL) { __dlm_lockres_reserve_ast(res); ast_reserved = 1; res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, cnv->requested_type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; wake = 1; } spin_unlock(&res->spinlock); if (wake) wake_up(&res->wq); if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } leave: if (lock) dlm_lock_put(lock); /* either queue the ast or release it, if reserved */ if (call_ast) dlm_queue_ast(dlm, lock); else if (ast_reserved) dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); if (res) dlm_lockres_put(res); dlm_put(dlm); 
return status; }
gpl-2.0
PerthCharles/tcpcomment
linux-3.10/fs/ocfs2/dlm/dlmconvert.c
10269
15658
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmconvert.c * * underlying calls for lock conversion * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" /* NOTE: __dlmconvert_master is the only function in here that * needs a spinlock held on entry (res->spinlock) and it is the * only one that holds a lock on exit (res->spinlock). * All other functions in here need no locks and drop all of * the locks that they acquire. 
*/ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread); static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type); /* * this is only called directly by dlmlock(), and only when the * local node is the owner of the lockres * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: see __dlmconvert_master */ enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { int call_ast = 0, kick_thread = 0; enum dlm_status status; spin_lock(&res->spinlock); /* we are not in a network handler, this is fine */ __dlm_wait_on_lockres(res); __dlm_lockres_reserve_ast(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); if (status != DLM_NORMAL && status != DLM_NOTQUEUED) dlm_error(status); /* either queue the ast or release it */ if (call_ast) dlm_queue_ast(dlm, lock); else dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); return status; } /* performs lock conversion at the lockres master site * locking: * caller needs: res->spinlock * taken: takes and drops lock->spinlock * held on exit: res->spinlock * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED * call_ast: whether ast should be called for this lock * kick_thread: whether dlm_kick_thread should be called */ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread) { enum dlm_status status = DLM_NORMAL; struct list_head *iter; struct dlm_lock *tmplock=NULL; assert_spin_locked(&res->spinlock); 
mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", lock->ml.type, lock->ml.convert_type, type); spin_lock(&lock->spinlock); /* already converting? */ if (lock->ml.convert_type != LKM_IVMODE) { mlog(ML_ERROR, "attempted to convert a lock with a lock " "conversion pending\n"); status = DLM_DENIED; goto unlock_exit; } /* must be on grant queue to convert */ if (!dlm_lock_on_list(&res->granted, lock)) { mlog(ML_ERROR, "attempted to convert a lock not on grant " "queue\n"); status = DLM_DENIED; goto unlock_exit; } if (flags & LKM_VALBLK) { switch (lock->ml.type) { case LKM_EXMODE: /* EX + LKM_VALBLK + convert == set lvb */ mlog(0, "will set lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_PUT_LVB; break; case LKM_PRMODE: case LKM_NLMODE: /* refetch if new level is not NL */ if (type > LKM_NLMODE) { mlog(0, "will fetch new value into " "lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_GET_LVB; } else { mlog(0, "will NOT fetch new value " "into lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); flags &= ~(LKM_VALBLK); } break; } } /* in-place downconvert? 
*/ if (type <= lock->ml.type) goto grant; /* upconvert from here on */ status = DLM_NORMAL; list_for_each(iter, &res->granted) { tmplock = list_entry(iter, struct dlm_lock, list); if (tmplock == lock) continue; if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; } list_for_each(iter, &res->converting) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; /* existing conversion requests take precedence */ if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) goto switch_queues; } /* fall thru to grant */ grant: mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, res->lockname.name, dlm_lock_mode_name(type)); /* immediately grant the new lock type */ lock->lksb->status = DLM_NORMAL; if (lock->ml.node == dlm->node_num) mlog(0, "doing in-place convert for nonlocal lock\n"); lock->ml.type = type; if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; switch_queues: if (flags & LKM_NOQUEUE) { mlog(0, "failed to convert NOQUEUE lock %.*s from " "%d to %d...\n", res->lockname.len, res->lockname.name, lock->ml.type, type); status = DLM_NOTQUEUED; goto unlock_exit; } mlog(0, "res %.*s, queueing...\n", res->lockname.len, res->lockname.name); lock->ml.convert_type = type; /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); unlock_exit: spin_unlock(&lock->spinlock); if (status == DLM_DENIED) { __dlm_print_one_lock_resource(res); } if (status == DLM_NORMAL) *kick_thread = 1; return status; } void dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* do not alter lock refcount. switching lists. 
*/ list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } /* messages the master site to do lock conversion * locking: * caller needs: none * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS * held on exit: none * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node */ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(0, "bailing out early since res is RECOVERING " "on secondary queue\n"); /* __dlm_print_one_lock_resource(res); */ status = DLM_RECOVERING; goto bail; } /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); if (lock->ml.convert_type != LKM_IVMODE) { __dlm_print_one_lock_resource(res); mlog(ML_ERROR, "converting a remote lock that is already " "converting! (cookie=%u:%llu, conv=%d)\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ml.convert_type); status = DLM_DENIED; goto bail; } res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); lock->convert_pending = 1; lock->ml.convert_type = type; if (flags & LKM_VALBLK) { if (lock->ml.type == LKM_EXMODE) { flags |= LKM_PUT_LVB; lock->lksb->flags |= DLM_LKSB_PUT_LVB; } else { if (lock->ml.convert_type == LKM_NLMODE) flags &= ~LKM_VALBLK; else { flags |= LKM_GET_LVB; lock->lksb->flags |= DLM_LKSB_GET_LVB; } } } spin_unlock(&res->spinlock); /* no locks held here. * need to wait for a reply as to whether it got queued or not. 
*/
	/* (tail of the caller above, which begins before this chunk:
	 * send the convert request, then clear IN_PROGRESS/convert_pending
	 * under the resource spinlock and roll back on failure.) */
	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->convert_pending = 0;
	/* if it failed, move it back to granted queue.
	 * DLM_NOTQUEUED is an expected "try again" answer, not an error,
	 * so it is not logged via dlm_error(). */
	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		dlm_revert_pending_convert(res, lock);
	}
bail:
	spin_unlock(&res->spinlock);
	/* TODO: should this be a wake_one? */
	/* wake up any IN_PROGRESS waiters */
	wake_up(&res->wq);
	return status;
}

/* sends DLM_CONVERT_LOCK_MSG to master site
 * locking:
 *   caller needs: none
 *   taken:        none
 *   held on exit: none
 * returns: DLM_NOLOCKMGR, status from remote node
 */
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type)
{
	struct dlm_convert_lock convert;	/* wire-format request, sent by value */
	int tmpret;				/* o2net transport-level result */
	enum dlm_status ret;
	int status = 0;				/* dlm_status returned by the remote handler */
	struct kvec vec[2];
	size_t veclen = 1;			/* grows to 2 when an lvb payload is attached */

	mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);

	/* build the network message from the local lock/resource state */
	memset(&convert, 0, sizeof(struct dlm_convert_lock));
	convert.node_idx = dlm->node_num;
	convert.requested_type = type;
	convert.cookie = lock->ml.cookie;
	convert.namelen = res->lockname.len;
	convert.flags = cpu_to_be32(flags);
	memcpy(convert.name, res->lockname.name, convert.namelen);

	vec[0].iov_len = sizeof(struct dlm_convert_lock);
	vec[0].iov_base = &convert;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
					vec, veclen, res->owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		ret = status;  // this is already a dlm_status
		/* transient master-side states are logged but passed straight
		 * back to the caller to retry / redirect as appropriate */
		if (ret == DLM_RECOVERING) {
			mlog(0, "node %u returned DLM_RECOVERING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_MIGRATING) {
			mlog(0, "node %u returned DLM_MIGRATING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_FORWARD) {
			mlog(0, "node %u returned DLM_FORWARD from convert "
			     "message!\n", res->owner);
		} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
			dlm_error(ret);
	} else {
		/* transport failure: the message may never have arrived */
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
		     res->owner);
		if (dlm_is_host_down(tmpret)) {
			/* instead of logging the same network error over
			 * and over, sleep here and wait for the heartbeat
			 * to notice the node is dead. times out after 5s. */
			dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from convert message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/* handler for DLM_CONVERT_LOCK_MSG on master site
 * locking:
 *   caller needs: none
 *   taken:        takes and drop res->spinlock
 *   held on exit: none
 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
 *          status from __dlmconvert_master
 */
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	struct dlm_lockstatus *lksb;
	enum dlm_status status = DLM_NORMAL;
	u32 flags;
	int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;

	/* take a domain reference; refuse requests while the domain is
	 * shutting down */
	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	/* validate the (network-supplied) name length before using it */
	if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
		status = DLM_IVBUFLEN;
		dlm_error(status);
		goto leave;
	}

	flags = be32_to_cpu(cnv->flags);

	/* PUT and GET of the lvb are mutually exclusive */
	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		status = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL) {
		spin_unlock(&res->spinlock);
		dlm_error(status);
		goto leave;
	}
	/* find the requester's lock on the granted queue by (cookie, node);
	 * a matching lock gets an extra ref held until "leave" */
	list_for_each(iter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.cookie == cnv->cookie &&
		    lock->ml.node == cnv->node_idx) {
			dlm_lock_get(lock);
			break;
		}
		lock = NULL;
	}
	spin_unlock(&res->spinlock);
	if (!lock) {
		status = DLM_IVLOCKID;
		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
		     "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
		dlm_print_one_lock_resource(res);
		goto leave;
	}

	/* found the lock */
	lksb = lock->lksb;

	/* see if caller needed to get/put lvb */
	if (flags & LKM_PUT_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
	} else if (flags & LKM_GET_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_GET_LVB;
	}

	/* re-check resource state: it may have changed while the spinlock
	 * was dropped above.  Only attempt the convert if still NORMAL. */
	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status == DLM_NORMAL) {
		/* reserve an AST slot before converting; released below if
		 * __dlmconvert_master did not schedule one */
		__dlm_lockres_reserve_ast(res);
		ast_reserved = 1;
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
		status = __dlmconvert_master(dlm, res, lock, flags,
					     cnv->requested_type,
					     &call_ast, &kick_thread);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		wake = 1;
	}
	spin_unlock(&res->spinlock);
	if (wake)
		wake_up(&res->wq);

	/* on failure, undo the lvb flag set earlier */
	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
	}

leave:
	if (lock)
		dlm_lock_put(lock);

	/* either queue the ast or release it, if reserved */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else if (ast_reserved)
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}
gpl-2.0
vibhu0009/android_kernel_cyanogen_msm8916
drivers/video/aty/radeon_pm.c
10525
89387
/* * drivers/video/aty/radeon_pm.c * * Copyright 2003,2004 Ben. Herrenschmidt <benh@kernel.crashing.org> * Copyright 2004 Paul Mackerras <paulus@samba.org> * * This is the power management code for ATI radeon chipsets. It contains * some dynamic clock PM enable/disable code similar to what X.org does, * some D2-state (APM-style) sleep/wakeup code for use on some PowerMacs, * and the necessary bits to re-initialize from scratch a few chips found * on PowerMacs as well. The later could be extended to more platforms * provided the memory controller configuration code be made more generic, * and you can get the proper mode register commands for your RAMs. * Those things may be found in the BIOS image... */ #include "radeonfb.h" #include <linux/console.h> #include <linux/agp_backend.h> #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/prom.h> #include <asm/pmac_feature.h> #endif #include "ati_ids.h" /* * Workarounds for bugs in PC laptops: * - enable D2 sleep in some IBM Thinkpads * - special case for Samsung P35 * * Whitelist by subsystem vendor/device because * its the subsystem vendor's fault! 
*/ #if defined(CONFIG_PM) && defined(CONFIG_X86) static void radeon_reinitialize_M10(struct radeonfb_info *rinfo); struct radeon_device_id { const char *ident; /* (arbitrary) Name */ const unsigned short subsystem_vendor; /* Subsystem Vendor ID */ const unsigned short subsystem_device; /* Subsystem Device ID */ const enum radeon_pm_mode pm_mode_modifier; /* modify pm_mode */ const reinit_function_ptr new_reinit_func; /* changed reinit_func */ }; #define BUGFIX(model, sv, sd, pm, fn) { \ .ident = model, \ .subsystem_vendor = sv, \ .subsystem_device = sd, \ .pm_mode_modifier = pm, \ .new_reinit_func = fn \ } static struct radeon_device_id radeon_workaround_list[] = { BUGFIX("IBM Thinkpad R32", PCI_VENDOR_ID_IBM, 0x1905, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad R40", PCI_VENDOR_ID_IBM, 0x0526, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad R40", PCI_VENDOR_ID_IBM, 0x0527, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad R50/R51/T40/T41", PCI_VENDOR_ID_IBM, 0x0531, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad R51/T40/T41/T42", PCI_VENDOR_ID_IBM, 0x0530, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad T30", PCI_VENDOR_ID_IBM, 0x0517, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad T40p", PCI_VENDOR_ID_IBM, 0x054d, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad T42", PCI_VENDOR_ID_IBM, 0x0550, radeon_pm_d2, NULL), BUGFIX("IBM Thinkpad X31/X32", PCI_VENDOR_ID_IBM, 0x052f, radeon_pm_d2, NULL), BUGFIX("Samsung P35", PCI_VENDOR_ID_SAMSUNG, 0xc00c, radeon_pm_off, radeon_reinitialize_M10), BUGFIX("Acer Aspire 2010", PCI_VENDOR_ID_AI, 0x0061, radeon_pm_off, radeon_reinitialize_M10), BUGFIX("Acer Travelmate 290D/292LMi", PCI_VENDOR_ID_AI, 0x005a, radeon_pm_off, radeon_reinitialize_M10), { .ident = NULL } }; static int radeon_apply_workarounds(struct radeonfb_info *rinfo) { struct radeon_device_id *id; for (id = radeon_workaround_list; id->ident != NULL; id++ ) if ((id->subsystem_vendor == rinfo->pdev->subsystem_vendor ) && (id->subsystem_device == rinfo->pdev->subsystem_device )) { /* we found a device that 
requires workaround */ printk(KERN_DEBUG "radeonfb: %s detected" ", enabling workaround\n", id->ident); rinfo->pm_mode |= id->pm_mode_modifier; if (id->new_reinit_func != NULL) rinfo->reinit_func = id->new_reinit_func; return 1; } return 0; /* not found */ } #else /* defined(CONFIG_PM) && defined(CONFIG_X86) */ static inline int radeon_apply_workarounds(struct radeonfb_info *rinfo) { return 0; } #endif /* defined(CONFIG_PM) && defined(CONFIG_X86) */ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo) { u32 tmp; /* RV100 */ if ((rinfo->family == CHIP_FAMILY_RV100) && (!rinfo->is_mobility)) { if (rinfo->has_CRTC2) { tmp = INPLL(pllSCLK_CNTL); tmp &= ~SCLK_CNTL__DYN_STOP_LAT_MASK; tmp |= SCLK_CNTL__CP_MAX_DYN_STOP_LAT | SCLK_CNTL__FORCEON_MASK; OUTPLL(pllSCLK_CNTL, tmp); } tmp = INPLL(pllMCLK_CNTL); tmp |= (MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB | MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB | MCLK_CNTL__FORCE_AIC | MCLK_CNTL__FORCE_MC); OUTPLL(pllMCLK_CNTL, tmp); return; } /* R100 */ if (!rinfo->has_CRTC2) { tmp = INPLL(pllSCLK_CNTL); tmp |= (SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_E2 | SCLK_CNTL__FORCE_SE | SCLK_CNTL__FORCE_IDCT | SCLK_CNTL__FORCE_VIP | SCLK_CNTL__FORCE_RE | SCLK_CNTL__FORCE_PB | SCLK_CNTL__FORCE_TAM | SCLK_CNTL__FORCE_TDM | SCLK_CNTL__FORCE_RB); OUTPLL(pllSCLK_CNTL, tmp); return; } /* RV350 (M10/M11) */ if (rinfo->family == CHIP_FAMILY_RV350) { /* for RV350/M10/M11, no delays are required. 
*/ tmp = INPLL(pllSCLK_CNTL2); tmp |= (SCLK_CNTL2__R300_FORCE_TCL | SCLK_CNTL2__R300_FORCE_GA | SCLK_CNTL2__R300_FORCE_CBA); OUTPLL(pllSCLK_CNTL2, tmp); tmp = INPLL(pllSCLK_CNTL); tmp |= (SCLK_CNTL__FORCE_DISP2 | SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_E2 | SCLK_CNTL__R300_FORCE_VAP | SCLK_CNTL__FORCE_IDCT | SCLK_CNTL__FORCE_VIP | SCLK_CNTL__R300_FORCE_SR | SCLK_CNTL__R300_FORCE_PX | SCLK_CNTL__R300_FORCE_TX | SCLK_CNTL__R300_FORCE_US | SCLK_CNTL__FORCE_TV_SCLK | SCLK_CNTL__R300_FORCE_SU | SCLK_CNTL__FORCE_OV0); OUTPLL(pllSCLK_CNTL, tmp); tmp = INPLL(pllSCLK_MORE_CNTL); tmp |= (SCLK_MORE_CNTL__FORCE_DISPREGS | SCLK_MORE_CNTL__FORCE_MC_GUI | SCLK_MORE_CNTL__FORCE_MC_HOST); OUTPLL(pllSCLK_MORE_CNTL, tmp); tmp = INPLL(pllMCLK_CNTL); tmp |= (MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB | MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB | MCLK_CNTL__FORCE_MC); OUTPLL(pllMCLK_CNTL, tmp); tmp = INPLL(pllVCLK_ECP_CNTL); tmp &= ~(VCLK_ECP_CNTL__PIXCLK_ALWAYS_ONb | VCLK_ECP_CNTL__PIXCLK_DAC_ALWAYS_ONb | VCLK_ECP_CNTL__R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF); OUTPLL(pllVCLK_ECP_CNTL, tmp); tmp = INPLL(pllPIXCLKS_CNTL); tmp &= ~(PIXCLKS_CNTL__PIX2CLK_ALWAYS_ONb | PIXCLKS_CNTL__PIX2CLK_DAC_ALWAYS_ONb | PIXCLKS_CNTL__DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb | PIXCLKS_CNTL__R300_DVOCLK_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_BLEND_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_GV_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_DVO_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_LVDS_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_TMDS_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); OUTPLL(pllPIXCLKS_CNTL, tmp); return; } /* Default */ /* Force Core Clocks */ tmp = INPLL(pllSCLK_CNTL); tmp |= (SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_E2); /* XFree doesn't do that case, but we had this code from Apple and it * seem necessary for proper 
suspend/resume operations */ if (rinfo->is_mobility) { tmp |= SCLK_CNTL__FORCE_HDP| SCLK_CNTL__FORCE_DISP1| SCLK_CNTL__FORCE_DISP2| SCLK_CNTL__FORCE_TOP| SCLK_CNTL__FORCE_SE| SCLK_CNTL__FORCE_IDCT| SCLK_CNTL__FORCE_VIP| SCLK_CNTL__FORCE_PB| SCLK_CNTL__FORCE_RE| SCLK_CNTL__FORCE_TAM| SCLK_CNTL__FORCE_TDM| SCLK_CNTL__FORCE_RB| SCLK_CNTL__FORCE_TV_SCLK| SCLK_CNTL__FORCE_SUBPIC| SCLK_CNTL__FORCE_OV0; } else if (rinfo->family == CHIP_FAMILY_R300 || rinfo->family == CHIP_FAMILY_R350) { tmp |= SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | SCLK_CNTL__FORCE_DISP2 | SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_IDCT | SCLK_CNTL__FORCE_VIP; } OUTPLL(pllSCLK_CNTL, tmp); radeon_msleep(16); if (rinfo->family == CHIP_FAMILY_R300 || rinfo->family == CHIP_FAMILY_R350) { tmp = INPLL(pllSCLK_CNTL2); tmp |= SCLK_CNTL2__R300_FORCE_TCL | SCLK_CNTL2__R300_FORCE_GA | SCLK_CNTL2__R300_FORCE_CBA; OUTPLL(pllSCLK_CNTL2, tmp); radeon_msleep(16); } tmp = INPLL(pllCLK_PIN_CNTL); tmp &= ~CLK_PIN_CNTL__SCLK_DYN_START_CNTL; OUTPLL(pllCLK_PIN_CNTL, tmp); radeon_msleep(15); if (rinfo->is_IGP) { /* Weird ... X is _un_ forcing clocks here, I think it's * doing backward. Imitate it for now... */ tmp = INPLL(pllMCLK_CNTL); tmp &= ~(MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_YCLKA); OUTPLL(pllMCLK_CNTL, tmp); radeon_msleep(16); } /* Hrm... 
same shit, X doesn't do that but I have to */ else if (rinfo->is_mobility) { tmp = INPLL(pllMCLK_CNTL); tmp |= (MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB | MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB); OUTPLL(pllMCLK_CNTL, tmp); radeon_msleep(16); tmp = INPLL(pllMCLK_MISC); tmp &= ~(MCLK_MISC__MC_MCLK_MAX_DYN_STOP_LAT| MCLK_MISC__IO_MCLK_MAX_DYN_STOP_LAT| MCLK_MISC__MC_MCLK_DYN_ENABLE| MCLK_MISC__IO_MCLK_DYN_ENABLE); OUTPLL(pllMCLK_MISC, tmp); radeon_msleep(15); } if (rinfo->is_mobility) { tmp = INPLL(pllSCLK_MORE_CNTL); tmp |= SCLK_MORE_CNTL__FORCE_DISPREGS| SCLK_MORE_CNTL__FORCE_MC_GUI| SCLK_MORE_CNTL__FORCE_MC_HOST; OUTPLL(pllSCLK_MORE_CNTL, tmp); radeon_msleep(16); } tmp = INPLL(pllPIXCLKS_CNTL); tmp &= ~(PIXCLKS_CNTL__PIXCLK_GV_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_BLEND_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_DIG_TMDS_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_LVDS_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_TMDS_ALWAYS_ONb| PIXCLKS_CNTL__PIX2CLK_ALWAYS_ONb| PIXCLKS_CNTL__PIX2CLK_DAC_ALWAYS_ONb); OUTPLL(pllPIXCLKS_CNTL, tmp); radeon_msleep(16); tmp = INPLL( pllVCLK_ECP_CNTL); tmp &= ~(VCLK_ECP_CNTL__PIXCLK_ALWAYS_ONb | VCLK_ECP_CNTL__PIXCLK_DAC_ALWAYS_ONb); OUTPLL( pllVCLK_ECP_CNTL, tmp); radeon_msleep(16); } static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo) { u32 tmp; /* R100 */ if (!rinfo->has_CRTC2) { tmp = INPLL(pllSCLK_CNTL); if ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) > CFG_ATI_REV_A13) tmp &= ~(SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_RB); tmp &= ~(SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_SE | SCLK_CNTL__FORCE_IDCT | SCLK_CNTL__FORCE_RE | SCLK_CNTL__FORCE_PB | SCLK_CNTL__FORCE_TAM | SCLK_CNTL__FORCE_TDM); OUTPLL(pllSCLK_CNTL, tmp); return; } /* M10/M11 */ if (rinfo->family == CHIP_FAMILY_RV350) { tmp = INPLL(pllSCLK_CNTL2); tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL | SCLK_CNTL2__R300_FORCE_GA | SCLK_CNTL2__R300_FORCE_CBA); tmp |= (SCLK_CNTL2__R300_TCL_MAX_DYN_STOP_LAT | SCLK_CNTL2__R300_GA_MAX_DYN_STOP_LAT | 
SCLK_CNTL2__R300_CBA_MAX_DYN_STOP_LAT); OUTPLL(pllSCLK_CNTL2, tmp); tmp = INPLL(pllSCLK_CNTL); tmp &= ~(SCLK_CNTL__FORCE_DISP2 | SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_E2 | SCLK_CNTL__R300_FORCE_VAP | SCLK_CNTL__FORCE_IDCT | SCLK_CNTL__FORCE_VIP | SCLK_CNTL__R300_FORCE_SR | SCLK_CNTL__R300_FORCE_PX | SCLK_CNTL__R300_FORCE_TX | SCLK_CNTL__R300_FORCE_US | SCLK_CNTL__FORCE_TV_SCLK | SCLK_CNTL__R300_FORCE_SU | SCLK_CNTL__FORCE_OV0); tmp |= SCLK_CNTL__DYN_STOP_LAT_MASK; OUTPLL(pllSCLK_CNTL, tmp); tmp = INPLL(pllSCLK_MORE_CNTL); tmp &= ~SCLK_MORE_CNTL__FORCEON; tmp |= SCLK_MORE_CNTL__DISPREGS_MAX_DYN_STOP_LAT | SCLK_MORE_CNTL__MC_GUI_MAX_DYN_STOP_LAT | SCLK_MORE_CNTL__MC_HOST_MAX_DYN_STOP_LAT; OUTPLL(pllSCLK_MORE_CNTL, tmp); tmp = INPLL(pllVCLK_ECP_CNTL); tmp |= (VCLK_ECP_CNTL__PIXCLK_ALWAYS_ONb | VCLK_ECP_CNTL__PIXCLK_DAC_ALWAYS_ONb); OUTPLL(pllVCLK_ECP_CNTL, tmp); tmp = INPLL(pllPIXCLKS_CNTL); tmp |= (PIXCLKS_CNTL__PIX2CLK_ALWAYS_ONb | PIXCLKS_CNTL__PIX2CLK_DAC_ALWAYS_ONb | PIXCLKS_CNTL__DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb | PIXCLKS_CNTL__R300_DVOCLK_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_BLEND_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_GV_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_DVO_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_LVDS_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_TMDS_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | PIXCLKS_CNTL__R300_P2G2CLK_DAC_ALWAYS_ONb); OUTPLL(pllPIXCLKS_CNTL, tmp); tmp = INPLL(pllMCLK_MISC); tmp |= (MCLK_MISC__MC_MCLK_DYN_ENABLE | MCLK_MISC__IO_MCLK_DYN_ENABLE); OUTPLL(pllMCLK_MISC, tmp); tmp = INPLL(pllMCLK_CNTL); tmp |= (MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB); tmp &= ~(MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB | MCLK_CNTL__FORCE_MC); /* Some releases of vbios have set DISABLE_MC_MCLKA * and DISABLE_MC_MCLKB bits in the vbios table. 
Setting these * bits will cause H/W hang when reading video memory with dynamic * clocking enabled. */ if ((tmp & MCLK_CNTL__R300_DISABLE_MC_MCLKA) && (tmp & MCLK_CNTL__R300_DISABLE_MC_MCLKB)) { /* If both bits are set, then check the active channels */ tmp = INPLL(pllMCLK_CNTL); if (rinfo->vram_width == 64) { if (INREG(MEM_CNTL) & R300_MEM_USE_CD_CH_ONLY) tmp &= ~MCLK_CNTL__R300_DISABLE_MC_MCLKB; else tmp &= ~MCLK_CNTL__R300_DISABLE_MC_MCLKA; } else { tmp &= ~(MCLK_CNTL__R300_DISABLE_MC_MCLKA | MCLK_CNTL__R300_DISABLE_MC_MCLKB); } } OUTPLL(pllMCLK_CNTL, tmp); return; } /* R300 */ if (rinfo->family == CHIP_FAMILY_R300 || rinfo->family == CHIP_FAMILY_R350) { tmp = INPLL(pllSCLK_CNTL); tmp &= ~(SCLK_CNTL__R300_FORCE_VAP); tmp |= SCLK_CNTL__FORCE_CP; OUTPLL(pllSCLK_CNTL, tmp); radeon_msleep(15); tmp = INPLL(pllSCLK_CNTL2); tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL | SCLK_CNTL2__R300_FORCE_GA | SCLK_CNTL2__R300_FORCE_CBA); OUTPLL(pllSCLK_CNTL2, tmp); } /* Others */ tmp = INPLL( pllCLK_PWRMGT_CNTL); tmp &= ~(CLK_PWRMGT_CNTL__ACTIVE_HILO_LAT_MASK| CLK_PWRMGT_CNTL__DISP_DYN_STOP_LAT_MASK| CLK_PWRMGT_CNTL__DYN_STOP_MODE_MASK); tmp |= CLK_PWRMGT_CNTL__ENGINE_DYNCLK_MODE_MASK | (0x01 << CLK_PWRMGT_CNTL__ACTIVE_HILO_LAT__SHIFT); OUTPLL( pllCLK_PWRMGT_CNTL, tmp); radeon_msleep(15); tmp = INPLL(pllCLK_PIN_CNTL); tmp |= CLK_PIN_CNTL__SCLK_DYN_START_CNTL; OUTPLL(pllCLK_PIN_CNTL, tmp); radeon_msleep(15); /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200 * to lockup randomly, leave them as set by BIOS. 
*/ tmp = INPLL(pllSCLK_CNTL); tmp &= ~SCLK_CNTL__FORCEON_MASK; /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300*/ if ((rinfo->family == CHIP_FAMILY_RV250 && ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) || ((rinfo->family == CHIP_FAMILY_RV100) && ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) <= CFG_ATI_REV_A13))) { tmp |= SCLK_CNTL__FORCE_CP; tmp |= SCLK_CNTL__FORCE_VIP; } OUTPLL(pllSCLK_CNTL, tmp); radeon_msleep(15); if ((rinfo->family == CHIP_FAMILY_RV200) || (rinfo->family == CHIP_FAMILY_RV250) || (rinfo->family == CHIP_FAMILY_RV280)) { tmp = INPLL(pllSCLK_MORE_CNTL); tmp &= ~SCLK_MORE_CNTL__FORCEON; /* RV200::A11 A12 RV250::A11 A12 */ if (((rinfo->family == CHIP_FAMILY_RV200) || (rinfo->family == CHIP_FAMILY_RV250)) && ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) tmp |= SCLK_MORE_CNTL__FORCEON; OUTPLL(pllSCLK_MORE_CNTL, tmp); radeon_msleep(15); } /* RV200::A11 A12, RV250::A11 A12 */ if (((rinfo->family == CHIP_FAMILY_RV200) || (rinfo->family == CHIP_FAMILY_RV250)) && ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) { tmp = INPLL(pllPLL_PWRMGT_CNTL); tmp |= PLL_PWRMGT_CNTL__TCL_BYPASS_DISABLE; OUTPLL(pllPLL_PWRMGT_CNTL, tmp); radeon_msleep(15); } tmp = INPLL(pllPIXCLKS_CNTL); tmp |= PIXCLKS_CNTL__PIX2CLK_ALWAYS_ONb | PIXCLKS_CNTL__PIX2CLK_DAC_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_BLEND_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_GV_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_DIG_TMDS_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_LVDS_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_TMDS_ALWAYS_ONb; OUTPLL(pllPIXCLKS_CNTL, tmp); radeon_msleep(15); tmp = INPLL(pllVCLK_ECP_CNTL); tmp |= VCLK_ECP_CNTL__PIXCLK_ALWAYS_ONb | VCLK_ECP_CNTL__PIXCLK_DAC_ALWAYS_ONb; OUTPLL(pllVCLK_ECP_CNTL, tmp); /* X doesn't do that ... 
hrm, we do on mobility && Macs */ #ifdef CONFIG_PPC_OF if (rinfo->is_mobility) { tmp = INPLL(pllMCLK_CNTL); tmp &= ~(MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB | MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB); OUTPLL(pllMCLK_CNTL, tmp); radeon_msleep(15); tmp = INPLL(pllMCLK_MISC); tmp |= MCLK_MISC__MC_MCLK_MAX_DYN_STOP_LAT| MCLK_MISC__IO_MCLK_MAX_DYN_STOP_LAT| MCLK_MISC__MC_MCLK_DYN_ENABLE| MCLK_MISC__IO_MCLK_DYN_ENABLE; OUTPLL(pllMCLK_MISC, tmp); radeon_msleep(15); } #endif /* CONFIG_PPC_OF */ } #ifdef CONFIG_PM static void OUTMC( struct radeonfb_info *rinfo, u8 indx, u32 value) { OUTREG( MC_IND_INDEX, indx | MC_IND_INDEX__MC_IND_WR_EN); OUTREG( MC_IND_DATA, value); } static u32 INMC(struct radeonfb_info *rinfo, u8 indx) { OUTREG( MC_IND_INDEX, indx); return INREG( MC_IND_DATA); } static void radeon_pm_save_regs(struct radeonfb_info *rinfo, int saving_for_d3) { rinfo->save_regs[0] = INPLL(PLL_PWRMGT_CNTL); rinfo->save_regs[1] = INPLL(CLK_PWRMGT_CNTL); rinfo->save_regs[2] = INPLL(MCLK_CNTL); rinfo->save_regs[3] = INPLL(SCLK_CNTL); rinfo->save_regs[4] = INPLL(CLK_PIN_CNTL); rinfo->save_regs[5] = INPLL(VCLK_ECP_CNTL); rinfo->save_regs[6] = INPLL(PIXCLKS_CNTL); rinfo->save_regs[7] = INPLL(MCLK_MISC); rinfo->save_regs[8] = INPLL(P2PLL_CNTL); rinfo->save_regs[9] = INREG(DISP_MISC_CNTL); rinfo->save_regs[10] = INREG(DISP_PWR_MAN); rinfo->save_regs[11] = INREG(LVDS_GEN_CNTL); rinfo->save_regs[13] = INREG(TV_DAC_CNTL); rinfo->save_regs[14] = INREG(BUS_CNTL1); rinfo->save_regs[15] = INREG(CRTC_OFFSET_CNTL); rinfo->save_regs[16] = INREG(AGP_CNTL); rinfo->save_regs[17] = (INREG(CRTC_GEN_CNTL) & 0xfdffffff) | 0x04000000; rinfo->save_regs[18] = (INREG(CRTC2_GEN_CNTL) & 0xfdffffff) | 0x04000000; rinfo->save_regs[19] = INREG(GPIOPAD_A); rinfo->save_regs[20] = INREG(GPIOPAD_EN); rinfo->save_regs[21] = INREG(GPIOPAD_MASK); rinfo->save_regs[22] = INREG(ZV_LCDPAD_A); rinfo->save_regs[23] = INREG(ZV_LCDPAD_EN); rinfo->save_regs[24] = INREG(ZV_LCDPAD_MASK); rinfo->save_regs[25] 
= INREG(GPIO_VGA_DDC); rinfo->save_regs[26] = INREG(GPIO_DVI_DDC); rinfo->save_regs[27] = INREG(GPIO_MONID); rinfo->save_regs[28] = INREG(GPIO_CRT2_DDC); rinfo->save_regs[29] = INREG(SURFACE_CNTL); rinfo->save_regs[30] = INREG(MC_FB_LOCATION); rinfo->save_regs[31] = INREG(DISPLAY_BASE_ADDR); rinfo->save_regs[32] = INREG(MC_AGP_LOCATION); rinfo->save_regs[33] = INREG(CRTC2_DISPLAY_BASE_ADDR); rinfo->save_regs[34] = INPLL(SCLK_MORE_CNTL); rinfo->save_regs[35] = INREG(MEM_SDRAM_MODE_REG); rinfo->save_regs[36] = INREG(BUS_CNTL); rinfo->save_regs[39] = INREG(RBBM_CNTL); rinfo->save_regs[40] = INREG(DAC_CNTL); rinfo->save_regs[41] = INREG(HOST_PATH_CNTL); rinfo->save_regs[37] = INREG(MPP_TB_CONFIG); rinfo->save_regs[38] = INREG(FCP_CNTL); if (rinfo->is_mobility) { rinfo->save_regs[12] = INREG(LVDS_PLL_CNTL); rinfo->save_regs[43] = INPLL(pllSSPLL_CNTL); rinfo->save_regs[44] = INPLL(pllSSPLL_REF_DIV); rinfo->save_regs[45] = INPLL(pllSSPLL_DIV_0); rinfo->save_regs[90] = INPLL(pllSS_INT_CNTL); rinfo->save_regs[91] = INPLL(pllSS_TST_CNTL); rinfo->save_regs[81] = INREG(LVDS_GEN_CNTL); } if (rinfo->family >= CHIP_FAMILY_RV200) { rinfo->save_regs[42] = INREG(MEM_REFRESH_CNTL); rinfo->save_regs[46] = INREG(MC_CNTL); rinfo->save_regs[47] = INREG(MC_INIT_GFX_LAT_TIMER); rinfo->save_regs[48] = INREG(MC_INIT_MISC_LAT_TIMER); rinfo->save_regs[49] = INREG(MC_TIMING_CNTL); rinfo->save_regs[50] = INREG(MC_READ_CNTL_AB); rinfo->save_regs[51] = INREG(MC_IOPAD_CNTL); rinfo->save_regs[52] = INREG(MC_CHIP_IO_OE_CNTL_AB); rinfo->save_regs[53] = INREG(MC_DEBUG); } rinfo->save_regs[54] = INREG(PAMAC0_DLY_CNTL); rinfo->save_regs[55] = INREG(PAMAC1_DLY_CNTL); rinfo->save_regs[56] = INREG(PAD_CTLR_MISC); rinfo->save_regs[57] = INREG(FW_CNTL); if (rinfo->family >= CHIP_FAMILY_R300) { rinfo->save_regs[58] = INMC(rinfo, ixR300_MC_MC_INIT_WR_LAT_TIMER); rinfo->save_regs[59] = INMC(rinfo, ixR300_MC_IMP_CNTL); rinfo->save_regs[60] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_C0); rinfo->save_regs[61] = 
INMC(rinfo, ixR300_MC_CHP_IO_CNTL_C1); rinfo->save_regs[62] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_D0); rinfo->save_regs[63] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_D1); rinfo->save_regs[64] = INMC(rinfo, ixR300_MC_BIST_CNTL_3); rinfo->save_regs[65] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_A0); rinfo->save_regs[66] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_A1); rinfo->save_regs[67] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_B0); rinfo->save_regs[68] = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_B1); rinfo->save_regs[69] = INMC(rinfo, ixR300_MC_DEBUG_CNTL); rinfo->save_regs[70] = INMC(rinfo, ixR300_MC_DLL_CNTL); rinfo->save_regs[71] = INMC(rinfo, ixR300_MC_IMP_CNTL_0); rinfo->save_regs[72] = INMC(rinfo, ixR300_MC_ELPIDA_CNTL); rinfo->save_regs[96] = INMC(rinfo, ixR300_MC_READ_CNTL_CD); } else { rinfo->save_regs[59] = INMC(rinfo, ixMC_IMP_CNTL); rinfo->save_regs[65] = INMC(rinfo, ixMC_CHP_IO_CNTL_A0); rinfo->save_regs[66] = INMC(rinfo, ixMC_CHP_IO_CNTL_A1); rinfo->save_regs[67] = INMC(rinfo, ixMC_CHP_IO_CNTL_B0); rinfo->save_regs[68] = INMC(rinfo, ixMC_CHP_IO_CNTL_B1); rinfo->save_regs[71] = INMC(rinfo, ixMC_IMP_CNTL_0); } rinfo->save_regs[73] = INPLL(pllMPLL_CNTL); rinfo->save_regs[74] = INPLL(pllSPLL_CNTL); rinfo->save_regs[75] = INPLL(pllMPLL_AUX_CNTL); rinfo->save_regs[76] = INPLL(pllSPLL_AUX_CNTL); rinfo->save_regs[77] = INPLL(pllM_SPLL_REF_FB_DIV); rinfo->save_regs[78] = INPLL(pllAGP_PLL_CNTL); rinfo->save_regs[79] = INREG(PAMAC2_DLY_CNTL); rinfo->save_regs[80] = INREG(OV0_BASE_ADDR); rinfo->save_regs[82] = INREG(FP_GEN_CNTL); rinfo->save_regs[83] = INREG(FP2_GEN_CNTL); rinfo->save_regs[84] = INREG(TMDS_CNTL); rinfo->save_regs[85] = INREG(TMDS_TRANSMITTER_CNTL); rinfo->save_regs[86] = INREG(DISP_OUTPUT_CNTL); rinfo->save_regs[87] = INREG(DISP_HW_DEBUG); rinfo->save_regs[88] = INREG(TV_MASTER_CNTL); rinfo->save_regs[89] = INPLL(pllP2PLL_REF_DIV); rinfo->save_regs[92] = INPLL(pllPPLL_DIV_0); rinfo->save_regs[93] = INPLL(pllPPLL_CNTL); rinfo->save_regs[94] = INREG(GRPH_BUFFER_CNTL); 
	/* (tail of radeon_pm_save_regs, which begins earlier in the file) */
	rinfo->save_regs[95] = INREG(GRPH2_BUFFER_CNTL);
	/* NOTE(review): on family >= R300 the branch above already stored
	 * ixR300_MC_READ_CNTL_CD into save_regs[96]; this store overwrites
	 * it — verify the index assignment is intentional. */
	rinfo->save_regs[96] = INREG(HDP_DEBUG);
	rinfo->save_regs[97] = INPLL(pllMDLL_CKO);
	rinfo->save_regs[98] = INPLL(pllMDLL_RDCKA);
	rinfo->save_regs[99] = INPLL(pllMDLL_RDCKB);
}

/* Write back the register snapshot taken by radeon_pm_save_regs.
 * Order matters: PLLs are brought up first (P2PLL with its low bit
 * masked off initially), then memory/display base registers, then the
 * display/GPIO pads last. */
static void radeon_pm_restore_regs(struct radeonfb_info *rinfo)
{
	OUTPLL(P2PLL_CNTL, rinfo->save_regs[8] & 0xFFFFFFFE); /* First */
	OUTPLL(PLL_PWRMGT_CNTL, rinfo->save_regs[0]);
	OUTPLL(CLK_PWRMGT_CNTL, rinfo->save_regs[1]);
	OUTPLL(MCLK_CNTL, rinfo->save_regs[2]);
	OUTPLL(SCLK_CNTL, rinfo->save_regs[3]);
	OUTPLL(CLK_PIN_CNTL, rinfo->save_regs[4]);
	OUTPLL(VCLK_ECP_CNTL, rinfo->save_regs[5]);
	OUTPLL(PIXCLKS_CNTL, rinfo->save_regs[6]);
	OUTPLL(MCLK_MISC, rinfo->save_regs[7]);
	/* SCLK_MORE_CNTL was only meaningful to restore on RV350 here */
	if (rinfo->family == CHIP_FAMILY_RV350)
		OUTPLL(SCLK_MORE_CNTL, rinfo->save_regs[34]);

	/* memory / framebuffer layout */
	OUTREG(SURFACE_CNTL, rinfo->save_regs[29]);
	OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]);
	OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]);
	OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]);
	OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
	OUTREG(CNFG_MEMSIZE, rinfo->video_ram);

	/* display control */
	OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);
	OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]);
	OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11]);
	OUTREG(LVDS_PLL_CNTL,rinfo->save_regs[12]);
	OUTREG(TV_DAC_CNTL, rinfo->save_regs[13]);
	OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
	OUTREG(CRTC_OFFSET_CNTL, rinfo->save_regs[15]);
	OUTREG(AGP_CNTL, rinfo->save_regs[16]);
	OUTREG(CRTC_GEN_CNTL, rinfo->save_regs[17]);
	OUTREG(CRTC2_GEN_CNTL, rinfo->save_regs[18]);
	/* now restore P2PLL_CNTL fully (low bit included) */
	OUTPLL(P2PLL_CNTL, rinfo->save_regs[8]);

	/* GPIO / DDC pads */
	OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
	OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
	OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);
	OUTREG(ZV_LCDPAD_A, rinfo->save_regs[22]);
	OUTREG(ZV_LCDPAD_EN, rinfo->save_regs[23]);
	OUTREG(ZV_LCDPAD_MASK, rinfo->save_regs[24]);
	OUTREG(GPIO_VGA_DDC, rinfo->save_regs[25]);
	OUTREG(GPIO_DVI_DDC, rinfo->save_regs[26]);
	OUTREG(GPIO_MONID, rinfo->save_regs[27]);
	OUTREG(GPIO_CRT2_DDC, rinfo->save_regs[28]);
}

/* (storage class of the next function, whose name starts the next chunk) */
static void
radeon_pm_disable_iopad(struct radeonfb_info *rinfo)
{
	/* Park the GPIO / DDC / LCD pads in a known low-power state
	 * prior to suspend ("static void" for this function is emitted
	 * just before this chunk). */
	OUTREG(GPIOPAD_MASK, 0x0001ffff);
	OUTREG(GPIOPAD_EN, 0x00000400);
	OUTREG(GPIOPAD_A, 0x00000000);
	OUTREG(ZV_LCDPAD_MASK, 0x00000000);
	OUTREG(ZV_LCDPAD_EN, 0x00000000);
	OUTREG(ZV_LCDPAD_A, 0x00000000);
	OUTREG(GPIO_VGA_DDC, 0x00030000);
	OUTREG(GPIO_DVI_DDC, 0x00000000);
	OUTREG(GPIO_MONID, 0x00030000);
	OUTREG(GPIO_CRT2_DDC, 0x00000000);
}

/* Program the P2PLL so pixel clock 2 runs at 65MHz; the sleep/reset
 * bits are cleared with 1ms settle delays between steps, then PIX2CLK
 * is switched over to the new PLL source. */
static void radeon_pm_program_v2clk(struct radeonfb_info *rinfo)
{
	/* Set v2clk to 65MHz */
	if (rinfo->family <= CHIP_FAMILY_RV280) {
		/* detach PIX2CLK from its current source before touching
		 * the PLL */
		OUTPLL(pllPIXCLKS_CNTL,
		       __INPLL(rinfo, pllPIXCLKS_CNTL)
		       & ~PIXCLKS_CNTL__PIX2CLK_SRC_SEL_MASK);
		OUTPLL(pllP2PLL_REF_DIV, 0x0000000c);
		OUTPLL(pllP2PLL_CNTL, 0x0000bf00);
	} else {
		OUTPLL(pllP2PLL_REF_DIV, 0x0000000c);
		/* read back to post the write before reprogramming */
		INPLL(pllP2PLL_REF_DIV);
		OUTPLL(pllP2PLL_CNTL, 0x0000a700);
	}

	OUTPLL(pllP2PLL_DIV_0,
	       0x00020074 | P2PLL_DIV_0__P2PLL_ATOMIC_UPDATE_W);

	/* wake the PLL, then release reset, 1ms apart */
	OUTPLL(pllP2PLL_CNTL, INPLL(pllP2PLL_CNTL) & ~P2PLL_CNTL__P2PLL_SLEEP);
	mdelay(1);
	OUTPLL(pllP2PLL_CNTL, INPLL(pllP2PLL_CNTL) & ~P2PLL_CNTL__P2PLL_RESET);
	mdelay( 1);

	/* finally select P2PLL as the PIX2CLK source */
	OUTPLL(pllPIXCLKS_CNTL,
	       (INPLL(pllPIXCLKS_CNTL) & ~PIXCLKS_CNTL__PIX2CLK_SRC_SEL_MASK)
	       | (0x03 << PIXCLKS_CNTL__PIX2CLK_SRC_SEL__SHIFT));
	mdelay( 1);
}

/* Cut current draw for suspend: mobile AGP settings, turn off the
 * unused PLLs, and power down the TV/CRT DACs and TMDS transmitter.
 * (This function continues past the end of this chunk.) */
static void radeon_pm_low_current(struct radeonfb_info *rinfo)
{
	u32 reg;

	reg  = INREG(BUS_CNTL1);
	if (rinfo->family <= CHIP_FAMILY_RV280) {
		reg &= ~BUS_CNTL1_MOBILE_PLATFORM_SEL_MASK;
		reg |= BUS_CNTL1_AGPCLK_VALID | (1<<BUS_CNTL1_MOBILE_PLATFORM_SEL_SHIFT);
	} else {
		/* NOTE(review): magic value for newer families — meaning of
		 * bits 0x4080 not derivable from this file alone */
		reg |= 0x4080;
	}
	OUTREG(BUS_CNTL1, reg);

	/* turn off every PLL we can live without while suspended */
	reg  = INPLL(PLL_PWRMGT_CNTL);
	reg |= PLL_PWRMGT_CNTL_SPLL_TURNOFF | PLL_PWRMGT_CNTL_PPLL_TURNOFF |
		PLL_PWRMGT_CNTL_P2PLL_TURNOFF | PLL_PWRMGT_CNTL_TVPLL_TURNOFF;
	reg &= ~PLL_PWRMGT_CNTL_SU_MCLK_USE_BCLK;
	reg &= ~PLL_PWRMGT_CNTL_MOBILE_SU;
	OUTPLL(PLL_PWRMGT_CNTL, reg);

	/* power down the TV DAC (continues in the next chunk) */
	reg  = INREG(TV_DAC_CNTL);
	reg &= ~(TV_DAC_CNTL_BGADJ_MASK |TV_DAC_CNTL_DACADJ_MASK);
	reg |=TV_DAC_CNTL_BGSLEEP |
		TV_DAC_CNTL_RDACPD |
		TV_DAC_CNTL_GDACPD |
		TV_DAC_CNTL_BDACPD |
		(8<<TV_DAC_CNTL_BGADJ__SHIFT) |
(8<<TV_DAC_CNTL_DACADJ__SHIFT); OUTREG(TV_DAC_CNTL, reg); reg = INREG(TMDS_TRANSMITTER_CNTL); reg &= ~(TMDS_PLL_EN | TMDS_PLLRST); OUTREG(TMDS_TRANSMITTER_CNTL, reg); reg = INREG(DAC_CNTL); reg &= ~DAC_CMP_EN; OUTREG(DAC_CNTL, reg); reg = INREG(DAC_CNTL2); reg &= ~DAC2_CMP_EN; OUTREG(DAC_CNTL2, reg); reg = INREG(TV_DAC_CNTL); reg &= ~TV_DAC_CNTL_DETECT; OUTREG(TV_DAC_CNTL, reg); } static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo) { u32 sclk_cntl, mclk_cntl, sclk_more_cntl; u32 pll_pwrmgt_cntl; u32 clk_pwrmgt_cntl; u32 clk_pin_cntl; u32 vclk_ecp_cntl; u32 pixclks_cntl; u32 disp_mis_cntl; u32 disp_pwr_man; u32 tmp; /* Force Core Clocks */ sclk_cntl = INPLL( pllSCLK_CNTL); sclk_cntl |= SCLK_CNTL__IDCT_MAX_DYN_STOP_LAT| SCLK_CNTL__VIP_MAX_DYN_STOP_LAT| SCLK_CNTL__RE_MAX_DYN_STOP_LAT| SCLK_CNTL__PB_MAX_DYN_STOP_LAT| SCLK_CNTL__TAM_MAX_DYN_STOP_LAT| SCLK_CNTL__TDM_MAX_DYN_STOP_LAT| SCLK_CNTL__RB_MAX_DYN_STOP_LAT| SCLK_CNTL__FORCE_DISP2| SCLK_CNTL__FORCE_CP| SCLK_CNTL__FORCE_HDP| SCLK_CNTL__FORCE_DISP1| SCLK_CNTL__FORCE_TOP| SCLK_CNTL__FORCE_E2| SCLK_CNTL__FORCE_SE| SCLK_CNTL__FORCE_IDCT| SCLK_CNTL__FORCE_VIP| SCLK_CNTL__FORCE_PB| SCLK_CNTL__FORCE_TAM| SCLK_CNTL__FORCE_TDM| SCLK_CNTL__FORCE_RB| SCLK_CNTL__FORCE_TV_SCLK| SCLK_CNTL__FORCE_SUBPIC| SCLK_CNTL__FORCE_OV0; if (rinfo->family <= CHIP_FAMILY_RV280) sclk_cntl |= SCLK_CNTL__FORCE_RE; else sclk_cntl |= SCLK_CNTL__SE_MAX_DYN_STOP_LAT | SCLK_CNTL__E2_MAX_DYN_STOP_LAT | SCLK_CNTL__TV_MAX_DYN_STOP_LAT | SCLK_CNTL__HDP_MAX_DYN_STOP_LAT | SCLK_CNTL__CP_MAX_DYN_STOP_LAT; OUTPLL( pllSCLK_CNTL, sclk_cntl); sclk_more_cntl = INPLL(pllSCLK_MORE_CNTL); sclk_more_cntl |= SCLK_MORE_CNTL__FORCE_DISPREGS | SCLK_MORE_CNTL__FORCE_MC_GUI | SCLK_MORE_CNTL__FORCE_MC_HOST; OUTPLL(pllSCLK_MORE_CNTL, sclk_more_cntl); mclk_cntl = INPLL( pllMCLK_CNTL); mclk_cntl &= ~( MCLK_CNTL__FORCE_MCLKA | MCLK_CNTL__FORCE_MCLKB | MCLK_CNTL__FORCE_YCLKA | MCLK_CNTL__FORCE_YCLKB | MCLK_CNTL__FORCE_MC ); OUTPLL( pllMCLK_CNTL, mclk_cntl); /* 
Force Display clocks */ vclk_ecp_cntl = INPLL( pllVCLK_ECP_CNTL); vclk_ecp_cntl &= ~(VCLK_ECP_CNTL__PIXCLK_ALWAYS_ONb | VCLK_ECP_CNTL__PIXCLK_DAC_ALWAYS_ONb); vclk_ecp_cntl |= VCLK_ECP_CNTL__ECP_FORCE_ON; OUTPLL( pllVCLK_ECP_CNTL, vclk_ecp_cntl); pixclks_cntl = INPLL( pllPIXCLKS_CNTL); pixclks_cntl &= ~( PIXCLKS_CNTL__PIXCLK_GV_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_BLEND_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_DIG_TMDS_ALWAYS_ONb | PIXCLKS_CNTL__PIXCLK_LVDS_ALWAYS_ONb| PIXCLKS_CNTL__PIXCLK_TMDS_ALWAYS_ONb| PIXCLKS_CNTL__PIX2CLK_ALWAYS_ONb| PIXCLKS_CNTL__PIX2CLK_DAC_ALWAYS_ONb); OUTPLL( pllPIXCLKS_CNTL, pixclks_cntl); /* Switch off LVDS interface */ OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_BLON | LVDS_EN | LVDS_ON | LVDS_DIGON)); /* Enable System power management */ pll_pwrmgt_cntl = INPLL( pllPLL_PWRMGT_CNTL); pll_pwrmgt_cntl |= PLL_PWRMGT_CNTL__SPLL_TURNOFF | PLL_PWRMGT_CNTL__MPLL_TURNOFF| PLL_PWRMGT_CNTL__PPLL_TURNOFF| PLL_PWRMGT_CNTL__P2PLL_TURNOFF| PLL_PWRMGT_CNTL__TVPLL_TURNOFF; OUTPLL( pllPLL_PWRMGT_CNTL, pll_pwrmgt_cntl); clk_pwrmgt_cntl = INPLL( pllCLK_PWRMGT_CNTL); clk_pwrmgt_cntl &= ~( CLK_PWRMGT_CNTL__MPLL_PWRMGT_OFF| CLK_PWRMGT_CNTL__SPLL_PWRMGT_OFF| CLK_PWRMGT_CNTL__PPLL_PWRMGT_OFF| CLK_PWRMGT_CNTL__P2PLL_PWRMGT_OFF| CLK_PWRMGT_CNTL__MCLK_TURNOFF| CLK_PWRMGT_CNTL__SCLK_TURNOFF| CLK_PWRMGT_CNTL__PCLK_TURNOFF| CLK_PWRMGT_CNTL__P2CLK_TURNOFF| CLK_PWRMGT_CNTL__TVPLL_PWRMGT_OFF| CLK_PWRMGT_CNTL__GLOBAL_PMAN_EN| CLK_PWRMGT_CNTL__ENGINE_DYNCLK_MODE| CLK_PWRMGT_CNTL__ACTIVE_HILO_LAT_MASK| CLK_PWRMGT_CNTL__CG_NO1_DEBUG_MASK ); clk_pwrmgt_cntl |= CLK_PWRMGT_CNTL__GLOBAL_PMAN_EN | CLK_PWRMGT_CNTL__DISP_PM; OUTPLL( pllCLK_PWRMGT_CNTL, clk_pwrmgt_cntl); clk_pin_cntl = INPLL( pllCLK_PIN_CNTL); clk_pin_cntl &= ~CLK_PIN_CNTL__ACCESS_REGS_IN_SUSPEND; /* because both INPLL and OUTPLL take the same lock, that's why. 
*/ tmp = INPLL( pllMCLK_MISC) | MCLK_MISC__EN_MCLK_TRISTATE_IN_SUSPEND; OUTPLL( pllMCLK_MISC, tmp); /* BUS_CNTL1__MOBILE_PLATORM_SEL setting is northbridge chipset * and radeon chip dependent. Thus we only enable it on Mac for * now (until we get more info on how to compute the correct * value for various X86 bridges). */ #ifdef CONFIG_PPC_PMAC if (machine_is(powermac)) { /* AGP PLL control */ if (rinfo->family <= CHIP_FAMILY_RV280) { OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | BUS_CNTL1__AGPCLK_VALID); OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & ~BUS_CNTL1__MOBILE_PLATFORM_SEL_MASK) | (2<<BUS_CNTL1__MOBILE_PLATFORM_SEL__SHIFT)); // 440BX } else { OUTREG(BUS_CNTL1, INREG(BUS_CNTL1)); OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & ~0x4000) | 0x8000); } } #endif OUTREG(CRTC_OFFSET_CNTL, (INREG(CRTC_OFFSET_CNTL) & ~CRTC_OFFSET_CNTL__CRTC_STEREO_SYNC_OUT_EN)); clk_pin_cntl &= ~CLK_PIN_CNTL__CG_CLK_TO_OUTPIN; clk_pin_cntl |= CLK_PIN_CNTL__XTALIN_ALWAYS_ONb; OUTPLL( pllCLK_PIN_CNTL, clk_pin_cntl); /* Solano2M */ OUTREG(AGP_CNTL, (INREG(AGP_CNTL) & ~(AGP_CNTL__MAX_IDLE_CLK_MASK)) | (0x20<<AGP_CNTL__MAX_IDLE_CLK__SHIFT)); /* ACPI mode */ /* because both INPLL and OUTPLL take the same lock, that's why. 
*/ tmp = INPLL( pllPLL_PWRMGT_CNTL) & ~PLL_PWRMGT_CNTL__PM_MODE_SEL; OUTPLL( pllPLL_PWRMGT_CNTL, tmp); disp_mis_cntl = INREG(DISP_MISC_CNTL); disp_mis_cntl &= ~( DISP_MISC_CNTL__SOFT_RESET_GRPH_PP | DISP_MISC_CNTL__SOFT_RESET_SUBPIC_PP | DISP_MISC_CNTL__SOFT_RESET_OV0_PP | DISP_MISC_CNTL__SOFT_RESET_GRPH_SCLK| DISP_MISC_CNTL__SOFT_RESET_SUBPIC_SCLK| DISP_MISC_CNTL__SOFT_RESET_OV0_SCLK| DISP_MISC_CNTL__SOFT_RESET_GRPH2_PP| DISP_MISC_CNTL__SOFT_RESET_GRPH2_SCLK| DISP_MISC_CNTL__SOFT_RESET_LVDS| DISP_MISC_CNTL__SOFT_RESET_TMDS| DISP_MISC_CNTL__SOFT_RESET_DIG_TMDS| DISP_MISC_CNTL__SOFT_RESET_TV); OUTREG(DISP_MISC_CNTL, disp_mis_cntl); disp_pwr_man = INREG(DISP_PWR_MAN); disp_pwr_man &= ~( DISP_PWR_MAN__DISP_PWR_MAN_D3_CRTC_EN | DISP_PWR_MAN__DISP2_PWR_MAN_D3_CRTC2_EN | DISP_PWR_MAN__DISP_PWR_MAN_DPMS_MASK| DISP_PWR_MAN__DISP_D3_RST| DISP_PWR_MAN__DISP_D3_REG_RST ); disp_pwr_man |= DISP_PWR_MAN__DISP_D3_GRPH_RST| DISP_PWR_MAN__DISP_D3_SUBPIC_RST| DISP_PWR_MAN__DISP_D3_OV0_RST| DISP_PWR_MAN__DISP_D1D2_GRPH_RST| DISP_PWR_MAN__DISP_D1D2_SUBPIC_RST| DISP_PWR_MAN__DISP_D1D2_OV0_RST| DISP_PWR_MAN__DIG_TMDS_ENABLE_RST| DISP_PWR_MAN__TV_ENABLE_RST| // DISP_PWR_MAN__AUTO_PWRUP_EN| 0; OUTREG(DISP_PWR_MAN, disp_pwr_man); clk_pwrmgt_cntl = INPLL( pllCLK_PWRMGT_CNTL); pll_pwrmgt_cntl = INPLL( pllPLL_PWRMGT_CNTL) ; clk_pin_cntl = INPLL( pllCLK_PIN_CNTL); disp_pwr_man = INREG(DISP_PWR_MAN); /* D2 */ clk_pwrmgt_cntl |= CLK_PWRMGT_CNTL__DISP_PM; pll_pwrmgt_cntl |= PLL_PWRMGT_CNTL__MOBILE_SU | PLL_PWRMGT_CNTL__SU_SCLK_USE_BCLK; clk_pin_cntl |= CLK_PIN_CNTL__XTALIN_ALWAYS_ONb; disp_pwr_man &= ~(DISP_PWR_MAN__DISP_PWR_MAN_D3_CRTC_EN_MASK | DISP_PWR_MAN__DISP2_PWR_MAN_D3_CRTC2_EN_MASK); OUTPLL( pllCLK_PWRMGT_CNTL, clk_pwrmgt_cntl); OUTPLL( pllPLL_PWRMGT_CNTL, pll_pwrmgt_cntl); OUTPLL( pllCLK_PIN_CNTL, clk_pin_cntl); OUTREG(DISP_PWR_MAN, disp_pwr_man); /* disable display request & disable display */ OUTREG( CRTC_GEN_CNTL, (INREG( CRTC_GEN_CNTL) & ~CRTC_GEN_CNTL__CRTC_EN) | 
CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B);
	OUTREG( CRTC2_GEN_CNTL,
		(INREG( CRTC2_GEN_CNTL) & ~CRTC2_GEN_CNTL__CRTC2_EN) |
		CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B);
	mdelay(17);
}

/*
 * Pulse the MEM_SYNC_EN bits of both memory channel I/O control
 * registers to resync the YCLK/MCLK domains after a memory clock
 * change (pre-R300 MC register layout).
 */
static void radeon_pm_yclk_mclk_sync(struct radeonfb_info *rinfo)
{
	u32 mc_chp_io_cntl_a1, mc_chp_io_cntl_b1;

	mc_chp_io_cntl_a1 = INMC( rinfo, ixMC_CHP_IO_CNTL_A1)
		& ~MC_CHP_IO_CNTL_A1__MEM_SYNC_ENA_MASK;
	mc_chp_io_cntl_b1 = INMC( rinfo, ixMC_CHP_IO_CNTL_B1)
		& ~MC_CHP_IO_CNTL_B1__MEM_SYNC_ENB_MASK;

	/* Raise the sync-enable bit on each channel ... */
	OUTMC( rinfo, ixMC_CHP_IO_CNTL_A1, mc_chp_io_cntl_a1 |
	       (1<<MC_CHP_IO_CNTL_A1__MEM_SYNC_ENA__SHIFT));
	OUTMC( rinfo, ixMC_CHP_IO_CNTL_B1, mc_chp_io_cntl_b1 |
	       (1<<MC_CHP_IO_CNTL_B1__MEM_SYNC_ENB__SHIFT));

	/* ... then drop it back to the previous (cleared) state */
	OUTMC( rinfo, ixMC_CHP_IO_CNTL_A1, mc_chp_io_cntl_a1);
	OUTMC( rinfo, ixMC_CHP_IO_CNTL_B1, mc_chp_io_cntl_b1);

	mdelay( 1);
}

/*
 * Same MEM_SYNC_EN pulse as radeon_pm_yclk_mclk_sync(), but using the
 * R300 (M10) indirect MC register indices.
 */
static void radeon_pm_yclk_mclk_sync_m10(struct radeonfb_info *rinfo)
{
	u32 mc_chp_io_cntl_a1, mc_chp_io_cntl_b1;

	mc_chp_io_cntl_a1 = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_A1)
		& ~MC_CHP_IO_CNTL_A1__MEM_SYNC_ENA_MASK;
	mc_chp_io_cntl_b1 = INMC(rinfo, ixR300_MC_CHP_IO_CNTL_B1)
		& ~MC_CHP_IO_CNTL_B1__MEM_SYNC_ENB_MASK;

	OUTMC( rinfo, ixR300_MC_CHP_IO_CNTL_A1, mc_chp_io_cntl_a1 |
	       (1<<MC_CHP_IO_CNTL_A1__MEM_SYNC_ENA__SHIFT));
	OUTMC( rinfo, ixR300_MC_CHP_IO_CNTL_B1, mc_chp_io_cntl_b1 |
	       (1<<MC_CHP_IO_CNTL_B1__MEM_SYNC_ENB__SHIFT));

	OUTMC( rinfo, ixR300_MC_CHP_IO_CNTL_A1, mc_chp_io_cntl_a1);
	OUTMC( rinfo, ixR300_MC_CHP_IO_CNTL_B1, mc_chp_io_cntl_b1);

	mdelay( 1);
}

/*
 * Program the SDRAM mode register with @value and pulse
 * MEM_SDRAM_RESET around the write.
 *
 * @delay_required: 0  = no delays, no completion wait;
 *                  1  = busy-poll MC_STATUS for power-up completion;
 *                  >=2 = additionally wait 1ms between each step and
 *                        in each poll iteration.
 */
static void radeon_pm_program_mode_reg(struct radeonfb_info *rinfo, u16 value,
				       u8 delay_required)
{
	u32 mem_sdram_mode;

	mem_sdram_mode = INREG( MEM_SDRAM_MODE_REG);

	/* Install the new mode value, keep the "configured" type bit set */
	mem_sdram_mode &= ~MEM_SDRAM_MODE_REG__MEM_MODE_REG_MASK;
	mem_sdram_mode |= (value<<MEM_SDRAM_MODE_REG__MEM_MODE_REG__SHIFT) |
		MEM_SDRAM_MODE_REG__MEM_CFG_TYPE;
	OUTREG( MEM_SDRAM_MODE_REG, mem_sdram_mode);
	if (delay_required >= 2)
		mdelay(1);

	/* Assert, then (below) deassert, the SDRAM reset bit */
	mem_sdram_mode |= MEM_SDRAM_MODE_REG__MEM_SDRAM_RESET;
	OUTREG( MEM_SDRAM_MODE_REG, mem_sdram_mode);
	if (delay_required >= 2)
		mdelay(1);

	mem_sdram_mode &= ~MEM_SDRAM_MODE_REG__MEM_SDRAM_RESET;
	OUTREG( MEM_SDRAM_MODE_REG, mem_sdram_mode);
	if (delay_required >= 2)
		mdelay(1);

	/* Wait until either memory channel reports power-up complete.
	 * NOTE(review): with delay_required == 1 this is an unbounded
	 * busy-wait on MC_STATUS — presumably the hardware always
	 * completes; confirm before relying on it.
	 */
	if (delay_required) {
		do {
			if (delay_required >= 2)
				mdelay(1);
		} while ((INREG(MC_STATUS)
			  & (MC_STATUS__MEM_PWRUP_COMPL_A |
			     MC_STATUS__MEM_PWRUP_COMPL_B)) == 0);
	}
}

/*
 * Bounded wait (up to ~100ms) for the memory controller to report
 * SDRAM power-up completion on either channel (M10 variant).
 */
static void radeon_pm_m10_program_mode_wait(struct radeonfb_info *rinfo)
{
	int cnt;

	for (cnt = 0; cnt < 100; ++cnt) {
		mdelay(1);
		if (INREG(MC_STATUS) & (MC_STATUS__MEM_PWRUP_COMPL_A |
					MC_STATUS__MEM_PWRUP_COMPL_B))
			break;
	}
}

/*
 * Bring the memory DLLs (MDLL_CKO / MDLL_RDCKA / MDLL_RDCKB) out of
 * sleep and reset, one register at a time, with settle delays between
 * each step.
 *
 * NOTE(review): the initial cko value sets only the MCKOA sleep/reset
 * bits, yet the clear steps below clear both MCKOA and MCKOB bits —
 * looks asymmetric; confirm against the register trace this sequence
 * was derived from.
 */
static void radeon_pm_enable_dll(struct radeonfb_info *rinfo)
{
#define DLL_RESET_DELAY 5
#define DLL_SLEEP_DELAY 1

	u32 cko = INPLL(pllMDLL_CKO) | MDLL_CKO__MCKOA_SLEEP |
		MDLL_CKO__MCKOA_RESET;
	u32 cka = INPLL(pllMDLL_RDCKA) | MDLL_RDCKA__MRDCKA0_SLEEP |
		MDLL_RDCKA__MRDCKA1_SLEEP | MDLL_RDCKA__MRDCKA0_RESET |
		MDLL_RDCKA__MRDCKA1_RESET;
	u32 ckb = INPLL(pllMDLL_RDCKB) | MDLL_RDCKB__MRDCKB0_SLEEP |
		MDLL_RDCKB__MRDCKB1_SLEEP | MDLL_RDCKB__MRDCKB0_RESET |
		MDLL_RDCKB__MRDCKB1_RESET;

	/* Setting up the DLL range for write */
	OUTPLL(pllMDLL_CKO, cko);
	OUTPLL(pllMDLL_RDCKA, cka);
	OUTPLL(pllMDLL_RDCKB, ckb);

	mdelay(DLL_RESET_DELAY*2);

	/* CKO: out of sleep, then out of reset */
	cko &= ~(MDLL_CKO__MCKOA_SLEEP | MDLL_CKO__MCKOB_SLEEP);
	OUTPLL(pllMDLL_CKO, cko);
	mdelay(DLL_SLEEP_DELAY);
	cko &= ~(MDLL_CKO__MCKOA_RESET | MDLL_CKO__MCKOB_RESET);
	OUTPLL(pllMDLL_CKO, cko);
	mdelay(DLL_RESET_DELAY);

	/* RDCKA: out of sleep, then out of reset */
	cka &= ~(MDLL_RDCKA__MRDCKA0_SLEEP | MDLL_RDCKA__MRDCKA1_SLEEP);
	OUTPLL(pllMDLL_RDCKA, cka);
	mdelay(DLL_SLEEP_DELAY);
	cka &= ~(MDLL_RDCKA__MRDCKA0_RESET | MDLL_RDCKA__MRDCKA1_RESET);
	OUTPLL(pllMDLL_RDCKA, cka);
	mdelay(DLL_RESET_DELAY);

	/* RDCKB: out of sleep, then out of reset */
	ckb &= ~(MDLL_RDCKB__MRDCKB0_SLEEP | MDLL_RDCKB__MRDCKB1_SLEEP);
	OUTPLL(pllMDLL_RDCKB, ckb);
	mdelay(DLL_SLEEP_DELAY);
	ckb &= ~(MDLL_RDCKB__MRDCKB0_RESET | MDLL_RDCKB__MRDCKB1_RESET);
	OUTPLL(pllMDLL_RDCKB, ckb);
	mdelay(DLL_RESET_DELAY);

#undef DLL_RESET_DELAY
#undef DLL_SLEEP_DELAY
}

/*
 * Power up the memory DLLs on R300-class (M10) hardware, waking only
 * the memory channels that MC_CNTL reports as populated.
 */
static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
{
	u32 dll_value;
	u32 dll_sleep_mask
= 0;
	u32 dll_reset_mask = 0;
	u32 mc;

#define DLL_RESET_DELAY 5
#define DLL_SLEEP_DELAY 1

	OUTMC(rinfo, ixR300_MC_DLL_CNTL, rinfo->save_regs[70]);
	mc = INREG(MC_CNTL);
	/* Check which channels are enabled */
	switch (mc & 0x3) {
	case 1:
		if (mc & 0x4)
			break;
		/* fall through */
	case 2:
		dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
		dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
		/* fall through */
	case 0:
		dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
		dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
	}
	switch (mc & 0x3) {
	case 1:
		if (!(mc & 0x4))
			break;
		/* fall through */
	case 2:
		dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
		dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
		dll_sleep_mask |= MDLL_R300_RDCK__MRDCKC_SLEEP;
		dll_reset_mask |= MDLL_R300_RDCK__MRDCKC_RESET;
	}

	dll_value = INPLL(pllMDLL_RDCKA);

	/* Power Up: clear the sleep bits first, then the reset bits */
	dll_value &= ~(dll_sleep_mask);
	OUTPLL(pllMDLL_RDCKA, dll_value);
	mdelay( DLL_SLEEP_DELAY);

	dll_value &= ~(dll_reset_mask);
	OUTPLL(pllMDLL_RDCKA, dll_value);
	mdelay( DLL_RESET_DELAY);

#undef DLL_RESET_DELAY
#undef DLL_SLEEP_DELAY
}

/*
 * Run the full SDRAM reset / mode-register programming sequence
 * appropriate for the chip family (RV350/M10, desktop RV200 "QW",
 * M6/RV100, and the M7..M9+ mobility parts), with display requests
 * and flat panel outputs parked while memory is being reset.
 */
static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
{
	u32 crtcGenCntl, crtcGenCntl2, memRefreshCntl, crtc_more_cntl,
		fp_gen_cntl, fp2_gen_cntl;

	/* Save display/panel control state, then quiesce the outputs */
	crtcGenCntl = INREG( CRTC_GEN_CNTL);
	crtcGenCntl2 = INREG( CRTC2_GEN_CNTL);
	crtc_more_cntl = INREG( CRTC_MORE_CNTL);
	fp_gen_cntl = INREG( FP_GEN_CNTL);
	fp2_gen_cntl = INREG( FP2_GEN_CNTL);

	OUTREG( CRTC_MORE_CNTL, 0);
	OUTREG( FP_GEN_CNTL, 0);
	OUTREG( FP2_GEN_CNTL,0);
	OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) );
	OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) );

	/* This is the code for the Aluminium PowerBooks M10 / iBooks M11 */
	if (rinfo->family == CHIP_FAMILY_RV350) {
		u32 sdram_mode_reg = rinfo->save_regs[35];
		/* Built-in mode-register table; 0xffffffff entries mean
		 * "wait for power-up completion" rather than a value.
		 */
		static const u32 default_mrtable[] =
			{ 0x21320032,
			  0x21321000, 0xa1321000, 0x21321000, 0xffffffff,
			  0x21320032, 0xa1320032, 0x21320032, 0xffffffff,
			  0x21321002, 0xa1321002, 0x21321002, 0xffffffff,
			  0x21320132, 0xa1320132, 0x21320132, 0xffffffff,
			  0x21320032, 0xa1320032, 0x21320032, 0xffffffff,
			  0x31320032 };
		const u32 *mrtable = default_mrtable;
		int i, mrtable_size = ARRAY_SIZE(default_mrtable);

		mdelay(30);

		/* Disable refresh */
		memRefreshCntl = INREG( MEM_REFRESH_CNTL)
			& ~MEM_REFRESH_CNTL__MEM_REFRESH_DIS;
		OUTREG( MEM_REFRESH_CNTL,
			memRefreshCntl | MEM_REFRESH_CNTL__MEM_REFRESH_DIS);

		/* Configure and enable M & SPLLs */
		radeon_pm_enable_dll_m10(rinfo);
		radeon_pm_yclk_mclk_sync_m10(rinfo);

#ifdef CONFIG_PPC_OF
		/* Prefer the mode-register table provided by Open
		 * Firmware ("ATY,MRT") over the built-in default.
		 */
		if (rinfo->of_node != NULL) {
			int size;

			mrtable = of_get_property(rinfo->of_node, "ATY,MRT", &size);
			if (mrtable)
				mrtable_size = size >> 2;
			else
				mrtable = default_mrtable;
		}
#endif /* CONFIG_PPC_OF */

		/* Program the SDRAM */
		sdram_mode_reg = mrtable[0];
		OUTREG(MEM_SDRAM_MODE_REG, sdram_mode_reg);
		for (i = 0; i < mrtable_size; i++) {
			if (mrtable[i] == 0xffffffffu)
				radeon_pm_m10_program_mode_wait(rinfo);
			else {
				sdram_mode_reg &=
					~(MEM_SDRAM_MODE_REG__MEM_MODE_REG_MASK
					  | MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE
					  | MEM_SDRAM_MODE_REG__MEM_SDRAM_RESET);
				sdram_mode_reg |= mrtable[i];

				OUTREG(MEM_SDRAM_MODE_REG, sdram_mode_reg);
				mdelay(1);
			}
		}

		/* Restore memory refresh */
		OUTREG(MEM_REFRESH_CNTL, memRefreshCntl);
		mdelay(30);
	}
	/* Here come the desktop RV200 "QW" card */
	else if (!rinfo->is_mobility && rinfo->family == CHIP_FAMILY_RV200) {
		/* Disable refresh */
		memRefreshCntl = INREG( MEM_REFRESH_CNTL)
			& ~MEM_REFRESH_CNTL__MEM_REFRESH_DIS;
		OUTREG(MEM_REFRESH_CNTL,
		       memRefreshCntl | MEM_REFRESH_CNTL__MEM_REFRESH_DIS);
		mdelay(30);

		/* Reset memory */
		OUTREG(MEM_SDRAM_MODE_REG,
		       INREG( MEM_SDRAM_MODE_REG)
		       & ~MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		radeon_pm_program_mode_reg(rinfo, 0x2002, 2);
		radeon_pm_program_mode_reg(rinfo, 0x0132, 2);
		radeon_pm_program_mode_reg(rinfo, 0x0032, 2);

		OUTREG(MEM_SDRAM_MODE_REG,
		       INREG(MEM_SDRAM_MODE_REG)
		       | MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		OUTREG( MEM_REFRESH_CNTL, memRefreshCntl);
	}
	/* The M6 */
	else if (rinfo->is_mobility && rinfo->family == CHIP_FAMILY_RV100) {
		/* Disable refresh — on M6 the refresh-disable bit lives
		 * in EXT_MEM_CNTL (bit 20), not MEM_REFRESH_CNTL.
		 */
		memRefreshCntl = INREG(EXT_MEM_CNTL) & ~(1 << 20);
		OUTREG( EXT_MEM_CNTL, memRefreshCntl | (1 << 20));

		/* Reset memory */
		OUTREG( MEM_SDRAM_MODE_REG,
			INREG( MEM_SDRAM_MODE_REG)
			& ~MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		/* DLL */
		radeon_pm_enable_dll(rinfo);

		/* MLCK / YCLK sync */
		radeon_pm_yclk_mclk_sync(rinfo);

		/* Program Mode Register */
		radeon_pm_program_mode_reg(rinfo, 0x2000, 1);
		radeon_pm_program_mode_reg(rinfo, 0x2001, 1);
		radeon_pm_program_mode_reg(rinfo, 0x2002, 1);
		radeon_pm_program_mode_reg(rinfo, 0x0132, 1);
		radeon_pm_program_mode_reg(rinfo, 0x0032, 1);

		/* Complete & re-enable refresh */
		OUTREG( MEM_SDRAM_MODE_REG,
			INREG( MEM_SDRAM_MODE_REG)
			| MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		OUTREG(EXT_MEM_CNTL, memRefreshCntl);
	}
	/* And finally, the M7..M9 models, including M9+ (RV280) */
	else if (rinfo->is_mobility) {

		/* Disable refresh */
		memRefreshCntl = INREG( MEM_REFRESH_CNTL)
			& ~MEM_REFRESH_CNTL__MEM_REFRESH_DIS;
		OUTREG( MEM_REFRESH_CNTL,
			memRefreshCntl | MEM_REFRESH_CNTL__MEM_REFRESH_DIS);

		/* Reset memory */
		OUTREG( MEM_SDRAM_MODE_REG,
			INREG( MEM_SDRAM_MODE_REG)
			& ~MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		/* DLL */
		radeon_pm_enable_dll(rinfo);

		/* MLCK / YCLK sync */
		radeon_pm_yclk_mclk_sync(rinfo);

		/* M6, M7 and M9 so far ... */
		if (rinfo->family <= CHIP_FAMILY_RV250) {
			radeon_pm_program_mode_reg(rinfo, 0x2000, 1);
			radeon_pm_program_mode_reg(rinfo, 0x2001, 1);
			radeon_pm_program_mode_reg(rinfo, 0x2002, 1);
			radeon_pm_program_mode_reg(rinfo, 0x0132, 1);
			radeon_pm_program_mode_reg(rinfo, 0x0032, 1);
		}
		/* M9+ (iBook G4) */
		else if (rinfo->family == CHIP_FAMILY_RV280) {
			radeon_pm_program_mode_reg(rinfo, 0x2000, 1);
			radeon_pm_program_mode_reg(rinfo, 0x0132, 1);
			radeon_pm_program_mode_reg(rinfo, 0x0032, 1);
		}

		/* Complete & re-enable refresh */
		OUTREG( MEM_SDRAM_MODE_REG,
			INREG( MEM_SDRAM_MODE_REG)
			| MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);

		OUTREG( MEM_REFRESH_CNTL, memRefreshCntl);
	}

	/* Restore the display/panel state saved on entry */
	OUTREG( CRTC_GEN_CNTL, crtcGenCntl);
	OUTREG( CRTC2_GEN_CNTL, crtcGenCntl2);
	OUTREG( FP_GEN_CNTL, fp_gen_cntl);
	OUTREG( FP2_GEN_CNTL, fp2_gen_cntl);
	OUTREG( CRTC_MORE_CNTL, crtc_more_cntl);

	mdelay( 15);
}

/*
 * Clear PAD_MANUAL_OVERRIDE in PAD_CTLR_STRENGTH and wait until the
 * register reads back stable (65 consecutive stable 1ms samples, the
 * counter restarting on every change; give up after 10 restarts).
 */
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
	u32 tmp, tmp2;
	int i,j;

	/* Reset the PAD_CTLR_STRENGTH & wait for it to be stable */
	INREG(PAD_CTLR_STRENGTH);
	OUTREG(PAD_CTLR_STRENGTH, INREG(PAD_CTLR_STRENGTH) & ~PAD_MANUAL_OVERRIDE);
	tmp = INREG(PAD_CTLR_STRENGTH);
	for (i = j = 0; i < 65; ++i) {
		mdelay(1);
		tmp2 = INREG(PAD_CTLR_STRENGTH);
		if (tmp != tmp2) {
			tmp = tmp2;
			i = 0;
			j++;
			if (j > 10) {
				printk(KERN_WARNING "radeon: PAD_CTLR_STRENGTH doesn't "
				       "stabilize !\n");
				break;
			}
		}
	}
}

/*
 * Set the low two bits (power-down/reset) in each of the four major
 * PLL control registers, switching PPLL, P2PLL, SPLL and MPLL off.
 */
static void radeon_pm_all_ppls_off(struct radeonfb_info *rinfo)
{
	u32 tmp;

	tmp = INPLL(pllPPLL_CNTL);
	OUTPLL(pllPPLL_CNTL, tmp | 0x3);
	tmp = INPLL(pllP2PLL_CNTL);
	OUTPLL(pllP2PLL_CNTL, tmp | 0x3);
	tmp = INPLL(pllSPLL_CNTL);
	OUTPLL(pllSPLL_CNTL, tmp | 0x3);
	tmp = INPLL(pllMPLL_CNTL);
	OUTPLL(pllMPLL_CNTL, tmp | 0x3);
}

/*
 * Bring SPLL (engine clock) and MPLL (memory clock) back up and
 * reselect their sources from the register state saved at suspend
 * time (save_regs[2], [3] and [77]).
 */
static void radeon_pm_start_mclk_sclk(struct radeonfb_info *rinfo)
{
	u32 tmp;

	/* Switch SPLL to PCI source */
	tmp = INPLL(pllSCLK_CNTL);
	OUTPLL(pllSCLK_CNTL, tmp & ~SCLK_CNTL__SCLK_SRC_SEL_MASK);

	/* Reconfigure SPLL charge pump, VCO gain, duty cycle */
	tmp = INPLL(pllSPLL_CNTL);
OUTREG8(CLOCK_CNTL_INDEX, pllSPLL_CNTL + PLL_WR_EN);
	radeon_pll_errata_after_index(rinfo);
	OUTREG8(CLOCK_CNTL_DATA + 1, (tmp >> 8) & 0xff);
	radeon_pll_errata_after_data(rinfo);

	/* Set SPLL feedback divider */
	tmp = INPLL(pllM_SPLL_REF_FB_DIV);
	tmp = (tmp & 0xff00fffful) | (rinfo->save_regs[77] & 0x00ff0000ul);
	OUTPLL(pllM_SPLL_REF_FB_DIV, tmp);

	/* Power up SPLL */
	tmp = INPLL(pllSPLL_CNTL);
	OUTPLL(pllSPLL_CNTL, tmp & ~1);
	(void)INPLL(pllSPLL_CNTL);
	mdelay(10);

	/* Release SPLL reset */
	tmp = INPLL(pllSPLL_CNTL);
	OUTPLL(pllSPLL_CNTL, tmp & ~0x2);
	(void)INPLL(pllSPLL_CNTL);
	mdelay(10);

	/* Select SCLK source */
	tmp = INPLL(pllSCLK_CNTL);
	tmp &= ~SCLK_CNTL__SCLK_SRC_SEL_MASK;
	tmp |= rinfo->save_regs[3] & SCLK_CNTL__SCLK_SRC_SEL_MASK;
	OUTPLL(pllSCLK_CNTL, tmp);
	(void)INPLL(pllSCLK_CNTL);
	mdelay(10);

	/* Reconfigure MPLL charge pump, VCO gain, duty cycle */
	tmp = INPLL(pllMPLL_CNTL);
	OUTREG8(CLOCK_CNTL_INDEX, pllMPLL_CNTL + PLL_WR_EN);
	radeon_pll_errata_after_index(rinfo);
	OUTREG8(CLOCK_CNTL_DATA + 1, (tmp >> 8) & 0xff);
	radeon_pll_errata_after_data(rinfo);

	/* Set MPLL feedback divider */
	tmp = INPLL(pllM_SPLL_REF_FB_DIV);
	tmp = (tmp & 0xffff00fful) | (rinfo->save_regs[77] & 0x0000ff00ul);
	OUTPLL(pllM_SPLL_REF_FB_DIV, tmp);

	/* Power up MPLL.
	 * NOTE(review): the SPLL sequence above clears bit 0 before
	 * bit 1, while MPLL clears bit 1 before bit 0 — presumably
	 * copied from the firmware trace; confirm before "fixing".
	 */
	tmp = INPLL(pllMPLL_CNTL);
	OUTPLL(pllMPLL_CNTL, tmp & ~0x2);
	(void)INPLL(pllMPLL_CNTL);
	mdelay(10);

	/* Un-reset MPLL */
	tmp = INPLL(pllMPLL_CNTL);
	OUTPLL(pllMPLL_CNTL, tmp & ~0x1);
	(void)INPLL(pllMPLL_CNTL);
	mdelay(10);

	/* Select source for MCLK */
	tmp = INPLL(pllMCLK_CNTL);
	tmp |= rinfo->save_regs[2] & 0xffff;
	OUTPLL(pllMCLK_CNTL, tmp);
	(void)INPLL(pllMCLK_CNTL);
	mdelay(10);
}

/*
 * Switch the spread spectrum PLL off and disable spread spectrum
 * (M10), replaying reads/writes observed in the firmware trace.
 */
static void radeon_pm_m10_disable_spread_spectrum(struct radeonfb_info *rinfo)
{
	u32 r2ec;

	/* GACK ! I though we didn't have a DDA on Radeon's anymore
	 * here we rewrite with the same value, ... I suppose we clear
	 * some bits that are already clear ? Or maybe this 0x2ec
	 * register is something new ?
	 */
	mdelay(20);
	r2ec = INREG(VGA_DDA_ON_OFF);
	OUTREG(VGA_DDA_ON_OFF, r2ec);
	mdelay(1);

	/* Spread spectrum PLLL off */
	OUTPLL(pllSSPLL_CNTL, 0xbf03);

	/* Spread spectrum disabled */
	OUTPLL(pllSS_INT_CNTL, rinfo->save_regs[90] & ~3);

	/* The trace shows read & rewrite of LVDS_PLL_CNTL here with same
	 * value, not sure what for...
	 */

	r2ec |= 0x3f0;
	OUTREG(VGA_DDA_ON_OFF, r2ec);
	mdelay(1);
}

/*
 * Re-enable the spread spectrum PLL from saved state and power the
 * LVDS interface and LVDS PLL back up (M10).
 */
static void radeon_pm_m10_enable_lvds_spread_spectrum(struct radeonfb_info *rinfo)
{
	u32 r2ec, tmp;

	/* GACK (bis) ! I though we didn't have a DDA on Radeon's anymore
	 * here we rewrite with the same value, ... I suppose we clear/set
	 * some bits that are already clear/set ?
	 */
	r2ec = INREG(VGA_DDA_ON_OFF);
	OUTREG(VGA_DDA_ON_OFF, r2ec);
	mdelay(1);

	/* Enable spread spectrum */
	OUTPLL(pllSSPLL_CNTL, rinfo->save_regs[43] | 3);
	mdelay(3);

	OUTPLL(pllSSPLL_REF_DIV, rinfo->save_regs[44]);
	OUTPLL(pllSSPLL_DIV_0, rinfo->save_regs[45]);
	/* Take SSPLL out of reset (bit 1), then power it up (bit 0) */
	tmp = INPLL(pllSSPLL_CNTL);
	OUTPLL(pllSSPLL_CNTL, tmp & ~0x2);
	mdelay(6);
	tmp = INPLL(pllSSPLL_CNTL);
	OUTPLL(pllSSPLL_CNTL, tmp & ~0x1);
	mdelay(5);

	OUTPLL(pllSS_INT_CNTL, rinfo->save_regs[90]);

	r2ec |= 8;
	OUTREG(VGA_DDA_ON_OFF, r2ec);
	mdelay(20);

	/* Enable LVDS interface */
	tmp = INREG(LVDS_GEN_CNTL);
	OUTREG(LVDS_GEN_CNTL, tmp | LVDS_EN);

	/* Enable LVDS_PLL */
	tmp = INREG(LVDS_PLL_CNTL);
	tmp &= ~0x30000;
	tmp |= 0x10000;
	OUTREG(LVDS_PLL_CNTL, tmp);

	OUTPLL(pllSCLK_MORE_CNTL, rinfo->save_regs[34]);
	OUTPLL(pllSS_TST_CNTL, rinfo->save_regs[91]);

	/* The trace reads that one here, waiting for something to settle down ? */
	INREG(RBBM_STATUS);

	/* Ugh ? SS_TST_DEC is supposed to be a read register in the
	 * R300 register spec at least...
	 */
	tmp = INPLL(pllSS_TST_CNTL);
	tmp |= 0x00400000;
	OUTPLL(pllSS_TST_CNTL, tmp);
}

/*
 * Restore the pixel PLL (PPLL) from the firmware-provided divider
 * values saved at suspend time, then route the pixel clock back to it.
 */
static void radeon_pm_restore_pixel_pll(struct radeonfb_info *rinfo)
{
	u32 tmp;

	OUTREG8(CLOCK_CNTL_INDEX, pllHTOTAL_CNTL + PLL_WR_EN);
	radeon_pll_errata_after_index(rinfo);
	OUTREG8(CLOCK_CNTL_DATA, 0);
	radeon_pll_errata_after_data(rinfo);

	tmp = INPLL(pllVCLK_ECP_CNTL);
	OUTPLL(pllVCLK_ECP_CNTL, tmp | 0x80);
	mdelay(5);

	tmp = INPLL(pllPPLL_REF_DIV);
	tmp = (tmp & ~PPLL_REF_DIV_MASK) | rinfo->pll.ref_div;
	OUTPLL(pllPPLL_REF_DIV, tmp);
	INPLL(pllPPLL_REF_DIV);

	/* Reconfigure SPLL charge pump, VCO gain, duty cycle,
	 * probably useless since we already did it ...
	 * NOTE(review): the value is read from pllPPLL_CNTL but written
	 * back via the pllSPLL_CNTL index — possibly deliberate (from
	 * the trace), possibly a typo; confirm which PLL was intended.
	 */
	tmp = INPLL(pllPPLL_CNTL);
	OUTREG8(CLOCK_CNTL_INDEX, pllSPLL_CNTL + PLL_WR_EN);
	radeon_pll_errata_after_index(rinfo);
	OUTREG8(CLOCK_CNTL_DATA + 1, (tmp >> 8) & 0xff);
	radeon_pll_errata_after_data(rinfo);

	/* Restore our "reference" PPLL divider set by firmware
	 * according to proper spread spectrum calculations
	 */
	OUTPLL(pllPPLL_DIV_0, rinfo->save_regs[92]);

	/* Un-reset, then power up the PPLL */
	tmp = INPLL(pllPPLL_CNTL);
	OUTPLL(pllPPLL_CNTL, tmp & ~0x2);
	mdelay(5);

	tmp = INPLL(pllPPLL_CNTL);
	OUTPLL(pllPPLL_CNTL, tmp & ~0x1);
	mdelay(5);

	/* Written twice in the original trace */
	tmp = INPLL(pllVCLK_ECP_CNTL);
	OUTPLL(pllVCLK_ECP_CNTL, tmp | 3);
	mdelay(5);

	tmp = INPLL(pllVCLK_ECP_CNTL);
	OUTPLL(pllVCLK_ECP_CNTL, tmp | 3);
	mdelay(5);

	/* Switch pixel clock to firmware default div 0 */
	OUTREG8(CLOCK_CNTL_INDEX+1, 0);
	radeon_pll_errata_after_index(rinfo);
	radeon_pll_errata_after_data(rinfo);
}

/*
 * Restore the memory controller configuration of an M10 (R300-class)
 * chip from the save_regs[] snapshot taken at suspend time.
 */
static void radeon_pm_m10_reconfigure_mc(struct radeonfb_info *rinfo)
{
	OUTREG(MC_CNTL, rinfo->save_regs[46]);
	OUTREG(MC_INIT_GFX_LAT_TIMER, rinfo->save_regs[47]);
	OUTREG(MC_INIT_MISC_LAT_TIMER, rinfo->save_regs[48]);
	OUTREG(MEM_SDRAM_MODE_REG,
	       rinfo->save_regs[35] & ~MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);
	OUTREG(MC_TIMING_CNTL, rinfo->save_regs[49]);
	OUTREG(MEM_REFRESH_CNTL, rinfo->save_regs[42]);
	OUTREG(MC_READ_CNTL_AB, rinfo->save_regs[50]);
	OUTREG(MC_CHIP_IO_OE_CNTL_AB, rinfo->save_regs[52]);
	OUTREG(MC_IOPAD_CNTL,
rinfo->save_regs[51]);
	OUTREG(MC_DEBUG, rinfo->save_regs[53]);

	/* Indirect (MC_IND) R300 memory controller registers.
	 * NOTE(review): save_regs[96] is also written to HDP_DEBUG in
	 * radeon_reinitialize_M10() below — confirm both uses really
	 * share one save slot.
	 */
	OUTMC(rinfo, ixR300_MC_MC_INIT_WR_LAT_TIMER, rinfo->save_regs[58]);
	OUTMC(rinfo, ixR300_MC_IMP_CNTL, rinfo->save_regs[59]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_C0, rinfo->save_regs[60]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_C1, rinfo->save_regs[61]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_D0, rinfo->save_regs[62]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_D1, rinfo->save_regs[63]);
	OUTMC(rinfo, ixR300_MC_BIST_CNTL_3, rinfo->save_regs[64]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_A0, rinfo->save_regs[65]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_A1, rinfo->save_regs[66]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_B0, rinfo->save_regs[67]);
	OUTMC(rinfo, ixR300_MC_CHP_IO_CNTL_B1, rinfo->save_regs[68]);
	OUTMC(rinfo, ixR300_MC_DEBUG_CNTL, rinfo->save_regs[69]);
	OUTMC(rinfo, ixR300_MC_DLL_CNTL, rinfo->save_regs[70]);
	OUTMC(rinfo, ixR300_MC_IMP_CNTL_0, rinfo->save_regs[71]);
	OUTMC(rinfo, ixR300_MC_ELPIDA_CNTL, rinfo->save_regs[72]);
	OUTMC(rinfo, ixR300_MC_READ_CNTL_CD, rinfo->save_regs[96]);
	OUTREG(MC_IND_INDEX, 0);
}

/*
 * Full wake-from-sleep reinitialization of an M10 (RV350 PowerBook /
 * M11 iBook) chip: restore saved registers, power the PLLs and memory
 * controller back up, reset SDRAM, and bring the LVDS panel back.
 * The sequence replays a register trace (see the inline comments).
 */
static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
{
	u32 tmp, i;

	/* Restore a bunch of registers first */
	OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]);
	OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]);
	OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
	OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]);
	OUTREG(OV0_BASE_ADDR, rinfo->save_regs[80]);
	OUTREG(CNFG_MEMSIZE, rinfo->video_ram);
	OUTREG(BUS_CNTL, rinfo->save_regs[36]);
	OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
	OUTREG(MPP_TB_CONFIG, rinfo->save_regs[37]);
	OUTREG(FCP_CNTL, rinfo->save_regs[38]);
	OUTREG(RBBM_CNTL, rinfo->save_regs[39]);
	OUTREG(DAC_CNTL, rinfo->save_regs[40]);
	/* Written twice in the original trace */
	OUTREG(DAC_MACRO_CNTL, (INREG(DAC_MACRO_CNTL) & ~0x6) | 8);
	OUTREG(DAC_MACRO_CNTL, (INREG(DAC_MACRO_CNTL) & ~0x6) | 8);

	/* Hrm... */
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) | DAC2_EXPAND_MODE);

	/* Reset the PAD CTLR */
	radeon_pm_reset_pad_ctlr_strength(rinfo);

	/* Some PLLs are Read & written identically in the trace here...
	 * I suppose it's actually to switch them all off & reset,
	 * let's assume off is what we want. I'm just doing that for all major PLLs now.
	 */
	radeon_pm_all_ppls_off(rinfo);

	/* Clear tiling, reset swappers */
	INREG(SURFACE_CNTL);
	OUTREG(SURFACE_CNTL, 0);

	/* Some black magic with TV_DAC_CNTL, we should restore those from backups
	 * rather than hard coding...
	 */
	tmp = INREG(TV_DAC_CNTL) & ~TV_DAC_CNTL_BGADJ_MASK;
	tmp |= 8 << TV_DAC_CNTL_BGADJ__SHIFT;
	OUTREG(TV_DAC_CNTL, tmp);

	tmp = INREG(TV_DAC_CNTL) & ~TV_DAC_CNTL_DACADJ_MASK;
	tmp |= 7 << TV_DAC_CNTL_DACADJ__SHIFT;
	OUTREG(TV_DAC_CNTL, tmp);

	/* More registers restored */
	OUTREG(AGP_CNTL, rinfo->save_regs[16]);
	OUTREG(HOST_PATH_CNTL, rinfo->save_regs[41]);
	OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);

	/* Hrmmm ... What is that ? */
	tmp = rinfo->save_regs[1]
		& ~(CLK_PWRMGT_CNTL__ACTIVE_HILO_LAT_MASK |
		    CLK_PWRMGT_CNTL__MC_BUSY);
	OUTPLL(pllCLK_PWRMGT_CNTL, tmp);

	OUTREG(PAD_CTLR_MISC, rinfo->save_regs[56]);
	OUTREG(FW_CNTL, rinfo->save_regs[57]);
	OUTREG(HDP_DEBUG, rinfo->save_regs[96]);
	OUTREG(PAMAC0_DLY_CNTL, rinfo->save_regs[54]);
	OUTREG(PAMAC1_DLY_CNTL, rinfo->save_regs[55]);
	OUTREG(PAMAC2_DLY_CNTL, rinfo->save_regs[79]);

	/* Restore Memory Controller configuration */
	radeon_pm_m10_reconfigure_mc(rinfo);

	/* Make sure CRTC's dont touch memory */
	OUTREG(CRTC_GEN_CNTL, INREG(CRTC_GEN_CNTL)
	       | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B);
	OUTREG(CRTC2_GEN_CNTL, INREG(CRTC2_GEN_CNTL)
	       | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B);
	mdelay(30);

	/* Disable SDRAM refresh */
	OUTREG(MEM_REFRESH_CNTL, INREG(MEM_REFRESH_CNTL)
	       | MEM_REFRESH_CNTL__MEM_REFRESH_DIS);

	/* Restore XTALIN routing (CLK_PIN_CNTL) */
	OUTPLL(pllCLK_PIN_CNTL, rinfo->save_regs[4]);

	/* Switch MCLK, YCLK and SCLK PLLs to PCI source & force them ON */
	tmp = rinfo->save_regs[2] & 0xff000000;
	tmp |= MCLK_CNTL__FORCE_MCLKA |
		MCLK_CNTL__FORCE_MCLKB |
		MCLK_CNTL__FORCE_YCLKA |
		MCLK_CNTL__FORCE_YCLKB |
		MCLK_CNTL__FORCE_MC;
	OUTPLL(pllMCLK_CNTL, tmp);

	/* Force all clocks on in SCLK */
	tmp = INPLL(pllSCLK_CNTL);
	tmp |= SCLK_CNTL__FORCE_DISP2|
		SCLK_CNTL__FORCE_CP|
		SCLK_CNTL__FORCE_HDP|
		SCLK_CNTL__FORCE_DISP1|
		SCLK_CNTL__FORCE_TOP|
		SCLK_CNTL__FORCE_E2|
		SCLK_CNTL__FORCE_SE|
		SCLK_CNTL__FORCE_IDCT|
		SCLK_CNTL__FORCE_VIP|
		SCLK_CNTL__FORCE_PB|
		SCLK_CNTL__FORCE_TAM|
		SCLK_CNTL__FORCE_TDM|
		SCLK_CNTL__FORCE_RB|
		SCLK_CNTL__FORCE_TV_SCLK|
		SCLK_CNTL__FORCE_SUBPIC|
		SCLK_CNTL__FORCE_OV0;
	tmp |= SCLK_CNTL__CP_MAX_DYN_STOP_LAT |
		SCLK_CNTL__HDP_MAX_DYN_STOP_LAT |
		SCLK_CNTL__TV_MAX_DYN_STOP_LAT |
		SCLK_CNTL__E2_MAX_DYN_STOP_LAT |
		SCLK_CNTL__SE_MAX_DYN_STOP_LAT |
		SCLK_CNTL__IDCT_MAX_DYN_STOP_LAT|
		SCLK_CNTL__VIP_MAX_DYN_STOP_LAT |
		SCLK_CNTL__RE_MAX_DYN_STOP_LAT |
		SCLK_CNTL__PB_MAX_DYN_STOP_LAT |
		SCLK_CNTL__TAM_MAX_DYN_STOP_LAT |
		SCLK_CNTL__TDM_MAX_DYN_STOP_LAT |
		SCLK_CNTL__RB_MAX_DYN_STOP_LAT;
	OUTPLL(pllSCLK_CNTL, tmp);

	OUTPLL(pllVCLK_ECP_CNTL, 0);
	OUTPLL(pllPIXCLKS_CNTL, 0);
	OUTPLL(pllMCLK_MISC,
	       MCLK_MISC__MC_MCLK_MAX_DYN_STOP_LAT |
	       MCLK_MISC__IO_MCLK_MAX_DYN_STOP_LAT);

	mdelay(5);

	/* Restore the M_SPLL_REF_FB_DIV, MPLL_AUX_CNTL and SPLL_AUX_CNTL values */
	OUTPLL(pllM_SPLL_REF_FB_DIV, rinfo->save_regs[77]);
	OUTPLL(pllMPLL_AUX_CNTL, rinfo->save_regs[75]);
	OUTPLL(pllSPLL_AUX_CNTL, rinfo->save_regs[76]);

	/* Now restore the major PLLs settings, keeping them off & reset though */
	OUTPLL(pllPPLL_CNTL, rinfo->save_regs[93] | 0x3);
	OUTPLL(pllP2PLL_CNTL, rinfo->save_regs[8] | 0x3);
	OUTPLL(pllMPLL_CNTL, rinfo->save_regs[73] | 0x03);
	OUTPLL(pllSPLL_CNTL, rinfo->save_regs[74] | 0x03);

	/* Restore MC DLL state and switch it off/reset too */
	OUTMC(rinfo, ixR300_MC_DLL_CNTL, rinfo->save_regs[70]);

	/* Switch MDLL off & reset */
	OUTPLL(pllMDLL_RDCKA, rinfo->save_regs[98] | 0xff);
	mdelay(5);

	/* Setup some black magic bits in PLL_PWRMGT_CNTL. Hrm... we saved
	 * 0xa1100007... and MacOS writes 0xa1000007 ..
	 */
	OUTPLL(pllPLL_PWRMGT_CNTL, rinfo->save_regs[0]);

	/* Restore more stuffs */
	OUTPLL(pllHTOTAL_CNTL, 0);
	OUTPLL(pllHTOTAL2_CNTL, 0);

	/* More PLL initial configuration */
	tmp = INPLL(pllSCLK_CNTL2); /* What for ? */
	OUTPLL(pllSCLK_CNTL2, tmp);

	tmp = INPLL(pllSCLK_MORE_CNTL);
	tmp |= SCLK_MORE_CNTL__FORCE_DISPREGS |	/* a guess */
		SCLK_MORE_CNTL__FORCE_MC_GUI |
		SCLK_MORE_CNTL__FORCE_MC_HOST;
	OUTPLL(pllSCLK_MORE_CNTL, tmp);

	/* Now we actually start MCLK and SCLK */
	radeon_pm_start_mclk_sclk(rinfo);

	/* Full reset sdrams, this also re-inits the MDLL */
	radeon_pm_full_reset_sdram(rinfo);

	/* Fill palettes.
	 * NOTE(review): the first clear uses "& ~20" (decimal 20 =
	 * 0x14) while the second uses "& ~0x20" — almost certainly a
	 * typo for ~0x20 carried over from the original code; kept
	 * as-is because the sequence mirrors a hardware trace. Confirm
	 * on hardware before changing.
	 */
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) | 0x20);
	for (i=0; i<256; i++)
		OUTREG(PALETTE_30_DATA, 0x15555555);
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) & ~20);
	udelay(20);
	for (i=0; i<256; i++)
		OUTREG(PALETTE_30_DATA, 0x15555555);
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) & ~0x20);
	mdelay(3);

	/* Restore TMDS */
	OUTREG(FP_GEN_CNTL, rinfo->save_regs[82]);
	OUTREG(FP2_GEN_CNTL, rinfo->save_regs[83]);

	/* Set LVDS registers but keep interface & pll down */
	OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11] &
	       ~(LVDS_EN | LVDS_ON | LVDS_DIGON | LVDS_BLON | LVDS_BL_MOD_EN));
	OUTREG(LVDS_PLL_CNTL, (rinfo->save_regs[12] & ~0xf0000) | 0x20000);

	OUTREG(DISP_OUTPUT_CNTL, rinfo->save_regs[86]);

	/* Restore GPIOPAD state */
	OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
	OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
	OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);

	/* write some stuff to the framebuffer... */
	for (i = 0; i < 0x8000; ++i)
		writeb(0, rinfo->fb_base + i);

	mdelay(40);
	OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) | LVDS_DIGON | LVDS_ON);
	mdelay(40);

	/* Restore a few more things */
	OUTREG(GRPH_BUFFER_CNTL, rinfo->save_regs[94]);
	OUTREG(GRPH2_BUFFER_CNTL, rinfo->save_regs[95]);

	/* Take care of spread spectrum & PPLLs now */
	radeon_pm_m10_disable_spread_spectrum(rinfo);
	radeon_pm_restore_pixel_pll(rinfo);

	/* GRRRR... I can't figure out the proper LVDS power sequence, and the
	 * code I have for blank/unblank doesn't quite work on some laptop models
	 * it seems ... Hrm. What I have here works most of the time ...
	 */
	radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}

#ifdef CONFIG_PPC_OF

/*
 * Restore the memory controller configuration of an M9+ (RV280) chip
 * from the save_regs[] snapshot (the comments give the values seen in
 * the reference trace).
 */
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
	OUTREG(MC_CNTL, rinfo->save_regs[46]);
	OUTREG(MC_INIT_GFX_LAT_TIMER, rinfo->save_regs[47]);
	OUTREG(MC_INIT_MISC_LAT_TIMER, rinfo->save_regs[48]);
	OUTREG(MEM_SDRAM_MODE_REG,
	       rinfo->save_regs[35] & ~MEM_SDRAM_MODE_REG__MC_INIT_COMPLETE);
	OUTREG(MC_TIMING_CNTL, rinfo->save_regs[49]);
	OUTREG(MC_READ_CNTL_AB, rinfo->save_regs[50]);
	OUTREG(MEM_REFRESH_CNTL, rinfo->save_regs[42]);
	OUTREG(MC_IOPAD_CNTL, rinfo->save_regs[51]);
	OUTREG(MC_DEBUG, rinfo->save_regs[53]);
	OUTREG(MC_CHIP_IO_OE_CNTL_AB, rinfo->save_regs[52]);

	OUTMC(rinfo, ixMC_IMP_CNTL, rinfo->save_regs[59] /*0x00f460d6*/);
	OUTMC(rinfo, ixMC_CHP_IO_CNTL_A0, rinfo->save_regs[65] /*0xfecfa666*/);
	OUTMC(rinfo, ixMC_CHP_IO_CNTL_A1, rinfo->save_regs[66] /*0x141555ff*/);
	OUTMC(rinfo, ixMC_CHP_IO_CNTL_B0, rinfo->save_regs[67] /*0xfecfa666*/);
	OUTMC(rinfo, ixMC_CHP_IO_CNTL_B1, rinfo->save_regs[68] /*0x141555ff*/);
	OUTMC(rinfo, ixMC_IMP_CNTL_0, rinfo->save_regs[71] /*0x00009249*/);
	OUTREG(MC_IND_INDEX, 0);
	OUTREG(CNFG_MEMSIZE, rinfo->video_ram);

	mdelay(20);
}

/*
 * Full wake-from-sleep reinitialization of an M9+ (RV280, iBook G4)
 * chip — the M9+ counterpart of radeon_reinitialize_M10() above.
 */
static void radeon_reinitialize_M9P(struct radeonfb_info *rinfo)
{
	u32 tmp, i;

	/* Restore a bunch of registers first */
	OUTREG(SURFACE_CNTL, rinfo->save_regs[29]);
	OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]);
	OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]);
	OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
	OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]);
	OUTREG(OV0_BASE_ADDR, rinfo->save_regs[80]);
	OUTREG(BUS_CNTL, rinfo->save_regs[36]);
	OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
	OUTREG(MPP_TB_CONFIG, rinfo->save_regs[37]);
	OUTREG(FCP_CNTL, rinfo->save_regs[38]);
	OUTREG(RBBM_CNTL, rinfo->save_regs[39]);
	OUTREG(DAC_CNTL,
rinfo->save_regs[40]);
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) | DAC2_EXPAND_MODE);

	/* Reset the PAD CTLR */
	radeon_pm_reset_pad_ctlr_strength(rinfo);

	/* Some PLLs are Read & written identically in the trace here...
	 * I suppose it's actually to switch them all off & reset,
	 * let's assume off is what we want. I'm just doing that for all major PLLs now.
	 */
	radeon_pm_all_ppls_off(rinfo);

	/* Clear tiling, reset swappers */
	INREG(SURFACE_CNTL);
	OUTREG(SURFACE_CNTL, 0);

	/* Some black magic with TV_DAC_CNTL, we should restore those from backups
	 * rather than hard coding...
	 */
	tmp = INREG(TV_DAC_CNTL) & ~TV_DAC_CNTL_BGADJ_MASK;
	tmp |= 6 << TV_DAC_CNTL_BGADJ__SHIFT;
	OUTREG(TV_DAC_CNTL, tmp);

	tmp = INREG(TV_DAC_CNTL) & ~TV_DAC_CNTL_DACADJ_MASK;
	tmp |= 6 << TV_DAC_CNTL_DACADJ__SHIFT;
	OUTREG(TV_DAC_CNTL, tmp);

	OUTPLL(pllAGP_PLL_CNTL, rinfo->save_regs[78]);

	OUTREG(PAMAC0_DLY_CNTL, rinfo->save_regs[54]);
	OUTREG(PAMAC1_DLY_CNTL, rinfo->save_regs[55]);
	OUTREG(PAMAC2_DLY_CNTL, rinfo->save_regs[79]);

	OUTREG(AGP_CNTL, rinfo->save_regs[16]);
	OUTREG(HOST_PATH_CNTL, rinfo->save_regs[41]); /* MacOS sets that to 0 !!! */
	OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);

	tmp = rinfo->save_regs[1]
		& ~(CLK_PWRMGT_CNTL__ACTIVE_HILO_LAT_MASK |
		    CLK_PWRMGT_CNTL__MC_BUSY);
	OUTPLL(pllCLK_PWRMGT_CNTL, tmp);

	OUTREG(FW_CNTL, rinfo->save_regs[57]);

	/* Disable SDRAM refresh */
	OUTREG(MEM_REFRESH_CNTL, INREG(MEM_REFRESH_CNTL)
	       | MEM_REFRESH_CNTL__MEM_REFRESH_DIS);

	/* Restore XTALIN routing (CLK_PIN_CNTL) */
	OUTPLL(pllCLK_PIN_CNTL, rinfo->save_regs[4]);

	/* Force MCLK to be PCI sourced and forced ON */
	tmp = rinfo->save_regs[2] & 0xff000000;
	tmp |= MCLK_CNTL__FORCE_MCLKA |
		MCLK_CNTL__FORCE_MCLKB |
		MCLK_CNTL__FORCE_YCLKA |
		MCLK_CNTL__FORCE_YCLKB |
		MCLK_CNTL__FORCE_MC |
		MCLK_CNTL__FORCE_AIC;
	OUTPLL(pllMCLK_CNTL, tmp);

	/* Force SCLK to be PCI sourced with a bunch forced */
	tmp = 0 |
		SCLK_CNTL__FORCE_DISP2|
		SCLK_CNTL__FORCE_CP|
		SCLK_CNTL__FORCE_HDP|
		SCLK_CNTL__FORCE_DISP1|
		SCLK_CNTL__FORCE_TOP|
		SCLK_CNTL__FORCE_E2|
		SCLK_CNTL__FORCE_SE|
		SCLK_CNTL__FORCE_IDCT|
		SCLK_CNTL__FORCE_VIP|
		SCLK_CNTL__FORCE_RE|
		SCLK_CNTL__FORCE_PB|
		SCLK_CNTL__FORCE_TAM|
		SCLK_CNTL__FORCE_TDM|
		SCLK_CNTL__FORCE_RB;
	OUTPLL(pllSCLK_CNTL, tmp);

	/* Clear VCLK_ECP_CNTL & PIXCLKS_CNTL */
	OUTPLL(pllVCLK_ECP_CNTL, 0);
	OUTPLL(pllPIXCLKS_CNTL, 0);

	/* Setup MCLK_MISC, non dynamic mode */
	OUTPLL(pllMCLK_MISC,
	       MCLK_MISC__MC_MCLK_MAX_DYN_STOP_LAT |
	       MCLK_MISC__IO_MCLK_MAX_DYN_STOP_LAT);

	mdelay(5);

	/* Set back the default clock dividers */
	OUTPLL(pllM_SPLL_REF_FB_DIV, rinfo->save_regs[77]);
	OUTPLL(pllMPLL_AUX_CNTL, rinfo->save_regs[75]);
	OUTPLL(pllSPLL_AUX_CNTL, rinfo->save_regs[76]);

	/* PPLL and P2PLL default values & off */
	OUTPLL(pllPPLL_CNTL, rinfo->save_regs[93] | 0x3);
	OUTPLL(pllP2PLL_CNTL, rinfo->save_regs[8] | 0x3);

	/* S and M PLLs are reset & off, configure them */
	OUTPLL(pllMPLL_CNTL, rinfo->save_regs[73] | 0x03);
	OUTPLL(pllSPLL_CNTL, rinfo->save_regs[74] | 0x03);

	/* Default values for MDLL ... fixme */
	OUTPLL(pllMDLL_CKO, 0x9c009c);
	OUTPLL(pllMDLL_RDCKA, 0x08830883);
	OUTPLL(pllMDLL_RDCKB, 0x08830883);

	mdelay(5);

	/* Restore PLL_PWRMGT_CNTL */ // XXXX
	tmp = rinfo->save_regs[0];
	tmp &= ~PLL_PWRMGT_CNTL_SU_SCLK_USE_BCLK;
	tmp |= PLL_PWRMGT_CNTL_SU_MCLK_USE_BCLK;
	/* NOTE(review): this uses PLL_PWRMGT_CNTL without the usual
	 * "pll" index prefix, unlike the other OUTPLL call sites —
	 * confirm the intended PLL index.
	 */
	OUTPLL(PLL_PWRMGT_CNTL, tmp);

	/* Clear HTOTAL_CNTL & HTOTAL2_CNTL */
	OUTPLL(pllHTOTAL_CNTL, 0);
	OUTPLL(pllHTOTAL2_CNTL, 0);

	/* All outputs off */
	OUTREG(CRTC_GEN_CNTL, 0x04000000);
	OUTREG(CRTC2_GEN_CNTL, 0x04000000);
	OUTREG(FP_GEN_CNTL, 0x00004008);
	OUTREG(FP2_GEN_CNTL, 0x00000008);
	OUTREG(LVDS_GEN_CNTL, 0x08000008);

	/* Restore Memory Controller configuration */
	radeon_pm_m9p_reconfigure_mc(rinfo);

	/* Now we actually start MCLK and SCLK */
	radeon_pm_start_mclk_sclk(rinfo);

	/* Full reset sdrams, this also re-inits the MDLL */
	radeon_pm_full_reset_sdram(rinfo);

	/* Fill palettes.
	 * NOTE(review): "& ~20" (decimal) vs "& ~0x20" below — same
	 * suspected typo as in radeon_reinitialize_M10(); kept as-is,
	 * confirm on hardware.
	 */
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) | 0x20);
	for (i=0; i<256; i++)
		OUTREG(PALETTE_30_DATA, 0x15555555);
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) & ~20);
	udelay(20);
	for (i=0; i<256; i++)
		OUTREG(PALETTE_30_DATA, 0x15555555);
	OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) & ~0x20);
	mdelay(3);

	/* Restore TV stuff, make sure TV DAC is down */
	OUTREG(TV_MASTER_CNTL, rinfo->save_regs[88]);
	OUTREG(TV_DAC_CNTL, rinfo->save_regs[13] | 0x07000000);

	/* Restore GPIOS. MacOS does some magic here with one of the GPIO bits,
	 * possibly related to the weird PLL related workarounds and to the
	 * fact that CLK_PIN_CNTL is tweaked in ways I don't fully understand,
	 * but we keep things the simple way here
	 */
	OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
	OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
	OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);

	/* Now do things with SCLK_MORE_CNTL. Force bits are already set, copy
	 * high bits from backup
	 * (the whole read/modify/write is performed twice in the trace)
	 */
	tmp = INPLL(pllSCLK_MORE_CNTL) & 0x0000ffff;
	tmp |= rinfo->save_regs[34] & 0xffff0000;
	tmp |= SCLK_MORE_CNTL__FORCE_DISPREGS;
	OUTPLL(pllSCLK_MORE_CNTL, tmp);

	tmp = INPLL(pllSCLK_MORE_CNTL) & 0x0000ffff;
	tmp |= rinfo->save_regs[34] & 0xffff0000;
	tmp |= SCLK_MORE_CNTL__FORCE_DISPREGS;
	OUTPLL(pllSCLK_MORE_CNTL, tmp);

	/* Program LVDS, keeping interface & PLL down, then raise BLON */
	OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11] &
	       ~(LVDS_EN | LVDS_ON | LVDS_DIGON | LVDS_BLON | LVDS_BL_MOD_EN));
	OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) | LVDS_BLON);
	OUTREG(LVDS_PLL_CNTL, (rinfo->save_regs[12] & ~0xf0000) | 0x20000);
	mdelay(20);

	/* write some stuff to the framebuffer... */
	for (i = 0; i < 0x8000; ++i)
		writeb(0, rinfo->fb_base + i);

	/* Spread-spectrum PLL bring-up via the raw 0x2ec register */
	OUTREG(0x2ec, 0x6332a020);
	OUTPLL(pllSSPLL_REF_DIV, rinfo->save_regs[44] /*0x3f */);
	OUTPLL(pllSSPLL_DIV_0, rinfo->save_regs[45] /*0x000081bb */);
	tmp = INPLL(pllSSPLL_CNTL);
	tmp &= ~2;
	OUTPLL(pllSSPLL_CNTL, tmp);
	mdelay(6);
	tmp &= ~1;
	OUTPLL(pllSSPLL_CNTL, tmp);
	mdelay(5);
	tmp |= 3;
	OUTPLL(pllSSPLL_CNTL, tmp);
	mdelay(5);

	OUTPLL(pllSS_INT_CNTL, rinfo->save_regs[90] & ~3);/*0x0020300c*/

	OUTREG(0x2ec, 0x6332a3f0);
	mdelay(17);

	OUTPLL(pllPPLL_REF_DIV, rinfo->pll.ref_div);
	OUTPLL(pllPPLL_DIV_0, rinfo->save_regs[92]);

	mdelay(40);
	OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) | LVDS_DIGON | LVDS_ON);
	mdelay(40);

	/* Restore a few more things */
	OUTREG(GRPH_BUFFER_CNTL, rinfo->save_regs[94]);
	OUTREG(GRPH2_BUFFER_CNTL, rinfo->save_regs[95]);

	/* Restore PPLL, spread spectrum & LVDS */
	radeon_pm_m10_disable_spread_spectrum(rinfo);
	radeon_pm_restore_pixel_pll(rinfo);
	radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}

#if 0 /* Not ready yet */
static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
{
	int i;
	u32 tmp, tmp2;
	u32 cko, cka, ckb;
	u32 cgc, cec, c2gc;

	OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]);
	OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]);
	OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
	OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]);
OUTREG(BUS_CNTL, rinfo->save_regs[36]); OUTREG(RBBM_CNTL, rinfo->save_regs[39]); INREG(PAD_CTLR_STRENGTH); OUTREG(PAD_CTLR_STRENGTH, INREG(PAD_CTLR_STRENGTH) & ~0x10000); for (i = 0; i < 65; ++i) { mdelay(1); INREG(PAD_CTLR_STRENGTH); } OUTREG(DISP_TEST_DEBUG_CNTL, INREG(DISP_TEST_DEBUG_CNTL) | 0x10000000); OUTREG(OV0_FLAG_CNTRL, INREG(OV0_FLAG_CNTRL) | 0x100); OUTREG(CRTC_GEN_CNTL, INREG(CRTC_GEN_CNTL)); OUTREG(DAC_CNTL, 0xff00410a); OUTREG(CRTC2_GEN_CNTL, INREG(CRTC2_GEN_CNTL)); OUTREG(DAC_CNTL2, INREG(DAC_CNTL2) | 0x4000); OUTREG(SURFACE_CNTL, rinfo->save_regs[29]); OUTREG(AGP_CNTL, rinfo->save_regs[16]); OUTREG(HOST_PATH_CNTL, rinfo->save_regs[41]); OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]); OUTMC(rinfo, ixMC_CHP_IO_CNTL_A0, 0xf7bb4433); OUTREG(MC_IND_INDEX, 0); OUTMC(rinfo, ixMC_CHP_IO_CNTL_B0, 0xf7bb4433); OUTREG(MC_IND_INDEX, 0); OUTREG(CRTC_MORE_CNTL, INREG(CRTC_MORE_CNTL)); tmp = INPLL(pllVCLK_ECP_CNTL); OUTPLL(pllVCLK_ECP_CNTL, tmp); tmp = INPLL(pllPIXCLKS_CNTL); OUTPLL(pllPIXCLKS_CNTL, tmp); OUTPLL(MCLK_CNTL, 0xaa3f0000); OUTPLL(SCLK_CNTL, 0xffff0000); OUTPLL(pllMPLL_AUX_CNTL, 6); OUTPLL(pllSPLL_AUX_CNTL, 1); OUTPLL(MDLL_CKO, 0x9f009f); OUTPLL(MDLL_RDCKA, 0x830083); OUTPLL(pllMDLL_RDCKB, 0x830083); OUTPLL(PPLL_CNTL, 0xa433); OUTPLL(P2PLL_CNTL, 0xa433); OUTPLL(MPLL_CNTL, 0x0400a403); OUTPLL(SPLL_CNTL, 0x0400a433); tmp = INPLL(M_SPLL_REF_FB_DIV); OUTPLL(M_SPLL_REF_FB_DIV, tmp); tmp = INPLL(M_SPLL_REF_FB_DIV); OUTPLL(M_SPLL_REF_FB_DIV, tmp | 0xc); INPLL(M_SPLL_REF_FB_DIV); tmp = INPLL(MPLL_CNTL); OUTREG8(CLOCK_CNTL_INDEX, MPLL_CNTL + PLL_WR_EN); radeon_pll_errata_after_index(rinfo); OUTREG8(CLOCK_CNTL_DATA + 1, (tmp >> 8) & 0xff); radeon_pll_errata_after_data(rinfo); tmp = INPLL(M_SPLL_REF_FB_DIV); OUTPLL(M_SPLL_REF_FB_DIV, tmp | 0x5900); tmp = INPLL(MPLL_CNTL); OUTPLL(MPLL_CNTL, tmp & ~0x2); mdelay(1); tmp = INPLL(MPLL_CNTL); OUTPLL(MPLL_CNTL, tmp & ~0x1); mdelay(10); OUTPLL(MCLK_CNTL, 0xaa3f1212); mdelay(1); INPLL(M_SPLL_REF_FB_DIV); INPLL(MCLK_CNTL); 
INPLL(M_SPLL_REF_FB_DIV); tmp = INPLL(SPLL_CNTL); OUTREG8(CLOCK_CNTL_INDEX, SPLL_CNTL + PLL_WR_EN); radeon_pll_errata_after_index(rinfo); OUTREG8(CLOCK_CNTL_DATA + 1, (tmp >> 8) & 0xff); radeon_pll_errata_after_data(rinfo); tmp = INPLL(M_SPLL_REF_FB_DIV); OUTPLL(M_SPLL_REF_FB_DIV, tmp | 0x780000); tmp = INPLL(SPLL_CNTL); OUTPLL(SPLL_CNTL, tmp & ~0x1); mdelay(1); tmp = INPLL(SPLL_CNTL); OUTPLL(SPLL_CNTL, tmp & ~0x2); mdelay(10); tmp = INPLL(SCLK_CNTL); OUTPLL(SCLK_CNTL, tmp | 2); mdelay(1); cko = INPLL(pllMDLL_CKO); cka = INPLL(pllMDLL_RDCKA); ckb = INPLL(pllMDLL_RDCKB); cko &= ~(MDLL_CKO__MCKOA_SLEEP | MDLL_CKO__MCKOB_SLEEP); OUTPLL(pllMDLL_CKO, cko); mdelay(1); cko &= ~(MDLL_CKO__MCKOA_RESET | MDLL_CKO__MCKOB_RESET); OUTPLL(pllMDLL_CKO, cko); mdelay(5); cka &= ~(MDLL_RDCKA__MRDCKA0_SLEEP | MDLL_RDCKA__MRDCKA1_SLEEP); OUTPLL(pllMDLL_RDCKA, cka); mdelay(1); cka &= ~(MDLL_RDCKA__MRDCKA0_RESET | MDLL_RDCKA__MRDCKA1_RESET); OUTPLL(pllMDLL_RDCKA, cka); mdelay(5); ckb &= ~(MDLL_RDCKB__MRDCKB0_SLEEP | MDLL_RDCKB__MRDCKB1_SLEEP); OUTPLL(pllMDLL_RDCKB, ckb); mdelay(1); ckb &= ~(MDLL_RDCKB__MRDCKB0_RESET | MDLL_RDCKB__MRDCKB1_RESET); OUTPLL(pllMDLL_RDCKB, ckb); mdelay(5); OUTMC(rinfo, ixMC_CHP_IO_CNTL_A1, 0x151550ff); OUTREG(MC_IND_INDEX, 0); OUTMC(rinfo, ixMC_CHP_IO_CNTL_B1, 0x151550ff); OUTREG(MC_IND_INDEX, 0); mdelay(1); OUTMC(rinfo, ixMC_CHP_IO_CNTL_A1, 0x141550ff); OUTREG(MC_IND_INDEX, 0); OUTMC(rinfo, ixMC_CHP_IO_CNTL_B1, 0x141550ff); OUTREG(MC_IND_INDEX, 0); mdelay(1); OUTPLL(pllHTOTAL_CNTL, 0); OUTPLL(pllHTOTAL2_CNTL, 0); OUTREG(MEM_CNTL, 0x29002901); OUTREG(MEM_SDRAM_MODE_REG, 0x45320032); /* XXX use save_regs[35]? */ OUTREG(EXT_MEM_CNTL, 0x1a394333); OUTREG(MEM_IO_CNTL_A1, 0x0aac0aac); OUTREG(MEM_INIT_LATENCY_TIMER, 0x34444444); OUTREG(MEM_REFRESH_CNTL, 0x1f1f7218); /* XXX or save_regs[42]? 
*/ OUTREG(MC_DEBUG, 0); OUTREG(MEM_IO_OE_CNTL, 0x04300430); OUTMC(rinfo, ixMC_IMP_CNTL, 0x00f460d6); OUTREG(MC_IND_INDEX, 0); OUTMC(rinfo, ixMC_IMP_CNTL_0, 0x00009249); OUTREG(MC_IND_INDEX, 0); OUTREG(CNFG_MEMSIZE, rinfo->video_ram); radeon_pm_full_reset_sdram(rinfo); INREG(FP_GEN_CNTL); OUTREG(TMDS_CNTL, 0x01000000); /* XXX ? */ tmp = INREG(FP_GEN_CNTL); tmp |= FP_CRTC_DONT_SHADOW_HEND | FP_CRTC_DONT_SHADOW_VPAR | 0x200; OUTREG(FP_GEN_CNTL, tmp); tmp = INREG(DISP_OUTPUT_CNTL); tmp &= ~0x400; OUTREG(DISP_OUTPUT_CNTL, tmp); OUTPLL(CLK_PIN_CNTL, rinfo->save_regs[4]); OUTPLL(CLK_PWRMGT_CNTL, rinfo->save_regs[1]); OUTPLL(PLL_PWRMGT_CNTL, rinfo->save_regs[0]); tmp = INPLL(MCLK_MISC); tmp |= MCLK_MISC__MC_MCLK_DYN_ENABLE | MCLK_MISC__IO_MCLK_DYN_ENABLE; OUTPLL(MCLK_MISC, tmp); tmp = INPLL(SCLK_CNTL); OUTPLL(SCLK_CNTL, tmp); OUTREG(CRTC_MORE_CNTL, 0); OUTREG8(CRTC_GEN_CNTL+1, 6); OUTREG8(CRTC_GEN_CNTL+3, 1); OUTREG(CRTC_PITCH, 32); tmp = INPLL(VCLK_ECP_CNTL); OUTPLL(VCLK_ECP_CNTL, tmp); tmp = INPLL(PPLL_CNTL); OUTPLL(PPLL_CNTL, tmp); /* palette stuff and BIOS_1_SCRATCH... 
*/ tmp = INREG(FP_GEN_CNTL); tmp2 = INREG(TMDS_TRANSMITTER_CNTL); tmp |= 2; OUTREG(FP_GEN_CNTL, tmp); mdelay(5); OUTREG(FP_GEN_CNTL, tmp); mdelay(5); OUTREG(TMDS_TRANSMITTER_CNTL, tmp2); OUTREG(CRTC_MORE_CNTL, 0); mdelay(20); tmp = INREG(CRTC_MORE_CNTL); OUTREG(CRTC_MORE_CNTL, tmp); cgc = INREG(CRTC_GEN_CNTL); cec = INREG(CRTC_EXT_CNTL); c2gc = INREG(CRTC2_GEN_CNTL); OUTREG(CRTC_H_SYNC_STRT_WID, 0x008e0580); OUTREG(CRTC_H_TOTAL_DISP, 0x009f00d2); OUTREG8(CLOCK_CNTL_INDEX, HTOTAL_CNTL + PLL_WR_EN); radeon_pll_errata_after_index(rinfo); OUTREG8(CLOCK_CNTL_DATA, 0); radeon_pll_errata_after_data(rinfo); OUTREG(CRTC_V_SYNC_STRT_WID, 0x00830403); OUTREG(CRTC_V_TOTAL_DISP, 0x03ff0429); OUTREG(FP_CRTC_H_TOTAL_DISP, 0x009f0033); OUTREG(FP_H_SYNC_STRT_WID, 0x008e0080); OUTREG(CRT_CRTC_H_SYNC_STRT_WID, 0x008e0080); OUTREG(FP_CRTC_V_TOTAL_DISP, 0x03ff002a); OUTREG(FP_V_SYNC_STRT_WID, 0x00830004); OUTREG(CRT_CRTC_V_SYNC_STRT_WID, 0x00830004); OUTREG(FP_HORZ_VERT_ACTIVE, 0x009f03ff); OUTREG(FP_HORZ_STRETCH, 0); OUTREG(FP_VERT_STRETCH, 0); OUTREG(OVR_CLR, 0); OUTREG(OVR_WID_LEFT_RIGHT, 0); OUTREG(OVR_WID_TOP_BOTTOM, 0); tmp = INPLL(PPLL_REF_DIV); tmp = (tmp & ~PPLL_REF_DIV_MASK) | rinfo->pll.ref_div; OUTPLL(PPLL_REF_DIV, tmp); INPLL(PPLL_REF_DIV); OUTREG8(CLOCK_CNTL_INDEX, PPLL_CNTL + PLL_WR_EN); radeon_pll_errata_after_index(rinfo); OUTREG8(CLOCK_CNTL_DATA + 1, 0xbc); radeon_pll_errata_after_data(rinfo); tmp = INREG(CLOCK_CNTL_INDEX); radeon_pll_errata_after_index(rinfo); OUTREG(CLOCK_CNTL_INDEX, tmp & 0xff); radeon_pll_errata_after_index(rinfo); radeon_pll_errata_after_data(rinfo); OUTPLL(PPLL_DIV_0, 0x48090); tmp = INPLL(PPLL_CNTL); OUTPLL(PPLL_CNTL, tmp & ~0x2); mdelay(1); tmp = INPLL(PPLL_CNTL); OUTPLL(PPLL_CNTL, tmp & ~0x1); mdelay(10); tmp = INPLL(VCLK_ECP_CNTL); OUTPLL(VCLK_ECP_CNTL, tmp | 3); mdelay(1); tmp = INPLL(VCLK_ECP_CNTL); OUTPLL(VCLK_ECP_CNTL, tmp); c2gc |= CRTC2_DISP_REQ_EN_B; OUTREG(CRTC2_GEN_CNTL, c2gc); cgc |= CRTC_EN; OUTREG(CRTC_GEN_CNTL, cgc); 
OUTREG(CRTC_EXT_CNTL, cec); OUTREG(CRTC_PITCH, 0xa0); OUTREG(CRTC_OFFSET, 0); OUTREG(CRTC_OFFSET_CNTL, 0); OUTREG(GRPH_BUFFER_CNTL, 0x20117c7c); OUTREG(GRPH2_BUFFER_CNTL, 0x00205c5c); tmp2 = INREG(FP_GEN_CNTL); tmp = INREG(TMDS_TRANSMITTER_CNTL); OUTREG(0x2a8, 0x0000061b); tmp |= TMDS_PLL_EN; OUTREG(TMDS_TRANSMITTER_CNTL, tmp); mdelay(1); tmp &= ~TMDS_PLLRST; OUTREG(TMDS_TRANSMITTER_CNTL, tmp); tmp2 &= ~2; tmp2 |= FP_TMDS_EN; OUTREG(FP_GEN_CNTL, tmp2); mdelay(5); tmp2 |= FP_FPON; OUTREG(FP_GEN_CNTL, tmp2); OUTREG(CUR_HORZ_VERT_OFF, CUR_LOCK | 1); cgc = INREG(CRTC_GEN_CNTL); OUTREG(CUR_HORZ_VERT_POSN, 0xbfff0fff); cgc |= 0x10000; OUTREG(CUR_OFFSET, 0); } #endif /* 0 */ #endif /* CONFIG_PPC_OF */ static void radeonfb_whack_power_state(struct radeonfb_info *rinfo, pci_power_t state) { u16 pwr_cmd; for (;;) { pci_read_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, &pwr_cmd); if (pwr_cmd & 2) break; pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2; pci_write_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, pwr_cmd); msleep(500); } rinfo->pdev->current_state = state; } static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) { u32 tmp; if (!rinfo->pm_reg) return; /* Set the chip into appropriate suspend mode (we use D2, * D3 would require a compete re-initialization of the chip, * including PCI config registers, clocks, AGP conf, ...) */ if (suspend) { printk(KERN_DEBUG "radeonfb (%s): switching to D2 state...\n", pci_name(rinfo->pdev)); /* Disable dynamic power management of clocks for the * duration of the suspend/resume process */ radeon_pm_disable_dynamic_mode(rinfo); /* Save some registers */ radeon_pm_save_regs(rinfo, 0); /* Prepare mobility chips for suspend. 
*/ if (rinfo->is_mobility) { /* Program V2CLK */ radeon_pm_program_v2clk(rinfo); /* Disable IO PADs */ radeon_pm_disable_iopad(rinfo); /* Set low current */ radeon_pm_low_current(rinfo); /* Prepare chip for power management */ radeon_pm_setup_for_suspend(rinfo); if (rinfo->family <= CHIP_FAMILY_RV280) { /* Reset the MDLL */ /* because both INPLL and OUTPLL take the same * lock, that's why. */ tmp = INPLL( pllMDLL_CKO) | MDLL_CKO__MCKOA_RESET | MDLL_CKO__MCKOB_RESET; OUTPLL( pllMDLL_CKO, tmp ); } } /* Switch PCI power management to D2. */ pci_disable_device(rinfo->pdev); pci_save_state(rinfo->pdev); /* The chip seems to need us to whack the PM register * repeatedly until it sticks. We do that -prior- to * calling pci_set_power_state() */ radeonfb_whack_power_state(rinfo, PCI_D2); __pci_complete_power_transition(rinfo->pdev, PCI_D2); } else { printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n", pci_name(rinfo->pdev)); if (rinfo->family <= CHIP_FAMILY_RV250) { /* Reset the SDRAM controller */ radeon_pm_full_reset_sdram(rinfo); /* Restore some registers */ radeon_pm_restore_regs(rinfo); } else { /* Restore registers first */ radeon_pm_restore_regs(rinfo); /* init sdram controller */ radeon_pm_full_reset_sdram(rinfo); } } } int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct fb_info *info = pci_get_drvdata(pdev); struct radeonfb_info *rinfo = info->par; if (mesg.event == pdev->dev.power.power_state.event) return 0; printk(KERN_DEBUG "radeonfb (%s): suspending for event: %d...\n", pci_name(pdev), mesg.event); /* For suspend-to-disk, we cheat here. We don't suspend anything and * let fbcon continue drawing until we are all set. 
That shouldn't * really cause any problem at this point, provided that the wakeup * code knows that any state in memory may not match the HW */ switch (mesg.event) { case PM_EVENT_FREEZE: /* about to take snapshot */ case PM_EVENT_PRETHAW: /* before restoring snapshot */ goto done; } console_lock(); fb_set_suspend(info, 1); if (!(info->flags & FBINFO_HWACCEL_DISABLED)) { /* Make sure engine is reset */ radeon_engine_idle(); radeonfb_engine_reset(rinfo); radeon_engine_idle(); } /* Blank display and LCD */ radeon_screen_blank(rinfo, FB_BLANK_POWERDOWN, 1); /* Sleep */ rinfo->asleep = 1; rinfo->lock_blank = 1; del_timer_sync(&rinfo->lvds_timer); #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_suspend_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ /* It's unclear whether or when the generic code will do that, so let's * do it ourselves. 
We save state before we do any power management */ pci_save_state(pdev); /* If we support wakeup from poweroff, we save all regs we can including cfg * space */ if (rinfo->pm_mode & radeon_pm_off) { /* Always disable dynamic clocks or weird things are happening when * the chip goes off (basically the panel doesn't shut down properly * and we crash on wakeup), * also, we want the saved regs context to have no dynamic clocks in * it, we'll restore the dynamic clocks state on wakeup */ radeon_pm_disable_dynamic_mode(rinfo); mdelay(50); radeon_pm_save_regs(rinfo, 1); if (rinfo->is_mobility && !(rinfo->pm_mode & radeon_pm_d2)) { /* Switch off LVDS interface */ mdelay(1); OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_BL_MOD_EN)); mdelay(1); OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_EN | LVDS_ON)); OUTREG(LVDS_PLL_CNTL, (INREG(LVDS_PLL_CNTL) & ~30000) | 0x20000); mdelay(20); OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON)); } pci_disable_device(pdev); } /* If we support D2, we go to it (should be fixed later with a flag forcing * D3 only for some laptops) */ if (rinfo->pm_mode & radeon_pm_d2) radeon_set_suspend(rinfo, 1); console_unlock(); done: pdev->dev.power.power_state = mesg; return 0; } static int radeon_check_power_loss(struct radeonfb_info *rinfo) { return rinfo->save_regs[4] != INPLL(CLK_PIN_CNTL) || rinfo->save_regs[2] != INPLL(MCLK_CNTL) || rinfo->save_regs[3] != INPLL(SCLK_CNTL); } int radeonfb_pci_resume(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct radeonfb_info *rinfo = info->par; int rc = 0; if (pdev->dev.power.power_state.event == PM_EVENT_ON) return 0; if (rinfo->no_schedule) { if (!console_trylock()) return 0; } else console_lock(); printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n", pci_name(pdev), pdev->dev.power.power_state.event); /* PCI state will have been restored by the core, so * we should be in D0 now with our config space fully * restored */ if 
(pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { /* Wakeup chip */ if ((rinfo->pm_mode & radeon_pm_off) && radeon_check_power_loss(rinfo)) { if (rinfo->reinit_func != NULL) rinfo->reinit_func(rinfo); else { printk(KERN_ERR "radeonfb (%s): can't resume radeon from" " D3 cold, need softboot !", pci_name(pdev)); rc = -EIO; goto bail; } } /* If we support D2, try to resume... we should check what was our * state though... (were we really in D2 state ?). Right now, this code * is only enable on Macs so it's fine. */ else if (rinfo->pm_mode & radeon_pm_d2) radeon_set_suspend(rinfo, 0); rinfo->asleep = 0; } else radeon_engine_idle(); /* Restore display & engine */ radeon_write_mode (rinfo, &rinfo->state, 1); if (!(info->flags & FBINFO_HWACCEL_DISABLED)) radeonfb_engine_init (rinfo); fb_pan_display(info, &info->var); fb_set_cmap(&info->cmap, info); /* Refresh */ fb_set_suspend(info, 0); /* Unblank */ rinfo->lock_blank = 0; radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 1); #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. 
We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_resume_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ /* Check status of dynclk */ if (rinfo->dynclk == 1) radeon_pm_enable_dynamic_mode(rinfo); else if (rinfo->dynclk == 0) radeon_pm_disable_dynamic_mode(rinfo); pdev->dev.power.power_state = PMSG_ON; bail: console_unlock(); return rc; } #ifdef CONFIG_PPC_OF__disabled static void radeonfb_early_resume(void *data) { struct radeonfb_info *rinfo = data; rinfo->no_schedule = 1; pci_restore_state(rinfo->pdev); radeonfb_pci_resume(rinfo->pdev); rinfo->no_schedule = 0; } #endif /* CONFIG_PPC_OF */ #endif /* CONFIG_PM */ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep) { /* Find PM registers in config space if any*/ rinfo->pm_reg = pci_find_capability(rinfo->pdev, PCI_CAP_ID_PM); /* Enable/Disable dynamic clocks: TODO add sysfs access */ if (rinfo->family == CHIP_FAMILY_RS480) rinfo->dynclk = -1; else rinfo->dynclk = dynclk; if (rinfo->dynclk == 1) { radeon_pm_enable_dynamic_mode(rinfo); printk("radeonfb: Dynamic Clock Power Management enabled\n"); } else if (rinfo->dynclk == 0) { radeon_pm_disable_dynamic_mode(rinfo); printk("radeonfb: Dynamic Clock Power Management disabled\n"); } #if defined(CONFIG_PM) #if defined(CONFIG_PPC_PMAC) /* Check if we can power manage on suspend/resume. We can do * D2 on M6, M7 and M9, and we can resume from D3 cold a few other * "Mac" cards, but that's all. We need more infos about what the * BIOS does tho. Right now, all this PM stuff is pmac-only for that * reason. 
--BenH */ if (machine_is(powermac) && rinfo->of_node) { if (rinfo->is_mobility && rinfo->pm_reg && rinfo->family <= CHIP_FAMILY_RV250) rinfo->pm_mode |= radeon_pm_d2; /* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip * in some desktop G4s), Via (M9+ chip on iBook G4) and * Snowy (M11 chip on iBook G4 manufactured after July 2005) */ if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") || !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) { rinfo->reinit_func = radeon_reinitialize_M10; rinfo->pm_mode |= radeon_pm_off; } #if 0 /* Not ready yet */ if (!strcmp(rinfo->of_node->name, "ATY,BlueStoneParent")) { rinfo->reinit_func = radeon_reinitialize_QW; rinfo->pm_mode |= radeon_pm_off; } #endif if (!strcmp(rinfo->of_node->name, "ATY,ViaParent")) { rinfo->reinit_func = radeon_reinitialize_M9P; rinfo->pm_mode |= radeon_pm_off; } /* If any of the above is set, we assume the machine can sleep/resume. * It's a bit of a "shortcut" but will work fine. Ideally, we need infos * from the platform about what happens to the chip... * Now we tell the platform about our capability */ if (rinfo->pm_mode != radeon_pm_none) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, rinfo->of_node, 0, 1); #if 0 /* Disable the early video resume hack for now as it's causing problems, among * others we now rely on the PCI core restoring the config space for us, which * isn't the case with that hack, and that code path causes various things to * be called with interrupts off while they shouldn't. 
I'm leaving the code in * as it can be useful for debugging purposes */ pmac_set_early_video_resume(radeonfb_early_resume, rinfo); #endif } #if 0 /* Power down TV DAC, that saves a significant amount of power, * we'll have something better once we actually have some TVOut * support */ OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000); #endif } #endif /* defined(CONFIG_PPC_PMAC) */ #endif /* defined(CONFIG_PM) */ if (ignore_devlist) printk(KERN_DEBUG "radeonfb: skipping test for device workarounds\n"); else radeon_apply_workarounds(rinfo); if (force_sleep) { printk(KERN_DEBUG "radeonfb: forcefully enabling D2 sleep mode\n"); rinfo->pm_mode |= radeon_pm_d2; } } void radeonfb_pm_exit(struct radeonfb_info *rinfo) { #if defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) if (rinfo->pm_mode != radeon_pm_none) pmac_set_early_video_resume(NULL, NULL); #endif }
gpl-2.0
junkie2100/android_kernel_zte_quantum
net/ieee802154/nl_policy.c
12061
2258
/* * nl802154.h * * Copyright (C) 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <net/netlink.h> #include <linux/nl802154.h> #define NLA_HW_ADDR NLA_U64 const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, [IEEE802154_ATTR_PHY_NAME] = { .type = NLA_STRING, }, [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, }, [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, }, [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, [IEEE802154_ATTR_SRC_SHORT_ADDR] = { .type = NLA_U16, }, [IEEE802154_ATTR_SRC_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_SRC_PAN_ID] = { .type = NLA_U16, }, [IEEE802154_ATTR_DEST_SHORT_ADDR] = { .type = NLA_U16, }, [IEEE802154_ATTR_DEST_HW_ADDR] = { .type = NLA_HW_ADDR, }, [IEEE802154_ATTR_DEST_PAN_ID] = { .type = NLA_U16, }, [IEEE802154_ATTR_CAPABILITY] = { .type = NLA_U8, }, [IEEE802154_ATTR_REASON] = { .type = NLA_U8, }, [IEEE802154_ATTR_SCAN_TYPE] = { .type = NLA_U8, 
}, [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, }, [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, [IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, }, };
gpl-2.0
CPFL/gxen
tools/qemu-xen-traditional/hw/usb-bt.c
30
20800
/* * QEMU Bluetooth HCI USB Transport Layer v1.0 * * Copyright (C) 2007 OpenMoko, Inc. * Copyright (C) 2008 Andrzej Zaborowski <balrog@zabor.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 or * (at your option) version 3 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "qemu-common.h" #include "usb.h" #include "net.h" #include "bt.h" struct USBBtState { USBDevice dev; struct HCIInfo *hci; int altsetting; int config; #define CFIFO_LEN_MASK 255 #define DFIFO_LEN_MASK 4095 struct usb_hci_in_fifo_s { uint8_t data[(DFIFO_LEN_MASK + 1) * 2]; struct { uint8_t *data; int len; } fifo[CFIFO_LEN_MASK + 1]; int dstart, dlen, dsize, start, len; } evt, acl, sco; struct usb_hci_out_fifo_s { uint8_t data[4096]; int len; } outcmd, outacl, outsco; }; #define USB_EVT_EP 1 #define USB_ACL_EP 2 #define USB_SCO_EP 3 static const uint8_t qemu_bt_dev_descriptor[] = { 0x12, /* u8 bLength; */ USB_DT_DEVICE, /* u8 bDescriptorType; Device */ 0x10, 0x01, /* u16 bcdUSB; v1.10 */ 0xe0, /* u8 bDeviceClass; Wireless */ 0x01, /* u8 bDeviceSubClass; Radio Frequency */ 0x01, /* u8 bDeviceProtocol; Bluetooth */ 0x40, /* u8 bMaxPacketSize0; 64 Bytes */ 0x12, 0x0a, /* u16 idVendor; */ 0x01, 0x00, /* u16 idProduct; Bluetooth Dongle (HCI mode) */ 0x58, 0x19, /* u16 bcdDevice; (some devices have 0x48, 0x02) */ 0x00, /* u8 iManufacturer; */ 0x00, /* u8 iProduct; */ 0x00, /* u8 iSerialNumber; */ 0x01, /* u8 bNumConfigurations; */ }; 
static const uint8_t qemu_bt_config_descriptor[] = { /* one configuration */ 0x09, /* u8 bLength; */ USB_DT_CONFIG, /* u8 bDescriptorType; */ 0xb1, 0x00, /* u16 wTotalLength; */ 0x02, /* u8 bNumInterfaces; (2) */ 0x01, /* u8 bConfigurationValue; */ 0x00, /* u8 iConfiguration; */ 0xc0, /* u8 bmAttributes; Bit 7: must be set, 6: Self-powered, 5: Remote wakeup, 4..0: resvd */ 0x00, /* u8 MaxPower; */ /* USB 1.1: * USB 2.0, single TT organization (mandatory): * one interface, protocol 0 * * USB 2.0, multiple TT organization (optional): * two interfaces, protocols 1 (like single TT) * and 2 (multiple TT mode) ... config is * sometimes settable * NOT IMPLEMENTED */ /* interface one */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x00, /* u8 if_bInterfaceNumber; */ 0x00, /* u8 if_bAlternateSetting; */ 0x03, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_EVT_EP, /* u8 ep_bEndpointAddress; */ 0x03, /* u8 ep_bmAttributes; Interrupt */ 0x10, 0x00, /* u16 ep_wMaxPacketSize; */ 0x02, /* u8 ep_bInterval; */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_ACL_EP, /* u8 ep_bEndpointAddress; */ 0x02, /* u8 ep_bmAttributes; Bulk */ 0x40, 0x00, /* u16 ep_wMaxPacketSize; */ 0x0a, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint three */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_ACL_EP, /* u8 ep_bEndpointAddress; */ 0x02, /* u8 ep_bmAttributes; Bulk */ 0x40, 0x00, /* u16 ep_wMaxPacketSize; */ 0x0a, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting one */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 
if_bInterfaceNumber; */ 0x00, /* u8 if_bAlternateSetting; */ 0x02, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x00, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x00, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting two */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 if_bInterfaceNumber; */ 0x01, /* u8 if_bAlternateSetting; */ 0x02, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x09, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x09, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting three */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 if_bInterfaceNumber; */ 0x02, /* u8 if_bAlternateSetting; */ 0x02, /* u8 
if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x11, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x11, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting four */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 if_bInterfaceNumber; */ 0x03, /* u8 if_bAlternateSetting; */ 0x02, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x19, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x19, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting five */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 if_bInterfaceNumber; */ 0x04, /* u8 if_bAlternateSetting; */ 0x02, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 
if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x21, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x21, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* interface two setting six */ 0x09, /* u8 if_bLength; */ USB_DT_INTERFACE, /* u8 if_bDescriptorType; */ 0x01, /* u8 if_bInterfaceNumber; */ 0x05, /* u8 if_bAlternateSetting; */ 0x02, /* u8 if_bNumEndpoints; */ 0xe0, /* u8 if_bInterfaceClass; Wireless */ 0x01, /* u8 if_bInterfaceSubClass; Radio Frequency */ 0x01, /* u8 if_bInterfaceProtocol; Bluetooth */ 0x00, /* u8 if_iInterface; */ /* endpoint one */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_OUT | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x31, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* endpoint two */ 0x07, /* u8 ep_bLength; */ USB_DT_ENDPOINT, /* u8 ep_bDescriptorType; */ USB_DIR_IN | USB_SCO_EP, /* u8 ep_bEndpointAddress; */ 0x01, /* u8 ep_bmAttributes; Isochronous */ 0x31, 0x00, /* u16 ep_wMaxPacketSize; */ 0x01, /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */ /* If implemented, the DFU interface descriptor goes here with no * endpoints or alternative settings. 
*/
};

/* Reset an IN fifo to its empty state: both the byte region (dstart/dlen)
 * and the packet ring (start/len) are cleared, and the byte region size is
 * restored to its nominal value. */
static void usb_bt_fifo_reset(struct usb_hci_in_fifo_s *fifo)
{
    fifo->dstart = 0;
    fifo->dlen = 0;
    fifo->dsize = DFIFO_LEN_MASK + 1;
    fifo->start = 0;
    fifo->len = 0;
}

/* Append one HCI packet to an IN fifo.
 *
 * The fifo keeps packet payloads contiguous inside a byte buffer that is
 * twice the nominal size (data[(DFIFO_LEN_MASK + 1) * 2]): a packet that
 * would cross the nominal boundary grows dsize instead of wrapping, and a
 * packet that starts past the boundary is placed at (off - dsize), i.e.
 * back at the front of the buffer.  If either case overflows, the device
 * model aborts the whole process — there is no back-pressure path here.
 * The packet ring (fifo->fifo[]) records each packet's start and length.
 *
 * NOTE(review): no check that fifo->len stays within CFIFO_LEN_MASK — the
 * packet ring can presumably overwrite old entries if more than 256
 * packets are queued; confirm upper layers bound this. */
static void usb_bt_fifo_enqueue(struct usb_hci_in_fifo_s *fifo,
                const uint8_t *data, int len)
{
    int off = fifo->dstart + fifo->dlen;
    uint8_t *buf;

    fifo->dlen += len;
    if (off <= DFIFO_LEN_MASK) {
        /* Packet starts in the nominal region; allow it to spill past the
         * boundary by enlarging dsize, up to twice the nominal size. */
        if (off + len > DFIFO_LEN_MASK + 1 &&
                (fifo->dsize = off + len) > (DFIFO_LEN_MASK + 1) * 2) {
            fprintf(stderr, "%s: can't alloc %i bytes\n",
                            __FUNCTION__, len);
            exit(-1);
        }
        buf = fifo->data + off;
    } else {
        /* Packet starts past the (possibly grown) boundary: wrap to the
         * front.  Total buffered bytes must still fit in dsize. */
        if (fifo->dlen > fifo->dsize) {
            fprintf(stderr, "%s: can't alloc %i bytes\n",
                            __FUNCTION__, len);
            exit(-1);
        }
        buf = fifo->data + off - fifo->dsize;
    }

    /* Record the packet in the ring of (data, len) descriptors. */
    off = (fifo->start + fifo->len ++) & CFIFO_LEN_MASK;
    fifo->fifo[off].data = memcpy(buf, data, len);
    fifo->fifo[off].len = len;
}

/* Fill a USB IN packet from the fifo.  Returns the number of bytes copied,
 * or USB_RET_STALL when the fifo is empty.  A packet larger than the USB
 * transfer is consumed incrementally: the descriptor's data pointer and
 * length are advanced and the entry stays at the head for the next IN.
 *
 * NOTE(review): likely(!fifo->len) marks the empty fifo as the expected
 * case, and empty is reported as STALL rather than NAK — confirm this is
 * the intended host-visible behavior for this QEMU version. */
static inline int usb_bt_fifo_dequeue(struct usb_hci_in_fifo_s *fifo,
                USBPacket *p)
{
    int len;

    if (likely(!fifo->len))
        return USB_RET_STALL;

    len = MIN(p->len, fifo->fifo[fifo->start].len);
    memcpy(p->data, fifo->fifo[fifo->start].data, len);
    if (len == p->len) {
        /* USB packet filled; leave the (possibly partial) entry queued. */
        fifo->fifo[fifo->start].len -= len;
        fifo->fifo[fifo->start].data += len;
    } else {
        /* Entry fully consumed; advance the packet ring. */
        fifo->start ++;
        fifo->start &= CFIFO_LEN_MASK;
        fifo->len --;
    }

    /* Release the bytes from the byte region; once the read pointer passes
     * dsize, snap the region back to its nominal size. */
    fifo->dstart += len;
    fifo->dlen -= len;
    if (fifo->dstart >= fifo->dsize) {
        fifo->dstart = 0;
        fifo->dsize = DFIFO_LEN_MASK + 1;
    }

    return len;
}

/* Accumulate host-to-device bytes until `complete` says a full HCI packet
 * is present, then hand it to `send` (cmd_send/acl_send/sco_send on the
 * HCI back end).  Data arriving while the staging buffer is non-empty is
 * always appended first, so partial packets reassemble across transfers.
 *
 * NOTE(review): a single transfer carrying more than one complete packet
 * is forwarded as one blob — see the TODO below; no bounds check on
 * fifo->data (4096 bytes) either. */
static void inline usb_bt_fifo_out_enqueue(struct USBBtState *s,
                struct usb_hci_out_fifo_s *fifo,
                void (*send)(struct HCIInfo *, const uint8_t *, int),
                int (*complete)(const uint8_t *, int),
                const uint8_t *data, int len)
{
    if (fifo->len) {
        memcpy(fifo->data + fifo->len, data, len);
        fifo->len += len;
        if (complete(fifo->data, fifo->len)) {
            send(s->hci, fifo->data, fifo->len);
            fifo->len = 0;
        }
    } else if (complete(data, len))
        send(s->hci, data, len);
    else {
        memcpy(fifo->data, data, len);
        fifo->len = len;
    }

    /* TODO: do we need to loop? */
}

/* True when `data` holds at least one complete HCI command packet
 * (header plus the parameter length announced in the header). */
static int usb_bt_hci_cmd_complete(const uint8_t *data, int len)
{
    len -= HCI_COMMAND_HDR_SIZE;
    return len >= 0 &&
        len >= ((struct hci_command_hdr *) data)->plen;
}

/* True when `data` holds a complete ACL packet (dlen is little-endian). */
static int usb_bt_hci_acl_complete(const uint8_t *data, int len)
{
    len -= HCI_ACL_HDR_SIZE;
    return len >= 0 &&
        len >= le16_to_cpu(((struct hci_acl_hdr *) data)->dlen);
}

/* True when `data` holds a complete SCO packet. */
static int usb_bt_hci_sco_complete(const uint8_t *data, int len)
{
    len -= HCI_SCO_HDR_SIZE;
    return len >= 0 &&
        len >= ((struct hci_sco_hdr *) data)->dlen;
}

/* USB bus reset: drop all queued IN data, all staged OUT data, and return
 * interface two to alternate setting 0. */
static void usb_bt_handle_reset(USBDevice *dev)
{
    struct USBBtState *s = (struct USBBtState *) dev->opaque;

    usb_bt_fifo_reset(&s->evt);
    usb_bt_fifo_reset(&s->acl);
    usb_bt_fifo_reset(&s->sco);
    s->outcmd.len = 0;
    s->outacl.len = 0;
    s->outsco.len = 0;
    s->altsetting = 0;
}

/* Control endpoint handler: standard chapter-9 requests plus the
 * Bluetooth class-specific OUT request used to deliver HCI commands.
 * Returns the number of bytes produced, or USB_RET_STALL on error. */
static int usb_bt_handle_control(USBDevice *dev, int request, int value,
                int index, int length, uint8_t *data)
{
    struct USBBtState *s = (struct USBBtState *) dev->opaque;
    int ret = 0;

    switch (request) {
    case DeviceRequest | USB_REQ_GET_STATUS:
    case InterfaceRequest | USB_REQ_GET_STATUS:
    case EndpointRequest | USB_REQ_GET_STATUS:
        data[0] = (1 << USB_DEVICE_SELF_POWERED) |
            (dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP);
        data[1] = 0x00;
        ret = 2;
        break;
    case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
    case InterfaceOutRequest | USB_REQ_CLEAR_FEATURE:
    case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
        /* Only REMOTE_WAKEUP is supported as a clearable feature. */
        if (value == USB_DEVICE_REMOTE_WAKEUP) {
            dev->remote_wakeup = 0;
        } else {
            goto fail;
        }
        ret = 0;
        break;
    case DeviceOutRequest | USB_REQ_SET_FEATURE:
    case InterfaceOutRequest | USB_REQ_SET_FEATURE:
    case EndpointOutRequest | USB_REQ_SET_FEATURE:
        if (value == USB_DEVICE_REMOTE_WAKEUP) {
            dev->remote_wakeup = 1;
        } else {
            goto fail;
        }
        ret = 0;
        break;
    case DeviceOutRequest | USB_REQ_SET_ADDRESS:
        dev->addr = value;
        ret = 0;
        break;
    case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
        switch (value >> 8) {
        case USB_DT_DEVICE:
            ret = sizeof(qemu_bt_dev_descriptor);
            memcpy(data, qemu_bt_dev_descriptor, ret);
            break;
        case USB_DT_CONFIG:
            ret = sizeof(qemu_bt_config_descriptor);
            memcpy(data, qemu_bt_config_descriptor, ret);
            break;
        case USB_DT_STRING:
            switch(value & 0xff) {
            case 0:
                /* language ids */
                data[0] = 4;
                data[1] = 3;
                data[2] = 0x09;
                data[3] = 0x04;
                ret = 4;
                break;
            default:
                goto fail;
            }
            break;
        default:
            goto fail;
        }
        break;
    case DeviceRequest | USB_REQ_GET_CONFIGURATION:
        /* Offset 0x5 in the config descriptor is bConfigurationValue. */
        data[0] = qemu_bt_config_descriptor[0x5];
        ret = 1;
        /* NOTE(review): clearing s->config on a GET request looks
         * suspicious — a read should not deconfigure the device;
         * confirm against upstream QEMU. */
        s->config = 0;
        break;
    case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
        ret = 0;
        if (value != qemu_bt_config_descriptor[0x5] && value != 0) {
            printf("%s: Wrong SET_CONFIGURATION request (%i)\n",
                            __FUNCTION__, value);
            goto fail;
        }
        s->config = 1;
        usb_bt_fifo_reset(&s->evt);
        usb_bt_fifo_reset(&s->acl);
        usb_bt_fifo_reset(&s->sco);
        break;
    case InterfaceRequest | USB_REQ_GET_INTERFACE:
        /* Only interfaces 0 and 1 exist; only interface 1 has
         * alternate settings. */
        if (value != 0 || (index & ~1) || length != 1)
            goto fail;
        if (index == 1)
            data[0] = s->altsetting;
        else
            data[0] = 0;
        ret = 1;
        break;
    case InterfaceOutRequest | USB_REQ_SET_INTERFACE:
        if ((index & ~1) || length != 0 ||
                        (index == 1 && (value < 0 || value > 4)) ||
                        (index == 0 && value != 0)) {
            printf("%s: Wrong SET_INTERFACE request (%i, %i)\n",
                            __FUNCTION__, index, value);
            goto fail;
        }
        s->altsetting = value;
        ret = 0;
        break;
    case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_DEVICE) << 8):
        /* Class-specific OUT on EP0: HCI command packet delivery. */
        if (s->config)
            usb_bt_fifo_out_enqueue(s, &s->outcmd, s->hci->cmd_send,
                            usb_bt_hci_cmd_complete, data, length);
        break;
    default:
    fail:
        ret = USB_RET_STALL;
        break;
    }
    return ret;
}

/* Bulk/interrupt/iso data handler: IN tokens drain the evt/acl/sco IN
 * fifos, OUT tokens stage ACL/SCO traffic toward the HCI back end.
 * All traffic is rejected until the device is configured. */
static int usb_bt_handle_data(USBDevice *dev, USBPacket *p)
{
    struct USBBtState *s = (struct USBBtState *) dev->opaque;
    int ret = 0;

    if (!s->config)
        goto fail;

    switch (p->pid) {
    case USB_TOKEN_IN:
        switch (p->devep & 0xf) {
        case USB_EVT_EP:
            ret = usb_bt_fifo_dequeue(&s->evt, p);
            break;
        case USB_ACL_EP:
            ret = usb_bt_fifo_dequeue(&s->acl, p);
            break;
        case USB_SCO_EP:
            ret = usb_bt_fifo_dequeue(&s->sco, p);
            break;
        default:
            goto fail;
        }
        break;

    case USB_TOKEN_OUT:
        switch (p->devep & 0xf) {
        case USB_ACL_EP:
            usb_bt_fifo_out_enqueue(s, &s->outacl, s->hci->acl_send,
                            usb_bt_hci_acl_complete, p->data, p->len);
            break;
        case USB_SCO_EP:
            usb_bt_fifo_out_enqueue(s, &s->outsco, s->hci->sco_send,
                            usb_bt_hci_sco_complete, p->data, p->len);
            break;
        default:
            goto fail;
        }
        break;

    default:
    fail:
        ret = USB_RET_STALL;
        break;
    }
    return ret;
}

/* HCI back end -> host: queue an event packet on the interrupt IN fifo. */
static void usb_bt_out_hci_packet_event(void *opaque,
                const uint8_t *data, int len)
{
    struct USBBtState *s = (struct USBBtState *) opaque;

    usb_bt_fifo_enqueue(&s->evt, data, len);
}

/* HCI back end -> host: queue an ACL packet on the bulk IN fifo. */
static void usb_bt_out_hci_packet_acl(void *opaque,
                const uint8_t *data, int len)
{
    struct USBBtState *s = (struct USBBtState *) opaque;

    usb_bt_fifo_enqueue(&s->acl, data, len);
}

/* Tear down the device: detach from the HCI back end and free state. */
static void usb_bt_handle_destroy(USBDevice *dev)
{
    struct USBBtState *s = (struct USBBtState *) dev->opaque;

    s->hci->opaque = 0;
    s->hci->evt_recv = 0;
    s->hci->acl_recv = 0;
    qemu_free(s);
}

/* Create a USB Bluetooth dongle bound to the given HCI back end.
 * Returns NULL when no HCI is available; otherwise the caller receives
 * the USBDevice embedded in the allocated state (freed by destroy). */
USBDevice *usb_bt_init(HCIInfo *hci)
{
    struct USBBtState *s;

    if (!hci)
        return NULL;
    s = qemu_mallocz(sizeof(struct USBBtState));
    s->dev.opaque = s;
    s->dev.speed = USB_SPEED_HIGH;
    s->dev.handle_packet = usb_generic_handle_packet;
    pstrcpy(s->dev.devname, sizeof(s->dev.devname), "QEMU BT dongle");

    s->dev.handle_reset = usb_bt_handle_reset;
    s->dev.handle_control = usb_bt_handle_control;
    s->dev.handle_data = usb_bt_handle_data;
    s->dev.handle_destroy = usb_bt_handle_destroy;

    s->hci = hci;
    s->hci->opaque = s;
    s->hci->evt_recv = usb_bt_out_hci_packet_event;
    s->hci->acl_recv = usb_bt_out_hci_packet_acl;

    usb_bt_handle_reset(&s->dev);

    return &s->dev;
}
gpl-2.0
sev3n85/samsung_s3ve3g_EUR
net/netfilter/xt_connmark.c
30
6501
/* * xt_connmark - Netfilter module to operate on connection marks * * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * Jan Engelhardt <jengelh@medozas.de> * Copyright (c) 2015 Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Changes: * KwnagHyun Kim <kh0304.kim@samsung.com> 2015/07/08 * Baesung Park <baesung.park@samsung.com> 2015/07/08 * Vignesh Saravanaperumal <vignesh1.s@samsung.com> 2015/07/08 * Add codes to share UID/PID information * */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connmark.h> // ------------- START of KNOX_VPN ------------------// #include <linux/types.h> #include <linux/tcp.h> #include <linux/ip.h> #include <net/ip.h> // ------------- END of KNOX_VPN -------------------// MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>"); MODULE_DESCRIPTION("Xtables: connection mark operations"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_CONNMARK"); MODULE_ALIAS("ip6t_CONNMARK"); MODULE_ALIAS("ipt_connmark"); MODULE_ALIAS("ip6t_connmark"); // ------------- START of KNOX_VPN ------------------// /* 
KNOX framework uses mark value 100 to 500 * when the special meta data is added * This will indicate to the kernel code that * it needs to append meta data to the packets */ #define META_MARK_BASE_LOWER 100 #define META_MARK_BASE_UPPER 500 /* Structure to hold metadata values * intended for VPN clients to make * more intelligent decisions * when the KNOX meta mark * feature is enabled */ struct knox_meta_param { uid_t uid; pid_t pid; }; static unsigned int knoxvpn_uidpid(struct sk_buff *skb, u_int32_t newmark) { int szMetaData; struct skb_shared_info *knox_shinfo = NULL; szMetaData = sizeof(struct knox_meta_param); if (skb != NULL) { knox_shinfo = skb_shinfo(skb); } else { pr_err("KNOX: NULL SKB - no KNOX processing"); return -1; } if( skb->sk == NULL) { pr_err("KNOX: skb->sk value is null"); return -1; } if( knox_shinfo == NULL) { pr_err("KNOX: knox_shinfo is null"); return -1; } if (newmark < META_MARK_BASE_LOWER || newmark > META_MARK_BASE_UPPER) { pr_err("KNOX: The mark is out of range"); return -1; } else { knox_shinfo->uid = skb->sk->knox_uid; knox_shinfo->pid = skb->sk->knox_pid; knox_shinfo->knox_mark = newmark; } return 0; } // ------------- END of KNOX_VPN -------------------// static unsigned int connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo1 *info = par->targinfo; enum ip_conntrack_info ctinfo; struct nf_conn *ct; u_int32_t newmark; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return XT_CONTINUE; switch (info->mode) { case XT_CONNMARK_SET: newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_SAVE: newmark = (ct->mark & ~info->ctmask) ^ (skb->mark & info->nfmask); if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: newmark = (skb->mark & ~info->nfmask) ^ (ct->mark & info->ctmask); skb->mark = newmark; // 
------------- START of KNOX_VPN -----------------// knoxvpn_uidpid(skb, newmark); // ------------- END of KNOX_VPN -------------------// break; } return XT_CONTINUE; } static int connmark_tg_check(const struct xt_tgchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void connmark_tg_destroy(const struct xt_tgdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static bool connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_connmark_mtinfo1 *info = par->matchinfo; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return false; return ((ct->mark & info->mask) == info->mark) ^ info->invert; } static int connmark_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void connmark_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_target connmark_tg_reg __read_mostly = { .name = "CONNMARK", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = connmark_tg_check, .target = connmark_tg, .targetsize = sizeof(struct xt_connmark_tginfo1), .destroy = connmark_tg_destroy, .me = THIS_MODULE, }; static struct xt_match connmark_mt_reg __read_mostly = { .name = "connmark", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = connmark_mt_check, .match = connmark_mt, .matchsize = sizeof(struct xt_connmark_mtinfo1), .destroy = connmark_mt_destroy, .me = THIS_MODULE, }; static int __init connmark_mt_init(void) { int ret; ret = xt_register_target(&connmark_tg_reg); if (ret < 0) return ret; ret = xt_register_match(&connmark_mt_reg); if (ret < 0) { xt_unregister_target(&connmark_tg_reg); return ret; } return 0; } static void __exit connmark_mt_exit(void) { 
xt_unregister_match(&connmark_mt_reg); xt_unregister_target(&connmark_tg_reg); } module_init(connmark_mt_init); module_exit(connmark_mt_exit);
gpl-2.0
bigzz/kernel_msm
drivers/usb/gadget/ci13xxx_msm.c
286
9894
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/usb/msm_hsusb_hw.h> #include <linux/usb/ulpi.h> #include <linux/gpio.h> #include "ci13xxx_udc.c" #define MSM_USB_BASE (udc->regs) #define CI13XXX_MSM_MAX_LOG2_ITC 7 struct ci13xxx_udc_context { int irq; void __iomem *regs; int wake_gpio; int wake_irq; bool wake_irq_state; }; static struct ci13xxx_udc_context _udc_ctxt; static irqreturn_t msm_udc_irq(int irq, void *data) { return udc_irq(); } static void ci13xxx_msm_suspend(void) { struct device *dev = _udc->gadget.dev.parent; dev_dbg(dev, "ci13xxx_msm_suspend\n"); if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) { enable_irq_wake(_udc_ctxt.wake_irq); enable_irq(_udc_ctxt.wake_irq); _udc_ctxt.wake_irq_state = true; } } static void ci13xxx_msm_resume(void) { struct device *dev = _udc->gadget.dev.parent; dev_dbg(dev, "ci13xxx_msm_resume\n"); if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) { disable_irq_wake(_udc_ctxt.wake_irq); disable_irq_nosync(_udc_ctxt.wake_irq); _udc_ctxt.wake_irq_state = false; } } static void ci13xxx_msm_disconnect(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) usb_phy_io_write(phy, ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL, ULPI_CLR(ULPI_MISC_A)); } /* Link power management will reduce power consumption by * short time HW suspend/resume. 
*/ static void ci13xxx_msm_set_l1(struct ci13xxx *udc) { int temp; struct device *dev = udc->gadget.dev.parent; dev_dbg(dev, "Enable link power management\n"); /* Enable remote wakeup and L1 for IN EPs */ writel_relaxed(0xffff0000, USB_L1_EP_CTRL); temp = readl_relaxed(USB_L1_CONFIG); temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP | L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM | L1_CONFIG_PLL; writel_relaxed(temp, USB_L1_CONFIG); } static void ci13xxx_msm_connect(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) { int temp; usb_phy_io_write(phy, ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL, ULPI_SET(ULPI_MISC_A)); temp = readl_relaxed(USB_GENCONFIG2); temp |= GENCFG2_SESS_VLD_CTRL_EN; writel_relaxed(temp, USB_GENCONFIG2); temp = readl_relaxed(USB_USBCMD); temp |= USBCMD_SESS_VLD_CTRL; writel_relaxed(temp, USB_USBCMD); /* * Add memory barrier as it is must to complete * above USB PHY and Link register writes before * moving ahead with USB peripheral mode enumeration, * otherwise USB peripheral mode may not work. */ mb(); } } static void ci13xxx_msm_reset(void) { struct ci13xxx *udc = _udc; struct usb_phy *phy = udc->transceiver; struct device *dev = udc->gadget.dev.parent; writel_relaxed(0, USB_AHBBURST); writel_relaxed(0x08, USB_AHBMODE); if (udc->gadget.l1_supported) ci13xxx_msm_set_l1(udc); if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) { int temp; dev_dbg(dev, "using secondary hsphy\n"); temp = readl_relaxed(USB_PHY_CTRL2); temp |= (1<<16); writel_relaxed(temp, USB_PHY_CTRL2); /* * Add memory barrier to make sure above LINK writes are * complete before moving ahead with USB peripheral mode * enumeration. 
*/ mb(); } } static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event) { struct device *dev = udc->gadget.dev.parent; switch (event) { case CI13XXX_CONTROLLER_RESET_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n"); ci13xxx_msm_reset(); break; case CI13XXX_CONTROLLER_DISCONNECT_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n"); ci13xxx_msm_disconnect(); ci13xxx_msm_resume(); break; case CI13XXX_CONTROLLER_CONNECT_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n"); ci13xxx_msm_connect(); break; case CI13XXX_CONTROLLER_SUSPEND_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n"); ci13xxx_msm_suspend(); break; case CI13XXX_CONTROLLER_RESUME_EVENT: dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n"); ci13xxx_msm_resume(); break; default: dev_dbg(dev, "unknown ci13xxx_udc event\n"); break; } } static bool ci13xxx_msm_in_lpm(struct ci13xxx *udc) { struct msm_otg *otg; if (udc == NULL) return false; if (udc->transceiver == NULL) return false; otg = container_of(udc->transceiver, struct msm_otg, phy); return (atomic_read(&otg->in_lpm) != 0); } static void ci13xxx_msm_set_fpr_flag(struct ci13xxx *udc) { struct msm_otg *otg; if (udc == NULL) return; if (udc->transceiver == NULL) return; otg = container_of(udc->transceiver, struct msm_otg, phy); atomic_set(&otg->set_fpr_with_lpm_exit, 1); } static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data) { struct ci13xxx *udc = _udc; if (udc->transceiver && udc->vbus_active && udc->suspended) usb_phy_set_suspend(udc->transceiver, 0); else if (!udc->suspended) ci13xxx_msm_resume(); return IRQ_HANDLED; } static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { .name = "ci13xxx_msm", .flags = CI13XXX_REGS_SHARED | CI13XXX_REQUIRE_TRANSCEIVER | CI13XXX_PULLUP_ON_VBUS | CI13XXX_ZERO_ITC | CI13XXX_DISABLE_STREAMING | CI13XXX_IS_OTG, .nz_itc = 0, .notify_event = ci13xxx_msm_notify_event, .in_lpm = ci13xxx_msm_in_lpm, .set_fpr_flag = 
ci13xxx_msm_set_fpr_flag, }; static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev, struct resource *res) { int wake_irq; int ret; dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n"); _udc_ctxt.wake_gpio = res->start; gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME"); gpio_direction_input(_udc_ctxt.wake_gpio); wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio); if (wake_irq < 0) { dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n"); return -ENXIO; } dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n", _udc_ctxt.wake_gpio, wake_irq); ret = request_irq(wake_irq, ci13xxx_msm_resume_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL); if (ret < 0) { dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n"); goto gpio_free; } disable_irq(wake_irq); _udc_ctxt.wake_irq = wake_irq; return 0; gpio_free: gpio_free(_udc_ctxt.wake_gpio); _udc_ctxt.wake_gpio = 0; return ret; } static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev) { dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n"); if (_udc_ctxt.wake_gpio) { gpio_free(_udc_ctxt.wake_gpio); _udc_ctxt.wake_gpio = 0; } } static int ci13xxx_msm_probe(struct platform_device *pdev) { struct resource *res; int ret; struct ci13xxx_platform_data *pdata = pdev->dev.platform_data; bool is_l1_supported = false; dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n"); if (pdata) { /* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */ if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC || pdata->log2_itc <= 0) ci13xxx_msm_udc_driver.nz_itc = 0; else ci13xxx_msm_udc_driver.nz_itc = 1 << (pdata->log2_itc-1); is_l1_supported = pdata->l1_supported; /* Set ahb2ahb bypass flag if it is requested. 
*/ if (pdata->enable_ahb2ahb_bypass) ci13xxx_msm_udc_driver.flags |= CI13XXX_ENABLE_AHB2AHB_BYPASS; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get platform resource mem\n"); return -ENXIO; } _udc_ctxt.regs = ioremap(res->start, resource_size(res)); if (!_udc_ctxt.regs) { dev_err(&pdev->dev, "ioremap failed\n"); return -ENOMEM; } ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs); if (ret < 0) { dev_err(&pdev->dev, "udc_probe failed\n"); goto iounmap; } _udc->gadget.l1_supported = is_l1_supported; _udc_ctxt.irq = platform_get_irq(pdev, 0); if (_udc_ctxt.irq < 0) { dev_err(&pdev->dev, "IRQ not found\n"); ret = -ENXIO; goto udc_remove; } res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME"); if (res) { ret = ci13xxx_msm_install_wake_gpio(pdev, res); if (ret < 0) { dev_err(&pdev->dev, "gpio irq install failed\n"); goto udc_remove; } } ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name, pdev); if (ret < 0) { dev_err(&pdev->dev, "request_irq failed\n"); goto gpio_uninstall; } pm_runtime_no_callbacks(&pdev->dev); pm_runtime_enable(&pdev->dev); return 0; gpio_uninstall: ci13xxx_msm_uninstall_wake_gpio(pdev); udc_remove: udc_remove(); iounmap: iounmap(_udc_ctxt.regs); return ret; } int ci13xxx_msm_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); free_irq(_udc_ctxt.irq, pdev); ci13xxx_msm_uninstall_wake_gpio(pdev); udc_remove(); iounmap(_udc_ctxt.regs); return 0; } void msm_hw_bam_disable(bool bam_disable) { u32 val; struct ci13xxx *udc = _udc; if (bam_disable) val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE; else val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE; writel_relaxed(val, USB_GENCONFIG); } static struct platform_driver ci13xxx_msm_driver = { .probe = ci13xxx_msm_probe, .driver = { .name = "msm_hsusb", }, .remove = ci13xxx_msm_remove, }; MODULE_ALIAS("platform:msm_hsusb"); static int __init ci13xxx_msm_init(void) { 
return platform_driver_register(&ci13xxx_msm_driver); } module_init(ci13xxx_msm_init); static void __exit ci13xxx_msm_exit(void) { platform_driver_unregister(&ci13xxx_msm_driver); } module_exit(ci13xxx_msm_exit); MODULE_LICENSE("GPL v2");
gpl-2.0
glewarne/Note2Core_v3_kernel_N710x
drivers/video/backlight/ams369fg06.c
542
13394
/* * ams369fg06 AMOLED LCD panel driver. * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Author: Jingoo Han <jg1.han@samsung.com> * * Derived from drivers/video/s6e63m0.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/wait.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/lcd.h> #include <linux/backlight.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include "ams369fg06_gamma.h" #define SLEEPMSEC 0x1000 #define ENDDEF 0x2000 #define DEFMASK 0xFF00 #define COMMAND_ONLY 0xFE #define DATA_ONLY 0xFF #define MIN_BRIGHTNESS 0 #define MAX_BRIGHTNESS 255 #define DEFAULT_BRIGHTNESS 150 #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) struct ams369fg06 { struct device *dev; struct spi_device *spi; unsigned int power; struct lcd_device *ld; struct backlight_device *bd; struct lcd_platform_data *lcd_pd; #ifdef CONFIG_HAS_EARLYSUSPEND struct early_suspend early_suspend; #endif }; const unsigned short SEQ_DISPLAY_ON[] = { 0x14, 0x03, ENDDEF, 0x0000 }; const unsigned short SEQ_DISPLAY_OFF[] = { 0x14, 0x00, ENDDEF, 0x0000 }; const unsigned short SEQ_STAND_BY_ON[] = { 0x1D, 0xA1, SLEEPMSEC, 200, ENDDEF, 0x0000 }; const unsigned short SEQ_STAND_BY_OFF[] = { 0x1D, 0xA0, SLEEPMSEC, 250, ENDDEF, 0x0000 }; const unsigned 
short SEQ_SETTING[] = { 0x31, 0x08, 0x32, 0x14, 0x30, 0x02, 0x27, 0x01, 0x12, 0x08, 0x13, 0x08, 0x15, 0x00, 0x16, 0x00, 0xef, 0xd0, DATA_ONLY, 0xe8, 0x39, 0x44, 0x40, 0x00, 0x41, 0x3f, 0x42, 0x2a, 0x43, 0x27, 0x44, 0x27, 0x45, 0x1f, 0x46, 0x44, 0x50, 0x00, 0x51, 0x00, 0x52, 0x17, 0x53, 0x24, 0x54, 0x26, 0x55, 0x1f, 0x56, 0x43, 0x60, 0x00, 0x61, 0x3f, 0x62, 0x2a, 0x63, 0x25, 0x64, 0x24, 0x65, 0x1b, 0x66, 0x5c, 0x17, 0x22, 0x18, 0x33, 0x19, 0x03, 0x1a, 0x01, 0x22, 0xa4, 0x23, 0x00, 0x26, 0xa0, 0x1d, 0xa0, SLEEPMSEC, 300, 0x14, 0x03, ENDDEF, 0x0000 }; static int ams369fg06_spi_write_byte(struct ams369fg06 *lcd, int addr, int data) { u16 buf[1]; struct spi_message msg; struct spi_transfer xfer = { .len = 2, .tx_buf = buf, }; buf[0] = (addr << 8) | data; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(lcd->spi, &msg); } static int ams369fg06_spi_write(struct ams369fg06 *lcd, unsigned char address, unsigned char command) { int ret = 0; if (address != DATA_ONLY) ret = ams369fg06_spi_write_byte(lcd, 0x70, address); if (command != COMMAND_ONLY) ret = ams369fg06_spi_write_byte(lcd, 0x72, command); return ret; } static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd, const unsigned short *wbuf) { int ret = 0, i = 0; while ((wbuf[i] & DEFMASK) != ENDDEF) { if ((wbuf[i] & DEFMASK) != SLEEPMSEC) { ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]); if (ret) break; } else udelay(wbuf[i+1]*1000); i += 2; } return ret; } static int _ams369fg06_gamma_ctl(struct ams369fg06 *lcd, const unsigned int *gamma) { unsigned int i = 0; int ret = 0; for (i = 0 ; i < GAMMA_TABLE_COUNT / 3; i++) { ret = ams369fg06_spi_write(lcd, 0x40 + i, gamma[i]); ret = ams369fg06_spi_write(lcd, 0x50 + i, gamma[i+7*1]); ret = ams369fg06_spi_write(lcd, 0x60 + i, gamma[i+7*2]); if (ret) { dev_err(lcd->dev, "failed to set gamma table.\n"); goto gamma_err; } } gamma_err: return ret; } static int ams369fg06_gamma_ctl(struct ams369fg06 *lcd, int brightness) { int ret = 0; int 
gamma = 0; if ((brightness >= 0) && (brightness <= 50)) gamma = 0; else if ((brightness > 50) && (brightness <= 100)) gamma = 1; else if ((brightness > 100) && (brightness <= 150)) gamma = 2; else if ((brightness > 150) && (brightness <= 200)) gamma = 3; else if ((brightness > 200) && (brightness <= 255)) gamma = 4; ret = _ams369fg06_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]); return ret; } static int ams369fg06_ldi_init(struct ams369fg06 *lcd) { int ret, i; const unsigned short *init_seq[] = { SEQ_SETTING, SEQ_STAND_BY_OFF, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_ldi_enable(struct ams369fg06 *lcd) { int ret, i; const unsigned short *init_seq[] = { SEQ_STAND_BY_OFF, SEQ_DISPLAY_ON, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_ldi_disable(struct ams369fg06 *lcd) { int ret, i; const unsigned short *init_seq[] = { SEQ_DISPLAY_OFF, SEQ_STAND_BY_ON, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_power_on(struct ams369fg06 *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; struct backlight_device *bd = NULL; pd = lcd->lcd_pd; if (!pd) { dev_err(lcd->dev, "platform data is NULL.\n"); return -EFAULT; } bd = lcd->bd; if (!bd) { dev_err(lcd->dev, "backlight device is NULL.\n"); return -EFAULT; } if (!pd->power_on) { dev_err(lcd->dev, "power_on is NULL.\n"); return -EFAULT; } else { pd->power_on(lcd->ld, 1); mdelay(pd->power_on_delay); } if (!pd->reset) { dev_err(lcd->dev, "reset is NULL.\n"); return -EFAULT; } else { pd->reset(lcd->ld); mdelay(pd->reset_delay); } ret = ams369fg06_ldi_init(lcd); if (ret) { dev_err(lcd->dev, "failed to initialize ldi.\n"); return ret; } ret = ams369fg06_ldi_enable(lcd); if (ret) { 
dev_err(lcd->dev, "failed to enable ldi.\n"); return ret; } /* set brightness to current value after power on or resume. */ ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness); if (ret) { dev_err(lcd->dev, "lcd gamma setting failed.\n"); return ret; } return 0; } static int ams369fg06_power_off(struct ams369fg06 *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; pd = lcd->lcd_pd; if (!pd) { dev_err(lcd->dev, "platform data is NULL\n"); return -EFAULT; } ret = ams369fg06_ldi_disable(lcd); if (ret) { dev_err(lcd->dev, "lcd setting failed.\n"); return -EIO; } mdelay(pd->power_off_delay); if (!pd->power_on) { dev_err(lcd->dev, "power_on is NULL.\n"); return -EFAULT; } else pd->power_on(lcd->ld, 0); return 0; } static int ams369fg06_power(struct ams369fg06 *lcd, int power) { int ret = 0; if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) ret = ams369fg06_power_on(lcd); else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) ret = ams369fg06_power_off(lcd); if (!ret) lcd->power = power; return ret; } static int ams369fg06_get_power(struct lcd_device *ld) { struct ams369fg06 *lcd = lcd_get_data(ld); return lcd->power; } static int ams369fg06_set_power(struct lcd_device *ld, int power) { struct ams369fg06 *lcd = lcd_get_data(ld); if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && power != FB_BLANK_NORMAL) { dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } return ams369fg06_power(lcd, power); } static int ams369fg06_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static int ams369fg06_set_brightness(struct backlight_device *bd) { int ret = 0; int brightness = bd->props.brightness; struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev); if (brightness < MIN_BRIGHTNESS || brightness > bd->props.max_brightness) { dev_err(&bd->dev, "lcd brightness should be %d to %d.\n", MIN_BRIGHTNESS, MAX_BRIGHTNESS); return -EINVAL; } ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness); if (ret) { dev_err(&bd->dev, 
"lcd brightness setting failed.\n"); return -EIO; } return ret; } static struct lcd_ops ams369fg06_lcd_ops = { .get_power = ams369fg06_get_power, .set_power = ams369fg06_set_power, }; static const struct backlight_ops ams369fg06_backlight_ops = { .get_brightness = ams369fg06_get_brightness, .update_status = ams369fg06_set_brightness, }; #ifdef CONFIG_HAS_EARLYSUSPEND unsigned int before_power; static void ams369fg06_early_suspend(struct early_suspend *handler) { struct ams369fg06 *lcd = NULL; lcd = container_of(handler, struct ams369fg06, early_suspend); before_power = lcd->power; ams369fg06_power(lcd, FB_BLANK_POWERDOWN); } static void ams369fg06_late_resume(struct early_suspend *handler) { struct ams369fg06 *lcd = NULL; lcd = container_of(handler, struct ams369fg06, early_suspend); if (before_power == FB_BLANK_UNBLANK) lcd->power = FB_BLANK_POWERDOWN; ams369fg06_power(lcd, before_power); } #endif static int __init ams369fg06_probe(struct spi_device *spi) { int ret = 0; struct ams369fg06 *lcd = NULL; struct lcd_device *ld = NULL; struct backlight_device *bd = NULL; lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL); if (!lcd) return -ENOMEM; /* ams369fg06 lcd panel uses 3-wire 16bits SPI Mode. 
*/ spi->bits_per_word = 16; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "spi setup failed.\n"); goto out_free_lcd; } lcd->spi = spi; lcd->dev = &spi->dev; lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data; if (!lcd->lcd_pd) { dev_err(&spi->dev, "platform data is NULL\n"); goto out_free_lcd; } ld = lcd_device_register("ams369fg06", &spi->dev, lcd, &ams369fg06_lcd_ops); if (IS_ERR(ld)) { ret = PTR_ERR(ld); goto out_free_lcd; } lcd->ld = ld; bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd, &ams369fg06_backlight_ops, NULL); if (IS_ERR(bd)) { ret = PTR_ERR(bd); goto out_lcd_unregister; } bd->props.max_brightness = MAX_BRIGHTNESS; bd->props.brightness = DEFAULT_BRIGHTNESS; bd->props.type = BACKLIGHT_RAW; lcd->bd = bd; if (!lcd->lcd_pd->lcd_enabled) { /* * if lcd panel was off from bootloader then * current lcd status is powerdown and then * it enables lcd panel. */ lcd->power = FB_BLANK_POWERDOWN; ams369fg06_power(lcd, FB_BLANK_UNBLANK); } else lcd->power = FB_BLANK_UNBLANK; dev_set_drvdata(&spi->dev, lcd); #ifdef CONFIG_HAS_EARLYSUSPEND lcd->early_suspend.suspend = ams369fg06_early_suspend; lcd->early_suspend.resume = ams369fg06_late_resume; lcd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1; register_early_suspend(&lcd->early_suspend); #endif dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n"); return 0; out_lcd_unregister: lcd_device_unregister(ld); out_free_lcd: kfree(lcd); return ret; } static int __devexit ams369fg06_remove(struct spi_device *spi) { struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->ld); kfree(lcd); return 0; } #if defined(CONFIG_PM) #ifndef CONFIG_HAS_EARLYSUSPEND unsigned int before_power; static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg) { int ret = 0; struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power); before_power = lcd->power; /* 
* when lcd panel is suspend, lcd panel becomes off * regardless of status. */ ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN); return ret; } static int ams369fg06_resume(struct spi_device *spi) { int ret = 0; struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); /* * after suspended, if lcd panel status is FB_BLANK_UNBLANK * (at that time, before_power is FB_BLANK_UNBLANK) then * it changes that status to FB_BLANK_POWERDOWN to get lcd on. */ if (before_power == FB_BLANK_UNBLANK) lcd->power = FB_BLANK_POWERDOWN; dev_dbg(&spi->dev, "before_power = %d\n", before_power); ret = ams369fg06_power(lcd, before_power); return ret; } #endif #else #define ams369fg06_suspend NULL #define ams369fg06_resume NULL #endif void ams369fg06_shutdown(struct spi_device *spi) { struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); } static struct spi_driver ams369fg06_driver = { .driver = { .name = "ams369fg06", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ams369fg06_probe, .remove = __devexit_p(ams369fg06_remove), .shutdown = ams369fg06_shutdown, #ifndef CONFIG_HAS_EARLYSUSPEND .suspend = ams369fg06_suspend, .resume = ams369fg06_resume, #endif }; static int __init ams369fg06_init(void) { return spi_register_driver(&ams369fg06_driver); } static void __exit ams369fg06_exit(void) { spi_unregister_driver(&ams369fg06_driver); } module_init(ams369fg06_init); module_exit(ams369fg06_exit); MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); MODULE_DESCRIPTION("ams369fg06 LCD Driver"); MODULE_LICENSE("GPL");
gpl-2.0
SweetwaterBurns/E4GT-Kernel
arch/arm/mach-exynos/hotplug.c
542
3247
/* linux arch/arm/mach-exynos/hotplug.c * * Cloned from linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <linux/completion.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <plat/cpu.h> #include <mach/regs-pmu.h> extern volatile int pen_release; static inline void cpu_enter_lowpower_a9(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, %3\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C), "Ir" (0x40) : "cc"); } static inline void cpu_enter_lowpower_a15(void) { unsigned int v; asm volatile( " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); flush_cache_all(); asm volatile( /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (0x40) : "cc"); isb(); dsb(); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C), "Ir" (0x40) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { for (;;) { /* make cpu1 to be turned off at next WFI command */ if ((cpu >= 1) && (cpu < NR_CPUS)) __raw_writel(0, S5P_ARM_CORE_CONFIGURATION(cpu)); /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); if (pen_release == cpu) { /* * OK, 
proper wakeup, we're done */ break; } /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. */ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ if (soc_is_exynos5250()) cpu_enter_lowpower_a15(); else cpu_enter_lowpower_a9(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
ronsaldo/linux-amdgpu-si
drivers/net/wan/sealevel.c
2078
8096
/*
 *	Sealevel Systems 4021 driver.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	(c) Copyright 1999, 2001 Alan Cox
 *	(c) Copyright 2001 Red Hat Inc.
 *	Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/arp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include "z85230.h"

/* One HDLC port: which Z8530 channel it sits on and its index (0 or 1). */
struct slvl_device
{
	struct z8530_channel *chan;
	int channel;
};

/* The whole board: two ports, the shared Z8530 state, and the I/O base. */
struct slvl_board
{
	struct slvl_device dev[2];
	struct z8530_dev board;
	int iobase;
};

/*
 *	Network driver support routines
 */

static inline struct slvl_device* dev_to_chan(struct net_device *dev)
{
	return (struct slvl_device *)dev_to_hdlc(dev)->priv;
}

/*
 *	Frame receive. Simple for our card as we do HDLC and there
 *	is no funny garbage involved
 */

static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
	skb_trim(skb, skb->len - 2);
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}

/*
 *	We've been placed in the UP state
 *
 *	Channel 0 runs in DMA mode, channel 1 in PIO mode; failures after
 *	z8530 open are unwound with the matching close call.
 */

static int sealevel_open(struct net_device *d)
{
	struct slvl_device *slvl = dev_to_chan(d);
	int err = -1;
	int unit = slvl->channel;

	/*
	 *	Link layer up.
	 */

	switch (unit)
	{
		case 0:
			err = z8530_sync_dma_open(d, slvl->chan);
			break;
		case 1:
			err = z8530_sync_open(d, slvl->chan);
			break;
	}

	if (err)
		return err;

	err = hdlc_open(d);
	if (err) {
		switch (unit) {
			case 0:
				z8530_sync_dma_close(d, slvl->chan);
				break;
			case 1:
				z8530_sync_close(d, slvl->chan);
				break;
		}
		return err;
	}

	slvl->chan->rx_function = sealevel_input;

	/*
	 *	Go go go
	 */
	netif_start_queue(d);
	return 0;
}

/* Take the interface down: stop RX delivery, close HDLC, close z8530. */
static int sealevel_close(struct net_device *d)
{
	struct slvl_device *slvl = dev_to_chan(d);
	int unit = slvl->channel;

	/*
	 *	Discard new frames
	 */

	slvl->chan->rx_function = z8530_null_rx;

	hdlc_close(d);
	netif_stop_queue(d);

	switch (unit)
	{
		case 0:
			z8530_sync_dma_close(d, slvl->chan);
			break;
		case 1:
			z8530_sync_close(d, slvl->chan);
			break;
	}
	return 0;
}

/* ioctl handler: everything is delegated to the generic HDLC layer. */
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
	/* struct slvl_device *slvl=dev_to_chan(d);
	   z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
	return hdlc_ioctl(d, ifr, cmd);
}

/*
 *	Passed network frames, fire them downwind.
 */

static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
					     struct net_device *d)
{
	return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}

/* Only NRZ encoding with CRC16-CCITT (preset 1) is supported. */
static int sealevel_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static const struct net_device_ops sealevel_ops = {
	.ndo_open       = sealevel_open,
	.ndo_stop       = sealevel_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = sealevel_ioctl,
};

/*
 * Allocate and register the HDLC net_device for one port; returns 0 on
 * success, -1 on failure (caller treats any non-zero as fatal).
 */
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
	struct net_device *dev = alloc_hdlcdev(sv);
	if (!dev)
		return -1;

	dev_to_hdlc(dev)->attach = sealevel_attach;
	dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
	dev->netdev_ops = &sealevel_ops;
	dev->base_addr = iobase;
	dev->irq = irq;

	if (register_hdlc_device(dev)) {
		pr_err("unable to register HDLC device\n");
		free_netdev(dev);
		return -1;
	}

	sv->chan->netdevice = dev;
	return 0;
}

/*
 *	Allocate and setup Sealevel board.
 *
 *	Claims the I/O region, IRQ and both DMA channels, initialises the
 *	Z8530 and registers the two HDLC ports; every acquisition is
 *	unwound via the goto ladder at the bottom on failure.
 */

static __init struct slvl_board *slvl_init(int iobase, int irq,
					   int txdma, int rxdma, int slow)
{
	struct z8530_dev *dev;
	struct slvl_board *b;

	/*
	 *	Get the needed I/O space
	 */

	if (!request_region(iobase, 8, "Sealevel 4021")) {
		pr_warn("I/O 0x%X already in use\n", iobase);
		return NULL;
	}

	b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
	if (!b)
		goto err_kzalloc;

	b->dev[0].chan = &b->board.chanA;
	b->dev[0].channel = 0;

	b->dev[1].chan = &b->board.chanB;
	b->dev[1].channel = 1;

	dev = &b->board;

	/*
	 *	Stuff in the I/O addressing
	 */

	dev->active = 0;
	b->iobase = iobase;

	/*
	 *	Select 8530 delays for the old board
	 */

	if (slow)
		iobase |= Z8530_PORT_SLEEP;

	dev->chanA.ctrlio = iobase + 1;
	dev->chanA.dataio = iobase;
	dev->chanB.ctrlio = iobase + 3;
	dev->chanB.dataio = iobase + 2;

	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;

	/*
	 *	Assert DTR enable DMA
	 */

	outb(3 | (1 << 7), b->iobase + 4);


	/* We want a fast IRQ for this device. Actually we'd like an
	   even faster IRQ ;) - This is one driver RtLinux is made for */

	if (request_irq(irq, z8530_interrupt, 0,
			"SeaLevel", dev) < 0) {
		pr_warn("IRQ %d already in use\n", irq);
		goto err_request_irq;
	}

	dev->irq = irq;
	dev->chanA.private = &b->dev[0];
	dev->chanB.private = &b->dev[1];
	dev->chanA.dev = dev;
	dev->chanB.dev = dev;

	/*
	 * NOTE(review): the txdma/rxdma parameters passed in are ignored
	 * here - channel A is hard-wired to DMA 3 (TX) and 1 (RX).
	 * Verify whether the module parameters were ever meant to apply.
	 */
	dev->chanA.txdma = 3;
	dev->chanA.rxdma = 1;
	if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
		goto err_dma_tx;

	if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
		goto err_dma_rx;

	disable_irq(irq);

	/*
	 *	Begin normal initialise
	 */

	if (z8530_init(dev) != 0) {
		pr_err("Z8530 series device not found\n");
		enable_irq(irq);
		goto free_hw;
	}
	if (dev->type == Z85C30) {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
	} else {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
	}

	/*
	 *	Now we can take the IRQ
	 */

	enable_irq(irq);

	if (slvl_setup(&b->dev[0], iobase, irq))
		goto free_hw;
	if (slvl_setup(&b->dev[1], iobase, irq))
		goto free_netdev0;

	z8530_describe(dev, "I/O", iobase);
	dev->active = 1;
	return b;

free_netdev0:
	unregister_hdlc_device(b->dev[0].chan->netdevice);
	free_netdev(b->dev[0].chan->netdevice);
free_hw:
	free_dma(dev->chanA.rxdma);
err_dma_rx:
	free_dma(dev->chanA.txdma);
err_dma_tx:
	free_irq(irq, dev);
err_request_irq:
	kfree(b);
err_kzalloc:
	release_region(iobase, 8);
	return NULL;
}

/* Tear the board down in reverse order of slvl_init(). */
static void __exit slvl_shutdown(struct slvl_board *b)
{
	int u;

	z8530_shutdown(&b->board);

	for (u = 0; u < 2; u++) {
		struct net_device *d = b->dev[u].chan->netdevice;
		unregister_hdlc_device(d);
		free_netdev(d);
	}

	free_irq(b->board.irq, &b->board);
	free_dma(b->board.chanA.rxdma);
	free_dma(b->board.chanA.txdma);
	/* DMA off on the card, drop DTR */
	outb(0, b->iobase);
	release_region(b->iobase, 8);
	kfree(b);
}

/* Module parameters: ISA resources of the single supported board. */
static int io=0x238;
static int txdma=1;
static int rxdma=3;
static int irq=5;
static bool slow=false;

module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
module_param(txdma, int, 0);
MODULE_PARM_DESC(txdma, "Transmit DMA channel");
module_param(rxdma, int, 0);
MODULE_PARM_DESC(rxdma, "Receive DMA channel");
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
module_param(slow, bool, 0);
MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");

/* Single-instance driver: only one board is probed at module load. */
static struct slvl_board *slvl_unit;

static int __init slvl_init_module(void)
{
	slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);

	return slvl_unit ? 0 : -ENODEV;
}

static void __exit slvl_cleanup_module(void)
{
	if (slvl_unit)
		slvl_shutdown(slvl_unit);
}

module_init(slvl_init_module);
module_exit(slvl_cleanup_module);
gpl-2.0
umiddelb/linux-kernel
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
2078
27282
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/os.h> #include <core/class.h> #include <core/client.h> #include <core/handle.h> #include <core/engctx.h> #include <core/enum.h> #include <subdev/fb.h> #include <subdev/vm.h> #include <subdev/timer.h> #include <engine/fifo.h> #include <engine/graph.h> #include "nv50.h" struct nv50_graph_priv { struct nouveau_graph base; spinlock_t lock; u32 size; }; struct nv50_graph_chan { struct nouveau_graph_chan base; }; static u64 nv50_graph_units(struct nouveau_graph *graph) { struct nv50_graph_priv *priv = (void *)graph; return nv_rd32(priv, 0x1540); } /******************************************************************************* * Graphics object classes ******************************************************************************/ static int nv50_graph_object_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nouveau_gpuobj *obj; int ret; ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, 16, 16, 0, &obj); *pobject = nv_object(obj); if (ret) return ret; nv_wo32(obj, 0x00, nv_mclass(obj)); nv_wo32(obj, 0x04, 0x00000000); nv_wo32(obj, 0x08, 0x00000000); nv_wo32(obj, 0x0c, 0x00000000); return 0; } static struct nouveau_ofuncs nv50_graph_ofuncs = { .ctor = nv50_graph_object_ctor, .dtor = _nouveau_gpuobj_dtor, .init = _nouveau_gpuobj_init, .fini = _nouveau_gpuobj_fini, .rd32 = _nouveau_gpuobj_rd32, .wr32 = _nouveau_gpuobj_wr32, }; static struct nouveau_oclass nv50_graph_sclass[] = { { 0x0030, &nv50_graph_ofuncs }, { 0x502d, &nv50_graph_ofuncs }, { 0x5039, &nv50_graph_ofuncs }, { 0x5097, &nv50_graph_ofuncs }, { 0x50c0, &nv50_graph_ofuncs }, {} }; static struct nouveau_oclass nv84_graph_sclass[] = { { 0x0030, &nv50_graph_ofuncs }, { 0x502d, &nv50_graph_ofuncs }, { 0x5039, &nv50_graph_ofuncs }, { 0x50c0, &nv50_graph_ofuncs }, { 0x8297, &nv50_graph_ofuncs }, {} }; static struct nouveau_oclass nva0_graph_sclass[] = { { 
0x0030, &nv50_graph_ofuncs }, { 0x502d, &nv50_graph_ofuncs }, { 0x5039, &nv50_graph_ofuncs }, { 0x50c0, &nv50_graph_ofuncs }, { 0x8397, &nv50_graph_ofuncs }, {} }; static struct nouveau_oclass nva3_graph_sclass[] = { { 0x0030, &nv50_graph_ofuncs }, { 0x502d, &nv50_graph_ofuncs }, { 0x5039, &nv50_graph_ofuncs }, { 0x50c0, &nv50_graph_ofuncs }, { 0x8597, &nv50_graph_ofuncs }, { 0x85c0, &nv50_graph_ofuncs }, {} }; static struct nouveau_oclass nvaf_graph_sclass[] = { { 0x0030, &nv50_graph_ofuncs }, { 0x502d, &nv50_graph_ofuncs }, { 0x5039, &nv50_graph_ofuncs }, { 0x50c0, &nv50_graph_ofuncs }, { 0x85c0, &nv50_graph_ofuncs }, { 0x8697, &nv50_graph_ofuncs }, {} }; /******************************************************************************* * PGRAPH context ******************************************************************************/ static int nv50_graph_context_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv50_graph_priv *priv = (void *)engine; struct nv50_graph_chan *chan; int ret; ret = nouveau_graph_context_create(parent, engine, oclass, NULL, priv->size, 0, NVOBJ_FLAG_ZERO_ALLOC, &chan); *pobject = nv_object(chan); if (ret) return ret; nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan)); return 0; } static struct nouveau_oclass nv50_graph_cclass = { .handle = NV_ENGCTX(GR, 0x50), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_graph_context_ctor, .dtor = _nouveau_graph_context_dtor, .init = _nouveau_graph_context_init, .fini = _nouveau_graph_context_fini, .rd32 = _nouveau_graph_context_rd32, .wr32 = _nouveau_graph_context_wr32, }, }; /******************************************************************************* * PGRAPH engine/subdev functions ******************************************************************************/ static int nv50_graph_tlb_flush(struct nouveau_engine *engine) { nv50_vm_flush_engine(&engine->base, 0x00); return 0; } 
static const struct nouveau_bitfield nv50_pgraph_status[] = { { 0x00000001, "BUSY" }, /* set when any bit is set */ { 0x00000002, "DISPATCH" }, { 0x00000004, "UNK2" }, { 0x00000008, "UNK3" }, { 0x00000010, "UNK4" }, { 0x00000020, "UNK5" }, { 0x00000040, "M2MF" }, { 0x00000080, "UNK7" }, { 0x00000100, "CTXPROG" }, { 0x00000200, "VFETCH" }, { 0x00000400, "CCACHE_UNK4" }, { 0x00000800, "STRMOUT_GSCHED_UNK5" }, { 0x00001000, "UNK14XX" }, { 0x00002000, "UNK24XX_CSCHED" }, { 0x00004000, "UNK1CXX" }, { 0x00008000, "CLIPID" }, { 0x00010000, "ZCULL" }, { 0x00020000, "ENG2D" }, { 0x00040000, "UNK34XX" }, { 0x00080000, "TPRAST" }, { 0x00100000, "TPROP" }, { 0x00200000, "TEX" }, { 0x00400000, "TPVP" }, { 0x00800000, "MP" }, { 0x01000000, "ROP" }, {} }; static const char *const nv50_pgraph_vstatus_0[] = { "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL }; static const char *const nv50_pgraph_vstatus_1[] = { "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL }; static const char *const nv50_pgraph_vstatus_2[] = { "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX", "ROP", NULL }; static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r, const char *const units[], u32 status) { int i; nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status); for (i = 0; units[i] && status; i++) { if ((status & 7) == 1) pr_cont(" %s", units[i]); status >>= 3; } if (status) pr_cont(" (invalid: 0x%x)", status); pr_cont("\n"); } static int nv84_graph_tlb_flush(struct nouveau_engine *engine) { struct nouveau_timer *ptimer = nouveau_timer(engine); struct nv50_graph_priv *priv = (void *)engine; bool idle, timeout = false; unsigned long flags; u64 start; u32 tmp; spin_lock_irqsave(&priv->lock, flags); nv_mask(priv, 0x400500, 0x00000001, 0x00000000); start = ptimer->read(ptimer); do { idle = true; for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) { if ((tmp & 7) == 1) idle = false; } for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp 
>>= 3) { if ((tmp & 7) == 1) idle = false; } for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) { if ((tmp & 7) == 1) idle = false; } } while (!idle && !(timeout = ptimer->read(ptimer) - start > 2000000000)); if (timeout) { nv_error(priv, "PGRAPH TLB flush idle timeout fail\n"); tmp = nv_rd32(priv, 0x400700); nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp); nouveau_bitfield_print(nv50_pgraph_status, tmp); pr_cont("\n"); nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0, nv_rd32(priv, 0x400380)); nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1, nv_rd32(priv, 0x400384)); nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2, nv_rd32(priv, 0x400388)); } nv50_vm_flush_engine(&engine->base, 0x00); nv_mask(priv, 0x400500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&priv->lock, flags); return timeout ? -EBUSY : 0; } static const struct nouveau_enum nv50_mp_exec_error_names[] = { { 3, "STACK_UNDERFLOW", NULL }, { 4, "QUADON_ACTIVE", NULL }, { 8, "TIMEOUT", NULL }, { 0x10, "INVALID_OPCODE", NULL }, { 0x40, "BREAKPOINT", NULL }, {} }; static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = { { 0x00000001, "NOTIFY" }, { 0x00000002, "IN" }, { 0x00000004, "OUT" }, {} }; static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = { { 0x00000001, "FAULT" }, {} }; static const struct nouveau_bitfield nv50_graph_trap_strmout[] = { { 0x00000001, "FAULT" }, {} }; static const struct nouveau_bitfield nv50_graph_trap_ccache[] = { { 0x00000001, "FAULT" }, {} }; /* There must be a *lot* of these. Will take some time to gather them up. 
*/ const struct nouveau_enum nv50_data_error_names[] = { { 0x00000003, "INVALID_OPERATION", NULL }, { 0x00000004, "INVALID_VALUE", NULL }, { 0x00000005, "INVALID_ENUM", NULL }, { 0x00000008, "INVALID_OBJECT", NULL }, { 0x00000009, "READ_ONLY_OBJECT", NULL }, { 0x0000000a, "SUPERVISOR_OBJECT", NULL }, { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL }, { 0x0000000c, "INVALID_BITFIELD", NULL }, { 0x0000000d, "BEGIN_END_ACTIVE", NULL }, { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL }, { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL }, { 0x00000010, "RT_DOUBLE_BIND", NULL }, { 0x00000011, "RT_TYPES_MISMATCH", NULL }, { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL }, { 0x00000015, "FP_TOO_FEW_REGS", NULL }, { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL }, { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL }, { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL }, { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL }, { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL }, { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL }, { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL }, { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL }, { 0x0000001f, "RT_BPP128_WITH_MS8", NULL }, { 0x00000021, "Z_OUT_OF_BOUNDS", NULL }, { 0x00000023, "XY_OUT_OF_BOUNDS", NULL }, { 0x00000024, "VP_ZERO_INPUTS", NULL }, { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL }, { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL }, { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL }, { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL }, { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL }, { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL }, { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL }, { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL }, { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL }, { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL }, { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL }, { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL }, { 0x00000046, "LAYER_ID_NEEDS_GP", NULL }, { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL }, { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL }, {} 
}; static const struct nouveau_bitfield nv50_graph_intr_name[] = { { 0x00000001, "NOTIFY" }, { 0x00000002, "COMPUTE_QUERY" }, { 0x00000010, "ILLEGAL_MTHD" }, { 0x00000020, "ILLEGAL_CLASS" }, { 0x00000040, "DOUBLE_NOTIFY" }, { 0x00001000, "CONTEXT_SWITCH" }, { 0x00010000, "BUFFER_NOTIFY" }, { 0x00100000, "DATA_ERROR" }, { 0x00200000, "TRAP" }, { 0x01000000, "SINGLE_STEP" }, {} }; static void nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display) { u32 units = nv_rd32(priv, 0x1540); u32 addr, mp10, status, pc, oplow, ophigh; int i; int mps = 0; for (i = 0; i < 4; i++) { if (!(units & 1 << (i+24))) continue; if (nv_device(priv)->chipset < 0xa0) addr = 0x408200 + (tpid << 12) + (i << 7); else addr = 0x408100 + (tpid << 11) + (i << 7); mp10 = nv_rd32(priv, addr + 0x10); status = nv_rd32(priv, addr + 0x14); if (!status) continue; if (display) { nv_rd32(priv, addr + 0x20); pc = nv_rd32(priv, addr + 0x24); oplow = nv_rd32(priv, addr + 0x70); ophigh = nv_rd32(priv, addr + 0x74); nv_error(priv, "TRAP_MP_EXEC - " "TP %d MP %d: ", tpid, i); nouveau_enum_print(nv50_mp_exec_error_names, status); pr_cont(" at %06x warp %d, opcode %08x %08x\n", pc&0xffffff, pc >> 24, oplow, ophigh); } nv_wr32(priv, addr + 0x10, mp10); nv_wr32(priv, addr + 0x14, 0); mps++; } if (!mps && display) nv_error(priv, "TRAP_MP_EXEC - TP %d: " "No MPs claiming errors?\n", tpid); } static void nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, u32 ustatus_new, int display, const char *name) { int tps = 0; u32 units = nv_rd32(priv, 0x1540); int i, r; u32 ustatus_addr, ustatus; for (i = 0; i < 16; i++) { if (!(units & (1 << i))) continue; if (nv_device(priv)->chipset < 0xa0) ustatus_addr = ustatus_old + (i << 12); else ustatus_addr = ustatus_new + (i << 11); ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff; if (!ustatus) continue; tps++; switch (type) { case 6: /* texture error... 
unknown for now */ if (display) { nv_error(priv, "magic set %d:\n", i); for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) nv_error(priv, "\t0x%08x: 0x%08x\n", r, nv_rd32(priv, r)); } break; case 7: /* MP error */ if (ustatus & 0x04030000) { nv50_priv_mp_trap(priv, i, display); ustatus &= ~0x04030000; } break; case 8: /* TPDMA error */ { u32 e0c = nv_rd32(priv, ustatus_addr + 4); u32 e10 = nv_rd32(priv, ustatus_addr + 8); u32 e14 = nv_rd32(priv, ustatus_addr + 0xc); u32 e18 = nv_rd32(priv, ustatus_addr + 0x10); u32 e1c = nv_rd32(priv, ustatus_addr + 0x14); u32 e20 = nv_rd32(priv, ustatus_addr + 0x18); u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c); /* 2d engine destination */ if (ustatus & 0x00000010) { if (display) { nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", i, e14, e10); nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", i, e0c, e18, e1c, e20, e24); } ustatus &= ~0x00000010; } /* Render target */ if (ustatus & 0x00000040) { if (display) { nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", i, e14, e10); nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", i, e0c, e18, e1c, e20, e24); } ustatus &= ~0x00000040; } /* CUDA memory: l[], g[] or stack. */ if (ustatus & 0x00000080) { if (display) { if (e18 & 0x80000000) { /* g[] read fault? */ nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", i, e14, e10 | ((e18 >> 24) & 0x1f)); e18 &= ~0x1f000000; } else if (e18 & 0xc) { /* g[] write fault? 
*/ nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", i, e14, e10 | ((e18 >> 7) & 0x1f)); e18 &= ~0x00000f80; } else { nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", i, e14, e10); } nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", i, e0c, e18, e1c, e20, e24); } ustatus &= ~0x00000080; } } break; } if (ustatus) { if (display) nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); } nv_wr32(priv, ustatus_addr, 0xc0000000); } if (!tps && display) nv_warn(priv, "%s - No TPs claiming errors?\n", name); } static int nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, int chid, u64 inst, struct nouveau_object *engctx) { u32 status = nv_rd32(priv, 0x400108); u32 ustatus; if (!status && display) { nv_error(priv, "TRAP: no units reporting traps?\n"); return 1; } /* DISPATCH: Relays commands to other units and handles NOTIFY, * COND, QUERY. If you get a trap from it, the command is still stuck * in DISPATCH and you need to do something about it. */ if (status & 0x001) { ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff; if (!ustatus && display) { nv_error(priv, "TRAP_DISPATCH - no ustatus?\n"); } nv_wr32(priv, 0x400500, 0x00000000); /* Known to be triggered by screwed up NOTIFY and COND... 
*/ if (ustatus & 0x00000001) { u32 addr = nv_rd32(priv, 0x400808); u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00001ffc); u32 datal = nv_rd32(priv, 0x40080c); u32 datah = nv_rd32(priv, 0x400810); u32 class = nv_rd32(priv, 0x400814); u32 r848 = nv_rd32(priv, 0x400848); nv_error(priv, "TRAP DISPATCH_FAULT\n"); if (display && (addr & 0x80000000)) { nv_error(priv, "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n", chid, inst, nouveau_client_name(engctx), subc, class, mthd, datah, datal, addr, r848); } else if (display) { nv_error(priv, "no stuck command?\n"); } nv_wr32(priv, 0x400808, 0); nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3); nv_wr32(priv, 0x400848, 0); ustatus &= ~0x00000001; } if (ustatus & 0x00000002) { u32 addr = nv_rd32(priv, 0x40084c); u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00001ffc); u32 data = nv_rd32(priv, 0x40085c); u32 class = nv_rd32(priv, 0x400814); nv_error(priv, "TRAP DISPATCH_QUERY\n"); if (display && (addr & 0x80000000)) { nv_error(priv, "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n", chid, inst, nouveau_client_name(engctx), subc, class, mthd, data, addr); } else if (display) { nv_error(priv, "no stuck command?\n"); } nv_wr32(priv, 0x40084c, 0); ustatus &= ~0x00000002; } if (ustatus && display) { nv_error(priv, "TRAP_DISPATCH (unknown " "0x%08x)\n", ustatus); } nv_wr32(priv, 0x400804, 0xc0000000); nv_wr32(priv, 0x400108, 0x001); status &= ~0x001; if (!status) return 0; } /* M2MF: Memory to memory copy engine. */ if (status & 0x002) { u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff; if (display) { nv_error(priv, "TRAP_M2MF"); nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); pr_cont("\n"); nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n", nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808), nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810)); } /* No sane way found yet -- just reset the bugger. 
*/ nv_wr32(priv, 0x400040, 2); nv_wr32(priv, 0x400040, 0); nv_wr32(priv, 0x406800, 0xc0000000); nv_wr32(priv, 0x400108, 0x002); status &= ~0x002; } /* VFETCH: Fetches data from vertex buffers. */ if (status & 0x004) { u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff; if (display) { nv_error(priv, "TRAP_VFETCH"); nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); pr_cont("\n"); nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n", nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08), nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10)); } nv_wr32(priv, 0x400c04, 0xc0000000); nv_wr32(priv, 0x400108, 0x004); status &= ~0x004; } /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ if (status & 0x008) { ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff; if (display) { nv_error(priv, "TRAP_STRMOUT"); nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); pr_cont("\n"); nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n", nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808), nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810)); } /* No sane way found yet -- just reset the bugger. */ nv_wr32(priv, 0x400040, 0x80); nv_wr32(priv, 0x400040, 0); nv_wr32(priv, 0x401800, 0xc0000000); nv_wr32(priv, 0x400108, 0x008); status &= ~0x008; } /* CCACHE: Handles code and c[] caches and fills them. */ if (status & 0x010) { ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff; if (display) { nv_error(priv, "TRAP_CCACHE"); nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); pr_cont("\n"); nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x" " %08x %08x %08x\n", nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004), nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c), nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014), nv_rd32(priv, 0x40501c)); } nv_wr32(priv, 0x405018, 0xc0000000); nv_wr32(priv, 0x400108, 0x010); status &= ~0x010; } /* Unknown, not seen yet... 0x402000 is the only trap status reg * remaining, so try to handle it anyway. Perhaps related to that * unknown DMA slot on tesla? 
*/ if (status & 0x20) { ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff; if (display) nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus); nv_wr32(priv, 0x402000, 0xc0000000); /* no status modifiction on purpose */ } /* TEXTURE: CUDA texturing units */ if (status & 0x040) { nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display, "TRAP_TEXTURE"); nv_wr32(priv, 0x400108, 0x040); status &= ~0x040; } /* MP: CUDA execution engines. */ if (status & 0x080) { nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display, "TRAP_MP"); nv_wr32(priv, 0x400108, 0x080); status &= ~0x080; } /* TPDMA: Handles TP-initiated uncached memory accesses: * l[], g[], stack, 2d surfaces, render targets. */ if (status & 0x100) { nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display, "TRAP_TPDMA"); nv_wr32(priv, 0x400108, 0x100); status &= ~0x100; } if (status) { if (display) nv_error(priv, "TRAP: unknown 0x%08x\n", status); nv_wr32(priv, 0x400108, status); } return 1; } static void nv50_graph_intr(struct nouveau_subdev *subdev) { struct nouveau_fifo *pfifo = nouveau_fifo(subdev); struct nouveau_engine *engine = nv_engine(subdev); struct nouveau_object *engctx; struct nouveau_handle *handle = NULL; struct nv50_graph_priv *priv = (void *)subdev; u32 stat = nv_rd32(priv, 0x400100); u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff; u32 addr = nv_rd32(priv, 0x400704); u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00001ffc); u32 data = nv_rd32(priv, 0x400708); u32 class = nv_rd32(priv, 0x400814); u32 show = stat; int chid; engctx = nouveau_engctx_get(engine, inst); chid = pfifo->chid(pfifo, engctx); if (stat & 0x00000010) { handle = nouveau_handle_get_class(engctx, class); if (handle && !nv_call(handle->object, mthd, data)) show &= ~0x00000010; nouveau_handle_put(handle); } if (show & 0x00100000) { u32 ecode = nv_rd32(priv, 0x400110); nv_error(priv, "DATA_ERROR "); nouveau_enum_print(nv50_data_error_names, ecode); pr_cont("\n"); } if (stat & 0x00200000) { if (!nv50_graph_trap_handler(priv, show, 
chid, (u64)inst << 12, engctx)) show &= ~0x00200000; } nv_wr32(priv, 0x400100, stat); nv_wr32(priv, 0x400500, 0x00010001); if (show) { nv_error(priv, "%s", ""); nouveau_bitfield_print(nv50_graph_intr_name, show); pr_cont("\n"); nv_error(priv, "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", chid, (u64)inst << 12, nouveau_client_name(engctx), subc, class, mthd, data); } if (nv_rd32(priv, 0x400824) & (1 << 31)) nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31)); nouveau_engctx_put(engctx); } static int nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv50_graph_priv *priv; int ret; ret = nouveau_graph_create(parent, engine, oclass, true, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00201000; nv_subdev(priv)->intr = nv50_graph_intr; nv_engine(priv)->cclass = &nv50_graph_cclass; priv->base.units = nv50_graph_units; switch (nv_device(priv)->chipset) { case 0x50: nv_engine(priv)->sclass = nv50_graph_sclass; break; case 0x84: case 0x86: case 0x92: case 0x94: case 0x96: case 0x98: nv_engine(priv)->sclass = nv84_graph_sclass; break; case 0xa0: case 0xaa: case 0xac: nv_engine(priv)->sclass = nva0_graph_sclass; break; case 0xa3: case 0xa5: case 0xa8: nv_engine(priv)->sclass = nva3_graph_sclass; break; case 0xaf: nv_engine(priv)->sclass = nvaf_graph_sclass; break; }; if (nv_device(priv)->chipset == 0x50 || nv_device(priv)->chipset == 0xac) nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush; else nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush; spin_lock_init(&priv->lock); return 0; } static int nv50_graph_init(struct nouveau_object *object) { struct nv50_graph_priv *priv = (void *)object; int ret, units, i; ret = nouveau_graph_init(&priv->base); if (ret) return ret; /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */ nv_wr32(priv, 0x40008c, 0x00000004); /* reset/enable traps and interrupts 
*/ nv_wr32(priv, 0x400804, 0xc0000000); nv_wr32(priv, 0x406800, 0xc0000000); nv_wr32(priv, 0x400c04, 0xc0000000); nv_wr32(priv, 0x401800, 0xc0000000); nv_wr32(priv, 0x405018, 0xc0000000); nv_wr32(priv, 0x402000, 0xc0000000); units = nv_rd32(priv, 0x001540); for (i = 0; i < 16; i++) { if (!(units & (1 << i))) continue; if (nv_device(priv)->chipset < 0xa0) { nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000); nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000); nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000); } else { nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000); nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000); nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000); } } nv_wr32(priv, 0x400108, 0xffffffff); nv_wr32(priv, 0x400138, 0xffffffff); nv_wr32(priv, 0x400100, 0xffffffff); nv_wr32(priv, 0x40013c, 0xffffffff); nv_wr32(priv, 0x400500, 0x00010001); /* upload context program, initialise ctxctl defaults */ ret = nv50_grctx_init(nv_device(priv), &priv->size); if (ret) return ret; nv_wr32(priv, 0x400824, 0x00000000); nv_wr32(priv, 0x400828, 0x00000000); nv_wr32(priv, 0x40082c, 0x00000000); nv_wr32(priv, 0x400830, 0x00000000); nv_wr32(priv, 0x40032c, 0x00000000); nv_wr32(priv, 0x400330, 0x00000000); /* some unknown zcull magic */ switch (nv_device(priv)->chipset & 0xf0) { case 0x50: case 0x80: case 0x90: nv_wr32(priv, 0x402ca8, 0x00000800); break; case 0xa0: default: nv_wr32(priv, 0x402cc0, 0x00000000); if (nv_device(priv)->chipset == 0xa0 || nv_device(priv)->chipset == 0xaa || nv_device(priv)->chipset == 0xac) { nv_wr32(priv, 0x402ca8, 0x00000802); } else { nv_wr32(priv, 0x402cc0, 0x00000000); nv_wr32(priv, 0x402ca8, 0x00000002); } break; } /* zero out zcull regions */ for (i = 0; i < 8; i++) { nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000); nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000); nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000); nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000); } return 0; } struct nouveau_oclass nv50_graph_oclass = { .handle = NV_ENGINE(GR, 0x50), .ofuncs = 
&(struct nouveau_ofuncs) { .ctor = nv50_graph_ctor, .dtor = _nouveau_graph_dtor, .init = nv50_graph_init, .fini = _nouveau_graph_fini, }, };
gpl-2.0
xcore995/lge_h502_kernel
drivers/rtc/rtc-efi.c
2078
4859
/* * rtc-efi: RTC Class Driver for EFI-based systems * * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * * Author: dann frazier <dannf@hp.com> * Based on efirtc.c by Stephane Eranian * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/efi.h> #define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT) /* * EFI Epoch is 1/1/1998 */ #define EFI_RTC_EPOCH 1998 /* * returns day of the year [0-365] */ static inline int compute_yday(efi_time_t *eft) { /* efi_time_t.month is in the [1-12] so, we need -1 */ return rtc_year_days(eft->day - 1, eft->month - 1, eft->year); } /* * returns day of the week [0-6] 0=Sunday * * Don't try to provide a year that's before 1998, please ! */ static int compute_wday(efi_time_t *eft) { int y; int ndays = 0; if (eft->year < 1998) { pr_err("EFI year < 1998, invalid date\n"); return -1; } for (y = EFI_RTC_EPOCH; y < eft->year; y++) ndays += 365 + (is_leap_year(y) ? 1 : 0); ndays += compute_yday(eft); /* * 4=1/1/1998 was a Thursday */ return (ndays + 4) % 7; } static void convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft) { eft->year = wtime->tm_year + 1900; eft->month = wtime->tm_mon + 1; eft->day = wtime->tm_mday; eft->hour = wtime->tm_hour; eft->minute = wtime->tm_min; eft->second = wtime->tm_sec; eft->nanosecond = 0; eft->daylight = wtime->tm_isdst ? 
EFI_ISDST : 0; eft->timezone = EFI_UNSPECIFIED_TIMEZONE; } static void convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) { memset(wtime, 0, sizeof(*wtime)); wtime->tm_sec = eft->second; wtime->tm_min = eft->minute; wtime->tm_hour = eft->hour; wtime->tm_mday = eft->day; wtime->tm_mon = eft->month - 1; wtime->tm_year = eft->year - 1900; /* day of the week [0-6], Sunday=0 */ wtime->tm_wday = compute_wday(eft); /* day in the year [1-365]*/ wtime->tm_yday = compute_yday(eft); switch (eft->daylight & EFI_ISDST) { case EFI_ISDST: wtime->tm_isdst = 1; break; case EFI_TIME_ADJUST_DAYLIGHT: wtime->tm_isdst = 0; break; default: wtime->tm_isdst = -1; } } static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { efi_time_t eft; efi_status_t status; /* * As of EFI v1.10, this call always returns an unsupported status */ status = efi.get_wakeup_time((efi_bool_t *)&wkalrm->enabled, (efi_bool_t *)&wkalrm->pending, &eft); if (status != EFI_SUCCESS) return -EINVAL; convert_from_efi_time(&eft, &wkalrm->time); return rtc_valid_tm(&wkalrm->time); } static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { efi_time_t eft; efi_status_t status; convert_to_efi_time(&wkalrm->time, &eft); /* * XXX Fixme: * As of EFI 0.92 with the firmware I have on my * machine this call does not seem to work quite * right * * As of v1.10, this call always returns an unsupported status */ status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft); dev_warn(dev, "write status is %d\n", (int)status); return status == EFI_SUCCESS ? 
0 : -EINVAL; } static int efi_read_time(struct device *dev, struct rtc_time *tm) { efi_status_t status; efi_time_t eft; efi_time_cap_t cap; status = efi.get_time(&eft, &cap); if (status != EFI_SUCCESS) { /* should never happen */ dev_err(dev, "can't read time\n"); return -EINVAL; } convert_from_efi_time(&eft, tm); return rtc_valid_tm(tm); } static int efi_set_time(struct device *dev, struct rtc_time *tm) { efi_status_t status; efi_time_t eft; convert_to_efi_time(tm, &eft); status = efi.set_time(&eft); return status == EFI_SUCCESS ? 0 : -EINVAL; } static const struct rtc_class_ops efi_rtc_ops = { .read_time = efi_read_time, .set_time = efi_set_time, .read_alarm = efi_read_alarm, .set_alarm = efi_set_alarm, }; static int __init efi_rtc_probe(struct platform_device *dev) { struct rtc_device *rtc; rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); platform_set_drvdata(dev, rtc); return 0; } static int __exit efi_rtc_remove(struct platform_device *dev) { return 0; } static struct platform_driver efi_rtc_driver = { .driver = { .name = "rtc-efi", .owner = THIS_MODULE, }, .remove = __exit_p(efi_rtc_remove), }; module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); MODULE_AUTHOR("dann frazier <dannf@hp.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("EFI RTC driver");
gpl-2.0
keiranFTW/sony-kernel-msm8660
arch/arm/mach-tegra/pcie.c
2334
24018
/*
 * arch/arm/mach-tegra/pci.c
 *
 * PCIe host controller driver for TEGRA(2) SOCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <asm/sizes.h>
#include <asm/mach/pci.h>

#include <mach/pinmux.h>
#include <mach/iomap.h>
#include <mach/clk.h>
#include <mach/powergate.h>

/* register definitions */

/* Offsets of the AFI (AXI-to-FPCI bridge) and PADS blocks, and of the two
 * root-port register windows, inside the PCIe aperture. */
#define AFI_OFFSET	0x3800
#define PADS_OFFSET	0x3000
#define RP0_OFFSET	0x0000
#define RP1_OFFSET	0x1000

/* AFI address-translation BARs: AXI-side start/size plus FPCI-side target. */
#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_CONFIGURATION		0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

/* AFI_INTR_CODE holds the cause of the last AFI interrupt; values index the
 * err_msg[] table in tegra_pcie_isr(). */
#define AFI_INTR_CODE		0xb8
#define AFI_INTR_CODE_MASK	0xf
#define AFI_INTR_MASTER_ABORT	4
#define AFI_INTR_LEGACY		6

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_SM_INTR_ENABLE	0xc4

#define AFI_AFI_INTR_ENABLE		0xc8
#define AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define AFI_INTR_EN_INI_DECERR		(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)

#define AFI_PCIE_CONFIG					0x0f8
#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE		(1 << 1)
#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE		(1 << 2)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)

#define AFI_FUSE			0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

/* Per-port control registers: reset and reference-clock enable. */
#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX_CTRL_RST		(1 << 0)
#define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)

/* Root-port registers (relative to each port's register window). */
#define RP_VEND_XP	0x00000F00
#define RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

/* PADS (PHY) registers. */
#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL				0x000000B8
#define PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define PADS_PLL_CTL_LOCKDET			(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)

/* PMC access is required for PCIE xclk (un)clamping */
#define PMC_SCRATCH42		0x144
#define PMC_SCRATCH42_PCX_CLAMP	(1 << 0)

static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);

/* NOTE(review): the (u32) cast turns the iomem cookie into an integer before
 * the add; works on this 32-bit ARM platform but is not portable — confirm
 * against the __raw_writel() prototype if this is ever reused. */
#define pmc_writel(value, reg) \
	__raw_writel(value, (u32)reg_pmc_base + (reg))
#define pmc_readl(reg) \
	__raw_readl((u32)reg_pmc_base + (reg))

/*
 * Tegra2 defines 1GB in the AXI address map for PCIe.
 *
 * That address space is split into different regions, with sizes and
 * offsets as follows:
 *
 * 0x80000000 - 0x80003fff - PCI controller registers
 * 0x80004000 - 0x80103fff - PCI configuration space
 * 0x80104000 - 0x80203fff - PCI extended configuration space
 * 0x80203fff - 0x803fffff - unused
 * 0x80400000 - 0x8040ffff - downstream IO
 * 0x80410000 - 0x8fffffff - unused
 * 0x90000000 - 0x9fffffff - non-prefetchable memory
 * 0xa0000000 - 0xbfffffff - prefetchable memory
 */
#define TEGRA_PCIE_BASE	0x80000000

#define PCIE_REGS_SZ	SZ_16K
#define PCIE_CFG_OFF	PCIE_REGS_SZ
#define PCIE_CFG_SZ	SZ_1M
#define PCIE_EXT_CFG_OFF	(PCIE_CFG_SZ + PCIE_CFG_OFF)
#define PCIE_EXT_CFG_SZ	SZ_1M
#define PCIE_IOMAP_SZ	(PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)

#define MMIO_BASE	(TEGRA_PCIE_BASE + SZ_4M)
#define MMIO_SIZE	SZ_64K
#define MEM_BASE_0	(TEGRA_PCIE_BASE + SZ_256M)
#define MEM_SIZE_0	SZ_128M
#define MEM_BASE_1	(MEM_BASE_0 + MEM_SIZE_0)
#define MEM_SIZE_1	SZ_128M
#define PREFETCH_MEM_BASE_0	(MEM_BASE_1 + MEM_SIZE_1)
#define PREFETCH_MEM_SIZE_0	SZ_128M
#define PREFETCH_MEM_BASE_1	(PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
#define PREFETCH_MEM_SIZE_1	SZ_128M

/* Encode a type-0/type-1 configuration access into an offset in the mapped
 * config aperture; registers >= 256 go through the extended-config window. */
#define  PCIE_CONF_BUS(b)	((b) << 16)
#define  PCIE_CONF_DEV(d)	((d) << 11)
#define  PCIE_CONF_FUNC(f)	((f) << 8)
#define  PCIE_CONF_REG(r)	\
	(((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))

/* State for one root port: register window, link state, and the three
 * resources (IO / MEM / prefetchable MEM) handed to the PCI core. */
struct tegra_pcie_port {
	int			index;
	u8			root_bus_nr;
	void __iomem		*base;

	bool			link_up;

	char			io_space_name[16];
	char			mem_space_name[16];
	char			prefetch_space_name[20];
	struct resource		res[3];
};

/* Driver-global state: up to two ports, the mapped register aperture and
 * the clocks needed to run the controller. */
struct tegra_pcie_info {
	struct tegra_pcie_port	port[2];
	int			num_ports;

	void __iomem		*regs;
	struct resource		res_mmio;

	struct clk		*pex_clk;
	struct clk		*afi_clk;
	struct clk		*pcie_xclk;
	struct clk		*pll_e;
};

static struct tegra_pcie_info tegra_pcie = {
	.res_mmio = {
		.name = "PCI IO",
		.start = MMIO_BASE,
		.end = MMIO_BASE + MMIO_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

/* Virtual base of the downstream IO window; exported for board code. */
void __iomem *tegra_pcie_io_base;
EXPORT_SYMBOL(tegra_pcie_io_base);

/* Accessors for the AFI and PADS register blocks, offset-relative. */
static inline void afi_writel(u32 value, unsigned long offset)
{
	writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline u32 afi_readl(unsigned long offset)
{
	return readl(offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline void pads_writel(u32 value, unsigned long offset)
{
	writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
}

static inline u32 pads_readl(unsigned long offset)
{
	return readl(offset + PADS_OFFSET + tegra_pcie.regs);
}

/* Map a PCI bus number to the root port whose root bus it is, or NULL if
 * the bus is downstream of a bridge (handled via the config aperture). */
static struct tegra_pcie_port *bus_to_port(int bus)
{
	int i;

	for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
		int rbus = tegra_pcie.port[i].root_bus_nr;
		if (rbus != -1 && rbus == bus)
			break;
	}

	return i >= 0 ? tegra_pcie.port + i : NULL;
}

/* Configuration-space read. Root-port (bridge) registers are read directly
 * from the port window; everything else goes through the mapped config
 * aperture. Only devfn 0 exists on a root bus. */
static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	if (pp) {
		if (devfn != 0) {
			*val = 0xffffffff;
			return PCIBIOS_DEVICE_NOT_FOUND;
		}

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	/* Always a 32-bit read; narrow accesses extract the right lane. */
	*val = readl(addr);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

/* Configuration-space write; sub-word writes are done read-modify-write
 * since the hardware window is 32-bit. */
static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	u32 mask;
	u32 tmp;

	if (pp) {
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	if (size == 2)
		mask = ~(0xffff << ((where & 0x3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 0x3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops tegra_pcie_ops = {
	.read	= tegra_pcie_read_conf,
	.write	= tegra_pcie_write_conf,
};

/* Make sure bridges have IO/MEM/master/SERR enabled in COMMAND. */
static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
{
	u16 val16;
	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);

	if (pos <= 0) {
		dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
	val16 |= PCI_EXP_DEVCTL_RELAX_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

/* hw_pci .setup hook: claim and publish the IO/MEM/prefetch resources for
 * controller nr. Port 0 and port 1 split the fixed address map between
 * them (see the layout comment above). Returns 1 to scan, 0 to skip. */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return 0;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	/*
	 * IORESOURCE_IO
	 */
	snprintf(pp->io_space_name, sizeof(pp->io_space_name),
		 "PCIe %d I/O", pp->index);
	pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
	pp->res[0].name = pp->io_space_name;
	if (pp->index == 0) {
		pp->res[0].start = PCIBIOS_MIN_IO;
		pp->res[0].end = pp->res[0].start + SZ_32K - 1;
	} else {
		pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
		pp->res[0].end = IO_SPACE_LIMIT;
	}
	pp->res[0].flags = IORESOURCE_IO;
	if (request_resource(&ioport_resource, &pp->res[0]))
		panic("Request PCIe IO resource failed\n");
	sys->resource[0] = &pp->res[0];

	/*
	 * IORESOURCE_MEM
	 */
	snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
		 "PCIe %d MEM", pp->index);
	pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
	pp->res[1].name = pp->mem_space_name;
	if (pp->index == 0) {
		pp->res[1].start = MEM_BASE_0;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
	} else {
		pp->res[1].start = MEM_BASE_1;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
	}
	pp->res[1].flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pp->res[1]))
		panic("Request PCIe Memory resource failed\n");
	sys->resource[1] = &pp->res[1];

	/*
	 * IORESOURCE_MEM | IORESOURCE_PREFETCH
	 */
	snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
		 "PCIe %d PREFETCH MEM", pp->index);
	pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
	pp->res[2].name = pp->prefetch_space_name;
	if (pp->index == 0) {
		pp->res[2].start = PREFETCH_MEM_BASE_0;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
	} else {
		pp->res[2].start = PREFETCH_MEM_BASE_1;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
	}
	pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
	if (request_resource(&iomem_resource, &pp->res[2]))
		panic("Request PCIe Prefetch Memory resource failed\n");
	sys->resource[2] = &pp->res[2];

	return 1;
}

/* All ports share the single PCIe legacy interrupt line. */
static int tegra_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	return INT_PCIE_INTR;
}

static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
						  struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return 0;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys);
}

static struct hw_pci tegra_pcie_hw __initdata = {
	.nr_controllers	= 2,
	.setup		= tegra_pcie_setup,
	.scan		= tegra_pcie_scan_bus,
	.swizzle	= pci_std_swizzle,
	.map_irq	= tegra_pcie_map_irq,
};

/* AFI error interrupt handler: decode AFI_INTR_CODE and log it. Legacy
 * INTx causes are not ours — return IRQ_NONE so the sharing device gets
 * them. NOTE(review): "Transcation" typo in the table is preserved as it
 * is a runtime log string. */
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Response decoding error",
		"AXI response decoding error",
		"Transcation timeout",
	};

	u32 code, signature;

	code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(AFI_INTR_SIGNATURE);
	afi_writel(0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		pr_debug("PCIE: %s, signature: %08x\n",
				err_msg[code], signature);
	else
		pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);

	return IRQ_HANDLED;
}

/* Program the six AFI BARs that translate AXI addresses to FPCI: config,
 * extended config, downstream IO, prefetchable and non-prefetchable
 * memory; BAR5 and the MSI BAR are disabled. Sizes are written in 4K
 * pages (>> 12). */
static void tegra_pcie_setup_translations(void)
{
	u32 fpci_bar;
	u32 size;
	u32 axi_address;

	/* Bar 0: config Bar */
	fpci_bar = ((u32)0xfdff << 16);
	size = PCIE_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR0_START);
	afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: extended config Bar */
	fpci_bar = ((u32)0xfe1 << 20);
	size = PCIE_EXT_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR1_START);
	afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: downstream IO bar */
	fpci_bar = ((__u32)0xfdfc << 16);
	size = MMIO_SIZE;
	axi_address = MMIO_BASE;
	afi_writel(axi_address, AFI_AXI_BAR2_START);
	afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: prefetchable memory BAR */
	fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
	size =  PREFETCH_MEM_SIZE_0 +  PREFETCH_MEM_SIZE_1;
	axi_address = PREFETCH_MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR3_START);
	afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR3);

	/* Bar 4: non prefetchable memory BAR */
	fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1;
	size = MEM_SIZE_0 + MEM_SIZE_1;
	axi_address = MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR4_START);
	afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR4);

	/* Bar 5: NULL out the remaining BAR as it is not used */
	fpci_bar = 0;
	size = 0;
	axi_address = 0;
	afi_writel(axi_address, AFI_AXI_BAR5_START);
	afi_writel(size >> 12, AFI_AXI_BAR5_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(0, AFI_CACHE_BAR0_SZ);
	afi_writel(0, AFI_CACHE_BAR1_ST);
	afi_writel(0, AFI_CACHE_BAR1_SZ);

	/* No MSI */
	afi_writel(0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
	afi_writel(0, AFI_MSI_AXI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
}

/* Bring the controller out of reset: pulse the per-port resets, select
 * dual-controller mode, initialize the PHY PLL and wait for lock, then
 * enable the FPCI bridge and the error interrupts. The register write
 * ordering here follows the hardware bring-up sequence and must not be
 * rearranged. */
static void tegra_pcie_enable_controller(void)
{
	u32 val, reg;
	int i;

	/* Enable slot clock and pulse the reset signals */
	for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
		val = afi_readl(reg) |  AFI_PEX_CTRL_REFCLK_EN;
		afi_writel(val, reg);

		val &= ~AFI_PEX_CTRL_RST;
		afi_writel(val, reg);

		val = afi_readl(reg) | AFI_PEX_CTRL_RST;
		afi_writel(val, reg);
	}

	/* Enable dual controller and both ports */
	val = afi_readl(AFI_PCIE_CONFIG);
	val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
	val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
	afi_writel(val, AFI_PCIE_CONFIG);

	val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(val, AFI_FUSE);

	/* Initialze internal PHY, enable up to 16 PCIE lanes */
	pads_writel(0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/*
	 * set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5)
	 */
	val = pads_readl(PADS_PLL_CTL);
	val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
	pads_writel(val, PADS_PLL_CTL);

	/* take PLL out of reset  */
	val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM;
	pads_writel(val, PADS_PLL_CTL);

	/*
	 * Hack, set the clock voltage to the DEFAULT provided by hw folks.
	 * This doesn't exist in the documentation
	 */
	pads_writel(0xfa5cfa5c, 0xc8);

	/* Wait for the PLL to lock */
	/* NOTE(review): unbounded busy-wait — hangs forever if the PLL
	 * never locks; left as-is since the bring-up contract is unclear. */
	do {
		val = pads_readl(PADS_PLL_CTL);
	} while (!(val & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/* enable TX/RX data */
	val = pads_readl(PADS_CTL);
	val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(val, PADS_CTL);

	/* Take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);

	/* Finally enable PCIe */
	val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI;
	afi_writel(val, AFI_CONFIGURATION);

	val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
	       AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
	       AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
	afi_writel(val, AFI_AFI_INTR_ENABLE);
	afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);

	/* FIXME: No MSI for now, only INT */
	afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* Disable all execptions */
	afi_writel(0, AFI_FPCI_ERROR_MASKS);

	return;
}

/* Assert/deassert the PMC clamp on the PCIe xclk power domain. */
static void tegra_pcie_xclk_clamp(bool clamp)
{
	u32 reg;

	reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;

	if (clamp)
		reg |= PMC_SCRATCH42_PCX_CLAMP;

	pmc_writel(reg, PMC_SCRATCH42);
}

/* Put the whole PCIe partition into reset, power it down and clamp it. */
static void tegra_pcie_power_off(void)
{
	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_periph_reset_assert(tegra_pcie.afi_clk);
	tegra_periph_reset_assert(tegra_pcie.pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
	tegra_pcie_xclk_clamp(true);
}

/* Full power cycle of the PCIe partition: off, clamp, ungate with pex_clk,
 * unclamp, then enable the AFI/PEX/PLLE clocks. Returns 0 or a negative
 * errno from the powergate sequence / clk_enable. */
static int tegra_pcie_power_regate(void)
{
	int err;

	tegra_pcie_power_off();

	tegra_pcie_xclk_clamp(true);

	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_periph_reset_assert(tegra_pcie.afi_clk);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						tegra_pcie.pex_clk);
	if (err) {
		pr_err("PCIE: powerup sequence failed: %d\n", err);
		return err;
	}

	tegra_periph_reset_deassert(tegra_pcie.afi_clk);

	tegra_pcie_xclk_clamp(false);

	clk_enable(tegra_pcie.afi_clk);
	clk_enable(tegra_pcie.pex_clk);
	return clk_enable(tegra_pcie.pll_e);
}

/* Look up the four clocks the controller needs; on failure, release the
 * ones already obtained (reverse-order goto unwind). */
static int tegra_pcie_clocks_get(void)
{
	int err;

	tegra_pcie.pex_clk = clk_get(NULL, "pex");
	if (IS_ERR(tegra_pcie.pex_clk))
		return PTR_ERR(tegra_pcie.pex_clk);

	tegra_pcie.afi_clk = clk_get(NULL, "afi");
	if (IS_ERR(tegra_pcie.afi_clk)) {
		err = PTR_ERR(tegra_pcie.afi_clk);
		goto err_afi_clk;
	}

	tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
	if (IS_ERR(tegra_pcie.pcie_xclk)) {
		err = PTR_ERR(tegra_pcie.pcie_xclk);
		goto err_pcie_xclk;
	}

	tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
	if (IS_ERR(tegra_pcie.pll_e)) {
		err = PTR_ERR(tegra_pcie.pll_e);
		goto err_pll_e;
	}

	return 0;

err_pll_e:
	clk_put(tegra_pcie.pcie_xclk);
err_pcie_xclk:
	clk_put(tegra_pcie.afi_clk);
err_afi_clk:
	clk_put(tegra_pcie.pex_clk);

	return err;
}

static void tegra_pcie_clocks_put(void)
{
	clk_put(tegra_pcie.pll_e);
	clk_put(tegra_pcie.pcie_xclk);
	clk_put(tegra_pcie.afi_clk);
	clk_put(tegra_pcie.pex_clk);
}

/* Acquire clocks, power up the partition, map the register and IO
 * apertures and install the shared error IRQ. Unwinds everything on any
 * failure. */
static int __init tegra_pcie_get_resources(void)
{
	struct resource *res_mmio = &tegra_pcie.res_mmio;
	int err;

	err = tegra_pcie_clocks_get();
	if (err) {
		pr_err("PCIE: failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_regate();
	if (err) {
		pr_err("PCIE: failed to power up: %d\n", err);
		goto err_pwr_on;
	}

	tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ);
	if (tegra_pcie.regs == NULL) {
		pr_err("PCIE: Failed to map PCI/AFI registers\n");
		err = -ENOMEM;
		goto err_map_reg;
	}

	err = request_resource(&iomem_resource, res_mmio);
	if (err) {
		pr_err("PCIE: Failed to request resources: %d\n", err);
		goto err_req_io;
	}

	tegra_pcie_io_base = ioremap_nocache(res_mmio->start,
					     resource_size(res_mmio));
	if (tegra_pcie_io_base == NULL) {
		pr_err("PCIE: Failed to map IO\n");
		err = -ENOMEM;
		goto err_map_io;
	}

	err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
			  IRQF_SHARED, "PCIE", &tegra_pcie);
	if (err) {
		pr_err("PCIE: Failed to register IRQ: %d\n", err);
		goto err_irq;
	}
	set_irq_flags(INT_PCIE_INTR, IRQF_VALID);

	return 0;

err_irq:
	iounmap(tegra_pcie_io_base);
err_map_io:
	release_resource(&tegra_pcie.res_mmio);
err_req_io:
	iounmap(tegra_pcie.regs);
err_map_reg:
	tegra_pcie_power_off();
err_pwr_on:
	tegra_pcie_clocks_put();

	return err;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
/* Poll for data-link-up then link-training-complete on the given port;
 * pulse the PEX reset and retry (up to 3 times) if either times out.
 * Returns true once the link is up. */
static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
				  u32 reset_reg)
{
	u32 reg;
	int retries = 3;
	int timeout;

	do {
		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_VEND_XP);

			if (reg & RP_VEND_XP_DL_UP)
				break;

			mdelay(1);
			timeout--;
		}

		if (!timeout)  {
			pr_err("PCIE: port %d: link down, retrying\n", idx);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_LINK_CONTROL_STATUS);

			if (reg & 0x20000000)
				return true;

			mdelay(1);
			timeout--;
		}

retry:
		/* Pulse the PEX reset */
		reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);
		mdelay(1);
		reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);

		retries--;
	} while (retries);

	return false;
}

/* Probe one root port; register it only if its link came up. Ports that
 * never link are left with index == -1 and base == NULL. */
static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
{
	struct tegra_pcie_port *pp;

	pp = tegra_pcie.port + tegra_pcie.num_ports;

	pp->index = -1;
	pp->base = tegra_pcie.regs + offset;
	pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);

	if (!pp->link_up) {
		pp->base = NULL;
		printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
		return;
	}

	tegra_pcie.num_ports++;
	pp->index = index;
	pp->root_bus_nr = -1;
	memset(pp->res, 0, sizeof(pp->res));
}

/* Entry point called from board code: power up and configure the
 * controller, probe the requested ports, then hand the working ports to
 * the ARM PCI core. Returns 0 on success, -ENODEV if no port was
 * requested, or the error from resource acquisition. */
int __init tegra_pcie_init(bool init_port0, bool init_port1)
{
	int err;

	if (!(init_port0 || init_port1))
		return -ENODEV;

	err = tegra_pcie_get_resources();
	if (err)
		return err;

	tegra_pcie_enable_controller();

	/* setup the AFI address translations */
	tegra_pcie_setup_translations();

	if (init_port0)
		tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);

	if (init_port1)
		tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);

	pci_common_init(&tegra_pcie_hw);

	return 0;
}
gpl-2.0
santod/NuK3rn3l_htc_m7_GPE-5.0.x
arch/ia64/kvm/kvm-ia64.c
4382
45694
/* * kvm_ia64.c: Basic KVM suppport On Itanium series processors * * * Copyright (C) 2007, Intel Corporation. * Xiantao Zhang (xiantao.zhang@intel.com) * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/bitops.h> #include <linux/hrtimer.h> #include <linux/uaccess.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/pci.h> #include <asm/pgtable.h> #include <asm/gcc_intrin.h> #include <asm/pal.h> #include <asm/cacheflush.h> #include <asm/div64.h> #include <asm/tlb.h> #include <asm/elf.h> #include <asm/sn/addrs.h> #include <asm/sn/clksupport.h> #include <asm/sn/shub_mmr.h> #include "misc.h" #include "vti.h" #include "iodev.h" #include "ioapic.h" #include "lapic.h" #include "irq.h" static unsigned long kvm_vmm_base; static unsigned long kvm_vsa_base; static unsigned long kvm_vm_buffer; static unsigned long kvm_vm_buffer_size; unsigned long kvm_vmm_gp; static long vp_env_info; static struct kvm_vmm_info *kvm_vmm_info; static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) { #if defined(CONFIG_IA64_SGI_SN2) || 
defined(CONFIG_IA64_GENERIC) if (vcpu->kvm->arch.is_sn2) return rtc_time(); else #endif return ia64_getreg(_IA64_REG_AR_ITC); } static void kvm_flush_icache(unsigned long start, unsigned long len) { int l; for (l = 0; l < (len + 32); l += 32) ia64_fc((void *)(start + l)); ia64_sync_i(); ia64_srlz_i(); } static void kvm_flush_tlb_all(void) { unsigned long i, j, count0, count1, stride0, stride1, addr; long flags; addr = local_cpu_data->ptce_base; count0 = local_cpu_data->ptce_count[0]; count1 = local_cpu_data->ptce_count[1]; stride0 = local_cpu_data->ptce_stride[0]; stride1 = local_cpu_data->ptce_stride[1]; local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); ia64_srlz_i(); /* srlz.i implies srlz.d */ } long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) { struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, (u64)opt_handler); return iprv.status; } static DEFINE_SPINLOCK(vp_lock); int kvm_arch_hardware_enable(void *garbage) { long status; long tmp_base; unsigned long pte; unsigned long saved_psr; int slot; pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return -EINVAL; spin_lock(&vp_lock); status = ia64_pal_vp_init_env(kvm_vsa_base ? 
VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } if (!kvm_vsa_base) { kvm_vsa_base = tmp_base; printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); } spin_unlock(&vp_lock); ia64_ptr_entry(0x3, slot); return 0; } void kvm_arch_hardware_disable(void *garbage) { long status; int slot; unsigned long pte; unsigned long saved_psr; unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return; status = ia64_pal_vp_exit_env(host_iva); if (status) printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", status); ia64_ptr_entry(0x3, slot); } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_MP_STATE: case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; default: r = 0; } return r; } static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 1; return 0; } static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct kvm_mmio_req *p; struct kvm_io_device *mmio_dev; int r; p = kvm_get_vcpu_ioreq(vcpu); if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) goto mmio; vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; vcpu->mmio_size = kvm_run->mmio.len = p->size; vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; if (vcpu->mmio_is_write) memcpy(vcpu->mmio_data, &p->data, p->size); memcpy(kvm_run->mmio.data, &p->data, 
p->size); kvm_run->exit_reason = KVM_EXIT_MMIO; return 0; mmio: if (p->dir) r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); else r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); if (r) printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); p->state = STATE_IORESP_READY; return 1; } static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_PAL_CALL) return kvm_pal_emul(vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 2; return 0; } } static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_SAL_CALL) { kvm_sal_emul(vcpu); return 1; } else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 3; return 0; } } static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (!test_and_set_bit(vector, &vpd->irr[0])) { vcpu->arch.irq_new_pending = 1; kvm_vcpu_kick(vcpu); return 1; } return 0; } /* * offset: address offset to IPI space. * value: deliver value. 
*/ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, uint64_t vector) { switch (dm) { case SAPIC_FIXED: break; case SAPIC_NMI: vector = 2; break; case SAPIC_EXTINT: vector = 0; break; case SAPIC_INIT: case SAPIC_PMI: default: printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); return; } __apic_accept_irq(vcpu, vector); } static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, unsigned long eid) { union ia64_lid lid; int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { lid.val = VCPU_LID(vcpu); if (lid.id == id && lid.eid == eid) return vcpu; } return NULL; } static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm_vcpu *target_vcpu; struct kvm_pt_regs *regs; union ia64_ipi_a addr = p->u.ipi_data.addr; union ia64_ipi_d data = p->u.ipi_data.data; target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); if (!target_vcpu) return handle_vm_error(vcpu, kvm_run); if (!target_vcpu->arch.launched) { regs = vcpu_regs(target_vcpu); regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (waitqueue_active(&target_vcpu->wq)) wake_up_interruptible(&target_vcpu->wq); } else { vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); if (target_vcpu != vcpu) kvm_vcpu_kick(target_vcpu); } return 1; } struct call_data { struct kvm_ptc_g ptc_g_data; struct kvm_vcpu *vcpu; }; static void vcpu_global_purge(void *info) { struct call_data *p = (struct call_data *)info; struct kvm_vcpu *vcpu = p->vcpu; if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) return; set_bit(KVM_REQ_PTC_G, &vcpu->requests); if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = p->ptc_g_data; } else { clear_bit(KVM_REQ_PTC_G, &vcpu->requests); vcpu->arch.ptc_g_count = 0; set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); } } static int 
handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm *kvm = vcpu->kvm; struct call_data call_data; int i; struct kvm_vcpu *vcpui; call_data.ptc_g_data = p->u.ptc_g_data; kvm_for_each_vcpu(i, vcpui, kvm) { if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || vcpu == vcpui) continue; if (waitqueue_active(&vcpui->wq)) wake_up_interruptible(&vcpui->wq); if (vcpui->cpu != -1) { call_data.vcpu = vcpui; smp_call_function_single(vcpui->cpu, vcpu_global_purge, &call_data, 1); } else printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); } return 1; } static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) { unsigned long pte, rtc_phys_addr, map_addr; int slot; map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); vcpu->arch.sn_rtc_tr_slot = slot; if (slot < 0) { printk(KERN_ERR "Mayday mayday! 
RTC mapping failed!\n"); slot = 0; } return slot; } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ktime_t kt; long itc_diff; unsigned long vcpu_now_itc; unsigned long expires; struct hrtimer *p_ht = &vcpu->arch.hlt_timer; unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (irqchip_in_kernel(vcpu->kvm)) { vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; if (time_after(vcpu_now_itc, vpd->itm)) { vcpu->arch.timer_check = 1; return 1; } itc_diff = vpd->itm - vcpu_now_itc; if (itc_diff < 0) itc_diff = -itc_diff; expires = div64_u64(itc_diff, cyc_per_usec); kt = ktime_set(0, 1000 * expires); vcpu->arch.ht_active = 1; hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); vcpu->arch.mp_state = KVM_MP_STATE_HALTED; kvm_vcpu_block(vcpu); hrtimer_cancel(p_ht); vcpu->arch.ht_active = 0; if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || kvm_cpu_has_pending_timer(vcpu)) if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) return -EINTR; return 1; } else { printk(KERN_ERR"kvm: Unsupported userspace halt!"); return 0; } } static int handle_vm_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int handle_vcpu_debug(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { printk("VMM: %s", vcpu->arch.log_buf); return 1; } static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = { [EXIT_REASON_VM_PANIC] = handle_vm_error, [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, [EXIT_REASON_PAL_CALL] = handle_pal_call, [EXIT_REASON_SAL_CALL] = handle_sal_call, [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_IPI] = handle_ipi, 
[EXIT_REASON_PTC_G] = handle_global_purge, [EXIT_REASON_DEBUG] = handle_vcpu_debug, }; static const int kvm_vti_max_exit_handlers = sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p_exit_data; p_exit_data = kvm_get_exit_data(vcpu); return p_exit_data->exit_reason; } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. */ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { u32 exit_reason = kvm_get_exit_reason(vcpu); vcpu->arch.last_exit = exit_reason; if (exit_reason < kvm_vti_max_exit_handlers && kvm_vti_exit_handlers[exit_reason]) return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = exit_reason; } return 0; } static inline void vti_set_rr6(unsigned long rr6) { ia64_set_rr(RR6, rr6); ia64_srlz_i(); } static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) { unsigned long pte; struct kvm *kvm = vcpu->kvm; int r; /*Insert a pair of tr to map vmm*/ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); if (r < 0) goto out; vcpu->arch.vmm_tr_slot = r; /*Insert a pairt of tr to map data of vm*/ pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, pte, KVM_VM_DATA_SHIFT); if (r < 0) goto out; vcpu->arch.vm_tr_slot = r; #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) { r = kvm_sn2_setup_mappings(vcpu); if (r < 0) goto out; } #endif r = 0; out: return r; } static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); #endif } 
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) { unsigned long psr; int r; int cpu = smp_processor_id(); if (vcpu->arch.last_run_cpu != cpu || per_cpu(last_vcpu, cpu) != vcpu) { per_cpu(last_vcpu, cpu) = vcpu; vcpu->arch.last_run_cpu = cpu; kvm_flush_tlb_all(); } vcpu->arch.host_rr6 = ia64_get_rr(RR6); vti_set_rr6(vcpu->arch.vmm_rr); local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); return r; } static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) { kvm_purge_vmm_mapping(vcpu); vti_set_rr6(vcpu->arch.host_rr6); } static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { union context *host_ctx, *guest_ctx; int r, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); again: if (signal_pending(current)) { r = -EINTR; kvm_run->exit_reason = KVM_EXIT_INTR; goto out; } preempt_disable(); local_irq_disable(); /*Get host and guest context with guest address space.*/ host_ctx = kvm_get_host_context(vcpu); guest_ctx = kvm_get_guest_context(vcpu); clear_bit(KVM_REQ_KICK, &vcpu->requests); r = kvm_vcpu_pre_transition(vcpu); if (r < 0) goto vcpu_run_fail; srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->mode = IN_GUEST_MODE; kvm_guest_enter(); /* * Transition to the guest */ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); kvm_vcpu_post_transition(vcpu); vcpu->arch.launched = 1; set_bit(KVM_REQ_KICK, &vcpu->requests); local_irq_enable(); /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); vcpu->mode = OUTSIDE_GUEST_MODE; preempt_enable(); idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_handle_exit(kvm_run, vcpu); if (r > 0) { if (!need_resched()) goto again; } out: srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { kvm_resched(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } return r; vcpu_run_fail: local_irq_enable(); preempt_enable(); kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; goto out; } static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) { struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); if (!vcpu->mmio_is_write) memcpy(&p->data, vcpu->mmio_data, 8); p->state = STATE_IORESP_READY; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); kvm_set_mmio_data(vcpu); vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; } r = __vcpu_run(vcpu, kvm_run); out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } struct kvm *kvm_arch_alloc_vm(void) { struct kvm *kvm; uint64_t vm_base; BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); if (!vm_base) return NULL; memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); kvm = (struct kvm *)(vm_base + offsetof(struct kvm_vm_data, kvm_vm_struct)); kvm->arch.vm_base = vm_base; printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); return kvm; } struct kvm_ia64_io_range { unsigned long start; unsigned long size; unsigned long type; }; static const struct kvm_ia64_io_range io_ranges[] = { {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, {MMIO_START, MMIO_SIZE, 
GPFN_LOW_MMIO}, {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, {PIB_START, PIB_SIZE, GPFN_PIB}, }; static void kvm_build_io_pmt(struct kvm *kvm) { unsigned long i, j; /* Mark I/O ranges */ for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); i++) { for (j = io_ranges[i].start; j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE) kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, io_ranges[i].type, 0); } } /*Use unused rids to virtualize guest rid.*/ #define GUEST_PHYSICAL_RR0 0x1739 #define GUEST_PHYSICAL_RR4 0x2739 #define VMM_INIT_RR 0x1660 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { BUG_ON(!kvm); if (type) return -EINVAL; kvm->arch.is_sn2 = ia64_platform_is("sn2"); kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; kvm->arch.vmm_init_rr = VMM_INIT_RR; /* *Fill P2M entries for MMIO/IO ranges */ kvm_build_io_pmt(kvm); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); return 0; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; for (i = 0; i < 16; i++) { vpd->vgr[i] = regs->vpd.vgr[i]; vpd->vbgr[i] = regs->vpd.vbgr[i]; } for (i = 0; i < 128; i++) vpd->vcr[i] = regs->vpd.vcr[i]; vpd->vhpi = 
regs->vpd.vhpi; vpd->vnat = regs->vpd.vnat; vpd->vbnat = regs->vpd.vbnat; vpd->vpsr = regs->vpd.vpsr; vpd->vpr = regs->vpd.vpr; memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context)); RESTORE_REGS(mp_state); RESTORE_REGS(vmm_rr); memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); RESTORE_REGS(itr_regions); RESTORE_REGS(dtr_regions); RESTORE_REGS(tc_regions); RESTORE_REGS(irq_check); RESTORE_REGS(itc_check); RESTORE_REGS(timer_check); RESTORE_REGS(timer_pending); RESTORE_REGS(last_itc); for (i = 0; i < 8; i++) { vcpu->arch.vrr[i] = regs->vrr[i]; vcpu->arch.ibr[i] = regs->ibr[i]; vcpu->arch.dbr[i] = regs->dbr[i]; } for (i = 0; i < 4; i++) vcpu->arch.insvc[i] = regs->insvc[i]; RESTORE_REGS(xtp); RESTORE_REGS(metaphysical_rr0); RESTORE_REGS(metaphysical_rr4); RESTORE_REGS(metaphysical_saved_rr0); RESTORE_REGS(metaphysical_saved_rr4); RESTORE_REGS(fp_psr); RESTORE_REGS(saved_gp); vcpu->arch.irq_new_pending = 1; vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); set_bit(KVM_REQ_RESUME, &vcpu->requests); return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; switch (ioctl) { case KVM_SET_MEMORY_REGION: { struct kvm_memory_region kvm_mem; struct kvm_userspace_memory_region kvm_userspace_mem; r = -EFAULT; if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) goto out; kvm_userspace_mem.slot = kvm_mem.slot; kvm_userspace_mem.flags = kvm_mem.flags; kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr; kvm_userspace_mem.memory_size = kvm_mem.memory_size; r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0); if (r) goto out; break; } case KVM_CREATE_IRQCHIP: r = -EFAULT; r = kvm_ioapic_init(kvm); if (r) goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_ioapic_destroy(kvm); 
mutex_unlock(&kvm->slots_lock); goto out; } break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_get_irqchip(kvm, &chip); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &chip, sizeof chip)) goto out; r = 0; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_set_irqchip(kvm, &chip); if (r) goto out; r = 0; break; } default: ; } out: return r; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return -EINVAL; } static int kvm_alloc_vmm_area(void) { if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { kvm_vmm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VMM_SIZE)); if (!kvm_vmm_base) return -ENOMEM; memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", kvm_vmm_base, kvm_vm_buffer); } return 0; } static void kvm_free_vmm_area(void) { if (kvm_vmm_base) { /*Zero this area before free to avoid 
bits leak!!*/ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); kvm_vmm_base = 0; kvm_vm_buffer = 0; kvm_vsa_base = 0; } } static int vti_init_vpd(struct kvm_vcpu *vcpu) { int i; union cpuid3_t cpuid3; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (IS_ERR(vpd)) return PTR_ERR(vpd); /* CPUID init */ for (i = 0; i < 5; i++) vpd->vcpuid[i] = ia64_get_cpuid(i); /* Limit the CPUID number to 5 */ cpuid3.value = vpd->vcpuid[3]; cpuid3.number = 4; /* 5 - 1 */ vpd->vcpuid[3] = cpuid3.value; /*Set vac and vdc fields*/ vpd->vac.a_from_int_cr = 1; vpd->vac.a_to_int_cr = 1; vpd->vac.a_from_psr = 1; vpd->vac.a_from_cpuid = 1; vpd->vac.a_cover = 1; vpd->vac.a_bsw = 1; vpd->vac.a_int = 1; vpd->vdc.d_vmsw = 1; /*Set virtual buffer*/ vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; return 0; } static int vti_create_vp(struct kvm_vcpu *vcpu) { long ret; struct vpd *vpd = vcpu->arch.vpd; unsigned long vmm_ivt; vmm_ivt = kvm_vmm_info->vmm_ivt; printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); if (ret) { printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); return -EINVAL; } return 0; } static void init_ptce_info(struct kvm_vcpu *vcpu) { ia64_ptce_info_t ptce = {0}; ia64_get_ptce(&ptce); vcpu->arch.ptce_base = ptce.base; vcpu->arch.ptce_count[0] = ptce.count[0]; vcpu->arch.ptce_count[1] = ptce.count[1]; vcpu->arch.ptce_stride[0] = ptce.stride[0]; vcpu->arch.ptce_stride[1] = ptce.stride[1]; } static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) { struct hrtimer *p_ht = &vcpu->arch.hlt_timer; if (hrtimer_cancel(p_ht)) hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); } static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) { struct kvm_vcpu *vcpu; wait_queue_head_t *q; vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); q = &vcpu->wq; if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) goto out; if (waitqueue_active(q)) wake_up_interruptible(q); out: 
vcpu->arch.timer_fired = 1; vcpu->arch.timer_check = 1; return HRTIMER_NORESTART; } #define PALE_RESET_ENTRY 0x80000000ffffffb0UL bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; int r; int i; long itc_offset; struct kvm *kvm = vcpu->kvm; struct kvm_pt_regs *regs = vcpu_regs(vcpu); union context *p_ctx = &vcpu->arch.guest; struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); /*Init vcpu context for first run.*/ if (IS_ERR(vmm_vcpu)) return PTR_ERR(vmm_vcpu); if (kvm_vcpu_is_bsp(vcpu)) { vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; /*Set entry address for first run.*/ regs->cr_iip = PALE_RESET_ENTRY; /*Initialize itc offset for vcpus*/ itc_offset = 0UL - kvm_get_itc(vcpu); for (i = 0; i < KVM_MAX_VCPUS; i++) { v = (struct kvm_vcpu *)((char *)vcpu + sizeof(struct kvm_vcpu_data) * i); v->arch.itc_offset = itc_offset; v->arch.last_itc = 0; } } else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; r = -ENOMEM; vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); if (!vcpu->arch.apic) goto out; vcpu->arch.apic->vcpu = vcpu; p_ctx->gr[1] = 0; p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); p_ctx->gr[13] = (unsigned long)vmm_vcpu; p_ctx->psr = 0x1008522000UL; p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ p_ctx->caller_unat = 0; p_ctx->pr = 0x0; p_ctx->ar[36] = 0x0; /*unat*/ p_ctx->ar[19] = 0x0; /*rnat*/ p_ctx->ar[18] = (unsigned long)vmm_vcpu + ((sizeof(struct kvm_vcpu)+15) & ~15); p_ctx->ar[64] = 0x0; /*pfs*/ p_ctx->cr[0] = 0x7e04UL; p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; p_ctx->cr[8] = 0x3c; /*Initialize region register*/ p_ctx->rr[0] = 0x30; p_ctx->rr[1] = 0x30; p_ctx->rr[2] = 0x30; p_ctx->rr[3] = 0x30; p_ctx->rr[4] = 0x30; p_ctx->rr[5] = 0x30; p_ctx->rr[7] = 0x30; /*Initialize branch register 0*/ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; vcpu->arch.vmm_rr = 
kvm->arch.vmm_init_rr; vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vcpu->arch.hlt_timer.function = hlt_timer_fn; vcpu->arch.last_run_cpu = -1; vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); vcpu->arch.vsa_base = kvm_vsa_base; vcpu->arch.__gp = kvm_vmm_gp; vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); init_ptce_info(vcpu); r = 0; out: return r; } static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) { unsigned long psr; int r; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; r = kvm_vcpu_init(vcpu, vcpu->kvm, id); if (r) goto fail; r = vti_init_vpd(vcpu); if (r) { printk(KERN_DEBUG"kvm: vpd init error!!\n"); goto uninit; } r = vti_create_vp(vcpu); if (r) goto uninit; kvm_purge_vmm_mapping(vcpu); return 0; uninit: kvm_vcpu_uninit(vcpu); fail: return r; } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; unsigned long vm_base = kvm->arch.vm_base; int r; int cpu; BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); r = -EINVAL; if (id >= KVM_MAX_VCPUS) { printk(KERN_ERR"kvm: Can't configure vcpus > %ld", KVM_MAX_VCPUS); goto fail; } r = -ENOMEM; if (!vm_base) { printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); goto fail; } vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, vcpu_data[id].vcpu_struct)); vcpu->kvm = kvm; cpu = get_cpu(); r = vti_vcpu_setup(vcpu, id); put_cpu(); if (r) { printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); goto fail; } return vcpu; fail: return ERR_PTR(r); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int 
kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -EINVAL; } void kvm_arch_free_vm(struct kvm *kvm) { unsigned long vm_base = kvm->arch.vm_base; if (vm_base) { memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); } } static void kvm_release_vm_pages(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int j; unsigned long base_gfn; slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { base_gfn = memslot->base_gfn; for (j = 0; j < memslot->npages; j++) { if (memslot->rmap[j]) put_page((struct page *)memslot->rmap[j]); } } } void kvm_arch_sync_events(struct kvm *kvm) { } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); #ifdef KVM_CAP_DEVICE_ASSIGNMENT kvm_free_all_assigned_devices(kvm); #endif kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { if (cpu != vcpu->cpu) { vcpu->cpu = cpu; if (vcpu->arch.ht_active) kvm_migrate_hlt_timer(vcpu); } } #define SAVE_REGS(_x) regs->_x = vcpu->arch._x int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; vcpu_load(vcpu); for (i = 0; i < 16; i++) { regs->vpd.vgr[i] = vpd->vgr[i]; regs->vpd.vbgr[i] = vpd->vbgr[i]; } for (i = 0; i < 128; i++) regs->vpd.vcr[i] = vpd->vcr[i]; regs->vpd.vhpi = vpd->vhpi; regs->vpd.vnat = vpd->vnat; regs->vpd.vbnat = vpd->vbnat; regs->vpd.vpsr = vpd->vpsr; regs->vpd.vpr = vpd->vpr; memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context)); SAVE_REGS(mp_state); SAVE_REGS(vmm_rr); memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); SAVE_REGS(itr_regions); 
SAVE_REGS(dtr_regions); SAVE_REGS(tc_regions); SAVE_REGS(irq_check); SAVE_REGS(itc_check); SAVE_REGS(timer_check); SAVE_REGS(timer_pending); SAVE_REGS(last_itc); for (i = 0; i < 8; i++) { regs->vrr[i] = vcpu->arch.vrr[i]; regs->ibr[i] = vcpu->arch.ibr[i]; regs->dbr[i] = vcpu->arch.dbr[i]; } for (i = 0; i < 4; i++) regs->insvc[i] = vcpu->arch.insvc[i]; regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); SAVE_REGS(xtp); SAVE_REGS(metaphysical_rr0); SAVE_REGS(metaphysical_rr4); SAVE_REGS(metaphysical_saved_rr0); SAVE_REGS(metaphysical_saved_rr4); SAVE_REGS(fp_psr); SAVE_REGS(saved_gp); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); return 0; } int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; return 0; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.hlt_timer); kfree(vcpu->arch.apic); } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; struct kvm_ia64_vcpu_stack *stack = NULL; long r; switch (ioctl) { case KVM_IA64_VCPU_GET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_WRITE, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " "Illegal user destination address for stack\n"); goto out; } stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); if (r) goto out; if 
(copy_to_user(user_stack, stack, sizeof(struct kvm_ia64_vcpu_stack))) { r = -EFAULT; goto out; } break; } case KVM_IA64_VCPU_SET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_READ, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " "Illegal user address for stack\n"); goto out; } stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } if (copy_from_user(stack, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) goto out; r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); break; } default: r = -EINVAL; } out: kfree(stack); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { unsigned long i; unsigned long pfn; int npages = memslot->npages; unsigned long base_gfn = memslot->base_gfn; if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) return -ENOMEM; for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); if (!kvm_is_mmio_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, pfn << PAGE_SHIFT, _PAGE_AR_RWX | _PAGE_MA_WB); memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); } else { kvm_set_pmt_entry(kvm, base_gfn + i, GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), _PAGE_MA_UC); memslot->rmap[i] = 0; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { return; } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); 
} long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_vcpu_uninit(vcpu); } static int vti_cpu_has_kvm_support(void) { long avail = 1, status = 1, control = 1; long ret; ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); if (ret) goto out; if (!(avail & PAL_PROC_VM_BIT)) goto out; printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); if (ret) goto out; printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); if (!(vp_env_info & VP_OPCODE)) { printk(KERN_WARNING"kvm: No opcode ability on hardware, " "vm_env_info:0x%lx\n", vp_env_info); } return 1; out: return 0; } /* * On SN2, the ITC isn't stable, so copy in fast path code to use the * SN2 RTC, replacing the ITC based default verion. */ static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long new_ar, new_ar_sn2; unsigned long module_base; if (!ia64_platform_is("sn2")) return; module_base = (unsigned long)module->module_core; new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " "as source\n"); /* * Copy the SN2 version of mov_ar into place. They are both * the same size, so 6 bundles is sufficient (6 * 0x10). 
*/ memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); } static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long module_base; unsigned long vmm_size; unsigned long vmm_offset, func_offset, fdesc_offset; struct fdesc *p_fdesc; BUG_ON(!module); if (!kvm_vmm_base) { printk("kvm: kvm area hasn't been initialized yet!!\n"); return -EFAULT; } /*Calculate new position of relocated vmm module.*/ module_base = (unsigned long)module->module_core; vmm_size = module->core_size; if (unlikely(vmm_size > KVM_VMM_SIZE)) return -EFAULT; memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); kvm_patch_vmm(vmm_info, module); kvm_flush_icache(kvm_vmm_base, vmm_size); /*Recalculate kvm_vmm_info based on new VMM*/ vmm_offset = vmm_info->vmm_ivt - module_base; kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", kvm_vmm_info->vmm_ivt); fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", KVM_VMM_BASE+func_offset); fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); kvm_vmm_gp = p_fdesc->gp; printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", kvm_vmm_info->vmm_entry); printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", KVM_VMM_BASE + func_offset); return 0; } int kvm_arch_init(void 
*opaque) { int r; struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; if (!vti_cpu_has_kvm_support()) { printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); r = -EOPNOTSUPP; goto out; } if (kvm_vmm_info) { printk(KERN_ERR "kvm: Already loaded VMM module!\n"); r = -EEXIST; goto out; } r = -ENOMEM; kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); if (!kvm_vmm_info) goto out; if (kvm_alloc_vmm_area()) goto out_free0; r = kvm_relocate_vmm(vmm_info, vmm_info->module); if (r) goto out_free1; return 0; out_free1: kvm_free_vmm_area(); out_free0: kfree(kvm_vmm_info); out: return r; } void kvm_arch_exit(void) { kvm_free_vmm_area(); kfree(kvm_vmm_info); kvm_vmm_info = NULL; } static void kvm_ia64_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { int i; long base; unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; spin_lock(&kvm->arch.dirty_log_lock); for (i = 0; i < n/sizeof(long); ++i) { memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; dirty_bitmap[base + i] = 0; } spin_unlock(&kvm->arch.dirty_log_lock); } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) goto out; kvm_ia64_sync_dirty_log(kvm, memslot); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. 
*/ if (is_dirty) { kvm_flush_remote_tlbs(kvm); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_hardware_unsetup(void) { } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); me = get_cpu(); if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu)) if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) smp_send_reschedule(cpu); put_cpu(); } int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) { return __apic_accept_irq(vcpu, irq->vector); } int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return apic->vcpu->vcpu_id == dest; } int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) { return 0; } int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) { return vcpu1->arch.xtp - vcpu2->arch.xtp; } int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, int dest, int dest_mode) { struct kvm_lapic *target = vcpu->arch.apic; return (dest_mode == 0) ? 
kvm_apic_match_physical_addr(target, dest) : kvm_apic_match_logical_addr(target, dest); } static int find_highest_bits(int *dat) { u32 bits, bitnum; int i; /* loop for all 256 bits */ for (i = 7; i >= 0 ; i--) { bits = dat[i]; if (bits) { bitnum = fls(bits); return i * 32 + bitnum - 1; } } return -1; } int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (vpd->irr[0] & (1UL << NMI_VECTOR)) return NMI_VECTOR; if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) return ExtINT_VECTOR; return find_highest_bits((int *)&vpd->irr[0]); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return vcpu->arch.timer_fired; } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || (kvm_highest_pending_irq(vcpu) != -1); } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } static int vcpu_reset(struct kvm_vcpu *vcpu) { int r; long psr; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; vcpu->arch.launched = 0; kvm_arch_vcpu_uninit(vcpu); r = kvm_arch_vcpu_init(vcpu); if (r) goto fail; kvm_purge_vmm_mapping(vcpu); r = 0; fail: return r; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { int r = 0; vcpu->arch.mp_state = mp_state->mp_state; if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) r = vcpu_reset(vcpu); return r; }
gpl-2.0
v-superuser/android_kernel_htc_msm8974
sound/arm/pxa2xx-ac97.c
4894
6492
/* * linux/sound/pxa2xx-ac97.c -- AC97 support for the Intel PXA2xx chip. * * Author: Nicolas Pitre * Created: Dec 02, 2004 * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/pxa2xx-lib.h> #include <mach/regs-ac97.h> #include <mach/audio.h> #include "pxa2xx-pcm.h" static void pxa2xx_ac97_reset(struct snd_ac97 *ac97) { if (!pxa2xx_ac97_try_cold_reset(ac97)) { pxa2xx_ac97_try_warm_reset(ac97); } pxa2xx_ac97_finish_reset(ac97); } static struct snd_ac97_bus_ops pxa2xx_ac97_ops = { .read = pxa2xx_ac97_read, .write = pxa2xx_ac97_write, .reset = pxa2xx_ac97_reset, }; static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_out = { .name = "AC97 PCM out", .dev_addr = __PREG(PCDR), .drcmr = &DRCMR(12), .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_BURST32 | DCMD_WIDTH4, }; static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_in = { .name = "AC97 PCM in", .dev_addr = __PREG(PCDR), .drcmr = &DRCMR(11), .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_BURST32 | DCMD_WIDTH4, }; static struct snd_pcm *pxa2xx_ac97_pcm; static struct snd_ac97 *pxa2xx_ac97_ac97; static int pxa2xx_ac97_pcm_startup(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; pxa2xx_audio_ops_t *platform_ops; int r; runtime->hw.channels_min = 2; runtime->hw.channels_max = 2; r = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
AC97_RATES_FRONT_DAC : AC97_RATES_ADC; runtime->hw.rates = pxa2xx_ac97_ac97->rates[r]; snd_pcm_limit_hw_rates(runtime); platform_ops = substream->pcm->card->dev->platform_data; if (platform_ops && platform_ops->startup) return platform_ops->startup(substream, platform_ops->priv); else return 0; } static void pxa2xx_ac97_pcm_shutdown(struct snd_pcm_substream *substream) { pxa2xx_audio_ops_t *platform_ops; platform_ops = substream->pcm->card->dev->platform_data; if (platform_ops && platform_ops->shutdown) platform_ops->shutdown(substream, platform_ops->priv); } static int pxa2xx_ac97_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE; return snd_ac97_set_rate(pxa2xx_ac97_ac97, reg, runtime->rate); } static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = { .playback_params = &pxa2xx_ac97_pcm_out, .capture_params = &pxa2xx_ac97_pcm_in, .startup = pxa2xx_ac97_pcm_startup, .shutdown = pxa2xx_ac97_pcm_shutdown, .prepare = pxa2xx_ac97_pcm_prepare, }; #ifdef CONFIG_PM static int pxa2xx_ac97_do_suspend(struct snd_card *card, pm_message_t state) { pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3cold); snd_pcm_suspend_all(pxa2xx_ac97_pcm); snd_ac97_suspend(pxa2xx_ac97_ac97); if (platform_ops && platform_ops->suspend) platform_ops->suspend(platform_ops->priv); return pxa2xx_ac97_hw_suspend(); } static int pxa2xx_ac97_do_resume(struct snd_card *card) { pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data; int rc; rc = pxa2xx_ac97_hw_resume(); if (rc) return rc; if (platform_ops && platform_ops->resume) platform_ops->resume(platform_ops->priv); snd_ac97_resume(pxa2xx_ac97_ac97); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static int pxa2xx_ac97_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); int ret = 0; if 
(card) ret = pxa2xx_ac97_do_suspend(card, PMSG_SUSPEND); return ret; } static int pxa2xx_ac97_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); int ret = 0; if (card) ret = pxa2xx_ac97_do_resume(card); return ret; } static const struct dev_pm_ops pxa2xx_ac97_pm_ops = { .suspend = pxa2xx_ac97_suspend, .resume = pxa2xx_ac97_resume, }; #endif static int __devinit pxa2xx_ac97_probe(struct platform_device *dev) { struct snd_card *card; struct snd_ac97_bus *ac97_bus; struct snd_ac97_template ac97_template; int ret; pxa2xx_audio_ops_t *pdata = dev->dev.platform_data; if (dev->id >= 0) { dev_err(&dev->dev, "PXA2xx has only one AC97 port.\n"); ret = -ENXIO; goto err_dev; } ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, 0, &card); if (ret < 0) goto err; card->dev = &dev->dev; strncpy(card->driver, dev->dev.driver->name, sizeof(card->driver)); ret = pxa2xx_pcm_new(card, &pxa2xx_ac97_pcm_client, &pxa2xx_ac97_pcm); if (ret) goto err; ret = pxa2xx_ac97_hw_probe(dev); if (ret) goto err; ret = snd_ac97_bus(card, 0, &pxa2xx_ac97_ops, NULL, &ac97_bus); if (ret) goto err_remove; memset(&ac97_template, 0, sizeof(ac97_template)); ret = snd_ac97_mixer(ac97_bus, &ac97_template, &pxa2xx_ac97_ac97); if (ret) goto err_remove; snprintf(card->shortname, sizeof(card->shortname), "%s", snd_ac97_get_short_name(pxa2xx_ac97_ac97)); snprintf(card->longname, sizeof(card->longname), "%s (%s)", dev->dev.driver->name, card->mixername); if (pdata && pdata->codec_pdata[0]) snd_ac97_dev_add_pdata(ac97_bus->codec[0], pdata->codec_pdata[0]); snd_card_set_dev(card, &dev->dev); ret = snd_card_register(card); if (ret == 0) { platform_set_drvdata(dev, card); return 0; } err_remove: pxa2xx_ac97_hw_remove(dev); err: if (card) snd_card_free(card); err_dev: return ret; } static int __devexit pxa2xx_ac97_remove(struct platform_device *dev) { struct snd_card *card = platform_get_drvdata(dev); if (card) { snd_card_free(card); platform_set_drvdata(dev, NULL); 
pxa2xx_ac97_hw_remove(dev); } return 0; } static struct platform_driver pxa2xx_ac97_driver = { .probe = pxa2xx_ac97_probe, .remove = __devexit_p(pxa2xx_ac97_remove), .driver = { .name = "pxa2xx-ac97", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &pxa2xx_ac97_pm_ops, #endif }, }; module_platform_driver(pxa2xx_ac97_driver); MODULE_AUTHOR("Nicolas Pitre"); MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-ac97");
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
sound/pci/cmipci.c
4894
104405
/* * Driver for C-Media CMI8338 and 8738 PCI soundcards. * Copyright (c) 2000 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Does not work. Warning may block system in capture mode */ /* #define USE_VAR48KRATE */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/module.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/sb.h> #include <sound/asoundef.h> #include <sound/initval.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("C-Media CMI8x38 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738}," "{C-Media,CMI8738B}," "{C-Media,CMI8338A}," "{C-Media,CMI8338B}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */ static long mpu_port[SNDRV_CARDS]; static long 
fm_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; static bool soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; #ifdef SUPPORT_JOYSTICK static int joystick_port[SNDRV_CARDS]; #endif module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for C-Media PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for C-Media PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable C-Media PCI soundcard."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port."); module_param_array(soft_ac3, bool, NULL, 0444); MODULE_PARM_DESC(soft_ac3, "Software-conversion of raw SPDIF packets (model 033 only)."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, int, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address."); #endif /* * CM8x38 registers definition */ #define CM_REG_FUNCTRL0 0x00 #define CM_RST_CH1 0x00080000 #define CM_RST_CH0 0x00040000 #define CM_CHEN1 0x00020000 /* ch1: enable */ #define CM_CHEN0 0x00010000 /* ch0: enable */ #define CM_PAUSE1 0x00000008 /* ch1: pause */ #define CM_PAUSE0 0x00000004 /* ch0: pause */ #define CM_CHADC1 0x00000002 /* ch1, 0:playback, 1:record */ #define CM_CHADC0 0x00000001 /* ch0, 0:playback, 1:record */ #define CM_REG_FUNCTRL1 0x04 #define CM_DSFC_MASK 0x0000E000 /* channel 1 (DAC?) sampling frequency */ #define CM_DSFC_SHIFT 13 #define CM_ASFC_MASK 0x00001C00 /* channel 0 (ADC?) sampling frequency */ #define CM_ASFC_SHIFT 10 #define CM_SPDF_1 0x00000200 /* SPDIF IN/OUT at channel B */ #define CM_SPDF_0 0x00000100 /* SPDIF OUT only channel A */ #define CM_SPDFLOOP 0x00000080 /* ext. 
SPDIIF/IN -> OUT loopback */ #define CM_SPDO2DAC 0x00000040 /* SPDIF/OUT can be heard from internal DAC */ #define CM_INTRM 0x00000020 /* master control block (MCB) interrupt enabled */ #define CM_BREQ 0x00000010 /* bus master enabled */ #define CM_VOICE_EN 0x00000008 /* legacy voice (SB16,FM) */ #define CM_UART_EN 0x00000004 /* legacy UART */ #define CM_JYSTK_EN 0x00000002 /* legacy joystick */ #define CM_ZVPORT 0x00000001 /* ZVPORT */ #define CM_REG_CHFORMAT 0x08 #define CM_CHB3D5C 0x80000000 /* 5,6 channels */ #define CM_FMOFFSET2 0x40000000 /* initial FM PCM offset 2 when Fmute=1 */ #define CM_CHB3D 0x20000000 /* 4 channels */ #define CM_CHIP_MASK1 0x1f000000 #define CM_CHIP_037 0x01000000 #define CM_SETLAT48 0x00800000 /* set latency timer 48h */ #define CM_EDGEIRQ 0x00400000 /* emulated edge trigger legacy IRQ */ #define CM_SPD24SEL39 0x00200000 /* 24-bit spdif: model 039 */ #define CM_AC3EN1 0x00100000 /* enable AC3: model 037 */ #define CM_SPDIF_SELECT1 0x00080000 /* for model <= 037 ? */ #define CM_SPD24SEL 0x00020000 /* 24bit spdif: model 037 */ /* #define CM_SPDIF_INVERSE 0x00010000 */ /* ??? */ #define CM_ADCBITLEN_MASK 0x0000C000 #define CM_ADCBITLEN_16 0x00000000 #define CM_ADCBITLEN_15 0x00004000 #define CM_ADCBITLEN_14 0x00008000 #define CM_ADCBITLEN_13 0x0000C000 #define CM_ADCDACLEN_MASK 0x00003000 /* model 037 */ #define CM_ADCDACLEN_060 0x00000000 #define CM_ADCDACLEN_066 0x00001000 #define CM_ADCDACLEN_130 0x00002000 #define CM_ADCDACLEN_280 0x00003000 #define CM_ADCDLEN_MASK 0x00003000 /* model 039 */ #define CM_ADCDLEN_ORIGINAL 0x00000000 #define CM_ADCDLEN_EXTRA 0x00001000 #define CM_ADCDLEN_24K 0x00002000 #define CM_ADCDLEN_WEIGHT 0x00003000 #define CM_CH1_SRATE_176K 0x00000800 #define CM_CH1_SRATE_96K 0x00000800 /* model 055? */ #define CM_CH1_SRATE_88K 0x00000400 #define CM_CH0_SRATE_176K 0x00000200 #define CM_CH0_SRATE_96K 0x00000200 /* model 055? 
*/ #define CM_CH0_SRATE_88K 0x00000100 #define CM_CH0_SRATE_128K 0x00000300 #define CM_CH0_SRATE_MASK 0x00000300 #define CM_SPDIF_INVERSE2 0x00000080 /* model 055? */ #define CM_DBLSPDS 0x00000040 /* double SPDIF sample rate 88.2/96 */ #define CM_POLVALID 0x00000020 /* inverse SPDIF/IN valid bit */ #define CM_SPDLOCKED 0x00000010 #define CM_CH1FMT_MASK 0x0000000C /* bit 3: 16 bits, bit 2: stereo */ #define CM_CH1FMT_SHIFT 2 #define CM_CH0FMT_MASK 0x00000003 /* bit 1: 16 bits, bit 0: stereo */ #define CM_CH0FMT_SHIFT 0 #define CM_REG_INT_HLDCLR 0x0C #define CM_CHIP_MASK2 0xff000000 #define CM_CHIP_8768 0x20000000 #define CM_CHIP_055 0x08000000 #define CM_CHIP_039 0x04000000 #define CM_CHIP_039_6CH 0x01000000 #define CM_UNKNOWN_INT_EN 0x00080000 /* ? */ #define CM_TDMA_INT_EN 0x00040000 #define CM_CH1_INT_EN 0x00020000 #define CM_CH0_INT_EN 0x00010000 #define CM_REG_INT_STATUS 0x10 #define CM_INTR 0x80000000 #define CM_VCO 0x08000000 /* Voice Control? CMI8738 */ #define CM_MCBINT 0x04000000 /* Master Control Block abort cond.? */ #define CM_UARTINT 0x00010000 #define CM_LTDMAINT 0x00008000 #define CM_HTDMAINT 0x00004000 #define CM_XDO46 0x00000080 /* Modell 033? 
Direct programming EEPROM (read data register) */ #define CM_LHBTOG 0x00000040 /* High/Low status from DMA ctrl register */ #define CM_LEG_HDMA 0x00000020 /* Legacy is in High DMA channel */ #define CM_LEG_STEREO 0x00000010 /* Legacy is in Stereo mode */ #define CM_CH1BUSY 0x00000008 #define CM_CH0BUSY 0x00000004 #define CM_CHINT1 0x00000002 #define CM_CHINT0 0x00000001 #define CM_REG_LEGACY_CTRL 0x14 #define CM_NXCHG 0x80000000 /* don't map base reg dword->sample */ #define CM_VMPU_MASK 0x60000000 /* MPU401 i/o port address */ #define CM_VMPU_330 0x00000000 #define CM_VMPU_320 0x20000000 #define CM_VMPU_310 0x40000000 #define CM_VMPU_300 0x60000000 #define CM_ENWR8237 0x10000000 /* enable bus master to write 8237 base reg */ #define CM_VSBSEL_MASK 0x0C000000 /* SB16 base address */ #define CM_VSBSEL_220 0x00000000 #define CM_VSBSEL_240 0x04000000 #define CM_VSBSEL_260 0x08000000 #define CM_VSBSEL_280 0x0C000000 #define CM_FMSEL_MASK 0x03000000 /* FM OPL3 base address */ #define CM_FMSEL_388 0x00000000 #define CM_FMSEL_3C8 0x01000000 #define CM_FMSEL_3E0 0x02000000 #define CM_FMSEL_3E8 0x03000000 #define CM_ENSPDOUT 0x00800000 /* enable XSPDIF/OUT to I/O interface */ #define CM_SPDCOPYRHT 0x00400000 /* spdif in/out copyright bit */ #define CM_DAC2SPDO 0x00200000 /* enable wave+fm_midi -> SPDIF/OUT */ #define CM_INVIDWEN 0x00100000 /* internal vendor ID write enable, model 039? 
*/ #define CM_SETRETRY 0x00100000 /* 0: legacy i/o wait (default), 1: legacy i/o bus retry */ #define CM_C_EEACCESS 0x00080000 /* direct programming eeprom regs */ #define CM_C_EECS 0x00040000 #define CM_C_EEDI46 0x00020000 #define CM_C_EECK46 0x00010000 #define CM_CHB3D6C 0x00008000 /* 5.1 channels support */ #define CM_CENTR2LIN 0x00004000 /* line-in as center out */ #define CM_BASE2LIN 0x00002000 /* line-in as bass out */ #define CM_EXBASEN 0x00001000 /* external bass input enable */ #define CM_REG_MISC_CTRL 0x18 #define CM_PWD 0x80000000 /* power down */ #define CM_RESET 0x40000000 #define CM_SFIL_MASK 0x30000000 /* filter control at front end DAC, model 037? */ #define CM_VMGAIN 0x10000000 /* analog master amp +6dB, model 039? */ #define CM_TXVX 0x08000000 /* model 037? */ #define CM_N4SPK3D 0x04000000 /* copy front to rear */ #define CM_SPDO5V 0x02000000 /* 5V spdif output (1 = 0.5v (coax)) */ #define CM_SPDIF48K 0x01000000 /* write */ #define CM_SPATUS48K 0x01000000 /* read */ #define CM_ENDBDAC 0x00800000 /* enable double dac */ #define CM_XCHGDAC 0x00400000 /* 0: front=ch0, 1: front=ch1 */ #define CM_SPD32SEL 0x00200000 /* 0: 16bit SPDIF, 1: 32bit */ #define CM_SPDFLOOPI 0x00100000 /* int. SPDIF-OUT -> int. IN */ #define CM_FM_EN 0x00080000 /* enable legacy FM */ #define CM_AC3EN2 0x00040000 /* enable AC3: model 039 */ #define CM_ENWRASID 0x00010000 /* choose writable internal SUBID (audio) */ #define CM_VIDWPDSB 0x00010000 /* model 037? */ #define CM_SPDF_AC97 0x00008000 /* 0: SPDIF/OUT 44.1K, 1: 48K */ #define CM_MASK_EN 0x00004000 /* activate channel mask on legacy DMA */ #define CM_ENWRMSID 0x00002000 /* choose writable internal SUBID (modem) */ #define CM_VIDWPPRT 0x00002000 /* model 037? */ #define CM_SFILENB 0x00001000 /* filter stepping at front end DAC, model 037? */ #define CM_MMODE_MASK 0x00000E00 /* model DAA interface mode */ #define CM_SPDIF_SELECT2 0x00000100 /* for model > 039 ? 
*/ #define CM_ENCENTER 0x00000080 #define CM_FLINKON 0x00000040 /* force modem link detection on, model 037 */ #define CM_MUTECH1 0x00000040 /* mute PCI ch1 to DAC */ #define CM_FLINKOFF 0x00000020 /* force modem link detection off, model 037 */ #define CM_MIDSMP 0x00000010 /* 1/2 interpolation at front end DAC */ #define CM_UPDDMA_MASK 0x0000000C /* TDMA position update notification */ #define CM_UPDDMA_2048 0x00000000 #define CM_UPDDMA_1024 0x00000004 #define CM_UPDDMA_512 0x00000008 #define CM_UPDDMA_256 0x0000000C #define CM_TWAIT_MASK 0x00000003 /* model 037 */ #define CM_TWAIT1 0x00000002 /* FM i/o cycle, 0: 48, 1: 64 PCICLKs */ #define CM_TWAIT0 0x00000001 /* i/o cycle, 0: 4, 1: 6 PCICLKs */ #define CM_REG_TDMA_POSITION 0x1C #define CM_TDMA_CNT_MASK 0xFFFF0000 /* current byte/word count */ #define CM_TDMA_ADR_MASK 0x0000FFFF /* current address */ /* byte */ #define CM_REG_MIXER0 0x20 #define CM_REG_SBVR 0x20 /* write: sb16 version */ #define CM_REG_DEV 0x20 /* read: hardware device version */ #define CM_REG_MIXER21 0x21 #define CM_UNKNOWN_21_MASK 0x78 /* ? */ #define CM_X_ADPCM 0x04 /* SB16 ADPCM enable */ #define CM_PROINV 0x02 /* SBPro left/right channel switching */ #define CM_X_SB16 0x01 /* SB16 compatible */ #define CM_REG_SB16_DATA 0x22 #define CM_REG_SB16_ADDR 0x23 #define CM_REFFREQ_XIN (315*1000*1000)/22 /* 14.31818 Mhz reference clock frequency pin XIN */ #define CM_ADCMULT_XIN 512 /* Guessed (487 best for 44.1kHz, not for 88/176kHz) */ #define CM_TOLERANCE_RATE 0.001 /* Tolerance sample rate pitch (1000ppm) */ #define CM_MAXIMUM_RATE 80000000 /* Note more than 80MHz */ #define CM_REG_MIXER1 0x24 #define CM_FMMUTE 0x80 /* mute FM */ #define CM_FMMUTE_SHIFT 7 #define CM_WSMUTE 0x40 /* mute PCM */ #define CM_WSMUTE_SHIFT 6 #define CM_REAR2LIN 0x20 /* lin-in -> rear line out */ #define CM_REAR2LIN_SHIFT 5 #define CM_REAR2FRONT 0x10 /* exchange rear/front */ #define CM_REAR2FRONT_SHIFT 4 #define CM_WAVEINL 0x08 /* digital wave rec. 
left chan */ #define CM_WAVEINL_SHIFT 3 #define CM_WAVEINR 0x04 /* digical wave rec. right */ #define CM_WAVEINR_SHIFT 2 #define CM_X3DEN 0x02 /* 3D surround enable */ #define CM_X3DEN_SHIFT 1 #define CM_CDPLAY 0x01 /* enable SPDIF/IN PCM -> DAC */ #define CM_CDPLAY_SHIFT 0 #define CM_REG_MIXER2 0x25 #define CM_RAUXREN 0x80 /* AUX right capture */ #define CM_RAUXREN_SHIFT 7 #define CM_RAUXLEN 0x40 /* AUX left capture */ #define CM_RAUXLEN_SHIFT 6 #define CM_VAUXRM 0x20 /* AUX right mute */ #define CM_VAUXRM_SHIFT 5 #define CM_VAUXLM 0x10 /* AUX left mute */ #define CM_VAUXLM_SHIFT 4 #define CM_VADMIC_MASK 0x0e /* mic gain level (0-3) << 1 */ #define CM_VADMIC_SHIFT 1 #define CM_MICGAINZ 0x01 /* mic boost */ #define CM_MICGAINZ_SHIFT 0 #define CM_REG_MIXER3 0x24 #define CM_REG_AUX_VOL 0x26 #define CM_VAUXL_MASK 0xf0 #define CM_VAUXR_MASK 0x0f #define CM_REG_MISC 0x27 #define CM_UNKNOWN_27_MASK 0xd8 /* ? */ #define CM_XGPO1 0x20 // #define CM_XGPBIO 0x04 #define CM_MIC_CENTER_LFE 0x04 /* mic as center/lfe out? (model 039 or later?) */ #define CM_SPDIF_INVERSE 0x04 /* spdif input phase inverse (model 037) */ #define CM_SPDVALID 0x02 /* spdif input valid check */ #define CM_DMAUTO 0x01 /* SB16 DMA auto detect */ #define CM_REG_AC97 0x28 /* hmmm.. do we have ac97 link? */ /* * For CMI-8338 (0x28 - 0x2b) .. is this valid for CMI-8738 * or identical with AC97 codec? */ #define CM_REG_EXTERN_CODEC CM_REG_AC97 /* * MPU401 pci port index address 0x40 - 0x4f (CMI-8738 spec ver. 0.6) */ #define CM_REG_MPU_PCI 0x40 /* * FM pci port index address 0x50 - 0x5f (CMI-8738 spec ver. 0.6) */ #define CM_REG_FM_PCI 0x50 /* * access from SB-mixer port */ #define CM_REG_EXTENT_IND 0xf0 #define CM_VPHONE_MASK 0xe0 /* Phone volume control (0-3) << 5 */ #define CM_VPHONE_SHIFT 5 #define CM_VPHOM 0x10 /* Phone mute control */ #define CM_VSPKM 0x08 /* Speaker mute control, default high */ #define CM_RLOOPREN 0x04 /* Rec. R-channel enable */ #define CM_RLOOPLEN 0x02 /* Rec. 
L-channel enable */ #define CM_VADMIC3 0x01 /* Mic record boost */ /* * CMI-8338 spec ver 0.5 (this is not valid for CMI-8738): * the 8 registers 0xf8 - 0xff are used for programming m/n counter by the PLL * unit (readonly?). */ #define CM_REG_PLL 0xf8 /* * extended registers */ #define CM_REG_CH0_FRAME1 0x80 /* write: base address */ #define CM_REG_CH0_FRAME2 0x84 /* read: current address */ #define CM_REG_CH1_FRAME1 0x88 /* 0-15: count of samples at bus master; buffer size */ #define CM_REG_CH1_FRAME2 0x8C /* 16-31: count of samples at codec; fragment size */ #define CM_REG_EXT_MISC 0x90 #define CM_ADC48K44K 0x10000000 /* ADC parameters group, 0: 44k, 1: 48k */ #define CM_CHB3D8C 0x00200000 /* 7.1 channels support */ #define CM_SPD32FMT 0x00100000 /* SPDIF/IN 32k sample rate */ #define CM_ADC2SPDIF 0x00080000 /* ADC output to SPDIF/OUT */ #define CM_SHAREADC 0x00040000 /* DAC in ADC as Center/LFE */ #define CM_REALTCMP 0x00020000 /* monitor the CMPL/CMPR of ADC */ #define CM_INVLRCK 0x00010000 /* invert ZVPORT's LRCK */ #define CM_UNKNOWN_90_MASK 0x0000FFFF /* ? 
*/ /* * size of i/o region */ #define CM_EXTENT_CODEC 0x100 #define CM_EXTENT_MIDI 0x2 #define CM_EXTENT_SYNTH 0x4 /* * channels for playback / capture */ #define CM_CH_PLAY 0 #define CM_CH_CAPT 1 /* * flags to check device open/close */ #define CM_OPEN_NONE 0 #define CM_OPEN_CH_MASK 0x01 #define CM_OPEN_DAC 0x10 #define CM_OPEN_ADC 0x20 #define CM_OPEN_SPDIF 0x40 #define CM_OPEN_MCHAN 0x80 #define CM_OPEN_PLAYBACK (CM_CH_PLAY | CM_OPEN_DAC) #define CM_OPEN_PLAYBACK2 (CM_CH_CAPT | CM_OPEN_DAC) #define CM_OPEN_PLAYBACK_MULTI (CM_CH_PLAY | CM_OPEN_DAC | CM_OPEN_MCHAN) #define CM_OPEN_CAPTURE (CM_CH_CAPT | CM_OPEN_ADC) #define CM_OPEN_SPDIF_PLAYBACK (CM_CH_PLAY | CM_OPEN_DAC | CM_OPEN_SPDIF) #define CM_OPEN_SPDIF_CAPTURE (CM_CH_CAPT | CM_OPEN_ADC | CM_OPEN_SPDIF) #if CM_CH_PLAY == 1 #define CM_PLAYBACK_SRATE_176K CM_CH1_SRATE_176K #define CM_PLAYBACK_SPDF CM_SPDF_1 #define CM_CAPTURE_SPDF CM_SPDF_0 #else #define CM_PLAYBACK_SRATE_176K CM_CH0_SRATE_176K #define CM_PLAYBACK_SPDF CM_SPDF_0 #define CM_CAPTURE_SPDF CM_SPDF_1 #endif /* * driver data */ struct cmipci_pcm { struct snd_pcm_substream *substream; u8 running; /* dac/adc running? 
*/ u8 fmt; /* format bits */ u8 is_dac; u8 needs_silencing; unsigned int dma_size; /* in frames */ unsigned int shift; unsigned int ch; /* channel (0/1) */ unsigned int offset; /* physical address of the buffer */ }; /* mixer elements toggled/resumed during ac3 playback */ struct cmipci_mixer_auto_switches { const char *name; /* switch to toggle */ int toggle_on; /* value to change when ac3 mode */ }; static const struct cmipci_mixer_auto_switches cm_saved_mixer[] = { {"PCM Playback Switch", 0}, {"IEC958 Output Switch", 1}, {"IEC958 Mix Analog", 0}, // {"IEC958 Out To DAC", 1}, // no longer used {"IEC958 Loop", 0}, }; #define CM_SAVED_MIXERS ARRAY_SIZE(cm_saved_mixer) struct cmipci { struct snd_card *card; struct pci_dev *pci; unsigned int device; /* device ID */ int irq; unsigned long iobase; unsigned int ctrl; /* FUNCTRL0 current value */ struct snd_pcm *pcm; /* DAC/ADC PCM */ struct snd_pcm *pcm2; /* 2nd DAC */ struct snd_pcm *pcm_spdif; /* SPDIF */ int chip_version; int max_channels; unsigned int can_ac3_sw: 1; unsigned int can_ac3_hw: 1; unsigned int can_multi_ch: 1; unsigned int can_96k: 1; /* samplerate above 48k */ unsigned int do_soft_ac3: 1; unsigned int spdif_playback_avail: 1; /* spdif ready? */ unsigned int spdif_playback_enabled: 1; /* spdif switch enabled? 
*/ int spdif_counter; /* for software AC3 */ unsigned int dig_status; unsigned int dig_pcm_status; struct snd_pcm_hardware *hw_info[3]; /* for playbacks */ int opened[2]; /* open mode */ struct mutex open_mutex; unsigned int mixer_insensitive: 1; struct snd_kcontrol *mixer_res_ctl[CM_SAVED_MIXERS]; int mixer_res_status[CM_SAVED_MIXERS]; struct cmipci_pcm channel[2]; /* ch0 - DAC, ch1 - ADC or 2nd DAC */ /* external MIDI */ struct snd_rawmidi *rmidi; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif spinlock_t reg_lock; #ifdef CONFIG_PM unsigned int saved_regs[0x20]; unsigned char saved_mixers[0x20]; #endif }; /* read/write operations for dword register */ static inline void snd_cmipci_write(struct cmipci *cm, unsigned int cmd, unsigned int data) { outl(data, cm->iobase + cmd); } static inline unsigned int snd_cmipci_read(struct cmipci *cm, unsigned int cmd) { return inl(cm->iobase + cmd); } /* read/write operations for word register */ static inline void snd_cmipci_write_w(struct cmipci *cm, unsigned int cmd, unsigned short data) { outw(data, cm->iobase + cmd); } static inline unsigned short snd_cmipci_read_w(struct cmipci *cm, unsigned int cmd) { return inw(cm->iobase + cmd); } /* read/write operations for byte register */ static inline void snd_cmipci_write_b(struct cmipci *cm, unsigned int cmd, unsigned char data) { outb(data, cm->iobase + cmd); } static inline unsigned char snd_cmipci_read_b(struct cmipci *cm, unsigned int cmd) { return inb(cm->iobase + cmd); } /* bit operations for dword register */ static int snd_cmipci_set_bit(struct cmipci *cm, unsigned int cmd, unsigned int flag) { unsigned int val, oval; val = oval = inl(cm->iobase + cmd); val |= flag; if (val == oval) return 0; outl(val, cm->iobase + cmd); return 1; } static int snd_cmipci_clear_bit(struct cmipci *cm, unsigned int cmd, unsigned int flag) { unsigned int val, oval; val = oval = inl(cm->iobase + cmd); val &= ~flag; if (val == oval) return 0; outl(val, cm->iobase + cmd); return 1; } 
/* bit operations for byte register */ static int snd_cmipci_set_bit_b(struct cmipci *cm, unsigned int cmd, unsigned char flag) { unsigned char val, oval; val = oval = inb(cm->iobase + cmd); val |= flag; if (val == oval) return 0; outb(val, cm->iobase + cmd); return 1; } static int snd_cmipci_clear_bit_b(struct cmipci *cm, unsigned int cmd, unsigned char flag) { unsigned char val, oval; val = oval = inb(cm->iobase + cmd); val &= ~flag; if (val == oval) return 0; outb(val, cm->iobase + cmd); return 1; } /* * PCM interface */ /* * calculate frequency */ static unsigned int rates[] = { 5512, 11025, 22050, 44100, 8000, 16000, 32000, 48000 }; static unsigned int snd_cmipci_rate_freq(unsigned int rate) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rates); i++) { if (rates[i] == rate) return i; } snd_BUG(); return 0; } #ifdef USE_VAR48KRATE /* * Determine PLL values for frequency setup, maybe the CMI8338 (CMI8738???) * does it this way .. maybe not. Never get any information from C-Media about * that <werner@suse.de>. */ static int snd_cmipci_pll_rmn(unsigned int rate, unsigned int adcmult, int *r, int *m, int *n) { unsigned int delta, tolerance; int xm, xn, xr; for (*r = 0; rate < CM_MAXIMUM_RATE/adcmult; *r += (1<<5)) rate <<= 1; *n = -1; if (*r > 0xff) goto out; tolerance = rate*CM_TOLERANCE_RATE; for (xn = (1+2); xn < (0x1f+2); xn++) { for (xm = (1+2); xm < (0xff+2); xm++) { xr = ((CM_REFFREQ_XIN/adcmult) * xm) / xn; if (xr < rate) delta = rate - xr; else delta = xr - rate; /* * If we found one, remember this, * and try to find a closer one */ if (delta < tolerance) { tolerance = delta; *m = xm - 2; *n = xn - 2; } } } out: return (*n > -1); } /* * Program pll register bits, I assume that the 8 registers 0xf8 up to 0xff * are mapped onto the 8 ADC/DAC sampling frequency which can be chosen * at the register CM_REG_FUNCTRL1 (0x04). * Problem: other ways are also possible (any information about that?) 
*/ static void snd_cmipci_set_pll(struct cmipci *cm, unsigned int rate, unsigned int slot) { unsigned int reg = CM_REG_PLL + slot; /* * Guess that this programs at reg. 0x04 the pos 15:13/12:10 * for DSFC/ASFC (000 up to 111). */ /* FIXME: Init (Do we've to set an other register first before programming?) */ /* FIXME: Is this correct? Or shouldn't the m/n/r values be used for that? */ snd_cmipci_write_b(cm, reg, rate>>8); snd_cmipci_write_b(cm, reg, rate&0xff); /* FIXME: Setup (Do we've to set an other register first to enable this?) */ } #endif /* USE_VAR48KRATE */ static int snd_cmipci_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_cmipci_playback2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct cmipci *cm = snd_pcm_substream_chip(substream); if (params_channels(hw_params) > 2) { mutex_lock(&cm->open_mutex); if (cm->opened[CM_CH_PLAY]) { mutex_unlock(&cm->open_mutex); return -EBUSY; } /* reserve the channel A */ cm->opened[CM_CH_PLAY] = CM_OPEN_PLAYBACK_MULTI; mutex_unlock(&cm->open_mutex); } return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static void snd_cmipci_ch_reset(struct cmipci *cm, int ch) { int reset = CM_RST_CH0 << (cm->channel[ch].ch); snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | reset); snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~reset); udelay(10); } static int snd_cmipci_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } /* */ static unsigned int hw_channels[] = {1, 2, 4, 6, 8}; static struct snd_pcm_hw_constraint_list hw_constraints_channels_4 = { .count = 3, .list = hw_channels, .mask = 0, }; static struct snd_pcm_hw_constraint_list hw_constraints_channels_6 = { .count = 4, .list = hw_channels, .mask = 0, }; static struct snd_pcm_hw_constraint_list hw_constraints_channels_8 = { .count = 5, .list 
= hw_channels, .mask = 0, }; static int set_dac_channels(struct cmipci *cm, struct cmipci_pcm *rec, int channels) { if (channels > 2) { if (!cm->can_multi_ch || !rec->ch) return -EINVAL; if (rec->fmt != 0x03) /* stereo 16bit only */ return -EINVAL; } if (cm->can_multi_ch) { spin_lock_irq(&cm->reg_lock); if (channels > 2) { snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_NXCHG); snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); } else { snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_NXCHG); snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); } if (channels == 8) snd_cmipci_set_bit(cm, CM_REG_EXT_MISC, CM_CHB3D8C); else snd_cmipci_clear_bit(cm, CM_REG_EXT_MISC, CM_CHB3D8C); if (channels == 6) { snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_CHB3D5C); snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_CHB3D6C); } else { snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_CHB3D5C); snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_CHB3D6C); } if (channels == 4) snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_CHB3D); else snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_CHB3D); spin_unlock_irq(&cm->reg_lock); } return 0; } /* * prepare playback/capture channel * channel to be used must have been set in rec->ch. 
 */
/*
 * Program one of the two DMA channels (A=0, B=1, selected by rec->ch) for
 * the given substream: sample format, channel count, DMA buffer/period
 * sizes, ADC/DAC direction and sample rate.  Shared by all prepare
 * callbacks.  Returns 0 or a negative error code.
 */
static int snd_cmipci_pcm_prepare(struct cmipci *cm, struct cmipci_pcm *rec,
				  struct snd_pcm_substream *substream)
{
	unsigned int reg, freq, freq_ext, val;
	unsigned int period_size;
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* fmt bit 1 = 16bit-or-wider samples, bit 0 = stereo;
	 * shift converts frame counts to hardware sample-count units */
	rec->fmt = 0;
	rec->shift = 0;
	if (snd_pcm_format_width(runtime->format) >= 16) {
		rec->fmt |= 0x02;
		if (snd_pcm_format_width(runtime->format) > 16)
			rec->shift++; /* 24/32bit */
	}
	if (runtime->channels > 1)
		rec->fmt |= 0x01;
	if (rec->is_dac && set_dac_channels(cm, rec, runtime->channels) < 0) {
		snd_printd("cannot set dac channels\n");
		return -EINVAL;
	}

	rec->offset = runtime->dma_addr;
	/* buffer and period sizes in frame */
	rec->dma_size = runtime->buffer_size << rec->shift;
	period_size = runtime->period_size << rec->shift;
	if (runtime->channels > 2) {
		/* multi-channels: scale counts by channels/2 since the
		 * base unit above assumed a stereo frame */
		rec->dma_size = (rec->dma_size * runtime->channels) / 2;
		period_size = (period_size * runtime->channels) / 2;
	}

	spin_lock_irq(&cm->reg_lock);

	/* set buffer address */
	reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
	snd_cmipci_write(cm, reg, rec->offset);
	/* program sample counts (registers take count-1) */
	reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
	snd_cmipci_write_w(cm, reg, rec->dma_size - 1);
	snd_cmipci_write_w(cm, reg + 2, period_size - 1);

	/* set adc/dac flag: CHADC bit set = capture, clear = playback */
	val = rec->ch ? CM_CHADC1 : CM_CHADC0;
	if (rec->is_dac)
		cm->ctrl &= ~val;
	else
		cm->ctrl |= val;
	snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
	//snd_printd("cmipci: functrl0 = %08x\n", cm->ctrl);

	/* set sample rate; rates above 48k use the extended-rate bits
	 * (freq_ext) in CHFORMAT instead of the FUNCTRL1 divider */
	freq = 0;
	freq_ext = 0;
	if (runtime->rate > 48000)
		switch (runtime->rate) {
		case 88200:
			freq_ext = CM_CH0_SRATE_88K;
			break;
		case 96000:
			freq_ext = CM_CH0_SRATE_96K;
			break;
		case 128000:
			freq_ext = CM_CH0_SRATE_128K;
			break;
		default:
			snd_BUG();
			break;
		}
	else
		freq = snd_cmipci_rate_freq(runtime->rate);
	val = snd_cmipci_read(cm, CM_REG_FUNCTRL1);
	if (rec->ch) {
		val &= ~CM_DSFC_MASK;
		val |= (freq << CM_DSFC_SHIFT) & CM_DSFC_MASK;
	} else {
		val &= ~CM_ASFC_MASK;
		val |= (freq << CM_ASFC_SHIFT) & CM_ASFC_MASK;
	}
	snd_cmipci_write(cm, CM_REG_FUNCTRL1, val);
	//snd_printd("cmipci: functrl1 = %08x\n", val);

	/* set format */
	val = snd_cmipci_read(cm, CM_REG_CHFORMAT);
	if (rec->ch) {
		val &= ~CM_CH1FMT_MASK;
		val |= rec->fmt << CM_CH1FMT_SHIFT;
	} else {
		val &= ~CM_CH0FMT_MASK;
		val |= rec->fmt << CM_CH0FMT_SHIFT;
	}
	if (cm->can_96k) {
		val &= ~(CM_CH0_SRATE_MASK << (rec->ch * 2));
		val |= freq_ext << (rec->ch * 2);
	}
	snd_cmipci_write(cm, CM_REG_CHFORMAT, val);
	//snd_printd("cmipci: chformat = %08x\n", val);

	/* ADC 48k/44.1k base-clock select, not present on chip_version 0 */
	if (!rec->is_dac && cm->chip_version) {
		if (runtime->rate > 44100)
			snd_cmipci_set_bit(cm, CM_REG_EXT_MISC, CM_ADC48K44K);
		else
			snd_cmipci_clear_bit(cm, CM_REG_EXT_MISC, CM_ADC48K44K);
	}

	rec->running = 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/*
 * PCM trigger/stop
 */
static int snd_cmipci_pcm_trigger(struct cmipci *cm, struct cmipci_pcm *rec,
				  int cmd)
{
	unsigned int inthld, chen, reset, pause;
	int result = 0;

	/* per-channel bit positions: channel B bits are channel A bits << 1 */
	inthld = CM_CH0_INT_EN << rec->ch;
	chen = CM_CHEN0 << rec->ch;
	reset = CM_RST_CH0 << rec->ch;
	pause = CM_PAUSE0 << rec->ch;

	spin_lock(&cm->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rec->running = 1;
		/* set interrupt */
		snd_cmipci_set_bit(cm, CM_REG_INT_HLDCLR, inthld);
		cm->ctrl |= chen;
		/* enable channel */
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		//snd_printd("cmipci: functrl0 = %08x\n", cm->ctrl);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		rec->running = 0;
		/* disable interrupt */
		snd_cmipci_clear_bit(cm, CM_REG_INT_HLDCLR, inthld);
		/* reset: pulse the reset bit high then low */
		cm->ctrl &= ~chen;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | reset);
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~reset);
		/* playback channels need the silence hack on next hw_free */
		rec->needs_silencing = rec->is_dac;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		cm->ctrl |= pause;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		cm->ctrl &= ~pause;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		break;
	default:
		result = -EINVAL;
		break;
	}
	spin_unlock(&cm->reg_lock);
	return result;
}

/*
 * return the current pointer
 */
static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci_pcm *rec,
						struct snd_pcm_substream *substream)
{
	size_t ptr;
	unsigned int reg, rem, tries;

	if (!rec->running)
		return 0;
#if 1 // this seems better..
	/* FRAME2 holds the remaining sample count; retry a few times in
	 * case an in-flight register update yields an out-of-range value */
	reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
	for (tries = 0; tries < 3; tries++) {
		rem = snd_cmipci_read_w(cm, reg);
		if (rem < rec->dma_size)
			goto ok;
	}
	printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
	return SNDRV_PCM_POS_XRUN;
ok:
	ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
#else
	/* alternative (disabled): derive the pointer from the current
	 * DMA address in FRAME1 */
	reg = rec->ch ?
	      CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
	ptr = snd_cmipci_read(cm, reg) - rec->offset;
	ptr = bytes_to_frames(substream->runtime, ptr);
#endif
	/* undo the channels/2 scaling applied in prepare for >2 channels */
	if (substream->runtime->channels > 2)
		ptr = (ptr * 2) / substream->runtime->channels;
	return ptr;
}

/*
 * playback
 */

static int snd_cmipci_playback_trigger(struct snd_pcm_substream *substream,
				       int cmd)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_trigger(cm, &cm->channel[CM_CH_PLAY], cmd);
}

static snd_pcm_uframes_t snd_cmipci_playback_pointer(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_pointer(cm, &cm->channel[CM_CH_PLAY], substream);
}

/*
 * capture
 */

static int snd_cmipci_capture_trigger(struct snd_pcm_substream *substream,
				      int cmd)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_trigger(cm, &cm->channel[CM_CH_CAPT], cmd);
}

static snd_pcm_uframes_t snd_cmipci_capture_pointer(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_pointer(cm, &cm->channel[CM_CH_CAPT], substream);
}

/*
 * hw preparation for spdif
 */

static int snd_cmipci_spdif_default_info(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* read the cached IEC958 default status (dig_status), one byte per word */
static int snd_cmipci_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (chip->dig_status >> (i * 8)) & 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* update the cached IEC958 default status; returns 1 if it changed */
static int snd_cmipci_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i, change;
	unsigned int val;

	val = 0;
	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		val |= (unsigned int)ucontrol->value.iec958.status[i] << (i * 8);
	change = val != chip->dig_status;
	chip->dig_status = val;
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_cmipci_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_cmipci_spdif_default_info,
	.get =		snd_cmipci_spdif_default_get,
	.put =		snd_cmipci_spdif_default_put
};

static int snd_cmipci_spdif_mask_info(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* all four status bytes are fully writable */
static int snd_cmipci_spdif_mask_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0xff;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

static struct snd_kcontrol_new snd_cmipci_spdif_mask __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_cmipci_spdif_mask_info,
	.get =		snd_cmipci_spdif_mask_get,
};

static int snd_cmipci_spdif_stream_info(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* read the per-stream IEC958 status (dig_pcm_status) */
static int snd_cmipci_spdif_stream_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (chip->dig_pcm_status >> (i * 8)) & 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* update the per-stream IEC958 status; returns 1 if it changed */
static int snd_cmipci_spdif_stream_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int i, change;
	unsigned int val;

	val = 0;
	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < 4; i++)
		val |= (unsigned int)ucontrol->value.iec958.status[i]
			<< (i * 8);
	change = val != chip->dig_pcm_status;
	chip->dig_pcm_status = val;
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_cmipci_spdif_stream __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_cmipci_spdif_stream_info,
	.get =		snd_cmipci_spdif_stream_get,
	.put =		snd_cmipci_spdif_stream_put
};

/*
 */

/* save mixer setting and mute for AC3 playback */
/* Forces each control in cm_saved_mixer[] to its toggle_on value, saving
 * the previous value in mixer_res_status[], and marks the controls
 * INACTIVE so user space cannot change them meanwhile.
 * Uses GFP_ATOMIC — presumably callable from a context that must not
 * sleep on allocation; TODO confirm against callers. */
static int save_mixer_state(struct cmipci *cm)
{
	if (! cm->mixer_insensitive) {
		struct snd_ctl_elem_value *val;
		unsigned int i;

		val = kmalloc(sizeof(*val), GFP_ATOMIC);
		if (!val)
			return -ENOMEM;
		for (i = 0; i < CM_SAVED_MIXERS; i++) {
			struct snd_kcontrol *ctl = cm->mixer_res_ctl[i];
			if (ctl) {
				int event;
				memset(val, 0, sizeof(*val));
				ctl->get(ctl, val);
				cm->mixer_res_status[i] = val->value.integer.value[0];
				val->value.integer.value[0] = cm_saved_mixer[i].toggle_on;
				event = SNDRV_CTL_EVENT_MASK_INFO;
				if (cm->mixer_res_status[i] != val->value.integer.value[0]) {
					ctl->put(ctl, val); /* toggle */
					event |= SNDRV_CTL_EVENT_MASK_VALUE;
				}
				ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
				snd_ctl_notify(cm->card, event, &ctl->id);
			}
		}
		kfree(val);
		cm->mixer_insensitive = 1;
	}
	return 0;
}

/* restore the previously saved mixer status */
static void restore_mixer_state(struct cmipci *cm)
{
	if (cm->mixer_insensitive) {
		struct snd_ctl_elem_value *val;
		unsigned int i;

		val = kmalloc(sizeof(*val), GFP_KERNEL);
		if (!val)
			return;	/* no way to restore if allocation fails */
		cm->mixer_insensitive = 0; /* at first clear this;
					      otherwise the changes will be ignored */
		for (i = 0; i < CM_SAVED_MIXERS; i++) {
			struct snd_kcontrol *ctl = cm->mixer_res_ctl[i];
			if (ctl) {
				int event;
				memset(val, 0, sizeof(*val));
				ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
				ctl->get(ctl, val);
				event = SNDRV_CTL_EVENT_MASK_INFO;
				if (val->value.integer.value[0] != cm->mixer_res_status[i]) {
					val->value.integer.value[0] = cm->mixer_res_status[i];
					ctl->put(ctl, val);
					event |= SNDRV_CTL_EVENT_MASK_VALUE;
				}
				snd_ctl_notify(cm->card, event, &ctl->id);
			}
		}
		kfree(val);
	}
}

/* spinlock held! */
/* Enable/disable the AC3 (non-audio) pass-through bits; the exact bit set
 * depends on the chip generation (can_ac3_hw vs. software AC3). */
static void setup_ac3(struct cmipci *cm, struct snd_pcm_substream *subs, int do_ac3, int rate)
{
	if (do_ac3) {
		/* AC3EN for 037 */
		snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_AC3EN1);
		/* AC3EN for 039 */
		snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_AC3EN2);

		if (cm->can_ac3_hw) {
			/* SPD24SEL for 037, 0x02 */
			/* SPD24SEL for 039, 0x20, but cannot be set */
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
		} else { /* can_ac3_sw */
			/* SPD32SEL for 037 & 039, 0x20 */
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
			/* set 176K sample rate to fix 033 HW bug */
			if (cm->chip_version == 33) {
				if (rate >= 48000) {
					snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
				} else {
					snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
				}
			}
		}
	} else {
		snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_AC3EN1);
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_AC3EN2);

		if (cm->can_ac3_hw) {
			/* chip model >= 37 */
			if (snd_pcm_format_width(subs->runtime->format) > 16) {
				snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
				snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			} else {
				snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
				snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			}
		} else {
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_SPD24SEL);
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_PLAYBACK_SRATE_176K);
		}
	}
}

/* Switch the S/PDIF output path on (up=1) or off (up=0) for playback,
 * including AC3 mode and rate-dependent bits.  May save/mute the analog
 * mixer for AC3.  Returns 0 or a negative error code. */
static int setup_spdif_playback(struct cmipci *cm, struct snd_pcm_substream *subs, int up, int do_ac3)
{
	int rate, err;

	rate = subs->runtime->rate;

	if (up && do_ac3)
		if ((err = save_mixer_state(cm)) < 0)
			return err;

	spin_lock_irq(&cm->reg_lock);
	cm->spdif_playback_avail = up;
	if (up) {
		/* they are controlled via "IEC958 Output Switch" */
		/*
		   snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); */
		/* snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_SPDO2DAC); */
		if (cm->spdif_playback_enabled)
			snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		setup_ac3(cm, subs, do_ac3, rate);

		if (rate == 48000 || rate == 96000)
			snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K | CM_SPDF_AC97);
		else
			snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K | CM_SPDF_AC97);
		if (rate > 48000)
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		else
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
	} else {
		/* they are controlled via "IEC958 Output Switch" */
		/* snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); */
		/* snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_SPDO2DAC); */
		snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		setup_ac3(cm, subs, 0, 0);
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/*
 * preparation
 */

/* playback - enable spdif only on the certain condition */
static int snd_cmipci_playback_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	int rate = substream->runtime->rate;
	int err, do_spdif, do_ac3 = 0;

	/* S/PDIF pass-through only for stereo S16_LE at 44.1k-96k */
	do_spdif = (rate >= 44100 && rate <= 96000 &&
		    substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE &&
		    substream->runtime->channels == 2);
	if (do_spdif && cm->can_ac3_hw)
		do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
	if ((err = setup_spdif_playback(cm, substream, do_spdif, do_ac3)) < 0)
		return err;
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}

/* playback (via device #2) - enable spdif always */
static int snd_cmipci_playback_spdif_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	int err, do_ac3;

	if (cm->can_ac3_hw)
		do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
	else
		do_ac3 = 1; /* doesn't matter */
	if ((err = setup_spdif_playback(cm, substream, 1, do_ac3)) < 0)
		return err;
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}

/*
 * Apparently, the samples last played on channel A stay in some buffer, even
 * after the channel is reset, and get added to the data for the rear DACs when
 * playing a multichannel stream on channel B.  This is likely to generate
 * wraparounds and thus distortions.
 * To avoid this, we play at least one zero sample after the actual stream has
 * stopped.
 */
static void snd_cmipci_silence_hack(struct cmipci *cm, struct cmipci_pcm *rec)
{
	/* NOTE(review): rec->substream is dereferenced before any check —
	 * assumes hw_free callers only run while the substream is still
	 * attached; verify against close_device_check ordering. */
	struct snd_pcm_runtime *runtime = rec->substream->runtime;
	unsigned int reg, val;

	if (rec->needs_silencing && runtime && runtime->dma_area) {
		/* set up a small silence buffer */
		memset(runtime->dma_area, 0, PAGE_SIZE);
		reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
		val = ((PAGE_SIZE / 4) - 1) | (((PAGE_SIZE / 4) / 2 - 1) << 16);
		snd_cmipci_write(cm, reg, val);

		/* configure for 16 bits, 2 channels, 8 kHz */
		if (runtime->channels > 2)
			set_dac_channels(cm, rec, 2);
		spin_lock_irq(&cm->reg_lock);
		val = snd_cmipci_read(cm, CM_REG_FUNCTRL1);
		val &= ~(CM_ASFC_MASK << (rec->ch * 3));
		val |= (4 << CM_ASFC_SHIFT) << (rec->ch * 3);
		snd_cmipci_write(cm, CM_REG_FUNCTRL1, val);
		val = snd_cmipci_read(cm, CM_REG_CHFORMAT);
		val &= ~(CM_CH0FMT_MASK << (rec->ch * 2));
		val |= (3 << CM_CH0FMT_SHIFT) << (rec->ch * 2);
		if (cm->can_96k)
			val &= ~(CM_CH0_SRATE_MASK << (rec->ch * 2));
		snd_cmipci_write(cm, CM_REG_CHFORMAT, val);

		/* start stream (we don't need interrupts) */
		cm->ctrl |= CM_CHEN0 << rec->ch;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl);
		spin_unlock_irq(&cm->reg_lock);

		msleep(1);

		/* stop and reset stream */
		spin_lock_irq(&cm->reg_lock);
		cm->ctrl &= ~(CM_CHEN0 << rec->ch);
		val = CM_RST_CH0 << rec->ch;
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl | val);
		snd_cmipci_write(cm, CM_REG_FUNCTRL0, cm->ctrl & ~val);
		spin_unlock_irq(&cm->reg_lock);

		rec->needs_silencing = 0;
	}
}

static int snd_cmipci_playback_hw_free(struct snd_pcm_substream *substream)
{
	struct cmipci *cm
		   = snd_pcm_substream_chip(substream);
	/* tear down S/PDIF routing, un-mute the mixer and flush channel A */
	setup_spdif_playback(cm, substream, 0, 0);
	restore_mixer_state(cm);
	snd_cmipci_silence_hack(cm, &cm->channel[0]);
	return snd_cmipci_hw_free(substream);
}

static int snd_cmipci_playback2_hw_free(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	snd_cmipci_silence_hack(cm, &cm->channel[1]);
	return snd_cmipci_hw_free(substream);
}

/* capture */
static int snd_cmipci_capture_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_CAPT], substream);
}

/* capture with spdif (via device #2) */
static int snd_cmipci_capture_spdif_prepare(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_CAPTURE_SPDF);
	if (cm->can_96k) {
		/* double-rate S/PDIF for >48k streams */
		if (substream->runtime->rate > 48000)
			snd_cmipci_set_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
		else
			snd_cmipci_clear_bit(cm, CM_REG_CHFORMAT, CM_DBLSPDS);
	}
	if (snd_pcm_format_width(substream->runtime->format) > 16)
		snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
	else
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);

	spin_unlock_irq(&cm->reg_lock);

	return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_CAPT], substream);
}

static int snd_cmipci_capture_spdif_hw_free(struct snd_pcm_substream *subs)
{
	struct cmipci *cm = snd_pcm_substream_chip(subs);

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_CAPTURE_SPDF);
	snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_SPD32SEL);
	spin_unlock_irq(&cm->reg_lock);

	return snd_cmipci_hw_free(subs);
}

/*
 * interrupt handler
 */
static irqreturn_t snd_cmipci_interrupt(int irq, void *dev_id)
{
	struct cmipci *cm = dev_id;
	unsigned int status, mask = 0;

	/* fastpath out, to ease interrupt sharing */
	status = snd_cmipci_read(cm, CM_REG_INT_STATUS);
	if (!(status & CM_INTR))
		return IRQ_NONE;

	/* acknowledge interrupt: clear then re-set the per-channel
	 * interrupt-enable bits in INT_HLDCLR */
	spin_lock(&cm->reg_lock);
	if (status & CM_CHINT0)
		mask |= CM_CH0_INT_EN;
	if (status & CM_CHINT1)
		mask |= CM_CH1_INT_EN;
	snd_cmipci_clear_bit(cm, CM_REG_INT_HLDCLR, mask);
	snd_cmipci_set_bit(cm, CM_REG_INT_HLDCLR, mask);
	spin_unlock(&cm->reg_lock);

	if (cm->rmidi && (status & CM_UARTINT))
		snd_mpu401_uart_interrupt(irq, cm->rmidi->private_data);

	if (cm->pcm) {
		if ((status & CM_CHINT0) && cm->channel[0].running)
			snd_pcm_period_elapsed(cm->channel[0].substream);
		if ((status & CM_CHINT1) && cm->channel[1].running)
			snd_pcm_period_elapsed(cm->channel[1].substream);
	}
	return IRQ_HANDLED;
}

/*
 * h/w infos
 */

/* playback on channel A */
static struct snd_pcm_hardware snd_cmipci_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* capture on channel B */
static struct snd_pcm_hardware snd_cmipci_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* playback on channel B - stereo 16bit only?
 */
static struct snd_pcm_hardware snd_cmipci_playback2 =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5512,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif playback on channel A */
static struct snd_pcm_hardware snd_cmipci_playback_spdif =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif playback on channel A (32bit, IEC958 subframes) */
static struct snd_pcm_hardware snd_cmipci_playback_iec958_subframe =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* spdif capture on channel B */
static struct snd_pcm_hardware snd_cmipci_capture_spdif =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
	.formats =	        SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
	.rates =		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min =		44100,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		2,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* discrete rate list for chips supporting the extended rates (cv 55) */
static unsigned int rate_constraints[] = { 5512, 8000, 11025, 16000, 22050,
			32000, 44100, 48000, 88200, 96000, 128000 };
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
		.count = ARRAY_SIZE(rate_constraints),
		.list = rate_constraints,
		.mask = 0,
};

/*
 * check device open/close
 */

/* Claim the DMA channel encoded in mode; fails with -EBUSY if already
 * taken.  For non-DAC openers, dual-DAC mode is disabled. */
static int open_device_check(struct cmipci *cm, int mode, struct snd_pcm_substream *subs)
{
	int ch = mode & CM_OPEN_CH_MASK;

	/* FIXME: a file should wait until the device becomes free
	 * when it's opened on blocking mode.  however, since the current
	 * pcm framework doesn't pass file pointer before actually opened,
	 * we can't know whether blocking mode or not in open callback..
	 */
	mutex_lock(&cm->open_mutex);
	if (cm->opened[ch]) {
		mutex_unlock(&cm->open_mutex);
		return -EBUSY;
	}
	cm->opened[ch] = mode;
	cm->channel[ch].substream = subs;
	if (! (mode & CM_OPEN_DAC)) {
		/* disable dual DAC mode */
		cm->channel[ch].is_dac = 0;
		spin_lock_irq(&cm->reg_lock);
		snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC);
		spin_unlock_irq(&cm->reg_lock);
	}
	mutex_unlock(&cm->open_mutex);
	return 0;
}

/* Release the channel claimed by mode (no-op if a different opener owns
 * it) and re-enable dual-DAC mode when it had been disabled. */
static void close_device_check(struct cmipci *cm, int mode)
{
	int ch = mode & CM_OPEN_CH_MASK;

	mutex_lock(&cm->open_mutex);
	if (cm->opened[ch] == mode) {
		if (cm->channel[ch].substream) {
			snd_cmipci_ch_reset(cm, ch);
			cm->channel[ch].running = 0;
			cm->channel[ch].substream = NULL;
		}
		cm->opened[ch] = 0;
		if (!
cm->channel[ch].is_dac) { /* enable dual DAC mode again */ cm->channel[ch].is_dac = 1; spin_lock_irq(&cm->reg_lock); snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC); spin_unlock_irq(&cm->reg_lock); } } mutex_unlock(&cm->open_mutex); } /* */ static int snd_cmipci_playback_open(struct snd_pcm_substream *substream) { struct cmipci *cm = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if ((err = open_device_check(cm, CM_OPEN_PLAYBACK, substream)) < 0) return err; runtime->hw = snd_cmipci_playback; if (cm->chip_version == 68) { runtime->hw.rates |= SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; runtime->hw.rate_max = 96000; } else if (cm->chip_version == 55) { err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if (err < 0) return err; runtime->hw.rates |= SNDRV_PCM_RATE_KNOT; runtime->hw.rate_max = 128000; } snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000); cm->dig_pcm_status = cm->dig_status; return 0; } static int snd_cmipci_capture_open(struct snd_pcm_substream *substream) { struct cmipci *cm = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if ((err = open_device_check(cm, CM_OPEN_CAPTURE, substream)) < 0) return err; runtime->hw = snd_cmipci_capture; if (cm->chip_version == 68) { // 8768 only supports 44k/48k recording runtime->hw.rate_min = 41000; runtime->hw.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000; } else if (cm->chip_version == 55) { err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if (err < 0) return err; runtime->hw.rates |= SNDRV_PCM_RATE_KNOT; runtime->hw.rate_max = 128000; } snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000); return 0; } static int snd_cmipci_playback2_open(struct snd_pcm_substream *substream) { struct cmipci *cm = snd_pcm_substream_chip(substream); struct snd_pcm_runtime 
			       *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_PLAYBACK2, substream)) < 0) /* use channel B */
		return err;
	runtime->hw = snd_cmipci_playback2;
	mutex_lock(&cm->open_mutex);
	if (! cm->opened[CM_CH_PLAY]) {
		/* channel A is free: multi-channel playback is possible */
		if (cm->can_multi_ch) {
			runtime->hw.channels_max = cm->max_channels;
			if (cm->max_channels == 4)
				snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &hw_constraints_channels_4);
			else if (cm->max_channels == 6)
				snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &hw_constraints_channels_6);
			else if (cm->max_channels == 8)
				snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &hw_constraints_channels_8);
		}
	}
	mutex_unlock(&cm->open_mutex);
	if (cm->chip_version == 68) {
		runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000;
		runtime->hw.rate_max = 96000;
	} else if (cm->chip_version == 55) {
		err = snd_pcm_hw_constraint_list(runtime, 0,
			SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
		if (err < 0)
			return err;
		runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
		runtime->hw.rate_max = 128000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x10000);
	return 0;
}

/* Open the S/PDIF playback device (channel A); hw description depends on
 * whether the chip does AC3 in hardware. */
static int snd_cmipci_playback_spdif_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_SPDIF_PLAYBACK, substream)) < 0) /* use channel A */
		return err;
	if (cm->can_ac3_hw) {
		runtime->hw = snd_cmipci_playback_spdif;
		if (cm->chip_version >= 37) {
			runtime->hw.formats |= SNDRV_PCM_FMTBIT_S32_LE;
			snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
		}
		if (cm->can_96k) {
			runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
					     SNDRV_PCM_RATE_96000;
			runtime->hw.rate_max = 96000;
		}
	} else {
		runtime->hw = snd_cmipci_playback_iec958_subframe;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x40000);
	cm->dig_pcm_status = cm->dig_status;
	return 0;
}

/* Open the S/PDIF capture device (channel B). */
static int snd_cmipci_capture_spdif_open(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if ((err = open_device_check(cm, CM_OPEN_SPDIF_CAPTURE, substream)) < 0) /* use channel B */
		return err;
	runtime->hw = snd_cmipci_capture_spdif;
	if (cm->can_96k && !(cm->chip_version == 68)) {
		runtime->hw.rates |= SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000;
		runtime->hw.rate_max = 96000;
	}
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 0, 0x40000);
	return 0;
}

/*
 */

static int snd_cmipci_playback_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_PLAYBACK);
	return 0;
}

static int snd_cmipci_capture_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_CAPTURE);
	return 0;
}

static int snd_cmipci_playback2_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	/* device #2 may have been opened in either mode; release both */
	close_device_check(cm, CM_OPEN_PLAYBACK2);
	close_device_check(cm, CM_OPEN_PLAYBACK_MULTI);
	return 0;
}

static int snd_cmipci_playback_spdif_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_SPDIF_PLAYBACK);
	return 0;
}

static int snd_cmipci_capture_spdif_close(struct snd_pcm_substream *substream)
{
	struct cmipci *cm = snd_pcm_substream_chip(substream);
	close_device_check(cm, CM_OPEN_SPDIF_CAPTURE);
	return 0;
}

/*
 */

static struct snd_pcm_ops snd_cmipci_playback_ops = {
	.open =		snd_cmipci_playback_open,
	.close =	snd_cmipci_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_playback_hw_free,
	.prepare =	snd_cmipci_playback_prepare,
	.trigger =	snd_cmipci_playback_trigger,
	.pointer =	snd_cmipci_playback_pointer,
};

static struct snd_pcm_ops snd_cmipci_capture_ops = {
	.open =
			snd_cmipci_capture_open,
	.close =	snd_cmipci_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_hw_free,
	.prepare =	snd_cmipci_capture_prepare,
	.trigger =	snd_cmipci_capture_trigger,
	.pointer =	snd_cmipci_capture_pointer,
};

static struct snd_pcm_ops snd_cmipci_playback2_ops = {
	.open =		snd_cmipci_playback2_open,
	.close =	snd_cmipci_playback2_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_playback2_hw_params,
	.hw_free =	snd_cmipci_playback2_hw_free,
	.prepare =	snd_cmipci_capture_prepare,	/* channel B */
	.trigger =	snd_cmipci_capture_trigger,	/* channel B */
	.pointer =	snd_cmipci_capture_pointer,	/* channel B */
};

static struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
	.open =		snd_cmipci_playback_spdif_open,
	.close =	snd_cmipci_playback_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_playback_hw_free,
	.prepare =	snd_cmipci_playback_spdif_prepare,	/* set up rate */
	.trigger =	snd_cmipci_playback_trigger,
	.pointer =	snd_cmipci_playback_pointer,
};

static struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
	.open =		snd_cmipci_capture_spdif_open,
	.close =	snd_cmipci_capture_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_cmipci_hw_params,
	.hw_free =	snd_cmipci_capture_spdif_hw_free,
	.prepare =	snd_cmipci_capture_spdif_prepare,
	.trigger =	snd_cmipci_capture_trigger,
	.pointer =	snd_cmipci_capture_pointer,
};

/*
 */

/* Create PCM device #0 (normal playback + capture). */
static int __devinit snd_cmipci_pcm_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cmipci_capture_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI DAC/ADC");
	cm->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/* Create PCM device #1 (second DAC, playback only, channel B). */
static int __devinit snd_cmipci_pcm2_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 0, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback2_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI 2nd DAC");
	cm->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/* Create PCM device #2 (IEC958 playback + capture). */
static int __devinit snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(cm->card, cm->card->driver, device, 1, 1, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cmipci_playback_spdif_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cmipci_capture_spdif_ops);

	pcm->private_data = cm;
	pcm->info_flags = 0;
	strcpy(pcm->name, "C-Media PCI IEC958");
	cm->pcm_spdif = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(cm->pci),
					      64*1024, 128*1024);

	return 0;
}

/*
 * mixer interface:
 * - CM8338/8738 has a compatible mixer interface with SB16, but
 *   lack of some elements like tone control, i/o gain and AGC.
 * - Access to native registers:
 *   - A 3D switch
 *   - Output mute switches
 */

/* write an SB16-compatible mixer register via the indexed I/O pair */
static void snd_cmipci_mixer_write(struct cmipci *s, unsigned char idx, unsigned char data)
{
	outb(idx, s->iobase + CM_REG_SB16_ADDR);
	outb(data, s->iobase + CM_REG_SB16_DATA);
}

/* read an SB16-compatible mixer register */
static unsigned char snd_cmipci_mixer_read(struct cmipci *s, unsigned char idx)
{
	unsigned char v;

	outb(idx, s->iobase + CM_REG_SB16_ADDR);
	v = inb(s->iobase + CM_REG_SB16_DATA);
	return v;
}

/*
 * general mixer element
 */
struct cmipci_sb_reg {
	unsigned int left_reg, right_reg;
	unsigned int left_shift, right_shift;
	unsigned int mask;
	unsigned int invert: 1;
	unsigned int stereo: 1;
};

/* Pack a cmipci_sb_reg into a kcontrol private_value; the layout here
 * must match cmipci_sb_reg_decode() below:
 * bits 0-7 left_reg, 8-15 right_reg, 16-18 left_shift, 19-21 right_shift,
 * 22 invert, 23 stereo, 24-31 mask. */
#define COMPOSE_SB_REG(lreg,rreg,lshift,rshift,mask,invert,stereo) \
 ((lreg) | ((rreg) << 8) | (lshift << 16) | (rshift << 19) | (mask << 24) | (invert << 22) | (stereo << 23))

#define CMIPCI_DOUBLE(xname, left_reg, right_reg, left_shift, right_shift, mask, invert, stereo) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_volume, \
  .get = snd_cmipci_get_volume, .put = snd_cmipci_put_volume, \
  .private_value = COMPOSE_SB_REG(left_reg, right_reg, left_shift, right_shift, mask, invert, stereo), \
}

#define CMIPCI_SB_VOL_STEREO(xname,reg,shift,mask) CMIPCI_DOUBLE(xname, reg, reg+1, shift, shift, mask, 0, 1)
#define CMIPCI_SB_VOL_MONO(xname,reg,shift,mask) CMIPCI_DOUBLE(xname, reg, reg, shift, shift, mask, 0, 0)
#define CMIPCI_SB_SW_STEREO(xname,lshift,rshift) CMIPCI_DOUBLE(xname, SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, lshift, rshift, 1, 0, 1)
#define CMIPCI_SB_SW_MONO(xname,shift) CMIPCI_DOUBLE(xname, SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, shift, shift, 1, 0, 0)

/* unpack the private_value packed by COMPOSE_SB_REG */
static void cmipci_sb_reg_decode(struct cmipci_sb_reg *r, unsigned long val)
{
	r->left_reg = val & 0xff;
	r->right_reg = (val >> 8) & 0xff;
	r->left_shift = (val >> 16) & 0x07;
	r->right_shift = (val >> 19) & 0x07;
	r->invert = (val >> 22) & 1;
	r->stereo = (val >> 23) & 1;
	r->mask = (val >> 24) & 0xff;
}

static int snd_cmipci_info_volume(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct cmipci_sb_reg reg;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	/* single-bit mask means the control is a switch, not a volume */
	uinfo->type = reg.mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = reg.stereo + 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = reg.mask;
	return 0;
}

static int snd_cmipci_get_volume(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	val = (snd_cmipci_mixer_read(cm, reg.left_reg) >> reg.left_shift) & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	ucontrol->value.integer.value[0] = val;
	if (reg.stereo) {
		val = (snd_cmipci_mixer_read(cm, reg.right_reg) >> reg.right_shift) & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		ucontrol->value.integer.value[1] = val;
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/* read-modify-write the mixer register(s); returns 1 if anything changed */
static int snd_cmipci_put_volume(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int change;
	int left, right, oleft, oright;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	left = ucontrol->value.integer.value[0] & reg.mask;
	if (reg.invert)
		left = reg.mask - left;
	left <<= reg.left_shift;
	if (reg.stereo) {
		right = ucontrol->value.integer.value[1] & reg.mask;
		if (reg.invert)
			right = reg.mask - right;
		right <<= reg.right_shift;
	} else
		right = 0;
	spin_lock_irq(&cm->reg_lock);
	oleft = snd_cmipci_mixer_read(cm, reg.left_reg);
	left |= oleft & ~(reg.mask << reg.left_shift);
	change = left != oleft;
	if (reg.stereo) {
		if (reg.left_reg != reg.right_reg) {
			snd_cmipci_mixer_write(cm, reg.left_reg, left);
			oright = snd_cmipci_mixer_read(cm, reg.right_reg);
		} else
			/* both channels live in the same register */
			oright = left;
		right |= oright & ~(reg.mask << reg.right_shift);
		change |= right != oright;
		snd_cmipci_mixer_write(cm, reg.right_reg, right);
	} else
		snd_cmipci_mixer_write(cm, reg.left_reg, left);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/*
 * input route (left,right) -> (left,right)
 */
#define CMIPCI_SB_INPUT_SW(xname, left_shift, right_shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_input_sw, \
  .get = snd_cmipci_get_input_sw, .put = snd_cmipci_put_input_sw, \
  .private_value = COMPOSE_SB_REG(SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, left_shift, right_shift, 1, 0, 1), \
}

static int snd_cmipci_info_input_sw(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 4;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;
	return 0;
}

/* four switches: (source L, source R) x (output L, output R) */
static int snd_cmipci_get_input_sw(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int val1, val2;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	val1 = snd_cmipci_mixer_read(cm, reg.left_reg);
	val2 = snd_cmipci_mixer_read(cm, reg.right_reg);
	spin_unlock_irq(&cm->reg_lock);
	ucontrol->value.integer.value[0] = (val1 >> reg.left_shift) & 1;
	ucontrol->value.integer.value[1] = (val2 >> reg.left_shift) & 1;
	ucontrol->value.integer.value[2] = (val1 >> reg.right_shift) & 1;
	ucontrol->value.integer.value[3] = (val2 >> reg.right_shift) & 1;
	return 0;
}

static int snd_cmipci_put_input_sw(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	int change;
	int val1, val2, oval1, oval2;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oval1 = snd_cmipci_mixer_read(cm, reg.left_reg);
	oval2 = snd_cmipci_mixer_read(cm, reg.right_reg);
	val1 = oval1 & ~((1 << reg.left_shift) | (1 << reg.right_shift));
	val2 = oval2 & ~((1 << reg.left_shift) | (1 << reg.right_shift));
	val1 |= (ucontrol->value.integer.value[0] & 1) << reg.left_shift;
val2 |= (ucontrol->value.integer.value[1] & 1) << reg.left_shift;
	val1 |= (ucontrol->value.integer.value[2] & 1) << reg.right_shift;
	val2 |= (ucontrol->value.integer.value[3] & 1) << reg.right_shift;
	change = val1 != oval1 || val2 != oval2;
	snd_cmipci_mixer_write(cm, reg.left_reg, val1);
	snd_cmipci_mixer_write(cm, reg.right_reg, val2);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/*
 * native mixer switches/volumes
 * These access CMedia-native byte registers directly with inb/outb
 * instead of going through the SB16 index/data pair.
 */

#define CMIPCI_MIXER_SW_STEREO(xname, reg, lshift, rshift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, lshift, rshift, 1, invert, 1), \
}

#define CMIPCI_MIXER_SW_MONO(xname, reg, shift, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, shift, shift, 1, invert, 0), \
}

#define CMIPCI_MIXER_VOL_STEREO(xname, reg, lshift, rshift, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, lshift, rshift, mask, 0, 1), \
}

#define CMIPCI_MIXER_VOL_MONO(xname, reg, shift, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
  .info = snd_cmipci_info_native_mixer, \
  .get = snd_cmipci_get_native_mixer, .put = snd_cmipci_put_native_mixer, \
  .private_value = COMPOSE_SB_REG(reg, reg, shift, shift, mask, 0, 0), \
}

/* info callback for native-register controls; same decoding scheme as
 * snd_cmipci_info_volume. */
static int snd_cmipci_info_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct cmipci_sb_reg reg;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	uinfo->type = reg.mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = reg.stereo + 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = reg.mask;
	return 0;
}

/* get: both channels are packed into the single byte register at
 * iobase + left_reg (left_reg == right_reg for native controls). */
static int snd_cmipci_get_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	unsigned char oreg, val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oreg = inb(cm->iobase + reg.left_reg);
	val = (oreg >> reg.left_shift) & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	ucontrol->value.integer.value[0] = val;
	if (reg.stereo) {
		val = (oreg >> reg.right_shift) & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		ucontrol->value.integer.value[1] = val;
	}
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

/* put: read-modify-write the native byte register; returns 1 when the
 * register value changed. */
static int snd_cmipci_put_native_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	struct cmipci_sb_reg reg;
	unsigned char oreg, nreg, val;

	cmipci_sb_reg_decode(&reg, kcontrol->private_value);
	spin_lock_irq(&cm->reg_lock);
	oreg = inb(cm->iobase + reg.left_reg);
	val = ucontrol->value.integer.value[0] & reg.mask;
	if (reg.invert)
		val = reg.mask - val;
	nreg = oreg & ~(reg.mask << reg.left_shift);
	nreg |= (val << reg.left_shift);
	if (reg.stereo) {
		val = ucontrol->value.integer.value[1] & reg.mask;
		if (reg.invert)
			val = reg.mask - val;
		nreg &= ~(reg.mask << reg.right_shift);
		nreg |= (val << reg.right_shift);
	}
	outb(nreg, cm->iobase + reg.left_reg);
	spin_unlock_irq(&cm->reg_lock);
	return (nreg != oreg);
}

/*
 * special case - check mixer sensitivity
 * (writes are ignored while cm->mixer_insensitive is set)
 */

static int snd_cmipci_get_native_mixer_sensitive(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	//struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	return snd_cmipci_get_native_mixer(kcontrol, ucontrol);
}

static int snd_cmipci_put_native_mixer_sensitive(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	if (cm->mixer_insensitive) {
		/* ignored */
		return 0;
	}
	return snd_cmipci_put_native_mixer(kcontrol, ucontrol);
}

/* static table of all standard mixer controls; registered by
 * snd_cmipci_mixer_new(). */
static struct snd_kcontrol_new snd_cmipci_mixers[] __devinitdata = {
	CMIPCI_SB_VOL_STEREO("Master Playback Volume", SB_DSP4_MASTER_DEV, 3, 31),
	CMIPCI_MIXER_SW_MONO("3D Control - Switch", CM_REG_MIXER1, CM_X3DEN_SHIFT, 0),
	CMIPCI_SB_VOL_STEREO("PCM Playback Volume", SB_DSP4_PCM_DEV, 3, 31),
	//CMIPCI_MIXER_SW_MONO("PCM Playback Switch", CM_REG_MIXER1, CM_WSMUTE_SHIFT, 1),
	{ /* switch with sensitivity */
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "PCM Playback Switch",
		.info = snd_cmipci_info_native_mixer,
		.get = snd_cmipci_get_native_mixer_sensitive,
		.put = snd_cmipci_put_native_mixer_sensitive,
		.private_value = COMPOSE_SB_REG(CM_REG_MIXER1, CM_REG_MIXER1, CM_WSMUTE_SHIFT, CM_WSMUTE_SHIFT, 1, 1, 0),
	},
	CMIPCI_MIXER_SW_STEREO("PCM Capture Switch", CM_REG_MIXER1, CM_WAVEINL_SHIFT, CM_WAVEINR_SHIFT, 0),
	CMIPCI_SB_VOL_STEREO("Synth Playback Volume", SB_DSP4_SYNTH_DEV, 3, 31),
	CMIPCI_MIXER_SW_MONO("Synth Playback Switch", CM_REG_MIXER1, CM_FMMUTE_SHIFT, 1),
	CMIPCI_SB_INPUT_SW("Synth Capture Route", 6, 5),
	CMIPCI_SB_VOL_STEREO("CD Playback Volume", SB_DSP4_CD_DEV, 3, 31),
	CMIPCI_SB_SW_STEREO("CD Playback Switch", 2, 1),
	CMIPCI_SB_INPUT_SW("CD Capture Route", 2, 1),
	CMIPCI_SB_VOL_STEREO("Line Playback Volume", SB_DSP4_LINE_DEV, 3, 31),
	CMIPCI_SB_SW_STEREO("Line Playback Switch", 4, 3),
	CMIPCI_SB_INPUT_SW("Line Capture Route", 4, 3),
	CMIPCI_SB_VOL_MONO("Mic Playback Volume", SB_DSP4_MIC_DEV, 3, 31),
	CMIPCI_SB_SW_MONO("Mic Playback Switch", 0),
	CMIPCI_DOUBLE("Mic Capture Switch", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 0, 0, 1, 0, 0),
	CMIPCI_SB_VOL_MONO("Beep Playback Volume", SB_DSP4_SPEAKER_DEV, 6, 3),
	CMIPCI_MIXER_VOL_STEREO("Aux Playback Volume", CM_REG_AUX_VOL, 4, 0, 15),
	CMIPCI_MIXER_SW_STEREO("Aux Playback Switch", CM_REG_MIXER2, CM_VAUXLM_SHIFT, CM_VAUXRM_SHIFT, 0),
	CMIPCI_MIXER_SW_STEREO("Aux Capture Switch", CM_REG_MIXER2, CM_RAUXLEN_SHIFT, CM_RAUXREN_SHIFT, 0),
	CMIPCI_MIXER_SW_MONO("Mic Boost Playback Switch", CM_REG_MIXER2, CM_MICGAINZ_SHIFT, 1),
	CMIPCI_MIXER_VOL_MONO("Mic Capture Volume", CM_REG_MIXER2, CM_VADMIC_SHIFT, 7),
	CMIPCI_SB_VOL_MONO("Phone Playback Volume", CM_REG_EXTENT_IND, 5, 7),
	CMIPCI_DOUBLE("Phone Playback Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 4, 4, 1, 0, 0),
	CMIPCI_DOUBLE("Beep Playback Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 3, 3, 1, 0, 0),
	CMIPCI_DOUBLE("Mic Boost Capture Switch", CM_REG_EXTENT_IND, CM_REG_EXTENT_IND, 0, 0, 1, 0, 0),
};

/*
 * other switches
 */

/* Description of a single on/off switch register field, referenced from
 * kcontrol->private_value as a pointer. */
struct cmipci_switch_args {
	int reg;		/* register index */
	unsigned int mask;	/* mask bits */
	unsigned int mask_on;	/* mask bits to turn on */
	unsigned int is_byte: 1;		/* byte access? */
	unsigned int ac3_sensitive: 1;	/* access forbidden during
					 * non-audio operation?
					 */
};

#define snd_cmipci_uswitch_info	snd_ctl_boolean_mono_info

/* Shared get helper: reports "on" when the masked register bits equal
 * mask_on; reports 0 (off) while an AC3-sensitive switch is locked out. */
static int _snd_cmipci_uswitch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol, struct cmipci_switch_args *args)
{
	unsigned int val;
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&cm->reg_lock);
	if (args->ac3_sensitive && cm->mixer_insensitive) {
		ucontrol->value.integer.value[0] = 0;
		spin_unlock_irq(&cm->reg_lock);
		return 0;
	}
	if (args->is_byte)
		val = inb(cm->iobase + args->reg);
	else
		val = snd_cmipci_read(cm, args->reg);
	ucontrol->value.integer.value[0] = ((val & args->mask) == args->mask_on) ?
1 : 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_uswitch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci_switch_args *args;
	args = (struct cmipci_switch_args *)kcontrol->private_value;
	if (snd_BUG_ON(!args))
		return -EINVAL;
	return _snd_cmipci_uswitch_get(kcontrol, ucontrol, args);
}

/* Shared put helper: sets the masked bits to mask_on ("on") or to
 * mask & ~mask_on ("off").  Writes are skipped while an AC3-sensitive
 * switch is locked out.  Returns 1 when the register changed. */
static int _snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol, struct cmipci_switch_args *args)
{
	unsigned int val;
	int change;
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&cm->reg_lock);
	if (args->ac3_sensitive && cm->mixer_insensitive) {
		/* ignored */
		spin_unlock_irq(&cm->reg_lock);
		return 0;
	}
	if (args->is_byte)
		val = inb(cm->iobase + args->reg);
	else
		val = snd_cmipci_read(cm, args->reg);
	change = (val & args->mask) != (ucontrol->value.integer.value[0] ? args->mask_on : (args->mask & ~args->mask_on));
	if (change) {
		val &= ~args->mask;
		if (ucontrol->value.integer.value[0])
			val |= args->mask_on;
		else
			val |= (args->mask & ~args->mask_on);
		if (args->is_byte)
			outb((unsigned char)val, cm->iobase + args->reg);
		else
			snd_cmipci_write(cm, args->reg, val);
	}
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

static int snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci_switch_args *args;
	args = (struct cmipci_switch_args *)kcontrol->private_value;
	if (snd_BUG_ON(!args))
		return -EINVAL;
	return _snd_cmipci_uswitch_put(kcontrol, ucontrol, args);
}

/* Define a static cmipci_switch_args instance named
 * cmipci_switch_arg_<sname>, later referenced by DEFINE_SWITCH. */
#define DEFINE_SWITCH_ARG(sname, xreg, xmask, xmask_on, xis_byte, xac3) \
static struct cmipci_switch_args cmipci_switch_arg_##sname = { \
	.reg = xreg, \
	.mask = xmask, \
	.mask_on = xmask_on, \
	.is_byte = xis_byte, \
	.ac3_sensitive = xac3, \
}

/* Common case: "on" means all mask bits set. */
#define DEFINE_BIT_SWITCH_ARG(sname, xreg, xmask, xis_byte, xac3) \
	DEFINE_SWITCH_ARG(sname, xreg, xmask, xmask, xis_byte, xac3)

#if 0 /* these will be controlled in pcm device */
DEFINE_BIT_SWITCH_ARG(spdif_in, CM_REG_FUNCTRL1, CM_SPDF_1, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_out, CM_REG_FUNCTRL1, CM_SPDF_0, 0, 0);
#endif
DEFINE_BIT_SWITCH_ARG(spdif_in_sel1, CM_REG_CHFORMAT, CM_SPDIF_SELECT1, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_in_sel2, CM_REG_MISC_CTRL, CM_SPDIF_SELECT2, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_enable, CM_REG_LEGACY_CTRL, CM_ENSPDOUT, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdo2dac, CM_REG_FUNCTRL1, CM_SPDO2DAC, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdi_valid, CM_REG_MISC, CM_SPDVALID, 1, 0);
DEFINE_BIT_SWITCH_ARG(spdif_copyright, CM_REG_LEGACY_CTRL, CM_SPDCOPYRHT, 0, 0);
DEFINE_BIT_SWITCH_ARG(spdif_dac_out, CM_REG_LEGACY_CTRL, CM_DAC2SPDO, 0, 1);
DEFINE_SWITCH_ARG(spdo_5v, CM_REG_MISC_CTRL, CM_SPDO5V, 0, 0, 0); /* inverse: 0 = 5V */
// DEFINE_BIT_SWITCH_ARG(spdo_48k, CM_REG_MISC_CTRL, CM_SPDF_AC97|CM_SPDIF48K, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdif_loop, CM_REG_FUNCTRL1, CM_SPDFLOOP, 0, 1);
DEFINE_BIT_SWITCH_ARG(spdi_monitor, CM_REG_MIXER1, CM_CDPLAY, 1, 0);
/* DEFINE_BIT_SWITCH_ARG(spdi_phase, CM_REG_CHFORMAT, CM_SPDIF_INVERSE, 0, 0); */
DEFINE_BIT_SWITCH_ARG(spdi_phase, CM_REG_MISC, CM_SPDIF_INVERSE, 1, 0);
DEFINE_BIT_SWITCH_ARG(spdi_phase2, CM_REG_CHFORMAT, CM_SPDIF_INVERSE2, 0, 0);
#if CM_CH_PLAY == 1
DEFINE_SWITCH_ARG(exchange_dac, CM_REG_MISC_CTRL, CM_XCHGDAC, 0, 0, 0); /* reversed */
#else
DEFINE_SWITCH_ARG(exchange_dac, CM_REG_MISC_CTRL, CM_XCHGDAC, CM_XCHGDAC, 0, 0);
#endif
DEFINE_BIT_SWITCH_ARG(fourch, CM_REG_MISC_CTRL, CM_N4SPK3D, 0, 0);
// DEFINE_BIT_SWITCH_ARG(line_rear, CM_REG_MIXER1, CM_REAR2LIN, 1, 0);
// DEFINE_BIT_SWITCH_ARG(line_bass, CM_REG_LEGACY_CTRL, CM_CENTR2LIN|CM_BASE2LIN, 0, 0);
// DEFINE_BIT_SWITCH_ARG(joystick, CM_REG_FUNCTRL1, CM_JYSTK_EN, 0, 0); /* now module option */
DEFINE_SWITCH_ARG(modem, CM_REG_MISC_CTRL, CM_FLINKON|CM_FLINKOFF, CM_FLINKON, 0, 0);

/* Control entry wired to the generic uswitch callbacks above. */
#define DEFINE_SWITCH(sname, stype, sarg) \
{ .name = sname, \
  .iface = stype, \
  .info = snd_cmipci_uswitch_info, \
  .get = snd_cmipci_uswitch_get, \
  .put = snd_cmipci_uswitch_put, \
  .private_value = (unsigned long)&cmipci_switch_arg_##sarg,\
}

#define DEFINE_CARD_SWITCH(sname, sarg) DEFINE_SWITCH(sname, SNDRV_CTL_ELEM_IFACE_CARD, sarg)
#define DEFINE_MIXER_SWITCH(sname, sarg) DEFINE_SWITCH(sname, SNDRV_CTL_ELEM_IFACE_MIXER, sarg)

/*
 * callbacks for spdif output switch
 * needs toggle two registers..
 */
static int snd_cmipci_spdout_enable_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	int changed;
	/* the helpers fill ucontrol; their return values are OR'ed here
	 * (both return 0 on success) */
	changed = _snd_cmipci_uswitch_get(kcontrol, ucontrol, &cmipci_switch_arg_spdif_enable);
	changed |= _snd_cmipci_uswitch_get(kcontrol, ucontrol, &cmipci_switch_arg_spdo2dac);
	return changed;
}

static int snd_cmipci_spdout_enable_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *chip = snd_kcontrol_chip(kcontrol);
	int changed;
	changed = _snd_cmipci_uswitch_put(kcontrol, ucontrol, &cmipci_switch_arg_spdif_enable);
	changed |= _snd_cmipci_uswitch_put(kcontrol, ucontrol, &cmipci_switch_arg_spdo2dac);
	if (changed) {
		/* also route playback to S/PDIF when it is available */
		if (ucontrol->value.integer.value[0]) {
			if (chip->spdif_playback_avail)
				snd_cmipci_set_bit(chip, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		} else {
			if (chip->spdif_playback_avail)
				snd_cmipci_clear_bit(chip, CM_REG_FUNCTRL1, CM_PLAYBACK_SPDF);
		}
	}
	chip->spdif_playback_enabled = ucontrol->value.integer.value[0];
	return changed;
}

/* "Bass Output" is only offered on chip version >= 39. */
static int snd_cmipci_line_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	static const char *const texts[3] = { "Line-In", "Rear Output", "Bass Output" };
	return snd_ctl_enum_info(uinfo, 1, cm->chip_version >= 39 ? 3 : 2, texts);
}

/* Map the current register state to the enum above: 2 = bass output,
 * 1 = rear output, 0 = plain line-in.  Callers hold cm->reg_lock. */
static inline unsigned int get_line_in_mode(struct cmipci *cm)
{
	unsigned int val;
	if (cm->chip_version >= 39) {
		val = snd_cmipci_read(cm, CM_REG_LEGACY_CTRL);
		if (val & (CM_CENTR2LIN | CM_BASE2LIN))
			return 2;
	}
	val = snd_cmipci_read_b(cm, CM_REG_MIXER1);
	if (val & CM_REAR2LIN)
		return 1;
	return 0;
}

static int snd_cmipci_line_in_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	spin_lock_irq(&cm->reg_lock);
	ucontrol->value.enumerated.item[0] = get_line_in_mode(cm);
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_line_in_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	int change;
	spin_lock_irq(&cm->reg_lock);
	/* set/clear the two mode bit groups so exactly one mode is active */
	if (ucontrol->value.enumerated.item[0] == 2)
		change = snd_cmipci_set_bit(cm, CM_REG_LEGACY_CTRL, CM_CENTR2LIN | CM_BASE2LIN);
	else
		change = snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_CENTR2LIN | CM_BASE2LIN);
	if (ucontrol->value.enumerated.item[0] == 1)
		change |= snd_cmipci_set_bit_b(cm, CM_REG_MIXER1, CM_REAR2LIN);
	else
		change |= snd_cmipci_clear_bit_b(cm, CM_REG_MIXER1, CM_REAR2LIN);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char *const texts[2] = { "Mic-In", "Center/LFE Output" };
	return snd_ctl_enum_info(uinfo, 1, 2, texts);
}

static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	/* same bit as spdi_phase */
	spin_lock_irq(&cm->reg_lock);
	ucontrol->value.enumerated.item[0] = (snd_cmipci_read_b(cm, CM_REG_MISC) & CM_SPDIF_INVERSE) ?
1 : 0;
	spin_unlock_irq(&cm->reg_lock);
	return 0;
}

static int snd_cmipci_mic_in_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
	int change;
	spin_lock_irq(&cm->reg_lock);
	if (ucontrol->value.enumerated.item[0])
		change = snd_cmipci_set_bit_b(cm, CM_REG_MISC, CM_SPDIF_INVERSE);
	else
		change = snd_cmipci_clear_bit_b(cm, CM_REG_MISC, CM_SPDIF_INVERSE);
	spin_unlock_irq(&cm->reg_lock);
	return change;
}

/* both for CM8338/8738 */
static struct snd_kcontrol_new snd_cmipci_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("Four Channel Mode", fourch),
	{
		.name = "Line-In Mode",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_line_in_mode_info,
		.get = snd_cmipci_line_in_mode_get,
		.put = snd_cmipci_line_in_mode_put,
	},
};

/* for non-multichannel chips */
static struct snd_kcontrol_new snd_cmipci_nomulti_switch __devinitdata =
	DEFINE_MIXER_SWITCH("Exchange DAC", exchange_dac);

/* only for CM8738 */
static struct snd_kcontrol_new snd_cmipci_8738_mixer_switches[] __devinitdata = {
#if 0 /* controlled in pcm device */
	DEFINE_MIXER_SWITCH("IEC958 In Record", spdif_in),
	DEFINE_MIXER_SWITCH("IEC958 Out", spdif_out),
	DEFINE_MIXER_SWITCH("IEC958 Out To DAC", spdo2dac),
#endif
	// DEFINE_MIXER_SWITCH("IEC958 Output Switch", spdif_enable),
	{
		.name = "IEC958 Output Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_uswitch_info,
		.get = snd_cmipci_spdout_enable_get,
		.put = snd_cmipci_spdout_enable_put,
	},
	DEFINE_MIXER_SWITCH("IEC958 In Valid", spdi_valid),
	DEFINE_MIXER_SWITCH("IEC958 Copyright", spdif_copyright),
	DEFINE_MIXER_SWITCH("IEC958 5V", spdo_5v),
	// DEFINE_MIXER_SWITCH("IEC958 In/Out 48KHz", spdo_48k),
	DEFINE_MIXER_SWITCH("IEC958 Loop", spdif_loop),
	DEFINE_MIXER_SWITCH("IEC958 In Monitor", spdi_monitor),
};

/* only for model 033/037 */
static struct snd_kcontrol_new snd_cmipci_old_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("IEC958 Mix Analog", spdif_dac_out),
	DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase),
	DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel1),
};

/* only for model 039 or later */
static struct snd_kcontrol_new snd_cmipci_extra_mixer_switches[] __devinitdata = {
	DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel2),
	DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase2),
	{
		.name = "Mic-In Mode",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = snd_cmipci_mic_in_mode_info,
		.get = snd_cmipci_mic_in_mode_get,
		.put = snd_cmipci_mic_in_mode_put,
	}
};

/* card control switches */
static struct snd_kcontrol_new snd_cmipci_modem_switch __devinitdata =
	DEFINE_CARD_SWITCH("Modem", modem);

/* Build all mixer controls for the card: resets the mixer, then registers
 * the chip-version-dependent control sets, and finally caches pointers to
 * the controls that need to be saved/restored (cm_saved_mixer).
 * Returns 0 on success or a negative error code from snd_ctl_add(). */
static int __devinit snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
{
	struct snd_card *card;
	struct snd_kcontrol_new *sw;
	struct snd_kcontrol *kctl;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!cm || !cm->card))
		return -EINVAL;

	card = cm->card;

	strcpy(card->mixername, "CMedia PCI");

	spin_lock_irq(&cm->reg_lock);
	snd_cmipci_mixer_write(cm, 0x00, 0x00);		/* mixer reset */
	spin_unlock_irq(&cm->reg_lock);

	for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_mixers); idx++) {
		if (cm->chip_version == 68) {	// 8768 has no PCM volume
			if (!strcmp(snd_cmipci_mixers[idx].name, "PCM Playback Volume"))
				continue;
		}
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cmipci_mixers[idx], cm))) < 0)
			return err;
	}

	/* mixer switches */
	sw = snd_cmipci_mixer_switches;
	for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_mixer_switches); idx++, sw++) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
		if (err < 0)
			return err;
	}

	if (!cm->can_multi_ch) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(&snd_cmipci_nomulti_switch, cm));
		if (err < 0)
			return err;
	}

	if (cm->device == PCI_DEVICE_ID_CMEDIA_CM8738 || cm->device == PCI_DEVICE_ID_CMEDIA_CM8738B) {
		sw = snd_cmipci_8738_mixer_switches;
		for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_8738_mixer_switches); idx++, sw++) {
			err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
			if (err < 0)
				return err;
		}
		if (cm->can_ac3_hw) {
			/* IEC958 controls attached to the S/PDIF PCM device */
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
			if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm))) < 0)
				return err;
			kctl->id.device = pcm_spdif_device;
		}
		if (cm->chip_version <= 37) {
			sw = snd_cmipci_old_mixer_switches;
			for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_old_mixer_switches); idx++, sw++) {
				err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
				if (err < 0)
					return err;
			}
		}
	}
	if (cm->chip_version >= 39) {
		sw = snd_cmipci_extra_mixer_switches;
		for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_extra_mixer_switches); idx++, sw++) {
			err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm));
			if (err < 0)
				return err;
		}
	}

	/* card switches */
	/*
	 * newer chips don't have the register bits to force modem link
	 * detection; the bit that was FLINKON now mutes CH1
	 */
	if (cm->chip_version < 39) {
		err = snd_ctl_add(cm->card, snd_ctl_new1(&snd_cmipci_modem_switch, cm));
		if (err < 0)
			return err;
	}

	/* cache the controls listed in cm_saved_mixer for later lookup */
	for (idx = 0; idx < CM_SAVED_MIXERS; idx++) {
		struct snd_ctl_elem_id elem_id;
		struct snd_kcontrol *ctl;
		memset(&elem_id, 0, sizeof(elem_id));
		elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		strcpy(elem_id.name, cm_saved_mixer[idx].name);
		ctl = snd_ctl_find_id(cm->card, &elem_id);
		if (ctl)
			cm->mixer_res_ctl[idx] = ctl;
	}

	return 0;
}

/*
 * proc interface
 */

#ifdef CONFIG_PROC_FS
/* Dump the register space to /proc: bytes 0x00-0x27 and 0x90-0x93
 * (the range in between is skipped by the i == 0x28 jump). */
static void snd_cmipci_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct cmipci *cm = entry->private_data;
	int i, v;

	snd_iprintf(buffer, "%s\n", cm->card->longname);
	for (i = 0; i < 0x94; i++) {
		if (i == 0x28)
			i = 0x90;
		v = inb(cm->iobase + i);
		if (i % 4 == 0)
			snd_iprintf(buffer, "\n%02x:", i);
		snd_iprintf(buffer, " %02x", v);
	}
	snd_iprintf(buffer, "\n");
}

static void __devinit snd_cmipci_proc_init(struct cmipci *cm)
{
	struct snd_info_entry *entry;

	if (! snd_card_proc_new(cm->card, "cmipci", &entry))
		snd_info_set_text_ops(entry, cm, snd_cmipci_proc_read);
}
#else /* !CONFIG_PROC_FS */
static inline void snd_cmipci_proc_init(struct cmipci *cm) {}
#endif

static DEFINE_PCI_DEVICE_TABLE(snd_cmipci_ids) = {
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
	{PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0},
	{PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
	{0,},
};


/*
 * check chip version and capabilities
 * driver name is modified according to the chip model
 */
static void __devinit query_chip(struct cmipci *cm)
{
	unsigned int detect;

	/* check reg 0Ch, bit 24-31 */
	detect = snd_cmipci_read(cm, CM_REG_INT_HLDCLR) & CM_CHIP_MASK2;
	if (! detect) {
		/* check reg 08h, bit 24-28 */
		detect = snd_cmipci_read(cm, CM_REG_CHFORMAT) & CM_CHIP_MASK1;
		switch (detect) {
		case 0:
			cm->chip_version = 33;
			if (cm->do_soft_ac3)
				cm->can_ac3_sw = 1;
			else
				cm->can_ac3_hw = 1;
			break;
		case CM_CHIP_037:
			cm->chip_version = 37;
			cm->can_ac3_hw = 1;
			break;
		default:
			cm->chip_version = 39;
			cm->can_ac3_hw = 1;
			break;
		}
		cm->max_channels = 2;
	} else {
		if (detect & CM_CHIP_039) {
			cm->chip_version = 39;
			if (detect & CM_CHIP_039_6CH) /* 4 or 6 channels */
				cm->max_channels = 6;
			else
				cm->max_channels = 4;
		} else if (detect & CM_CHIP_8768) {
			cm->chip_version = 68;
			cm->max_channels = 8;
			cm->can_96k = 1;
		} else {
			cm->chip_version = 55;
			cm->max_channels = 6;
			cm->can_96k = 1;
		}
		cm->can_ac3_hw = 1;
		cm->can_multi_ch = 1;
	}
}

#ifdef SUPPORT_JOYSTICK
/* Reserve the joystick I/O port (fixed via joystick_port[dev], or
 * auto-detected when the option is 1), enable the gameport on the chip
 * and register it.  Returns 0, -ENODEV, -EBUSY or -ENOMEM. */
static int __devinit snd_cmipci_create_gameport(struct cmipci *cm, int dev)
{
	static int ports[] = { 0x201, 0x200, 0 }; /* FIXME: majority is 0x201? */
	struct gameport *gp;
	struct resource *r = NULL;
	int i, io_port = 0;

	if (joystick_port[dev] == 0)
		return -ENODEV;

	if (joystick_port[dev] == 1) { /* auto-detect */
		for (i = 0; ports[i]; i++) {
			io_port = ports[i];
			r = request_region(io_port, 1, "CMIPCI gameport");
			if (r)
				break;
		}
	} else {
		io_port = joystick_port[dev];
		r = request_region(io_port, 1, "CMIPCI gameport");
	}

	if (!r) {
		printk(KERN_WARNING "cmipci: cannot reserve joystick ports\n");
		return -EBUSY;
	}

	cm->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "cmipci: cannot allocate memory for gameport\n");
		release_and_free_resource(r);
		return -ENOMEM;
	}
	gameport_set_name(gp, "C-Media Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(cm->pci));
	gameport_set_dev_parent(gp, &cm->pci->dev);
	gp->io = io_port;
	/* the region is released again in snd_cmipci_free_gameport() */
	gameport_set_port_data(gp, r);

	snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN);

	gameport_register_port(cm->gameport);

	return 0;
}

static void snd_cmipci_free_gameport(struct cmipci *cm)
{
	if (cm->gameport) {
		struct resource *r =
gameport_get_port_data(cm->gameport); gameport_unregister_port(cm->gameport); cm->gameport = NULL; snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN); release_and_free_resource(r); } } #else static inline int snd_cmipci_create_gameport(struct cmipci *cm, int dev) { return -ENOSYS; } static inline void snd_cmipci_free_gameport(struct cmipci *cm) { } #endif static int snd_cmipci_free(struct cmipci *cm) { if (cm->irq >= 0) { snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_ENSPDOUT); snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); /* disable ints */ snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_write(cm, CM_REG_FUNCTRL0, 0); /* disable channels */ snd_cmipci_write(cm, CM_REG_FUNCTRL1, 0); /* reset mixer */ snd_cmipci_mixer_write(cm, 0, 0); free_irq(cm->irq, cm); } snd_cmipci_free_gameport(cm); pci_release_regions(cm->pci); pci_disable_device(cm->pci); kfree(cm); return 0; } static int snd_cmipci_dev_free(struct snd_device *device) { struct cmipci *cm = device->device_data; return snd_cmipci_free(cm); } static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) { long iosynth; unsigned int val; struct snd_opl3 *opl3; int err; if (!fm_port) goto disable_fm; if (cm->chip_version >= 39) { /* first try FM regs in PCI port range */ iosynth = cm->iobase + CM_REG_FM_PCI; err = snd_opl3_create(cm->card, iosynth, iosynth + 2, OPL3_HW_OPL3, 1, &opl3); } else { err = -EIO; } if (err < 0) { /* then try legacy ports */ val = snd_cmipci_read(cm, CM_REG_LEGACY_CTRL) & ~CM_FMSEL_MASK; iosynth = fm_port; switch (iosynth) { case 0x3E8: val |= CM_FMSEL_3E8; break; case 0x3E0: val |= CM_FMSEL_3E0; break; case 0x3C8: val |= CM_FMSEL_3C8; break; case 0x388: val |= CM_FMSEL_388; break; default: goto disable_fm; } snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); /* enable FM */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); if (snd_opl3_create(cm->card, iosynth, iosynth + 2, 
OPL3_HW_OPL3, 0, &opl3) < 0) { printk(KERN_ERR "cmipci: no OPL device at %#lx, " "skipping...\n", iosynth); goto disable_fm; } } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { printk(KERN_ERR "cmipci: cannot create OPL3 hwdep\n"); return err; } return 0; disable_fm: snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_FMSEL_MASK); snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); return 0; } static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci, int dev, struct cmipci **rcmipci) { struct cmipci *cm; int err; static struct snd_device_ops ops = { .dev_free = snd_cmipci_dev_free, }; unsigned int val; long iomidi = 0; int integrated_midi = 0; char modelstr[16]; int pcm_index, pcm_spdif_index; static DEFINE_PCI_DEVICE_TABLE(intel_82437vx) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX) }, { }, }; *rcmipci = NULL; if ((err = pci_enable_device(pci)) < 0) return err; cm = kzalloc(sizeof(*cm), GFP_KERNEL); if (cm == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&cm->reg_lock); mutex_init(&cm->open_mutex); cm->device = pci->device; cm->card = card; cm->pci = pci; cm->irq = -1; cm->channel[0].ch = 0; cm->channel[1].ch = 1; cm->channel[0].is_dac = cm->channel[1].is_dac = 1; /* dual DAC mode */ if ((err = pci_request_regions(pci, card->driver)) < 0) { kfree(cm); pci_disable_device(pci); return err; } cm->iobase = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_cmipci_interrupt, IRQF_SHARED, KBUILD_MODNAME, cm)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_cmipci_free(cm); return -EBUSY; } cm->irq = pci->irq; pci_set_master(cm->pci); /* * check chip version, max channels and capabilities */ cm->chip_version = 0; cm->max_channels = 2; cm->do_soft_ac3 = soft_ac3[dev]; if (pci->device != PCI_DEVICE_ID_CMEDIA_CM8338A && pci->device != PCI_DEVICE_ID_CMEDIA_CM8338B) query_chip(cm); /* added -MCx suffix for chip supporting multi-channels */ if (cm->can_multi_ch) 
sprintf(cm->card->driver + strlen(cm->card->driver), "-MC%d", cm->max_channels); else if (cm->can_ac3_sw) strcpy(cm->card->driver + strlen(cm->card->driver), "-SWIEC"); cm->dig_status = SNDRV_PCM_DEFAULT_CON_SPDIF; cm->dig_pcm_status = SNDRV_PCM_DEFAULT_CON_SPDIF; #if CM_CH_PLAY == 1 cm->ctrl = CM_CHADC0; /* default FUNCNTRL0 */ #else cm->ctrl = CM_CHADC1; /* default FUNCNTRL0 */ #endif /* initialize codec registers */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_RESET); snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_RESET); snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); /* disable ints */ snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_write(cm, CM_REG_FUNCTRL0, 0); /* disable channels */ snd_cmipci_write(cm, CM_REG_FUNCTRL1, 0); snd_cmipci_write(cm, CM_REG_CHFORMAT, 0); snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_ENDBDAC|CM_N4SPK3D); #if CM_CH_PLAY == 1 snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); #else snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_XCHGDAC); #endif if (cm->chip_version) { snd_cmipci_write_b(cm, CM_REG_EXT_MISC, 0x20); /* magic */ snd_cmipci_write_b(cm, CM_REG_EXT_MISC + 1, 0x09); /* more magic */ } /* Set Bus Master Request */ snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_BREQ); /* Assume TX and compatible chip set (Autodetection required for VX chip sets) */ switch (pci->device) { case PCI_DEVICE_ID_CMEDIA_CM8738: case PCI_DEVICE_ID_CMEDIA_CM8738B: if (!pci_dev_present(intel_82437vx)) snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_TXVX); break; default: break; } if (cm->chip_version < 68) { val = pci->device < 0x110 ? 
8338 : 8738; } else { switch (snd_cmipci_read_b(cm, CM_REG_INT_HLDCLR + 3) & 0x03) { case 0: val = 8769; break; case 2: val = 8762; break; default: switch ((pci->subsystem_vendor << 16) | pci->subsystem_device) { case 0x13f69761: case 0x584d3741: case 0x584d3751: case 0x584d3761: case 0x584d3771: case 0x72848384: val = 8770; break; default: val = 8768; break; } } } sprintf(card->shortname, "C-Media CMI%d", val); if (cm->chip_version < 68) sprintf(modelstr, " (model %d)", cm->chip_version); else modelstr[0] = '\0'; sprintf(card->longname, "%s%s at %#lx, irq %i", card->shortname, modelstr, cm->iobase, cm->irq); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cm, &ops)) < 0) { snd_cmipci_free(cm); return err; } if (cm->chip_version >= 39) { val = snd_cmipci_read_b(cm, CM_REG_MPU_PCI + 1); if (val != 0x00 && val != 0xff) { iomidi = cm->iobase + CM_REG_MPU_PCI; integrated_midi = 1; } } if (!integrated_midi) { val = 0; iomidi = mpu_port[dev]; switch (iomidi) { case 0x320: val = CM_VMPU_320; break; case 0x310: val = CM_VMPU_310; break; case 0x300: val = CM_VMPU_300; break; case 0x330: val = CM_VMPU_330; break; default: iomidi = 0; break; } if (iomidi > 0) { snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); /* enable UART */ snd_cmipci_set_bit(cm, CM_REG_FUNCTRL1, CM_UART_EN); if (inb(iomidi + 1) == 0xff) { snd_printk(KERN_ERR "cannot enable MPU-401 port" " at %#lx\n", iomidi); snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_UART_EN); iomidi = 0; } } } if (cm->chip_version < 68) { err = snd_cmipci_create_fm(cm, fm_port[dev]); if (err < 0) return err; } /* reset mixer */ snd_cmipci_mixer_write(cm, 0, 0); snd_cmipci_proc_init(cm); /* create pcm devices */ pcm_index = pcm_spdif_index = 0; if ((err = snd_cmipci_pcm_new(cm, pcm_index)) < 0) return err; pcm_index++; if ((err = snd_cmipci_pcm2_new(cm, pcm_index)) < 0) return err; pcm_index++; if (cm->can_ac3_hw || cm->can_ac3_sw) { pcm_spdif_index = pcm_index; if ((err = snd_cmipci_pcm_spdif_new(cm, pcm_index)) < 0) return err; } 
/* create mixer interface & switches */ if ((err = snd_cmipci_mixer_new(cm, pcm_spdif_index)) < 0) return err; if (iomidi > 0) { if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_CMIPCI, iomidi, (integrated_midi ? MPU401_INFO_INTEGRATED : 0) | MPU401_INFO_IRQ_HOOK, -1, &cm->rmidi)) < 0) { printk(KERN_ERR "cmipci: no UART401 device at 0x%lx\n", iomidi); } } #ifdef USE_VAR48KRATE for (val = 0; val < ARRAY_SIZE(rates); val++) snd_cmipci_set_pll(cm, rates[val], val); /* * (Re-)Enable external switch spdo_48k */ snd_cmipci_set_bit(cm, CM_REG_MISC_CTRL, CM_SPDIF48K|CM_SPDF_AC97); #endif /* USE_VAR48KRATE */ if (snd_cmipci_create_gameport(cm, dev) < 0) snd_cmipci_clear_bit(cm, CM_REG_FUNCTRL1, CM_JYSTK_EN); snd_card_set_dev(card, &pci->dev); *rcmipci = cm; return 0; } /* */ MODULE_DEVICE_TABLE(pci, snd_cmipci_ids); static int __devinit snd_cmipci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct cmipci *cm; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (! 
enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci->device) { case PCI_DEVICE_ID_CMEDIA_CM8738: case PCI_DEVICE_ID_CMEDIA_CM8738B: strcpy(card->driver, "CMI8738"); break; case PCI_DEVICE_ID_CMEDIA_CM8338A: case PCI_DEVICE_ID_CMEDIA_CM8338B: strcpy(card->driver, "CMI8338"); break; default: strcpy(card->driver, "CMIPCI"); break; } if ((err = snd_cmipci_create(card, pci, dev, &cm)) < 0) { snd_card_free(card); return err; } card->private_data = cm; if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_cmipci_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM /* * power management */ static unsigned char saved_regs[] = { CM_REG_FUNCTRL1, CM_REG_CHFORMAT, CM_REG_LEGACY_CTRL, CM_REG_MISC_CTRL, CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_MIXER3, CM_REG_PLL, CM_REG_CH0_FRAME1, CM_REG_CH0_FRAME2, CM_REG_CH1_FRAME1, CM_REG_CH1_FRAME2, CM_REG_EXT_MISC, CM_REG_INT_STATUS, CM_REG_INT_HLDCLR, CM_REG_FUNCTRL0, }; static unsigned char saved_mixers[] = { SB_DSP4_MASTER_DEV, SB_DSP4_MASTER_DEV + 1, SB_DSP4_PCM_DEV, SB_DSP4_PCM_DEV + 1, SB_DSP4_SYNTH_DEV, SB_DSP4_SYNTH_DEV + 1, SB_DSP4_CD_DEV, SB_DSP4_CD_DEV + 1, SB_DSP4_LINE_DEV, SB_DSP4_LINE_DEV + 1, SB_DSP4_MIC_DEV, SB_DSP4_SPEAKER_DEV, CM_REG_EXTENT_IND, SB_DSP4_OUTPUT_SW, SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, }; static int snd_cmipci_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct cmipci *cm = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(cm->pcm); snd_pcm_suspend_all(cm->pcm2); snd_pcm_suspend_all(cm->pcm_spdif); /* save registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) cm->saved_regs[i] = snd_cmipci_read(cm, saved_regs[i]); for (i = 0; i < 
ARRAY_SIZE(saved_mixers); i++) cm->saved_mixers[i] = snd_cmipci_mixer_read(cm, saved_mixers[i]); /* disable ints */ snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_cmipci_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct cmipci *cm = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "cmipci: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); /* reset / initialize to a sane state */ snd_cmipci_write(cm, CM_REG_INT_HLDCLR, 0); snd_cmipci_ch_reset(cm, CM_CH_PLAY); snd_cmipci_ch_reset(cm, CM_CH_CAPT); snd_cmipci_mixer_write(cm, 0, 0); /* restore registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) snd_cmipci_write(cm, saved_regs[i], cm->saved_regs[i]); for (i = 0; i < ARRAY_SIZE(saved_mixers); i++) snd_cmipci_mixer_write(cm, saved_mixers[i], cm->saved_mixers[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_cmipci_ids, .probe = snd_cmipci_probe, .remove = __devexit_p(snd_cmipci_remove), #ifdef CONFIG_PM .suspend = snd_cmipci_suspend, .resume = snd_cmipci_resume, #endif }; static int __init alsa_card_cmipci_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_cmipci_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_cmipci_init) module_exit(alsa_card_cmipci_exit)
gpl-2.0
flar2/m7-GPE
drivers/net/wireless/ath/ath5k/eeprom.c
5150
49071
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /*************************************\ * EEPROM access functions and helpers * \*************************************/ #include <linux/slab.h> #include "ath5k.h" #include "reg.h" #include "debug.h" /******************\ * Helper functions * \******************/ /* * Translate binary channel representation in EEPROM to frequency */ static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin, unsigned int mode) { u16 val; if (bin == AR5K_EEPROM_CHANNEL_DIS) return bin; if (mode == AR5K_EEPROM_MODE_11A) { if (ee->ee_version > AR5K_EEPROM_VERSION_3_2) val = (5 * bin) + 4800; else val = bin > 62 ? 
(10 * 62) + (5 * (bin - 62)) + 5100 : (bin * 10) + 5100; } else { if (ee->ee_version > AR5K_EEPROM_VERSION_3_2) val = bin + 2300; else val = bin + 2400; } return val; } /*********\ * Parsers * \*********/ /* * Initialize eeprom & capabilities structs */ static int ath5k_eeprom_init_header(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u16 val; u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; /* * Read values from EEPROM and store them in the capability structure */ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header); /* Return if we have an old EEPROM */ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0) return 0; /* * Validate the checksum of the EEPROM date. There are some * devices with invalid EEPROMs. */ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val); if (val) { eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << AR5K_EEPROM_SIZE_ENDLOC_SHIFT; AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val); eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE; /* * Fail safe check to prevent stupid loops due * to busted EEPROMs. XXX: This value is likely too * big still, waiting on a better value. */ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) { ATH5K_ERR(ah, "Invalid max custom EEPROM size: " "%d (0x%04x) max expected: %d (0x%04x)\n", eep_max, eep_max, 3 * AR5K_EEPROM_INFO_MAX, 3 * AR5K_EEPROM_INFO_MAX); return -EIO; } } for (cksum = 0, offset = 0; offset < eep_max; offset++) { AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); cksum ^= val; } if (cksum != AR5K_EEPROM_INFO_CKSUM) { ATH5K_ERR(ah, "Invalid EEPROM " "checksum: 0x%04x eep_max: 0x%04x (%s)\n", cksum, eep_max, eep_max == AR5K_EEPROM_INFO_MAX ? 
"default size" : "custom size"); return -EIO; } AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version), ee_ant_gain); if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) { AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1); /* XXX: Don't know which versions include these two */ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2); if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3); if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) { AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5); AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6); } } if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) { AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val); ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7; ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7; AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val); ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7; ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7; } AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val); if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val) ee->ee_is_hb63 = true; else ee->ee_is_hb63 = false; AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val); ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL); ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false; /* Check if PCIE_OFFSET points to PCIE_SERDES_SECTION * and enable serdes programming if needed. * * XXX: Serdes values seem to be fixed so * no need to read them here, we write them * during ath5k_hw_init */ AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val); ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ? 
true : false; return 0; } /* * Read antenna infos from eeprom */ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset, unsigned int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; int i = 0; AR5K_EEPROM_READ(o++, val); ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; ee->ee_atn_tx_rx[mode] = (val >> 2) & 0x3f; ee->ee_ant_control[mode][i] = (val << 4) & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf; ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f; ee->ee_ant_control[mode][i++] = val & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f; ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f; ee->ee_ant_control[mode][i] = (val << 2) & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3; ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f; ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f; ee->ee_ant_control[mode][i] = (val << 4) & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf; ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f; ee->ee_ant_control[mode][i++] = val & 0x3f; /* Get antenna switch tables */ ah->ah_ant_ctl[mode][AR5K_ANT_CTL] = (ee->ee_ant_control[mode][0] << 4); ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] = ee->ee_ant_control[mode][1] | (ee->ee_ant_control[mode][2] << 6) | (ee->ee_ant_control[mode][3] << 12) | (ee->ee_ant_control[mode][4] << 18) | (ee->ee_ant_control[mode][5] << 24); ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] = ee->ee_ant_control[mode][6] | (ee->ee_ant_control[mode][7] << 6) | (ee->ee_ant_control[mode][8] << 12) | (ee->ee_ant_control[mode][9] << 18) | (ee->ee_ant_control[mode][10] << 24); /* return new offset */ *offset = o; return 0; } /* * Read supported modes and some mode-specific calibration data * from eeprom */ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, unsigned int mode) { struct ath5k_eeprom_info *ee = 
&ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; ee->ee_n_piers[mode] = 0; AR5K_EEPROM_READ(o++, val); ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); switch (mode) { case AR5K_EEPROM_MODE_11A: ee->ee_ob[mode][3] = (val >> 5) & 0x7; ee->ee_db[mode][3] = (val >> 2) & 0x7; ee->ee_ob[mode][2] = (val << 1) & 0x7; AR5K_EEPROM_READ(o++, val); ee->ee_ob[mode][2] |= (val >> 15) & 0x1; ee->ee_db[mode][2] = (val >> 12) & 0x7; ee->ee_ob[mode][1] = (val >> 9) & 0x7; ee->ee_db[mode][1] = (val >> 6) & 0x7; ee->ee_ob[mode][0] = (val >> 3) & 0x7; ee->ee_db[mode][0] = val & 0x7; break; case AR5K_EEPROM_MODE_11G: case AR5K_EEPROM_MODE_11B: ee->ee_ob[mode][1] = (val >> 4) & 0x7; ee->ee_db[mode][1] = val & 0x7; break; } AR5K_EEPROM_READ(o++, val); ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff; ee->ee_thr_62[mode] = val & 0xff; if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28; AR5K_EEPROM_READ(o++, val); ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff; ee->ee_tx_frm2xpa_enable[mode] = val & 0xff; AR5K_EEPROM_READ(o++, val); ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff; if ((val & 0xff) & 0x80) ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1); else ee->ee_noise_floor_thr[mode] = val & 0xff; if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) ee->ee_noise_floor_thr[mode] = mode == AR5K_EEPROM_MODE_11A ? 
-54 : -1; AR5K_EEPROM_READ(o++, val); ee->ee_xlna_gain[mode] = (val >> 5) & 0xff; ee->ee_x_gain[mode] = (val >> 1) & 0xf; ee->ee_xpd[mode] = val & 0x1; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 && mode != AR5K_EEPROM_MODE_11B) ee->ee_fixed_bias[mode] = (val >> 13) & 0x1; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) { AR5K_EEPROM_READ(o++, val); ee->ee_false_detect[mode] = (val >> 6) & 0x7f; if (mode == AR5K_EEPROM_MODE_11A) ee->ee_xr_power[mode] = val & 0x3f; else { /* b_DB_11[bg] and b_OB_11[bg] */ ee->ee_ob[mode][0] = val & 0x7; ee->ee_db[mode][0] = (val >> 3) & 0x7; } } if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) { ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN; ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA; } else { ee->ee_i_gain[mode] = (val >> 13) & 0x7; AR5K_EEPROM_READ(o++, val); ee->ee_i_gain[mode] |= (val << 3) & 0x38; if (mode == AR5K_EEPROM_MODE_11G) { ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6) ee->ee_scaled_cck_delta = (val >> 11) & 0x1f; } } if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 && mode == AR5K_EEPROM_MODE_11A) { ee->ee_i_cal[mode] = (val >> 8) & 0x3f; ee->ee_q_cal[mode] = (val >> 3) & 0x1f; } if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0) goto done; /* Note: >= v5 have bg freq piers on another location * so these freq piers are ignored for >= v5 (should be 0xff * anyway) */ switch (mode) { case AR5K_EEPROM_MODE_11A: if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1) break; AR5K_EEPROM_READ(o++, val); ee->ee_margin_tx_rx[mode] = val & 0x3f; break; case AR5K_EEPROM_MODE_11B: AR5K_EEPROM_READ(o++, val); ee->ee_pwr_cal_b[0].freq = ath5k_eeprom_bin2freq(ee, val & 0xff, mode); if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; ee->ee_pwr_cal_b[1].freq = ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; AR5K_EEPROM_READ(o++, val); ee->ee_pwr_cal_b[2].freq = 
ath5k_eeprom_bin2freq(ee, val & 0xff, mode); if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; break; case AR5K_EEPROM_MODE_11G: AR5K_EEPROM_READ(o++, val); ee->ee_pwr_cal_g[0].freq = ath5k_eeprom_bin2freq(ee, val & 0xff, mode); if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; ee->ee_pwr_cal_g[1].freq = ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; AR5K_EEPROM_READ(o++, val); ee->ee_turbo_max_power[mode] = val & 0x7f; ee->ee_xr_power[mode] = (val >> 7) & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_pwr_cal_g[2].freq = ath5k_eeprom_bin2freq(ee, val & 0xff, mode); if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS) ee->ee_n_piers[mode]++; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; AR5K_EEPROM_READ(o++, val); ee->ee_i_cal[mode] = (val >> 5) & 0x3f; ee->ee_q_cal[mode] = val & 0x1f; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) { AR5K_EEPROM_READ(o++, val); ee->ee_cck_ofdm_gain_delta = val & 0xff; } break; } /* * Read turbo mode information on newer EEPROM versions */ if (ee->ee_version < AR5K_EEPROM_VERSION_5_0) goto done; switch (mode) { case AR5K_EEPROM_MODE_11A: ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f; ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7; AR5K_EEPROM_READ(o++, val); ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3; ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f; ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f; AR5K_EEPROM_READ(o++, val); ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7; ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff; if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >= 2) ee->ee_pd_gain_overlap = (val >> 9) & 0xf; break; case AR5K_EEPROM_MODE_11G: ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f; 
ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7; AR5K_EEPROM_READ(o++, val); ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1; ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f; ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f; AR5K_EEPROM_READ(o++, val); ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5; ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff; break; } done: /* return new offset */ *offset = o; return 0; } /* Read mode-specific data (except power calibration data) */ static int ath5k_eeprom_init_modes(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 mode_offset[3]; unsigned int mode; u32 offset; int ret; /* * Get values for all modes */ mode_offset[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_MODES_11A(ah->ah_ee_version); mode_offset[AR5K_EEPROM_MODE_11B] = AR5K_EEPROM_MODES_11B(ah->ah_ee_version); mode_offset[AR5K_EEPROM_MODE_11G] = AR5K_EEPROM_MODES_11G(ah->ah_ee_version); ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header); for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { offset = mode_offset[mode]; ret = ath5k_eeprom_read_ants(ah, &offset, mode); if (ret) return ret; ret = ath5k_eeprom_read_modes(ah, &offset, mode); if (ret) return ret; } /* override for older eeprom versions for better performance */ if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) { ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15; ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28; ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28; } return 0; } /* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff * frequency mask) */ static inline int ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, struct ath5k_chan_pcal_info *pc, unsigned int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; int o = *offset; int i = 0; u8 freq1, freq2; u16 val; ee->ee_n_piers[mode] = 0; while (i < max) { AR5K_EEPROM_READ(o++, val); freq1 = val & 
0xff; if (!freq1) break; pc[i++].freq = ath5k_eeprom_bin2freq(ee, freq1, mode); ee->ee_n_piers[mode]++; freq2 = (val >> 8) & 0xff; if (!freq2) break; pc[i++].freq = ath5k_eeprom_bin2freq(ee, freq2, mode); ee->ee_n_piers[mode]++; } /* return new offset */ *offset = o; return 0; } /* Read frequency piers for 802.11a */ static int ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; int i; u16 val; u8 mask; if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) { ath5k_eeprom_read_freq_list(ah, &offset, AR5K_EEPROM_N_5GHZ_CHAN, pcal, AR5K_EEPROM_MODE_11A); } else { mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version); AR5K_EEPROM_READ(offset++, val); pcal[0].freq = (val >> 9) & mask; pcal[1].freq = (val >> 2) & mask; pcal[2].freq = (val << 5) & mask; AR5K_EEPROM_READ(offset++, val); pcal[2].freq |= (val >> 11) & 0x1f; pcal[3].freq = (val >> 4) & mask; pcal[4].freq = (val << 3) & mask; AR5K_EEPROM_READ(offset++, val); pcal[4].freq |= (val >> 13) & 0x7; pcal[5].freq = (val >> 6) & mask; pcal[6].freq = (val << 1) & mask; AR5K_EEPROM_READ(offset++, val); pcal[6].freq |= (val >> 15) & 0x1; pcal[7].freq = (val >> 8) & mask; pcal[8].freq = (val >> 1) & mask; pcal[9].freq = (val << 6) & mask; AR5K_EEPROM_READ(offset++, val); pcal[9].freq |= (val >> 10) & 0x3f; /* Fixed number of piers */ ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10; for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) { pcal[i].freq = ath5k_eeprom_bin2freq(ee, pcal[i].freq, AR5K_EEPROM_MODE_11A); } } return 0; } /* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */ static inline int ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *pcal; switch (mode) { case AR5K_EEPROM_MODE_11B: pcal = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: pcal = 
ee->ee_pwr_cal_g; break; default: return -EINVAL; } ath5k_eeprom_read_freq_list(ah, &offset, AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal, mode); return 0; } /* * Read power calibration for RF5111 chips * * For RF5111 we have an XPD -eXternal Power Detector- curve * for each calibrated channel. Each curve has 0,5dB Power steps * on x axis and PCDAC steps (offsets) on y axis and looks like an * exponential function. To recreate the curve we read 11 points * here and interpolate later. */ /* Used to match PCDAC steps with power values on RF5111 chips * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC * steps that match with the power values we read from eeprom. On * older eeprom versions (< 3.2) these steps are equally spaced at * 10% of the pcdac curve -until the curve reaches its maximum- * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2) * these 11 steps are spaced in a different way. This function returns * the pcdac steps based on eeprom version and curve min/max so that we * can have pcdac/pwr points. 
*/ static inline void ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp) { static const u16 intercepts3[] = { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 }; static const u16 intercepts3_2[] = { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; const u16 *ip; int i; if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2) ip = intercepts3_2; else ip = intercepts3; for (i = 0; i < ARRAY_SIZE(intercepts3); i++) vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100; } static int ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *chinfo; u8 pier, pdg; switch (mode) { case AR5K_EEPROM_MODE_11A: if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) return 0; chinfo = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) return 0; chinfo = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) return 0; chinfo = ee->ee_pwr_cal_g; break; default: return -EINVAL; } for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { if (!chinfo[pier].pd_curves) continue; for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) { struct ath5k_pdgain_info *pd = &chinfo[pier].pd_curves[pdg]; kfree(pd->pd_step); kfree(pd->pd_pwr); } kfree(chinfo[pier].pd_curves); } return 0; } /* Convert RF5111 specific data to generic raw data * used by interpolation code */ static int ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, struct ath5k_chan_pcal_info *chinfo) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf5111 *pcinfo; struct ath5k_pdgain_info *pd; u8 pier, point, idx; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; /* Fill raw data for each calibration pier */ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { pcinfo = &chinfo[pier].rf5111_info; /* Allocate pd_curves for this cal pier */ chinfo[pier].pd_curves = kcalloc(AR5K_EEPROM_N_PD_CURVES, sizeof(struct 
ath5k_pdgain_info), GFP_KERNEL); if (!chinfo[pier].pd_curves) goto err_out; /* Only one curve for RF5111 * find out which one and place * in pd_curves. * Note: ee_x_gain is reversed here */ for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) { if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) { pdgain_idx[0] = idx; break; } } ee->ee_pd_gains[mode] = 1; pd = &chinfo[pier].pd_curves[idx]; pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111; /* Allocate pd points for this curve */ pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, sizeof(u8), GFP_KERNEL); if (!pd->pd_step) goto err_out; pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, sizeof(s16), GFP_KERNEL); if (!pd->pd_pwr) goto err_out; /* Fill raw dataset * (convert power to 0.25dB units * for RF5112 compatibility) */ for (point = 0; point < pd->pd_points; point++) { /* Absolute values */ pd->pd_pwr[point] = 2 * pcinfo->pwr[point]; /* Already sorted */ pd->pd_step[point] = pcinfo->pcdac[point]; } /* Set min/max pwr */ chinfo[pier].min_pwr = pd->pd_pwr[0]; chinfo[pier].max_pwr = pd->pd_pwr[10]; } return 0; err_out: ath5k_eeprom_free_pcal_info(ah, mode); return -ENOMEM; } /* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *pcal; int offset, ret; int i; u16 val; offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); switch (mode) { case AR5K_EEPROM_MODE_11A: if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) return 0; ret = ath5k_eeprom_init_11a_pcal_freq(ah, offset + AR5K_EEPROM_GROUP1_OFFSET); if (ret < 0) return ret; offset += AR5K_EEPROM_GROUP2_OFFSET; pcal = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: if (!AR5K_EEPROM_HDR_11B(ee->ee_header) && !AR5K_EEPROM_HDR_11G(ee->ee_header)) return 0; pcal = ee->ee_pwr_cal_b; offset += AR5K_EEPROM_GROUP3_OFFSET; /* fixed piers */ pcal[0].freq = 2412; pcal[1].freq = 2447; pcal[2].freq = 2484; ee->ee_n_piers[mode] = 3; break; case 
AR5K_EEPROM_MODE_11G: if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) return 0; pcal = ee->ee_pwr_cal_g; offset += AR5K_EEPROM_GROUP4_OFFSET; /* fixed piers */ pcal[0].freq = 2312; pcal[1].freq = 2412; pcal[2].freq = 2484; ee->ee_n_piers[mode] = 3; break; default: return -EINVAL; } for (i = 0; i < ee->ee_n_piers[mode]; i++) { struct ath5k_chan_pcal_info_rf5111 *cdata = &pcal[i].rf5111_info; AR5K_EEPROM_READ(offset++, val); cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M); cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M); cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M); AR5K_EEPROM_READ(offset++, val); cdata->pwr[0] |= ((val >> 14) & 0x3); cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M); cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M); cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M); AR5K_EEPROM_READ(offset++, val); cdata->pwr[3] |= ((val >> 12) & 0xf); cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M); cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M); AR5K_EEPROM_READ(offset++, val); cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M); cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M); cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M); AR5K_EEPROM_READ(offset++, val); cdata->pwr[8] |= ((val >> 14) & 0x3); cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M); cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M); ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min, cdata->pcdac_max, cdata->pcdac); } return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal); } /* * Read power calibration for RF5112 chips * * For RF5112 we have 4 XPD -eXternal Power Detector- curves * for each calibrated channel on 0, -6, -12 and -18dBm but we only * use the higher (3) and the lower (0) curves. Each curve has 0.5dB * power steps on x axis and PCDAC steps on y axis and looks like a * linear function. 
To recreate the curve and pass the power values * on hw, we read 4 points for xpd 0 (lower gain -> max power) * and 3 points for xpd 3 (higher gain -> lower power) here and * interpolate later. * * Note: Many vendors just use xpd 0 so xpd 3 is zeroed. */ /* Convert RF5112 specific data to generic raw data * used by interpolation code */ static int ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode, struct ath5k_chan_pcal_info *chinfo) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf5112 *pcinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; unsigned int pier, pdg, point; /* Fill raw data for each calibration pier */ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { pcinfo = &chinfo[pier].rf5112_info; /* Allocate pd_curves for this cal pier */ chinfo[pier].pd_curves = kcalloc(AR5K_EEPROM_N_PD_CURVES, sizeof(struct ath5k_pdgain_info), GFP_KERNEL); if (!chinfo[pier].pd_curves) goto err_out; /* Fill pd_curves */ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { u8 idx = pdgain_idx[pdg]; struct ath5k_pdgain_info *pd = &chinfo[pier].pd_curves[idx]; /* Lowest gain curve (max power) */ if (pdg == 0) { /* One more point for better accuracy */ pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS; /* Allocate pd points for this curve */ pd->pd_step = kcalloc(pd->pd_points, sizeof(u8), GFP_KERNEL); if (!pd->pd_step) goto err_out; pd->pd_pwr = kcalloc(pd->pd_points, sizeof(s16), GFP_KERNEL); if (!pd->pd_pwr) goto err_out; /* Fill raw dataset * (all power levels are in 0.25dB units) */ pd->pd_step[0] = pcinfo->pcdac_x0[0]; pd->pd_pwr[0] = pcinfo->pwr_x0[0]; for (point = 1; point < pd->pd_points; point++) { /* Absolute values */ pd->pd_pwr[point] = pcinfo->pwr_x0[point]; /* Deltas */ pd->pd_step[point] = pd->pd_step[point - 1] + pcinfo->pcdac_x0[point]; } /* Set min power for this frequency */ chinfo[pier].min_pwr = pd->pd_pwr[0]; /* Highest gain curve (min power) */ } else if (pdg == 1) { pd->pd_points = 
AR5K_EEPROM_N_XPD3_POINTS; /* Allocate pd points for this curve */ pd->pd_step = kcalloc(pd->pd_points, sizeof(u8), GFP_KERNEL); if (!pd->pd_step) goto err_out; pd->pd_pwr = kcalloc(pd->pd_points, sizeof(s16), GFP_KERNEL); if (!pd->pd_pwr) goto err_out; /* Fill raw dataset * (all power levels are in 0.25dB units) */ for (point = 0; point < pd->pd_points; point++) { /* Absolute values */ pd->pd_pwr[point] = pcinfo->pwr_x3[point]; /* Fixed points */ pd->pd_step[point] = pcinfo->pcdac_x3[point]; } /* Since we have a higher gain curve * override min power */ chinfo[pier].min_pwr = pd->pd_pwr[0]; } } } return 0; err_out: ath5k_eeprom_free_pcal_info(ah, mode); return -ENOMEM; } /* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info; struct ath5k_chan_pcal_info *gen_chan_info; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; u8 i, c; u16 val; u8 pd_gains = 0; /* Count how many curves we have and * identify them (which one of the 4 * available curves we have on each count). 
* Curves are stored from lower (x0) to * higher (x3) gain */ for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) { /* ee_x_gain[mode] is x gain mask */ if ((ee->ee_x_gain[mode] >> i) & 0x1) pdgain_idx[pd_gains++] = i; } ee->ee_pd_gains[mode] = pd_gains; if (pd_gains == 0 || pd_gains > 2) return -EINVAL; switch (mode) { case AR5K_EEPROM_MODE_11A: /* * Read 5GHz EEPROM channels */ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); ath5k_eeprom_init_11a_pcal_freq(ah, offset); offset += AR5K_EEPROM_GROUP2_OFFSET; gen_chan_info = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += AR5K_EEPROM_GROUP3_OFFSET; /* NB: frequency piers parsed during mode init */ gen_chan_info = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += AR5K_EEPROM_GROUP4_OFFSET; else if (AR5K_EEPROM_HDR_11B(ee->ee_header)) offset += AR5K_EEPROM_GROUP2_OFFSET; /* NB: frequency piers parsed during mode init */ gen_chan_info = ee->ee_pwr_cal_g; break; default: return -EINVAL; } for (i = 0; i < ee->ee_n_piers[mode]; i++) { chan_pcal_info = &gen_chan_info[i].rf5112_info; /* Power values in quarter dB * for the lower xpd gain curve * (0 dBm -> higher output power) */ for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff); chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff); } /* PCDAC steps * corresponding to the above power * measurements */ AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pcdac_x0[1] = (val & 0x1f); chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); /* Power values in quarter dB * for the higher xpd gain curve * (18 dBm -> lower output power) */ AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff); chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) 
& 0xff); AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x3[2] = (val & 0xff); /* PCDAC steps * corresponding to the above power * measurements (fixed) */ chan_pcal_info->pcdac_x3[0] = 20; chan_pcal_info->pcdac_x3[1] = 35; chan_pcal_info->pcdac_x3[2] = 63; if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f); /* Last xpd0 power level is also channel maximum */ gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; } else { chan_pcal_info->pcdac_x0[0] = 1; gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff); } } return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info); } /* * Read power calibration for RF2413 chips * * For RF2413 we have a Power to PDDAC table (Power Detector) * instead of a PCDAC and 4 pd gain curves for each calibrated channel. * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y * axis and looks like an exponential function like the RF5111 curve. * * To recreate the curves we read here the points and interpolate * later. Note that in most cases only 2 (higher and lower) curves are * used (like RF5112) but vendors have the opportunity to include all * 4 curves on eeprom. The final curve (higher power) has an extra * point for better accuracy like RF5112. */ /* For RF2413 power calibration data doesn't start on a fixed location and * if a mode is not supported, its section is missing -not zeroed-. * So we need to calculate the starting offset for each section by using * these two functions */ /* Return the size of each section based on the mode and the number of pd * gains available (maximum 4). */ static inline unsigned int ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode) { static const unsigned int pdgains_size[] = { 4, 6, 9, 12 }; unsigned int sz; sz = pdgains_size[ee->ee_pd_gains[mode] - 1]; sz *= ee->ee_n_piers[mode]; return sz; } /* Return the starting offset for a section based on the modes supported * and each section's size. 
*/ static unsigned int ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) { u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4); switch (mode) { case AR5K_EEPROM_MODE_11G: if (AR5K_EEPROM_HDR_11B(ee->ee_header)) offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; /* fall through */ case AR5K_EEPROM_MODE_11B: if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + AR5K_EEPROM_N_5GHZ_CHAN / 2; /* fall through */ case AR5K_EEPROM_MODE_11A: break; default: break; } return offset; } /* Convert RF2413 specific data to generic raw data * used by interpolation code */ static int ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode, struct ath5k_chan_pcal_info *chinfo) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf2413 *pcinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; unsigned int pier, pdg, point; /* Fill raw data for each calibration pier */ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { pcinfo = &chinfo[pier].rf2413_info; /* Allocate pd_curves for this cal pier */ chinfo[pier].pd_curves = kcalloc(AR5K_EEPROM_N_PD_CURVES, sizeof(struct ath5k_pdgain_info), GFP_KERNEL); if (!chinfo[pier].pd_curves) goto err_out; /* Fill pd_curves */ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { u8 idx = pdgain_idx[pdg]; struct ath5k_pdgain_info *pd = &chinfo[pier].pd_curves[idx]; /* One more point for the highest power * curve (lowest gain) */ if (pdg == ee->ee_pd_gains[mode] - 1) pd->pd_points = AR5K_EEPROM_N_PD_POINTS; else pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1; /* Allocate pd points for this curve */ pd->pd_step = kcalloc(pd->pd_points, sizeof(u8), GFP_KERNEL); if (!pd->pd_step) goto err_out; pd->pd_pwr = kcalloc(pd->pd_points, sizeof(s16), GFP_KERNEL); if (!pd->pd_pwr) goto err_out; /* Fill raw dataset * convert all pwr levels to * quarter dB for RF5112 compatibility */ pd->pd_step[0] = 
pcinfo->pddac_i[pdg]; pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg]; for (point = 1; point < pd->pd_points; point++) { pd->pd_pwr[point] = pd->pd_pwr[point - 1] + 2 * pcinfo->pwr[pdg][point - 1]; pd->pd_step[point] = pd->pd_step[point - 1] + pcinfo->pddac[pdg][point - 1]; } /* Highest gain curve -> min power */ if (pdg == 0) chinfo[pier].min_pwr = pd->pd_pwr[0]; /* Lowest gain curve -> max power */ if (pdg == ee->ee_pd_gains[mode] - 1) chinfo[pier].max_pwr = pd->pd_pwr[pd->pd_points - 1]; } } return 0; err_out: ath5k_eeprom_free_pcal_info(ah, mode); return -ENOMEM; } /* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf2413 *pcinfo; struct ath5k_chan_pcal_info *chinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; int idx, i; u16 val; u8 pd_gains = 0; /* Count how many curves we have and * identify them (which one of the 4 * available curves we have on each count). 
* Curves are stored from higher to * lower gain so we go backwards */ for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) { /* ee_x_gain[mode] is x gain mask */ if ((ee->ee_x_gain[mode] >> idx) & 0x1) pdgain_idx[pd_gains++] = idx; } ee->ee_pd_gains[mode] = pd_gains; if (pd_gains == 0) return -EINVAL; offset = ath5k_cal_data_offset_2413(ee, mode); switch (mode) { case AR5K_EEPROM_MODE_11A: if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) return 0; ath5k_eeprom_init_11a_pcal_freq(ah, offset); offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; chinfo = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) return 0; ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; chinfo = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) return 0; ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; chinfo = ee->ee_pwr_cal_g; break; default: return -EINVAL; } for (i = 0; i < ee->ee_n_piers[mode]; i++) { pcinfo = &chinfo[i].rf2413_info; /* * Read pwr_i, pddac_i and the first * 2 pd points (pwr, pddac) */ AR5K_EEPROM_READ(offset++, val); pcinfo->pwr_i[0] = val & 0x1f; pcinfo->pddac_i[0] = (val >> 5) & 0x7f; pcinfo->pwr[0][0] = (val >> 12) & 0xf; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[0][0] = val & 0x3f; pcinfo->pwr[0][1] = (val >> 6) & 0xf; pcinfo->pddac[0][1] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[0][2] = val & 0xf; pcinfo->pddac[0][2] = (val >> 4) & 0x3f; pcinfo->pwr[0][3] = 0; pcinfo->pddac[0][3] = 0; if (pd_gains > 1) { /* * Pd gain 0 is not the last pd gain * so it only has 2 pd points. * Continue with pd gain 1. 
*/ pcinfo->pwr_i[1] = (val >> 10) & 0x1f; pcinfo->pddac_i[1] = (val >> 15) & 0x1; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac_i[1] |= (val & 0x3F) << 1; pcinfo->pwr[1][0] = (val >> 6) & 0xf; pcinfo->pddac[1][0] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[1][1] = val & 0xf; pcinfo->pddac[1][1] = (val >> 4) & 0x3f; pcinfo->pwr[1][2] = (val >> 10) & 0xf; pcinfo->pddac[1][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[1][2] |= (val & 0xF) << 2; pcinfo->pwr[1][3] = 0; pcinfo->pddac[1][3] = 0; } else if (pd_gains == 1) { /* * Pd gain 0 is the last one so * read the extra point. */ pcinfo->pwr[0][3] = (val >> 10) & 0xf; pcinfo->pddac[0][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[0][3] |= (val & 0xF) << 2; } /* * Proceed with the other pd_gains * as above. */ if (pd_gains > 2) { pcinfo->pwr_i[2] = (val >> 4) & 0x1f; pcinfo->pddac_i[2] = (val >> 9) & 0x7f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[2][0] = (val >> 0) & 0xf; pcinfo->pddac[2][0] = (val >> 4) & 0x3f; pcinfo->pwr[2][1] = (val >> 10) & 0xf; pcinfo->pddac[2][1] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[2][1] |= (val & 0xF) << 2; pcinfo->pwr[2][2] = (val >> 4) & 0xf; pcinfo->pddac[2][2] = (val >> 8) & 0x3f; pcinfo->pwr[2][3] = 0; pcinfo->pddac[2][3] = 0; } else if (pd_gains == 2) { pcinfo->pwr[1][3] = (val >> 4) & 0xf; pcinfo->pddac[1][3] = (val >> 8) & 0x3f; } if (pd_gains > 3) { pcinfo->pwr_i[3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2; pcinfo->pddac_i[3] = (val >> 3) & 0x7f; pcinfo->pwr[3][0] = (val >> 10) & 0xf; pcinfo->pddac[3][0] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[3][0] |= (val & 0xF) << 2; pcinfo->pwr[3][1] = (val >> 4) & 0xf; pcinfo->pddac[3][1] = (val >> 8) & 0x3f; pcinfo->pwr[3][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2; pcinfo->pddac[3][2] = (val >> 2) 
& 0x3f; pcinfo->pwr[3][3] = (val >> 8) & 0xf; pcinfo->pddac[3][3] = (val >> 12) & 0xF; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4; } else if (pd_gains == 3) { pcinfo->pwr[2][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2; pcinfo->pddac[2][3] = (val >> 2) & 0x3f; } } return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo); } /* * Read per rate target power (this is the maximum tx power * supported by the card). This info is used when setting * tx power, no matter the channel. * * This also works for v5 EEPROMs. */ static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_rate_pcal_info *rate_pcal_info; u8 *rate_target_pwr_num; u32 offset; u16 val; int i; offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; switch (mode) { case AR5K_EEPROM_MODE_11A: offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_a; ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; break; case AR5K_EEPROM_MODE_11B: offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_b; ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */ break; case AR5K_EEPROM_MODE_11G: offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_g; ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN; break; default: return -EINVAL; } /* Different freq mask for older eeproms (<= v3.2) */ if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) { for (i = 0; i < (*rate_target_pwr_num); i++) { AR5K_EEPROM_READ(offset++, val); rate_pcal_info[i].freq = ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode); rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f); rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f; AR5K_EEPROM_READ(offset++, 
val); if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || val == 0) { (*rate_target_pwr_num) = i; break; } rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7); rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f); rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f); } } else { for (i = 0; i < (*rate_target_pwr_num); i++) { AR5K_EEPROM_READ(offset++, val); rate_pcal_info[i].freq = ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f); rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f; AR5K_EEPROM_READ(offset++, val); if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || val == 0) { (*rate_target_pwr_num) = i; break; } rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf; rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f); rate_pcal_info[i].target_power_54 = (val & 0x3f); } } return 0; } /* * Read per channel calibration info from EEPROM * * This info is used to calibrate the baseband power table. Imagine * that for each channel there is a power curve that's hw specific * (depends on amplifier etc) and we try to "correct" this curve using * offsets we pass on to phy chip (baseband -> before amplifier) so that * it can use accurate power values when setting tx power (takes amplifier's * performance on each channel into account). * * EEPROM provides us with the offsets for some pre-calibrated channels * and we have to interpolate to create the full table for these channels and * also the table for any channel. 
*/ static int ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; int (*read_pcal)(struct ath5k_hw *hw, int mode); int mode; int err; if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) && (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1)) read_pcal = ath5k_eeprom_read_pcal_info_5112; else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) && (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2)) read_pcal = ath5k_eeprom_read_pcal_info_2413; else read_pcal = ath5k_eeprom_read_pcal_info_5111; for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { err = read_pcal(ah, mode); if (err) return err; err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode); if (err < 0) return err; } return 0; } /* Read conformance test limits used for regulatory control */ static int ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_edge_power *rep; unsigned int fmask, pmask; unsigned int ctl_mode; int i, j; u32 offset; u16 val; pmask = AR5K_EEPROM_POWER_M; fmask = AR5K_EEPROM_FREQ_M(ee->ee_version); offset = AR5K_EEPROM_CTL(ee->ee_version); ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version); for (i = 0; i < ee->ee_ctls; i += 2) { AR5K_EEPROM_READ(offset++, val); ee->ee_ctl[i] = (val >> 8) & 0xff; ee->ee_ctl[i + 1] = val & 0xff; } offset = AR5K_EEPROM_GROUP8_OFFSET; if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0) offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) - AR5K_EEPROM_GROUP5_OFFSET; else offset += AR5K_EEPROM_GROUPS_START(ee->ee_version); rep = ee->ee_ctl_pwr; for (i = 0; i < ee->ee_ctls; i++) { switch (ee->ee_ctl[i] & AR5K_CTL_MODE_M) { case AR5K_CTL_11A: case AR5K_CTL_TURBO: ctl_mode = AR5K_EEPROM_MODE_11A; break; default: ctl_mode = AR5K_EEPROM_MODE_11G; break; } if (ee->ee_ctl[i] == 0) { if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) offset += 8; else offset += 7; rep += AR5K_EEPROM_N_EDGES; continue; } if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) 
{ for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { AR5K_EEPROM_READ(offset++, val); rep[j].freq = (val >> 8) & fmask; rep[j + 1].freq = val & fmask; } for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { AR5K_EEPROM_READ(offset++, val); rep[j].edge = (val >> 8) & pmask; rep[j].flag = (val >> 14) & 1; rep[j + 1].edge = val & pmask; rep[j + 1].flag = (val >> 6) & 1; } } else { AR5K_EEPROM_READ(offset++, val); rep[0].freq = (val >> 9) & fmask; rep[1].freq = (val >> 2) & fmask; rep[2].freq = (val << 5) & fmask; AR5K_EEPROM_READ(offset++, val); rep[2].freq |= (val >> 11) & 0x1f; rep[3].freq = (val >> 4) & fmask; rep[4].freq = (val << 3) & fmask; AR5K_EEPROM_READ(offset++, val); rep[4].freq |= (val >> 13) & 0x7; rep[5].freq = (val >> 6) & fmask; rep[6].freq = (val << 1) & fmask; AR5K_EEPROM_READ(offset++, val); rep[6].freq |= (val >> 15) & 0x1; rep[7].freq = (val >> 8) & fmask; rep[0].edge = (val >> 2) & pmask; rep[1].edge = (val << 4) & pmask; AR5K_EEPROM_READ(offset++, val); rep[1].edge |= (val >> 12) & 0xf; rep[2].edge = (val >> 6) & pmask; rep[3].edge = val & pmask; AR5K_EEPROM_READ(offset++, val); rep[4].edge = (val >> 10) & pmask; rep[5].edge = (val >> 4) & pmask; rep[6].edge = (val << 2) & pmask; AR5K_EEPROM_READ(offset++, val); rep[6].edge |= (val >> 14) & 0x3; rep[7].edge = (val >> 8) & pmask; } for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) { rep[j].freq = ath5k_eeprom_bin2freq(ee, rep[j].freq, ctl_mode); } rep += AR5K_EEPROM_N_EDGES; } return 0; } static int ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 offset; u16 val; int ret = 0, i; offset = AR5K_EEPROM_CTL(ee->ee_version) + AR5K_EEPROM_N_CTLS(ee->ee_version); if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) { /* No spur info for 5GHz */ ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR; /* 2 channels for 2GHz (2464/2420) */ ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1; ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2; 
ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR; } else if (ee->ee_version >= AR5K_EEPROM_VERSION_5_3) { for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) { AR5K_EEPROM_READ(offset, val); ee->ee_spur_chans[i][0] = val; AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS, val); ee->ee_spur_chans[i][1] = val; offset++; } } return ret; } /***********************\ * Init/Detach functions * \***********************/ /* * Initialize eeprom data structure */ int ath5k_eeprom_init(struct ath5k_hw *ah) { int err; err = ath5k_eeprom_init_header(ah); if (err < 0) return err; err = ath5k_eeprom_init_modes(ah); if (err < 0) return err; err = ath5k_eeprom_read_pcal_info(ah); if (err < 0) return err; err = ath5k_eeprom_read_ctl_info(ah); if (err < 0) return err; err = ath5k_eeprom_read_spur_chans(ah); if (err < 0) return err; return 0; } void ath5k_eeprom_detach(struct ath5k_hw *ah) { u8 mode; for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) ath5k_eeprom_free_pcal_info(ah, mode); } int ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel) { switch (channel->hw_value) { case AR5K_MODE_11A: return AR5K_EEPROM_MODE_11A; case AR5K_MODE_11G: return AR5K_EEPROM_MODE_11G; case AR5K_MODE_11B: return AR5K_EEPROM_MODE_11B; default: return -1; } }
gpl-2.0
davidmueller13/lt03lte_tw_kernel_5.1.1
drivers/media/video/saa6588.c
5150
13205
/* Driver for SAA6588 RDS decoder (c) 2005 Hans J. Koch This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/types.h> #include <linux/videodev2.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/wait.h> #include <asm/uaccess.h> #include <media/saa6588.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> /* insmod options */ static unsigned int debug; static unsigned int xtal; static unsigned int mmbs; static unsigned int plvl; static unsigned int bufblocks = 100; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); module_param(xtal, int, 0); MODULE_PARM_DESC(xtal, "select oscillator frequency (0..3), default 0"); module_param(mmbs, int, 0); MODULE_PARM_DESC(mmbs, "enable MMBS mode: 0=off (default), 1=on"); module_param(plvl, int, 0); MODULE_PARM_DESC(plvl, "select pause level (0..3), default 0"); module_param(bufblocks, int, 0); MODULE_PARM_DESC(bufblocks, "number of buffered blocks, default 100"); MODULE_DESCRIPTION("v4l2 driver module for SAA6588 RDS decoder"); MODULE_AUTHOR("Hans J. 
Koch <koch@hjk-az.de>"); MODULE_LICENSE("GPL"); /* ---------------------------------------------------------------------- */ #define UNSET (-1U) #define PREFIX "saa6588: " #define dprintk if (debug) printk struct saa6588 { struct v4l2_subdev sd; struct delayed_work work; spinlock_t lock; unsigned char *buffer; unsigned int buf_size; unsigned int rd_index; unsigned int wr_index; unsigned int block_count; unsigned char last_blocknum; wait_queue_head_t read_queue; int data_available_for_read; u8 sync; }; static inline struct saa6588 *to_saa6588(struct v4l2_subdev *sd) { return container_of(sd, struct saa6588, sd); } /* ---------------------------------------------------------------------- */ /* * SAA6588 defines */ /* Initialization and mode control byte (0w) */ /* bit 0+1 (DAC0/DAC1) */ #define cModeStandard 0x00 #define cModeFastPI 0x01 #define cModeReducedRequest 0x02 #define cModeInvalid 0x03 /* bit 2 (RBDS) */ #define cProcessingModeRDS 0x00 #define cProcessingModeRBDS 0x04 /* bit 3+4 (SYM0/SYM1) */ #define cErrCorrectionNone 0x00 #define cErrCorrection2Bits 0x08 #define cErrCorrection5Bits 0x10 #define cErrCorrectionNoneRBDS 0x18 /* bit 5 (NWSY) */ #define cSyncNormal 0x00 #define cSyncRestart 0x20 /* bit 6 (TSQD) */ #define cSigQualityDetectOFF 0x00 #define cSigQualityDetectON 0x40 /* bit 7 (SQCM) */ #define cSigQualityTriggered 0x00 #define cSigQualityContinous 0x80 /* Pause level and flywheel control byte (1w) */ /* bits 0..5 (FEB0..FEB5) */ #define cFlywheelMaxBlocksMask 0x3F #define cFlywheelDefault 0x20 /* bits 6+7 (PL0/PL1) */ #define cPauseLevel_11mV 0x00 #define cPauseLevel_17mV 0x40 #define cPauseLevel_27mV 0x80 #define cPauseLevel_43mV 0xC0 /* Pause time/oscillator frequency/quality detector control byte (1w) */ /* bits 0..4 (SQS0..SQS4) */ #define cQualityDetectSensMask 0x1F #define cQualityDetectDefault 0x0F /* bit 5 (SOSC) */ #define cSelectOscFreqOFF 0x00 #define cSelectOscFreqON 0x20 /* bit 6+7 (PTF0/PTF1) */ #define cOscFreq_4332kHz 0x00 #define 
cOscFreq_8664kHz 0x40 #define cOscFreq_12996kHz 0x80 #define cOscFreq_17328kHz 0xC0 /* ---------------------------------------------------------------------- */ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf) { int i; if (s->rd_index == s->wr_index) { if (debug > 2) dprintk(PREFIX "Read: buffer empty.\n"); return 0; } if (debug > 2) { dprintk(PREFIX "Read: "); for (i = s->rd_index; i < s->rd_index + 3; i++) dprintk("0x%02x ", s->buffer[i]); } if (copy_to_user(user_buf, &s->buffer[s->rd_index], 3)) return -EFAULT; s->rd_index += 3; if (s->rd_index >= s->buf_size) s->rd_index = 0; s->block_count--; if (debug > 2) dprintk("%d blocks total.\n", s->block_count); return 1; } static void read_from_buf(struct saa6588 *s, struct saa6588_command *a) { unsigned long flags; unsigned char __user *buf_ptr = a->buffer; unsigned int i; unsigned int rd_blocks; a->result = 0; if (!a->buffer) return; while (!s->data_available_for_read) { int ret = wait_event_interruptible(s->read_queue, s->data_available_for_read); if (ret == -ERESTARTSYS) { a->result = -EINTR; return; } } spin_lock_irqsave(&s->lock, flags); rd_blocks = a->block_count; if (rd_blocks > s->block_count) rd_blocks = s->block_count; if (!rd_blocks) { spin_unlock_irqrestore(&s->lock, flags); return; } for (i = 0; i < rd_blocks; i++) { if (block_to_user_buf(s, buf_ptr)) { buf_ptr += 3; a->result++; } else break; } a->result *= 3; s->data_available_for_read = (s->block_count > 0); spin_unlock_irqrestore(&s->lock, flags); } static void block_to_buf(struct saa6588 *s, unsigned char *blockbuf) { unsigned int i; if (debug > 3) dprintk(PREFIX "New block: "); for (i = 0; i < 3; ++i) { if (debug > 3) dprintk("0x%02x ", blockbuf[i]); s->buffer[s->wr_index] = blockbuf[i]; s->wr_index++; } if (s->wr_index >= s->buf_size) s->wr_index = 0; if (s->wr_index == s->rd_index) { s->rd_index += 3; if (s->rd_index >= s->buf_size) s->rd_index = 0; } else s->block_count++; if (debug > 3) dprintk("%d blocks 
total.\n", s->block_count); } static void saa6588_i2c_poll(struct saa6588 *s) { struct i2c_client *client = v4l2_get_subdevdata(&s->sd); unsigned long flags; unsigned char tmpbuf[6]; unsigned char blocknum; unsigned char tmp; /* Although we only need 3 bytes, we have to read at least 6. SAA6588 returns garbage otherwise. */ if (6 != i2c_master_recv(client, &tmpbuf[0], 6)) { if (debug > 1) dprintk(PREFIX "read error!\n"); return; } s->sync = tmpbuf[0] & 0x10; if (!s->sync) return; blocknum = tmpbuf[0] >> 5; if (blocknum == s->last_blocknum) { if (debug > 3) dprintk("Saw block %d again.\n", blocknum); return; } s->last_blocknum = blocknum; /* Byte order according to v4l2 specification: Byte 0: Least Significant Byte of RDS Block Byte 1: Most Significant Byte of RDS Block Byte 2 Bit 7: Error bit. Indicates that an uncorrectable error occurred during reception of this block. Bit 6: Corrected bit. Indicates that an error was corrected for this data block. Bits 5-3: Same as bits 0-2. Bits 2-0: Block number. SAA6588 byte order is Status-MSB-LSB, so we have to swap the first and the last of the 3 bytes block. */ tmp = tmpbuf[2]; tmpbuf[2] = tmpbuf[0]; tmpbuf[0] = tmp; /* Map 'Invalid block E' to 'Invalid Block' */ if (blocknum == 6) blocknum = V4L2_RDS_BLOCK_INVALID; /* And if are not in mmbs mode, then 'Block E' is also mapped to 'Invalid Block'. As far as I can tell MMBS is discontinued, and if there is ever a need to support E blocks, then please contact the linux-media mailinglist. */ else if (!mmbs && blocknum == 5) blocknum = V4L2_RDS_BLOCK_INVALID; tmp = blocknum; tmp |= blocknum << 3; /* Received offset == Offset Name (OK ?) */ if ((tmpbuf[2] & 0x03) == 0x03) tmp |= V4L2_RDS_BLOCK_ERROR; /* uncorrectable error */ else if ((tmpbuf[2] & 0x03) != 0x00) tmp |= V4L2_RDS_BLOCK_CORRECTED; /* corrected error */ tmpbuf[2] = tmp; /* Is this enough ? Should we also check other bits ? 
*/ spin_lock_irqsave(&s->lock, flags); block_to_buf(s, tmpbuf); spin_unlock_irqrestore(&s->lock, flags); s->data_available_for_read = 1; wake_up_interruptible(&s->read_queue); } static void saa6588_work(struct work_struct *work) { struct saa6588 *s = container_of(work, struct saa6588, work.work); saa6588_i2c_poll(s); schedule_delayed_work(&s->work, msecs_to_jiffies(20)); } static void saa6588_configure(struct saa6588 *s) { struct i2c_client *client = v4l2_get_subdevdata(&s->sd); unsigned char buf[3]; int rc; buf[0] = cSyncRestart; if (mmbs) buf[0] |= cProcessingModeRBDS; buf[1] = cFlywheelDefault; switch (plvl) { case 0: buf[1] |= cPauseLevel_11mV; break; case 1: buf[1] |= cPauseLevel_17mV; break; case 2: buf[1] |= cPauseLevel_27mV; break; case 3: buf[1] |= cPauseLevel_43mV; break; default: /* nothing */ break; } buf[2] = cQualityDetectDefault | cSelectOscFreqON; switch (xtal) { case 0: buf[2] |= cOscFreq_4332kHz; break; case 1: buf[2] |= cOscFreq_8664kHz; break; case 2: buf[2] |= cOscFreq_12996kHz; break; case 3: buf[2] |= cOscFreq_17328kHz; break; default: /* nothing */ break; } dprintk(PREFIX "writing: 0w=0x%02x 1w=0x%02x 2w=0x%02x\n", buf[0], buf[1], buf[2]); rc = i2c_master_send(client, buf, 3); if (rc != 3) printk(PREFIX "i2c i/o error: rc == %d (should be 3)\n", rc); } /* ---------------------------------------------------------------------- */ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct saa6588 *s = to_saa6588(sd); struct saa6588_command *a = arg; switch (cmd) { /* --- open() for /dev/radio --- */ case SAA6588_CMD_OPEN: a->result = 0; /* return error if chip doesn't work ??? 
*/ break; /* --- close() for /dev/radio --- */ case SAA6588_CMD_CLOSE: s->data_available_for_read = 1; wake_up_interruptible(&s->read_queue); a->result = 0; break; /* --- read() for /dev/radio --- */ case SAA6588_CMD_READ: read_from_buf(s, a); break; /* --- poll() for /dev/radio --- */ case SAA6588_CMD_POLL: a->result = 0; if (s->data_available_for_read) { a->result |= POLLIN | POLLRDNORM; } poll_wait(a->instance, &s->read_queue, a->event_list); break; default: /* nothing */ return -ENOIOCTLCMD; } return 0; } static int saa6588_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa6588 *s = to_saa6588(sd); vt->capability |= V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO; if (s->sync) vt->rxsubchans |= V4L2_TUNER_SUB_RDS; return 0; } static int saa6588_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa6588 *s = to_saa6588(sd); saa6588_configure(s); return 0; } static int saa6588_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA6588, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops saa6588_core_ops = { .g_chip_ident = saa6588_g_chip_ident, .ioctl = saa6588_ioctl, }; static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = { .g_tuner = saa6588_g_tuner, .s_tuner = saa6588_s_tuner, }; static const struct v4l2_subdev_ops saa6588_ops = { .core = &saa6588_core_ops, .tuner = &saa6588_tuner_ops, }; /* ---------------------------------------------------------------------- */ static int saa6588_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct saa6588 *s; struct v4l2_subdev *sd; v4l_info(client, "saa6588 found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); s = kzalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) return -ENOMEM; s->buf_size = bufblocks * 3; s->buffer = kmalloc(s->buf_size, 
GFP_KERNEL); if (s->buffer == NULL) { kfree(s); return -ENOMEM; } sd = &s->sd; v4l2_i2c_subdev_init(sd, client, &saa6588_ops); spin_lock_init(&s->lock); s->block_count = 0; s->wr_index = 0; s->rd_index = 0; s->last_blocknum = 0xff; init_waitqueue_head(&s->read_queue); s->data_available_for_read = 0; saa6588_configure(s); /* start polling via eventd */ INIT_DELAYED_WORK(&s->work, saa6588_work); schedule_delayed_work(&s->work, 0); return 0; } static int saa6588_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct saa6588 *s = to_saa6588(sd); v4l2_device_unregister_subdev(sd); cancel_delayed_work_sync(&s->work); kfree(s->buffer); kfree(s); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa6588_id[] = { { "saa6588", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa6588_id); static struct i2c_driver saa6588_driver = { .driver = { .owner = THIS_MODULE, .name = "saa6588", }, .probe = saa6588_probe, .remove = saa6588_remove, .id_table = saa6588_id, }; module_i2c_driver(saa6588_driver);
gpl-2.0
zetalabs/linux-3.4-clover
drivers/net/wireless/ath/ath5k/eeprom.c
5150
49071
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
 * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* EEPROM access functions and helpers *
\*************************************/

#include <linux/slab.h>

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/******************\
* Helper functions *
\******************/

/*
 * Translate binary channel representation in EEPROM to frequency
 *
 * Returns the frequency in MHz for a given channel "bin", or the bin
 * unchanged if it is the "channel disabled" marker.  The mapping differs
 * between 5 GHz (11a) and 2.4 GHz modes and between EEPROM versions
 * (<= 3.2 uses a legacy encoding).
 */
static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
				 unsigned int mode)
{
	u16 val;

	if (bin == AR5K_EEPROM_CHANNEL_DIS)
		return bin;

	if (mode == AR5K_EEPROM_MODE_11A) {
		if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
			val = (5 * bin) + 4800;
		else
			val = bin > 62 ?
				(10 * 62) + (5 * (bin - 62)) + 5100 :
				(bin * 10) + 5100;
	} else {
		if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
			val = bin + 2300;
		else
			val = bin + 2400;
	}

	return val;
}


/*********\
* Parsers *
\*********/

/*
 * Initialize eeprom & capabilities structs
 *
 * Reads the EEPROM header (magic, protection, regdomain, version, mode
 * header), verifies the XOR checksum over the info area, and fills the
 * misc/rfkill/serdes fields of ah->ah_capabilities.cap_eeprom.
 * Returns 0 on success or -EIO on a bad size/checksum.
 */
static int
ath5k_eeprom_init_header(struct ath5k_hw *ah)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	u16 val;
	u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;

	/*
	 * Read values from EEPROM and store them in the capability structure
	 */
	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);

	/* Return if we have an old EEPROM */
	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
		return 0;

	/*
	 * Validate the checksum of the EEPROM date. There are some
	 * devices with invalid EEPROMs.
	 */
	AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
	if (val) {
		eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
			   AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
		AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
		eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;

		/*
		 * Fail safe check to prevent stupid loops due
		 * to busted EEPROMs. XXX: This value is likely too
		 * big still, waiting on a better value.
		 */
		if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
			ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
				  "%d (0x%04x) max expected: %d (0x%04x)\n",
				  eep_max, eep_max,
				  3 * AR5K_EEPROM_INFO_MAX,
				  3 * AR5K_EEPROM_INFO_MAX);
			return -EIO;
		}
	}

	/* XOR all info words together; the result must match the
	 * well-known checksum constant. */
	for (cksum = 0, offset = 0; offset < eep_max; offset++) {
		AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
		cksum ^= val;
	}
	if (cksum != AR5K_EEPROM_INFO_CKSUM) {
		ATH5K_ERR(ah, "Invalid EEPROM "
			  "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
			  cksum, eep_max,
			  eep_max == AR5K_EEPROM_INFO_MAX ?
			  "default size" : "custom size");
		return -EIO;
	}

	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
	    ee_ant_gain);

	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);

		/* XXX: Don't know which versions include these two */
		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2);

		if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3)
			AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3);

		if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) {
			AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4);
			AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5);
			AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6);
		}
	}

	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
		/* Old EEPROMs keep 2 GHz OB/DB values at fixed offsets. */
		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
		ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
		ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;

		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
		ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
		ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
	}

	AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val);

	if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val)
		ee->ee_is_hb63 = true;
	else
		ee->ee_is_hb63 = false;

	AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val);
	ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL);
	ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false;

	/* Check if PCIE_OFFSET points to PCIE_SERDES_SECTION
	 * and enable serdes programming if needed.
	 *
	 * XXX: Serdes values seem to be fixed so
	 * no need to read them here, we write them
	 * during ath5k_hw_init */
	AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val);
	ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ?
							true : false;

	return 0;
}

/*
 * Read antenna infos from eeprom
 *
 * Unpacks the 11 6-bit antenna control words for @mode from five
 * consecutive EEPROM words (the fields straddle word boundaries, hence
 * the |= continuations), then builds the antenna switch tables.
 * Advances *offset past the words consumed.
 */
static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
		unsigned int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	u32 o = *offset;
	u16 val;
	int i = 0;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_switch_settling[mode]	= (val >> 8) & 0x7f;
	ee->ee_atn_tx_rx[mode]		= (val >> 2) & 0x3f;
	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
	ee->ee_ant_control[mode][i++]	= val & 0x3f;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_ant_control[mode][i++]	= (val >> 10) & 0x3f;
	ee->ee_ant_control[mode][i++]	= (val >> 4) & 0x3f;
	ee->ee_ant_control[mode][i]	= (val << 2) & 0x3f;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_ant_control[mode][i++]	|= (val >> 14) & 0x3;
	ee->ee_ant_control[mode][i++]	= (val >> 8) & 0x3f;
	ee->ee_ant_control[mode][i++]	= (val >> 2) & 0x3f;
	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
	ee->ee_ant_control[mode][i++]	= val & 0x3f;

	/* Get antenna switch tables */
	ah->ah_ant_ctl[mode][AR5K_ANT_CTL] =
	    (ee->ee_ant_control[mode][0] << 4);
	ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] =
	     ee->ee_ant_control[mode][1]	|
	    (ee->ee_ant_control[mode][2] << 6)	|
	    (ee->ee_ant_control[mode][3] << 12) |
	    (ee->ee_ant_control[mode][4] << 18) |
	    (ee->ee_ant_control[mode][5] << 24);
	ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] =
	     ee->ee_ant_control[mode][6]	|
	    (ee->ee_ant_control[mode][7] << 6)	|
	    (ee->ee_ant_control[mode][8] << 12) |
	    (ee->ee_ant_control[mode][9] << 18) |
	    (ee->ee_ant_control[mode][10] << 24);

	/* return new offset */
	*offset = o;

	return 0;
}

/*
 * Read supported modes and some mode-specific calibration data
 * from eeprom
 *
 * Parses the per-mode section (OB/DB, thresholds, gains, frequency
 * piers, turbo parameters) starting at *offset.  Field layout depends
 * heavily on the EEPROM version; the sequential AR5K_EEPROM_READ(o++, ...)
 * calls must stay in exactly this order.
 */
static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
		unsigned int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	u32 o = *offset;
	u16 val;

	ee->ee_n_piers[mode] = 0;
	AR5K_EEPROM_READ(o++, val);
	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
	switch (mode) {
	case AR5K_EEPROM_MODE_11A:
		ee->ee_ob[mode][3]	= (val >> 5) & 0x7;
		ee->ee_db[mode][3]	= (val >> 2) & 0x7;
		ee->ee_ob[mode][2]	= (val << 1) & 0x7;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_ob[mode][2]	|= (val >> 15) & 0x1;
		ee->ee_db[mode][2]	= (val >> 12) & 0x7;
		ee->ee_ob[mode][1]	= (val >> 9) & 0x7;
		ee->ee_db[mode][1]	= (val >> 6) & 0x7;
		ee->ee_ob[mode][0]	= (val >> 3) & 0x7;
		ee->ee_db[mode][0]	= val & 0x7;
		break;
	case AR5K_EEPROM_MODE_11G:
	case AR5K_EEPROM_MODE_11B:
		ee->ee_ob[mode][1]	= (val >> 4) & 0x7;
		ee->ee_db[mode][1]	= val & 0x7;
		break;
	}

	AR5K_EEPROM_READ(o++, val);
	ee->ee_tx_end2xlna_enable[mode]	= (val >> 8) & 0xff;
	ee->ee_thr_62[mode]		= val & 0xff;

	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
		ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_tx_end2xpa_disable[mode]	= (val >> 8) & 0xff;
	ee->ee_tx_frm2xpa_enable[mode]	= val & 0xff;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_pga_desired_size[mode]	= (val >> 8) & 0xff;

	/* Noise floor threshold is an 8-bit two's-complement value. */
	if ((val & 0xff) & 0x80)
		ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
	else
		ee->ee_noise_floor_thr[mode] = val & 0xff;

	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
		ee->ee_noise_floor_thr[mode] =
		    mode == AR5K_EEPROM_MODE_11A ? -54 : -1;

	AR5K_EEPROM_READ(o++, val);
	ee->ee_xlna_gain[mode]		= (val >> 5) & 0xff;
	ee->ee_x_gain[mode]		= (val >> 1) & 0xf;
	ee->ee_xpd[mode]		= val & 0x1;

	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
	    mode != AR5K_EEPROM_MODE_11B)
		ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;

	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
		AR5K_EEPROM_READ(o++, val);
		ee->ee_false_detect[mode] = (val >> 6) & 0x7f;

		if (mode == AR5K_EEPROM_MODE_11A)
			ee->ee_xr_power[mode] = val & 0x3f;
		else {
			/* b_DB_11[bg] and b_OB_11[bg] */
			ee->ee_ob[mode][0] = val & 0x7;
			ee->ee_db[mode][0] = (val >> 3) & 0x7;
		}
	}

	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
		ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
		ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
	} else {
		ee->ee_i_gain[mode] = (val >> 13) & 0x7;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_i_gain[mode] |= (val << 3) & 0x38;

		if (mode == AR5K_EEPROM_MODE_11G) {
			ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
			if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6)
				ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
		}
	}

	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
	    mode == AR5K_EEPROM_MODE_11A) {
		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
	}

	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0)
		goto done;

	/* Note: >= v5 have bg freq piers on another location
	 * so these freq piers are ignored for >= v5 (should be 0xff
	 * anyway) */
	switch (mode) {
	case AR5K_EEPROM_MODE_11A:
		if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
			break;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_margin_tx_rx[mode] = val & 0x3f;
		break;
	case AR5K_EEPROM_MODE_11B:
		AR5K_EEPROM_READ(o++, val);

		ee->ee_pwr_cal_b[0].freq =
			ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
		if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		ee->ee_pwr_cal_b[1].freq =
			ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
		if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_pwr_cal_b[2].freq =
			ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
		if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
			ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
		break;
	case AR5K_EEPROM_MODE_11G:
		AR5K_EEPROM_READ(o++, val);

		ee->ee_pwr_cal_g[0].freq =
			ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
		if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		ee->ee_pwr_cal_g[1].freq =
			ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
		if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_turbo_max_power[mode] = val & 0x7f;
		ee->ee_xr_power[mode] = (val >> 7) & 0x3f;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_pwr_cal_g[2].freq =
			ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
		if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS)
			ee->ee_n_piers[mode]++;

		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
			ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;

		AR5K_EEPROM_READ(o++, val);
		ee->ee_i_cal[mode] = (val >> 5) & 0x3f;
		ee->ee_q_cal[mode] = val & 0x1f;

		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
			AR5K_EEPROM_READ(o++, val);
			ee->ee_cck_ofdm_gain_delta = val & 0xff;
		}
		break;
	}

	/*
	 * Read turbo mode information on newer EEPROM versions
	 */
	if (ee->ee_version < AR5K_EEPROM_VERSION_5_0)
		goto done;

	switch (mode) {
	case AR5K_EEPROM_MODE_11A:
		ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f;

		ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7;
		AR5K_EEPROM_READ(o++, val);
		ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3;
		ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f;

		ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f;
		AR5K_EEPROM_READ(o++, val);
		ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7;
		ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff;

		if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >= 2)
			ee->ee_pd_gain_overlap = (val >> 9) & 0xf;
		break;
	case AR5K_EEPROM_MODE_11G:
		ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f;

		ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7;
		AR5K_EEPROM_READ(o++, val);
		ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1;
		ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f;

		ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f;
		AR5K_EEPROM_READ(o++, val);
		ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5;
		ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff;
		break;
	}

done:
	/* return new offset */
	*offset = o;

	return 0;
}

/* Read mode-specific data (except power calibration data)
 *
 * Iterates the 11a/11b/11g sections: antenna info first, then the
 * mode parameters.  Returns the first error from the sub-parsers.
 */
static int
ath5k_eeprom_init_modes(struct ath5k_hw *ah)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	u32 mode_offset[3];
	unsigned int mode;
	u32 offset;
	int ret;

	/*
	 * Get values for all modes
	 */
	mode_offset[AR5K_EEPROM_MODE_11A] =
		AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
	mode_offset[AR5K_EEPROM_MODE_11B] =
		AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
	mode_offset[AR5K_EEPROM_MODE_11G] =
		AR5K_EEPROM_MODES_11G(ah->ah_ee_version);

	ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] =
		AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);

	for (mode = AR5K_EEPROM_MODE_11A;
	     mode <= AR5K_EEPROM_MODE_11G; mode++) {
		offset = mode_offset[mode];

		ret = ath5k_eeprom_read_ants(ah, &offset, mode);
		if (ret)
			return ret;

		ret = ath5k_eeprom_read_modes(ah, &offset, mode);
		if (ret)
			return ret;
	}

	/* override for older eeprom versions for better performance */
	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) {
		ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15;
		ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28;
		ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28;
	}

	return 0;
}

/* Read the frequency piers for each mode (mostly used on newer eeproms
 * with 0xff frequency mask)
 *
 * Each EEPROM word carries two 8-bit channel bins; a zero bin terminates
 * the list.  Fills pc[].freq and updates ee_n_piers[mode].
 */
static inline int
ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
			struct ath5k_chan_pcal_info *pc, unsigned int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	int o = *offset;
	int i = 0;
	u8 freq1, freq2;
	u16 val;

	ee->ee_n_piers[mode] = 0;
	while (i < max) {
		AR5K_EEPROM_READ(o++, val);

		freq1 = val & 0xff;
		if (!freq1)
			break;

		pc[i++].freq = ath5k_eeprom_bin2freq(ee, freq1, mode);
		ee->ee_n_piers[mode]++;

		freq2 = (val >> 8) & 0xff;
		if (!freq2)
			break;

		pc[i++].freq = ath5k_eeprom_bin2freq(ee, freq2, mode);
		ee->ee_n_piers[mode]++;
	}

	/* return new offset */
	*offset = o;

	return 0;
}

/* Read frequency piers for 802.11a
 *
 * On >= v3.3 EEPROMs the piers come from the generic freq list; older
 * EEPROMs pack 10 piers into 7-bit fields across five words.
 */
static int
ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
	int i;
	u16 val;
	u8 mask;

	if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
		ath5k_eeprom_read_freq_list(ah, &offset,
			AR5K_EEPROM_N_5GHZ_CHAN, pcal,
			AR5K_EEPROM_MODE_11A);
	} else {
		mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version);

		AR5K_EEPROM_READ(offset++, val);
		pcal[0].freq  = (val >> 9) & mask;
		pcal[1].freq  = (val >> 2) & mask;
		pcal[2].freq  = (val << 5) & mask;

		AR5K_EEPROM_READ(offset++, val);
		pcal[2].freq |= (val >> 11) & 0x1f;
		pcal[3].freq  = (val >> 4) & mask;
		pcal[4].freq  = (val << 3) & mask;

		AR5K_EEPROM_READ(offset++, val);
		pcal[4].freq |= (val >> 13) & 0x7;
		pcal[5].freq  = (val >> 6) & mask;
		pcal[6].freq  = (val << 1) & mask;

		AR5K_EEPROM_READ(offset++, val);
		pcal[6].freq |= (val >> 15) & 0x1;
		pcal[7].freq  = (val >> 8) & mask;
		pcal[8].freq  = (val >> 1) & mask;
		pcal[9].freq  = (val << 6) & mask;

		AR5K_EEPROM_READ(offset++, val);
		pcal[9].freq |= (val >> 10) & 0x3f;

		/* Fixed number of piers */
		ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10;

		for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) {
			pcal[i].freq = ath5k_eeprom_bin2freq(ee,
				pcal[i].freq, AR5K_EEPROM_MODE_11A);
		}
	}

	return 0;
}

/* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */
static inline int
ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info *pcal;

	switch (mode) {
	case AR5K_EEPROM_MODE_11B:
		pcal = ee->ee_pwr_cal_b;
		break;
	case AR5K_EEPROM_MODE_11G:
		pcal = ee->ee_pwr_cal_g;
		break;
	default:
		return -EINVAL;
	}

	ath5k_eeprom_read_freq_list(ah, &offset,
		AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal,
		mode);

	return 0;
}

/*
 * Read power calibration for RF5111 chips
 *
 * For RF5111 we have an XPD -eXternal Power Detector- curve
 * for each calibrated channel. Each curve has 0,5dB Power steps
 * on x axis and PCDAC steps (offsets) on y axis and looks like an
 * exponential function. To recreate the curve we read 11 points
 * here and interpolate later.
 */

/* Used to match PCDAC steps with power values on RF5111 chips
 * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
 * steps that match with the power values we read from eeprom. On
 * older eeprom versions (< 3.2) these steps are equally spaced at
 * 10% of the pcdac curve -until the curve reaches its maximum-
 * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
 * these 11 steps are spaced in a different way. This function returns
 * the pcdac steps based on eeprom version and curve min/max so that we
 * can have pcdac/pwr points.
 */
static inline void
ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
{
	static const u16 intercepts3[] = {
		0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100
	};
	static const u16 intercepts3_2[] = {
		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
	};
	const u16 *ip;
	int i;

	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
		ip = intercepts3_2;
	else
		ip = intercepts3;

	/* Linear interpolation between min and max at each percentage. */
	for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
		vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
}

/* Free the per-pier pd_curves allocations for @mode.  Safe to call on
 * partially-initialized data (used as the error path of the converters). */
static int
ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info *chinfo;
	u8 pier, pdg;

	switch (mode) {
	case AR5K_EEPROM_MODE_11A:
		if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
			return 0;
		chinfo = ee->ee_pwr_cal_a;
		break;
	case AR5K_EEPROM_MODE_11B:
		if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
			return 0;
		chinfo = ee->ee_pwr_cal_b;
		break;
	case AR5K_EEPROM_MODE_11G:
		if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
			return 0;
		chinfo = ee->ee_pwr_cal_g;
		break;
	default:
		return -EINVAL;
	}

	for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
		if (!chinfo[pier].pd_curves)
			continue;

		for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
			struct ath5k_pdgain_info *pd =
					&chinfo[pier].pd_curves[pdg];

			kfree(pd->pd_step);
			kfree(pd->pd_pwr);
		}

		kfree(chinfo[pier].pd_curves);
	}

	return 0;
}

/* Convert RF5111 specific data to generic raw data
 * used by interpolation code
 *
 * Allocates one pd_curves entry per pier and fills the single RF5111
 * curve (the curve slot is selected by the first clear bit of
 * ee_x_gain).  Returns 0 or -ENOMEM (all partial allocations freed).
 */
static int
ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
				struct ath5k_chan_pcal_info *chinfo)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info_rf5111 *pcinfo;
	struct ath5k_pdgain_info *pd;
	u8 pier, point, idx;
	u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];

	/* Fill raw data for each calibration pier */
	for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {

		pcinfo = &chinfo[pier].rf5111_info;

		/* Allocate pd_curves for this cal pier */
		chinfo[pier].pd_curves =
			kcalloc(AR5K_EEPROM_N_PD_CURVES,
				sizeof(struct ath5k_pdgain_info),
				GFP_KERNEL);

		if (!chinfo[pier].pd_curves)
			goto err_out;

		/* Only one curve for RF5111
		 * find out which one and place
		 * in pd_curves.
		 * Note: ee_x_gain is reversed here */
		for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {

			if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
				pdgain_idx[0] = idx;
				break;
			}
		}

		/* NOTE(review): if all low bits of ee_x_gain are set the
		 * loop above never breaks and idx ends up equal to
		 * AR5K_EEPROM_N_PD_CURVES, making pd_curves[idx] below an
		 * out-of-bounds access — presumably such EEPROM content
		 * never occurs in practice; verify against the hardware
		 * spec. */
		ee->ee_pd_gains[mode] = 1;

		pd = &chinfo[pier].pd_curves[idx];

		pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;

		/* Allocate pd points for this curve */
		pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
					sizeof(u8), GFP_KERNEL);

		if (!pd->pd_step)
			goto err_out;

		pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
					sizeof(s16), GFP_KERNEL);

		if (!pd->pd_pwr)
			goto err_out;

		/* Fill raw dataset
		 * (convert power to 0.25dB units
		 * for RF5112 compatibility) */
		for (point = 0; point < pd->pd_points; point++) {

			/* Absolute values */
			pd->pd_pwr[point] = 2 * pcinfo->pwr[point];

			/* Already sorted */
			pd->pd_step[point] = pcinfo->pcdac[point];
		}

		/* Set min/max pwr */
		chinfo[pier].min_pwr = pd->pd_pwr[0];
		chinfo[pier].max_pwr = pd->pd_pwr[10];

	}

	return 0;

err_out:
	ath5k_eeprom_free_pcal_info(ah, mode);
	return -ENOMEM;
}

/* Parse EEPROM data
 *
 * Reads the RF5111 calibration groups for @mode: frequency piers
 * (fixed for 2.4 GHz modes), then per-pier pcdac min/max and the 11
 * power points, and hands the result to the converter above.
 */
static int
ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info *pcal;
	int offset, ret;
	int i;
	u16 val;

	offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
	switch (mode) {
	case AR5K_EEPROM_MODE_11A:
		if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
			return 0;

		ret = ath5k_eeprom_init_11a_pcal_freq(ah,
			offset + AR5K_EEPROM_GROUP1_OFFSET);
		if (ret < 0)
			return ret;

		offset += AR5K_EEPROM_GROUP2_OFFSET;
		pcal = ee->ee_pwr_cal_a;
		break;
	case AR5K_EEPROM_MODE_11B:
		if (!AR5K_EEPROM_HDR_11B(ee->ee_header) &&
		    !AR5K_EEPROM_HDR_11G(ee->ee_header))
			return 0;

		pcal = ee->ee_pwr_cal_b;
		offset += AR5K_EEPROM_GROUP3_OFFSET;

		/* fixed piers */
		pcal[0].freq = 2412;
		pcal[1].freq = 2447;
		pcal[2].freq = 2484;
		ee->ee_n_piers[mode] = 3;
		break;
	case AR5K_EEPROM_MODE_11G:
		if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
			return 0;

		pcal = ee->ee_pwr_cal_g;
		offset += AR5K_EEPROM_GROUP4_OFFSET;

		/* fixed piers */
		pcal[0].freq = 2312;
		pcal[1].freq = 2412;
		pcal[2].freq = 2484;
		ee->ee_n_piers[mode] = 3;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ee->ee_n_piers[mode]; i++) {
		struct ath5k_chan_pcal_info_rf5111 *cdata =
			&pcal[i].rf5111_info;

		AR5K_EEPROM_READ(offset++, val);
		cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M);
		cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M);
		cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M);

		AR5K_EEPROM_READ(offset++, val);
		cdata->pwr[0] |= ((val >> 14) & 0x3);
		cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M);
		cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M);
		cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M);

		AR5K_EEPROM_READ(offset++, val);
		cdata->pwr[3] |= ((val >> 12) & 0xf);
		cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M);
		cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M);

		AR5K_EEPROM_READ(offset++, val);
		cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M);
		cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M);
		cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M);

		AR5K_EEPROM_READ(offset++, val);
		cdata->pwr[8] |= ((val >> 14) & 0x3);
		cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M);
		cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M);

		ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min,
			cdata->pcdac_max, cdata->pcdac);
	}

	return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
}


/*
 * Read power calibration for RF5112 chips
 *
 * For RF5112 we have 4 XPD -eXternal Power Detector- curves
 * for each calibrated channel on 0, -6, -12 and -18dBm but we only
 * use the higher (3) and the lower (0) curves. Each curve has 0.5dB
 * power steps on x axis and PCDAC steps on y axis and looks like a
 * linear function. To recreate the curve and pass the power values
 * on hw, we read 4 points for xpd 0 (lower gain -> max power)
 * and 3 points for xpd 3 (higher gain -> lower power) here and
 * interpolate later.
 *
 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
 */

/* Convert RF5112 specific data to generic raw data
 * used by interpolation code
 *
 * For each pier, builds up to two curves: curve 0 (lowest gain, max
 * power, 4 points, pcdac deltas accumulated) and curve 1 (highest
 * gain, min power, 3 points, absolute pcdac values).
 * Returns 0 or -ENOMEM (all partial allocations freed).
 */
static int
ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
				struct ath5k_chan_pcal_info *chinfo)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info_rf5112 *pcinfo;
	u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
	unsigned int pier, pdg, point;

	/* Fill raw data for each calibration pier */
	for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {

		pcinfo = &chinfo[pier].rf5112_info;

		/* Allocate pd_curves for this cal pier */
		chinfo[pier].pd_curves =
				kcalloc(AR5K_EEPROM_N_PD_CURVES,
					sizeof(struct ath5k_pdgain_info),
					GFP_KERNEL);

		if (!chinfo[pier].pd_curves)
			goto err_out;

		/* Fill pd_curves */
		for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {

			u8 idx = pdgain_idx[pdg];
			struct ath5k_pdgain_info *pd =
					&chinfo[pier].pd_curves[idx];

			/* Lowest gain curve (max power) */
			if (pdg == 0) {
				/* One more point for better accuracy */
				pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;

				/* Allocate pd points for this curve */
				pd->pd_step = kcalloc(pd->pd_points,
						sizeof(u8), GFP_KERNEL);

				if (!pd->pd_step)
					goto err_out;

				pd->pd_pwr = kcalloc(pd->pd_points,
						sizeof(s16), GFP_KERNEL);

				if (!pd->pd_pwr)
					goto err_out;

				/* Fill raw dataset
				 * (all power levels are in 0.25dB units) */
				pd->pd_step[0] = pcinfo->pcdac_x0[0];
				pd->pd_pwr[0] = pcinfo->pwr_x0[0];

				for (point = 1; point < pd->pd_points;
				point++) {
					/* Absolute values */
					pd->pd_pwr[point] =
						pcinfo->pwr_x0[point];

					/* Deltas */
					pd->pd_step[point] =
						pd->pd_step[point - 1] +
						pcinfo->pcdac_x0[point];
				}

				/* Set min power for this frequency */
				chinfo[pier].min_pwr = pd->pd_pwr[0];

			/* Highest gain curve (min power) */
			} else if (pdg == 1) {

				pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;

				/* Allocate pd points for this curve */
				pd->pd_step = kcalloc(pd->pd_points,
						sizeof(u8), GFP_KERNEL);

				if (!pd->pd_step)
					goto err_out;

				pd->pd_pwr = kcalloc(pd->pd_points,
						sizeof(s16), GFP_KERNEL);

				if (!pd->pd_pwr)
					goto err_out;

				/* Fill raw dataset
				 * (all power levels are in 0.25dB units) */
				for (point = 0; point < pd->pd_points;
				point++) {
					/* Absolute values */
					pd->pd_pwr[point] =
						pcinfo->pwr_x3[point];

					/* Fixed points */
					pd->pd_step[point] =
						pcinfo->pcdac_x3[point];
				}

				/* Since we have a higher gain curve
				 * override min power */
				chinfo[pier].min_pwr = pd->pd_pwr[0];
			}
		}
	}

	return 0;

err_out:
	ath5k_eeprom_free_pcal_info(ah, mode);
	return -ENOMEM;
}

/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
	struct ath5k_chan_pcal_info *gen_chan_info;
	u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
	u32 offset;
	u8 i, c;
	u16 val;
	u8 pd_gains = 0;

	/* Count how many curves we have and
	 * identify them (which one of the 4
	 * available curves we have on each count).
* Curves are stored from lower (x0) to * higher (x3) gain */ for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) { /* ee_x_gain[mode] is x gain mask */ if ((ee->ee_x_gain[mode] >> i) & 0x1) pdgain_idx[pd_gains++] = i; } ee->ee_pd_gains[mode] = pd_gains; if (pd_gains == 0 || pd_gains > 2) return -EINVAL; switch (mode) { case AR5K_EEPROM_MODE_11A: /* * Read 5GHz EEPROM channels */ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); ath5k_eeprom_init_11a_pcal_freq(ah, offset); offset += AR5K_EEPROM_GROUP2_OFFSET; gen_chan_info = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += AR5K_EEPROM_GROUP3_OFFSET; /* NB: frequency piers parsed during mode init */ gen_chan_info = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += AR5K_EEPROM_GROUP4_OFFSET; else if (AR5K_EEPROM_HDR_11B(ee->ee_header)) offset += AR5K_EEPROM_GROUP2_OFFSET; /* NB: frequency piers parsed during mode init */ gen_chan_info = ee->ee_pwr_cal_g; break; default: return -EINVAL; } for (i = 0; i < ee->ee_n_piers[mode]; i++) { chan_pcal_info = &gen_chan_info[i].rf5112_info; /* Power values in quarter dB * for the lower xpd gain curve * (0 dBm -> higher output power) */ for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff); chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff); } /* PCDAC steps * corresponding to the above power * measurements */ AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pcdac_x0[1] = (val & 0x1f); chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); /* Power values in quarter dB * for the higher xpd gain curve * (18 dBm -> lower output power) */ AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff); chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) 
& 0xff); AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x3[2] = (val & 0xff); /* PCDAC steps * corresponding to the above power * measurements (fixed) */ chan_pcal_info->pcdac_x3[0] = 20; chan_pcal_info->pcdac_x3[1] = 35; chan_pcal_info->pcdac_x3[2] = 63; if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f); /* Last xpd0 power level is also channel maximum */ gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; } else { chan_pcal_info->pcdac_x0[0] = 1; gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff); } } return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info); } /* * Read power calibration for RF2413 chips * * For RF2413 we have a Power to PDDAC table (Power Detector) * instead of a PCDAC and 4 pd gain curves for each calibrated channel. * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y * axis and looks like an exponential function like the RF5111 curve. * * To recreate the curves we read here the points and interpolate * later. Note that in most cases only 2 (higher and lower) curves are * used (like RF5112) but vendors have the opportunity to include all * 4 curves on eeprom. The final curve (higher power) has an extra * point for better accuracy like RF5112. */ /* For RF2413 power calibration data doesn't start on a fixed location and * if a mode is not supported, its section is missing -not zeroed-. * So we need to calculate the starting offset for each section by using * these two functions */ /* Return the size of each section based on the mode and the number of pd * gains available (maximum 4). */ static inline unsigned int ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode) { static const unsigned int pdgains_size[] = { 4, 6, 9, 12 }; unsigned int sz; sz = pdgains_size[ee->ee_pd_gains[mode] - 1]; sz *= ee->ee_n_piers[mode]; return sz; } /* Return the starting offset for a section based on the modes supported * and each section's size. 
*/ static unsigned int ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) { u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4); switch (mode) { case AR5K_EEPROM_MODE_11G: if (AR5K_EEPROM_HDR_11B(ee->ee_header)) offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; /* fall through */ case AR5K_EEPROM_MODE_11B: if (AR5K_EEPROM_HDR_11A(ee->ee_header)) offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + AR5K_EEPROM_N_5GHZ_CHAN / 2; /* fall through */ case AR5K_EEPROM_MODE_11A: break; default: break; } return offset; } /* Convert RF2413 specific data to generic raw data * used by interpolation code */ static int ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode, struct ath5k_chan_pcal_info *chinfo) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf2413 *pcinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; unsigned int pier, pdg, point; /* Fill raw data for each calibration pier */ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { pcinfo = &chinfo[pier].rf2413_info; /* Allocate pd_curves for this cal pier */ chinfo[pier].pd_curves = kcalloc(AR5K_EEPROM_N_PD_CURVES, sizeof(struct ath5k_pdgain_info), GFP_KERNEL); if (!chinfo[pier].pd_curves) goto err_out; /* Fill pd_curves */ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { u8 idx = pdgain_idx[pdg]; struct ath5k_pdgain_info *pd = &chinfo[pier].pd_curves[idx]; /* One more point for the highest power * curve (lowest gain) */ if (pdg == ee->ee_pd_gains[mode] - 1) pd->pd_points = AR5K_EEPROM_N_PD_POINTS; else pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1; /* Allocate pd points for this curve */ pd->pd_step = kcalloc(pd->pd_points, sizeof(u8), GFP_KERNEL); if (!pd->pd_step) goto err_out; pd->pd_pwr = kcalloc(pd->pd_points, sizeof(s16), GFP_KERNEL); if (!pd->pd_pwr) goto err_out; /* Fill raw dataset * convert all pwr levels to * quarter dB for RF5112 compatibility */ pd->pd_step[0] = 
pcinfo->pddac_i[pdg]; pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg]; for (point = 1; point < pd->pd_points; point++) { pd->pd_pwr[point] = pd->pd_pwr[point - 1] + 2 * pcinfo->pwr[pdg][point - 1]; pd->pd_step[point] = pd->pd_step[point - 1] + pcinfo->pddac[pdg][point - 1]; } /* Highest gain curve -> min power */ if (pdg == 0) chinfo[pier].min_pwr = pd->pd_pwr[0]; /* Lowest gain curve -> max power */ if (pdg == ee->ee_pd_gains[mode] - 1) chinfo[pier].max_pwr = pd->pd_pwr[pd->pd_points - 1]; } } return 0; err_out: ath5k_eeprom_free_pcal_info(ah, mode); return -ENOMEM; } /* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf2413 *pcinfo; struct ath5k_chan_pcal_info *chinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; int idx, i; u16 val; u8 pd_gains = 0; /* Count how many curves we have and * identify them (which one of the 4 * available curves we have on each count). 
* Curves are stored from higher to * lower gain so we go backwards */ for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) { /* ee_x_gain[mode] is x gain mask */ if ((ee->ee_x_gain[mode] >> idx) & 0x1) pdgain_idx[pd_gains++] = idx; } ee->ee_pd_gains[mode] = pd_gains; if (pd_gains == 0) return -EINVAL; offset = ath5k_cal_data_offset_2413(ee, mode); switch (mode) { case AR5K_EEPROM_MODE_11A: if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) return 0; ath5k_eeprom_init_11a_pcal_freq(ah, offset); offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; chinfo = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) return 0; ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; chinfo = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) return 0; ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; chinfo = ee->ee_pwr_cal_g; break; default: return -EINVAL; } for (i = 0; i < ee->ee_n_piers[mode]; i++) { pcinfo = &chinfo[i].rf2413_info; /* * Read pwr_i, pddac_i and the first * 2 pd points (pwr, pddac) */ AR5K_EEPROM_READ(offset++, val); pcinfo->pwr_i[0] = val & 0x1f; pcinfo->pddac_i[0] = (val >> 5) & 0x7f; pcinfo->pwr[0][0] = (val >> 12) & 0xf; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[0][0] = val & 0x3f; pcinfo->pwr[0][1] = (val >> 6) & 0xf; pcinfo->pddac[0][1] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[0][2] = val & 0xf; pcinfo->pddac[0][2] = (val >> 4) & 0x3f; pcinfo->pwr[0][3] = 0; pcinfo->pddac[0][3] = 0; if (pd_gains > 1) { /* * Pd gain 0 is not the last pd gain * so it only has 2 pd points. * Continue with pd gain 1. 
*/ pcinfo->pwr_i[1] = (val >> 10) & 0x1f; pcinfo->pddac_i[1] = (val >> 15) & 0x1; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac_i[1] |= (val & 0x3F) << 1; pcinfo->pwr[1][0] = (val >> 6) & 0xf; pcinfo->pddac[1][0] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[1][1] = val & 0xf; pcinfo->pddac[1][1] = (val >> 4) & 0x3f; pcinfo->pwr[1][2] = (val >> 10) & 0xf; pcinfo->pddac[1][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[1][2] |= (val & 0xF) << 2; pcinfo->pwr[1][3] = 0; pcinfo->pddac[1][3] = 0; } else if (pd_gains == 1) { /* * Pd gain 0 is the last one so * read the extra point. */ pcinfo->pwr[0][3] = (val >> 10) & 0xf; pcinfo->pddac[0][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[0][3] |= (val & 0xF) << 2; } /* * Proceed with the other pd_gains * as above. */ if (pd_gains > 2) { pcinfo->pwr_i[2] = (val >> 4) & 0x1f; pcinfo->pddac_i[2] = (val >> 9) & 0x7f; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[2][0] = (val >> 0) & 0xf; pcinfo->pddac[2][0] = (val >> 4) & 0x3f; pcinfo->pwr[2][1] = (val >> 10) & 0xf; pcinfo->pddac[2][1] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[2][1] |= (val & 0xF) << 2; pcinfo->pwr[2][2] = (val >> 4) & 0xf; pcinfo->pddac[2][2] = (val >> 8) & 0x3f; pcinfo->pwr[2][3] = 0; pcinfo->pddac[2][3] = 0; } else if (pd_gains == 2) { pcinfo->pwr[1][3] = (val >> 4) & 0xf; pcinfo->pddac[1][3] = (val >> 8) & 0x3f; } if (pd_gains > 3) { pcinfo->pwr_i[3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2; pcinfo->pddac_i[3] = (val >> 3) & 0x7f; pcinfo->pwr[3][0] = (val >> 10) & 0xf; pcinfo->pddac[3][0] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[3][0] |= (val & 0xF) << 2; pcinfo->pwr[3][1] = (val >> 4) & 0xf; pcinfo->pddac[3][1] = (val >> 8) & 0x3f; pcinfo->pwr[3][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2; pcinfo->pddac[3][2] = (val >> 2) 
& 0x3f; pcinfo->pwr[3][3] = (val >> 8) & 0xf; pcinfo->pddac[3][3] = (val >> 12) & 0xF; AR5K_EEPROM_READ(offset++, val); pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4; } else if (pd_gains == 3) { pcinfo->pwr[2][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2; pcinfo->pddac[2][3] = (val >> 2) & 0x3f; } } return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo); } /* * Read per rate target power (this is the maximum tx power * supported by the card). This info is used when setting * tx power, no matter the channel. * * This also works for v5 EEPROMs. */ static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_rate_pcal_info *rate_pcal_info; u8 *rate_target_pwr_num; u32 offset; u16 val; int i; offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; switch (mode) { case AR5K_EEPROM_MODE_11A: offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_a; ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; break; case AR5K_EEPROM_MODE_11B: offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_b; ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */ break; case AR5K_EEPROM_MODE_11G: offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_g; ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN; break; default: return -EINVAL; } /* Different freq mask for older eeproms (<= v3.2) */ if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) { for (i = 0; i < (*rate_target_pwr_num); i++) { AR5K_EEPROM_READ(offset++, val); rate_pcal_info[i].freq = ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode); rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f); rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f; AR5K_EEPROM_READ(offset++, 
val); if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || val == 0) { (*rate_target_pwr_num) = i; break; } rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7); rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f); rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f); } } else { for (i = 0; i < (*rate_target_pwr_num); i++) { AR5K_EEPROM_READ(offset++, val); rate_pcal_info[i].freq = ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f); rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f; AR5K_EEPROM_READ(offset++, val); if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || val == 0) { (*rate_target_pwr_num) = i; break; } rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf; rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f); rate_pcal_info[i].target_power_54 = (val & 0x3f); } } return 0; } /* * Read per channel calibration info from EEPROM * * This info is used to calibrate the baseband power table. Imagine * that for each channel there is a power curve that's hw specific * (depends on amplifier etc) and we try to "correct" this curve using * offsets we pass on to phy chip (baseband -> before amplifier) so that * it can use accurate power values when setting tx power (takes amplifier's * performance on each channel into account). * * EEPROM provides us with the offsets for some pre-calibrated channels * and we have to interpolate to create the full table for these channels and * also the table for any channel. 
*/ static int ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; int (*read_pcal)(struct ath5k_hw *hw, int mode); int mode; int err; if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) && (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1)) read_pcal = ath5k_eeprom_read_pcal_info_5112; else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) && (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2)) read_pcal = ath5k_eeprom_read_pcal_info_2413; else read_pcal = ath5k_eeprom_read_pcal_info_5111; for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { err = read_pcal(ah, mode); if (err) return err; err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode); if (err < 0) return err; } return 0; } /* Read conformance test limits used for regulatory control */ static int ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_edge_power *rep; unsigned int fmask, pmask; unsigned int ctl_mode; int i, j; u32 offset; u16 val; pmask = AR5K_EEPROM_POWER_M; fmask = AR5K_EEPROM_FREQ_M(ee->ee_version); offset = AR5K_EEPROM_CTL(ee->ee_version); ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version); for (i = 0; i < ee->ee_ctls; i += 2) { AR5K_EEPROM_READ(offset++, val); ee->ee_ctl[i] = (val >> 8) & 0xff; ee->ee_ctl[i + 1] = val & 0xff; } offset = AR5K_EEPROM_GROUP8_OFFSET; if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0) offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) - AR5K_EEPROM_GROUP5_OFFSET; else offset += AR5K_EEPROM_GROUPS_START(ee->ee_version); rep = ee->ee_ctl_pwr; for (i = 0; i < ee->ee_ctls; i++) { switch (ee->ee_ctl[i] & AR5K_CTL_MODE_M) { case AR5K_CTL_11A: case AR5K_CTL_TURBO: ctl_mode = AR5K_EEPROM_MODE_11A; break; default: ctl_mode = AR5K_EEPROM_MODE_11G; break; } if (ee->ee_ctl[i] == 0) { if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) offset += 8; else offset += 7; rep += AR5K_EEPROM_N_EDGES; continue; } if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) 
{ for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { AR5K_EEPROM_READ(offset++, val); rep[j].freq = (val >> 8) & fmask; rep[j + 1].freq = val & fmask; } for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { AR5K_EEPROM_READ(offset++, val); rep[j].edge = (val >> 8) & pmask; rep[j].flag = (val >> 14) & 1; rep[j + 1].edge = val & pmask; rep[j + 1].flag = (val >> 6) & 1; } } else { AR5K_EEPROM_READ(offset++, val); rep[0].freq = (val >> 9) & fmask; rep[1].freq = (val >> 2) & fmask; rep[2].freq = (val << 5) & fmask; AR5K_EEPROM_READ(offset++, val); rep[2].freq |= (val >> 11) & 0x1f; rep[3].freq = (val >> 4) & fmask; rep[4].freq = (val << 3) & fmask; AR5K_EEPROM_READ(offset++, val); rep[4].freq |= (val >> 13) & 0x7; rep[5].freq = (val >> 6) & fmask; rep[6].freq = (val << 1) & fmask; AR5K_EEPROM_READ(offset++, val); rep[6].freq |= (val >> 15) & 0x1; rep[7].freq = (val >> 8) & fmask; rep[0].edge = (val >> 2) & pmask; rep[1].edge = (val << 4) & pmask; AR5K_EEPROM_READ(offset++, val); rep[1].edge |= (val >> 12) & 0xf; rep[2].edge = (val >> 6) & pmask; rep[3].edge = val & pmask; AR5K_EEPROM_READ(offset++, val); rep[4].edge = (val >> 10) & pmask; rep[5].edge = (val >> 4) & pmask; rep[6].edge = (val << 2) & pmask; AR5K_EEPROM_READ(offset++, val); rep[6].edge |= (val >> 14) & 0x3; rep[7].edge = (val >> 8) & pmask; } for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) { rep[j].freq = ath5k_eeprom_bin2freq(ee, rep[j].freq, ctl_mode); } rep += AR5K_EEPROM_N_EDGES; } return 0; } static int ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 offset; u16 val; int ret = 0, i; offset = AR5K_EEPROM_CTL(ee->ee_version) + AR5K_EEPROM_N_CTLS(ee->ee_version); if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) { /* No spur info for 5GHz */ ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR; /* 2 channels for 2GHz (2464/2420) */ ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1; ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2; 
ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR; } else if (ee->ee_version >= AR5K_EEPROM_VERSION_5_3) { for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) { AR5K_EEPROM_READ(offset, val); ee->ee_spur_chans[i][0] = val; AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS, val); ee->ee_spur_chans[i][1] = val; offset++; } } return ret; } /***********************\ * Init/Detach functions * \***********************/ /* * Initialize eeprom data structure */ int ath5k_eeprom_init(struct ath5k_hw *ah) { int err; err = ath5k_eeprom_init_header(ah); if (err < 0) return err; err = ath5k_eeprom_init_modes(ah); if (err < 0) return err; err = ath5k_eeprom_read_pcal_info(ah); if (err < 0) return err; err = ath5k_eeprom_read_ctl_info(ah); if (err < 0) return err; err = ath5k_eeprom_read_spur_chans(ah); if (err < 0) return err; return 0; } void ath5k_eeprom_detach(struct ath5k_hw *ah) { u8 mode; for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) ath5k_eeprom_free_pcal_info(ah, mode); } int ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel) { switch (channel->hw_value) { case AR5K_MODE_11A: return AR5K_EEPROM_MODE_11A; case AR5K_MODE_11G: return AR5K_EEPROM_MODE_11G; case AR5K_MODE_11B: return AR5K_EEPROM_MODE_11B; default: return -1; } }
gpl-2.0
shorelinedev/aosp_kernel_geeb
drivers/media/video/gspca/m5602/m5602_po1030.c
5406
18743
/* * Driver for the po1030 sensor * * Copyright (c) 2008 Erik Andrén * Copyright (c) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project. * Copyright (c) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_po1030.h" static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val); static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val); static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val); static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev, __s32 *val); 
static struct v4l2_pix_format po1030_modes[] = { { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 } }; static const struct ctrl po1030_ctrls[] = { #define GAIN_IDX 0 { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "gain", .minimum = 0x00, .maximum = 0x4f, .step = 0x1, .default_value = PO1030_GLOBAL_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER }, .set = po1030_set_gain, .get = po1030_get_gain }, #define EXPOSURE_IDX 1 { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x00, .maximum = 0x02ff, .step = 0x1, .default_value = PO1030_EXPOSURE_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER }, .set = po1030_set_exposure, .get = po1030_get_exposure }, #define RED_BALANCE_IDX 2 { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0xff, .step = 0x1, .default_value = PO1030_RED_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER }, .set = po1030_set_red_balance, .get = po1030_get_red_balance }, #define BLUE_BALANCE_IDX 3 { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0xff, .step = 0x1, .default_value = PO1030_BLUE_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER }, .set = po1030_set_blue_balance, .get = po1030_get_blue_balance }, #define HFLIP_IDX 4 { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "horizontal flip", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set = po1030_set_hflip, .get = po1030_get_hflip }, #define VFLIP_IDX 5 { { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "vertical flip", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set = po1030_set_vflip, .get = po1030_get_vflip }, #define AUTO_WHITE_BALANCE_IDX 6 { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "auto white balance", .minimum = 0, .maximum = 1, 
.step = 1, .default_value = 0, }, .set = po1030_set_auto_white_balance, .get = po1030_get_auto_white_balance }, #define AUTO_EXPOSURE_IDX 7 { { .id = V4L2_CID_EXPOSURE_AUTO, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "auto exposure", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set = po1030_set_auto_exposure, .get = po1030_get_auto_exposure }, #define GREEN_BALANCE_IDX 8 { { .id = M5602_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0xff, .step = 0x1, .default_value = PO1030_GREEN_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER }, .set = po1030_set_green_balance, .get = po1030_get_green_balance }, }; static void po1030_dump_registers(struct sd *sd); int po1030_probe(struct sd *sd) { u8 dev_id_h = 0, i; s32 *sensor_settings; if (force_sensor) { if (force_sensor == PO1030_SENSOR) { pr_info("Forcing a %s sensor\n", po1030.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this * one */ return -ENODEV; } PDEBUG(D_PROBE, "Probing for a po1030 sensor"); /* Run the pre-init to actually probe the unit */ for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { u8 data = preinit_po1030[i][2]; if (preinit_po1030[i][0] == SENSOR) m5602_write_sensor(sd, preinit_po1030[i][1], &data, 1); else m5602_write_bridge(sd, preinit_po1030[i][1], data); } if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1)) return -ENODEV; if (dev_id_h == 0x30) { pr_info("Detected a po1030 sensor\n"); goto sensor_found; } return -ENODEV; sensor_found: sensor_settings = kmalloc( ARRAY_SIZE(po1030_ctrls) * sizeof(s32), GFP_KERNEL); if (!sensor_settings) return -ENOMEM; sd->gspca_dev.cam.cam_mode = po1030_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes); sd->desc->ctrls = po1030_ctrls; sd->desc->nctrls = ARRAY_SIZE(po1030_ctrls); for (i = 0; i < ARRAY_SIZE(po1030_ctrls); i++) sensor_settings[i] = po1030_ctrls[i].qctrl.default_value; sd->sensor_priv = sensor_settings; return 0; } int 
po1030_init(struct sd *sd) { s32 *sensor_settings = sd->sensor_priv; int i, err = 0; /* Init the sensor */ for (i = 0; i < ARRAY_SIZE(init_po1030) && !err; i++) { u8 data[2] = {0x00, 0x00}; switch (init_po1030[i][0]) { case BRIDGE: err = m5602_write_bridge(sd, init_po1030[i][1], init_po1030[i][2]); break; case SENSOR: data[0] = init_po1030[i][2]; err = m5602_write_sensor(sd, init_po1030[i][1], data, 1); break; default: pr_info("Invalid stream command, exiting init\n"); return -EINVAL; } } if (err < 0) return err; if (dump_sensor) po1030_dump_registers(sd); err = po1030_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]); if (err < 0) return err; err = po1030_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]); if (err < 0) return err; err = po1030_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]); if (err < 0) return err; err = po1030_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]); if (err < 0) return err; err = po1030_set_red_balance(&sd->gspca_dev, sensor_settings[RED_BALANCE_IDX]); if (err < 0) return err; err = po1030_set_blue_balance(&sd->gspca_dev, sensor_settings[BLUE_BALANCE_IDX]); if (err < 0) return err; err = po1030_set_green_balance(&sd->gspca_dev, sensor_settings[GREEN_BALANCE_IDX]); if (err < 0) return err; err = po1030_set_auto_white_balance(&sd->gspca_dev, sensor_settings[AUTO_WHITE_BALANCE_IDX]); if (err < 0) return err; err = po1030_set_auto_exposure(&sd->gspca_dev, sensor_settings[AUTO_EXPOSURE_IDX]); return err; } int po1030_start(struct sd *sd) { struct cam *cam = &sd->gspca_dev.cam; int i, err = 0; int width = cam->cam_mode[sd->gspca_dev.curr_mode].width; int height = cam->cam_mode[sd->gspca_dev.curr_mode].height; int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv; u8 data; switch (width) { case 320: data = PO1030_SUBSAMPLING; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) 
return err; data = (width + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 1) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 1) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 6; width -= 1; break; case 640: data = 0; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 7) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) return err; data = (width + 7) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 12; width -= 2; break; } err = m5602_write_bridge(sd, M5602_XB_SENSOR_TYPE, 0x0c); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_LINE_OF_FRAME_H, 0x81); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_PIX_OF_LINE_H, 0x82); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0x01); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, ((ver_offs >> 8) & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); for (i = 0; i < 2 && !err; i++) err = 
m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); return err; } static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[EXPOSURE_IDX]; PDEBUG(D_V4L2, "Exposure read as %d", *val); return 0; } static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[EXPOSURE_IDX] = val; PDEBUG(D_V4L2, "Set exposure to %d", val & 0xffff); i2c_data = ((val & 0xff00) >> 8); PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_H, &i2c_data, 1); if (err < 0) return err; i2c_data = (val & 0xff); PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_M, &i2c_data, 1); return err; } static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[GAIN_IDX]; PDEBUG(D_V4L2, "Read global gain %d", *val); return 0; } static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[GAIN_IDX] = val; i2c_data = val & 0xff; PDEBUG(D_V4L2, "Set global gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_GLOBALGAIN, &i2c_data, 1); return err; } static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[HFLIP_IDX]; PDEBUG(D_V4L2, 
"Read hflip %d", *val); return 0; } static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[HFLIP_IDX] = val; PDEBUG(D_V4L2, "Set hflip %d", val); err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); if (err < 0) return err; i2c_data = (0x7f & i2c_data) | ((val & 0x01) << 7); err = m5602_write_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); return err; } static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[VFLIP_IDX]; PDEBUG(D_V4L2, "Read vflip %d", *val); return 0; } static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[VFLIP_IDX] = val; PDEBUG(D_V4L2, "Set vflip %d", val); err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); if (err < 0) return err; i2c_data = (i2c_data & 0xbf) | ((val & 0x01) << 6); err = m5602_write_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); return err; } static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[RED_BALANCE_IDX]; PDEBUG(D_V4L2, "Read red gain %d", *val); return 0; } static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[RED_BALANCE_IDX] = val; i2c_data = val & 0xff; PDEBUG(D_V4L2, "Set red gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_RED_GAIN, &i2c_data, 1); return err; } static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = 
sensor_settings[BLUE_BALANCE_IDX]; PDEBUG(D_V4L2, "Read blue gain %d", *val); return 0; } static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[BLUE_BALANCE_IDX] = val; i2c_data = val & 0xff; PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_BLUE_GAIN, &i2c_data, 1); return err; } static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[GREEN_BALANCE_IDX]; PDEBUG(D_V4L2, "Read green gain %d", *val); return 0; } static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[GREEN_BALANCE_IDX] = val; i2c_data = val & 0xff; PDEBUG(D_V4L2, "Set green gain to %d", i2c_data); err = m5602_write_sensor(sd, PO1030_GREEN_1_GAIN, &i2c_data, 1); if (err < 0) return err; return m5602_write_sensor(sd, PO1030_GREEN_2_GAIN, &i2c_data, 1); } static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[AUTO_WHITE_BALANCE_IDX]; PDEBUG(D_V4L2, "Auto white balancing is %d", *val); return 0; } static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[AUTO_WHITE_BALANCE_IDX] = val; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; PDEBUG(D_V4L2, "Set auto white balance to %d", val); i2c_data = (i2c_data & 0xfe) | (val & 0x01); err = m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); return err; } static int po1030_get_auto_exposure(struct gspca_dev 
*gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; *val = sensor_settings[AUTO_EXPOSURE_IDX]; PDEBUG(D_V4L2, "Auto exposure is %d", *val); return 0; } static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; s32 *sensor_settings = sd->sensor_priv; u8 i2c_data; int err; sensor_settings[AUTO_EXPOSURE_IDX] = val; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; PDEBUG(D_V4L2, "Set auto exposure to %d", val); i2c_data = (i2c_data & 0xfd) | ((val & 0x01) << 1); return m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); } void po1030_disconnect(struct sd *sd) { sd->sensor = NULL; kfree(sd->sensor_priv); } static void po1030_dump_registers(struct sd *sd) { int address; u8 value = 0; pr_info("Dumping the po1030 sensor core registers\n"); for (address = 0; address < 0x7f; address++) { m5602_read_sensor(sd, address, &value, 1); pr_info("register 0x%x contains 0x%x\n", address, value); } pr_info("po1030 register state dump complete\n"); pr_info("Probing for which registers that are read/write\n"); for (address = 0; address < 0xff; address++) { u8 old_value, ctrl_value; u8 test_value[2] = {0xff, 0xff}; m5602_read_sensor(sd, address, &old_value, 1); m5602_write_sensor(sd, address, test_value, 1); m5602_read_sensor(sd, address, &ctrl_value, 1); if (ctrl_value == test_value[0]) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original value */ m5602_write_sensor(sd, address, &old_value, 1); } }
gpl-2.0
ShinySide/HispAsian_Lollipop
drivers/video/msm/mdss/mdss_mdp.c
31
73694
/* * MDSS MDP Interface (used by framebuffer core) * * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved. * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hrtimer.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iommu.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/memory_alloc.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/spinlock.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <mach/board.h> #include <mach/clk.h> #include <mach/hardware.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/memory.h> #include <mach/msm_memtypes.h> #include <mach/rpm-regulator-smd.h> #include "mdss.h" #include "mdss_fb.h" #include "mdss_mdp.h" #include "mdss_panel.h" #include "mdss_debug.h" #define CREATE_TRACE_POINTS #include "mdss_mdp_trace.h" struct mdss_data_type *mdss_res; static int mdp_clk_cnt; #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) void xlog(const char *name, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4, u32 data5); #endif static int mdss_fb_mem_get_iommu_domain(void) { return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE); } 
struct msm_mdp_interface mdp5 = { .init_fnc = mdss_mdp_overlay_init, .fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain, .panel_register_done = mdss_panel_register_done, .fb_stride = mdss_mdp_fb_stride, .check_dsi_status = mdss_check_dsi_ctrl_status, }; #define DEFAULT_TOTAL_RGB_PIPES 3 #define DEFAULT_TOTAL_VIG_PIPES 3 #define DEFAULT_TOTAL_DMA_PIPES 2 #define IB_QUOTA 800000000 #define AB_QUOTA 800000000 static DEFINE_SPINLOCK(mdp_lock); static DEFINE_MUTEX(mdp_clk_lock); static DEFINE_MUTEX(bus_bw_lock); static DEFINE_MUTEX(mdp_iommu_lock); static struct mdss_panel_intf pan_types[] = { {"dsi", MDSS_PANEL_INTF_DSI}, {"edp", MDSS_PANEL_INTF_EDP}, {"hdmi", MDSS_PANEL_INTF_HDMI}, }; struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = { [MDSS_IOMMU_DOMAIN_UNSECURE] = { .client_name = "mdp_ns", .ctx_name = "mdp_0", .partitions = { { .start = SZ_128K, .size = SZ_1G - SZ_128K, }, }, .npartitions = 1, }, [MDSS_IOMMU_DOMAIN_SECURE] = { .client_name = "mdp_secure", .ctx_name = "mdp_1", .partitions = { { .start = SZ_1G, .size = SZ_1G, }, }, .npartitions = 1, }, }; struct mdss_hw mdss_mdp_hw = { .hw_ndx = MDSS_HW_MDP, .ptr = NULL, .irq_handler = mdss_mdp_isr, }; static DEFINE_SPINLOCK(mdss_lock); struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK]; static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on); static int mdss_mdp_parse_dt(struct platform_device *pdev); static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev); static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev); static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev); static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev); static int mdss_mdp_parse_dt_handler(struct platform_device *pdev, char *prop_name, u32 *offsets, int len); static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev, char *prop_name); static int mdss_mdp_parse_dt_smp(struct platform_device *pdev); static int mdss_mdp_parse_dt_prefill(struct platform_device 
*pdev); static int mdss_mdp_parse_dt_misc(struct platform_device *pdev); static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev); static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev); u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp) { /* The adreno GPU hardware requires that the pitch be aligned to 32 pixels for color buffers, so for the cases where the GPU is writing directly to fb0, the framebuffer pitch also needs to be 32 pixel aligned */ if (fb_index == 0) return ALIGN(xres, 32) * bpp; else return xres * bpp; } static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr) { struct mdss_hw *hw; int rc = -ENODEV; spin_lock(&mdss_lock); hw = mdss_irq_handlers[hw_ndx]; #if 0//defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, (u32)hw, hw_ndx, 0, 0, 0, 0xeeee); #endif spin_unlock(&mdss_lock); if (hw) rc = hw->irq_handler(irq, hw->ptr); return rc; } static irqreturn_t mdss_irq_handler(int irq, void *ptr) { struct mdss_data_type *mdata = ptr; u32 intr = MDSS_MDP_REG_READ(MDSS_REG_HW_INTR_STATUS); #if 0//defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, intr, (u32) mdata, 0, 0, 0, 0xffff); #endif if (!mdata) return IRQ_NONE; mdata->irq_buzy = true; if (intr & MDSS_INTR_MDP) { spin_lock(&mdp_lock); mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr); spin_unlock(&mdp_lock); } if (intr & MDSS_INTR_DSI0) mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr); if (intr & MDSS_INTR_DSI1) mdss_irq_dispatch(MDSS_HW_DSI1, irq, ptr); if (intr & MDSS_INTR_EDP) mdss_irq_dispatch(MDSS_HW_EDP, irq, ptr); if (intr & MDSS_INTR_HDMI) mdss_irq_dispatch(MDSS_HW_HDMI, irq, ptr); mdata->irq_buzy = false; return IRQ_HANDLED; } int mdss_register_irq(struct mdss_hw *hw) { unsigned long irq_flags; u32 ndx_bit; if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK) return -EINVAL; ndx_bit = BIT(hw->hw_ndx); spin_lock_irqsave(&mdss_lock, irq_flags); if (!mdss_irq_handlers[hw->hw_ndx]) mdss_irq_handlers[hw->hw_ndx] = hw; else pr_err("panel %d's irq at %p is already registered\n", 
hw->hw_ndx, hw->irq_handler); spin_unlock_irqrestore(&mdss_lock, irq_flags); return 0; } /* mdss_regsiter_irq */ EXPORT_SYMBOL(mdss_register_irq); void mdss_enable_irq(struct mdss_hw *hw) { unsigned long irq_flags; u32 ndx_bit; if (hw->hw_ndx >= MDSS_MAX_HW_BLK) return; if (!mdss_irq_handlers[hw->hw_ndx]) { pr_err("failed. First register the irq then enable it.\n"); return; } ndx_bit = BIT(hw->hw_ndx); pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx, mdss_res->irq_ena, mdss_res->irq_mask); spin_lock_irqsave(&mdss_lock, irq_flags); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB); #endif if (mdss_res->irq_mask & ndx_bit) { pr_debug("MDSS HW ndx=%d is already set, mask=%x\n", hw->hw_ndx, mdss_res->irq_mask); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, 0, 0, 0, 0, 0, 0xFF); #endif } else { mdss_res->irq_mask |= ndx_bit; if (!mdss_res->irq_ena) { mdss_res->irq_ena = true; enable_irq(mdss_res->irq); } } #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE); #endif spin_unlock_irqrestore(&mdss_lock, irq_flags); } EXPORT_SYMBOL(mdss_enable_irq); void mdss_disable_irq(struct mdss_hw *hw) { unsigned long irq_flags; u32 ndx_bit; if (hw->hw_ndx >= MDSS_MAX_HW_BLK) return; ndx_bit = BIT(hw->hw_ndx); pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx, mdss_res->irq_ena, mdss_res->irq_mask); spin_lock_irqsave(&mdss_lock, irq_flags); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB); #endif if (!(mdss_res->irq_mask & ndx_bit)) { pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n", hw->hw_ndx, mdss_res->mdp_irq_mask, mdss_res->mdp_hist_irq_mask); } else { mdss_res->irq_mask &= ~ndx_bit; if (mdss_res->irq_mask == 0) { mdss_res->irq_ena = false; disable_irq_nosync(mdss_res->irq); } } #if defined 
(CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE); #endif spin_unlock_irqrestore(&mdss_lock, irq_flags); } EXPORT_SYMBOL(mdss_disable_irq); /* called from interrupt context */ void mdss_disable_irq_nosync(struct mdss_hw *hw) { u32 ndx_bit; if (hw->hw_ndx >= MDSS_MAX_HW_BLK) return; ndx_bit = BIT(hw->hw_ndx); pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx, mdss_res->irq_ena, mdss_res->irq_mask); spin_lock(&mdss_lock); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB); #endif if (!(mdss_res->irq_mask & ndx_bit)) { pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n", hw->hw_ndx, mdss_res->mdp_irq_mask, mdss_res->mdp_hist_irq_mask); } else { mdss_res->irq_mask &= ~ndx_bit; if (mdss_res->irq_mask == 0) { mdss_res->irq_ena = false; disable_irq_nosync(mdss_res->irq); } } #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE); #endif spin_unlock(&mdss_lock); } EXPORT_SYMBOL(mdss_disable_irq_nosync); static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata) { if (!mdata->bus_hdl) { mdata->bus_hdl = msm_bus_scale_register_client(mdata->bus_scale_table); if (IS_ERR_VALUE(mdata->bus_hdl)) { pr_err("bus_client register failed\n"); return -EINVAL; } pr_debug("register bus_hdl=%x\n", mdata->bus_hdl); } return mdss_bus_scale_set_quota(MDSS_HW_MDP, AB_QUOTA, IB_QUOTA); } static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata) { pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl); if (mdata->bus_hdl) msm_bus_scale_unregister_client(mdata->bus_hdl); } unsigned long clk_rate_dbg; u64 bus_ab_quota_dbg, bus_ib_quota_dbg; #if defined(CONFIG_MACH_MILLET3G_CHN_OPEN) #define MDSS_MDP_BUS_FUDGE_FACTOR_IB(val) (((val) * 5) / 4) #endif int mdss_mdp_bus_scale_set_quota(u64 ab_quota, u64 ib_quota) { int new_uc_idx; 
if (mdss_res->bus_hdl < 1) { pr_err("invalid bus handle %d\n", mdss_res->bus_hdl); return -EINVAL; } if ((ab_quota | ib_quota) == 0) { new_uc_idx = 0; } else { int i; struct msm_bus_vectors *vect = NULL; struct msm_bus_scale_pdata *bw_table = mdss_res->bus_scale_table; unsigned long size; if (!bw_table || !mdss_res->axi_port_cnt) { pr_err("invalid input\n"); return -EINVAL; } size = SZ_64M / mdss_res->axi_port_cnt; ab_quota = div_u64(ab_quota, mdss_res->axi_port_cnt); ib_quota = div_u64(ib_quota, mdss_res->axi_port_cnt); new_uc_idx = (mdss_res->curr_bw_uc_idx % (bw_table->num_usecases - 1)) + 1; for (i = 0; i < mdss_res->axi_port_cnt; i++) { vect = &bw_table->usecase[mdss_res->curr_bw_uc_idx]. vectors[i]; /* avoid performing updates for small changes */ if ((ALIGN(ab_quota, size) == ALIGN(vect->ab, size)) && (ALIGN(ib_quota, size) == ALIGN(vect->ib, size))) { pr_debug("skip bus scaling, no changes\n"); return 0; } vect = &bw_table->usecase[new_uc_idx].vectors[i]; #if defined(CONFIG_MACH_MILLET3G_CHN_OPEN) vect->ab = ab_quota; vect->ib = MDSS_MDP_BUS_FUDGE_FACTOR_IB(ib_quota); bus_ab_quota_dbg = ab_quota; bus_ib_quota_dbg = MDSS_MDP_BUS_FUDGE_FACTOR_IB(ib_quota); #else vect->ab = ab_quota; vect->ib = ib_quota; bus_ab_quota_dbg = ab_quota; bus_ib_quota_dbg = ib_quota; #endif pr_debug("uc_idx=%d path_idx=%d ab=%llu ib=%llu\n", new_uc_idx, i, vect->ab, vect->ib); } } mdss_res->curr_bw_uc_idx = new_uc_idx; return msm_bus_scale_client_update_request(mdss_res->bus_hdl, new_uc_idx); } int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota) { int rc = 0; int i; u64 total_ab = 0; u64 total_ib = 0; mutex_lock(&bus_bw_lock); mdss_res->ab[client] = ab_quota; mdss_res->ib[client] = ib_quota; for (i = 0; i < MDSS_MAX_HW_BLK; i++) { total_ab += mdss_res->ab[i]; total_ib = max(total_ib, mdss_res->ib[i]); } rc = mdss_mdp_bus_scale_set_quota(total_ab, total_ib); mutex_unlock(&bus_bw_lock); return rc; } static inline u32 mdss_mdp_irq_mask(u32 intr_type, u32 intf_num) { if 
(intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN || intr_type == MDSS_MDP_IRQ_INTF_VSYNC) intf_num = (intf_num - MDSS_MDP_INTF0) * 2; return 1 << (intr_type + intf_num); } /* function assumes that mdp is clocked to access hw registers */ void mdss_mdp_irq_clear(struct mdss_data_type *mdata, u32 intr_type, u32 intf_num) { unsigned long irq_flags; u32 irq; irq = mdss_mdp_irq_mask(intr_type, intf_num); pr_debug("clearing mdp irq mask=%x\n", irq); spin_lock_irqsave(&mdp_lock, irq_flags); writel_relaxed(irq, mdata->mdp_base + MDSS_MDP_REG_INTR_CLEAR); spin_unlock_irqrestore(&mdp_lock, irq_flags); } #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) int mdss_mdp_debug_bus(void) { u32 status; mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); MDSS_MDP_REG_WRITE(0x398, 0x7001); MDSS_MDP_REG_WRITE(0x448, 0x3f1); status = MDSS_MDP_REG_READ(0x44c); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); xlog(__func__, status, 0, 0, 0, 0, 0xDDDDDD); return 0; } EXPORT_SYMBOL(mdss_mdp_debug_bus); #endif int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num) { u32 irq; unsigned long irq_flags; int ret = 0; irq = mdss_mdp_irq_mask(intr_type, intf_num); spin_lock_irqsave(&mdp_lock, irq_flags); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__,mdss_res->mdp_irq_mask, irq, intr_type, intf_num, 0, 0); #endif if (mdss_res->mdp_irq_mask & irq) { pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n", irq, mdss_res->mdp_irq_mask); ret = -EBUSY; } else { pr_debug("MDP IRQ mask old=%x new=%x\n", mdss_res->mdp_irq_mask, irq); mdss_res->mdp_irq_mask |= irq; MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, irq); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->mdp_irq_mask); mdss_enable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); return ret; } int mdss_mdp_hist_irq_enable(u32 irq) { unsigned long irq_flags; int ret = 0; spin_lock_irqsave(&mdp_lock, irq_flags); if (mdss_res->mdp_hist_irq_mask & irq) { pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n", irq, mdss_res->mdp_hist_irq_mask); ret = 
-EBUSY; } else { pr_debug("MDP IRQ mask old=%x new=%x\n", mdss_res->mdp_hist_irq_mask, irq); mdss_res->mdp_hist_irq_mask |= irq; MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_CLEAR, irq); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN, mdss_res->mdp_hist_irq_mask); mdss_enable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); return ret; } void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num) { u32 irq; unsigned long irq_flags; irq = mdss_mdp_irq_mask(intr_type, intf_num); spin_lock_irqsave(&mdp_lock, irq_flags); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__,mdss_res->mdp_irq_mask, irq, intr_type, intf_num, 0, 0); #endif if (!(mdss_res->mdp_irq_mask & irq)) { pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq, mdss_res->mdp_irq_mask); } else { mdss_res->mdp_irq_mask &= ~irq; MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->mdp_irq_mask); if ((mdss_res->mdp_irq_mask == 0) && (mdss_res->mdp_hist_irq_mask == 0)) mdss_disable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); } void mdss_mdp_hist_irq_disable(u32 irq) { unsigned long irq_flags; spin_lock_irqsave(&mdp_lock, irq_flags); if (!(mdss_res->mdp_hist_irq_mask & irq)) { pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq, mdss_res->mdp_hist_irq_mask); } else { mdss_res->mdp_hist_irq_mask &= ~irq; MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN, mdss_res->mdp_hist_irq_mask); if ((mdss_res->mdp_irq_mask == 0) && (mdss_res->mdp_hist_irq_mask == 0)) mdss_disable_irq(&mdss_mdp_hw); } spin_unlock_irqrestore(&mdp_lock, irq_flags); } /** * mdss_mdp_irq_disable_nosync() - disable mdp irq * @intr_type: mdp interface type * @intf_num: mdp interface num * * This fucntion is called from interrupt context * mdp_lock is already held at up stream (mdss_irq_handler) * therefore spin_lock(&mdp_lock) is not allowed here * */ void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num) { u32 irq; irq = mdss_mdp_irq_mask(intr_type, intf_num); if (!(mdss_res->mdp_irq_mask & irq)) { 
pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n", irq, mdss_res->mdp_irq_mask); } else { mdss_res->mdp_irq_mask &= ~irq; MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->mdp_irq_mask); if ((mdss_res->mdp_irq_mask == 0) && (mdss_res->mdp_hist_irq_mask == 0)) mdss_disable_irq_nosync(&mdss_mdp_hw); } } static inline struct clk *mdss_mdp_get_clk(u32 clk_idx) { if (clk_idx < MDSS_MAX_CLK) return mdss_res->mdp_clk[clk_idx]; return NULL; } static int mdss_mdp_clk_update(u32 clk_idx, u32 enable) { int ret = -ENODEV; struct clk *clk = mdss_mdp_get_clk(clk_idx); if (clk) { pr_debug("clk=%d en=%d\n", clk_idx, enable); if (enable) { if (clk_idx == MDSS_CLK_MDP_VSYNC) clk_set_rate(clk, 19200000); ret = clk_prepare_enable(clk); } else { clk_disable_unprepare(clk); ret = 0; } } return ret; } int mdss_mdp_vsync_clk_enable(int enable) { int ret = 0; pr_debug("clk enable=%d\n", enable); mutex_lock(&mdp_clk_lock); if (mdss_res->vsync_ena != enable) { mdss_res->vsync_ena = enable; ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable); } mutex_unlock(&mdp_clk_lock); return ret; } void mdss_mdp_set_clk_rate(unsigned long rate) { struct mdss_data_type *mdata = mdss_res; unsigned long clk_rate; struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_SRC); unsigned long min_clk_rate; min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk); if (clk) { mutex_lock(&mdp_clk_lock); if (min_clk_rate < mdata->max_mdp_clk_rate) clk_rate = clk_round_rate(clk, min_clk_rate); else clk_rate = mdata->max_mdp_clk_rate; if (IS_ERR_VALUE(clk_rate)) { pr_err("unable to round rate err=%ld\n", clk_rate); } else if (clk_rate != clk_get_rate(clk)) { clk_rate_dbg = clk_rate; if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate))) pr_err("clk_set_rate failed\n"); else pr_debug("mdp clk rate=%lu\n", clk_rate); } mutex_unlock(&mdp_clk_lock); } else { pr_err("mdp src clk not setup properly\n"); } } unsigned long mdss_mdp_get_clk_rate(u32 clk_idx) { unsigned long clk_rate = 0; struct clk *clk = mdss_mdp_get_clk(clk_idx); 
mutex_lock(&mdp_clk_lock);
	if (clk)
		clk_rate = clk_get_rate(clk);
	mutex_unlock(&mdp_clk_lock);

	return clk_rate;
}

/*
 * Reference-counted SMMU attach/detach.
 * enable != 0: attach on the 0->1 transition, then bump the count.
 * enable == 0: drop the count and detach on the 1->0 transition; an
 * unbalanced disable is logged and otherwise ignored.
 * Returns the current reference count, or a negative errno when the
 * underlying attach/detach failed.
 */
int mdss_iommu_ctrl(int enable)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int rc = 0;

	mutex_lock(&mdp_iommu_lock);
	pr_debug("%pS: enable %d mdata->iommu_ref_cnt %d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt);

	if (enable) {
		if (mdata->iommu_ref_cnt == 0)
			rc = mdss_iommu_attach(mdata);
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0)
				rc = mdss_iommu_dettach(mdata);
		} else {
			pr_err("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&mdp_iommu_lock);

	if (IS_ERR_VALUE(rc))
		return rc;
	else
		return mdata->iommu_ref_cnt;
}

/**
 * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
 * @enable: value of enable or disable
 *
 * Function place bus bandwidth request to allocate saved bandwidth
 * if enabled or free bus bandwidth allocation if disabled.
 * Bus bandwidth is required by mdp. For dsi, it only requires to send
 * dcs command. It returns error if bandwidth request fails.
 */
void mdss_bus_bandwidth_ctrl(int enable)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	static int bus_bw_cnt;
	int changed = 0;

	mutex_lock(&bus_bw_lock);
	if (enable) {
		if (bus_bw_cnt == 0)
			changed++;
		bus_bw_cnt++;
	} else {
		if (bus_bw_cnt) {
			bus_bw_cnt--;
			if (bus_bw_cnt == 0)
				changed++;
		} else {
			pr_err("Can not be turned off\n");
		}
	}

	pr_debug("bw_cnt=%d changed=%d enable=%d\n",
			bus_bw_cnt, changed, enable);
	if (changed) {
		if (!enable) {
			/* drop the bus vote before allowing runtime PM off */
			msm_bus_scale_client_update_request(
				mdata->bus_hdl, 0);
			pm_runtime_put(&mdata->pdev->dev);
		} else {
			pm_runtime_get_sync(&mdata->pdev->dev);
			msm_bus_scale_client_update_request(
				mdata->bus_hdl, mdata->curr_bw_uc_idx);
		}
	}

	mutex_unlock(&bus_bw_lock);
}
EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);

/*
 * Reference-counted gate for the core MDSS clocks (AHB, AXI, core, LUT
 * and - when vsync_ena is set - vsync).  Clocks toggle only on the
 * 0<->1 transitions of the local count, bracketed by runtime PM votes.
 * NOTE(review): the @isr parameter is not used in this body.
 */
void mdss_mdp_clk_ctrl(int enable, int isr)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	static int mdp_clk_cnt;
	int changed = 0;

	mutex_lock(&mdp_clk_lock);
	if (enable) {
		if (mdp_clk_cnt == 0)
			changed++;
		mdp_clk_cnt++;
	} else {
		if (mdp_clk_cnt) {
			mdp_clk_cnt--;
			if (mdp_clk_cnt == 0)
				changed++;
		} else {
			pr_err("Can not be turned off\n");
		}
	}

	MDSS_XLOG(mdp_clk_cnt, changed, enable, current->pid);
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
	xlog(__func__, mdp_clk_cnt, changed, enable, 0, 0, 0);
#endif
	pr_debug("%s: clk_cnt=%d changed=%d enable=%d\n",
			__func__, mdp_clk_cnt, changed, enable);
	if (changed) {
		mdata->clk_ena = enable;
		if (enable)
			pm_runtime_get_sync(&mdata->pdev->dev);

		mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
		mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
		if (mdata->vsync_ena)
			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);

		if (!enable)
			pm_runtime_put(&mdata->pdev->dev);
	}

	mutex_unlock(&mdp_clk_lock);
}

/* Look up one named clock and cache its handle in mdp_clk[clk_idx]. */
static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
					    char *clk_name, int clk_idx)
{
	struct clk *tmp;

	if (clk_idx >= MDSS_MAX_CLK) {
		pr_err("invalid clk index %d\n", clk_idx);
		return -EINVAL;
	}

	tmp =
devm_clk_get(&mdata->pdev->dev, clk_name);
	if (IS_ERR(tmp)) {
		pr_err("unable to get clk: %s\n", clk_name);
		return PTR_ERR(tmp);
	}

	mdata->mdp_clk[clk_idx] = tmp;
	return 0;
}

/*
 * One-time setup of the MDSS interrupt line, regulators and clocks:
 * reads the max clock rate from DT, requests (and immediately masks)
 * the MDSS IRQ, grabs the "vdd" GDSC (mandatory) and "vdd-cx"
 * (optional), registers all MDSS clocks and programs the default MDP
 * source rate.  Returns 0 on success or a negative errno.
 */
static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
{
	int ret;

	ret = of_property_read_u32(mdata->pdev->dev.of_node,
			"qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
	if (ret) {
		pr_err("failed to get max mdp clock rate\n");
		return ret;
	}

	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);

	ret = devm_request_irq(&mdata->pdev->dev, mdata->irq,
			mdss_irq_handler, IRQF_DISABLED, "MDSS", mdata);
	if (ret) {
		pr_err("mdp request_irq() failed!\n");
		return ret;
	}
	/* keep the line masked until a client enables an interrupt */
	disable_irq(mdata->irq);

	mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
	if (IS_ERR_OR_NULL(mdata->fs)) {
		mdata->fs = NULL;
		pr_err("unable to get gdsc regulator\n");
		return -EINVAL;
	}
	mdata->fs_ena = false;

	/* CX rail is optional; absence is only logged at debug level */
	mdata->vdd_cx = devm_regulator_get(&mdata->pdev->dev,
				"vdd-cx");
	if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
		pr_debug("unable to get CX reg. rc=%d\n",
					PTR_RET(mdata->vdd_cx));
		mdata->vdd_cx = NULL;
	}

	if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
	    mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
	    mdss_mdp_irq_clk_register(mdata, "core_clk_src",
				      MDSS_CLK_MDP_SRC) ||
	    mdss_mdp_irq_clk_register(mdata, "core_clk",
				      MDSS_CLK_MDP_CORE) ||
	    mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT) ||
	    mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC))
		return -EINVAL;

	mdss_mdp_set_clk_rate(MDP_CLK_DEFAULT_RATE);
	pr_debug("mdp clk rate=%ld\n",
		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));

	return 0;
}

/*
 * Debug dump of all MDSS clocks.
 * NOTE(review): mdp_clk_cnt referenced here cannot be the static local
 * of mdss_mdp_clk_ctrl(); a file-scope counter of the same name is
 * presumably declared outside this view - confirm.
 */
void mdss_mdp_dump_power_clk(void)
{
	u8 clk_idx = 0;
	struct clk *clk;

	pr_info(" ============ dump power & mdss clk start ============\n");

	for(clk_idx = MDSS_CLK_AHB ; clk_idx < MDSS_MAX_CLK ;clk_idx++) {
		clk = mdss_mdp_get_clk(clk_idx);
		clock_debug_print_clock2(clk);
	}

	pr_info("%s: mdp_clk_cnt =%d \n", __func__, mdp_clk_cnt);
	pr_info(" ============ dump power & mdss clk end ============\n");
}
/*static int mdss_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *token) { pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova); return 0; }*/ int mdss_iommu_attach(struct mdss_data_type *mdata) { struct iommu_domain *domain; struct mdss_iommu_map_type *iomap; int i, rc = 0; MDSS_XLOG(mdata->iommu_attached); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, mdata->iommu_attached, 0, 0, 0, 0, 0); #endif if (mdata->iommu_attached) { pr_debug("mdp iommu already attached\n"); goto end; } for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) { iomap = mdata->iommu_map + i; domain = msm_get_iommu_domain(iomap->domain_idx); if (!domain) { WARN(1, "could not attach iommu client %s to ctx %s\n", iomap->client_name, iomap->ctx_name); continue; } rc = iommu_attach_device(domain, iomap->ctx); if (rc) { WARN(1, "mdp::iommu device attach failed rc:%d\n", rc); for (i--; i >= 0; i--) { iomap = mdata->iommu_map + i; iommu_detach_device(domain, iomap->ctx); } goto end; } } mdata->iommu_attached = true; end: return rc; } int mdss_iommu_dettach(struct mdss_data_type *mdata) { struct iommu_domain *domain; struct mdss_iommu_map_type *iomap; int i; MDSS_XLOG(mdata->iommu_attached); #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG) xlog(__func__, mdata->iommu_attached, 0, 0, 0, 0, 0); #endif if (!mdata->iommu_attached) { pr_debug("mdp iommu already dettached\n"); return 0; } for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) { iomap = mdata->iommu_map + i; domain = msm_get_iommu_domain(iomap->domain_idx); if (!domain) { pr_err("unable to get iommu domain(%d)\n", iomap->domain_idx); continue; } iommu_detach_device(domain, iomap->ctx); } mdata->iommu_attached = false; return 0; } int mdss_iommu_init(struct mdss_data_type *mdata) { struct msm_iova_layout layout; struct iommu_domain *domain; struct mdss_iommu_map_type *iomap; int i; if (mdata->iommu_map) { pr_warn("iommu already initialized\n"); return 0; } for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) { 
iomap = &mdss_iommu_map[i]; layout.client_name = iomap->client_name; layout.partitions = iomap->partitions; layout.npartitions = iomap->npartitions; layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE); //layout.domain_flags = 0; iomap->domain_idx = msm_register_domain(&layout); if (IS_ERR_VALUE(iomap->domain_idx)) return -EINVAL; domain = msm_get_iommu_domain(iomap->domain_idx); if (!domain) { pr_err("unable to get iommu domain(%d)\n", iomap->domain_idx); return -EINVAL; } iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name); if (!iomap->ctx) { pr_warn("unable to get iommu ctx(%s)\n", iomap->ctx_name); return -EINVAL; } } mdata->iommu_map = mdss_iommu_map; return 0; } static int mdss_debug_stat_ctl_dump(struct mdss_mdp_ctl *ctl, char *bp, int len) { int total = 0; if (!ctl->ref_cnt) return 0; if (ctl->intf_num) { total = scnprintf(bp, len, "intf%d: play: %08u \tvsync: %08u \tunderrun: %08u\n", ctl->intf_num, ctl->play_cnt, ctl->vsync_cnt, ctl->underrun_cnt); } else { total = scnprintf(bp, len, "wb: \tmode=%x \tplay: %08u\n", ctl->opmode, ctl->play_cnt); } return total; } static int mdss_debug_dump_stats(void *data, char *buf, int len) { struct mdss_data_type *mdata = data; struct mdss_mdp_pipe *pipe; int i, total = 0; for (i = 0; i < mdata->nctl; i++) total += mdss_debug_stat_ctl_dump(mdata->ctl_off + i, buf + total, len - total); total += scnprintf(buf + total, len - total, "\n"); for (i = 0; i < mdata->nvig_pipes; i++) { pipe = mdata->vig_pipes + i; total += scnprintf(buf + total, len - total, "VIG%d : %08u\t", i, pipe->play_cnt); } total += scnprintf(buf + total, len - total, "\n"); for (i = 0; i < mdata->nrgb_pipes; i++) { pipe = mdata->rgb_pipes + i; total += scnprintf(buf + total, len - total, "RGB%d : %08u\t", i, pipe->play_cnt); } total += scnprintf(buf + total, len - total, "\n"); for (i = 0; i < mdata->ndma_pipes; i++) { pipe = mdata->dma_pipes + i; total += scnprintf(buf + total, len - total, "DMA%d : %08u\t", i, pipe->play_cnt); } return total; } static void 
mdss_debug_enable_clock(int on) { if (on) mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); else mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); } static int mdss_mdp_debug_init(struct mdss_data_type *mdata) { int rc; mdata->debug_inf.debug_dump_stats = mdss_debug_dump_stats; mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock; rc = mdss_debugfs_init(mdata); if (rc) return rc; mdss_debug_register_base("mdp", mdata->mdp_base, mdata->mdp_reg_size); return 0; } int mdss_hw_init(struct mdss_data_type *mdata) { int i, j; char *offset; struct mdss_mdp_pipe *vig; mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION); pr_info_once("MDP Rev=%x\n", mdata->mdp_rev); if (mdata->hw_settings) { struct mdss_hw_settings *hws = mdata->hw_settings; while (hws->reg) { writel_relaxed(hws->val, hws->reg); hws++; } } for (i = 0; i < mdata->nmixers_intf; i++) { offset = mdata->mixer_intf[i].dspp_base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE; for (j = 0; j < ENHIST_LUT_ENTRIES; j++) writel_relaxed(j, offset); /* swap */ writel_relaxed(1, offset + 4); } vig = mdata->vig_pipes; for (i = 0; i < mdata->nvig_pipes; i++) { offset = vig[i].base + MDSS_MDP_REG_VIG_HIST_LUT_BASE; for (j = 0; j < ENHIST_LUT_ENTRIES; j++) writel_relaxed(j, offset); /* swap */ writel_relaxed(1, offset + 16); } mdata->nmax_concurrent_ad_hw = (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 
1 : 2; mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); pr_debug("MDP hw init done\n"); return 0; } static u32 mdss_mdp_res_init(struct mdss_data_type *mdata) { u32 rc = 0; if (mdata->res_init) { pr_err("mdss resources already initialized\n"); return -EPERM; } mdata->res_init = true; mdata->clk_ena = false; mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK; mdata->irq_ena = false; rc = mdss_mdp_irq_clk_setup(mdata); if (rc) return rc; mdata->hist_intr.req = 0; mdata->hist_intr.curr = 0; mdata->hist_intr.state = 0; spin_lock_init(&mdata->hist_intr.lock); mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name); if (IS_ERR_OR_NULL(mdata->iclient)) { pr_err("msm_ion_client_create() return error (%p)\n", mdata->iclient); mdata->iclient = NULL; } rc = mdss_iommu_init(mdata); return rc; } /** * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen * @on: 1 to start handoff, 0 to complete the handoff after first frame update * * MDSS Clocks and GDSC are already on during continous splash screen, but * increasing ref count will keep clocks from being turned off until handoff * has properly happend after frame update. */ void mdss_mdp_footswitch_ctrl_splash(int on) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); if (mdata != NULL) { if (on) { pr_debug("Enable MDP FS for splash.\n"); mdata->handoff_pending = true; regulator_enable(mdata->fs); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdss_hw_init(mdata); } else { pr_debug("Disable MDP FS for splash.\n"); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); regulator_disable(mdata->fs); mdata->handoff_pending = false; } } else { pr_warn("mdss mdata not initialized\n"); } } static ssize_t mdss_mdp_show_capabilities(struct device *dev, struct device_attribute *attr, char *buf) { struct mdss_data_type *mdata = dev_get_drvdata(dev); size_t len = PAGE_SIZE; int cnt = 0; #define SPRINT(fmt, ...) 
\ (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__)) SPRINT("mdp_version=5\n"); SPRINT("hw_rev=%d\n", mdata->mdp_rev); SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes); SPRINT("vig_pipes=%d\n", mdata->nvig_pipes); SPRINT("dma_pipes=%d\n", mdata->ndma_pipes); SPRINT("smp_count=%d\n", mdata->smp_mb_cnt); SPRINT("smp_size=%d\n", mdata->smp_mb_size); SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe); SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO); SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO); if (mdata->max_bw_low) SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low); if (mdata->max_bw_high) SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high); SPRINT("features="); if (mdata->has_bwc) SPRINT(" bwc"); if (mdata->has_decimation) SPRINT(" decimation"); if (mdata->highest_bank_bit) SPRINT(" tile_format"); SPRINT("\n"); return cnt; } static DEVICE_ATTR(caps, S_IRUGO, mdss_mdp_show_capabilities, NULL); static struct attribute *mdp_fs_attrs[] = { &dev_attr_caps.attr, NULL }; static struct attribute_group mdp_fs_attr_group = { .attrs = mdp_fs_attrs }; static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata) { struct device *dev = &mdata->pdev->dev; int rc; rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group); return rc; } static int mdss_mdp_probe(struct platform_device *pdev) { struct resource *res; int rc; struct mdss_data_type *mdata; pr_info("%s : ++ \n",__func__); if (!pdev->dev.of_node) { pr_err("MDP driver only supports device tree probe\n"); return -ENOTSUPP; } if (mdss_res) { pr_err("MDP already initialized\n"); return -EINVAL; } mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL); if (mdata == NULL) return -ENOMEM; pdev->id = 0; mdata->pdev = pdev; platform_set_drvdata(pdev, mdata); mdss_res = mdata; mutex_init(&mdata->reg_lock); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys"); if (!res) { pr_err("unable to get MDP base address\n"); rc = -ENOMEM; goto probe_done; } mdata->mdp_reg_size = 
resource_size(res); mdata->mdp_base = devm_ioremap(&pdev->dev, res->start, mdata->mdp_reg_size); if (unlikely(!mdata->mdp_base)) { pr_err("unable to map MDP base\n"); rc = -ENOMEM; goto probe_done; } pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n", (int) res->start, (int) mdata->mdp_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys"); if (!res) { pr_err("unable to get MDSS VBIF base address\n"); rc = -ENOMEM; goto probe_done; } mdata->vbif_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (unlikely(!mdata->vbif_base)) { pr_err("unable to map MDSS VBIF base\n"); rc = -ENOMEM; goto probe_done; } pr_info("MDSS VBIF HW Base phy_Address=0x%x virt=0x%x\n", (int) res->start, (int) mdata->vbif_base); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { pr_err("unable to get MDSS irq\n"); rc = -ENOMEM; goto probe_done; } mdata->irq = res->start; mdss_mdp_hw.ptr = mdata; /*populate hw iomem base info from device tree*/ rc = mdss_mdp_parse_dt(pdev); if (rc) { pr_err("unable to parse device tree\n"); goto probe_done; } rc = mdss_mdp_res_init(mdata); if (rc) { pr_err("unable to initialize mdss mdp resources\n"); goto probe_done; } rc = mdss_mdp_pp_init(&pdev->dev); if (rc) { pr_err("unable to initialize mdss pp resources\n"); goto probe_done; } rc = mdss_mdp_bus_scale_register(mdata); if (rc) { pr_err("unable to register bus scaling\n"); goto probe_done; } rc = mdss_mdp_debug_init(mdata); if (rc) { pr_err("unable to initialize mdp debugging\n"); goto probe_done; } pm_runtime_set_suspended(&pdev->dev); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) mdss_mdp_footswitch_ctrl(mdata, true); rc = mdss_mdp_register_sysfs(mdata); if (rc) pr_err("unable to register mdp sysfs nodes\n"); rc = mdss_fb_register_mdp_instance(&mdp5); if (rc) pr_err("unable to register mdp instance\n"); rc = mdss_register_irq(&mdss_mdp_hw); if (rc) pr_err("mdss_register_irq failed.\n"); /* Below code is not required now because 
there is mo Call to turn off the mdp clock */ #if 0 // defined(CONFIG_FB_MSM_EDP_SAMSUNG) if (mdss_mdp_scan_pipes()) { mdss_mdp_bus_scale_set_quota(AB_QUOTA, IB_QUOTA); /* keep clock on if continuous splash from lk */ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); } #endif probe_done: if (IS_ERR_VALUE(rc)) { mdss_mdp_hw.ptr = NULL; mdss_mdp_pp_term(&pdev->dev); mutex_destroy(&mdata->reg_lock); mdss_res = NULL; } pr_info("%s : -- \n",__func__); return rc; } static void mdss_mdp_parse_dt_regs_array(const u32 *arr, char __iomem *hw_base, struct mdss_hw_settings *hws, int count) { u32 len, reg; int i; if (!arr) return; for (i = 0, len = count * 2; i < len; i += 2) { reg = be32_to_cpu(arr[i]); hws->reg = hw_base + reg; hws->val = be32_to_cpu(arr[i + 1]); pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val); hws++; } } int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); struct mdss_hw_settings *hws; const u32 *vbif_arr, *mdp_arr; int vbif_len, mdp_len; vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings", &vbif_len); if (!vbif_arr || (vbif_len & 1)) { pr_warn("MDSS VBIF settings not found\n"); vbif_len = 0; } vbif_len /= 2 * sizeof(u32); mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings", &mdp_len); if (!mdp_arr || (mdp_len & 1)) { pr_warn("MDSS MDP settings not found\n"); mdp_len = 0; } mdp_len /= 2 * sizeof(u32); if ((mdp_len + vbif_len) == 0) return 0; hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len + 1), GFP_KERNEL); if (!hws) return -ENOMEM; mdss_mdp_parse_dt_regs_array(vbif_arr, mdata->vbif_base, hws, vbif_len); mdss_mdp_parse_dt_regs_array(mdp_arr, mdata->mdp_base, hws + vbif_len, mdp_len); mdata->hw_settings = hws; return 0; } static int mdss_mdp_get_pan_intf(const char *pan_intf) { int i, rc = MDSS_PANEL_INTF_INVALID; if (!pan_intf) return rc; for (i = 0; i < ARRAY_SIZE(pan_types); i++) { if (!strncmp(pan_intf, pan_types[i].name, MDSS_MAX_PANEL_LEN)) 
{ rc = pan_types[i].type; break; } } return rc; } static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg) { char *t = NULL; char pan_intf_str[MDSS_MAX_PANEL_LEN]; int rc, i, panel_len; char pan_name[MDSS_MAX_PANEL_LEN]; if (!pan_cfg) return -EINVAL; strlcpy(pan_name, &pan_cfg->arg_cfg[0], sizeof(pan_cfg->arg_cfg)); #if defined (CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)\ || defined(CONFIG_FB_MSM_MDSS_SHARP_HD_PANEL) if (pan_name[0] == '0') { pan_cfg->lk_cfg = false; } else if (pan_name[0] == '1') { pan_cfg->lk_cfg = true; } else { /* read from dt */ pan_cfg->lk_cfg = true; pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; return -EINVAL; } #else pan_cfg->lk_cfg = false; #endif /* skip lk cfg and delimiter; ex: "0:" */ strlcpy(pan_name, &pan_name[2], MDSS_MAX_PANEL_LEN); t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN); if (!t) { pr_err("%s: pan_name=[%s] invalid\n", __func__, pan_name); pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; return -EINVAL; } for (i = 0; ((pan_name + i) < t) && (i < 4); i++) pan_intf_str[i] = *(pan_name + i); pan_intf_str[i] = 0; pr_debug("%s:%d panel intf %s\n", __func__, __LINE__, pan_intf_str); /* point to the start of panel name */ t = t + 1; strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg)); pr_debug("%s:%d: t=[%s] panel name=[%s]\n", __func__, __LINE__, t, pan_cfg->arg_cfg); panel_len = strlen(pan_cfg->arg_cfg); if (!panel_len) { pr_err("%s: Panel name is invalid\n", __func__); pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID; return -EINVAL; } rc = mdss_mdp_get_pan_intf(pan_intf_str); pan_cfg->pan_intf = (rc < 0) ? 
MDSS_PANEL_INTF_INVALID : rc; return 0; } static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev) { int rc; struct mdss_data_type *mdata = platform_get_drvdata(pdev); const char *prim_intf = NULL; rc = of_property_read_string(pdev->dev.of_node, "qcom,mdss-pref-prim-intf", &prim_intf); if (rc) return -ENODEV; rc = mdss_mdp_get_pan_intf(prim_intf); if (rc < 0) { mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID; } else { mdata->pan_cfg.pan_intf = rc; rc = 0; } return rc; } static int mdss_mdp_parse_bootarg(struct platform_device *pdev) { struct device_node *chosen_node; static const char *cmd_line; char *disp_idx, *end_idx; int rc, len = 0, name_len, cmd_len; int *intf_type; char *panel_name; struct mdss_panel_cfg *pan_cfg; struct mdss_data_type *mdata = platform_get_drvdata(pdev); mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0; pan_cfg = &mdata->pan_cfg; panel_name = &pan_cfg->arg_cfg[0]; intf_type = &pan_cfg->pan_intf; /* reads from dt by default */ pan_cfg->lk_cfg = true; chosen_node = of_find_node_by_name(NULL, "chosen"); if (!chosen_node) { pr_err("%s: get chosen node failed\n", __func__); rc = -ENODEV; goto get_dt_pan; } cmd_line = of_get_property(chosen_node, "bootargs", &len); if (!cmd_line || len <= 0) { pr_err("%s: get bootargs failed\n", __func__); rc = -ENODEV; goto get_dt_pan; } name_len = strlen("mdss_mdp.panel="); cmd_len = strlen(cmd_line); disp_idx = strnstr(cmd_line, "mdss_mdp.panel=", cmd_len); if (!disp_idx) { pr_err("%s:%d:cmdline panel not set disp_idx=[%p]\n", __func__, __LINE__, disp_idx); memset(panel_name, 0x00, MDSS_MAX_PANEL_LEN); *intf_type = MDSS_PANEL_INTF_INVALID; rc = MDSS_PANEL_INTF_INVALID; goto get_dt_pan; } disp_idx += name_len; end_idx = strnstr(disp_idx, " ", MDSS_MAX_PANEL_LEN); pr_debug("%s:%d: pan_name=[%s] end=[%s]\n", __func__, __LINE__, disp_idx, end_idx); if (!end_idx) { end_idx = disp_idx + strlen(disp_idx) + 1; pr_warn("%s:%d: pan_name=[%s] end=[%s]\n", __func__, __LINE__, disp_idx, end_idx); } if (end_idx 
<= disp_idx) { pr_err("%s:%d:cmdline pan incorrect end=[%p] disp=[%p]\n", __func__, __LINE__, end_idx, disp_idx); memset(panel_name, 0x00, MDSS_MAX_PANEL_LEN); *intf_type = MDSS_PANEL_INTF_INVALID; rc = MDSS_PANEL_INTF_INVALID; goto get_dt_pan; } *end_idx = 0; len = end_idx - disp_idx + 1; if (len <= 0) { pr_warn("%s: panel name not rx", __func__); rc = -EINVAL; goto get_dt_pan; } strlcpy(panel_name, disp_idx, min(++len, MDSS_MAX_PANEL_LEN)); pr_debug("%s:%d panel:[%s]", __func__, __LINE__, panel_name); of_node_put(chosen_node); rc = mdss_mdp_get_pan_cfg(pan_cfg); if (!rc) { pan_cfg->init_done = true; return rc; } get_dt_pan: rc = mdss_mdp_parse_dt_pan_intf(pdev); /* if pref pan intf is not present */ if (rc) pr_err("%s:unable to parse device tree for pan intf\n", __func__); else pan_cfg->init_done = true; of_node_put(chosen_node); return rc; } static int mdss_mdp_parse_dt(struct platform_device *pdev) { int rc; rc = mdss_mdp_parse_dt_hw_settings(pdev); if (rc) { pr_err("Error in device tree : hw settings\n"); return rc; } rc = mdss_mdp_parse_dt_pipe(pdev); if (rc) { pr_err("Error in device tree : pipes\n"); return rc; } rc = mdss_mdp_parse_dt_mixer(pdev); if (rc) { pr_err("Error in device tree : mixers\n"); return rc; } rc = mdss_mdp_parse_dt_ctl(pdev); if (rc) { pr_err("Error in device tree : ctl\n"); return rc; } rc = mdss_mdp_parse_dt_video_intf(pdev); if (rc) { pr_err("Error in device tree : ctl\n"); return rc; } rc = mdss_mdp_parse_dt_smp(pdev); if (rc) { pr_err("Error in device tree : smp\n"); return rc; } rc = mdss_mdp_parse_dt_prefill(pdev); if (rc) { pr_err("Error in device tree : prefill\n"); return rc; } rc = mdss_mdp_parse_dt_misc(pdev); if (rc) { pr_err("Error in device tree : misc\n"); return rc; } rc = mdss_mdp_parse_dt_ad_cfg(pdev); if (rc) { pr_err("Error in device tree : ad\n"); return rc; } rc = mdss_mdp_parse_bootarg(pdev); if (rc) { pr_err("%s: Error in panel override:rc=[%d]\n", __func__, rc); return rc; } rc = 
mdss_mdp_parse_dt_bus_scale(pdev); if (rc) { pr_err("Error in device tree : bus scale\n"); return rc; } return 0; } static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev, char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes) { int rc = 0; size_t len; const u32 *arr; arr = of_get_property(pdev->dev.of_node, prop_name, &len); if (arr) { int i, j; len /= sizeof(u32); for (i = 0, j = 0; i < len; j++) { struct mdss_mdp_pipe *pipe = NULL; if (j >= npipes) { pr_err("invalid clk ctrl enries for prop: %s\n", prop_name); return -EINVAL; } pipe = &pipe_list[j]; pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]); pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]); /* status register is next in line to ctrl register */ pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4; pipe->clk_status.bit_off = be32_to_cpu(arr[i++]); pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n", prop_name, j, pipe->clk_ctrl.reg_off, pipe->clk_ctrl.bit_off); pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n", prop_name, j, pipe->clk_status.reg_off, pipe->clk_status.bit_off); } if (j != npipes) { pr_err("%s: %d entries found. 
required %d\n", prop_name, j, npipes); for (i = 0; i < npipes; i++) { memset(&pipe_list[i].clk_ctrl, 0, sizeof(pipe_list[i].clk_ctrl)); memset(&pipe_list[i].clk_status, 0, sizeof(pipe_list[i].clk_status)); } rc = -EINVAL; } } else { pr_err("error mandatory property '%s' not found\n", prop_name); rc = -EINVAL; } return rc; } static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev) { u32 npipes, dma_off; int rc = 0; u32 nfids = 0, setup_cnt = 0, len, nxids = 0; u32 *offsets = NULL, *ftch_id = NULL, *xin_id = NULL; struct mdss_data_type *mdata = platform_get_drvdata(pdev); mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-vig-off"); mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-rgb-off"); mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-dma-off"); npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes; nfids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-vig-fetch-id"); nfids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-rgb-fetch-id"); nfids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-dma-fetch-id"); if (npipes != nfids) { pr_err("device tree err: unequal number of pipes and smp ids"); return -EINVAL; } nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-vig-xin-id"); nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-rgb-xin-id"); nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-dma-xin-id"); if (npipes != nxids) { pr_err("device tree err: unequal number of pipes and xin ids"); return -EINVAL; } offsets = kzalloc(sizeof(u32) * npipes, GFP_KERNEL); if (!offsets) { pr_err("no mem assigned for offsets: kzalloc fail\n"); return -ENOMEM; } ftch_id = kzalloc(sizeof(u32) * nfids, GFP_KERNEL); if (!ftch_id) { pr_err("no mem assigned for ftch_id: kzalloc fail\n"); rc = -ENOMEM; goto ftch_alloc_fail; } xin_id = kzalloc(sizeof(u32) * nxids, GFP_KERNEL); if (!xin_id) { pr_err("no mem assigned for xin_id: kzalloc fail\n"); rc = -ENOMEM; goto xin_alloc_fail; } 
mdata->vig_pipes = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_pipe) * mdata->nvig_pipes, GFP_KERNEL); if (!mdata->vig_pipes) { pr_err("no mem for vig_pipes: kzalloc fail\n"); rc = -ENOMEM; goto vig_alloc_fail; } mdata->rgb_pipes = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_pipe) * mdata->nrgb_pipes, GFP_KERNEL); if (!mdata->rgb_pipes) { pr_err("no mem for rgb_pipes: kzalloc fail\n"); rc = -ENOMEM; goto rgb_alloc_fail; } mdata->dma_pipes = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_pipe) * mdata->ndma_pipes, GFP_KERNEL); if (!mdata->dma_pipes) { pr_err("no mem for dma_pipes: kzalloc fail\n"); rc = -ENOMEM; goto dma_alloc_fail; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-fetch-id", ftch_id, mdata->nvig_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-xin-id", xin_id, mdata->nvig_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-off", offsets, mdata->nvig_pipes); if (rc) goto parse_fail; len = min_t(int, DEFAULT_TOTAL_VIG_PIPES, (int)mdata->nvig_pipes); rc = mdss_mdp_pipe_addr_setup(mdata, mdata->vig_pipes, offsets, ftch_id, xin_id, MDSS_MDP_PIPE_TYPE_VIG, MDSS_MDP_SSPP_VIG0, len); if (rc) goto parse_fail; setup_cnt += len; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-fetch-id", ftch_id + mdata->nvig_pipes, mdata->nrgb_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-xin-id", xin_id + mdata->nvig_pipes, mdata->nrgb_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-off", offsets + mdata->nvig_pipes, mdata->nrgb_pipes); if (rc) goto parse_fail; len = min_t(int, DEFAULT_TOTAL_RGB_PIPES, (int)mdata->nrgb_pipes); rc = mdss_mdp_pipe_addr_setup(mdata, mdata->rgb_pipes, offsets + mdata->nvig_pipes, ftch_id + mdata->nvig_pipes, xin_id + mdata->nvig_pipes, MDSS_MDP_PIPE_TYPE_RGB, MDSS_MDP_SSPP_RGB0, len); if (rc) goto parse_fail; setup_cnt += len; dma_off = 
mdata->nvig_pipes + mdata->nrgb_pipes; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-fetch-id", ftch_id + dma_off, mdata->ndma_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-xin-id", xin_id + dma_off, mdata->ndma_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-off", offsets + dma_off, mdata->ndma_pipes); if (rc) goto parse_fail; len = mdata->ndma_pipes; rc = mdss_mdp_pipe_addr_setup(mdata, mdata->dma_pipes, offsets + dma_off, ftch_id + dma_off, xin_id + dma_off, MDSS_MDP_PIPE_TYPE_DMA, MDSS_MDP_SSPP_DMA0, len); if (rc) goto parse_fail; setup_cnt += len; if (mdata->nvig_pipes > DEFAULT_TOTAL_VIG_PIPES) { rc = mdss_mdp_pipe_addr_setup(mdata, mdata->vig_pipes + DEFAULT_TOTAL_VIG_PIPES, offsets + DEFAULT_TOTAL_VIG_PIPES, ftch_id + DEFAULT_TOTAL_VIG_PIPES, xin_id + DEFAULT_TOTAL_VIG_PIPES, MDSS_MDP_PIPE_TYPE_VIG, setup_cnt, mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES); if (rc) goto parse_fail; setup_cnt += mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES; } if (mdata->nrgb_pipes > DEFAULT_TOTAL_RGB_PIPES) { rc = mdss_mdp_pipe_addr_setup(mdata, mdata->rgb_pipes + DEFAULT_TOTAL_RGB_PIPES, offsets + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES, ftch_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES, xin_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES, MDSS_MDP_PIPE_TYPE_RGB, setup_cnt, mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES); if (rc) goto parse_fail; setup_cnt += mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES; } rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, "qcom,mdss-pipe-vig-clk-ctrl-offsets", mdata->vig_pipes, mdata->nvig_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, "qcom,mdss-pipe-rgb-clk-ctrl-offsets", mdata->rgb_pipes, mdata->nrgb_pipes); if (rc) goto parse_fail; rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, "qcom,mdss-pipe-dma-clk-ctrl-offsets", mdata->dma_pipes, mdata->ndma_pipes); if (rc) goto parse_fail; goto parse_done; parse_fail: 
kfree(mdata->dma_pipes); dma_alloc_fail: kfree(mdata->rgb_pipes); rgb_alloc_fail: kfree(mdata->vig_pipes); parse_done: vig_alloc_fail: kfree(xin_id); xin_alloc_fail: kfree(ftch_id); ftch_alloc_fail: kfree(offsets); return rc; } static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev) { u32 nmixers, ndspp, npingpong; int rc = 0; u32 *mixer_offsets = NULL, *dspp_offsets = NULL, *pingpong_offsets = NULL; struct mdss_data_type *mdata = platform_get_drvdata(pdev); mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-mixer-intf-off"); mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-mixer-wb-off"); ndspp = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dspp-off"); npingpong = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pingpong-off"); nmixers = mdata->nmixers_intf + mdata->nmixers_wb; if (mdata->nmixers_intf != ndspp) { pr_err("device tree err: unequal no of dspp and intf mixers\n"); return -EINVAL; } if (mdata->nmixers_intf != npingpong) { pr_err("device tree err: unequal no of pingpong and intf mixers\n"); return -EINVAL; } mixer_offsets = kzalloc(sizeof(u32) * nmixers, GFP_KERNEL); if (!mixer_offsets) { pr_err("no mem assigned: kzalloc fail\n"); return -ENOMEM; } dspp_offsets = kzalloc(sizeof(u32) * ndspp, GFP_KERNEL); if (!dspp_offsets) { pr_err("no mem assigned: kzalloc fail\n"); rc = -ENOMEM; goto dspp_alloc_fail; } pingpong_offsets = kzalloc(sizeof(u32) * npingpong, GFP_KERNEL); if (!pingpong_offsets) { pr_err("no mem assigned: kzalloc fail\n"); rc = -ENOMEM; goto pingpong_alloc_fail; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off", mixer_offsets, mdata->nmixers_intf); if (rc) goto parse_done; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off", mixer_offsets + mdata->nmixers_intf, mdata->nmixers_wb); if (rc) goto parse_done; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off", dspp_offsets, ndspp); if (rc) goto parse_done; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off", 
pingpong_offsets, npingpong); if (rc) goto parse_done; rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets, dspp_offsets, pingpong_offsets, MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf); if (rc) goto parse_done; rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets + mdata->nmixers_intf, NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK, mdata->nmixers_wb); if (rc) goto parse_done; parse_done: kfree(pingpong_offsets); pingpong_alloc_fail: kfree(dspp_offsets); dspp_alloc_fail: kfree(mixer_offsets); return rc; } static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev) { u32 nwb; int rc = 0; u32 *ctl_offsets = NULL, *wb_offsets = NULL; struct mdss_data_type *mdata = platform_get_drvdata(pdev); mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ctl-off"); nwb = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-wb-off"); if (mdata->nctl != nwb) { pr_err("device tree err: unequal number of ctl and wb\n"); rc = -EINVAL; goto parse_done; } ctl_offsets = kzalloc(sizeof(u32) * mdata->nctl, GFP_KERNEL); if (!ctl_offsets) { pr_err("no more mem for ctl offsets\n"); return -ENOMEM; } wb_offsets = kzalloc(sizeof(u32) * nwb, GFP_KERNEL); if (!wb_offsets) { pr_err("no more mem for writeback offsets\n"); rc = -ENOMEM; goto wb_alloc_fail; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off", ctl_offsets, mdata->nctl); if (rc) goto parse_done; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off", wb_offsets, nwb); if (rc) goto parse_done; rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, wb_offsets, mdata->nctl); if (rc) goto parse_done; parse_done: kfree(wb_offsets); wb_alloc_fail: kfree(ctl_offsets); return rc; } static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 count; u32 *offsets; int rc; count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off"); if (count == 0) return -EINVAL; offsets = kzalloc(sizeof(u32) * count, GFP_KERNEL); if (!offsets) { pr_err("no mem assigned for video 
intf\n"); return -ENOMEM; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off", offsets, count); if (rc) goto parse_fail; rc = mdss_mdp_video_addr_setup(mdata, offsets, count); if (rc) pr_err("unable to setup video interfaces\n"); parse_fail: kfree(offsets); return rc; } static int mdss_mdp_parse_dt_smp(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 num; u32 data[2]; int rc, len; const u32 *arr; num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data"); if (num != 2) return -EINVAL; rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num); if (rc) return rc; rc = mdss_mdp_smp_setup(mdata, data[0], data[1]); if (rc) { pr_err("unable to setup smp data\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-smp-mb-per-pipe", data); mdata->smp_mb_per_pipe = (!rc ? data[0] : 0); #if defined(CONFIG_SEC_MATISSE_PROJECT) mdata->smp_mb_per_pipe = 2; #endif rc = 0; arr = of_get_property(pdev->dev.of_node, "qcom,mdss-pipe-rgb-fixed-mmb", &len); if (arr) { int i, j, k; u32 cnt, mmb; len /= sizeof(u32); for (i = 0, k = 0; i < len; k++) { struct mdss_mdp_pipe *pipe = NULL; if (k >= mdata->nrgb_pipes) { pr_err("invalid fixed mmbs for rgb pipes\n"); return -EINVAL; } pipe = &mdata->rgb_pipes[k]; cnt = be32_to_cpu(arr[i++]); if (cnt == 0) continue; for (j = 0; j < cnt; j++) { mmb = be32_to_cpu(arr[i++]); if (mmb > mdata->smp_mb_cnt) { pr_err("overflow mmb%d: rgb%d: max%d\n", mmb, k, mdata->smp_mb_cnt); return -EINVAL; } /* rgb pipes fetches only single plane */ set_bit(mmb, pipe->smp_map[0].fixed); } if (bitmap_intersects(pipe->smp_map[0].fixed, mdata->mmb_alloc_map, mdata->smp_mb_cnt)) { pr_err("overlapping fixed mmb map\n"); return -EINVAL; } bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed, mdata->mmb_alloc_map, mdata->smp_mb_cnt); } } return rc; } static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev, char *prop_name, struct mdss_fudge_factor *ff) { int rc; u32 
data[2] = {1, 1}; rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2); if (rc) { pr_err("err reading %s\n", prop_name); } else { ff->numer = data[0]; ff->denom = data[1]; } } static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); struct mdss_prefill_data *prefill = &mdata->prefill_data; int rc; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-outstanding-buffer-bytes", &prefill->ot_bytes); if (rc) { pr_err("prefill outstanding buffer bytes not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes); if (rc) { pr_err("prefill y buffer bytes not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-scaler-buffer-lines-bilinear", &prefill->y_scaler_lines_bilinear); if (rc) { pr_err("prefill scaler lines for bilinear not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-scaler-buffer-lines-caf", &prefill->y_scaler_lines_caf); if (rc) { pr_debug("prefill scaler lines for caf not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-post-scaler-buffer-pixels", &prefill->post_scaler_pixels); if (rc) { pr_err("prefill post scaler buffer pixels not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-pingpong-buffer-pixels", &prefill->pp_pixels); if (rc) { pr_err("prefill pingpong buffer lines not specified\n"); return rc; } rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines); if (rc) { pr_err("prefill FBC lines not specified\n"); return rc; } return 0; } static int mdss_mdp_parse_dt_misc(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 data; int rc; struct property *prop = NULL; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size", &data); 
mdata->rot_block_size = (!rc ? data : 128); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rotator-ot-limit", &data); mdata->rotator_ot_limit = (!rc ? data : 0); mdata->has_bwc = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-bwc"); mdata->has_decimation = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-decimation"); mdata->has_wfd_blk = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-wfd-blk"); mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-no-lut-read"); mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled"); prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL); mdata->batfet_required = prop ? true : false; rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit)); if (rc) pr_debug("Could not read optional property: highest bank bit\n"); /* * 2x factor on AB because bus driver will divide by 2 * due to 2x ports to BIMC */ mdata->ab_factor.numer = 2; mdata->ab_factor.denom = 1; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor", &mdata->ab_factor); /* * 1.2 factor on ib as default value. This value is * experimentally determined and should be tuned in device * tree. */ mdata->ib_factor.numer = 6; mdata->ib_factor.denom = 5; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor", &mdata->ib_factor); /* * Set overlap ib value equal to ib by default. This value can * be tuned in device tree to be different from ib. * This factor apply when the max bandwidth per pipe * is the overlap BW. 
*/ mdata->ib_factor_overlap.numer = mdata->ib_factor.numer; mdata->ib_factor_overlap.denom = mdata->ib_factor.denom; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap", &mdata->ib_factor_overlap); mdata->clk_factor.numer = 1; mdata->clk_factor.denom = 1; mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor", &mdata->clk_factor); rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-bandwidth-low-kbps", &mdata->max_bw_low); if (rc) pr_debug("max bandwidth (low) property not specified\n"); rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-bandwidth-high-kbps", &mdata->max_bw_high); if (rc) pr_debug("max bandwidth (high) property not specified\n"); mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-clk-levels"); if (mdata->nclk_lvl) { mdata->clock_levels = kzalloc(sizeof(u32) * mdata->nclk_lvl, GFP_KERNEL); if (!mdata->clock_levels) { pr_err("no mem assigned for mdata clock_levels\n"); return -ENOMEM; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels", mdata->clock_levels, mdata->nclk_lvl); if (rc) pr_debug("clock levels not found\n"); } return 0; } static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); u32 *ad_offsets = NULL; int rc; mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off"); if (mdata->nad_cfgs == 0) { pr_info("SS is not using assertive display\n"); mdata->ad_cfgs = NULL; return 0; } if (mdata->nad_cfgs > mdata->nmixers_intf) return -EINVAL; mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node, "qcom,mdss-has-wb-ad"); ad_offsets = kzalloc(sizeof(u32) * mdata->nad_cfgs, GFP_KERNEL); if (!ad_offsets) { pr_err("no mem assigned: kzalloc fail\n"); return -ENOMEM; } rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets, mdata->nad_cfgs); if (rc) goto parse_done; rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets); if (rc) pr_err("unable to setup assertive display\n"); parse_done: kfree(ad_offsets); return 
rc; } static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev) { int rc; struct mdss_data_type *mdata = platform_get_drvdata(pdev); rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-bus,num-paths", &mdata->axi_port_cnt); if (rc) { pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n", rc); return rc; } mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev); if (IS_ERR_OR_NULL(mdata->bus_scale_table)) { rc = PTR_ERR(mdata->bus_scale_table); if (!rc) rc = -EINVAL; pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc); mdata->bus_scale_table = NULL; } return rc; } static int mdss_mdp_parse_dt_handler(struct platform_device *pdev, char *prop_name, u32 *offsets, int len) { int rc; rc = of_property_read_u32_array(pdev->dev.of_node, prop_name, offsets, len); if (rc) { pr_err("Error from prop %s : u32 array read\n", prop_name); return -EINVAL; } return 0; } static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev, char *prop_name) { int len = 0; of_find_property(pdev->dev.of_node, prop_name, &len); if (len < 1) { pr_err("Error from prop %s : spec error in device tree\n", prop_name); return 0; } len = len/sizeof(u32); return len; } struct mdss_data_type *mdss_mdp_get_mdata(void) { return mdss_res; } /** * mdss_is_ready() - checks if mdss is probed and ready * * Checks if mdss resources have been initialized * * returns true if mdss is ready, else returns false */ bool mdss_is_ready(void) { return mdss_mdp_get_mdata() ? true : false; } EXPORT_SYMBOL(mdss_mdp_get_mdata); /** * mdss_panel_intf_type() - checks if a given intf type is primary * @intf_val: panel interface type of the individual controller * * Individual controller queries with MDP to check if it is * configured as the primary interface. * * returns a pointer to the configured structure mdss_panel_cfg * to the controller that's configured as the primary panel interface. * returns NULL on error or if @intf_val is not the configured * controller. 
*/ struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val) { if (!mdss_res || !mdss_res->pan_cfg.init_done) return ERR_PTR(-EPROBE_DEFER); if (mdss_res->pan_cfg.pan_intf == intf_val) return &mdss_res->pan_cfg; else return NULL; } EXPORT_SYMBOL(mdss_panel_intf_type); int mdss_panel_get_boot_cfg(void) { int rc; if (!mdss_res || !mdss_res->pan_cfg.init_done) return -EPROBE_DEFER; if (mdss_res->pan_cfg.lk_cfg) rc = 1; else rc = 0; return rc; } static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable) { int rc = 0; if (!mdata->vdd_cx) return rc; if (enable) { rc = regulator_set_voltage( mdata->vdd_cx, RPM_REGULATOR_CORNER_SVS_SOC, RPM_REGULATOR_CORNER_SUPER_TURBO); if (rc < 0) goto vreg_set_voltage_fail; pr_debug("Enabling CX power rail\n"); rc = regulator_enable(mdata->vdd_cx); if (rc) { pr_err("Failed to enable regulator.\n"); return rc; } } else { pr_debug("Disabling CX power rail\n"); rc = regulator_disable(mdata->vdd_cx); if (rc) { pr_err("Failed to disable regulator.\n"); return rc; } rc = regulator_set_voltage( mdata->vdd_cx, RPM_REGULATOR_CORNER_NONE, RPM_REGULATOR_CORNER_SUPER_TURBO); if (rc < 0) goto vreg_set_voltage_fail; } return rc; vreg_set_voltage_fail: pr_err("Set vltg fail\n"); return rc; } void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable) { if (!mdata->batfet_required) return; if (!mdata->batfet) { if (enable) { mdata->batfet = devm_regulator_get(&mdata->pdev->dev, "batfet"); if (IS_ERR_OR_NULL(mdata->batfet)) { pr_debug("unable to get batfet reg. 
rc=%d\n", PTR_RET(mdata->batfet)); mdata->batfet = NULL; return; } } else { pr_debug("Batfet regulator disable w/o enable\n"); return; } } if (enable) regulator_enable(mdata->batfet); else regulator_disable(mdata->batfet); } static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on) { if (!mdata->fs) return; if (on) { pr_debug("Enable MDP FS\n"); if (!mdata->fs_ena) { regulator_enable(mdata->fs); if (!mdata->idle_pc) { mdss_mdp_cx_ctrl(mdata, true); mdss_mdp_batfet_ctrl(mdata, true); } } mdata->fs_ena = true; } else { pr_debug("Disable MDP FS\n"); if (mdata->fs_ena) { regulator_disable(mdata->fs); if (!mdata->idle_pc) { mdss_mdp_cx_ctrl(mdata, false); mdss_mdp_batfet_ctrl(mdata, false); } } mdata->fs_ena = false; } } /** * mdss_mdp_footswitch_ctrl_idle_pc() - MDSS GDSC control with idle power collapse * @on: 1 to turn on footswitch, 0 to turn off footswitch * @dev: framebuffer device node * * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command * mode displays. Upon subsequent frame update, MDSS GDSC needs to turned back * on and hw state needs to be restored. It returns error if footswitch control * API fails. 
*/ int mdss_mdp_footswitch_ctrl_idle_pc(int on, struct device *dev) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); int rc = 0; pr_debug("called on=%d\n", on); if (on) { pm_runtime_get_sync(dev); rc = mdss_iommu_ctrl(1); if (IS_ERR_VALUE(rc)) { pr_err("mdss iommu attach failed rc=%d\n", rc); return rc; } mdss_hw_init(mdata); mdata->idle_pc = false; mdss_iommu_ctrl(0); } else { mdata->idle_pc = true; pm_runtime_put_sync(dev); } return 0; } static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata) { mdata->suspend_fs_ena = mdata->fs_ena; mdss_mdp_footswitch_ctrl(mdata, false); pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena); return 0; } static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata) { if (mdata->suspend_fs_ena) mdss_mdp_footswitch_ctrl(mdata, true); pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena); return 0; } #ifdef CONFIG_PM_SLEEP static int mdss_mdp_pm_suspend(struct device *dev) { struct mdss_data_type *mdata; mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "display pm suspend\n"); return mdss_mdp_suspend_sub(mdata); } static int mdss_mdp_pm_resume(struct device *dev) { struct mdss_data_type *mdata; mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "display pm resume\n"); return mdss_mdp_resume_sub(mdata); } #endif #if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; dev_dbg(&pdev->dev, "display suspend\n"); return mdss_mdp_suspend_sub(mdata); } static int mdss_mdp_resume(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; dev_dbg(&pdev->dev, "display resume\n"); return mdss_mdp_resume_sub(mdata); } #else #define mdss_mdp_suspend NULL #define mdss_mdp_resume NULL #endif #ifdef CONFIG_PM_RUNTIME static int mdss_mdp_runtime_resume(struct 
device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); bool device_on = true; if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: resuming...\n"); /* do not resume panels when coming out of idle power collapse */ if (!mdata->idle_pc) device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); mdss_mdp_footswitch_ctrl(mdata, true); return 0; } static int mdss_mdp_runtime_idle(struct device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: idling...\n"); return 0; } static int mdss_mdp_runtime_suspend(struct device *dev) { struct mdss_data_type *mdata = dev_get_drvdata(dev); bool device_on = false; if (!mdata) return -ENODEV; dev_dbg(dev, "pm_runtime: suspending...\n"); if (mdata->clk_ena) { pr_err("MDP suspend failed\n"); return -EBUSY; } /* do not suspend panels when going in to idle power collapse */ if (!mdata->idle_pc) device_for_each_child(dev, &device_on, mdss_fb_suspres_panel); mdss_mdp_footswitch_ctrl(mdata, false); return 0; } #endif static const struct dev_pm_ops mdss_mdp_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume) SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend, mdss_mdp_runtime_resume, mdss_mdp_runtime_idle) }; static int mdss_mdp_remove(struct platform_device *pdev) { struct mdss_data_type *mdata = platform_get_drvdata(pdev); if (!mdata) return -ENODEV; pm_runtime_disable(&pdev->dev); mdss_mdp_pp_term(&pdev->dev); mdss_mdp_bus_scale_unregister(mdata); mdss_debugfs_remove(mdata); return 0; } static const struct of_device_id mdss_mdp_dt_match[] = { { .compatible = "qcom,mdss_mdp",}, {} }; MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match); static struct platform_driver mdss_mdp_driver = { .probe = mdss_mdp_probe, .remove = mdss_mdp_remove, .suspend = mdss_mdp_suspend, .resume = mdss_mdp_resume, .shutdown = NULL, .driver = { /* * Driver name must match the device name added in * platform.c. 
*/ .name = "mdp", .of_match_table = mdss_mdp_dt_match, .pm = &mdss_mdp_pm_ops, }, }; static int mdss_mdp_register_driver(void) { return platform_driver_register(&mdss_mdp_driver); } static int __init mdss_mdp_driver_init(void) { int ret; ret = mdss_mdp_register_driver(); if (ret) { pr_err("mdp_register_driver() failed!\n"); return ret; } return 0; } void mdss_mdp_underrun_clk_info(void) { pr_info(" mdp_clk = %ld, bus_ab = %llu, bus_ib = %llu\n", clk_rate_dbg, bus_ab_quota_dbg, bus_ib_quota_dbg); } module_init(mdss_mdp_driver_init);
gpl-2.0
Pivosgroup/buildroot-linux-kernel
fs/nfs/client.c
31
45685
/* client.c: NFS client sharing and management code * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/xprtrdma.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> #include <linux/nfs4_mount.h> #include <linux/lockd/bind.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/nfs_idmap.h> #include <linux/vfs.h> #include <linux/inet.h> #include <linux/in6.h> #include <linux/slab.h> #include <net/ipv6.h> #include <linux/nfs_xdr.h> #include <linux/sunrpc/bc_xprt.h> #include <asm/system.h> #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" #include "iostat.h" #include "internal.h" #include "fscache.h" #define NFSDBG_FACILITY NFSDBG_CLIENT static DEFINE_SPINLOCK(nfs_client_lock); static LIST_HEAD(nfs_client_list); static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); /* * RPC cruft for NFS */ static struct rpc_version *nfs_version[5] = { [2] = &nfs_version2, #ifdef CONFIG_NFS_V3 [3] = &nfs_version3, #endif #ifdef CONFIG_NFS_V4 [4] = &nfs_version4, #endif }; struct rpc_program nfs_program = { .name = "nfs", .number = NFS_PROGRAM, .nrvers = ARRAY_SIZE(nfs_version), .version = nfs_version, .stats = &nfs_rpcstat, .pipe_dir_name = "/nfs", }; struct rpc_stat nfs_rpcstat = { .program = &nfs_program }; 
#ifdef CONFIG_NFS_V3_ACL static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program }; static struct rpc_version * nfsacl_version[] = { [3] = &nfsacl_version3, }; struct rpc_program nfsacl_program = { .name = "nfsacl", .number = NFS_ACL_PROGRAM, .nrvers = ARRAY_SIZE(nfsacl_version), .version = nfsacl_version, .stats = &nfsacl_rpcstat, }; #endif /* CONFIG_NFS_V3_ACL */ struct nfs_client_initdata { const char *hostname; const struct sockaddr *addr; size_t addrlen; const struct nfs_rpc_ops *rpc_ops; int proto; u32 minorversion; }; /* * Allocate a shared client record * * Since these are allocated/deallocated very rarely, we don't * bother putting them in a slab cache... */ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) { struct nfs_client *clp; struct rpc_cred *cred; int err = -ENOMEM; if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) goto error_0; clp->rpc_ops = cl_init->rpc_ops; atomic_set(&clp->cl_count, 1); clp->cl_cons_state = NFS_CS_INITING; memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen); clp->cl_addrlen = cl_init->addrlen; if (cl_init->hostname) { err = -ENOMEM; clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL); if (!clp->cl_hostname) goto error_cleanup; } INIT_LIST_HEAD(&clp->cl_superblocks); clp->cl_rpcclient = ERR_PTR(-EINVAL); clp->cl_proto = cl_init->proto; #ifdef CONFIG_NFS_V4 INIT_LIST_HEAD(&clp->cl_delegations); spin_lock_init(&clp->cl_lock); INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); clp->cl_boot_time = CURRENT_TIME; clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; clp->cl_minorversion = cl_init->minorversion; #endif cred = rpc_lookup_machine_cred(); if (!IS_ERR(cred)) clp->cl_machine_cred = cred; nfs_fscache_get_client_cookie(clp); return clp; error_cleanup: kfree(clp); error_0: return ERR_PTR(err); } #ifdef CONFIG_NFS_V4 /* * Clears/puts all minor version specific parts from an nfs_client struct * reverting it to minorversion 0. 
*/ static void nfs4_clear_client_minor_version(struct nfs_client *clp) { #ifdef CONFIG_NFS_V4_1 if (nfs4_has_session(clp)) { nfs4_destroy_session(clp->cl_session); clp->cl_session = NULL; } clp->cl_call_sync = _nfs4_call_sync; #endif /* CONFIG_NFS_V4_1 */ } /* * Destroy the NFS4 callback service */ static void nfs4_destroy_callback(struct nfs_client *clp) { if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) nfs_callback_down(clp->cl_minorversion); } static void nfs4_shutdown_client(struct nfs_client *clp) { if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) nfs4_kill_renewd(clp); nfs4_clear_client_minor_version(clp); nfs4_destroy_callback(clp); if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) nfs_idmap_delete(clp); rpc_destroy_wait_queue(&clp->cl_rpcwaitq); } #else static void nfs4_shutdown_client(struct nfs_client *clp) { } #endif /* CONFIG_NFS_V4 */ /* * Destroy a shared client record */ static void nfs_free_client(struct nfs_client *clp) { dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); nfs4_shutdown_client(clp); nfs_fscache_release_client_cookie(clp); /* -EIO all pending I/O */ if (!IS_ERR(clp->cl_rpcclient)) rpc_shutdown_client(clp->cl_rpcclient); if (clp->cl_machine_cred != NULL) put_rpccred(clp->cl_machine_cred); kfree(clp->cl_hostname); kfree(clp); dprintk("<-- nfs_free_client()\n"); } /* * Release a reference to a shared client record */ void nfs_put_client(struct nfs_client *clp) { if (!clp) return; dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count)); if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) { list_del(&clp->cl_share_link); spin_unlock(&nfs_client_lock); BUG_ON(!list_empty(&clp->cl_superblocks)); nfs_free_client(clp); } } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /* * Test if two ip6 socket addresses refer to the same socket by * comparing relevant fields. The padding bytes specifically, are not * compared. 
sin6_flowinfo is not compared because it only affects QoS * and sin6_scope_id is only compared if the address is "link local" * because "link local" addresses need only be unique to a specific * link. Conversely, ordinary unicast addresses might have different * sin6_scope_id. * * The caller should ensure both socket addresses are AF_INET6. */ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; if (ipv6_addr_scope(&sin1->sin6_addr) == IPV6_ADDR_SCOPE_LINKLOCAL && sin1->sin6_scope_id != sin2->sin6_scope_id) return 0; return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); } #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, const struct sockaddr *sa2) { return 0; } #endif /* * Test if two ip4 socket addresses refer to the same socket, by * comparing relevant fields. The padding bytes specifically, are * not compared. * * The caller should ensure both socket addresses are AF_INET. 
*/ static int nfs_sockaddr_match_ipaddr4(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } static int nfs_sockaddr_cmp_ip6(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; return nfs_sockaddr_match_ipaddr6(sa1, sa2) && (sin1->sin6_port == sin2->sin6_port); } static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; return nfs_sockaddr_match_ipaddr4(sa1, sa2) && (sin1->sin_port == sin2->sin_port); } /* * Test if two socket addresses represent the same actual socket, * by comparing (only) relevant fields, excluding the port number. */ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, const struct sockaddr *sa2) { if (sa1->sa_family != sa2->sa_family) return 0; switch (sa1->sa_family) { case AF_INET: return nfs_sockaddr_match_ipaddr4(sa1, sa2); case AF_INET6: return nfs_sockaddr_match_ipaddr6(sa1, sa2); } return 0; } /* * Test if two socket addresses represent the same actual socket, * by comparing (only) relevant fields, including the port number. 
*/ static int nfs_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2) { if (sa1->sa_family != sa2->sa_family) return 0; switch (sa1->sa_family) { case AF_INET: return nfs_sockaddr_cmp_ip4(sa1, sa2); case AF_INET6: return nfs_sockaddr_cmp_ip6(sa1, sa2); } return 0; } /* * Find a client by IP address and protocol version * - returns NULL if no such client */ struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion) { struct nfs_client *clp; spin_lock(&nfs_client_lock); list_for_each_entry(clp, &nfs_client_list, cl_share_link) { struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; /* Don't match clients that failed to initialise properly */ if (!(clp->cl_cons_state == NFS_CS_READY || clp->cl_cons_state == NFS_CS_SESSION_INITING)) continue; /* Different NFS versions cannot share the same nfs_client */ if (clp->rpc_ops->version != nfsversion) continue; /* Match only the IP address, not the port number */ if (!nfs_sockaddr_match_ipaddr(addr, clap)) continue; atomic_inc(&clp->cl_count); spin_unlock(&nfs_client_lock); return clp; } spin_unlock(&nfs_client_lock); return NULL; } /* * Find a client by IP address and protocol version * - returns NULL if no such client */ struct nfs_client *nfs_find_client_next(struct nfs_client *clp) { struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr; u32 nfsvers = clp->rpc_ops->version; spin_lock(&nfs_client_lock); list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) { struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; /* Don't match clients that failed to initialise properly */ if (clp->cl_cons_state != NFS_CS_READY) continue; /* Different NFS versions cannot share the same nfs_client */ if (clp->rpc_ops->version != nfsvers) continue; /* Match only the IP address, not the port number */ if (!nfs_sockaddr_match_ipaddr(sap, clap)) continue; atomic_inc(&clp->cl_count); spin_unlock(&nfs_client_lock); return clp; } spin_unlock(&nfs_client_lock); return NULL; } /* * Find an 
nfs_client on the list that matches the initialisation data * that is supplied. */ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *data) { struct nfs_client *clp; const struct sockaddr *sap = data->addr; list_for_each_entry(clp, &nfs_client_list, cl_share_link) { const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; /* Don't match clients that failed to initialise properly */ if (clp->cl_cons_state < 0) continue; /* Different NFS versions cannot share the same nfs_client */ if (clp->rpc_ops != data->rpc_ops) continue; if (clp->cl_proto != data->proto) continue; /* Match nfsv4 minorversion */ if (clp->cl_minorversion != data->minorversion) continue; /* Match the full socket address */ if (!nfs_sockaddr_cmp(sap, clap)) continue; atomic_inc(&clp->cl_count); return clp; } return NULL; } /* * Look up a client by IP address and protocol version * - creates a new record if one doesn't yet exist */ static struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init) { struct nfs_client *clp, *new = NULL; int error; dprintk("--> nfs_get_client(%s,v%u)\n", cl_init->hostname ?: "", cl_init->rpc_ops->version); /* see if the client already exists */ do { spin_lock(&nfs_client_lock); clp = nfs_match_client(cl_init); if (clp) goto found_client; if (new) goto install_client; spin_unlock(&nfs_client_lock); new = nfs_alloc_client(cl_init); } while (!IS_ERR(new)); dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new)); return new; /* install a new client and return with it unready */ install_client: clp = new; list_add(&clp->cl_share_link, &nfs_client_list); spin_unlock(&nfs_client_lock); dprintk("--> nfs_get_client() = %p [new]\n", clp); return clp; /* found an existing client * - make sure it's ready before returning */ found_client: spin_unlock(&nfs_client_lock); if (new) nfs_free_client(new); error = wait_event_killable(nfs_client_active_wq, clp->cl_cons_state < NFS_CS_INITING); if (error < 0) { nfs_put_client(clp); 
return ERR_PTR(-ERESTARTSYS); } if (clp->cl_cons_state < NFS_CS_READY) { error = clp->cl_cons_state; nfs_put_client(clp); return ERR_PTR(error); } BUG_ON(clp->cl_cons_state != NFS_CS_READY); dprintk("--> nfs_get_client() = %p [share]\n", clp); return clp; } /* * Mark a server as ready or failed */ void nfs_mark_client_ready(struct nfs_client *clp, int state) { clp->cl_cons_state = state; wake_up_all(&nfs_client_active_wq); } /* * With sessions, the client is not marked ready until after a * successful EXCHANGE_ID and CREATE_SESSION. * * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate * other versions of NFS can be tried. */ int nfs4_check_client_ready(struct nfs_client *clp) { if (!nfs4_has_session(clp)) return 0; if (clp->cl_cons_state < NFS_CS_READY) return -EPROTONOSUPPORT; return 0; } /* * Initialise the timeout values for a connection */ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans) { to->to_initval = timeo * HZ / 10; to->to_retries = retrans; switch (proto) { case XPRT_TRANSPORT_TCP: case XPRT_TRANSPORT_RDMA: if (to->to_retries == 0) to->to_retries = NFS_DEF_TCP_RETRANS; if (to->to_initval == 0) to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10; if (to->to_initval > NFS_MAX_TCP_TIMEOUT) to->to_initval = NFS_MAX_TCP_TIMEOUT; to->to_increment = to->to_initval; to->to_maxval = to->to_initval + (to->to_increment * to->to_retries); if (to->to_maxval > NFS_MAX_TCP_TIMEOUT) to->to_maxval = NFS_MAX_TCP_TIMEOUT; if (to->to_maxval < to->to_initval) to->to_maxval = to->to_initval; to->to_exponential = 0; break; case XPRT_TRANSPORT_UDP: if (to->to_retries == 0) to->to_retries = NFS_DEF_UDP_RETRANS; if (!to->to_initval) to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10; if (to->to_initval > NFS_MAX_UDP_TIMEOUT) to->to_initval = NFS_MAX_UDP_TIMEOUT; to->to_maxval = NFS_MAX_UDP_TIMEOUT; to->to_exponential = 1; break; default: BUG(); } } /* * Create an RPC client handle */ static int 
nfs_create_rpc_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, rpc_authflavor_t flavor, int discrtry, int noresvport) { struct rpc_clnt *clnt = NULL; struct rpc_create_args args = { .protocol = clp->cl_proto, .address = (struct sockaddr *)&clp->cl_addr, .addrsize = clp->cl_addrlen, .timeout = timeparms, .servername = clp->cl_hostname, .program = &nfs_program, .version = clp->rpc_ops->version, .authflavor = flavor, }; if (discrtry) args.flags |= RPC_CLNT_CREATE_DISCRTRY; if (noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; if (!IS_ERR(clp->cl_rpcclient)) return 0; clnt = rpc_create(&args); if (IS_ERR(clnt)) { dprintk("%s: cannot create RPC client. Error = %ld\n", __func__, PTR_ERR(clnt)); return PTR_ERR(clnt); } clp->cl_rpcclient = clnt; return 0; } /* * Version 2 or 3 client destruction */ static void nfs_destroy_server(struct nfs_server *server) { if (!(server->flags & NFS_MOUNT_NONLM)) nlmclnt_done(server->nlm_host); } /* * Version 2 or 3 lockd setup */ static int nfs_start_lockd(struct nfs_server *server) { struct nlm_host *host; struct nfs_client *clp = server->nfs_client; struct nlmclnt_initdata nlm_init = { .hostname = clp->cl_hostname, .address = (struct sockaddr *)&clp->cl_addr, .addrlen = clp->cl_addrlen, .nfs_version = clp->rpc_ops->version, .noresvport = server->flags & NFS_MOUNT_NORESVPORT ? 
1 : 0, }; if (nlm_init.nfs_version > 3) return 0; if (server->flags & NFS_MOUNT_NONLM) return 0; switch (clp->cl_proto) { default: nlm_init.protocol = IPPROTO_TCP; break; case XPRT_TRANSPORT_UDP: nlm_init.protocol = IPPROTO_UDP; } host = nlmclnt_init(&nlm_init); if (IS_ERR(host)) return PTR_ERR(host); server->nlm_host = host; server->destroy = nfs_destroy_server; return 0; } /* * Initialise an NFSv3 ACL client connection */ #ifdef CONFIG_NFS_V3_ACL static void nfs_init_server_aclclient(struct nfs_server *server) { if (server->nfs_client->rpc_ops->version != 3) goto out_noacl; if (server->flags & NFS_MOUNT_NOACL) goto out_noacl; server->client_acl = rpc_bind_new_program(server->client, &nfsacl_program, 3); if (IS_ERR(server->client_acl)) goto out_noacl; /* No errors! Assume that Sun nfsacls are supported */ server->caps |= NFS_CAP_ACLS; return; out_noacl: server->caps &= ~NFS_CAP_ACLS; } #else static inline void nfs_init_server_aclclient(struct nfs_server *server) { server->flags &= ~NFS_MOUNT_NOACL; server->caps &= ~NFS_CAP_ACLS; } #endif /* * Create a general RPC client */ static int nfs_init_server_rpcclient(struct nfs_server *server, const struct rpc_timeout *timeo, rpc_authflavor_t pseudoflavour) { struct nfs_client *clp = server->nfs_client; server->client = rpc_clone_client(clp->cl_rpcclient); if (IS_ERR(server->client)) { dprintk("%s: couldn't create rpc_client!\n", __func__); return PTR_ERR(server->client); } memcpy(&server->client->cl_timeout_default, timeo, sizeof(server->client->cl_timeout_default)); server->client->cl_timeout = &server->client->cl_timeout_default; if (pseudoflavour != clp->cl_rpcclient->cl_auth->au_flavor) { struct rpc_auth *auth; auth = rpcauth_create(pseudoflavour, server->client); if (IS_ERR(auth)) { dprintk("%s: couldn't create credcache!\n", __func__); return PTR_ERR(auth); } } server->client->cl_softrtry = 0; if (server->flags & NFS_MOUNT_SOFT) server->client->cl_softrtry = 1; return 0; } /* * Initialise an NFS2 or NFS3 client */ 
static int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, const struct nfs_parsed_mount_data *data) { int error; if (clp->cl_cons_state == NFS_CS_READY) { /* the client is already initialised */ dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp); return 0; } /* * Create a client RPC handle for doing FSSTAT with UNIX auth only * - RFC 2623, sec 2.3.2 */ error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0, data->flags & NFS_MOUNT_NORESVPORT); if (error < 0) goto error; nfs_mark_client_ready(clp, NFS_CS_READY); return 0; error: nfs_mark_client_ready(clp, error); dprintk("<-- nfs_init_client() = xerror %d\n", error); return error; } /* * Create a version 2 or 3 client */ static int nfs_init_server(struct nfs_server *server, const struct nfs_parsed_mount_data *data) { struct nfs_client_initdata cl_init = { .hostname = data->nfs_server.hostname, .addr = (const struct sockaddr *)&data->nfs_server.address, .addrlen = data->nfs_server.addrlen, .rpc_ops = &nfs_v2_clientops, .proto = data->nfs_server.protocol, }; struct rpc_timeout timeparms; struct nfs_client *clp; int error; dprintk("--> nfs_init_server()\n"); #ifdef CONFIG_NFS_V3 if (data->version == 3) cl_init.rpc_ops = &nfs_v3_clientops; #endif /* Allocate or find a client reference we can use */ clp = nfs_get_client(&cl_init); if (IS_ERR(clp)) { dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp)); return PTR_ERR(clp); } nfs_init_timeout_values(&timeparms, data->nfs_server.protocol, data->timeo, data->retrans); error = nfs_init_client(clp, &timeparms, data); if (error < 0) goto error; server->nfs_client = clp; /* Initialise the client representation from the mount data */ server->flags = data->flags; server->options = data->options; server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME; if (data->rsize) server->rsize = nfs_block_size(data->rsize, 
NULL); if (data->wsize) server->wsize = nfs_block_size(data->wsize, NULL); server->acregmin = data->acregmin * HZ; server->acregmax = data->acregmax * HZ; server->acdirmin = data->acdirmin * HZ; server->acdirmax = data->acdirmax * HZ; /* Start lockd here, before we might error out */ error = nfs_start_lockd(server); if (error < 0) goto error; server->port = data->nfs_server.port; error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]); if (error < 0) goto error; /* Preserve the values of mount_server-related mount options */ if (data->mount_server.addrlen) { memcpy(&server->mountd_address, &data->mount_server.address, data->mount_server.addrlen); server->mountd_addrlen = data->mount_server.addrlen; } server->mountd_version = data->mount_server.version; server->mountd_port = data->mount_server.port; server->mountd_protocol = data->mount_server.protocol; server->namelen = data->namlen; /* Create a client RPC handle for the NFSv3 ACL management interface */ nfs_init_server_aclclient(server); dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); return 0; error: server->nfs_client = NULL; nfs_put_client(clp); dprintk("<-- nfs_init_server() = xerror %d\n", error); return error; } /* * Load up the server record from information gained in an fsinfo record */ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *fsinfo) { unsigned long max_rpc_payload; /* Work out a lot of parameters */ if (server->rsize == 0) server->rsize = nfs_block_size(fsinfo->rtpref, NULL); if (server->wsize == 0) server->wsize = nfs_block_size(fsinfo->wtpref, NULL); if (fsinfo->rtmax >= 512 && server->rsize > fsinfo->rtmax) server->rsize = nfs_block_size(fsinfo->rtmax, NULL); if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax) server->wsize = nfs_block_size(fsinfo->wtmax, NULL); max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL); if (server->rsize > max_rpc_payload) server->rsize = max_rpc_payload; if (server->rsize > 
NFS_MAX_FILE_IO_SIZE) server->rsize = NFS_MAX_FILE_IO_SIZE; server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; server->backing_dev_info.name = "nfs"; server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD; if (server->wsize > max_rpc_payload) server->wsize = max_rpc_payload; if (server->wsize > NFS_MAX_FILE_IO_SIZE) server->wsize = NFS_MAX_FILE_IO_SIZE; server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL); server->dtsize = nfs_block_size(fsinfo->dtpref, NULL); if (server->dtsize > PAGE_CACHE_SIZE) server->dtsize = PAGE_CACHE_SIZE; if (server->dtsize > server->rsize) server->dtsize = server->rsize; if (server->flags & NFS_MOUNT_NOAC) { server->acregmin = server->acregmax = 0; server->acdirmin = server->acdirmax = 0; } server->maxfilesize = fsinfo->maxfilesize; /* We're airborne Set socket buffersize */ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100); } /* * Probe filesystem information, including the FSID on v2/v3 */ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs_fattr *fattr) { struct nfs_fsinfo fsinfo; struct nfs_client *clp = server->nfs_client; int error; dprintk("--> nfs_probe_fsinfo()\n"); if (clp->rpc_ops->set_capabilities != NULL) { error = clp->rpc_ops->set_capabilities(server, mntfh); if (error < 0) goto out_error; } fsinfo.fattr = fattr; nfs_fattr_init(fattr); error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo); if (error < 0) goto out_error; nfs_server_set_fsinfo(server, &fsinfo); /* Get some general file system info */ if (server->namelen == 0) { struct nfs_pathconf pathinfo; pathinfo.fattr = fattr; nfs_fattr_init(fattr); if (clp->rpc_ops->pathconf(server, mntfh, &pathinfo) >= 0) server->namelen = pathinfo.max_namelen; } dprintk("<-- nfs_probe_fsinfo() = 0\n"); return 0; out_error: dprintk("nfs_probe_fsinfo: error = %d\n", -error); return error; } /* * Copy useful 
information when duplicating a server record */ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source) { target->flags = source->flags; target->rsize = source->rsize; target->wsize = source->wsize; target->acregmin = source->acregmin; target->acregmax = source->acregmax; target->acdirmin = source->acdirmin; target->acdirmax = source->acdirmax; target->caps = source->caps; target->options = source->options; } /* * Allocate and initialise a server record */ static struct nfs_server *nfs_alloc_server(void) { struct nfs_server *server; server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL); if (!server) return NULL; server->client = server->client_acl = ERR_PTR(-EINVAL); /* Zero out the NFS state stuff */ INIT_LIST_HEAD(&server->client_link); INIT_LIST_HEAD(&server->master_link); atomic_set(&server->active, 0); server->io_stats = nfs_alloc_iostats(); if (!server->io_stats) { kfree(server); return NULL; } if (bdi_init(&server->backing_dev_info)) { nfs_free_iostats(server->io_stats); kfree(server); return NULL; } return server; } /* * Free up a server record */ void nfs_free_server(struct nfs_server *server) { dprintk("--> nfs_free_server()\n"); spin_lock(&nfs_client_lock); list_del(&server->client_link); list_del(&server->master_link); spin_unlock(&nfs_client_lock); if (server->destroy != NULL) server->destroy(server); if (!IS_ERR(server->client_acl)) rpc_shutdown_client(server->client_acl); if (!IS_ERR(server->client)) rpc_shutdown_client(server->client); nfs_put_client(server->nfs_client); nfs_free_iostats(server->io_stats); bdi_destroy(&server->backing_dev_info); kfree(server); nfs_release_automount_timer(); dprintk("<-- nfs_free_server()\n"); } /* * Create a version 2 or 3 volume record * - keyed on server and FSID */ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data, struct nfs_fh *mntfh) { struct nfs_server *server; struct nfs_fattr fattr; int error; server = nfs_alloc_server(); if (!server) return 
ERR_PTR(-ENOMEM); /* Get a client representation */ error = nfs_init_server(server, data); if (error < 0) goto error; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); /* Probe the root fh to retrieve its FSID */ error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; if (server->nfs_client->rpc_ops->version == 3) { if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) server->namelen = NFS3_MAXNAMLEN; if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) server->caps |= NFS_CAP_READDIRPLUS; } else { if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; } if (!(fattr.valid & NFS_ATTR_FATTR)) { error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); if (error < 0) { dprintk("nfs_create_server: getattr error = %d\n", -error); goto error; } } memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid)); dprintk("Server FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; return server; error: nfs_free_server(server); return ERR_PTR(error); } #ifdef CONFIG_NFS_V4 /* * Initialize the NFS4 callback service */ static int nfs4_init_callback(struct nfs_client *clp) { int error; if (clp->rpc_ops->version == 4) { if (nfs4_has_session(clp)) { error = xprt_setup_backchannel( clp->cl_rpcclient->cl_xprt, NFS41_BC_MIN_CALLBACKS); if (error < 0) return error; } error = nfs_callback_up(clp->cl_minorversion, clp->cl_rpcclient->cl_xprt); if (error < 0) { dprintk("%s: failed to start callback. 
Error = %d\n", __func__, error); return error; } __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state); } return 0; } /* * Initialize the minor version specific parts of an NFS4 client record */ static int nfs4_init_client_minor_version(struct nfs_client *clp) { clp->cl_call_sync = _nfs4_call_sync; #if defined(CONFIG_NFS_V4_1) if (clp->cl_minorversion) { struct nfs4_session *session = NULL; /* * Create the session and mark it expired. * When a SEQUENCE operation encounters the expired session * it will do session recovery to initialize it. */ session = nfs4_alloc_session(clp); if (!session) return -ENOMEM; clp->cl_session = session; clp->cl_call_sync = _nfs4_call_sync_session; } #endif /* CONFIG_NFS_V4_1 */ return nfs4_init_callback(clp); } /* * Initialise an NFS4 client record */ static int nfs4_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, const char *ip_addr, rpc_authflavor_t authflavour, int flags) { int error; if (clp->cl_cons_state == NFS_CS_READY) { /* the client is initialised already */ dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp); return 0; } /* Check NFS protocol revision and initialize RPC op vector */ clp->rpc_ops = &nfs_v4_clientops; error = nfs_create_rpc_client(clp, timeparms, authflavour, 1, flags & NFS_MOUNT_NORESVPORT); if (error < 0) goto error; strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); error = nfs_idmap_new(clp); if (error < 0) { dprintk("%s: failed to create idmapper. 
Error = %d\n", __func__, error); goto error; } __set_bit(NFS_CS_IDMAP, &clp->cl_res_state); error = nfs4_init_client_minor_version(clp); if (error < 0) goto error; if (!nfs4_has_session(clp)) nfs_mark_client_ready(clp, NFS_CS_READY); return 0; error: nfs_mark_client_ready(clp, error); dprintk("<-- nfs4_init_client() = xerror %d\n", error); return error; } /* * Set up an NFS4 client */ static int nfs4_set_client(struct nfs_server *server, const char *hostname, const struct sockaddr *addr, const size_t addrlen, const char *ip_addr, rpc_authflavor_t authflavour, int proto, const struct rpc_timeout *timeparms, u32 minorversion) { struct nfs_client_initdata cl_init = { .hostname = hostname, .addr = addr, .addrlen = addrlen, .rpc_ops = &nfs_v4_clientops, .proto = proto, .minorversion = minorversion, }; struct nfs_client *clp; int error; dprintk("--> nfs4_set_client()\n"); /* Allocate or find a client reference we can use */ clp = nfs_get_client(&cl_init); if (IS_ERR(clp)) { error = PTR_ERR(clp); goto error; } error = nfs4_init_client(clp, timeparms, ip_addr, authflavour, server->flags); if (error < 0) goto error_put; server->nfs_client = clp; dprintk("<-- nfs4_set_client() = 0 [new %p]\n", clp); return 0; error_put: nfs_put_client(clp); error: dprintk("<-- nfs4_set_client() = xerror %d\n", error); return error; } /* * Session has been established, and the client marked ready. * Set the mount rsize and wsize with negotiated fore channel * attributes which will be bound checked in nfs_server_set_fsinfo. 
*/ static void nfs4_session_set_rwsize(struct nfs_server *server) { #ifdef CONFIG_NFS_V4_1 struct nfs4_session *sess; u32 server_resp_sz; u32 server_rqst_sz; if (!nfs4_has_session(server->nfs_client)) return; sess = server->nfs_client->cl_session; server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; if (server->rsize > server_resp_sz) server->rsize = server_resp_sz; if (server->wsize > server_rqst_sz) server->wsize = server_rqst_sz; #endif /* CONFIG_NFS_V4_1 */ } /* * Create a version 4 volume record */ static int nfs4_init_server(struct nfs_server *server, const struct nfs_parsed_mount_data *data) { struct rpc_timeout timeparms; int error; dprintk("--> nfs4_init_server()\n"); nfs_init_timeout_values(&timeparms, data->nfs_server.protocol, data->timeo, data->retrans); /* Initialise the client representation from the mount data */ server->flags = data->flags; server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| NFS_CAP_POSIX_LOCK; server->options = data->options; /* Get a client record */ error = nfs4_set_client(server, data->nfs_server.hostname, (const struct sockaddr *)&data->nfs_server.address, data->nfs_server.addrlen, data->client_address, data->auth_flavors[0], data->nfs_server.protocol, &timeparms, data->minorversion); if (error < 0) goto error; if (data->rsize) server->rsize = nfs_block_size(data->rsize, NULL); if (data->wsize) server->wsize = nfs_block_size(data->wsize, NULL); server->acregmin = data->acregmin * HZ; server->acregmax = data->acregmax * HZ; server->acdirmin = data->acdirmin * HZ; server->acdirmax = data->acdirmax * HZ; server->port = data->nfs_server.port; error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]); error: /* Done */ dprintk("<-- nfs4_init_server() = %d\n", error); return error; } /* * Create a version 4 volume record * - keyed on server and FSID */ struct nfs_server *nfs4_create_server(const struct 
nfs_parsed_mount_data *data, struct nfs_fh *mntfh) { struct nfs_fattr fattr; struct nfs_server *server; int error; dprintk("--> nfs4_create_server()\n"); server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); /* set up the general RPC client */ error = nfs4_init_server(server, data); if (error < 0) goto error; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); error = nfs4_init_session(server); if (error < 0) goto error; /* Probe the root fh to retrieve its FSID */ error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path); if (error < 0) goto error; dprintk("Server FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); dprintk("Mount FH: %d\n", mntfh->size); nfs4_session_set_rwsize(server); error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; dprintk("<-- nfs4_create_server() = %p\n", server); return server; error: nfs_free_server(server); dprintk("<-- nfs4_create_server() = error %d\n", error); return ERR_PTR(error); } /* * Create an NFS4 referral server record */ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, struct nfs_fh *mntfh) { struct nfs_client *parent_client; struct nfs_server *server, *parent_server; struct nfs_fattr fattr; int error; dprintk("--> nfs4_create_referral_server()\n"); server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); parent_server = NFS_SB(data->sb); parent_client = parent_server->nfs_client; /* Initialise the client representation from the parent server */ nfs_server_copy_userdata(server, parent_server); server->caps |= 
NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; /* Get a client representation. * Note: NFSv4 always uses TCP, */ error = nfs4_set_client(server, data->hostname, data->addr, data->addrlen, parent_client->cl_ipaddr, data->authflavor, parent_server->client->cl_xprt->prot, parent_server->client->cl_timeout, parent_client->cl_minorversion); if (error < 0) goto error; error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor); if (error < 0) goto error; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); /* Probe the root fh to retrieve its FSID and filehandle */ error = nfs4_path_walk(server, mntfh, data->mnt_path); if (error < 0) goto error; /* probe the filesystem info for this server filesystem */ error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; dprintk("Referral FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; dprintk("<-- nfs_create_referral_server() = %p\n", server); return server; error: nfs_free_server(server); dprintk("<-- nfs4_create_referral_server() = error %d\n", error); return ERR_PTR(error); } #endif /* CONFIG_NFS_V4 */ /* * Clone an NFS2, NFS3 or NFS4 server record */ struct nfs_server *nfs_clone_server(struct nfs_server *source, struct nfs_fh *fh, struct nfs_fattr *fattr) { struct nfs_server *server; struct nfs_fattr fattr_fsinfo; int error; dprintk("--> nfs_clone_server(,%llx:%llx,)\n", (unsigned long long) fattr->fsid.major, (unsigned long long) fattr->fsid.minor); server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); /* Copy data from the source */ 
server->nfs_client = source->nfs_client; atomic_inc(&server->nfs_client->cl_count); nfs_server_copy_userdata(server, source); server->fsid = fattr->fsid; error = nfs_init_server_rpcclient(server, source->client->cl_timeout, source->client->cl_auth->au_flavor); if (error < 0) goto out_free_server; if (!IS_ERR(source->client_acl)) nfs_init_server_aclclient(server); /* probe the filesystem info for this server filesystem */ error = nfs_probe_fsinfo(server, fh, &fattr_fsinfo); if (error < 0) goto out_free_server; if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; dprintk("Cloned FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); error = nfs_start_lockd(server); if (error < 0) goto out_free_server; spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; dprintk("<-- nfs_clone_server() = %p\n", server); return server; out_free_server: nfs_free_server(server); dprintk("<-- nfs_clone_server() = error %d\n", error); return ERR_PTR(error); } #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_fs_nfs; static int nfs_server_list_open(struct inode *inode, struct file *file); static void *nfs_server_list_start(struct seq_file *p, loff_t *pos); static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_server_list_stop(struct seq_file *p, void *v); static int nfs_server_list_show(struct seq_file *m, void *v); static const struct seq_operations nfs_server_list_ops = { .start = nfs_server_list_start, .next = nfs_server_list_next, .stop = nfs_server_list_stop, .show = nfs_server_list_show, }; static const struct file_operations nfs_server_list_fops = { .open = nfs_server_list_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; static int 
nfs_volume_list_open(struct inode *inode, struct file *file); static void *nfs_volume_list_start(struct seq_file *p, loff_t *pos); static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_volume_list_stop(struct seq_file *p, void *v); static int nfs_volume_list_show(struct seq_file *m, void *v); static const struct seq_operations nfs_volume_list_ops = { .start = nfs_volume_list_start, .next = nfs_volume_list_next, .stop = nfs_volume_list_stop, .show = nfs_volume_list_show, }; static const struct file_operations nfs_volume_list_fops = { .open = nfs_volume_list_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; /* * open "/proc/fs/nfsfs/servers" which provides a summary of servers with which * we're dealing */ static int nfs_server_list_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; ret = seq_open(file, &nfs_server_list_ops); if (ret < 0) return ret; m = file->private_data; m->private = PDE(inode)->data; return 0; } /* * set up the iterator to start reading from the server list and return the first item */ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) { /* lock the list against modification */ spin_lock(&nfs_client_lock); return seq_list_start_head(&nfs_client_list, *_pos); } /* * move to next server */ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) { return seq_list_next(v, &nfs_client_list, pos); } /* * clean up after reading from the transports list */ static void nfs_server_list_stop(struct seq_file *p, void *v) { spin_unlock(&nfs_client_lock); } /* * display a header line followed by a load of call lines */ static int nfs_server_list_show(struct seq_file *m, void *v) { struct nfs_client *clp; /* display header on line 1 */ if (v == &nfs_client_list) { seq_puts(m, "NV SERVER PORT USE HOSTNAME\n"); return 0; } /* display one transport per line on subsequent lines */ clp = list_entry(v, struct nfs_client, 
cl_share_link); seq_printf(m, "v%u %s %s %3d %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT), atomic_read(&clp->cl_count), clp->cl_hostname); return 0; } /* * open "/proc/fs/nfsfs/volumes" which provides a summary of extant volumes */ static int nfs_volume_list_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; ret = seq_open(file, &nfs_volume_list_ops); if (ret < 0) return ret; m = file->private_data; m->private = PDE(inode)->data; return 0; } /* * set up the iterator to start reading from the volume list and return the first item */ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) { /* lock the list against modification */ spin_lock(&nfs_client_lock); return seq_list_start_head(&nfs_volume_list, *_pos); } /* * move to next volume */ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) { return seq_list_next(v, &nfs_volume_list, pos); } /* * clean up after reading from the transports list */ static void nfs_volume_list_stop(struct seq_file *p, void *v) { spin_unlock(&nfs_client_lock); } /* * display a header line followed by a load of call lines */ static int nfs_volume_list_show(struct seq_file *m, void *v) { struct nfs_server *server; struct nfs_client *clp; char dev[8], fsid[17]; /* display header on line 1 */ if (v == &nfs_volume_list) { seq_puts(m, "NV SERVER PORT DEV FSID FSC\n"); return 0; } /* display one transport per line on subsequent lines */ server = list_entry(v, struct nfs_server, master_link); clp = server->nfs_client; snprintf(dev, 8, "%u:%u", MAJOR(server->s_dev), MINOR(server->s_dev)); snprintf(fsid, 17, "%llx:%llx", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); seq_printf(m, "v%u %s %s %-7s %-17s %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT), 
dev, fsid, nfs_server_fscache_state(server)); return 0; } /* * initialise the /proc/fs/nfsfs/ directory */ int __init nfs_fs_proc_init(void) { struct proc_dir_entry *p; proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL); if (!proc_fs_nfs) goto error_0; /* a file of servers with which we're dealing */ p = proc_create("servers", S_IFREG|S_IRUGO, proc_fs_nfs, &nfs_server_list_fops); if (!p) goto error_1; /* a file of volumes that we have mounted */ p = proc_create("volumes", S_IFREG|S_IRUGO, proc_fs_nfs, &nfs_volume_list_fops); if (!p) goto error_2; return 0; error_2: remove_proc_entry("servers", proc_fs_nfs); error_1: remove_proc_entry("fs/nfsfs", NULL); error_0: return -ENOMEM; } /* * clean up the /proc/fs/nfsfs/ directory */ void nfs_fs_proc_exit(void) { remove_proc_entry("volumes", proc_fs_nfs); remove_proc_entry("servers", proc_fs_nfs); remove_proc_entry("fs/nfsfs", NULL); } #endif /* CONFIG_PROC_FS */
gpl-2.0
justwait/ApocalypseCore5.2.0
src/server/scripts/Examples/example_creature.cpp
31
12122
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Example_Creature SD%Complete: 100 SDComment: Short custom scripting example SDCategory: Script Examples EndScriptData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ScriptedGossip.h" #include "Player.h" // **** This script is designed as an example for others to build on **** // **** Please modify whatever you'd like to as this script is only for developement **** // **** Script Info* *** // This script is written in a way that it can be used for both friendly and hostile monsters // Its primary purpose is to show just how much you can really do with scripts // I recommend trying it out on both an agressive NPC and on friendly npc // **** Quick Info* *** // Functions with Handled Function marked above them are functions that are called automatically by the core // Functions that are marked Custom Function are functions I've created to simplify code enum Yells { //List of text id's. The text is stored in database, also in a localized version //(if translation not exist for the textId, default english text will be used) //Not required to define in this way, but simplify if changes are needed. 
//These texts must be added to the creature texts of the npc for which the script is assigned. SAY_AGGRO = 0, // "Let the games begin." SAY_RANDOM = 1, // "I see endless suffering. I see torment. I see rage. I see everything.", // "Muahahahaha", // "These mortal infedels my lord, they have invaded your sanctum and seek to steal your secrets.", // "You are already dead.", // "Where to go? What to do? So many choices that all end in pain, end in death." SAY_BERSERK = 2, // "$N, I sentance you to death!" SAY_PHASE = 3, // "The suffering has just begun!" SAY_DANCE = 4, // "I always thought I was a good dancer." SAY_SALUTE = 5, // "Move out Soldier!" SAY_EVADE = 6 // "Help $N! I'm under attack!" }; enum Spells { // List of spells. // Not required to define them in this way, but will make it easier to maintain in case spellId change SPELL_BUFF = 25661, SPELL_ONE = 12555, SPELL_ONE_ALT = 24099, SPELL_TWO = 10017, SPELL_THREE = 26027, SPELL_FRENZY = 23537, SPELL_BERSERK = 32965, }; enum eEnums { // any other constants FACTION_WORGEN = 24 }; //List of gossip item texts. Items will appear in the gossip window. #define GOSSIP_ITEM "I'm looking for a fight" class example_creature : public CreatureScript { public: example_creature() : CreatureScript("example_creature") { } struct example_creatureAI : public ScriptedAI { // *** HANDLED FUNCTION *** //This is the constructor, called only once when the Creature is first created example_creatureAI(Creature* creature) : ScriptedAI(creature) {} // *** CUSTOM VARIABLES **** //These variables are for use only by this individual script. //Nothing else will ever call them but us. 
uint32 m_uiSayTimer; // Timer for random chat uint32 m_uiRebuffTimer; // Timer for rebuffing uint32 m_uiSpell1Timer; // Timer for spell 1 when in combat uint32 m_uiSpell2Timer; // Timer for spell 1 when in combat uint32 m_uiSpell3Timer; // Timer for spell 1 when in combat uint32 m_uiBeserkTimer; // Timer until we go into Beserk (enraged) mode uint32 m_uiPhase; // The current battle phase we are in uint32 m_uiPhaseTimer; // Timer until phase transition // *** HANDLED FUNCTION *** //This is called after spawn and whenever the core decides we need to evade void Reset() { m_uiPhase = 1; // Start in phase 1 m_uiPhaseTimer = 60000; // 60 seconds m_uiSpell1Timer = 5000; // 5 seconds m_uiSpell2Timer = urand(10000, 20000); // between 10 and 20 seconds m_uiSpell3Timer = 19000; // 19 seconds m_uiBeserkTimer = 120000; // 2 minutes me->RestoreFaction(); } // *** HANDLED FUNCTION *** // Enter Combat called once per combat void EnterCombat(Unit* who) { //Say some stuff Talk(SAY_AGGRO, who->GetGUID()); } // *** HANDLED FUNCTION *** // Attack Start is called when victim change (including at start of combat) // By default, attack who and start movement toward the victim. //void AttackStart(Unit* who) //{ // ScriptedAI::AttackStart(who); //} // *** HANDLED FUNCTION *** // Called when going out of combat. Reset is called just after. 
void EnterEvadeMode() { Talk(SAY_EVADE); } // *** HANDLED FUNCTION *** //Our Receive emote function void ReceiveEmote(Player* /*player*/, uint32 uiTextEmote) { me->HandleEmoteCommand(uiTextEmote); switch (uiTextEmote) { case TEXT_EMOTE_DANCE: Talk(SAY_DANCE); break; case TEXT_EMOTE_SALUTE: Talk(SAY_SALUTE); break; } } // *** HANDLED FUNCTION *** //Update AI is called Every single map update (roughly once every 50ms if a player is within the grid) void UpdateAI(uint32 uiDiff) { //Out of combat timers if (!me->getVictim()) { //Random Say timer if (m_uiSayTimer <= uiDiff) { //Random switch between 5 outcomes Talk(SAY_RANDOM); m_uiSayTimer = 45000; //Say something agian in 45 seconds } else m_uiSayTimer -= uiDiff; //Rebuff timer if (m_uiRebuffTimer <= uiDiff) { DoCast(me, SPELL_BUFF); m_uiRebuffTimer = 900000; //Rebuff agian in 15 minutes } else m_uiRebuffTimer -= uiDiff; } //Return since we have no target if (!UpdateVictim()) return; //Spell 1 timer if (m_uiSpell1Timer <= uiDiff) { //Cast spell one on our current target. if (rand()%50 > 10) DoCast(me->getVictim(), SPELL_ONE_ALT); else if (me->IsWithinDist(me->getVictim(), 25.0f)) DoCast(me->getVictim(), SPELL_ONE); m_uiSpell1Timer = 5000; } else m_uiSpell1Timer -= uiDiff; //Spell 2 timer if (m_uiSpell2Timer <= uiDiff) { //Cast spell two on our current target. DoCast(me->getVictim(), SPELL_TWO); m_uiSpell2Timer = 37000; } else m_uiSpell2Timer -= uiDiff; //Beserk timer if (m_uiPhase > 1) { //Spell 3 timer if (m_uiSpell3Timer <= uiDiff) { //Cast spell one on our current target. DoCast(me->getVictim(), SPELL_THREE); m_uiSpell3Timer = 19000; } else m_uiSpell3Timer -= uiDiff; if (m_uiBeserkTimer <= uiDiff) { //Say our line then cast uber death spell Talk(SAY_BERSERK, me->getVictim() ? 
me->getVictim()->GetGUID() : 0); DoCast(me->getVictim(), SPELL_BERSERK); //Cast our beserk spell agian in 12 seconds if we didn't kill everyone m_uiBeserkTimer = 12000; } else m_uiBeserkTimer -= uiDiff; } else if (m_uiPhase == 1) //Phase timer { if (m_uiPhaseTimer <= uiDiff) { //Go to next phase ++m_uiPhase; Talk(SAY_PHASE); DoCast(me, SPELL_FRENZY); } else m_uiPhaseTimer -= uiDiff; } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return new example_creatureAI(creature); } bool OnGossipHello(Player* player, Creature* creature) { player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 1); player->SEND_GOSSIP_MENU(907, creature->GetGUID()); return true; } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*sender*/, uint32 action) { player->PlayerTalkClass->ClearMenus(); if (action == GOSSIP_ACTION_INFO_DEF+1) { player->CLOSE_GOSSIP_MENU(); //Set our faction to hostile towards all creature->setFaction(FACTION_WORGEN); creature->AI()->AttackStart(player); } return true; } }; //This is the actual function called only once durring InitScripts() //It must define all handled functions that are to be run in this script void AddSC_example_creature() { new example_creature(); }
gpl-2.0
nettedfish/mysql-server
storage/ndb/ndbapi-examples/ndbapi_multi_cursor/main.cpp
31
38121
/* Copyright (C) 2008 Sun Microsystems Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ /************************************************************** * * NOTE THAT THIS TOOL CAN ONLY BE RUN AGAINST THE EMPLOYEES DATABASE * TABLES WHICH IS A SEPERATE DOWNLOAD AVAILABLE AT WWW.MYSQL.COM. **************************************************************/ // Used for cout #include <iostream> #include <stdio.h> #include <time.h> #include <assert.h> #ifdef _WIN32 #include <winsock2.h> #endif #include <mysql.h> #include <mysqld_error.h> #include <NdbApi.hpp> #include "NdbQueryBuilder.hpp" #include "NdbQueryOperation.hpp" #define USE_RECATTR /** * Helper debugging macros */ #define PRINT_ERROR(code,msg) \ std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ << ", code: " << code \ << ", msg: " << msg << "." 
<< std::endl #define MYSQLERROR(mysql) { \ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ exit(-1); } #define PRINT_APIERROR(error) { \ PRINT_ERROR((error).code,(error).message); } #define APIERROR(error) { \ PRINT_APIERROR(error); \ exit(-1); } /***************************************************** ** Defines record structure for the rows in our tables ******************************************************/ struct ManagerRow { char dept_no[4]; Uint32 emp_no; Int32 from_date; Int32 to_date; Uint32 my_key; }; struct ManagerPKRow { Uint32 emp_no; char dept_no[4]; }; struct EmployeeRow { Uint32 emp_no; Int32 birth_date; // sizeof(date)....? char first_name[14+1]; char last_name[16+1]; char gender; Int32 hire_date; }; struct SalaryRow { Uint32 emp_no; Int32 from_date; Uint32 salary; Int32 to_date; }; const char* employeeDef = "CREATE TABLE employees (" " emp_no INT NOT NULL," " dept_no CHAR(4) NOT NULL," // Temporary added OJA " birth_date DATE NOT NULL," " first_name VARCHAR(14) NOT NULL," " last_name VARCHAR(16) NOT NULL," " gender ENUM ('M','F') NOT NULL, " " hire_date DATE NOT NULL," " PRIMARY KEY (emp_no))" " ENGINE=NDB"; const char* departmentsDef = "CREATE TABLE departments (" " dept_no CHAR(4) NOT NULL," " dept_name VARCHAR(40) NOT NULL," " PRIMARY KEY (dept_no)," " UNIQUE KEY (dept_name))" " ENGINE=NDB"; const char* dept_managerDef = "CREATE TABLE dept_manager (" " dept_no CHAR(4) NOT NULL," " emp_no INT NOT NULL," " from_date DATE NOT NULL," " to_date DATE NOT NULL," " my_key INT NOT NULL," " KEY (emp_no)," " KEY (dept_no)," //" FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE," //" FOREIGN KEY (dept_no) REFERENCES departments (dept_no) ON DELETE CASCADE," " UNIQUE KEY MYINDEXNAME (my_key)," " PRIMARY KEY (emp_no,dept_no))" " ENGINE=NDB" //" PARTITION BY KEY(dept_no)" ; const char* dept_empDef = "CREATE TABLE dept_emp (" " emp_no INT NOT NULL," " dept_no CHAR(4) NOT NULL," " from_date DATE NOT NULL," " to_date DATE NOT NULL," " 
KEY (emp_no)," " KEY (dept_no)," " FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE," " FOREIGN KEY (dept_no) REFERENCES departments (dept_no) ON DELETE CASCADE," " PRIMARY KEY (emp_no,dept_no))" " ENGINE=NDB"; const char* titlesDef = "CREATE TABLE titles (" " emp_no INT NOT NULL," " title VARCHAR(50) NOT NULL," " from_date DATE NOT NULL," " to_date DATE," " KEY (emp_no)," " FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE," " PRIMARY KEY (emp_no,title, from_date))" " ENGINE=NDB"; const char* salariesDef = "CREATE TABLE salaries (" " emp_no INT NOT NULL," " salary INT NOT NULL," " from_date DATE NOT NULL," " to_date DATE NOT NULL," " KEY (emp_no)," " FOREIGN KEY (emp_no) REFERENCES employees (emp_no) ON DELETE CASCADE," " PRIMARY KEY (emp_no, from_date))" " ENGINE=NDB"; int createEmployeeDb(MYSQL& mysql) { if (true) { mysql_query(&mysql, "DROP DATABASE employees"); printf("Dropped existing employees DB\n"); mysql_query(&mysql, "CREATE DATABASE employees"); mysql_commit(&mysql); printf("Created new employees DB\n"); if (mysql_query(&mysql, "USE employees") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("USE employees DB\n"); if (mysql_query(&mysql, employeeDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'employee' table\n"); if (mysql_query(&mysql, departmentsDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'departments' table\n"); if (mysql_query(&mysql, dept_managerDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'dept_manager' table\n"); if (mysql_query(&mysql, dept_empDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'dept_emp' table\n"); if (mysql_query(&mysql, titlesDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'titles' table\n"); if (mysql_query(&mysql, salariesDef) != 0) MYSQLERROR(mysql); mysql_commit(&mysql); printf("Created 'salaries' table\n"); printf("Insert simple test data\n"); if (mysql_query(&mysql, "Insert 
into dept_manager(dept_no,emp_no,my_key) values ('d005',110567,110567)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('c005',11057,11067)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('e005',210567,210567)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('f005',210568,210568)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('g005',210569,210569)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('h005',210560,210560)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into dept_manager(dept_no,emp_no,my_key) values ('i005',210561,210561)") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); if (mysql_query(&mysql, "Insert into employees(emp_no,dept_no) values (110567,'d005')") != 0) MYSQLERROR(mysql); mysql_commit(&mysql); } return 1; } #if 0 /************************************************************** * Initialise NdbRecord structures for table and index access * **************************************************************/ static void init_ndbrecord_info(Ndb &myNdb) { NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); manager = myDict->getTable("dept_manager"); employee= myDict->getTable("employees"); salary = myDict->getTable("salaries"); if (!employee || !manager || !salary) APIERROR(myDict->getNdbError()); rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); rowEmployeeRecord = employee->getDefaultRecord(); if (rowEmployeeRecord == NULL) APIERROR(myDict->getNdbError()); rowSalaryRecord = salary->getDefaultRecord(); if (rowSalaryRecord == NULL) 
APIERROR(myDict->getNdbError()); // Lookup Primary key for salaries table const NdbDictionary::Index *myPIndex= myDict->getIndex("PRIMARY", "salaries"); if (myPIndex == NULL) APIERROR(myDict->getNdbError()); indexSalaryRecord = myPIndex->getDefaultRecord(); if (indexSalaryRecord == NULL) APIERROR(myDict->getNdbError()); } #endif /** * Simple example of intended usage of the new (SPJ) QueryBuilder API. * * STATUS: * Compilable code, NdbQueryBuilder do some semantics checks. * */ int testQueryBuilder(Ndb &myNdb) { const NdbDictionary::Table *manager, *employee, *salary; int res; NdbTransaction* myTransaction = NULL; NdbQuery* myQuery = NULL; const char* dept_no = "d005"; Uint32 emp_no = 110567; ManagerRow managerRow; EmployeeRow employeeRow; printf("\n -- Building query --\n"); NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); manager = myDict->getTable("dept_manager"); employee= myDict->getTable("employees"); salary = myDict->getTable("salaries"); if (!employee || !manager || !salary) APIERROR(myDict->getNdbError()); //////////////////////////////////////////////// // Prepare alternatine non-default NdbRecords for MANAGER table //////////////////////////////////////////////// NdbRecord *rowManagerRecord; { const NdbDictionary::Column *manager_dept_no; const NdbDictionary::Column *manager_emp_no; const NdbDictionary::Column *manager_from_date; const NdbDictionary::Column *manager_to_date; manager_dept_no = manager->getColumn("dept_no"); if (manager_dept_no == NULL) APIERROR(myDict->getNdbError()); manager_emp_no = manager->getColumn("emp_no"); if (manager_emp_no == NULL) APIERROR(myDict->getNdbError()); manager_from_date = manager->getColumn("from_date"); if (manager_from_date == NULL) APIERROR(myDict->getNdbError()); manager_to_date = manager->getColumn("to_date"); if (manager_to_date == NULL) APIERROR(myDict->getNdbError()); const NdbDictionary::RecordSpecification mngSpec[] = { {manager_emp_no, offsetof(ManagerRow, emp_no), 0,0}, // {manager_dept_no, 
offsetof(ManagerRow, dept_no), 0,0}, // {manager_from_date, offsetof(ManagerRow, from_date), 0,0}, {manager_to_date, offsetof(ManagerRow, to_date), 0,0} }; rowManagerRecord = myDict->createRecord(manager, mngSpec, 2, sizeof(mngSpec[0])); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); } /** * Some very basic examples which are actually not Query*Trees*, but rather * single QueryOperation defined with the NdbQueryBuilder. * Mainly to illustrate how the NdbQueryOperand may be specified * either as a constant or a parameter value - A combination * thereoff would also be sensible. * * Main purpose is to examplify how NdbQueryBuilder is used to prepare * reusable query object - no ::execute() is performed yet. */ NdbQueryBuilder* const myBuilder = NdbQueryBuilder::create(myNdb); #if 0 printf("Compare with old API interface\n"); { myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); // Lookup Primary key for manager table const NdbDictionary::Index *myPIndex= myDict->getIndex("PRIMARY", manager->getName()); if (myPIndex == NULL) APIERROR(myDict->getNdbError()); NdbIndexScanOperation* ixScan = myTransaction->scanIndex(myPIndex->getDefaultRecord(), manager->getDefaultRecord()); if (ixScan == NULL) APIERROR(myTransaction->getNdbError()); /* Add a bound */ ManagerPKRow low={0,"d005"}; ManagerPKRow high={110567,"d005"}; NdbIndexScanOperation::IndexBound bound; bound.low_key=(char*)&low; bound.low_key_count=2; bound.low_inclusive=true; bound.high_key=(char*)&high; bound.high_key_count=2; bound.high_inclusive=false; bound.range_no=0; if (ixScan->setBound(myPIndex->getDefaultRecord(), bound)) APIERROR(myTransaction->getNdbError()); } #endif #if 1 /* qt1 is 'const defined' */ printf("q1\n"); const NdbQueryDef* q1 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); const NdbQueryOperand* managerKey[] = // Manager is indexed om {"dept_no", "emp_no"} { qb->constValue("d005"), // dept_no = "d005" 
qb->constValue(110567), // emp_no = 110567 0 }; const NdbQueryLookupOperationDef *readManager = qb->readTuple(manager, managerKey); if (readManager == NULL) APIERROR(qb->getNdbError()); q1 = qb->prepare(); if (q1 == NULL) APIERROR(qb->getNdbError()); // Some operations are intentionally disallowed through private declaration // delete readManager; // NdbQueryLookupOperationDef illegalAssign = *readManager; // NdbQueryLookupOperationDef *illegalCopy1 = new NdbQueryLookupOperationDef(*readManager); // NdbQueryLookupOperationDef illegalCopy2(*readManager); } printf("q2\n"); const NdbQueryDef* q2 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); // Manager key defined as parameter const NdbQueryOperand* managerKey[] = // Manager is indexed om {"dept_no", "emp_no"} { qb->paramValue(), // dept_no parameter, qb->paramValue("emp"), // emp_no parameter - param naming is optional 0 }; // Lookup on a single tuple with key define by 'managerKey' param. tuple const NdbQueryLookupOperationDef* readManager = qb->readTuple(manager, managerKey); if (readManager == NULL) APIERROR(qb->getNdbError()); q2 = qb->prepare(); if (q2 == NULL) APIERROR(qb->getNdbError()); } /**** UNFINISHED... printf("q3\n"); const NdbQueryDef* q3 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); const NdbQueryIndexBound* managerBound = // Manager is indexed om {"dept_no", "emp_no"} { .... }; // Lookup on a single tuple with key define by 'managerKey' param. tuple const NdbQueryScanNode *scanManager = qb->scanIndex(manager, managerKey); if (scanManager == NULL) APIERROR(qb->getNdbError()); q3 = qb->prepare(); if (q3 == NULL) APIERROR(qb->getNdbError()); } *****/ #endif #if 1 { /* Composite operations building real *trees* aka. linked operations. 
* (First part is identical to building 'qt2' above) * * The related SQL query which this simulates would be something like: * * select * from dept_manager join employees using(emp_no) * where dept_no = 'd005' and emp_no = 110567; */ printf("q4\n"); const NdbQueryDef* q4 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); const NdbQueryOperand* constManagerKey[] = // Manager is indexed om {"dept_no", "emp_no"} { qb->constValue("d005"), // dept_no = "d005" qb->constValue(110567), // emp_no = 110567 0 }; const NdbQueryOperand* paramManagerKey[] = // Manager is indexed om {"dept_no", "emp_no"} { qb->paramValue(), // dept_no parameter, qb->paramValue("emp"), // emp_no parameter - param naming is optional 0 }; // Lookup a single tuple with key define by 'managerKey' param. tuple const NdbQueryLookupOperationDef *readManager = qb->readTuple(manager, paramManagerKey); //const NdbQueryLookupOperationDef *readManager = qb->readTuple(manager, constManagerKey); if (readManager == NULL) APIERROR(qb->getNdbError()); // THEN: employee table is joined: // A linked value is used to let employee lookup refer values // from the parent operation on manger. const NdbQueryOperand* joinEmployeeKey[] = // Employee is indexed om {"emp_no"} { qb->linkedValue(readManager, "emp_no"), // where '= readManger.emp_no' 0 }; const NdbQueryLookupOperationDef* readEmployee = qb->readTuple(employee, joinEmployeeKey); if (readEmployee == NULL) APIERROR(qb->getNdbError()); q4 = qb->prepare(); if (q4 == NULL) APIERROR(qb->getNdbError()); } /////////////////////////////////////////////////// // q4 may later be executed as: // (Possibly multiple ::execute() or multiple NdbQueryDef instances // within the same NdbTransaction::execute(). 
) //////////////////////////////////////////////////// NdbQueryParamValue paramList[] = {dept_no, emp_no}; myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); myQuery = myTransaction->createQuery(q4,paramList); if (myQuery == NULL) APIERROR(myTransaction->getNdbError()); #ifdef USE_RECATTR const NdbRecAttr *key[2][2]; for (Uint32 i=0; i<myQuery->getNoOfOperations(); ++i) { NdbQueryOperation* op = myQuery->getQueryOperation(i); const NdbDictionary::Table* table = op->getQueryOperationDef().getTable(); key[i][0] = op->getValue(table->getColumn(0)); key[i][1] = op->getValue(table->getColumn(1)); } #else { memset (&managerRow, 0, sizeof(managerRow)); memset (&employeeRow, 0, sizeof(employeeRow)); const NdbRecord* rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); const NdbRecord* rowEmployeeRecord = employee->getDefaultRecord(); if (rowEmployeeRecord == NULL) APIERROR(myDict->getNdbError()); assert(myQuery->getNoOfOperations()==2); NdbQueryOperation* op0 = myQuery->getQueryOperation(0U); NdbQueryOperation* op1 = myQuery->getQueryOperation(1U); op0->setResultRowBuf(rowManagerRecord, (char*)&managerRow); op1->setResultRowBuf(rowEmployeeRecord, (char*)&employeeRow); } #endif printf("Start execute\n"); if (myTransaction->execute(NdbTransaction::NoCommit) != 0 || myQuery->getNdbError().code) { APIERROR(myQuery->getNdbError()); } printf("Done executed\n"); // All NdbQuery operations are handled as scans with cursor placed 'before' // first record: Fetch next to retrieve result: res = myQuery->nextResult(); if (res == NdbQuery::NextResult_error) APIERROR(myQuery->getNdbError()); #ifdef USE_RECATTR printf("manager emp_no: %d\n", key[0][1]->u_32_value()); printf("employee emp_no: %d\n", key[1][0]->u_32_value()); assert(!key[0][1]->isNULL() && key[0][1]->u_32_value()==emp_no); assert(!key[1][0]->isNULL() && key[1][0]->u_32_value()==emp_no); #else // NOW: Result is available 
in 'managerRow' buffer printf("manager emp_no: %d\n", managerRow.emp_no); printf("employee emp_no: %d\n", employeeRow.emp_no); assert(managerRow.emp_no==emp_no); assert(employeeRow.emp_no==emp_no); #endif myQuery->close(); myNdb.closeTransaction(myTransaction); myTransaction = 0; } #endif #if 1 { ////////////////////////////////////////////////// printf("q4_1\n"); const NdbQueryDef* q4_1 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); const NdbQueryOperand* empKey[] = // Employee is indexed om {"emp_no"} { //qb->constValue(110567), // emp_no = 110567 qb->paramValue(), 0 }; const NdbQueryLookupOperationDef* readEmployee = qb->readTuple(employee, empKey); if (readEmployee == NULL) APIERROR(qb->getNdbError()); const NdbQueryOperand* joinManagerKey[] = // Manager is indexed om {"dept_no", "emp_no"} { qb->paramValue(), //qb->constValue(1005), // dept_no = "d005" //qb->linkedValue(readEmployee,"dept_no"), qb->linkedValue(readEmployee,"emp_no"), // emp_no = 110567 //qb->constValue(110567), //qb->paramValue(), 0 }; // Join with a single tuple with key defined by linked employee fields const NdbQueryLookupOperationDef *readManager = qb->readTuple(manager, joinManagerKey); if (readManager == NULL) APIERROR(qb->getNdbError()); q4_1 = qb->prepare(); if (q4_1 == NULL) APIERROR(qb->getNdbError()); } /////////////////////////////////////////////////// // q4 may later be executed as: // (Possibly multiple ::execute() or multiple NdbQueryDef instances // within the same NdbTransaction::execute(). 
) //////////////////////////////////////////////////// //NdbQueryParamValue paramList_q4[] = {emp_no}; //NdbQueryParamValue paramList_q4[] = {dept_no}; NdbQueryParamValue paramList_q4[] = {emp_no, dept_no}; myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); myQuery = myTransaction->createQuery(q4_1,paramList_q4); if (myQuery == NULL) APIERROR(myTransaction->getNdbError()); #ifdef USE_RECATTR const NdbRecAttr *value_q4[2][2]; for (Uint32 i=0; i<myQuery->getNoOfOperations(); ++i) { NdbQueryOperation* op = myQuery->getQueryOperation(i); const NdbDictionary::Table* table = op->getQueryOperationDef().getTable(); value_q4[i][0] = op->getValue(table->getColumn(0)); value_q4[i][1] = op->getValue(table->getColumn(1)); } #else { memset (&managerRow, 0, sizeof(managerRow)); memset (&employeeRow, 0, sizeof(employeeRow)); const NdbRecord* rowEmployeeRecord = employee->getDefaultRecord(); if (rowEmployeeRecord == NULL) APIERROR(myDict->getNdbError()); const NdbRecord* rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); assert(myQuery->getNoOfOperations()==2); NdbQueryOperation* op0 = myQuery->getQueryOperation(0U); NdbQueryOperation* op1 = myQuery->getQueryOperation(1U); op0->setResultRowBuf(rowEmployeeRecord, (char*)&employeeRow); op1->setResultRowBuf(rowManagerRecord, (char*)&managerRow); } #endif printf("Start execute\n"); if (myTransaction->execute(NdbTransaction::NoCommit) != 0 || myQuery->getNdbError().code) { APIERROR(myQuery->getNdbError()); } printf("Done executed\n"); // All NdbQuery operations are handled as scans with cursor placed 'before' // first record: Fetch next to retrieve result: res = myQuery->nextResult(); if (res == NdbQuery::NextResult_error) APIERROR(myQuery->getNdbError()); #ifdef USE_RECATTR printf("employee emp_no: %d\n", value_q4[0][0]->u_32_value()); printf("manager emp_no: %d\n", value_q4[1][1]->u_32_value()); assert(!value_q4[0][0]->isNULL() 
&& value_q4[0][0]->u_32_value()==emp_no); assert(!value_q4[1][1]->isNULL() && value_q4[1][1]->u_32_value()==emp_no); #else printf("employee emp_no: %d\n", employeeRow.emp_no); printf("manager emp_no: %d\n", managerRow.emp_no); assert(managerRow.emp_no==emp_no); assert(employeeRow.emp_no==emp_no); #endif myQuery->close(); myNdb.closeTransaction(myTransaction); myTransaction = 0; } #endif ///////////////////////////////////////////////// #if 1 { // Example: ::readTuple() using Index for unique key lookup printf("q5\n"); const NdbQueryDef* q5 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); // Lookup Primary key for manager table const NdbDictionary::Index *myPIndex= myDict->getIndex("MYINDEXNAME$unique", manager->getName()); if (myPIndex == NULL) APIERROR(myDict->getNdbError()); // Manager index-key defined as parameter, NB: Reversed order compared to hash key const NdbQueryOperand* managerKey[] = // Manager PK index is {"emp_no","dept_no", } { //qb->constValue(110567), // emp_no = 110567 qb->paramValue(), 0 }; // Lookup on a single tuple with key define by 'managerKey' param. 
tuple const NdbQueryLookupOperationDef* readManager = qb->readTuple(myPIndex, manager, managerKey); if (readManager == NULL) APIERROR(qb->getNdbError()); q5 = qb->prepare(); if (q5 == NULL) APIERROR(qb->getNdbError()); } myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); NdbQueryParamValue paramList_q5[] = {emp_no}; myQuery = myTransaction->createQuery(q5,paramList_q5); if (myQuery == NULL) APIERROR(myTransaction->getNdbError()); #ifdef USE_RECATTR const NdbRecAttr *value_q5[2]; NdbQueryOperation* op = myQuery->getQueryOperation(0U); const NdbDictionary::Table* table = op->getQueryOperationDef().getTable(); value_q5[0] = op->getValue(table->getColumn(0)); value_q5[1] = op->getValue(table->getColumn(1)); #else { memset (&managerRow, 0, sizeof(managerRow)); const NdbRecord* rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); // Specify result handling NdbRecord style - need the (single) NdbQueryOperation: NdbQueryOperation* op = myQuery->getQueryOperation(0U); op->setResultRowBuf(rowManagerRecord, (char*)&managerRow); } #endif printf("Start execute\n"); if (myTransaction->execute(NdbTransaction::NoCommit) != 0 || myQuery->getNdbError().code) { APIERROR(myQuery->getNdbError()); } printf("Done executed\n"); // All NdbQuery operations are handled as scans with cursor placed 'before' // first record: Fetch next to retrieve result: res = myQuery->nextResult(); if (res == NdbQuery::NextResult_error) APIERROR(myQuery->getNdbError()); #ifdef USE_RECATTR printf("employee emp_no: %d\n", value_q5[1]->u_32_value()); assert(!value_q5[1]->isNULL() && value_q5[1]->u_32_value()==emp_no); #else printf("employee emp_no: %d\n", managerRow.emp_no); assert(managerRow.emp_no==emp_no); #endif myQuery->close(); myNdb.closeTransaction(myTransaction); myTransaction = 0; } #endif #if 1 { printf("q6: Table scan + linked lookup\n"); const NdbQueryDef* q6 = 0; { NdbQueryBuilder* qb = 
myBuilder; //myDict->getQueryBuilder(); /**** // Lookup Primary key for manager table const NdbDictionary::Index *myPIndex= myDict->getIndex("PRIMARY", manager->getName()); if (myPIndex == NULL) APIERROR(myDict->getNdbError()); const NdbQueryOperand* low[] = // Manager PK index is {"emp_no","dept_no", } { qb->constValue(110567), // emp_no = 110567 0 }; const NdbQueryOperand* high[] = // Manager PK index is {"emp_no","dept_no", } { qb->constValue("illegal key"), 0 }; const NdbQueryIndexBound bound (low, NULL); // emp_no = [110567, oo] const NdbQueryIndexBound bound_illegal(low, high); // 'high' is char type -> illegal const NdbQueryIndexBound boundEq(low); ****/ // Lookup on a single tuple with key define by 'managerKey' param. tuple // const NdbQueryScanOperationDef* scanManager = qb->scanIndex(myPIndex, manager, &boundEq); const NdbQueryScanOperationDef* scanManager = qb->scanTable(manager); if (scanManager == NULL) APIERROR(qb->getNdbError()); // THEN: employee table is joined: // A linked value is used to let employee lookup refer values // from the parent operation on manager. 
const NdbQueryOperand* empJoinKey[] = // Employee is indexed om {"emp_no"} { qb->linkedValue(scanManager, "emp_no"), // where '= readManger.emp_no' 0 }; const NdbQueryLookupOperationDef* readEmployee = qb->readTuple(employee, empJoinKey); if (readEmployee == NULL) APIERROR(qb->getNdbError()); q6 = qb->prepare(); if (q6 == NULL) APIERROR(qb->getNdbError()); } myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); myQuery = myTransaction->createQuery(q6, (NdbQueryParamValue*)0); if (myQuery == NULL) APIERROR(myTransaction->getNdbError()); #ifdef USE_RECATTR const NdbRecAttr* value_q6[2][2]; for (Uint32 i=0; i<myQuery->getNoOfOperations(); ++i) { NdbQueryOperation* op = myQuery->getQueryOperation(i); const NdbDictionary::Table* table = op->getQueryOperationDef().getTable(); value_q6[i][0] = op->getValue(table->getColumn(0)); value_q6[i][1] = op->getValue(table->getColumn(1)); } #else { int err; const NdbRecord* rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); assert(myQuery->getNoOfOperations()==2); NdbQueryOperation* op0 = myQuery->getQueryOperation(0U); err = op0->setResultRowBuf(rowManagerRecord, (char*)&managerRow); assert (err==0); //if (err == NULL) APIERROR(op0->getNdbError()); const NdbRecord* rowEmployeeRecord = employee->getDefaultRecord(); if (rowEmployeeRecord == NULL) APIERROR(myDict->getNdbError()); NdbQueryOperation* op1 = myQuery->getQueryOperation(1U); err = op1->setResultRowBuf(rowEmployeeRecord, (char*)&employeeRow); assert (err==0); //if (err == NULL) APIERROR(op1->getNdbError()); } #endif printf("Start execute\n"); if (myTransaction->execute(NdbTransaction::NoCommit) != 0 || myQuery->getNdbError().code) { APIERROR(myQuery->getNdbError()); } int cnt = 0; while (true) { memset (&managerRow, 0, sizeof(managerRow)); memset (&employeeRow, 0, sizeof(employeeRow)); // All NdbQuery operations are handled as scans with cursor placed 'before' // first 
record: Fetch next to retrieve result: NdbQuery::NextResultOutcome res = myQuery->nextResult(); if (res == NdbQuery::NextResult_error) { PRINT_APIERROR(myQuery->getNdbError()); break; } else if (res!=NdbQuery::NextResult_gotRow) { break; } #ifdef USE_RECATTR printf("manager emp_no: %d, NULL:%d\n", value_q6[0][1]->u_32_value(), myQuery->getQueryOperation(0U)->isRowNULL()); printf("employee emp_no: %d, NULL:%d\n", value_q6[1][0]->u_32_value(), myQuery->getQueryOperation(1U)->isRowNULL()); #else // NOW: Result is available in row buffers printf("manager emp_no: %d, NULL:%d\n", managerRow.emp_no, myQuery->getQueryOperation(0U)->isRowNULL()); printf("employee emp_no: %d, NULL:%d\n", employeeRow.emp_no, myQuery->getQueryOperation(1U)->isRowNULL()); #endif cnt++; }; printf("EOF, %d rows\n", cnt); myQuery->close(); myNdb.closeTransaction(myTransaction); myTransaction = 0; } #endif #if 1 { printf("Ordered index scan + lookup\n"); const NdbQueryDef* q6_1 = 0; { NdbQueryBuilder* qb = myBuilder; //myDict->getQueryBuilder(); // Lookup Primary key for manager table const NdbDictionary::Index *myPIndex= myDict->getIndex("PRIMARY", manager->getName()); if (myPIndex == NULL) APIERROR(myDict->getNdbError()); const NdbQueryOperand* low[] = // Manager PK index is {"emp_no","dept_no", } { qb->paramValue(), // qb->constValue(110567), // emp_no = 110567 qb->constValue("d005"), // dept_no = "d005" 0 }; const NdbQueryOperand* high[] = // Manager PK index is {"emp_no","dept_no", } { qb->constValue(110567), // emp_no = 110567 qb->constValue("d005"), // dept_no = "d005" 0 }; const NdbQueryIndexBound bound (low, high); // emp_no = [110567, oo] const NdbQueryIndexBound boundEq(low); // Lookup on a single tuple with key define by 'managerKey' param. 
tuple const NdbQueryScanOperationDef* scanManager = qb->scanIndex(myPIndex, manager, &bound); if (scanManager == NULL) APIERROR(qb->getNdbError()); // THEN: employee table is joined: // A linked value is used to let employee lookup refer values // from the parent operation on manager. const NdbQueryOperand* empJoinKey[] = // Employee is indexed om {"emp_no"} { qb->linkedValue(scanManager, "emp_no"), // where '= readManger.emp_no' 0 }; const NdbQueryLookupOperationDef* readEmployee = qb->readTuple(employee, empJoinKey); if (readEmployee == NULL) APIERROR(qb->getNdbError()); q6_1 = qb->prepare(); if (q6_1 == NULL) APIERROR(qb->getNdbError()); } myTransaction= myNdb.startTransaction(); if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); NdbQueryParamValue paramList_q6_1[] = {emp_no}; myQuery = myTransaction->createQuery(q6_1, paramList_q6_1); if (myQuery == NULL) APIERROR(myTransaction->getNdbError()); #ifdef USE_RECATTR const NdbRecAttr* value_q6_1[2][2]; for (Uint32 i=0; i<myQuery->getNoOfOperations(); ++i) { NdbQueryOperation* op = myQuery->getQueryOperation(i); const NdbDictionary::Table* table = op->getQueryOperationDef().getTable(); value_q6_1[i][1] = op->getValue(table->getColumn(1)); value_q6_1[i][0] = op->getValue(table->getColumn(0)); } #else { int err; //int mask = 0x03; const NdbRecord* rowManagerRecord = manager->getDefaultRecord(); if (rowManagerRecord == NULL) APIERROR(myDict->getNdbError()); assert(myQuery->getNoOfOperations()==2); NdbQueryOperation* op0 = myQuery->getQueryOperation(0U); err = op0->setResultRowBuf(rowManagerRecord, (char*)&managerRow /*, (const unsigned char*)&mask*/); assert (err==0); if (err) APIERROR(myQuery->getNdbError()); const NdbRecord* rowEmployeeRecord = employee->getDefaultRecord(); if (rowEmployeeRecord == NULL) APIERROR(myDict->getNdbError()); NdbQueryOperation* op1 = myQuery->getQueryOperation(1U); err = op1->setResultRowBuf(rowEmployeeRecord, (char*)&employeeRow /*, (const unsigned char*)&mask*/); assert (err==0); 
if (err) APIERROR(myQuery->getNdbError()); } #endif printf("Start execute\n"); if (myTransaction->execute(NdbTransaction::NoCommit) != 0 || myQuery->getNdbError().code) { APIERROR(myQuery->getNdbError()); } printf("Done executed\n"); int cnt = 0; while (true) { memset (&managerRow, 0, sizeof(managerRow)); memset (&employeeRow, 0, sizeof(employeeRow)); // All NdbQuery operations are handled as scans with cursor placed 'before' // first record: Fetch next to retrieve result: NdbQuery::NextResultOutcome res = myQuery->nextResult(); if (res == NdbQuery::NextResult_error) { PRINT_APIERROR(myQuery->getNdbError()); break; } else if (res!=NdbQuery::NextResult_gotRow) { break; } #ifdef USE_RECATTR printf("manager emp_no: %d, NULL:%d\n", value_q6_1[0][1]->u_32_value(), myQuery->getQueryOperation(0U)->isRowNULL()); printf("employee emp_no: %d, NULL:%d\n", value_q6_1[1][0]->u_32_value(), myQuery->getQueryOperation(1U)->isRowNULL()); #else // NOW: Result is available in row buffers printf("manager emp_no: %d, NULL:%d\n", managerRow.emp_no, myQuery->getQueryOperation(0U)->isRowNULL()); printf("employee emp_no: %d, NULL:%d\n", employeeRow.emp_no, myQuery->getQueryOperation(1)->isRowNULL()); #endif cnt++; }; printf("EOF, %d rows\n", cnt); myQuery->close(); myNdb.closeTransaction(myTransaction); myTransaction = 0; } #endif myBuilder->destroy(); return 0; } int main(int argc, const char** argv){ if(argc!=4){ std::cout << "Usage: " << argv[0] << " <mysql IP address> <mysql port> <cluster connect string>" << std::endl; exit(-1); } const char* const host=argv[1]; const int port = atoi(argv[2]); const char* const connectString = argv[3]; //extern const char *my_progname; //NDB_INIT(argv[0]); ndb_init(); MYSQL mysql; if(!mysql_init(&mysql)){ std::cout << "mysql_init() failed:" << std::endl; } if(!mysql_real_connect(&mysql, host, "root", "", "", port, NULL, 0)){ std::cout << "mysql_real_connect() failed:" << std::endl; } if (!createEmployeeDb(mysql)) { std::cout << "Create of employee DB 
failed" << std::endl; exit(-1); } mysql_close(&mysql); /************************************************************** * Connect to ndb cluster * **************************************************************/ { Ndb_cluster_connection cluster_connection(connectString); if (cluster_connection.connect(4, 5, 1)) { std::cout << "Unable to connect to cluster within 30 secs." << std::endl; exit(-1); } // Optionally connect and wait for the storage nodes (ndbd's) if (cluster_connection.wait_until_ready(30,0) < 0) { std::cout << "Cluster was not ready within 30 secs.\n"; exit(-1); } Ndb myNdb(&cluster_connection,"employees"); if (myNdb.init(1024) == -1) { // Set max 1024 parallel transactions APIERROR(myNdb.getNdbError()); exit(-1); } std::cout << "Connected to Cluster\n"; /******************************************* * Check table existence * *******************************************/ if (true) { bool has_tables = true; const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); if (myDict->getTable("departments") == 0) { std::cout << "Table 'departments' not found" << std::endl; has_tables = false; } if (myDict->getTable("employees") == 0) { std::cout << "Table 'employees' not found" << std::endl; has_tables = false; } if (myDict->getTable("dept_emp") == 0) { std::cout << "Table 'dept_emp' not found" << std::endl; has_tables = false; } if (myDict->getTable("dept_manager") == 0) { std::cout << "Table 'dept_manager' not found" << std::endl; has_tables = false; } if (myDict->getTable("salaries") == 0) { std::cout << "Table 'salaries' not found" << std::endl; has_tables = false; } if (myDict->getTable("titles") == 0) { std::cout << "Table 'titles' not found" << std::endl; has_tables = false; } if (!has_tables) { std::cout << "Table(s) was missing from the 'employees' DB" << std::endl; exit(-1); } std::cout << "All tables in 'employees' DB was found" << std::endl; } testQueryBuilder(myNdb); } // Must call ~Ndb_cluster_connection() before ndb_end(). ndb_end(0); return 0; }
gpl-2.0
HarveyHunt/linux
arch/mips/loongson64/loongson-3/acpi_init.c
287
3565
// SPDX-License-Identifier: GPL-2.0 #include <linux/io.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/export.h> #define SBX00_ACPI_IO_BASE 0x800 #define SBX00_ACPI_IO_SIZE 0x100 #define ACPI_PM_EVT_BLK (SBX00_ACPI_IO_BASE + 0x00) /* 4 bytes */ #define ACPI_PM_CNT_BLK (SBX00_ACPI_IO_BASE + 0x04) /* 2 bytes */ #define ACPI_PMA_CNT_BLK (SBX00_ACPI_IO_BASE + 0x0F) /* 1 byte */ #define ACPI_PM_TMR_BLK (SBX00_ACPI_IO_BASE + 0x18) /* 4 bytes */ #define ACPI_GPE0_BLK (SBX00_ACPI_IO_BASE + 0x10) /* 8 bytes */ #define ACPI_END (SBX00_ACPI_IO_BASE + 0x80) #define PM_INDEX 0xCD6 #define PM_DATA 0xCD7 #define PM2_INDEX 0xCD0 #define PM2_DATA 0xCD1 /* * SCI interrupt need acpi space, allocate here */ static int __init register_acpi_resource(void) { request_region(SBX00_ACPI_IO_BASE, SBX00_ACPI_IO_SIZE, "acpi"); return 0; } static void pmio_write_index(u16 index, u8 reg, u8 value) { outb(reg, index); outb(value, index + 1); } static u8 pmio_read_index(u16 index, u8 reg) { outb(reg, index); return inb(index + 1); } void pm_iowrite(u8 reg, u8 value) { pmio_write_index(PM_INDEX, reg, value); } EXPORT_SYMBOL(pm_iowrite); u8 pm_ioread(u8 reg) { return pmio_read_index(PM_INDEX, reg); } EXPORT_SYMBOL(pm_ioread); void pm2_iowrite(u8 reg, u8 value) { pmio_write_index(PM2_INDEX, reg, value); } EXPORT_SYMBOL(pm2_iowrite); u8 pm2_ioread(u8 reg) { return pmio_read_index(PM2_INDEX, reg); } EXPORT_SYMBOL(pm2_ioread); static void acpi_hw_clear_status(void) { u16 value; /* PMStatus: Clear WakeStatus/PwrBtnStatus */ value = inw(ACPI_PM_EVT_BLK); value |= (1 << 8 | 1 << 15); outw(value, ACPI_PM_EVT_BLK); /* GPEStatus: Clear all generated events */ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK); } void acpi_registers_setup(void) { u32 value; /* PM Status Base */ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff); pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8); /* PM Control Base */ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff); pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8); /* GPM Base */ pm_iowrite(0x28, 
ACPI_GPE0_BLK & 0xff); pm_iowrite(0x29, ACPI_GPE0_BLK >> 8); /* ACPI End */ pm_iowrite(0x2e, ACPI_END & 0xff); pm_iowrite(0x2f, ACPI_END >> 8); /* IO Decode: When AcpiDecodeEnable set, South-Bridge uses the contents * of the PM registers at index 0x20~0x2B to decode ACPI I/O address. */ pm_iowrite(0x0e, 1 << 3); /* SCI_EN set */ outw(1, ACPI_PM_CNT_BLK); /* Enable to generate SCI */ pm_iowrite(0x10, pm_ioread(0x10) | 1); /* GPM3/GPM9 enable */ value = inl(ACPI_GPE0_BLK + 4); outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4); /* Set GPM9 as input */ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1))); /* Set GPM9 as non-output */ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3)); /* GPM3 config ACPI trigger SCIOUT */ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4))); /* GPM9 config ACPI trigger SCIOUT */ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2))); /* GPM3 config falling edge trigger */ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6))); /* No wait for STPGNT# in ACPI Sx state */ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6)); /* Set GPM3 pull-down enable */ value = pm2_ioread(0xf6); value |= ((1 << 7) | (1 << 3)); pm2_iowrite(0xf6, value); /* Set GPM9 pull-down enable */ value = pm2_ioread(0xf8); value |= ((1 << 5) | (1 << 1)); pm2_iowrite(0xf8, value); } int __init sbx00_acpi_init(void) { register_acpi_resource(); acpi_registers_setup(); acpi_hw_clear_status(); return 0; }
gpl-2.0
ZhizhouTian/kernel-stable
drivers/cpuidle/cpuidle-powernv.c
287
6483
/* * cpuidle-powernv - idle state cpuidle driver. * Adapted from drivers/cpuidle/cpuidle-pseries * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/cpuidle.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/clockchips.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/runlatch.h> /* Flags and constants used in PowerNV platform */ #define MAX_POWERNV_IDLE_STATES 8 #define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */ #define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */ struct cpuidle_driver powernv_idle_driver = { .name = "powernv_idle", .owner = THIS_MODULE, }; static int max_idle_state; static struct cpuidle_state *cpuidle_state_table; static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); ppc64_runlatch_off(); while (!need_resched()) { HMT_low(); HMT_very_low(); } HMT_medium(); ppc64_runlatch_on(); clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb(); return index; } static int nap_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { ppc64_runlatch_off(); power7_idle(); ppc64_runlatch_on(); return index; } static int fastsleep_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long old_lpcr = mfspr(SPRN_LPCR); unsigned long new_lpcr; if (unlikely(system_state < SYSTEM_RUNNING)) return index; new_lpcr = old_lpcr; /* Do not exit powersave upon decrementer as we've setup the timer * offload. */ new_lpcr &= ~LPCR_PECE1; mtspr(SPRN_LPCR, new_lpcr); power7_sleep(); mtspr(SPRN_LPCR, old_lpcr); return index; } /* * States for dedicated partition case. 
*/ static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = { { /* Snooze */ .name = "snooze", .desc = "snooze", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 0, .target_residency = 0, .enter = &snooze_loop }, }; static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; struct cpuidle_device *dev = per_cpu(cpuidle_devices, hotcpu); if (dev && cpuidle_get_driver()) { switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: cpuidle_pause_and_lock(); cpuidle_enable_device(dev); cpuidle_resume_and_unlock(); break; case CPU_DEAD: case CPU_DEAD_FROZEN: cpuidle_pause_and_lock(); cpuidle_disable_device(dev); cpuidle_resume_and_unlock(); break; default: return NOTIFY_DONE; } } return NOTIFY_OK; } static struct notifier_block setup_hotplug_notifier = { .notifier_call = powernv_cpuidle_add_cpu_notifier, }; /* * powernv_cpuidle_driver_init() */ static int powernv_cpuidle_driver_init(void) { int idle_state; struct cpuidle_driver *drv = &powernv_idle_driver; drv->state_count = 0; for (idle_state = 0; idle_state < max_idle_state; ++idle_state) { /* Is the state not enabled? 
*/ if (cpuidle_state_table[idle_state].enter == NULL) continue; drv->states[drv->state_count] = /* structure copy */ cpuidle_state_table[idle_state]; drv->state_count += 1; } return 0; } static int powernv_add_idle_states(void) { struct device_node *power_mgt; int nr_idle_states = 1; /* Snooze */ int dt_idle_states; const __be32 *idle_state_flags; const __be32 *idle_state_latency; u32 len_flags, flags, latency_ns; int i; /* Currently we have snooze statically defined */ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); if (!power_mgt) { pr_warn("opal: PowerMgmt Node not found\n"); return nr_idle_states; } idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags); if (!idle_state_flags) { pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); return nr_idle_states; } idle_state_latency = of_get_property(power_mgt, "ibm,cpu-idle-state-latencies-ns", NULL); if (!idle_state_latency) { pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n"); return nr_idle_states; } dt_idle_states = len_flags / sizeof(u32); for (i = 0; i < dt_idle_states; i++) { flags = be32_to_cpu(idle_state_flags[i]); /* Cpuidle accepts exit_latency in us and we estimate * target residency to be 10x exit_latency */ latency_ns = be32_to_cpu(idle_state_latency[i]); if (flags & IDLE_USE_INST_NAP) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID; powernv_states[nr_idle_states].exit_latency = ((unsigned int)latency_ns) / 1000; powernv_states[nr_idle_states].target_residency = ((unsigned int)latency_ns / 100); powernv_states[nr_idle_states].enter = &nap_loop; nr_idle_states++; } if (flags & IDLE_USE_INST_SLEEP) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID | 
CPUIDLE_FLAG_TIMER_STOP; powernv_states[nr_idle_states].exit_latency = ((unsigned int)latency_ns) / 1000; powernv_states[nr_idle_states].target_residency = ((unsigned int)latency_ns / 100); powernv_states[nr_idle_states].enter = &fastsleep_loop; nr_idle_states++; } } return nr_idle_states; } /* * powernv_idle_probe() * Choose state table for shared versus dedicated partition */ static int powernv_idle_probe(void) { if (cpuidle_disable != IDLE_NO_OVERRIDE) return -ENODEV; if (firmware_has_feature(FW_FEATURE_OPALv3)) { cpuidle_state_table = powernv_states; /* Device tree can indicate more idle states */ max_idle_state = powernv_add_idle_states(); } else return -ENODEV; return 0; } static int __init powernv_processor_idle_init(void) { int retval; retval = powernv_idle_probe(); if (retval) return retval; powernv_cpuidle_driver_init(); retval = cpuidle_register(&powernv_idle_driver, NULL); if (retval) { printk(KERN_DEBUG "Registration of powernv driver failed.\n"); return retval; } register_cpu_notifier(&setup_hotplug_notifier); printk(KERN_DEBUG "powernv_idle_driver registered\n"); return 0; } device_initcall(powernv_processor_idle_init);
gpl-2.0
Anik1199/Kernel_taoshan
arch/arm/mach-msm/lge/mako/board-mako-storage.c
1055
7665
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * Copyright (c) 2012, LGE Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/bootmem.h> #include <asm/mach-types.h> #include <asm/mach/mmc.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/gpiomux.h> #include <mach/board_lge.h> #include "devices.h" #include "board-mako.h" #include "board-storage-common-a.h" /* APQ8064 has 4 SDCC controllers */ enum sdcc_controllers { SDCC1, SDCC2, SDCC3, SDCC4, MAX_SDCC_CONTROLLER }; /* All SDCC controllers require VDD/VCC voltage */ static struct msm_mmc_reg_data mmc_vdd_reg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .name = "sdc_vdd", .high_vol_level = 2950000, .low_vol_level = 2950000, .always_on = 1, .lpm_sup = 1, .lpm_uA = 9000, .hpm_uA = 200000, /* 200mA */ }, /* SDCC3 : External card slot connected */ [SDCC3] = { .name = "sdc_vdd", .high_vol_level = 2950000, .low_vol_level = 2950000, .hpm_uA = 800000, /* 800mA */ } }; /* SDCC controllers may require voting for VDD IO voltage */ static struct msm_mmc_reg_data mmc_vdd_io_reg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .name = "sdc_vdd_io", .always_on = 1, .high_vol_level = 1800000, .low_vol_level = 1800000, .hpm_uA = 200000, /* 200mA */ }, /* SDCC3 : External card slot connected */ [SDCC3] = { .name = "sdc_vdd_io", .high_vol_level = 2950000, .low_vol_level = 1850000, .always_on = 1, .lpm_sup = 1, /* 
Max. Active current required is 16 mA */ .hpm_uA = 16000, /* * Sleep current required is ~300 uA. But min. vote can be * in terms of mA (min. 1 mA). So let's vote for 2 mA * during sleep. */ .lpm_uA = 2000, } }; static struct msm_mmc_slot_reg_data mmc_slot_vreg_data[MAX_SDCC_CONTROLLER] = { /* SDCC1 : eMMC card connected */ [SDCC1] = { .vdd_data = &mmc_vdd_reg_data[SDCC1], .vdd_io_data = &mmc_vdd_io_reg_data[SDCC1], }, /* SDCC3 : External card slot connected */ [SDCC3] = { .vdd_data = &mmc_vdd_reg_data[SDCC3], .vdd_io_data = &mmc_vdd_io_reg_data[SDCC3], } }; /* SDC1 pad data */ static struct msm_mmc_pad_drv sdc1_pad_drv_on_cfg[] = { {TLMM_HDRV_SDC1_CLK, GPIO_CFG_16MA}, {TLMM_HDRV_SDC1_CMD, GPIO_CFG_10MA}, {TLMM_HDRV_SDC1_DATA, GPIO_CFG_10MA} }; static struct msm_mmc_pad_drv sdc1_pad_drv_off_cfg[] = { {TLMM_HDRV_SDC1_CLK, GPIO_CFG_2MA}, {TLMM_HDRV_SDC1_CMD, GPIO_CFG_2MA}, {TLMM_HDRV_SDC1_DATA, GPIO_CFG_2MA} }; static struct msm_mmc_pad_pull sdc1_pad_pull_on_cfg[] = { {TLMM_PULL_SDC1_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC1_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC1_DATA, GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull sdc1_pad_pull_off_cfg[] = { {TLMM_PULL_SDC1_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC1_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC1_DATA, GPIO_CFG_PULL_UP} }; /* SDC3 pad data */ static struct msm_mmc_pad_drv sdc3_pad_drv_on_cfg[] = { {TLMM_HDRV_SDC3_CLK, GPIO_CFG_8MA}, {TLMM_HDRV_SDC3_CMD, GPIO_CFG_8MA}, {TLMM_HDRV_SDC3_DATA, GPIO_CFG_8MA} }; static struct msm_mmc_pad_drv sdc3_pad_drv_off_cfg[] = { {TLMM_HDRV_SDC3_CLK, GPIO_CFG_2MA}, {TLMM_HDRV_SDC3_CMD, GPIO_CFG_2MA}, {TLMM_HDRV_SDC3_DATA, GPIO_CFG_2MA} }; static struct msm_mmc_pad_pull sdc3_pad_pull_on_cfg[] = { {TLMM_PULL_SDC3_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC3_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC3_DATA, GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull sdc3_pad_pull_off_cfg[] = { {TLMM_PULL_SDC3_CLK, GPIO_CFG_NO_PULL}, {TLMM_PULL_SDC3_CMD, GPIO_CFG_PULL_UP}, {TLMM_PULL_SDC3_DATA, 
GPIO_CFG_PULL_UP} }; static struct msm_mmc_pad_pull_data mmc_pad_pull_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .on = sdc1_pad_pull_on_cfg, .off = sdc1_pad_pull_off_cfg, .size = ARRAY_SIZE(sdc1_pad_pull_on_cfg) }, [SDCC3] = { .on = sdc3_pad_pull_on_cfg, .off = sdc3_pad_pull_off_cfg, .size = ARRAY_SIZE(sdc3_pad_pull_on_cfg) }, }; static struct msm_mmc_pad_drv_data mmc_pad_drv_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .on = sdc1_pad_drv_on_cfg, .off = sdc1_pad_drv_off_cfg, .size = ARRAY_SIZE(sdc1_pad_drv_on_cfg) }, [SDCC3] = { .on = sdc3_pad_drv_on_cfg, .off = sdc3_pad_drv_off_cfg, .size = ARRAY_SIZE(sdc3_pad_drv_on_cfg) }, }; static struct msm_mmc_pad_data mmc_pad_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .pull = &mmc_pad_pull_data[SDCC1], .drv = &mmc_pad_drv_data[SDCC1] }, [SDCC3] = { .pull = &mmc_pad_pull_data[SDCC3], .drv = &mmc_pad_drv_data[SDCC3] }, }; static struct msm_mmc_pin_data mmc_slot_pin_data[MAX_SDCC_CONTROLLER] = { [SDCC1] = { .pad_data = &mmc_pad_data[SDCC1], }, [SDCC3] = { .pad_data = &mmc_pad_data[SDCC3], }, }; #define MSM_MPM_PIN_SDC1_DAT1 17 #define MSM_MPM_PIN_SDC3_DAT1 21 #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT static unsigned int sdc1_sup_clk_rates[] = { 400000, 24000000, 48000000, 96000000 }; static struct mmc_platform_data sdc1_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, #ifdef CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT .mmc_bus_width = MMC_CAP_8_BIT_DATA, #else .mmc_bus_width = MMC_CAP_4_BIT_DATA, #endif .sup_clk_table = sdc1_sup_clk_rates, .sup_clk_cnt = ARRAY_SIZE(sdc1_sup_clk_rates), .nonremovable = 1, .pin_data = &mmc_slot_pin_data[SDCC1], .vreg_data = &mmc_slot_vreg_data[SDCC1], .uhs_caps = MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50, .uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR, .mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1, .msm_bus_voting_data = &sps_to_ddr_bus_voting_data, }; static struct mmc_platform_data *apq8064_sdc1_pdata = &sdc1_data; #else static struct mmc_platform_data *apq8064_sdc1_pdata; #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT static 
unsigned int sdc3_sup_clk_rates[] = { 400000, 24000000, 48000000, 96000000, 192000000 }; static struct mmc_platform_data sdc3_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .sup_clk_table = sdc3_sup_clk_rates, .sup_clk_cnt = ARRAY_SIZE(sdc3_sup_clk_rates), .pin_data = &mmc_slot_pin_data[SDCC3], .vreg_data = &mmc_slot_vreg_data[SDCC3], .status_gpio = 26, .status_irq = MSM_GPIO_TO_INT(26), .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, .is_status_gpio_active_low = 1, .xpc_cap = 1, .uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MAX_CURRENT_800), .mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1, .msm_bus_voting_data = &sps_to_ddr_bus_voting_data, }; static struct mmc_platform_data *apq8064_sdc3_pdata = &sdc3_data; #else static struct mmc_platform_data *apq8064_sdc3_pdata; #endif static void __init mako_fixup_sdc1(void) { if (lge_get_board_revno() >= HW_REV_D) { /* enable the packed write on eMMC 4.5 */ apq8064_sdc1_pdata->packed_write = MMC_CAP2_PACKED_WR | MMC_CAP2_PACKED_WR_CONTROL; } } void __init apq8064_init_mmc(void) { if (apq8064_sdc1_pdata) { mako_fixup_sdc1(); apq8064_add_sdcc(1, apq8064_sdc1_pdata); } if (apq8064_sdc3_pdata) { apq8064_sdc3_pdata->wpswitch_gpio = 0; apq8064_sdc3_pdata->is_wpswitch_active_low = false; apq8064_add_sdcc(3, apq8064_sdc3_pdata); } }
gpl-2.0
LeeDroid-/Ace-2.6.35
arch/powerpc/platforms/cell/beat_interrupt.c
1055
7445
/* * Celleb/Beat Interrupt controller * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <asm/machdep.h> #include "beat_interrupt.h" #include "beat_wrapper.h" #define MAX_IRQS NR_IRQS static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; static struct irq_host *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", * "(irq_hw_number_t)hwirq" == "IRQ outlet number". 
*/ /* assumption: locked */ static inline void beatic_update_irq_mask(unsigned int irq_plug) { int off; unsigned long masks[4]; off = (irq_plug / 256) * 4; masks[0] = beatic_irq_mask_enable[off + 0] & beatic_irq_mask_ack[off + 0]; masks[1] = beatic_irq_mask_enable[off + 1] & beatic_irq_mask_ack[off + 1]; masks[2] = beatic_irq_mask_enable[off + 2] & beatic_irq_mask_ack[off + 2]; masks[3] = beatic_irq_mask_enable[off + 3] & beatic_irq_mask_ack[off + 3]; if (beat_set_interrupt_mask(irq_plug&~255UL, masks[0], masks[1], masks[2], masks[3]) != 0) panic("Failed to set mask IRQ!"); } static void beatic_mask_irq(unsigned int irq_plug) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); beatic_update_irq_mask(irq_plug); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_unmask_irq(unsigned int irq_plug) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); beatic_update_irq_mask(irq_plug); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_ack_irq(unsigned int irq_plug) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); beatic_update_irq_mask(irq_plug); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_end_irq(unsigned int irq_plug) { s64 err; unsigned long flags; err = beat_downcount_of_interrupt(irq_plug); if (err != 0) { if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ panic("Failed to downcount IRQ! 
Error = %16llx", err); printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); } raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); beatic_update_irq_mask(irq_plug); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { .name = "CELL-BEAT", .unmask = beatic_unmask_irq, .mask = beatic_mask_irq, .eoi = beatic_end_irq, }; /* * Dispose binding hardware IRQ number (hw) and Virtuql IRQ number (virq), * update flags. * * Note that the number (virq) is already assigned at upper layer. */ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) { beat_destruct_irq_plug(virq); } /* * Create or update binding hardware IRQ number (hw) and Virtuql * IRQ number (virq). This is called only once for a given mapping. * * Note that the number (virq) is already assigned at upper layer. */ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { struct irq_desc *desc = irq_to_desc(virq); int64_t err; err = beat_construct_and_connect_irq_plug(virq, hw); if (err < 0) return -EIO; desc->status |= IRQ_LEVEL; set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); return 0; } /* * Update binding hardware IRQ number (hw) and Virtuql * IRQ number (virq). This is called only once for a given mapping. */ static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { beat_construct_and_connect_irq_plug(virq, hw); } /* * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), * to pass away to irq_create_mapping(). * * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. 
*/
static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_flags)
{
	/* The DT cells are read as one 64-bit value (two u32 cells). */
	const u64 *intspec2 = (const u64 *)intspec;

	*out_hwirq = *intspec2;
	/* All Beat interrupts are level-low; OR'd into caller's flags. */
	*out_flags |= IRQ_TYPE_LEVEL_LOW;
	return 0;
}

/* Accept any device node: this host owns every interrupt on the platform. */
static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
{
	/* Match all */
	return 1;
}

/* irq_host callbacks for the Beat interrupt controller. */
static struct irq_host_ops beatic_pic_host_ops = {
	.map = beatic_pic_host_map,
	.remap = beatic_pic_host_remap,
	.unmap = beatic_pic_host_unmap,
	.xlate = beatic_pic_host_xlate,
	.match = beatic_pic_host_match,
};

/*
 * Get an IRQ number
 * Note: returns VIRQ
 *
 * Scans pending interrupts 256 at a time (4 x 64-bit words).  For each
 * word, only bits that are both enabled and not awaiting ack are
 * considered; the PPC "cntlzd" instruction (count leading zeros) finds
 * the lowest-numbered such IRQ in the word (bit 0 is the MSB here, so
 * leading-zero count maps directly to the IRQ offset).  A count of 64
 * means no candidate bit was set in that word.
 */
static inline unsigned int beatic_get_irq_plug(void)
{
	int i;
	uint64_t pending[4], ub;

	for (i = 0; i < MAX_IRQS; i += 256) {
		beat_detect_pending_interrupts(i, pending);
		__asm__ ("cntlzd %0,%1":"=r"(ub):
			 "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
			     & beatic_irq_mask_ack[i/64+0]));
		if (ub != 64)
			return i + ub + 0;
		__asm__ ("cntlzd %0,%1":"=r"(ub):
			 "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
			     & beatic_irq_mask_ack[i/64+1]));
		if (ub != 64)
			return i + ub + 64;
		__asm__ ("cntlzd %0,%1":"=r"(ub):
			 "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
			     & beatic_irq_mask_ack[i/64+2]));
		if (ub != 64)
			return i + ub + 128;
		__asm__ ("cntlzd %0,%1":"=r"(ub):
			 "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
			     & beatic_irq_mask_ack[i/64+3]));
		if (ub != 64)
			return i + ub + 192;
	}
	return NO_IRQ;
}

/*
 * Fetch and acknowledge the next pending IRQ (virtual IRQ number),
 * or NO_IRQ if nothing is pending.
 */
unsigned int beatic_get_irq(void)
{
	unsigned int ret;

	ret = beatic_get_irq_plug();
	if (ret != NO_IRQ)
		beatic_ack_irq(ret);
	return ret;
}

/*
 * Initialise the Beat interrupt controller: mask everything, register
 * the platform get_irq hook and allocate the irq host.
 */
void __init beatic_init_IRQ(void)
{
	int i;

	/* Start with all sources disabled and marked "not awaiting ack". */
	memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
	memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
	for (i = 0; i < MAX_IRQS; i += 256)
		beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);

	/* Set our get_irq function */
	ppc_md.get_irq = beatic_get_irq;

	/* Allocate an irq host */
	beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
				     &beatic_pic_host_ops, 0);
	BUG_ON(beatic_host == NULL);
	irq_set_default_host(beatic_host);
}

#ifdef CONFIG_SMP

/* Nullified to compile with SMP mode */
void beatic_setup_cpu(int cpu)
{
}

void beatic_cause_IPI(int cpu, int mesg)
{
}

void beatic_request_IPIs(void)
{
}

#endif /* CONFIG_SMP */

/*
 * Tear down: destroy every constructed IRQ plug.  Starts at 1 because
 * plug 0 is never handed out -- NOTE(review): presumably reserved;
 * confirm against beat_construct_and_connect_irq_plug() callers.
 */
void beatic_deinit_IRQ(void)
{
	int i;

	for (i = 1; i < NR_IRQS; i++)
		beat_destruct_irq_plug(i);
}
gpl-2.0
AOSP-JF/platform_kernel_samsung_jf
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
1311
149105
/*
 * Hardware modules present on the OMAP44xx chips
 *
 * Copyright (C) 2009-2011 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Paul Walmsley
 * Benoit Cousson
 *
 * This file is automatically generated from the OMAP hardware databases.
 * We respectfully ask that any modifications to this file be coordinated
 * with the public linux-omap@vger.kernel.org mailing list and the
 * authors above to ensure that the autogeneration scripts are kept
 * up-to-date with the file contents.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>

#include <plat/omap_hwmod.h>
#include <plat/cpu.h>
#include <plat/i2c.h>
#include <plat/gpio.h>
#include <plat/dma.h>
#include <plat/mcspi.h>
#include <plat/mcbsp.h>
#include <plat/mmc.h>
#include <plat/dmtimer.h>
#include <plat/common.h>

#include "omap_hwmod_common_data.h"

#include "smartreflex.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "wd_timer.h"

/* Base offset for all OMAP4 interrupts external to MPUSS */
#define OMAP44XX_IRQ_GIC_START	32

/* Base offset for all OMAP4 dma requests */
#define OMAP44XX_DMA_REQ_START	1

/*
 * Backward references (IPs with Bus Master capability)
 *
 * Forward declarations for hwmods referenced as OCP interface masters
 * or slaves before their full definitions appear below.
 */
static struct omap_hwmod omap44xx_aess_hwmod;
static struct omap_hwmod omap44xx_dma_system_hwmod;
static struct omap_hwmod omap44xx_dmm_hwmod;
static struct omap_hwmod omap44xx_dsp_hwmod;
static struct omap_hwmod omap44xx_dss_hwmod;
static struct omap_hwmod omap44xx_emif_fw_hwmod;
static struct omap_hwmod omap44xx_hsi_hwmod;
static struct omap_hwmod omap44xx_ipu_hwmod;
static struct omap_hwmod omap44xx_iss_hwmod;
static struct omap_hwmod omap44xx_iva_hwmod;
static struct omap_hwmod omap44xx_l3_instr_hwmod;
static struct omap_hwmod omap44xx_l3_main_1_hwmod;
static struct omap_hwmod omap44xx_l3_main_2_hwmod;
static struct omap_hwmod omap44xx_l3_main_3_hwmod;
static struct omap_hwmod omap44xx_l4_abe_hwmod;
static struct omap_hwmod omap44xx_l4_cfg_hwmod;
static struct omap_hwmod omap44xx_l4_per_hwmod;
static struct omap_hwmod omap44xx_l4_wkup_hwmod;
static struct omap_hwmod omap44xx_mmc1_hwmod;
static struct omap_hwmod omap44xx_mmc2_hwmod;
static struct omap_hwmod omap44xx_mpu_hwmod;
static struct omap_hwmod omap44xx_mpu_private_hwmod;
static struct omap_hwmod omap44xx_usb_otg_hs_hwmod;
static struct omap_hwmod omap44xx_usb_host_hs_hwmod;
static struct omap_hwmod omap44xx_usb_tll_hs_hwmod;

/*
 * Interconnects omap_hwmod structures
 * hwmods that compose the global OMAP interconnect
 */

/*
 * 'dmm' class
 * instance(s): dmm
 */
static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
	.name	= "dmm",
};

/* dmm */
static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
	{ .irq = 113 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

/* l3_main_1 -> dmm */
static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = {
	.master		= &omap44xx_l3_main_1_hwmod,
	.slave		= &omap44xx_dmm_hwmod,
	.clk		= "l3_div_ck",
	.user		= OCP_USER_SDMA,
};

static struct omap_hwmod_addr_space omap44xx_dmm_addrs[] = {
	{
		.pa_start	= 0x4e000000,
		.pa_end		= 0x4e0007ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* mpu -> dmm */
static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = {
	.master		= &omap44xx_mpu_hwmod,
	.slave		= &omap44xx_dmm_hwmod,
	.clk		= "l3_div_ck",
	.addr		= omap44xx_dmm_addrs,
	.user		= OCP_USER_MPU,
};

/* dmm slave ports */
static struct omap_hwmod_ocp_if *omap44xx_dmm_slaves[] = {
	&omap44xx_l3_main_1__dmm,
	&omap44xx_mpu__dmm,
};

static struct omap_hwmod omap44xx_dmm_hwmod = {
	.name		= "dmm",
	.class		= &omap44xx_dmm_hwmod_class,
	.clkdm_name	= "l3_emif_clkdm",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET,
		},
	},
	.slaves		= omap44xx_dmm_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_dmm_slaves),
	.mpu_irqs	= omap44xx_dmm_irqs,
};

/*
 * 'emif_fw' class
 * instance(s): emif_fw
 */
static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = { .name = "emif_fw", }; /* emif_fw */ /* dmm -> emif_fw */ static struct omap_hwmod_ocp_if omap44xx_dmm__emif_fw = { .master = &omap44xx_dmm_hwmod, .slave = &omap44xx_emif_fw_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_emif_fw_addrs[] = { { .pa_start = 0x4a20c000, .pa_end = 0x4a20c0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> emif_fw */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_emif_fw_hwmod, .clk = "l4_div_ck", .addr = omap44xx_emif_fw_addrs, .user = OCP_USER_MPU, }; /* emif_fw slave ports */ static struct omap_hwmod_ocp_if *omap44xx_emif_fw_slaves[] = { &omap44xx_dmm__emif_fw, &omap44xx_l4_cfg__emif_fw, }; static struct omap_hwmod omap44xx_emif_fw_hwmod = { .name = "emif_fw", .class = &omap44xx_emif_fw_hwmod_class, .clkdm_name = "l3_emif_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_MEMIF_EMIF_FW_CONTEXT_OFFSET, }, }, .slaves = omap44xx_emif_fw_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_emif_fw_slaves), }; /* * 'l3' class * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3 */ static struct omap_hwmod_class omap44xx_l3_hwmod_class = { .name = "l3", }; /* l3_instr */ /* iva -> l3_instr */ static struct omap_hwmod_ocp_if omap44xx_iva__l3_instr = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_l3_instr_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_3 -> l3_instr */ static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = { .master = &omap44xx_l3_main_3_hwmod, .slave = &omap44xx_l3_instr_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_instr slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_instr_slaves[] = { &omap44xx_iva__l3_instr, &omap44xx_l3_main_3__l3_instr, }; static struct omap_hwmod omap44xx_l3_instr_hwmod = { 
.name = "l3_instr", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_instr_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_l3_instr_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_instr_slaves), }; /* l3_main_1 */ static struct omap_hwmod_irq_info omap44xx_l3_main_1_irqs[] = { { .name = "dbg_err", .irq = 9 + OMAP44XX_IRQ_GIC_START }, { .name = "app_err", .irq = 10 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* dsp -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_dss__l3_main_1 = { .master = &omap44xx_dss_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_2 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc1 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mmc1__l3_main_1 = { .master = &omap44xx_mmc1_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc2 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = { .master = &omap44xx_mmc2_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_l3_main_1_addrs[] = { { .pa_start = 0x44000000, .pa_end = 0x44000fff, .flags = 
ADDR_TYPE_RT }, { } }; /* mpu -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_1_addrs, .user = OCP_USER_MPU, }; /* l3_main_1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = { &omap44xx_dsp__l3_main_1, &omap44xx_dss__l3_main_1, &omap44xx_l3_main_2__l3_main_1, &omap44xx_l4_cfg__l3_main_1, &omap44xx_mmc1__l3_main_1, &omap44xx_mmc2__l3_main_1, &omap44xx_mpu__l3_main_1, }; static struct omap_hwmod omap44xx_l3_main_1_hwmod = { .name = "l3_main_1", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_1_clkdm", .mpu_irqs = omap44xx_l3_main_1_irqs, .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l3_main_1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_1_slaves), }; /* l3_main_2 */ /* dma_system -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = { .master = &omap44xx_dma_system_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* hsi -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = { .master = &omap44xx_hsi_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* ipu -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_ipu__l3_main_2 = { .master = &omap44xx_ipu_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* iss -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = { .master = &omap44xx_iss_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* iva -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_iva__l3_main_2 = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = 
OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_l3_main_2_addrs[] = { { .pa_start = 0x44800000, .pa_end = 0x44801fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_1 -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_2_addrs, .user = OCP_USER_MPU, }; /* l4_cfg -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* usb_otg_hs -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_usb_otg_hs__l3_main_2 = { .master = &omap44xx_usb_otg_hs_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = { &omap44xx_dma_system__l3_main_2, &omap44xx_hsi__l3_main_2, &omap44xx_ipu__l3_main_2, &omap44xx_iss__l3_main_2, &omap44xx_iva__l3_main_2, &omap44xx_l3_main_1__l3_main_2, &omap44xx_l4_cfg__l3_main_2, &omap44xx_usb_otg_hs__l3_main_2, }; static struct omap_hwmod omap44xx_l3_main_2_hwmod = { .name = "l3_main_2", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_2_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l3_main_2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_2_slaves), }; /* l3_main_3 */ static struct omap_hwmod_addr_space omap44xx_l3_main_3_addrs[] = { { .pa_start = 0x45000000, .pa_end = 0x45000fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_1 -> l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_3_addrs, .user = OCP_USER_MPU, }; /* l3_main_2 -> 
l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_3 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg -> l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_3_slaves[] = { &omap44xx_l3_main_1__l3_main_3, &omap44xx_l3_main_2__l3_main_3, &omap44xx_l4_cfg__l3_main_3, }; static struct omap_hwmod omap44xx_l3_main_3_hwmod = { .name = "l3_main_3", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_instr_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_l3_main_3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_3_slaves), }; /* * 'l4' class * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup */ static struct omap_hwmod_class omap44xx_l4_hwmod_class = { .name = "l4", }; /* l4_abe */ /* aess -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = { .master = &omap44xx_aess_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dsp -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_1 -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_abe = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mpu -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_mpu__l4_abe = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | 
OCP_USER_SDMA, }; /* l4_abe slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_abe_slaves[] = { &omap44xx_aess__l4_abe, &omap44xx_dsp__l4_abe, &omap44xx_l3_main_1__l4_abe, &omap44xx_mpu__l4_abe, }; static struct omap_hwmod omap44xx_l4_abe_hwmod = { .name = "l4_abe", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "abe_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET, }, }, .slaves = omap44xx_l4_abe_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_abe_slaves), }; /* l4_cfg */ /* l3_main_1 -> l4_cfg */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l4_cfg_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_cfg_slaves[] = { &omap44xx_l3_main_1__l4_cfg, }; static struct omap_hwmod omap44xx_l4_cfg_hwmod = { .name = "l4_cfg", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_cfg_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_cfg_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_cfg_slaves), }; /* l4_per */ /* l3_main_2 -> l4_per */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l4_per_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_per_slaves[] = { &omap44xx_l3_main_2__l4_per, }; static struct omap_hwmod omap44xx_l4_per_hwmod = { .name = "l4_per", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_per_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_per_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_per_slaves), }; /* l4_wkup */ /* l4_cfg -> l4_wkup */ static struct omap_hwmod_ocp_if 
omap44xx_l4_cfg__l4_wkup = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l4_wkup_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_wkup slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_wkup_slaves[] = { &omap44xx_l4_cfg__l4_wkup, }; static struct omap_hwmod omap44xx_l4_wkup_hwmod = { .name = "l4_wkup", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_wkup_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_wkup_slaves), }; /* * 'mpu_bus' class * instance(s): mpu_private */ static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = { .name = "mpu_bus", }; /* mpu_private */ /* mpu -> mpu_private */ static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_mpu_private_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mpu_private slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mpu_private_slaves[] = { &omap44xx_mpu__mpu_private, }; static struct omap_hwmod omap44xx_mpu_private_hwmod = { .name = "mpu_private", .class = &omap44xx_mpu_bus_hwmod_class, .clkdm_name = "mpuss_clkdm", .slaves = omap44xx_mpu_private_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mpu_private_slaves), }; /* * Modules omap_hwmod structures * * The following IPs are excluded for the moment because: * - They do not need an explicit SW control using omap_hwmod API. 
* - They still need to be validated with the driver * properly adapted to omap_hwmod / omap_device * * c2c * c2c_target_fw * cm_core * cm_core_aon * ctrl_module_core * ctrl_module_pad_core * ctrl_module_pad_wkup * ctrl_module_wkup * debugss * efuse_ctrl_cust * efuse_ctrl_std * elm * emif1 * emif2 * fdif * gpmc * gpu * hdq1w * mcasp * mpu_c0 * mpu_c1 * ocmc_ram * ocp2scp_usb_phy * ocp_wp_noc * prcm_mpu * prm * scrm * sl2if * slimbus1 * slimbus2 * usb_host_fs * usb_host_hs * usb_phy_cm * usb_tll_hs * usim */ /* * 'aess' class * audio engine sub system */ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_aess_hwmod_class = { .name = "aess", .sysc = &omap44xx_aess_sysc, }; /* aess */ static struct omap_hwmod_irq_info omap44xx_aess_irqs[] = { { .irq = 99 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = { { .name = "fifo0", .dma_req = 100 + OMAP44XX_DMA_REQ_START }, { .name = "fifo1", .dma_req = 101 + OMAP44XX_DMA_REQ_START }, { .name = "fifo2", .dma_req = 102 + OMAP44XX_DMA_REQ_START }, { .name = "fifo3", .dma_req = 103 + OMAP44XX_DMA_REQ_START }, { .name = "fifo4", .dma_req = 104 + OMAP44XX_DMA_REQ_START }, { .name = "fifo5", .dma_req = 105 + OMAP44XX_DMA_REQ_START }, { .name = "fifo6", .dma_req = 106 + OMAP44XX_DMA_REQ_START }, { .name = "fifo7", .dma_req = 107 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; /* aess master ports */ static struct omap_hwmod_ocp_if *omap44xx_aess_masters[] = { &omap44xx_aess__l4_abe, }; static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = { { .name = "dmem", .pa_start = 0x40180000, .pa_end = 0x4018ffff }, { .name = "cmem", .pa_start = 0x401a0000, .pa_end = 
0x401a1fff }, { .name = "smem", .pa_start = 0x401c0000, .pa_end = 0x401c5fff }, { .name = "pmem", .pa_start = 0x401e0000, .pa_end = 0x401e1fff }, { .name = "mpu", .pa_start = 0x401f1000, .pa_end = 0x401f13ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> aess */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_aess_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_aess_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = { { .name = "dmem_dma", .pa_start = 0x49080000, .pa_end = 0x4908ffff }, { .name = "cmem_dma", .pa_start = 0x490a0000, .pa_end = 0x490a1fff }, { .name = "smem_dma", .pa_start = 0x490c0000, .pa_end = 0x490c5fff }, { .name = "pmem_dma", .pa_start = 0x490e0000, .pa_end = 0x490e1fff }, { .name = "dma", .pa_start = 0x490f1000, .pa_end = 0x490f13ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> aess (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_aess_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_aess_dma_addrs, .user = OCP_USER_SDMA, }; /* aess slave ports */ static struct omap_hwmod_ocp_if *omap44xx_aess_slaves[] = { &omap44xx_l4_abe__aess, &omap44xx_l4_abe__aess_dma, }; static struct omap_hwmod omap44xx_aess_hwmod = { .name = "aess", .class = &omap44xx_aess_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_aess_irqs, .sdma_reqs = omap44xx_aess_sdma_reqs, .main_clk = "aess_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_AESS_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_aess_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_aess_slaves), .masters = omap44xx_aess_masters, .masters_cnt = ARRAY_SIZE(omap44xx_aess_masters), }; /* * 'bandgap' class * bangap reference for ldo regulators */ static struct omap_hwmod_class omap44xx_bandgap_hwmod_class = { .name = "bandgap", }; /* bandgap */ static 
struct omap_hwmod_opt_clk bandgap_opt_clks[] = { { .role = "fclk", .clk = "bandgap_fclk" }, }; static struct omap_hwmod omap44xx_bandgap_hwmod = { .name = "bandgap", .class = &omap44xx_bandgap_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_BANDGAP_CLKCTRL_OFFSET, }, }, .opt_clks = bandgap_opt_clks, .opt_clks_cnt = ARRAY_SIZE(bandgap_opt_clks), }; /* * 'counter' class * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock */ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0004, .sysc_flags = SYSC_HAS_SIDLEMODE, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_counter_hwmod_class = { .name = "counter", .sysc = &omap44xx_counter_sysc, }; /* counter_32k */ static struct omap_hwmod omap44xx_counter_32k_hwmod; static struct omap_hwmod_addr_space omap44xx_counter_32k_addrs[] = { { .pa_start = 0x4a304000, .pa_end = 0x4a30401f, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> counter_32k */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_counter_32k_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_counter_32k_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* counter_32k slave ports */ static struct omap_hwmod_ocp_if *omap44xx_counter_32k_slaves[] = { &omap44xx_l4_wkup__counter_32k, }; static struct omap_hwmod omap44xx_counter_32k_hwmod = { .name = "counter_32k", .class = &omap44xx_counter_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .flags = HWMOD_SWSUP_SIDLE, .main_clk = "sys_32k_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET, }, }, .slaves = omap44xx_counter_32k_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_counter_32k_slaves), }; /* * 'dma' class * dma controller for data exchange between 
memory to memory (i.e. internal or * external memory) and gp peripherals to memory or memory to gp peripherals */ static struct omap_hwmod_class_sysconfig omap44xx_dma_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x002c, .syss_offs = 0x0028, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_dma_hwmod_class = { .name = "dma", .sysc = &omap44xx_dma_sysc, }; /* dma dev_attr */ static struct omap_dma_dev_attr dma_dev_attr = { .dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY | IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY, .lch_count = 32, }; /* dma_system */ static struct omap_hwmod_irq_info omap44xx_dma_system_irqs[] = { { .name = "0", .irq = 12 + OMAP44XX_IRQ_GIC_START }, { .name = "1", .irq = 13 + OMAP44XX_IRQ_GIC_START }, { .name = "2", .irq = 14 + OMAP44XX_IRQ_GIC_START }, { .name = "3", .irq = 15 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* dma_system master ports */ static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = { &omap44xx_dma_system__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = { { .pa_start = 0x4a056000, .pa_end = 0x4a056fff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> dma_system */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_dma_system_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dma_system_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dma_system slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dma_system_slaves[] = { &omap44xx_l4_cfg__dma_system, }; static struct omap_hwmod omap44xx_dma_system_hwmod = { .name = "dma_system", .class = &omap44xx_dma_hwmod_class, .clkdm_name = "l3_dma_clkdm", .mpu_irqs = 
omap44xx_dma_system_irqs, .main_clk = "l3_div_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET, }, }, .dev_attr = &dma_dev_attr, .slaves = omap44xx_dma_system_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dma_system_slaves), .masters = omap44xx_dma_system_masters, .masters_cnt = ARRAY_SIZE(omap44xx_dma_system_masters), }; /* * 'dmic' class * digital microphone controller */ static struct omap_hwmod_class_sysconfig omap44xx_dmic_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_dmic_hwmod_class = { .name = "dmic", .sysc = &omap44xx_dmic_sysc, }; /* dmic */ static struct omap_hwmod omap44xx_dmic_hwmod; static struct omap_hwmod_irq_info omap44xx_dmic_irqs[] = { { .irq = 114 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = { { .dma_req = 66 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = { { .name = "mpu", .pa_start = 0x4012e000, .pa_end = 0x4012e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> dmic */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_dmic_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_dmic_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = { { .name = "dma", .pa_start = 0x4902e000, .pa_end = 0x4902e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> dmic (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_dmic_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_dmic_dma_addrs, .user = OCP_USER_SDMA, }; /* dmic slave ports */ static struct 
omap_hwmod_ocp_if *omap44xx_dmic_slaves[] = { &omap44xx_l4_abe__dmic, &omap44xx_l4_abe__dmic_dma, }; static struct omap_hwmod omap44xx_dmic_hwmod = { .name = "dmic", .class = &omap44xx_dmic_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_dmic_irqs, .sdma_reqs = omap44xx_dmic_sdma_reqs, .main_clk = "dmic_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_dmic_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dmic_slaves), }; /* * 'dsp' class * dsp sub-system */ static struct omap_hwmod_class omap44xx_dsp_hwmod_class = { .name = "dsp", }; /* dsp */ static struct omap_hwmod_irq_info omap44xx_dsp_irqs[] = { { .irq = 28 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = { { .name = "mmu_cache", .rst_shift = 1 }, }; static struct omap_hwmod_rst_info omap44xx_dsp_c0_resets[] = { { .name = "dsp", .rst_shift = 0 }, }; /* dsp -> iva */ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_iva_hwmod, .clk = "dpll_iva_m5x2_ck", }; /* dsp master ports */ static struct omap_hwmod_ocp_if *omap44xx_dsp_masters[] = { &omap44xx_dsp__l3_main_1, &omap44xx_dsp__l4_abe, &omap44xx_dsp__iva, }; /* l4_cfg -> dsp */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dsp = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_dsp_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dsp slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dsp_slaves[] = { &omap44xx_l4_cfg__dsp, }; /* Pseudo hwmod for reset control purpose only */ static struct omap_hwmod omap44xx_dsp_c0_hwmod = { .name = "dsp_c0", .class = &omap44xx_dsp_hwmod_class, .clkdm_name = "tesla_clkdm", .flags = HWMOD_INIT_NO_RESET, .rst_lines = omap44xx_dsp_c0_resets, .rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_c0_resets), .prcm = { .omap4 = { .rstctrl_offs = 
OMAP4_RM_TESLA_RSTCTRL_OFFSET, }, }, }; static struct omap_hwmod omap44xx_dsp_hwmod = { .name = "dsp", .class = &omap44xx_dsp_hwmod_class, .clkdm_name = "tesla_clkdm", .mpu_irqs = omap44xx_dsp_irqs, .rst_lines = omap44xx_dsp_resets, .rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets), .main_clk = "dsp_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET, .rstctrl_offs = OMAP4_RM_TESLA_RSTCTRL_OFFSET, .context_offs = OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_dsp_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dsp_slaves), .masters = omap44xx_dsp_masters, .masters_cnt = ARRAY_SIZE(omap44xx_dsp_masters), }; /* * 'dss' class * display sub-system */ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = { .rev_offs = 0x0000, .syss_offs = 0x0014, .sysc_flags = SYSS_HAS_RESET_STATUS, }; static struct omap_hwmod_class omap44xx_dss_hwmod_class = { .name = "dss", .sysc = &omap44xx_dss_sysc, .reset = omap_dss_reset, }; /* dss */ /* dss master ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_masters[] = { &omap44xx_dss__l3_main_1, }; static struct omap_hwmod_addr_space omap44xx_dss_dma_addrs[] = { { .pa_start = 0x58000000, .pa_end = 0x5800007f, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_addrs[] = { { .pa_start = 0x48040000, .pa_end = 0x4804007f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_addrs, .user = OCP_USER_MPU, }; /* dss slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = { &omap44xx_l3_main_2__dss, &omap44xx_l4_per__dss, }; static struct 
omap_hwmod_opt_clk dss_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, { .role = "tv_clk", .clk = "dss_tv_clk" }, { .role = "hdmi_clk", .clk = "dss_48mhz_clk" }, }; static struct omap_hwmod omap44xx_dss_hwmod = { .name = "dss_core", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .class = &omap44xx_dss_hwmod_class, .clkdm_name = "l3_dss_clkdm", .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks), .slaves = omap44xx_dss_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_slaves), .masters = omap44xx_dss_masters, .masters_cnt = ARRAY_SIZE(omap44xx_dss_masters), }; /* * 'dispc' class * display controller */ static struct omap_hwmod_class_sysconfig omap44xx_dispc_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_dispc_hwmod_class = { .name = "dispc", .sysc = &omap44xx_dispc_sysc, }; /* dss_dispc */ static struct omap_hwmod omap44xx_dss_dispc_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_dispc_irqs[] = { { .irq = 25 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_dispc_sdma_reqs[] = { { .dma_req = 5 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = { { .pa_start = 0x58001000, .pa_end = 0x58001fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_dispc */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_dispc_hwmod, .clk = "dss_fck", .addr = 
omap44xx_dss_dispc_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = { { .pa_start = 0x48041000, .pa_end = 0x48041fff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = { .manager_count = 3, .has_framedonetv_irq = 1 }; /* l4_per -> dss_dispc */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_dispc_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_dispc_addrs, .user = OCP_USER_MPU, }; /* dss_dispc slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = { &omap44xx_l3_main_2__dss_dispc, &omap44xx_l4_per__dss_dispc, }; static struct omap_hwmod omap44xx_dss_dispc_hwmod = { .name = "dss_dispc", .class = &omap44xx_dispc_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dispc_irqs, .sdma_reqs = omap44xx_dss_dispc_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .slaves = omap44xx_dss_dispc_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), .dev_attr = &omap44xx_dss_dispc_dev_attr }; /* * 'dsi' class * display serial interface controller */ static struct omap_hwmod_class_sysconfig omap44xx_dsi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_dsi_hwmod_class = { .name = "dsi", .sysc = &omap44xx_dsi_sysc, }; /* dss_dsi1 */ static struct omap_hwmod omap44xx_dss_dsi1_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_dsi1_irqs[] = { { .irq = 53 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info 
omap44xx_dss_dsi1_sdma_reqs[] = { { .dma_req = 74 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = { { .pa_start = 0x58004000, .pa_end = 0x580041ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_dsi1 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_dsi1_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_dsi1_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_dsi1_addrs[] = { { .pa_start = 0x48044000, .pa_end = 0x480441ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_dsi1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_dsi1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_dsi1_addrs, .user = OCP_USER_MPU, }; /* dss_dsi1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_dsi1_slaves[] = { &omap44xx_l3_main_2__dss_dsi1, &omap44xx_l4_per__dss_dsi1, }; static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_dsi1_hwmod = { .name = "dss_dsi1", .class = &omap44xx_dsi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dsi1_irqs, .sdma_reqs = omap44xx_dss_dsi1_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_dsi1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks), .slaves = omap44xx_dss_dsi1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_slaves), }; /* dss_dsi2 */ static struct omap_hwmod omap44xx_dss_dsi2_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_dsi2_irqs[] = { { .irq = 84 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_dsi2_sdma_reqs[] = { { .dma_req = 83 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static 
struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = { { .pa_start = 0x58005000, .pa_end = 0x580051ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_dsi2 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_dsi2_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_dsi2_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_dsi2_addrs[] = { { .pa_start = 0x48045000, .pa_end = 0x480451ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_dsi2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_dsi2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_dsi2_addrs, .user = OCP_USER_MPU, }; /* dss_dsi2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_dsi2_slaves[] = { &omap44xx_l3_main_2__dss_dsi2, &omap44xx_l4_per__dss_dsi2, }; static struct omap_hwmod_opt_clk dss_dsi2_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_dsi2_hwmod = { .name = "dss_dsi2", .class = &omap44xx_dsi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dsi2_irqs, .sdma_reqs = omap44xx_dss_dsi2_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_dsi2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi2_opt_clks), .slaves = omap44xx_dss_dsi2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_slaves), }; /* * 'hdmi' class * hdmi controller */ static struct omap_hwmod_class_sysconfig omap44xx_hdmi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = { .name = "hdmi", .sysc = 
&omap44xx_hdmi_sysc, }; /* dss_hdmi */ static struct omap_hwmod omap44xx_dss_hdmi_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_hdmi_irqs[] = { { .irq = 101 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = { { .dma_req = 75 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = { { .pa_start = 0x58006000, .pa_end = 0x58006fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_hdmi */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_hdmi_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_hdmi_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_hdmi_addrs[] = { { .pa_start = 0x48046000, .pa_end = 0x48046fff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_hdmi */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_hdmi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_hdmi_addrs, .user = OCP_USER_MPU, }; /* dss_hdmi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_hdmi_slaves[] = { &omap44xx_l3_main_2__dss_hdmi, &omap44xx_l4_per__dss_hdmi, }; static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_hdmi_hwmod = { .name = "dss_hdmi", .class = &omap44xx_hdmi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_hdmi_irqs, .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, .main_clk = "dss_48mhz_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_hdmi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_hdmi_opt_clks), .slaves = omap44xx_dss_hdmi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_slaves), }; /* * 'rfbi' class * remote frame buffer interface */ static struct 
omap_hwmod_class_sysconfig omap44xx_rfbi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = { .name = "rfbi", .sysc = &omap44xx_rfbi_sysc, }; /* dss_rfbi */ static struct omap_hwmod omap44xx_dss_rfbi_hwmod; static struct omap_hwmod_dma_info omap44xx_dss_rfbi_sdma_reqs[] = { { .dma_req = 13 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = { { .pa_start = 0x58002000, .pa_end = 0x580020ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_rfbi */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_rfbi_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_rfbi_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_rfbi_addrs[] = { { .pa_start = 0x48042000, .pa_end = 0x480420ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_rfbi */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_rfbi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_rfbi_addrs, .user = OCP_USER_MPU, }; /* dss_rfbi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_rfbi_slaves[] = { &omap44xx_l3_main_2__dss_rfbi, &omap44xx_l4_per__dss_rfbi, }; static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { { .role = "ick", .clk = "dss_fck" }, }; static struct omap_hwmod omap44xx_dss_rfbi_hwmod = { .name = "dss_rfbi", .class = &omap44xx_rfbi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .sdma_reqs = omap44xx_dss_rfbi_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = 
dss_rfbi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), .slaves = omap44xx_dss_rfbi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_slaves), }; /* * 'venc' class * video encoder */ static struct omap_hwmod_class omap44xx_venc_hwmod_class = { .name = "venc", }; /* dss_venc */ static struct omap_hwmod omap44xx_dss_venc_hwmod; static struct omap_hwmod_addr_space omap44xx_dss_venc_dma_addrs[] = { { .pa_start = 0x58003000, .pa_end = 0x580030ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_venc */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_venc_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_venc_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_venc_addrs[] = { { .pa_start = 0x48043000, .pa_end = 0x480430ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_venc */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_venc_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_venc_addrs, .user = OCP_USER_MPU, }; /* dss_venc slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_venc_slaves[] = { &omap44xx_l3_main_2__dss_venc, &omap44xx_l4_per__dss_venc, }; static struct omap_hwmod omap44xx_dss_venc_hwmod = { .name = "dss_venc", .class = &omap44xx_venc_hwmod_class, .clkdm_name = "l3_dss_clkdm", .main_clk = "dss_tv_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .slaves = omap44xx_dss_venc_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_venc_slaves), }; /* * 'gpio' class * general purpose io module */ static struct omap_hwmod_class_sysconfig omap44xx_gpio_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0114, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 
SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_gpio_hwmod_class = { .name = "gpio", .sysc = &omap44xx_gpio_sysc, .rev = 2, }; /* gpio dev_attr */ static struct omap_gpio_dev_attr gpio_dev_attr = { .bank_width = 32, .dbck_flag = true, }; /* gpio1 */ static struct omap_hwmod omap44xx_gpio1_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio1_irqs[] = { { .irq = 29 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = { { .pa_start = 0x4a310000, .pa_end = 0x4a3101ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> gpio1 */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_gpio1_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_gpio1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio1_slaves[] = { &omap44xx_l4_wkup__gpio1, }; static struct omap_hwmod_opt_clk gpio1_opt_clks[] = { { .role = "dbclk", .clk = "gpio1_dbclk" }, }; static struct omap_hwmod omap44xx_gpio1_hwmod = { .name = "gpio1", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .mpu_irqs = omap44xx_gpio1_irqs, .main_clk = "gpio1_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio1_slaves), }; /* gpio2 */ static struct omap_hwmod omap44xx_gpio2_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio2_irqs[] = { { .irq = 30 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = { { .pa_start = 0x48055000, .pa_end = 0x480551ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio2 */ static struct omap_hwmod_ocp_if 
omap44xx_l4_per__gpio2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio2_slaves[] = { &omap44xx_l4_per__gpio2, }; static struct omap_hwmod_opt_clk gpio2_opt_clks[] = { { .role = "dbclk", .clk = "gpio2_dbclk" }, }; static struct omap_hwmod omap44xx_gpio2_hwmod = { .name = "gpio2", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio2_irqs, .main_clk = "gpio2_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio2_slaves), }; /* gpio3 */ static struct omap_hwmod omap44xx_gpio3_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio3_irqs[] = { { .irq = 31 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = { { .pa_start = 0x48057000, .pa_end = 0x480571ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio3_slaves[] = { &omap44xx_l4_per__gpio3, }; static struct omap_hwmod_opt_clk gpio3_opt_clks[] = { { .role = "dbclk", .clk = "gpio3_dbclk" }, }; static struct omap_hwmod omap44xx_gpio3_hwmod = { .name = "gpio3", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio3_irqs, .main_clk = "gpio3_ick", .prcm = { 
.omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio3_slaves), }; /* gpio4 */ static struct omap_hwmod omap44xx_gpio4_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio4_irqs[] = { { .irq = 32 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = { { .pa_start = 0x48059000, .pa_end = 0x480591ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio4_slaves[] = { &omap44xx_l4_per__gpio4, }; static struct omap_hwmod_opt_clk gpio4_opt_clks[] = { { .role = "dbclk", .clk = "gpio4_dbclk" }, }; static struct omap_hwmod omap44xx_gpio4_hwmod = { .name = "gpio4", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio4_irqs, .main_clk = "gpio4_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio4_slaves), }; /* gpio5 */ static struct omap_hwmod omap44xx_gpio5_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio5_irqs[] = { { .irq = 33 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = { { .pa_start = 0x4805b000, .pa_end = 0x4805b1ff, .flags = 
ADDR_TYPE_RT }, { } }; /* l4_per -> gpio5 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio5_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio5 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio5_slaves[] = { &omap44xx_l4_per__gpio5, }; static struct omap_hwmod_opt_clk gpio5_opt_clks[] = { { .role = "dbclk", .clk = "gpio5_dbclk" }, }; static struct omap_hwmod omap44xx_gpio5_hwmod = { .name = "gpio5", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio5_irqs, .main_clk = "gpio5_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio5_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio5_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio5_slaves), }; /* gpio6 */ static struct omap_hwmod omap44xx_gpio6_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio6_irqs[] = { { .irq = 34 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = { { .pa_start = 0x4805d000, .pa_end = 0x4805d1ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio6 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio6_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio6_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio6 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio6_slaves[] = { &omap44xx_l4_per__gpio6, }; static struct omap_hwmod_opt_clk gpio6_opt_clks[] = { { .role = "dbclk", .clk = "gpio6_dbclk" }, }; static struct omap_hwmod omap44xx_gpio6_hwmod = { .name = "gpio6", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = 
HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio6_irqs, .main_clk = "gpio6_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio6_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio6_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio6_slaves), }; /* * 'hsi' class * mipi high-speed synchronous serial interface (multichannel and full-duplex * serial if) */ static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_hsi_hwmod_class = { .name = "hsi", .sysc = &omap44xx_hsi_sysc, }; /* hsi */ static struct omap_hwmod_irq_info omap44xx_hsi_irqs[] = { { .name = "mpu_p1", .irq = 67 + OMAP44XX_IRQ_GIC_START }, { .name = "mpu_p2", .irq = 68 + OMAP44XX_IRQ_GIC_START }, { .name = "mpu_dma", .irq = 71 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* hsi master ports */ static struct omap_hwmod_ocp_if *omap44xx_hsi_masters[] = { &omap44xx_hsi__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_hsi_addrs[] = { { .pa_start = 0x4a058000, .pa_end = 0x4a05bfff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> hsi */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_hsi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_hsi_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* hsi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_hsi_slaves[] = { &omap44xx_l4_cfg__hsi, }; static struct omap_hwmod 
omap44xx_hsi_hwmod = { .name = "hsi", .class = &omap44xx_hsi_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap44xx_hsi_irqs, .main_clk = "hsi_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_hsi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_hsi_slaves), .masters = omap44xx_hsi_masters, .masters_cnt = ARRAY_SIZE(omap44xx_hsi_masters), }; /* * 'i2c' class * multimaster high-speed i2c controller */ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = { .sysc_offs = 0x0010, .syss_offs = 0x0090, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_i2c_hwmod_class = { .name = "i2c", .sysc = &omap44xx_i2c_sysc, .rev = OMAP_I2C_IP_VERSION_2, .reset = &omap_i2c_reset, }; static struct omap_i2c_dev_attr i2c_dev_attr = { .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE, }; /* i2c1 */ static struct omap_hwmod omap44xx_i2c1_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c1_irqs[] = { { .irq = 56 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c1_sdma_reqs[] = { { .name = "tx", .dma_req = 26 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 27 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = { { .pa_start = 0x48070000, .pa_end = 0x480700ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c1 slave ports */ static struct 
omap_hwmod_ocp_if *omap44xx_i2c1_slaves[] = { &omap44xx_l4_per__i2c1, }; static struct omap_hwmod omap44xx_i2c1_hwmod = { .name = "i2c1", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c1_irqs, .sdma_reqs = omap44xx_i2c1_sdma_reqs, .main_clk = "i2c1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c1_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c2 */ static struct omap_hwmod omap44xx_i2c2_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c2_irqs[] = { { .irq = 57 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c2_sdma_reqs[] = { { .name = "tx", .dma_req = 28 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 29 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = { { .pa_start = 0x48072000, .pa_end = 0x480720ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c2_slaves[] = { &omap44xx_l4_per__i2c2, }; static struct omap_hwmod omap44xx_i2c2_hwmod = { .name = "i2c2", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c2_irqs, .sdma_reqs = omap44xx_i2c2_sdma_reqs, .main_clk = "i2c2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c2_slaves, .slaves_cnt = 
ARRAY_SIZE(omap44xx_i2c2_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c3 */ static struct omap_hwmod omap44xx_i2c3_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c3_irqs[] = { { .irq = 61 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c3_sdma_reqs[] = { { .name = "tx", .dma_req = 24 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 25 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = { { .pa_start = 0x48060000, .pa_end = 0x480600ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c3_slaves[] = { &omap44xx_l4_per__i2c3, }; static struct omap_hwmod omap44xx_i2c3_hwmod = { .name = "i2c3", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c3_irqs, .sdma_reqs = omap44xx_i2c3_sdma_reqs, .main_clk = "i2c3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c3_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c4 */ static struct omap_hwmod omap44xx_i2c4_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c4_irqs[] = { { .irq = 62 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c4_sdma_reqs[] = { { .name = "tx", .dma_req = 123 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 124 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = { { .pa_start = 0x48350000, .pa_end = 0x483500ff, .flags = ADDR_TYPE_RT }, { } }; /* 
l4_per -> i2c4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c4_slaves[] = { &omap44xx_l4_per__i2c4, }; static struct omap_hwmod omap44xx_i2c4_hwmod = { .name = "i2c4", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c4_irqs, .sdma_reqs = omap44xx_i2c4_sdma_reqs, .main_clk = "i2c4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c4_slaves), .dev_attr = &i2c_dev_attr, }; /* * 'ipu' class * imaging processor unit */ static struct omap_hwmod_class omap44xx_ipu_hwmod_class = { .name = "ipu", }; /* ipu */ static struct omap_hwmod_irq_info omap44xx_ipu_irqs[] = { { .irq = 100 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_rst_info omap44xx_ipu_c0_resets[] = { { .name = "cpu0", .rst_shift = 0 }, }; static struct omap_hwmod_rst_info omap44xx_ipu_c1_resets[] = { { .name = "cpu1", .rst_shift = 1 }, }; static struct omap_hwmod_rst_info omap44xx_ipu_resets[] = { { .name = "mmu_cache", .rst_shift = 2 }, }; /* ipu master ports */ static struct omap_hwmod_ocp_if *omap44xx_ipu_masters[] = { &omap44xx_ipu__l3_main_2, }; /* l3_main_2 -> ipu */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_ipu_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* ipu slave ports */ static struct omap_hwmod_ocp_if *omap44xx_ipu_slaves[] = { &omap44xx_l3_main_2__ipu, }; /* Pseudo hwmod for reset control purpose only */ static struct omap_hwmod omap44xx_ipu_c0_hwmod = { .name = 
"ipu_c0",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_ipu_c0_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_c0_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
		},
	},
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_ipu_c1_hwmod = {
	.name		= "ipu_c1",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_ipu_c1_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_c1_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
		},
	},
};

/* ipu: module mode is hardware-controlled (MODULEMODE_HWCTRL) */
static struct omap_hwmod omap44xx_ipu_hwmod = {
	.name		= "ipu",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.mpu_irqs	= omap44xx_ipu_irqs,
	.rst_lines	= omap44xx_ipu_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_resets),
	.main_clk	= "ipu_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
			.context_offs = OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.slaves		= omap44xx_ipu_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_ipu_slaves),
	.masters	= omap44xx_ipu_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_ipu_masters),
};

/*
 * 'iss' class
 * external images sensor pixel data processor
 */

static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	/*
	 * ISS needs 100 OCP clk cycles delay after a softreset before
	 * accessing sysconfig again.
	 * The lowest frequency at the moment for L3 bus is 100 MHz, so
	 * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
	 *
	 * TODO: Indicate errata when available.
	 */
	.srst_udelay	= 2,
	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_iss_hwmod_class = {
	.name	= "iss",
	.sysc	= &omap44xx_iss_sysc,
};

/* iss */
static struct omap_hwmod_irq_info omap44xx_iss_irqs[] = {
	{ .irq = 24 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
	{ .name = "1", .dma_req = 8 + OMAP44XX_DMA_REQ_START },
	{ .name = "2", .dma_req = 9 + OMAP44XX_DMA_REQ_START },
	{ .name = "3", .dma_req = 11 + OMAP44XX_DMA_REQ_START },
	{ .name = "4", .dma_req = 12 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

/* iss master ports */
static struct omap_hwmod_ocp_if *omap44xx_iss_masters[] = {
	&omap44xx_iss__l3_main_2,
};

/* 256-byte register window at 0x52000000 */
static struct omap_hwmod_addr_space omap44xx_iss_addrs[] = {
	{
		.pa_start	= 0x52000000,
		.pa_end		= 0x520000ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l3_main_2 -> iss */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_iss_hwmod,
	.clk		= "l3_div_ck",
	.addr		= omap44xx_iss_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* iss slave ports */
static struct omap_hwmod_ocp_if *omap44xx_iss_slaves[] = {
	&omap44xx_l3_main_2__iss,
};

static struct omap_hwmod_opt_clk iss_opt_clks[] = {
	{ .role = "ctrlclk", .clk = "iss_ctrlclk" },
};

static struct omap_hwmod omap44xx_iss_hwmod = {
	.name		= "iss",
	.class		= &omap44xx_iss_hwmod_class,
	.clkdm_name	= "iss_clkdm",
	.mpu_irqs	= omap44xx_iss_irqs,
	.sdma_reqs	= omap44xx_iss_sdma_reqs,
	.main_clk	= "iss_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_CAM_ISS_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= iss_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(iss_opt_clks),
	.slaves		= omap44xx_iss_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_iss_slaves),
	.masters	= omap44xx_iss_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_iss_masters),
};

/*
 * 'iva' class
 * multi-standard video encoder/decoder hardware accelerator
 */

/* no sysconfig: IVA has no OCP SYSCONFIG register visible here */
static struct omap_hwmod_class omap44xx_iva_hwmod_class = {
	.name	= "iva",
};

/* iva */
static struct omap_hwmod_irq_info omap44xx_iva_irqs[] = {
	{ .name = "sync_1", .irq = 103 + OMAP44XX_IRQ_GIC_START },
	{ .name = "sync_0", .irq = 104 + OMAP44XX_IRQ_GIC_START },
	{ .name = "mailbox_0", .irq = 107 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_rst_info omap44xx_iva_resets[] = {
	{ .name = "logic", .rst_shift = 2 },
};

static struct omap_hwmod_rst_info omap44xx_iva_seq0_resets[] = {
	{ .name = "seq0", .rst_shift = 0 },
};

static struct omap_hwmod_rst_info omap44xx_iva_seq1_resets[] = {
	{ .name = "seq1", .rst_shift = 1 },
};

/* iva master ports */
static struct omap_hwmod_ocp_if *omap44xx_iva_masters[] = {
	&omap44xx_iva__l3_main_2,
	&omap44xx_iva__l3_instr,
};

static struct omap_hwmod_addr_space omap44xx_iva_addrs[] = {
	{
		.pa_start	= 0x5a000000,
		.pa_end		= 0x5a07ffff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l3_main_2 -> iva */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_iva_hwmod,
	.clk		= "l3_div_ck",
	.addr		= omap44xx_iva_addrs,
	.user		= OCP_USER_MPU,
};

/* iva slave ports */
static struct omap_hwmod_ocp_if *omap44xx_iva_slaves[] = {
	&omap44xx_dsp__iva,
	&omap44xx_l3_main_2__iva,
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_iva_seq0_hwmod = {
	.name		= "iva_seq0",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_iva_seq0_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq0_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
		},
	},
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod
omap44xx_iva_seq1_hwmod = {
	.name		= "iva_seq1",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_iva_seq1_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq1_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
		},
	},
};

static struct omap_hwmod omap44xx_iva_hwmod = {
	.name		= "iva",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.mpu_irqs	= omap44xx_iva_irqs,
	.rst_lines	= omap44xx_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_resets),
	.main_clk	= "iva_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET,
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
			.context_offs = OMAP4_RM_IVAHD_IVAHD_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.slaves		= omap44xx_iva_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_iva_slaves),
	.masters	= omap44xx_iva_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_iva_masters),
};

/*
 * 'kbd' class
 * keyboard controller
 */

static struct omap_hwmod_class_sysconfig omap44xx_kbd_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
	.name	= "kbd",
	.sysc	= &omap44xx_kbd_sysc,
};

/* kbd */
/* forward declaration: referenced by the OCP interface below */
static struct omap_hwmod omap44xx_kbd_hwmod;

static struct omap_hwmod_irq_info omap44xx_kbd_irqs[] = {
	{ .irq = 120 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
	{
		.pa_start	= 0x4a31c000,
		.pa_end		= 0x4a31c07f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_wkup -> kbd */
static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
	.master		= &omap44xx_l4_wkup_hwmod,
	.slave		= &omap44xx_kbd_hwmod,
	.clk		= "l4_wkup_clk_mux_ck",
	.addr		= omap44xx_kbd_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* kbd slave ports */
static struct omap_hwmod_ocp_if *omap44xx_kbd_slaves[] = {
	&omap44xx_l4_wkup__kbd,
};

static struct omap_hwmod omap44xx_kbd_hwmod = {
	.name		= "kbd",
	.class		= &omap44xx_kbd_hwmod_class,
	.clkdm_name	= "l4_wkup_clkdm",
	.mpu_irqs	= omap44xx_kbd_irqs,
	.main_clk	= "kbd_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_kbd_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_kbd_slaves),
};

/*
 * 'mailbox' class
 * mailbox module allowing communication between the on-chip processors using a
 * queued mailbox-interrupt mechanism.
 */

static struct omap_hwmod_class_sysconfig omap44xx_mailbox_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.sysc_flags	= (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_mailbox_hwmod_class = {
	.name	= "mailbox",
	.sysc	= &omap44xx_mailbox_sysc,
};

/* mailbox */
static struct omap_hwmod omap44xx_mailbox_hwmod;

static struct omap_hwmod_irq_info omap44xx_mailbox_irqs[] = {
	{ .irq = 26 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
	{
		.pa_start	= 0x4a0f4000,
		.pa_end		= 0x4a0f41ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_cfg -> mailbox */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mailbox = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_mailbox_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mailbox_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mailbox slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mailbox_slaves[] = {
	&omap44xx_l4_cfg__mailbox,
};

/* no .modulemode: mailbox clocking is managed via the L4CFG domain */
static struct omap_hwmod omap44xx_mailbox_hwmod = {
	.name		= "mailbox",
	.class		= &omap44xx_mailbox_hwmod_class,
	.clkdm_name	= "l4_cfg_clkdm",
	.mpu_irqs	= omap44xx_mailbox_irqs,
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4CFG_MAILBOX_CONTEXT_OFFSET,
		},
	},
	.slaves		= omap44xx_mailbox_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mailbox_slaves),
};

/*
 * 'mcbsp' class
 * multi channel buffered serial port controller
 */

static struct omap_hwmod_class_sysconfig omap44xx_mcbsp_sysc = {
	.sysc_offs	= 0x008c,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_mcbsp_hwmod_class = {
	.name	= "mcbsp",
	.sysc	= &omap44xx_mcbsp_sysc,
	.rev	= MCBSP_CONFIG_TYPE4,
};

/* mcbsp1 */
static struct omap_hwmod omap44xx_mcbsp1_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = {
	{ .irq = 17 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 32 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 33 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

/* MPU-side alias of the mcbsp1 register window */
static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x40122000,
		.pa_end		= 0x401220ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp1 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp1_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp1_addrs,
	.user		= OCP_USER_MPU,
};

/* DMA-side alias of the same registers */
static struct omap_hwmod_addr_space omap44xx_mcbsp1_dma_addrs[] = {
	{
		.name		= "dma",
		.pa_start	= 0x49022000,
		.pa_end		= 0x490220ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp1 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp1_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp1_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* mcbsp1 slave ports */
static struct omap_hwmod_ocp_if
*omap44xx_mcbsp1_slaves[] = {
	&omap44xx_l4_abe__mcbsp1,
	&omap44xx_l4_abe__mcbsp1_dma,
};

static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
	.name		= "mcbsp1",
	.class		= &omap44xx_mcbsp_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_mcbsp1_irqs,
	.sdma_reqs	= omap44xx_mcbsp1_sdma_reqs,
	.main_clk	= "mcbsp1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_MCBSP1_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mcbsp1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp1_slaves),
	.opt_clks	= mcbsp1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp1_opt_clks),
};

/* mcbsp2 */
static struct omap_hwmod omap44xx_mcbsp2_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = {
	{ .irq = 22 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 16 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 17 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x40124000,
		.pa_end		= 0x401240ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp2 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp2_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp2_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_mcbsp2_dma_addrs[] = {
	{
		.name		= "dma",
		.pa_start	= 0x49024000,
		.pa_end		= 0x490240ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp2 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp2_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp2_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* mcbsp2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcbsp2_slaves[] = {
	&omap44xx_l4_abe__mcbsp2,
	&omap44xx_l4_abe__mcbsp2_dma,
};

static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
	.name		= "mcbsp2",
	.class		= &omap44xx_mcbsp_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_mcbsp2_irqs,
	.sdma_reqs	= omap44xx_mcbsp2_sdma_reqs,
	.main_clk	= "mcbsp2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_MCBSP2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mcbsp2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp2_slaves),
	.opt_clks	= mcbsp2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp2_opt_clks),
};

/* mcbsp3 */
static struct omap_hwmod omap44xx_mcbsp3_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = {
	{ .irq = 23 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 18 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 19 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x40126000,
		.pa_end		= 0x401260ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp3 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp3_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp3_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_mcbsp3_dma_addrs[] = {
	{
		.name		= "dma",
		.pa_start	= 0x49026000,
		.pa_end		= 0x490260ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcbsp3 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcbsp3_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcbsp3_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* mcbsp3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcbsp3_slaves[] = {
	&omap44xx_l4_abe__mcbsp3,
	&omap44xx_l4_abe__mcbsp3_dma,
};

static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
	.name		= "mcbsp3",
	.class		= &omap44xx_mcbsp_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_mcbsp3_irqs,
	.sdma_reqs	= omap44xx_mcbsp3_sdma_reqs,
	.main_clk	= "mcbsp3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_MCBSP3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mcbsp3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp3_slaves),
	.opt_clks	= mcbsp3_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp3_opt_clks),
};

/* mcbsp4: unlike mcbsp1-3, hangs off l4_per, not the ABE */
static struct omap_hwmod omap44xx_mcbsp4_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = {
	{ .irq = 16 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 30 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 31 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
	{
		.pa_start	= 0x48096000,
		.pa_end		= 0x480960ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mcbsp4 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mcbsp4 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mcbsp4_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mcbsp4_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp4 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcbsp4_slaves[] = {
	&omap44xx_l4_per__mcbsp4,
};

static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
	{ .role = "pad_fck", .clk = "pad_clks_ck" },
	{ .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
};

static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
	.name		= "mcbsp4",
	.class		= &omap44xx_mcbsp_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mcbsp4_irqs,
	.sdma_reqs	= omap44xx_mcbsp4_sdma_reqs,
	.main_clk	= "mcbsp4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MCBSP4_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mcbsp4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcbsp4_slaves),
	.opt_clks	= mcbsp4_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(mcbsp4_opt_clks),
};

/*
 * 'mcpdm' class
 * multi channel pdm controller (proprietary interface with phoenix power
 * ic)
 */

static struct omap_hwmod_class_sysconfig omap44xx_mcpdm_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = {
	.name	= "mcpdm",
	.sysc	= &omap44xx_mcpdm_sysc,
};

/* mcpdm */
static struct omap_hwmod omap44xx_mcpdm_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcpdm_irqs[] = {
	{ .irq = 112 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcpdm_sdma_reqs[] = {
	{ .name = "up_link", .dma_req = 64 + OMAP44XX_DMA_REQ_START },
	{ .name = "dn_link", .dma_req = 65 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

/* MPU-side register window */
static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
	{
		.pa_start	= 0x40132000,
		.pa_end		= 0x4013207f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcpdm */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcpdm_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcpdm_addrs,
	.user		= OCP_USER_MPU,
};

/* DMA-side alias of the same registers */
static struct omap_hwmod_addr_space omap44xx_mcpdm_dma_addrs[] = {
	{
		.pa_start	= 0x49032000,
		.pa_end		= 0x4903207f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> mcpdm (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_mcpdm_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_mcpdm_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* mcpdm slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcpdm_slaves[] = {
	&omap44xx_l4_abe__mcpdm,
	&omap44xx_l4_abe__mcpdm_dma,
};

static struct omap_hwmod omap44xx_mcpdm_hwmod = {
	.name		= "mcpdm",
	.class		= &omap44xx_mcpdm_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_mcpdm_irqs,
	.sdma_reqs	= omap44xx_mcpdm_sdma_reqs,
	.main_clk	= "mcpdm_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_PDM_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mcpdm_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcpdm_slaves),
};

/*
 * 'mcspi' class
 * multichannel serial port interface (mcspi) / master/slave synchronous serial
 * bus
 */

static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
	.name	= "mcspi",
	.sysc	= &omap44xx_mcspi_sysc,
	.rev	= OMAP4_MCSPI_REV,
};

/* mcspi1 */
static struct omap_hwmod omap44xx_mcspi1_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcspi1_irqs[] = {
	{ .irq = 65 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

/* one tx/rx DMA request pair per chip select (4 on mcspi1) */
static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 34 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx0", .dma_req = 35 + OMAP44XX_DMA_REQ_START },
	{ .name = "tx1", .dma_req = 36 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx1", .dma_req = 37 + OMAP44XX_DMA_REQ_START },
	{ .name = "tx2", .dma_req = 38 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx2", .dma_req = 39 + OMAP44XX_DMA_REQ_START },
	{ .name = "tx3", .dma_req = 40 +
OMAP44XX_DMA_REQ_START },
	{ .name = "rx3", .dma_req = 41 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
	{
		.pa_start	= 0x48098000,
		.pa_end		= 0x480981ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mcspi1 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mcspi1_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mcspi1_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcspi1 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcspi1_slaves[] = {
	&omap44xx_l4_per__mcspi1,
};

/* mcspi1 dev_attr */
static struct omap2_mcspi_dev_attr mcspi1_dev_attr = {
	.num_chipselect	= 4,
};

static struct omap_hwmod omap44xx_mcspi1_hwmod = {
	.name		= "mcspi1",
	.class		= &omap44xx_mcspi_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mcspi1_irqs,
	.sdma_reqs	= omap44xx_mcspi1_sdma_reqs,
	.main_clk	= "mcspi1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mcspi1_dev_attr,
	.slaves		= omap44xx_mcspi1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcspi1_slaves),
};

/* mcspi2 */
static struct omap_hwmod omap44xx_mcspi2_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcspi2_irqs[] = {
	{ .irq = 66 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 42 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx0", .dma_req = 43 + OMAP44XX_DMA_REQ_START },
	{ .name = "tx1", .dma_req = 44 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx1", .dma_req = 45 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
	{
		.pa_start	= 0x4809a000,
		.pa_end		= 0x4809a1ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mcspi2 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mcspi2_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mcspi2_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcspi2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcspi2_slaves[] = {
	&omap44xx_l4_per__mcspi2,
};

/* mcspi2 dev_attr */
static struct omap2_mcspi_dev_attr mcspi2_dev_attr = {
	.num_chipselect	= 2,
};

static struct omap_hwmod omap44xx_mcspi2_hwmod = {
	.name		= "mcspi2",
	.class		= &omap44xx_mcspi_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mcspi2_irqs,
	.sdma_reqs	= omap44xx_mcspi2_sdma_reqs,
	.main_clk	= "mcspi2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mcspi2_dev_attr,
	.slaves		= omap44xx_mcspi2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcspi2_slaves),
};

/* mcspi3 */
static struct omap_hwmod omap44xx_mcspi3_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcspi3_irqs[] = {
	{ .irq = 91 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 14 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx0", .dma_req = 15 + OMAP44XX_DMA_REQ_START },
	{ .name = "tx1", .dma_req = 22 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx1", .dma_req = 23 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
	{
		.pa_start	= 0x480b8000,
		.pa_end		= 0x480b81ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mcspi3 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mcspi3_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mcspi3_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcspi3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcspi3_slaves[] = {
	&omap44xx_l4_per__mcspi3,
};

/* mcspi3 dev_attr */
static struct omap2_mcspi_dev_attr mcspi3_dev_attr = {
	.num_chipselect	= 2,
};

static struct omap_hwmod omap44xx_mcspi3_hwmod = {
	.name		= "mcspi3",
	.class		= &omap44xx_mcspi_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mcspi3_irqs,
	.sdma_reqs	= omap44xx_mcspi3_sdma_reqs,
	.main_clk	= "mcspi3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mcspi3_dev_attr,
	.slaves		= omap44xx_mcspi3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mcspi3_slaves),
};

/* mcspi4 */
static struct omap_hwmod omap44xx_mcspi4_hwmod;

static struct omap_hwmod_irq_info omap44xx_mcspi4_irqs[] = {
	{ .irq = 48 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mcspi4_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 69 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx0", .dma_req = 70 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
	{
		.pa_start	= 0x480ba000,
		.pa_end		= 0x480ba1ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mcspi4 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mcspi4_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mcspi4_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcspi4 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mcspi4_slaves[] = {
	&omap44xx_l4_per__mcspi4,
};

/* mcspi4 dev_attr */
static struct omap2_mcspi_dev_attr mcspi4_dev_attr = {
	.num_chipselect	= 1,
};

static struct omap_hwmod omap44xx_mcspi4_hwmod = {
	.name		= "mcspi4",
	.class		= &omap44xx_mcspi_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mcspi4_irqs,
	.sdma_reqs	= omap44xx_mcspi4_sdma_reqs,
	.main_clk	= "mcspi4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mcspi4_dev_attr,
	.slaves		= omap44xx_mcspi4_slaves,
	.slaves_cnt	=
ARRAY_SIZE(omap44xx_mcspi4_slaves),
};

/*
 * 'mmc' class
 * multimedia card high-speed/sd/sdio (mmc/sd/sdio) host controller
 */

static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
			   SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_mmc_hwmod_class = {
	.name	= "mmc",
	.sysc	= &omap44xx_mmc_sysc,
};

/* mmc1 */
static struct omap_hwmod_irq_info omap44xx_mmc1_irqs[] = {
	{ .irq = 83 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 60 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 61 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

/* mmc1 master ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc1_masters[] = {
	&omap44xx_mmc1__l3_main_1,
};

static struct omap_hwmod_addr_space omap44xx_mmc1_addrs[] = {
	{
		.pa_start	= 0x4809c000,
		.pa_end		= 0x4809c3ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mmc1 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mmc1_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mmc1_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mmc1 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc1_slaves[] = {
	&omap44xx_l4_per__mmc1,
};

/* mmc1 dev_attr: only mmc1 supports dual-voltage cards */
static struct omap_mmc_dev_attr mmc1_dev_attr = {
	.flags	= OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};

static struct omap_hwmod omap44xx_mmc1_hwmod = {
	.name		= "mmc1",
	.class		= &omap44xx_mmc_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	.mpu_irqs	= omap44xx_mmc1_irqs,
	.sdma_reqs	= omap44xx_mmc1_sdma_reqs,
	.main_clk	= "mmc1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mmc1_dev_attr,
	.slaves		= omap44xx_mmc1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mmc1_slaves),
	.masters	= omap44xx_mmc1_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_mmc1_masters),
};

/* mmc2 */
static struct omap_hwmod_irq_info omap44xx_mmc2_irqs[] = {
	{ .irq = 86 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mmc2_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 46 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 47 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

/* mmc2 master ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc2_masters[] = {
	&omap44xx_mmc2__l3_main_1,
};

static struct omap_hwmod_addr_space omap44xx_mmc2_addrs[] = {
	{
		.pa_start	= 0x480b4000,
		.pa_end		= 0x480b43ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mmc2 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mmc2_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mmc2_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mmc2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc2_slaves[] = {
	&omap44xx_l4_per__mmc2,
};

static struct omap_hwmod omap44xx_mmc2_hwmod = {
	.name		= "mmc2",
	.class		= &omap44xx_mmc_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	.mpu_irqs	= omap44xx_mmc2_irqs,
	.sdma_reqs	= omap44xx_mmc2_sdma_reqs,
	.main_clk	= "mmc2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mmc2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mmc2_slaves),
	.masters	= omap44xx_mmc2_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_mmc2_masters),
};

/* mmc3: no L3 master port, unlike mmc1/mmc2 */
static struct omap_hwmod omap44xx_mmc3_hwmod;

static struct omap_hwmod_irq_info omap44xx_mmc3_irqs[] = {
	{ .irq = 94 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mmc3_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 76 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 77 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
	{
		.pa_start	= 0x480ad000,
		.pa_end		= 0x480ad3ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mmc3 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mmc3_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mmc3_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mmc3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc3_slaves[] = {
	&omap44xx_l4_per__mmc3,
};

static struct omap_hwmod omap44xx_mmc3_hwmod = {
	.name		= "mmc3",
	.class		= &omap44xx_mmc_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mmc3_irqs,
	.sdma_reqs	= omap44xx_mmc3_sdma_reqs,
	.main_clk	= "mmc3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mmc3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mmc3_slaves),
};

/* mmc4 */
static struct omap_hwmod omap44xx_mmc4_hwmod;

static struct omap_hwmod_irq_info omap44xx_mmc4_irqs[] = {
	{ .irq = 96 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mmc4_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 56 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 57 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
	{
		.pa_start	= 0x480d1000,
		.pa_end		= 0x480d13ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mmc4 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc4 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mmc4_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mmc4_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mmc4 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc4_slaves[] = {
	&omap44xx_l4_per__mmc4,
};

static struct omap_hwmod omap44xx_mmc4_hwmod = {
	.name		= "mmc4",
	.class		= &omap44xx_mmc_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mmc4_irqs,
	.sdma_reqs	= omap44xx_mmc4_sdma_reqs,
	.main_clk	= "mmc4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mmc4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mmc4_slaves),
};

/* mmc5 */
static struct omap_hwmod omap44xx_mmc5_hwmod;

static struct omap_hwmod_irq_info omap44xx_mmc5_irqs[] = {
	{ .irq = 59 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_mmc5_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 58 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 59 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
	{
		.pa_start	= 0x480d5000,
		.pa_end		= 0x480d53ff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> mmc5 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_mmc5_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_mmc5_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mmc5 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_mmc5_slaves[] = {
	&omap44xx_l4_per__mmc5,
};

static struct omap_hwmod omap44xx_mmc5_hwmod = {
	.name		= "mmc5",
	.class		= &omap44xx_mmc_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_mmc5_irqs,
	.sdma_reqs	= omap44xx_mmc5_sdma_reqs,
	.main_clk	= "mmc5_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_mmc5_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_mmc5_slaves),
};

/*
 * 'mpu' class
 * mpu sub-system
 */

static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
	.name	= "mpu",
};

/* mpu */
static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
	{ .name = "pl310", .irq = 0 +
OMAP44XX_IRQ_GIC_START }, { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START }, { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* mpu master ports */ static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = { &omap44xx_mpu__l3_main_1, &omap44xx_mpu__l4_abe, &omap44xx_mpu__dmm, }; static struct omap_hwmod omap44xx_mpu_hwmod = { .name = "mpu", .class = &omap44xx_mpu_hwmod_class, .clkdm_name = "mpuss_clkdm", .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, .mpu_irqs = omap44xx_mpu_irqs, .main_clk = "dpll_mpu_m2_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_MPU_MPU_CONTEXT_OFFSET, }, }, .masters = omap44xx_mpu_masters, .masters_cnt = ARRAY_SIZE(omap44xx_mpu_masters), }; /* * 'smartreflex' class * smartreflex module (monitor silicon performance and outputs a measure of * performance error) */ /* The IP is not compliant to type1 / type2 scheme */ static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = { .sidle_shift = 24, .enwkup_shift = 26, }; static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = { .sysc_offs = 0x0038, .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type_smartreflex, }; static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap44xx_smartreflex_sysc, .rev = 2, }; /* smartreflex_core */ static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = { .sensor_voltdm_name = "core", }; static struct omap_hwmod omap44xx_smartreflex_core_hwmod; static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = { { .irq = 19 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = { { .pa_start = 0x4a0dd000, .pa_end = 0x4a0dd03f, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> smartreflex_core */ static struct omap_hwmod_ocp_if 
omap44xx_l4_cfg__smartreflex_core = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_smartreflex_core_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_smartreflex_core_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* smartreflex_core slave ports */
static struct omap_hwmod_ocp_if *omap44xx_smartreflex_core_slaves[] = {
	&omap44xx_l4_cfg__smartreflex_core,
};

static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
	.name		= "smartreflex_core",
	.class		= &omap44xx_smartreflex_hwmod_class,
	.clkdm_name	= "l4_ao_clkdm",
	.mpu_irqs	= omap44xx_smartreflex_core_irqs,
	.main_clk	= "smartreflex_core_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_smartreflex_core_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_core_slaves),
	.dev_attr	= &smartreflex_core_dev_attr,
};

/* smartreflex_iva: instance monitoring the "iva" voltage domain */
static struct omap_smartreflex_dev_attr smartreflex_iva_dev_attr = {
	.sensor_voltdm_name	= "iva",
};

static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
	{ .irq = 102 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
	{ .pa_start = 0x4a0db000, .pa_end = 0x4a0db03f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_cfg -> smartreflex_iva */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_smartreflex_iva_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_smartreflex_iva_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* smartreflex_iva slave ports */
static struct omap_hwmod_ocp_if *omap44xx_smartreflex_iva_slaves[] = {
	&omap44xx_l4_cfg__smartreflex_iva,
};

static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
	.name		= "smartreflex_iva",
	.class		= &omap44xx_smartreflex_hwmod_class,
	.clkdm_name	= "l4_ao_clkdm",
	.mpu_irqs	= omap44xx_smartreflex_iva_irqs,
	.main_clk	= "smartreflex_iva_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_smartreflex_iva_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_iva_slaves),
	.dev_attr	= &smartreflex_iva_dev_attr,
};

/* smartreflex_mpu: instance monitoring the "mpu" voltage domain */
static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
	.sensor_voltdm_name	= "mpu",
};

static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
	{ .irq = 18 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
	{ .pa_start = 0x4a0d9000, .pa_end = 0x4a0d903f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_cfg -> smartreflex_mpu */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_smartreflex_mpu_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_smartreflex_mpu_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* smartreflex_mpu slave ports */
static struct omap_hwmod_ocp_if *omap44xx_smartreflex_mpu_slaves[] = {
	&omap44xx_l4_cfg__smartreflex_mpu,
};

static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
	.name		= "smartreflex_mpu",
	.class		= &omap44xx_smartreflex_hwmod_class,
	.clkdm_name	= "l4_ao_clkdm",
	.mpu_irqs	= omap44xx_smartreflex_mpu_irqs,
	.main_clk	= "smartreflex_mpu_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_smartreflex_mpu_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves),
	.dev_attr	= &smartreflex_mpu_dev_attr,
};

/*
 * 'spinlock' class
 * spinlock provides hardware assistance for synchronizing the processes
 * running on multiple processors
 */
static struct omap_hwmod_class_sysconfig
omap44xx_spinlock_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_spinlock_hwmod_class = {
	.name	= "spinlock",
	.sysc	= &omap44xx_spinlock_sysc,
};

/* spinlock */
static struct omap_hwmod omap44xx_spinlock_hwmod;

static struct omap_hwmod_addr_space omap44xx_spinlock_addrs[] = {
	{ .pa_start = 0x4a0f6000, .pa_end = 0x4a0f6fff, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_cfg -> spinlock */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_spinlock_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_spinlock_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* spinlock slave ports */
static struct omap_hwmod_ocp_if *omap44xx_spinlock_slaves[] = {
	&omap44xx_l4_cfg__spinlock,
};

static struct omap_hwmod omap44xx_spinlock_hwmod = {
	.name		= "spinlock",
	.class		= &omap44xx_spinlock_hwmod_class,
	.clkdm_name	= "l4_cfg_clkdm",
	/* no .main_clk / .modulemode: IP has no SW-controlled module mode */
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET,
		},
	},
	.slaves		= omap44xx_spinlock_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_spinlock_slaves),
};

/*
 * 'timer' class
 * general purpose timer module with accurate 1ms tick
 * This class contains several variants: ['timer_1ms', 'timer']
 */
static struct omap_hwmod_class_sysconfig omap44xx_timer_1ms_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class
omap44xx_timer_1ms_hwmod_class = {
	.name	= "timer",
	.sysc	= &omap44xx_timer_1ms_sysc,
};

/* plain 'timer' variant: type2 sysconfig layout, no 1ms tick logic */
static struct omap_hwmod_class_sysconfig omap44xx_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
	.name	= "timer",
	.sysc	= &omap44xx_timer_sysc,
};

/* always-on timers dev attribute */
static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = {
	.timer_capability	= OMAP_TIMER_ALWON,
};

/* pwm timers dev attribute */
static struct omap_timer_capability_dev_attr capability_pwm_dev_attr = {
	.timer_capability	= OMAP_TIMER_HAS_PWM,
};

/* timer1: on the always-on l4_wkup interconnect */
static struct omap_hwmod omap44xx_timer1_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer1_irqs[] = {
	{ .irq = 37 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
	{ .pa_start = 0x4a318000, .pa_end = 0x4a31807f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_wkup -> timer1 */
static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
	.master		= &omap44xx_l4_wkup_hwmod,
	.slave		= &omap44xx_timer1_hwmod,
	.clk		= "l4_wkup_clk_mux_ck",
	.addr		= omap44xx_timer1_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer1 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer1_slaves[] = {
	&omap44xx_l4_wkup__timer1,
};

static struct omap_hwmod omap44xx_timer1_hwmod = {
	.name		= "timer1",
	.class		= &omap44xx_timer_1ms_hwmod_class,
	.clkdm_name	= "l4_wkup_clkdm",
	.mpu_irqs	= omap44xx_timer1_irqs,
	.main_clk	= "timer1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer1_slaves),
};

/* timer2 */
static struct omap_hwmod omap44xx_timer2_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer2_irqs[] = {
	{ .irq = 38 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
	{ .pa_start = 0x48032000, .pa_end = 0x4803207f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer2 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer2_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer2_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer2_slaves[] = {
	&omap44xx_l4_per__timer2,
};

static struct omap_hwmod omap44xx_timer2_hwmod = {
	.name		= "timer2",
	.class		= &omap44xx_timer_1ms_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer2_irqs,
	.main_clk	= "timer2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer2_slaves),
};

/* timer3 */
static struct omap_hwmod omap44xx_timer3_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer3_irqs[] = {
	{ .irq = 39 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
	{ .pa_start = 0x48034000, .pa_end = 0x4803407f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer3 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer3_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer3_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer3_slaves[] = {
	&omap44xx_l4_per__timer3,
};

static struct omap_hwmod omap44xx_timer3_hwmod = {
	.name		= "timer3",
	.class		=
&omap44xx_timer_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer3_irqs,
	.main_clk	= "timer3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer3_slaves),
};

/* timer4 */
static struct omap_hwmod omap44xx_timer4_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer4_irqs[] = {
	{ .irq = 40 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
	{ .pa_start = 0x48036000, .pa_end = 0x4803607f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer4 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer4_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer4_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer4 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer4_slaves[] = {
	&omap44xx_l4_per__timer4,
};

static struct omap_hwmod omap44xx_timer4_hwmod = {
	.name		= "timer4",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer4_irqs,
	.main_clk	= "timer4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer4_slaves),
};

/* timer5: ABE-domain timer, separate MPU and DMA address views below */
static struct omap_hwmod omap44xx_timer5_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer5_irqs[] = {
	{ .irq = 41 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
	{ .pa_start = 0x40138000, .pa_end = 0x4013807f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer5 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 =
{
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer5_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer5_addrs,
	.user		= OCP_USER_MPU,	/* MPU-visible address view only */
};

/* same IP seen at a different physical base from the DMA side */
static struct omap_hwmod_addr_space omap44xx_timer5_dma_addrs[] = {
	{ .pa_start = 0x49038000, .pa_end = 0x4903807f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer5 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer5_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer5_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* timer5 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer5_slaves[] = {
	&omap44xx_l4_abe__timer5,
	&omap44xx_l4_abe__timer5_dma,
};

static struct omap_hwmod omap44xx_timer5_hwmod = {
	.name		= "timer5",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_timer5_irqs,
	.main_clk	= "timer5_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer5_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer5_slaves),
};

/* timer6 */
static struct omap_hwmod omap44xx_timer6_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer6_irqs[] = {
	{ .irq = 42 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
	{ .pa_start = 0x4013a000, .pa_end = 0x4013a07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer6 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer6_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer6_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_timer6_dma_addrs[] = {
	{ .pa_start = 0x4903a000, .pa_end = 0x4903a07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer6 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer6_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer6_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* timer6 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer6_slaves[] = {
	&omap44xx_l4_abe__timer6,
	&omap44xx_l4_abe__timer6_dma,
};

static struct omap_hwmod omap44xx_timer6_hwmod = {
	.name		= "timer6",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_timer6_irqs,
	.main_clk	= "timer6_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer6_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer6_slaves),
};

/* timer7 */
static struct omap_hwmod omap44xx_timer7_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer7_irqs[] = {
	{ .irq = 43 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
	{ .pa_start = 0x4013c000, .pa_end = 0x4013c07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer7 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer7_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer7_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_timer7_dma_addrs[] = {
	{ .pa_start = 0x4903c000, .pa_end = 0x4903c07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer7 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer7_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer7_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* timer7 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer7_slaves[] = {
	&omap44xx_l4_abe__timer7,
	&omap44xx_l4_abe__timer7_dma,
};

static struct omap_hwmod omap44xx_timer7_hwmod = {
	.name		= "timer7",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	=
omap44xx_timer7_irqs,
	.main_clk	= "timer7_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.slaves		= omap44xx_timer7_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer7_slaves),
};

/* timer8: ABE-domain timer flagged with PWM capability below */
static struct omap_hwmod omap44xx_timer8_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer8_irqs[] = {
	{ .irq = 44 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
	{ .pa_start = 0x4013e000, .pa_end = 0x4013e07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer8 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer8_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer8_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_timer8_dma_addrs[] = {
	{ .pa_start = 0x4903e000, .pa_end = 0x4903e07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> timer8 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_timer8_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_timer8_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* timer8 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer8_slaves[] = {
	&omap44xx_l4_abe__timer8,
	&omap44xx_l4_abe__timer8_dma,
};

static struct omap_hwmod omap44xx_timer8_hwmod = {
	.name		= "timer8",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_timer8_irqs,
	.main_clk	= "timer8_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_pwm_dev_attr,
	.slaves		= omap44xx_timer8_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer8_slaves),
};

/* timer9 */
static struct omap_hwmod omap44xx_timer9_hwmod;
static struct
omap_hwmod_irq_info omap44xx_timer9_irqs[] = {
	{ .irq = 45 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
	{ .pa_start = 0x4803e000, .pa_end = 0x4803e07f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer9 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer9_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer9_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer9 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer9_slaves[] = {
	&omap44xx_l4_per__timer9,
};

static struct omap_hwmod omap44xx_timer9_hwmod = {
	.name		= "timer9",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer9_irqs,
	.main_clk	= "timer9_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_pwm_dev_attr,
	.slaves		= omap44xx_timer9_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer9_slaves),
};

/* timer10: 1ms-variant timer on l4_per */
static struct omap_hwmod omap44xx_timer10_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer10_irqs[] = {
	{ .irq = 46 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
	{ .pa_start = 0x48086000, .pa_end = 0x4808607f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer10 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer10_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer10_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer10 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer10_slaves[] = {
	&omap44xx_l4_per__timer10,
};

static struct omap_hwmod omap44xx_timer10_hwmod = {
	.name		= "timer10",
	.class		= &omap44xx_timer_1ms_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer10_irqs,
	.main_clk	= "timer10_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_pwm_dev_attr,
	.slaves		= omap44xx_timer10_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer10_slaves),
};

/* timer11 */
static struct omap_hwmod omap44xx_timer11_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer11_irqs[] = {
	{ .irq = 47 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
	{ .pa_start = 0x48088000, .pa_end = 0x4808807f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> timer11 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_timer11_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_timer11_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* timer11 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_timer11_slaves[] = {
	&omap44xx_l4_per__timer11,
};

static struct omap_hwmod omap44xx_timer11_hwmod = {
	.name		= "timer11",
	.class		= &omap44xx_timer_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_timer11_irqs,
	.main_clk	= "timer11_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_pwm_dev_attr,
	.slaves		= omap44xx_timer11_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_timer11_slaves),
};

/*
 * 'uart' class
 * universal asynchronous receiver/transmitter (uart)
 */
static struct omap_hwmod_class_sysconfig omap44xx_uart_sysc = {
	.rev_offs	= 0x0050,
	.sysc_offs	= 0x0054,
	.syss_offs	= 0x0058,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
	.name	=
"uart",
	.sysc	= &omap44xx_uart_sysc,
};

/* uart1 */
static struct omap_hwmod omap44xx_uart1_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart1_irqs[] = {
	{ .irq = 72 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_uart1_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 48 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 49 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
	{ .pa_start = 0x4806a000, .pa_end = 0x4806a0ff, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> uart1 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_uart1_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_uart1_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* uart1 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_uart1_slaves[] = {
	&omap44xx_l4_per__uart1,
};

static struct omap_hwmod omap44xx_uart1_hwmod = {
	.name		= "uart1",
	.class		= &omap44xx_uart_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_uart1_irqs,
	.sdma_reqs	= omap44xx_uart1_sdma_reqs,
	.main_clk	= "uart1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_uart1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_uart1_slaves),
};

/* uart2 */
static struct omap_hwmod omap44xx_uart2_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart2_irqs[] = {
	{ .irq = 73 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_uart2_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 50 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 51 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
	{ .pa_start = 0x4806c000, .pa_end = 0x4806c0ff, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> uart2 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_uart2_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_uart2_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* uart2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_uart2_slaves[] = {
	&omap44xx_l4_per__uart2,
};

static struct omap_hwmod omap44xx_uart2_hwmod = {
	.name		= "uart2",
	.class		= &omap44xx_uart_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_uart2_irqs,
	.sdma_reqs	= omap44xx_uart2_sdma_reqs,
	.main_clk	= "uart2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_uart2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_uart2_slaves),
};

/* uart3 */
static struct omap_hwmod omap44xx_uart3_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart3_irqs[] = {
	{ .irq = 74 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_uart3_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 52 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 53 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
	{ .pa_start = 0x48020000, .pa_end = 0x480200ff, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> uart3 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_uart3_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_uart3_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* uart3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_uart3_slaves[] = {
	&omap44xx_l4_per__uart3,
};

static struct omap_hwmod omap44xx_uart3_hwmod = {
	.name		= "uart3",
	.class		= &omap44xx_uart_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	/* kept alive at init; presumably the early console — TODO confirm */
	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
	.mpu_irqs	= omap44xx_uart3_irqs,
	.sdma_reqs	= omap44xx_uart3_sdma_reqs,
	.main_clk	= "uart3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET,
			.context_offs =
OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_uart3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_uart3_slaves),
};

/* uart4 */
static struct omap_hwmod omap44xx_uart4_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart4_irqs[] = {
	{ .irq = 70 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_uart4_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 54 + OMAP44XX_DMA_REQ_START },
	{ .name = "rx", .dma_req = 55 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
	{ .pa_start = 0x4806e000, .pa_end = 0x4806e0ff, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_per -> uart4 */
static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_uart4_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_uart4_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* uart4 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_uart4_slaves[] = {
	&omap44xx_l4_per__uart4,
};

static struct omap_hwmod omap44xx_uart4_hwmod = {
	.name		= "uart4",
	.class		= &omap44xx_uart_hwmod_class,
	.clkdm_name	= "l4_per_clkdm",
	.mpu_irqs	= omap44xx_uart4_irqs,
	.sdma_reqs	= omap44xx_uart4_sdma_reqs,
	.main_clk	= "uart4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_uart4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_uart4_slaves),
};

/*
 * 'usb_otg_hs' class
 * high-speed on-the-go universal serial bus (usb_otg_hs) controller
 */
static struct omap_hwmod_class_sysconfig omap44xx_usb_otg_hs_sysc = {
	.rev_offs	= 0x0400,
	.sysc_offs	= 0x0404,
	.syss_offs	= 0x0408,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_usb_otg_hs_hwmod_class = {
	.name	= "usb_otg_hs",
	.sysc	= &omap44xx_usb_otg_hs_sysc,
};

/* usb_otg_hs */
static struct omap_hwmod_irq_info omap44xx_usb_otg_hs_irqs[] = {
	{ .name = "mc", .irq = 92 + OMAP44XX_IRQ_GIC_START },
	{ .name = "dma", .irq = 93 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

/* usb_otg_hs master ports */
static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_masters[] = {
	&omap44xx_usb_otg_hs__l3_main_2,
};

static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
	{ .pa_start = 0x4a0ab000, .pa_end = 0x4a0ab003, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_cfg -> usb_otg_hs */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_otg_hs = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_usb_otg_hs_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_usb_otg_hs_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* usb_otg_hs slave ports */
static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_slaves[] = {
	&omap44xx_l4_cfg__usb_otg_hs,
};

static struct omap_hwmod_opt_clk usb_otg_hs_opt_clks[] = {
	{ .role = "xclk", .clk = "usb_otg_hs_xclk" },
};

static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
	.name		= "usb_otg_hs",
	.class		= &omap44xx_usb_otg_hs_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	/* software-supervised idle and standby transitions */
	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
	.mpu_irqs	= omap44xx_usb_otg_hs_irqs,
	.main_clk	= "usb_otg_hs_ick",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_USB_OTG_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.opt_clks	= usb_otg_hs_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(usb_otg_hs_opt_clks),
	.slaves		= omap44xx_usb_otg_hs_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_usb_otg_hs_slaves),
	.masters	= omap44xx_usb_otg_hs_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_usb_otg_hs_masters),
};

/*
 * 'wd_timer' class
 * 32-bit watchdog upward counter that generates a pulse on the reset pin on
 * overflow
condition */
static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
	.name		= "wd_timer",
	.sysc		= &omap44xx_wd_timer_sysc,
	/* stop the watchdog before the hwmod core shuts the module down */
	.pre_shutdown	= &omap2_wd_timer_disable,
};

/* wd_timer2 */
static struct omap_hwmod omap44xx_wd_timer2_hwmod;
static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
	{ .irq = 80 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
	{ .pa_start = 0x4a314000, .pa_end = 0x4a31407f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
	.master		= &omap44xx_l4_wkup_hwmod,
	.slave		= &omap44xx_wd_timer2_hwmod,
	.clk		= "l4_wkup_clk_mux_ck",
	.addr		= omap44xx_wd_timer2_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* wd_timer2 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_wd_timer2_slaves[] = {
	&omap44xx_l4_wkup__wd_timer2,
};

static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
	.name		= "wd_timer2",
	.class		= &omap44xx_wd_timer_hwmod_class,
	.clkdm_name	= "l4_wkup_clkdm",
	.mpu_irqs	= omap44xx_wd_timer2_irqs,
	.main_clk	= "wd_timer2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_wd_timer2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_slaves),
};

/* wd_timer3: ABE-domain watchdog, separate MPU and DMA address views */
static struct omap_hwmod omap44xx_wd_timer3_hwmod;
static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
	{ .irq = 36 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
	{
		.pa_start	= 0x40130000,
		.pa_end		= 0x4013007f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> wd_timer3 */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_wd_timer3_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_wd_timer3_addrs,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
	{ .pa_start = 0x49030000, .pa_end = 0x4903007f, .flags = ADDR_TYPE_RT },
	{ }
};

/* l4_abe -> wd_timer3 (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_wd_timer3_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_wd_timer3_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* wd_timer3 slave ports */
static struct omap_hwmod_ocp_if *omap44xx_wd_timer3_slaves[] = {
	&omap44xx_l4_abe__wd_timer3,
	&omap44xx_l4_abe__wd_timer3_dma,
};

static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
	.name		= "wd_timer3",
	.class		= &omap44xx_wd_timer_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_wd_timer3_irqs,
	.main_clk	= "wd_timer3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_wd_timer3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
};

/*
 * 'usb_host_hs' class
 * high-speed multi-port usb host controller
 */
static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = {
	.master		= &omap44xx_usb_host_hs_hwmod,
	.slave		= &omap44xx_l3_main_2_hwmod,
	.clk		= "l3_div_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
	.sysc_fields	= &omap_hwmod_sysc_type2,
};
static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = {
	.name	= "usb_host_hs",
	.sysc	= &omap44xx_usb_host_hs_sysc,
};

/* usb_host_hs master ports */
static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = {
	&omap44xx_usb_host_hs__l3_main_2,
};

/*
 * Register windows: UHH glue, then the OHCI and EHCI controller regions.
 * Only the UHH window is the register target (ADDR_TYPE_RT).
 */
static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = {
	{
		.name		= "uhh",
		.pa_start	= 0x4a064000,
		.pa_end		= 0x4a0647ff,
		.flags		= ADDR_TYPE_RT
	},
	{
		.name		= "ohci",
		.pa_start	= 0x4a064800,
		.pa_end		= 0x4a064bff,
	},
	{
		.name		= "ehci",
		.pa_start	= 0x4a064c00,
		.pa_end		= 0x4a064fff,
	},
	{}
};

static struct omap_hwmod_irq_info omap44xx_usb_host_hs_irqs[] = {
	{ .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START },
	{ .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_usb_host_hs_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_usb_host_hs_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = {
	&omap44xx_l4_cfg__usb_host_hs,
};

static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
	.name		= "usb_host_hs",
	.class		= &omap44xx_usb_host_hs_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	.main_clk	= "usb_host_hs_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.mpu_irqs	= omap44xx_usb_host_hs_irqs,
	.slaves		= omap44xx_usb_host_hs_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_usb_host_hs_slaves),
	.masters	= omap44xx_usb_host_hs_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_usb_host_hs_masters),

	/*
	 * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
	 * id: i660
	 *
	 * Description:
	 * In the following configuration :
	 * - USBHOST module is set to smart-idle mode
	 * - PRCM asserts idle_req to the USBHOST module ( This typically
	 *   happens when the system is going to a low power mode : all ports
	 *   have been suspended, the master part of the USBHOST module has
	 *   entered the standby state, and SW has cut the functional clocks)
	 * - an USBHOST interrupt occurs before the module is able to answer
	 *   idle_ack, typically a remote wakeup IRQ.
	 * Then the USB HOST module will enter a deadlock situation where it
	 * is no more accessible nor functional.
	 *
	 * Workaround:
	 * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
	 */

	/*
	 * Errata: USB host EHCI may stall when entering smart-standby mode
	 * Id: i571
	 *
	 * Description:
	 * When the USBHOST module is set to smart-standby mode, and when it
	 * is ready to enter the standby state (i.e. all ports are suspended
	 * and all attached devices are in suspend mode), then it can wrongly
	 * assert the Mstandby signal too early while there are still some
	 * residual OCP transactions ongoing. If this condition occurs, the
	 * internal state machine may go to an undefined state and the USB
	 * link may be stuck upon the next resume.
	 *
	 * Workaround:
	 * Don't use smart standby; use only force standby,
	 * hence HWMOD_SWSUP_MSTANDBY
	 */

	/*
	 * During system boot; If the hwmod framework resets the module
	 * the module will have smart idle settings; which can lead to deadlock
	 * (above Errata Id:i660); so, dont reset the module during boot;
	 * Use HWMOD_INIT_NO_RESET.
	 */
	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
			  HWMOD_INIT_NO_RESET,
};

/*
 * 'usb_tll_hs' class
 * usb_tll_hs module is the adapter on the usb_host_hs ports
 */
static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = {
	.name	= "usb_tll_hs",
	.sysc	= &omap44xx_usb_tll_hs_sysc,
};

static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = {
	{ .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = {
	{
		.name		= "tll",
		.pa_start	= 0x4a062000,
		.pa_end		= 0x4a063fff,
		.flags		= ADDR_TYPE_RT
	},
	{}
};

static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_usb_tll_hs_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_usb_tll_hs_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = {
	&omap44xx_l4_cfg__usb_tll_hs,
};

static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = {
	.name		= "usb_tll_hs",
	.class		= &omap44xx_usb_tll_hs_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	.main_clk	= "usb_tll_hs_ick",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET,
			/* TLL module is hardware-controlled, unlike the host */
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.mpu_irqs	= omap44xx_usb_tll_hs_irqs,
	.slaves		= omap44xx_usb_tll_hs_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_usb_tll_hs_slaves),
};

/*
 * Master list of every OMAP44xx hwmod to be registered at boot;
 * NULL-terminated. Entries left commented out are not yet supported.
 */
static __initdata struct omap_hwmod *omap44xx_hwmods[] = {

	/* dmm class */
	&omap44xx_dmm_hwmod,

	/* emif_fw class */
	&omap44xx_emif_fw_hwmod,

	/* l3 class */
	&omap44xx_l3_instr_hwmod,
	&omap44xx_l3_main_1_hwmod,
	&omap44xx_l3_main_2_hwmod,
	&omap44xx_l3_main_3_hwmod,

	/* l4 class */
	&omap44xx_l4_abe_hwmod,
	&omap44xx_l4_cfg_hwmod,
	&omap44xx_l4_per_hwmod,
	&omap44xx_l4_wkup_hwmod,

	/* mpu_bus class */
	&omap44xx_mpu_private_hwmod,

	/* aess class */
	&omap44xx_aess_hwmod,

	/* bandgap class */
	&omap44xx_bandgap_hwmod,

	/* counter class */
/*	&omap44xx_counter_32k_hwmod, */

	/* dma class */
	&omap44xx_dma_system_hwmod,

	/* dmic class */
	&omap44xx_dmic_hwmod,

	/* dsp class */
	&omap44xx_dsp_hwmod,
	&omap44xx_dsp_c0_hwmod,

	/* dss class */
	&omap44xx_dss_hwmod,
	&omap44xx_dss_dispc_hwmod,
	&omap44xx_dss_dsi1_hwmod,
	&omap44xx_dss_dsi2_hwmod,
	&omap44xx_dss_hdmi_hwmod,
	&omap44xx_dss_rfbi_hwmod,
	&omap44xx_dss_venc_hwmod,

	/* gpio class */
	&omap44xx_gpio1_hwmod,
	&omap44xx_gpio2_hwmod,
	&omap44xx_gpio3_hwmod,
	&omap44xx_gpio4_hwmod,
	&omap44xx_gpio5_hwmod,
	&omap44xx_gpio6_hwmod,

	/* hsi class */
/*	&omap44xx_hsi_hwmod, */

	/* i2c class */
	&omap44xx_i2c1_hwmod,
	&omap44xx_i2c2_hwmod,
	&omap44xx_i2c3_hwmod,
	&omap44xx_i2c4_hwmod,

	/* ipu class */
	&omap44xx_ipu_hwmod,
	&omap44xx_ipu_c0_hwmod,
	&omap44xx_ipu_c1_hwmod,

	/* iss class */
/*	&omap44xx_iss_hwmod, */

	/* iva class */
	&omap44xx_iva_hwmod,
	&omap44xx_iva_seq0_hwmod,
	&omap44xx_iva_seq1_hwmod,

	/* kbd class */
	&omap44xx_kbd_hwmod,

	/* mailbox class */
	&omap44xx_mailbox_hwmod,

	/* mcbsp class */
	&omap44xx_mcbsp1_hwmod,
	&omap44xx_mcbsp2_hwmod,
	&omap44xx_mcbsp3_hwmod,
	&omap44xx_mcbsp4_hwmod,

	/* mcpdm class */
	&omap44xx_mcpdm_hwmod,

	/* mcspi class */
	&omap44xx_mcspi1_hwmod,
	&omap44xx_mcspi2_hwmod,
	&omap44xx_mcspi3_hwmod,
	&omap44xx_mcspi4_hwmod,

	/* mmc class */
	&omap44xx_mmc1_hwmod,
	&omap44xx_mmc2_hwmod,
	&omap44xx_mmc3_hwmod,
	&omap44xx_mmc4_hwmod,
	&omap44xx_mmc5_hwmod,

	/* mpu class */
	&omap44xx_mpu_hwmod,

	/* smartreflex class */
	&omap44xx_smartreflex_core_hwmod,
	&omap44xx_smartreflex_iva_hwmod,
	&omap44xx_smartreflex_mpu_hwmod,

	/* spinlock class */
	&omap44xx_spinlock_hwmod,

	/* timer class */
	&omap44xx_timer1_hwmod,
	&omap44xx_timer2_hwmod,
	&omap44xx_timer3_hwmod,
	&omap44xx_timer4_hwmod,
	&omap44xx_timer5_hwmod,
	&omap44xx_timer6_hwmod,
	&omap44xx_timer7_hwmod,
	&omap44xx_timer8_hwmod,
	&omap44xx_timer9_hwmod,
	&omap44xx_timer10_hwmod,
	&omap44xx_timer11_hwmod,

	/* uart class */
	&omap44xx_uart1_hwmod,
	&omap44xx_uart2_hwmod,
	&omap44xx_uart3_hwmod,
	&omap44xx_uart4_hwmod,

	/* usb host class */
	&omap44xx_usb_host_hs_hwmod,
	&omap44xx_usb_tll_hs_hwmod,

	/* usb_otg_hs class */
	&omap44xx_usb_otg_hs_hwmod,

	/* wd_timer class */
	&omap44xx_wd_timer2_hwmod,
	&omap44xx_wd_timer3_hwmod,
	NULL,
};

/* Register all OMAP44xx hwmods with the hwmod core; called once at boot. */
int __init omap44xx_hwmod_init(void)
{
	return omap_hwmod_register(omap44xx_hwmods);
}
gpl-2.0
jiangjiali66/linux-xlnx
drivers/message/fusion/mptfc.c
1567
42769
/*
 *  linux/drivers/message/fusion/mptfc.c
 *      For use with LSI PCI chip/adapter(s)
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 1999-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for mdelay */
#include <linux/interrupt.h>	/* needed for in_interrupt() proto */
#include <linux/reboot.h>	/* notifier code */
#include <linux/workqueue.h>
#include <linux/sort.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "mptbase.h"
#include "mptscsih.h"

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME		"Fusion MPT FC Host driver"
#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptfc"

MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/* Command line args */
#define MPTFC_DEV_LOSS_TMO (60)
static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO;	/* reasonable default */
module_param(mptfc_dev_loss_tmo, int, 0);
MODULE_PARM_DESC(mptfc_dev_loss_tmo,
		 " Initial time the driver programs the "
		 " transport to wait for an rport to "
		 " return following a device loss event."
		 "  Default=60.");

/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
#define MPTFC_MAX_LUN (16895)
static int max_lun = MPTFC_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

/* Protocol-driver context IDs handed out by the MPT base driver;
 * initialized to the "invalid" sentinel until registration. */
static u8	mptfcDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8	mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8	mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;

static int mptfc_target_alloc(struct scsi_target *starget);
static int mptfc_slave_alloc(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
static void mptfc_remove(struct pci_dev *pdev);
static int mptfc_abort(struct scsi_cmnd *SCpnt);
static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
static int mptfc_host_reset(struct scsi_cmnd *SCpnt);

/*
 * SCSI host template: FC-specific entry points here, generic SPI/FC-shared
 * handlers delegated to mptscsih.
 */
static struct scsi_host_template mptfc_driver_template = {
	.module				= THIS_MODULE,
	.proc_name			= "mptfc",
	.show_info			= mptscsih_show_info,
	.name				= "MPT FC Host",
	.info				= mptscsih_info,
	.queuecommand			= mptfc_qcmd,
	.target_alloc			= mptfc_target_alloc,
	.slave_alloc			= mptfc_slave_alloc,
	.slave_configure		= mptscsih_slave_configure,
	.target_destroy			= mptfc_target_destroy,
	.slave_destroy			= mptscsih_slave_destroy,
	.change_queue_depth 		= mptscsih_change_queue_depth,
	.eh_abort_handler		= mptfc_abort,
	.eh_device_reset_handler	= mptfc_dev_reset,
	.eh_bus_reset_handler		= mptfc_bus_reset,
	.eh_host_reset_handler		= mptfc_host_reset,
	.bios_param			= mptscsih_bios_param,
	.can_queue			= MPT_FC_CAN_QUEUE,
	.this_id			= -1,
	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
	.max_sectors			= 8192,
	.cmd_per_lun			= 7,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mptscsih_host_attrs,
};

/****************************************************************************
 * Supported hardware
 */

static struct pci_device_id mptfc_pci_table[] = {
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
		PCI_ANY_ID, PCI_ANY_ID },
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptfc_pci_table);

static struct scsi_transport_template *mptfc_transport_template = NULL;

/* FC transport class attributes exported for this driver */
static struct fc_function_template mptfc_transport_functions = {
	.dd_fcrport_size = 8,	/* room for a struct mptfc_rport_info * */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_id = 1,
	.show_rport_supported_classes = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,
	.show_host_speed = 1,
	.show_host_fabric_name = 1,
	.show_host_port_type = 1,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
};

/*
 * mptfc_block_error_handler - gate error recovery on rport/ioc readiness
 * @SCpnt:  command being recovered
 * @func:   actual mptscsih recovery handler to invoke once ready
 * @caller: name of the wrapper, for debug logging only
 *
 * Spins (sleeping 1s per iteration, up to ~40s while the ioc is inactive)
 * until the remote port stops reporting DID_IMM_RETRY, then either fails
 * the recovery (port gone / device gone / ioc inactive) or runs @func.
 * Drops the host lock around the sleep.
 */
static int
mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
			  int (*func)(struct scsi_cmnd *SCpnt),
			  const char *caller)
{
	MPT_SCSI_HOST *hd;
	struct scsi_device *sdev = SCpnt->device;
	struct Scsi_Host *shost = sdev->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	unsigned long flags;
	int ready;
	MPT_ADAPTER *ioc;
	int loops = 40;	/* seconds */

	hd = shost_priv(SCpnt->device->host);
	ioc = hd->ioc;
	spin_lock_irqsave(shost->host_lock, flags);
	while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
	 || (loops > 0 && ioc->active == 0)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
			"mptfc_block_error_handler.%d: %d:%llu, port status is "
			"%x, active flag %d, deferring %s recovery.\n",
			ioc->name, ioc->sh->host_no,
			SCpnt->device->id, SCpnt->device->lun,
			ready, ioc->active, caller));
		msleep(1000);
		spin_lock_irqsave(shost->host_lock, flags);
		loops --;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
	 || ioc->active == 0) {
		dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
			"%s.%d: %d:%llu, failing recovery, "
			"port state %x, active %d, vdevice %p.\n", caller,
			ioc->name, ioc->sh->host_no, SCpnt->device->id,
			SCpnt->device->lun, ready, ioc->active,
			SCpnt->device->hostdata));
		return FAILED;
	}
	dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
		"%s.%d: %d:%llu, executing recovery.\n", caller,
		ioc->name, ioc->sh->host_no,
		SCpnt->device->id, SCpnt->device->lun));
	return (*func)(SCpnt);
}

/* eh_* wrappers: defer to the generic mptscsih handlers once the port is ready */
static int
mptfc_abort(struct scsi_cmnd *SCpnt)
{
	return
	    mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
}

static int
mptfc_dev_reset(struct scsi_cmnd *SCpnt)
{
	return
	    mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
}

static int
mptfc_bus_reset(struct scsi_cmnd *SCpnt)
{
	return
	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
}

static int
mptfc_host_reset(struct scsi_cmnd *SCpnt)
{
	return
	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
}

/* Sysfs hook: set rport dev_loss_tmo; non-positive values fall back to the
 * module default. */
static void
mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout > 0)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = mptfc_dev_loss_tmo;
}

/*
 * Comparator for sort(): orders FC Device Page 0 pointers by
 * (CurrentBus, CurrentTargetID) ascending.
 */
static int
mptfc_FcDevPage0_cmp_func(const void *a, const void *b)
{
	FCDevicePage0_t **aa = (FCDevicePage0_t **)a;
	FCDevicePage0_t **bb = (FCDevicePage0_t **)b;

	if ((*aa)->CurrentBus == (*bb)->CurrentBus) {
		if ((*aa)->CurrentTargetID == (*bb)->CurrentTargetID)
			return 0;
		if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID)
			return -1;
		return 1;
	}
	if ((*aa)->CurrentBus < (*bb)->CurrentBus)
		return -1;
	return 1;
}

/*
 * mptfc_GetFcDevPage0 - walk firmware's FC device list for a port
 * @ioc:      adapter
 * @ioc_port: IOC port number, passed through to @func as the channel
 * @func:     callback invoked for each discovered device page
 *
 * Iterates FC Device Page 0 via mpt_config() (the returned PortIdentifier
 * seeds the next pageAddr until it exceeds 0xff0000), byte-swaps each page
 * to host endianness, sorts the results by bus/target, then calls @func per
 * device. Returns 0 or a negative errno/-style mpt_config failure code.
 */
static int
mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
	void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg))
{
	ConfigPageHeader_t	 hdr;
	CONFIGPARMS		 cfg;
	FCDevicePage0_t		*ppage0_alloc, *fc;
	dma_addr_t		 page0_dma;
	int			 data_sz;
	int			 ii;

	FCDevicePage0_t		*p0_array=NULL, *p_p0;
	FCDevicePage0_t		**pp0_array=NULL, **p_pp0;

	int			 rc = -ENOMEM;
	U32			 port_id = 0xffffff;
	int			 num_targ = 0;
	int			 max_bus = ioc->facts.MaxBuses;
	int			 max_targ;

	max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices;

	data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ;
	p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL);
	if (!p0_array)
		goto out;

	data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ;
	p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL);
	if (!pp0_array)
		goto out;

	do {
		/* Get FC Device Page 0 header */
		hdr.PageVersion = 0;
		hdr.PageLength = 0;
		hdr.PageNumber = 0;
		hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE;
		cfg.cfghdr.hdr = &hdr;
		cfg.physAddr = -1;
		cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
		cfg.dir = 0;
		cfg.pageAddr = port_id;
		cfg.timeout = 0;

		if ((rc = mpt_config(ioc, &cfg)) != 0)
			break;

		if (hdr.PageLength <= 0)
			break;

		data_sz = hdr.PageLength * 4;
		ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz,
							&page0_dma);
		rc = -ENOMEM;
		if (!ppage0_alloc)
			break;

		cfg.physAddr = page0_dma;
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

		if ((rc = mpt_config(ioc, &cfg)) == 0) {
			/* normalize multi-byte fields to host endianness */
			ppage0_alloc->PortIdentifier =
				le32_to_cpu(ppage0_alloc->PortIdentifier);

			ppage0_alloc->WWNN.Low =
				le32_to_cpu(ppage0_alloc->WWNN.Low);

			ppage0_alloc->WWNN.High =
				le32_to_cpu(ppage0_alloc->WWNN.High);

			ppage0_alloc->WWPN.Low =
				le32_to_cpu(ppage0_alloc->WWPN.Low);

			ppage0_alloc->WWPN.High =
				le32_to_cpu(ppage0_alloc->WWPN.High);

			ppage0_alloc->BBCredit =
				le16_to_cpu(ppage0_alloc->BBCredit);

			ppage0_alloc->MaxRxFrameSize =
				le16_to_cpu(ppage0_alloc->MaxRxFrameSize);

			port_id = ppage0_alloc->PortIdentifier;
			num_targ++;
			*p_p0 = *ppage0_alloc;	/* save data */
			*p_pp0++ = p_p0++;	/* save addr */
		}
		pci_free_consistent(ioc->pcidev, data_sz,
					(u8 *) ppage0_alloc, page0_dma);
		if (rc != 0)
			break;

	} while (port_id <= 0xff0000);

	if (num_targ) {
		/* sort array */
		if (num_targ > 1)
			sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *),
				mptfc_FcDevPage0_cmp_func, NULL);
		/* call caller's func for each targ */
		for (ii = 0; ii < num_targ; ii++) {
			fc = *(pp0_array+ii);
			func(ioc, ioc_port, fc);
		}
	}

 out:
	kfree(pp0_array);
	kfree(p0_array);
	return rc;
}

/*
 * Build fc_rport_identifiers from a device page.  Returns -1 when the
 * device is not a usable FCP target (invalid PLOGI/PRLI, no valid
 * target/bus mapping, or not an FCP target).
 */
static int
mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
{
	/* not currently usable */
	if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID |
			  MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID))
		return -1;

	if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID))
		return -1;

	if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET))
		return -1;

	/*
	 * board data structure already normalized to platform endianness
	 * shifted to avoid unaligned access on 64 bit architecture
	 */
	rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low;
	rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
	rid->port_id =   pg0->PortIdentifier;
	rid->roles = FC_RPORT_ROLE_UNKNOWN;

	return 0;
}

/*
 * mptfc_register_dev - register/update one FC device with the FC transport
 * @ioc:     adapter
 * @channel: channel (ioc port) the device was found on
 * @pg0:     device page 0 data (host endianness)
 *
 * Matches by WWPN against ioc->fc_rports; allocates a tracking entry for a
 * new device, refreshes pg0 and clears the MISSING flag for a known one,
 * and (re)adds the rport to the transport if it is not currently registered.
 */
static void
mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport		*rport;
	struct mptfc_rport_info	*ri;
	int			new_ri = 1;
	u64			pn, nn;
	VirtTarget		*vtarget;
	u32			roles = FC_RPORT_ROLE_UNKNOWN;

	if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
		return;

	roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
		roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	/* scan list looking for a match */
	list_for_each_entry(ri, &ioc->fc_rports, list) {
		pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
		if (pn == rport_ids.port_name) {	/* match */
			list_move_tail(&ri->list, &ioc->fc_rports);
			new_ri = 0;
			break;
		}
	}
	if (new_ri) {	/* allocate one */
		ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
		if (!ri)
			return;
		list_add_tail(&ri->list, &ioc->fc_rports);
	}

	ri->pg0 = *pg0;	/* add/update pg0 data */
	ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING;

	/* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
	if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
		ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
		rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
		if (rport) {
			ri->rport = rport;
			if (new_ri) /* may have been reset by user */
				rport->dev_loss_tmo = mptfc_dev_loss_tmo;
			/*
			 * if already mapped, remap here.  If not mapped,
			 * target_alloc will allocate vtarget and map,
			 * slave_alloc will fill in vdevice from vtarget.
			 */
			if (ri->starget) {
				vtarget = ri->starget->hostdata;
				if (vtarget) {
					vtarget->id = pg0->CurrentTargetID;
					vtarget->channel = pg0->CurrentBus;
					vtarget->deleted = 0;
				}
			}
			*((struct mptfc_rport_info **)rport->dd_data) = ri;
			/* scan will be scheduled once rport becomes a target */
			fc_remote_port_rolechg(rport,roles);

			pn = (u64)ri->pg0.WWPN.High << 32 |
			     (u64)ri->pg0.WWPN.Low;
			nn = (u64)ri->pg0.WWNN.High << 32 |
			     (u64)ri->pg0.WWNN.Low;
			dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
				"mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
				"rport tid %d, tmo %d\n",
					ioc->name,
					ioc->sh->host_no,
					pg0->PortIdentifier,
					(unsigned long long)nn,
					(unsigned long long)pn,
					pg0->CurrentTargetID,
					ri->rport->scsi_target_id,
					ri->rport->dev_loss_tmo));
		} else {
			/* transport refused the add; drop the tracking entry */
			list_del(&ri->list);
			kfree(ri);
			ri = NULL;
		}
	}
}

/*
 *	OS entry point to allow for host driver to free allocated memory
 *	Called if no device present or device being unloaded
 */
static void
mptfc_target_destroy(struct scsi_target *starget)
{
	struct fc_rport		*rport;
	struct mptfc_rport_info *ri;

	rport = starget_to_rport(starget);
	if (rport) {
		ri = *((struct mptfc_rport_info **)rport->dd_data);
		if (ri)	/* better be! */
			ri->starget = NULL;
	}
	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

/*
 *	OS entry point to allow host driver to alloc memory
 *	for each scsi target. Called once per device the bus scan.
 *	Return non-zero if allocation fails.
 */
static int
mptfc_target_alloc(struct scsi_target *starget)
{
	VirtTarget		*vtarget;
	struct fc_rport		*rport;
	struct mptfc_rport_info *ri;
	int			rc;

	vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
	if (!vtarget)
		return -ENOMEM;
	starget->hostdata = vtarget;

	rc = -ENODEV;
	rport = starget_to_rport(starget);
	if (rport) {
		ri = *((struct mptfc_rport_info **)rport->dd_data);
		if (ri) {	/* better be! */
			/* map target to the firmware bus/id from pg0 */
			vtarget->id = ri->pg0.CurrentTargetID;
			vtarget->channel = ri->pg0.CurrentBus;
			ri->starget = starget;
			rc = 0;
		}
	}
	if (rc != 0) {
		kfree(vtarget);
		starget->hostdata = NULL;
	}

	return rc;
}

/*
 *	mptfc_dump_lun_info
 *	@ioc
 *	@rport
 *	@sdev
 *
 *	Debug-only: log WWN/target mapping for a newly allocated LUN.
 */
static void
mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport,
		struct scsi_device *sdev, VirtTarget *vtarget)
{
	u64 nn, pn;
	struct mptfc_rport_info *ri;

	ri = *((struct mptfc_rport_info **)rport->dd_data);
	pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
	nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
	dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
		"mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
		"CurrentTargetID %d, %x %llx %llx\n",
		ioc->name, sdev->host->host_no,
		vtarget->num_luns, sdev->id, ri->pg0.CurrentTargetID,
		ri->pg0.PortIdentifier,
		(unsigned long long)pn,
		(unsigned long long)nn));
}

/*
 *	OS entry point to allow host driver to alloc memory
 *	for each scsi device. Called once per device the bus scan.
 *	Return non-zero if allocation fails.
 *	Init memory once per LUN.
 */
static int
mptfc_slave_alloc(struct scsi_device *sdev)
{
	MPT_SCSI_HOST		*hd;
	VirtTarget		*vtarget;
	VirtDevice		*vdevice;
	struct scsi_target	*starget;
	struct fc_rport		*rport;
	MPT_ADAPTER 		*ioc;

	starget = scsi_target(sdev);
	rport = starget_to_rport(starget);

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	hd = shost_priv(sdev->host);
	ioc = hd->ioc;

	vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
	if (!vdevice) {
		printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
				ioc->name, sizeof(VirtDevice));
		return -ENOMEM;
	}

	sdev->hostdata = vdevice;
	vtarget = starget->hostdata;

	if (vtarget->num_luns == 0) {
		/* first LUN on this target: finish target-level init */
		vtarget->ioc_id = ioc->id;
		vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
	}

	vdevice->vtarget = vtarget;
	vdevice->lun = sdev->lun;

	vtarget->num_luns++;

	mptfc_dump_lun_info(ioc, rport, sdev, vtarget);

	return 0;
}

/*
 * queuecommand entry point: completes commands immediately with
 * DID_NO_CONNECT / rport-derived status / DID_IMM_RETRY when the device,
 * port, or rport tracking info is not usable, else hands off to mptscsih.
 */
static int
mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
	struct mptfc_rport_info	*ri;
	struct fc_rport	*rport = starget_to_rport(scsi_target(SCpnt->device));
	int		err;
	VirtDevice	*vdevice = SCpnt->device->hostdata;

	if (!vdevice || !vdevice->vtarget) {
		SCpnt->result = DID_NO_CONNECT << 16;
		SCpnt->scsi_done(SCpnt);
		return 0;
	}

	err = fc_remote_port_chkready(rport);
	if (unlikely(err)) {
		SCpnt->result = err;
		SCpnt->scsi_done(SCpnt);
		return 0;
	}

	/* dd_data is null until finished adding target */
	ri = *((struct mptfc_rport_info **)rport->dd_data);
	if (unlikely(!ri)) {
		SCpnt->result = DID_IMM_RETRY << 16;
		SCpnt->scsi_done(SCpnt);
		return 0;
	}

	return mptscsih_qcmd(SCpnt);
}

/*
 *	mptfc_display_port_link_speed - displaying link speed
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@portnum: IOC Port number
 *	@pp0dest: port page0 data payload
 */
static void
mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0dest)
{
	u8	old_speed, new_speed, state;
	char	*old, *new;

	if (portnum >= 2)
		return;

	old_speed = ioc->fc_link_speed[portnum];
	new_speed = pp0dest->CurrentSpeed;
	state = pp0dest->PortState;

	if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
	    new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {

		old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ?
			"1 Gbps" :
		       old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ?
			"2 Gbps" :
		       old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ?
			"4 Gbps" : "Unknown";

		new = new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ?
			"1 Gbps" :
		       new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ?
			"2 Gbps" :
		       new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ?
			"4 Gbps" : "Unknown";

		if (old_speed == 0)
			printk(MYIOC_s_NOTE_FMT
				"FC Link Established, Speed = %s\n",
				ioc->name, new);
		else if (old_speed != new_speed)
			printk(MYIOC_s_WARN_FMT
				"FC Link Speed Change, Old Speed = %s, New Speed = %s\n",
				ioc->name, old, new);

		ioc->fc_link_speed[portnum] = new_speed;
	}
}

/*
 *	mptfc_GetFcPortPage0 - Fetch FCPort config Page0.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@portnum: IOC Port number
 *
 *	Return: 0 for success
 *	-ENOMEM if no memory available
 *		-EPERM if not allowed due to ISR context
 *		-EAGAIN if no msg frames currently available
 *		-EFAULT for non-successful reply or no reply (timeout)
 *		-EINVAL portnum arg out of range (hardwired to two elements)
 */
static int
mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
{
	ConfigPageHeader_t	 hdr;
	CONFIGPARMS		 cfg;
	FCPortPage0_t		*ppage0_alloc;
	FCPortPage0_t		*pp0dest;
	dma_addr_t		 page0_dma;
	int			 data_sz;
	int			 copy_sz;
	int			 rc;
	int			 count = 400;	/* 100ms polls ~= 40s max */

	if (portnum > 1)
		return -EINVAL;

	/* Get FCPort Page 0 header */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 0;
	hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = portnum;
	cfg.timeout = 0;

	if ((rc = mpt_config(ioc, &cfg)) != 0)
		return rc;

	if (hdr.PageLength == 0)
		return 0;

	data_sz = hdr.PageLength * 4;
	rc = -ENOMEM;
	ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
	if (ppage0_alloc) {

 try_again:
		memset((u8 *)ppage0_alloc, 0, data_sz);
		cfg.physAddr = page0_dma;
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

		if ((rc = mpt_config(ioc, &cfg)) == 0) {
			/* save the data */
			pp0dest = &ioc->fc_port_page0[portnum];
			copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz);
			memcpy(pp0dest, ppage0_alloc, copy_sz);

			/*
			 *	Normalize endianness of structure data,
			 *	by byte-swapping all > 1 byte fields!
			 */
			pp0dest->Flags = le32_to_cpu(pp0dest->Flags);
			pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier);
			pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low);
			pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High);
			pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low);
			pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High);
			pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass);
			pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds);
			pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed);
			pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize);
			pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low);
			pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High);
			pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low);
			pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High);
			pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount);
			pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators);

			/*
			 * if still doing discovery,
			 * hang loose a while until finished
			 */
			if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
			    (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
			     (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
			      == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
				if (count-- > 0) {
					msleep(100);
					goto try_again;
				}
				printk(MYIOC_s_INFO_FMT "Firmware discovery not"
							" complete.\n",
						ioc->name);
			}
			mptfc_display_port_link_speed(ioc, portnum, pp0dest);
		}

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
	}

	return rc;
}

/*
 * Write the cached FC Port Page 1 for @portnum back to the firmware.
 * Requires a previously fetched/cached page (see mptfc_GetFcPortPage1);
 * fails with -EINVAL if the cached size no longer matches the firmware's.
 */
static int
mptfc_WriteFcPortPage1(MPT_ADAPTER *ioc, int portnum)
{
	ConfigPageHeader_t	 hdr;
	CONFIGPARMS		 cfg;
	int			 rc;

	if (portnum > 1)
		return -EINVAL;

	if (!(ioc->fc_data.fc_port_page1[portnum].data))
		return -EINVAL;

	/* get fcport page 1 header */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 1;
	hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = portnum;
	cfg.timeout = 0;

	if ((rc = mpt_config(ioc, &cfg)) != 0)
		return rc;

	if (hdr.PageLength == 0)
		return -ENODEV;

	if (hdr.PageLength*4 != ioc->fc_data.fc_port_page1[portnum].pg_sz)
		return -EINVAL;

	cfg.physAddr = ioc->fc_data.fc_port_page1[portnum].dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	cfg.dir = 1;

	rc = mpt_config(ioc, &cfg);

	return rc;
}

/*
 * Fetch FC Port Page 1 for @portnum into a cached DMA-coherent buffer
 * (ioc->fc_data.fc_port_page1[portnum]); reallocates the buffer if the
 * firmware now reports a larger page than previously cached.
 */
static int
mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
{
	ConfigPageHeader_t	 hdr;
	CONFIGPARMS		 cfg;
	FCPortPage1_t		*page1_alloc;
	dma_addr_t		 page1_dma;
	int			 data_sz;
	int			 rc;

	if (portnum > 1)
		return -EINVAL;

	/* get fcport page 1 header */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 1;
	hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = portnum;
	cfg.timeout = 0;

	if ((rc = mpt_config(ioc, &cfg)) != 0)
		return rc;

	if (hdr.PageLength == 0)
		return -ENODEV;

 start_over:

	if (ioc->fc_data.fc_port_page1[portnum].data == NULL) {
		data_sz = hdr.PageLength * 4;
		if (data_sz < sizeof(FCPortPage1_t))
			data_sz = sizeof(FCPortPage1_t);

		page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
						data_sz,
						&page1_dma);
		if (!page1_alloc)
			return -ENOMEM;
	}
	else {
		page1_alloc = ioc->fc_data.fc_port_page1[portnum].data;
		page1_dma = ioc->fc_data.fc_port_page1[portnum].dma;
		data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
		if (hdr.PageLength * 4 > data_sz) {
			/* cached buffer too small; free and reallocate */
			ioc->fc_data.fc_port_page1[portnum].data = NULL;
			pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
				page1_alloc, page1_dma);
			goto start_over;
		}
	}

	memset(page1_alloc,0,data_sz);

	cfg.physAddr = page1_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

	if ((rc = mpt_config(ioc, &cfg)) == 0) {
		ioc->fc_data.fc_port_page1[portnum].data = page1_alloc;
		ioc->fc_data.fc_port_page1[portnum].pg_sz = data_sz;
		ioc->fc_data.fc_port_page1[portnum].dma = page1_dma;
	}
	else {
		ioc->fc_data.fc_port_page1[portnum].data = NULL;
		pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
			page1_alloc, page1_dma);
	}

	return rc;
}

/*
 * For every IOC port, force the driver's required FC Port Page 1 settings
 * (1s device/IO-pend timeouts, immediate error reply on, verbose rescan
 * events off), writing the page back only if something differs.
 */
static void
mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
{
	int		ii;
	FCPortPage1_t	*pp1;

	#define MPTFC_FW_DEVICE_TIMEOUT	(1)
	#define MPTFC_FW_IO_PEND_TIMEOUT (1)
	#define ON_FLAGS  (MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY)
	#define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS)

	for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
		if (mptfc_GetFcPortPage1(ioc, ii) != 0)
			continue;
		pp1 = ioc->fc_data.fc_port_page1[ii].data;
		if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT)
		 && (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT)
		 && ((pp1->Flags & ON_FLAGS) == ON_FLAGS)
		 && ((pp1->Flags & OFF_FLAGS) == 0))
			continue;
		pp1->InitiatorDeviceTimeout = MPTFC_FW_DEVICE_TIMEOUT;
		pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT;
		pp1->Flags &= ~OFF_FLAGS;
		pp1->Flags |= ON_FLAGS;
		mptfc_WriteFcPortPage1(ioc, ii);
	}
}

/*
 * Populate FC transport host attributes from cached port page 0.
 * NOTE(review): definition continues beyond this chunk; remainder not
 * visible here.
 */
static void
mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
{
	unsigned	class = 0;
	unsigned	cos = 0;
	unsigned	speed;
	unsigned	port_type;
	unsigned	port_state;
	FCPortPage0_t	*pp0;
	struct Scsi_Host *sh;
	char		*sn;

	/* don't know what to do as only one scsi (fc) host was allocated */
	if (portnum != 0)
		return;

	pp0 = &ioc->fc_port_page0[portnum];
	sh = ioc->sh;

	sn = fc_host_symbolic_name(sh);
	snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh",
	    ioc->prod_name,
	    MPT_FW_REV_MAGIC_ID_STRING,
	    ioc->facts.FWVersion.Word);

	fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN;

	fc_host_maxframe_size(sh) = pp0->MaxFrameSize;

	fc_host_node_name(sh) =
	    	(u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;

	fc_host_port_name(sh)
= (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; fc_host_port_id(sh) = pp0->PortIdentifier; class = pp0->SupportedServiceClass; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) cos |= FC_COS_CLASS1; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) cos |= FC_COS_CLASS2; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) cos |= FC_COS_CLASS3; fc_host_supported_classes(sh) = cos; if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT) speed = FC_PORTSPEED_1GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT) speed = FC_PORTSPEED_2GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT) speed = FC_PORTSPEED_4GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT) speed = FC_PORTSPEED_10GBIT; else speed = FC_PORTSPEED_UNKNOWN; fc_host_speed(sh) = speed; speed = 0; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED) speed |= FC_PORTSPEED_1GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED) speed |= FC_PORTSPEED_2GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED) speed |= FC_PORTSPEED_4GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED) speed |= FC_PORTSPEED_10GBIT; fc_host_supported_speeds(sh) = speed; port_state = FC_PORTSTATE_UNKNOWN; if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE) port_state = FC_PORTSTATE_ONLINE; else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE) port_state = FC_PORTSTATE_LINKDOWN; fc_host_port_state(sh) = port_state; port_type = FC_PORTTYPE_UNKNOWN; if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT) port_type = FC_PORTTYPE_PTP; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP) port_type = FC_PORTTYPE_LPORT; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP) port_type = FC_PORTTYPE_NLPORT; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT) port_type = FC_PORTTYPE_NPORT; fc_host_port_type(sh) = port_type; fc_host_fabric_name(sh) = (pp0->Flags & 
MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ? (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low : (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; } static void mptfc_link_status_change(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_rescan_work); int ii; for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) (void) mptfc_GetFcPortPage0(ioc, ii); } static void mptfc_setup_reset(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_setup_reset_work); u64 pn; struct mptfc_rport_info *ri; struct scsi_target *starget; VirtTarget *vtarget; /* reset about to happen, delete (block) all rports */ list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED; fc_remote_port_delete(ri->rport); /* won't sleep */ ri->rport = NULL; starget = ri->starget; if (starget) { vtarget = starget->hostdata; if (vtarget) vtarget->deleted = 1; } pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_setup_reset.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, (unsigned long long)pn)); } } } static void mptfc_rescan_devices(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_rescan_work); int ii; u64 pn; struct mptfc_rport_info *ri; struct scsi_target *starget; VirtTarget *vtarget; /* start by tagging all ports as missing */ list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; } } /* * now rescan devices known to adapter, * will reregister existing rports */ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { (void) mptfc_GetFcPortPage0(ioc, ii); mptfc_init_host_attr(ioc, ii); /* refresh */ mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev); } /* delete devices still missing */ list_for_each_entry(ri, &ioc->fc_rports, list) { /* if newly missing, delete it */ if (ri->flags & 
MPT_RPORT_INFO_FLAGS_MISSING) { ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| MPT_RPORT_INFO_FLAGS_MISSING); fc_remote_port_delete(ri->rport); /* won't sleep */ ri->rport = NULL; starget = ri->starget; if (starget) { vtarget = starget->hostdata; if (vtarget) vtarget->deleted = 1; } pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_rescan.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, (unsigned long long)pn)); } } } static int mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *sh; MPT_SCSI_HOST *hd; MPT_ADAPTER *ioc; unsigned long flags; int ii; int numSGE = 0; int scale; int ioc_cap; int error=0; int r; if ((r = mpt_attach(pdev,id)) != 0) return r; ioc = pci_get_drvdata(pdev); ioc->DoneCtx = mptfcDoneCtx; ioc->TaskCtx = mptfcTaskCtx; ioc->InternalCtx = mptfcInternalCtx; /* Added sanity check on readiness of the MPT adapter. */ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { printk(MYIOC_s_WARN_FMT "Skipping because it's not operational!\n", ioc->name); error = -ENODEV; goto out_mptfc_probe; } if (!ioc->active) { printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n", ioc->name); error = -ENODEV; goto out_mptfc_probe; } /* Sanity check - ensure at least 1 port is INITIATOR capable */ ioc_cap = 0; for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { if (ioc->pfacts[ii].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) ioc_cap ++; } if (!ioc_cap) { printk(MYIOC_s_WARN_FMT "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", ioc->name, ioc); return 0; } sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); if (!sh) { printk(MYIOC_s_WARN_FMT "Unable to register controller with SCSI subsystem\n", ioc->name); error = -1; goto out_mptfc_probe; } spin_lock_init(&ioc->fc_rescan_work_lock); INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); INIT_WORK(&ioc->fc_lsc_work, 
mptfc_link_status_change); spin_lock_irqsave(&ioc->FreeQlock, flags); /* Attach the SCSI Host to the IOC structure */ ioc->sh = sh; sh->io_port = 0; sh->n_io_port = 0; sh->irq = 0; /* set 16 byte cdb's */ sh->max_cmd_len = 16; sh->max_id = ioc->pfacts->MaxDevices; sh->max_lun = max_lun; /* Required entry. */ sh->unique_id = ioc->id; /* Verify that we won't exceed the maximum * number of chain buffers * We can optimize: ZZ = req_sz/sizeof(SGE) * For 32bit SGE's: * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ * + (req_sz - 64)/sizeof(SGE) * A slightly different algorithm is required for * 64bit SGEs. */ scale = ioc->req_sz/ioc->SGE_size; if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { /* Reset this value */ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Resetting sg_tablesize to %d from %d\n", ioc->name, numSGE, sh->sg_tablesize)); sh->sg_tablesize = numSGE; } spin_unlock_irqrestore(&ioc->FreeQlock, flags); hd = shost_priv(sh); hd->ioc = ioc; /* SCSI needs scsi_cmnd lookup table! * (with size equal to req_depth*PtrSz!) 
*/ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC); if (!ioc->ScsiLookup) { error = -ENOMEM; goto out_mptfc_probe; } spin_lock_init(&ioc->scsi_lookup_lock); dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); hd->last_queue_full = 0; sh->transportt = mptfc_transport_template; error = scsi_add_host (sh, &ioc->pcidev->dev); if(error) { dprintk(ioc, printk(MYIOC_s_ERR_FMT "scsi_add_host failed\n", ioc->name)); goto out_mptfc_probe; } /* initialize workqueue */ snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), "mptfc_wq_%d", sh->host_no); ioc->fc_rescan_work_q = create_singlethread_workqueue(ioc->fc_rescan_work_q_name); if (!ioc->fc_rescan_work_q) goto out_mptfc_probe; /* * Pre-fetch FC port WWN and stuff... * (FCPortPage0_t stuff) */ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { (void) mptfc_GetFcPortPage0(ioc, ii); } mptfc_SetFcPortPage1_defaults(ioc); /* * scan for rports - * by doing it via the workqueue, some locking is eliminated */ queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); flush_workqueue(ioc->fc_rescan_work_q); return 0; out_mptfc_probe: mptscsih_remove(pdev); return error; } static struct pci_driver mptfc_driver = { .name = "mptfc", .id_table = mptfc_pci_table, .probe = mptfc_probe, .remove = mptfc_remove, .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, .resume = mptscsih_resume, #endif }; static int mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; unsigned long flags; int rc=1; if (ioc->bus_type != FC) return 0; devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", ioc->name, event)); if (ioc->sh == NULL || ((hd = shost_priv(ioc->sh)) == NULL)) return 1; switch (event) { case MPI_EVENT_RESCAN: spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { 
queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); break; case MPI_EVENT_LINK_STATUS_CHANGE: spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { queue_work(ioc->fc_rescan_work_q, &ioc->fc_lsc_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); break; default: rc = mptscsih_event_process(ioc,pEvReply); break; } return rc; } static int mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { int rc; unsigned long flags; rc = mptscsih_ioc_reset(ioc,reset_phase); if ((ioc->bus_type != FC) || (!rc)) return rc; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": IOC %s_reset routed to FC host driver!\n",ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); if (reset_phase == MPT_IOC_SETUP_RESET) { spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { queue_work(ioc->fc_rescan_work_q, &ioc->fc_setup_reset_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); } else if (reset_phase == MPT_IOC_PRE_RESET) { } else { /* MPT_IOC_POST_RESET */ mptfc_SetFcPortPage1_defaults(ioc); spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); } return 1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. * * Returns 0 for success, non-zero for failure. 
*/ static int __init mptfc_init(void) { int error; show_mptmod_ver(my_NAME, my_VERSION); /* sanity check module parameters */ if (mptfc_dev_loss_tmo <= 0) mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; mptfc_transport_template = fc_attach_transport(&mptfc_transport_functions); if (!mptfc_transport_template) return -ENODEV; mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER, "mptscsih_scandv_complete"); mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER, "mptscsih_scandv_complete"); mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER, "mptscsih_scandv_complete"); mpt_event_register(mptfcDoneCtx, mptfc_event_process); mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset); error = pci_register_driver(&mptfc_driver); if (error) fc_release_transport(mptfc_transport_template); return error; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptfc_remove - Remove fc infrastructure for devices * @pdev: Pointer to pci_dev structure * */ static void mptfc_remove(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); struct mptfc_rport_info *p, *n; struct workqueue_struct *work_q; unsigned long flags; int ii; /* destroy workqueue */ if ((work_q=ioc->fc_rescan_work_q)) { spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); ioc->fc_rescan_work_q = NULL; spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); destroy_workqueue(work_q); } fc_remove_host(ioc->sh); list_for_each_entry_safe(p, n, &ioc->fc_rports, list) { list_del(&p->list); kfree(p); } for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) { if (ioc->fc_data.fc_port_page1[ii].data) { pci_free_consistent(ioc->pcidev, ioc->fc_data.fc_port_page1[ii].pg_sz, (u8 *) ioc->fc_data.fc_port_page1[ii].data, ioc->fc_data.fc_port_page1[ii].dma); ioc->fc_data.fc_port_page1[ii].data = NULL; } } mptscsih_remove(pdev); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mptfc_exit - Unregisters MPT adapter(s)
 *
 */
static void __exit
mptfc_exit(void)
{
	/* Teardown is the exact reverse of mptfc_init(): first stop new
	 * devices (PCI driver), release the FC transport template, then
	 * detach the reset/event callbacks, and finally drop the three
	 * mpt_register() contexts in reverse registration order.  The
	 * ordering is deliberate -- do not reorder these calls. */
	pci_unregister_driver(&mptfc_driver);
	fc_release_transport(mptfc_transport_template);

	mpt_reset_deregister(mptfcDoneCtx);
	mpt_event_deregister(mptfcDoneCtx);

	mpt_deregister(mptfcInternalCtx);
	mpt_deregister(mptfcTaskCtx);
	mpt_deregister(mptfcDoneCtx);
}

module_init(mptfc_init);
module_exit(mptfc_exit);
gpl-2.0
onenonlycasper/android_kernel_htc_flounder
drivers/net/ethernet/3com/3c59x.c
2079
104489
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ /* Written 1996-1999 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 and the EtherLink XL 3c900 and 3c905 cards. Problem reports and questions should be directed to vortex@scyld.com The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ /* * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation * as well as other drivers * * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k * due to dead code elimination. There will be some performance benefits from this due to * elimination of all the tests and reduced cache footprint. */ #define DRV_NAME "3c59x" /* A few values that may be tweaked. */ /* Keep the ring sizes a power of two for efficiency. */ #define TX_RING_SIZE 16 #define RX_RING_SIZE 32 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* "Knobs" that adjust features and parameters. */ /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1512 effectively disables this feature. */ #ifndef __arm__ static int rx_copybreak = 200; #else /* ARM systems perform better by disregarding the bus-master transfer capability of these cards. -- rmk */ static int rx_copybreak = 1513; #endif /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ static const int mtu = 1500; /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 32; /* Tx timeout interval (millisecs) */ static int watchdog = 5000; /* Allow aggregation of Tx interrupts. 
Saves CPU load at the cost * of possible Tx stalls if the system is blocking interrupts * somewhere else. Undefine this to disable. */ #define tx_interrupt_mitigation 1 /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ #define vortex_debug debug #ifdef VORTEX_DEBUG static int vortex_debug = VORTEX_DEBUG; #else static int vortex_debug = 1; #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/highmem.h> #include <linux/eisa.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/gfp.h> #include <asm/irq.h> /* For nr_irqs only. */ #include <asm/io.h> #include <asm/uaccess.h> /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. This is only in the support-all-kernels source code. */ #define RUN_AT(x) (jiffies + (x)) #include <linux/delay.h> static const char version[] = DRV_NAME ": Donald Becker and others.\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); MODULE_LICENSE("GPL"); /* Operational parameter that usually are not changed. */ /* The Vortex size is twice that of the original EtherLinkIII series: the runtime register window, window 1, is now always mapped in. The Boomerang size is twice as large as the Vortex -- it has additional bus master control registers. */ #define VORTEX_TOTAL_SIZE 0x20 #define BOOMERANG_TOTAL_SIZE 0x40 /* Set iff a MII transceiver on any interface requires mdio preamble. This only set with the original DP83840 on older 3c905 boards, so the extra code size of a per-interface flag is not worthwhile. 
*/ static char mii_preamble_required; #define PFX DRV_NAME ": " /* Theory of Operation I. Board Compatibility This device driver is designed for the 3Com FastEtherLink and FastEtherLink XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs versions of the FastEtherLink cards. The supported product IDs are 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 The related ISA 3c515 is supported with a separate driver, 3c515.c, included with the kernel source or available from cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html II. Board-specific settings PCI bus devices are configured by the system at boot time, so no jumpers need to be set on the board. The system BIOS should be set to assign the PCI INTA signal to an otherwise unused system IRQ line. The EEPROM settings for media type and forced-full-duplex are observed. The EEPROM media type should be left at the default "autoselect" unless using 10base2 or AUI connections which cannot be reliably detected. III. Driver operation The 3c59x series use an interface that's very similar to the previous 3c5x9 series. The primary interface is two programmed-I/O FIFOs, with an alternate single-contiguous-region bus-master transfer (see next). The 3c900 "Boomerang" series uses a full-bus-master interface with separate lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, DEC Tulip and Intel Speedo3. The first chip version retains a compatible programmed-I/O interface that has been removed in 'B' and subsequent board revisions. One extension that is advertised in a very large font is that the adapters are capable of being bus masters. On the Vortex chip this capability was only for a single contiguous region making it far less useful than the full bus master capability. There is a significant performance impact of taking an extra interrupt or polling for the completion of each transfer, as well as difficulty sharing the single transfer engine between the transmit and receive threads. 
Using DMA transfers is a win only with large blocks or with the flawed versions of the Intel Orion motherboard PCI controller. The Boomerang chip's full-bus-master interface is useful, and has the currently-unused advantages over other similar chips that queued transmit packets may be reordered and receive buffer groups are associated with a single frame. With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. Rather than a fixed intermediate receive buffer, this scheme allocates full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is chosen to trade-off the memory wasted by passing the full-sized skbuff to the queue layer for all frames vs. the copying cost of copying a frame to a correctly-sized skbuff. IIIC. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and other software. IV. Notes Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development 3c590, 3c595, and 3c900 boards. The name "Vortex" is the internal 3Com project name for the PCI ASIC, and the EISA version is called "Demon". According to Terry these names come from rides at the local amusement park. The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! This driver only supports ethernet packets because of the skbuff allocation limit of 4K. */ /* This table drives the PCI probe routines. It's mostly boilerplate in all of the drivers, and will likely be provided by some future kernel. 
*/ enum pci_flags_bit { PCI_USES_MASTER=4, }; enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; enum vortex_chips { CH_3C590 = 0, CH_3C592, CH_3C597, CH_3C595_1, CH_3C595_2, CH_3C595_3, CH_3C900_1, CH_3C900_2, CH_3C900_3, CH_3C900_4, CH_3C900_5, CH_3C900B_FL, CH_3C905_1, CH_3C905_2, CH_3C905B_TX, CH_3C905B_1, CH_3C905B_2, CH_3C905B_FX, CH_3C905C, CH_3C9202, CH_3C980, CH_3C9805, CH_3CSOHO100_TX, CH_3C555, CH_3C556, CH_3C556B, CH_3C575, CH_3C575_1, CH_3CCFE575, CH_3CCFE575CT, CH_3CCFE656, CH_3CCFEM656, CH_3CCFEM656_1, CH_3C450, CH_3C920, CH_3C982A, CH_3C982B, CH_905BT4, CH_920B_EMB_WNM, }; /* note: this array directly indexed by above enums, and MUST * be kept in sync with both the enums above, and the PCI device * table below */ static struct vortex_chip_info { const char *name; int flags; int drv_flags; int io_size; } vortex_info_tbl[] = { {"3c590 Vortex 10Mbps", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseTx", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseT4", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100base-MII", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c900 Boomerang 10baseT", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Boomerang 10Mbps Combo", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900 Cyclone 10Mbps Combo", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 
128, }, {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900B-FL Cyclone 10base-FL", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905 Boomerang 100baseTx", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3c905 Boomerang 100baseT4", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3C905B-TX Fast Etherlink XL PCI", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 100baseTx", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 10/100/BNC", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c905B-FX Cyclone 100baseFx", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905C Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c980C Python-T", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3cSOHO100-TX Hurricane", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c555 Laptop Hurricane", PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, {"3c556 Laptop Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| HAS_HWCKSM, 128, }, {"3c556B Laptop Hurricane", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| WNO_XCVR_PWR|HAS_HWCKSM, 128, }, {"3c575 [Megahertz] 10/100 LAN CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3c575 Boomerang CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3CCFE575BT Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFE575CT Tornado CardBus", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| MAX_COLLISION_RESET|HAS_HWCKSM, 128, 
}, {"3CCFE656 Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFEM656B Cyclone+Winmodem CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c920 Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c982 Hydra Dual Port A", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c982 Hydra Dual Port B", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c905B-T4", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {NULL,}, /* NULL terminated list. 
*/ }; static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = { { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, { 0x10B7, 0x6562, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
	{0,}						/* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);

/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows.  There are eight register windows, with the command
   and status registers available in each.  */
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameters bits was fine for ethernet, but the new chip
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */
enum vortex_cmd {
	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
	UpStall = 6<<11, UpUnstall = (6<<11)+1,
	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};

/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };

/* Bits in the general status register. */
enum vortex_status {
	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
};

/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
	RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus=0x0E,		/* Valid in all windows. */
};
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
	DriverTune=13, Checksum=15};

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions=12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
};

/* Bitfield extract/insert helpers for the packed InternalConfig register. */
#define BFEXT(value, offset, bitcount)	\
	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

#define BFINS(lhs, rhs, offset, bitcount)			\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Fields of the Wn3_Config (InternalConfig) register. */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)		BFEXT(v, 3, 1)
#define RAM_SPEED(v)		BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)		BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)		BFEXT(v, 24, 1)

enum Window4 {		/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
};
enum Win4_Media_bits {
	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,		/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {			/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};

/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment contraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG	0x80000000	/* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE	0x00010000	/* This packet has been downloaded */
struct boom_rx_desc {
	__le32 next;			/* Last entry points to 0.   */
	__le32 status;
	__le32 addr;			/* Up to 63 addr/len pairs possible. */
	__le32 length;			/* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete=0x00008000, RxDError=0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};

#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

struct boom_tx_desc {
	__le32 next;			/* Last entry points to 0.   */
	__le32 status;			/* bits 0:12 length, others see below.  */
#if DO_ZEROCOPY
	struct {
		__le32 addr;
		__le32 length;
	} frag[1+MAX_SKB_FRAGS];
#else
		__le32 addr;
		__le32 length;
#endif
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable=0x2000, TxDComplete=0x8000,
	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
	TxIntrUploaded=0x80000000,	/* IRQ when in FIFO, but maybe not sent. */
};

/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };

/* Per-NIC extra statistics, exposed through ethtool (see ethtool_stats_keys). */
struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};

/* Driver-private state, stored in netdev_priv(dev). */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;		/* Packet being eaten by bus master ctrl.  */
	dma_addr_t tx_skb_dma;		/* Allocated DMA address for bus master ctrl DMA.   */

	/* PCI configuration space information. */
	struct device *gendev;
	void __iomem *ioaddr;		/* IO address space */
	void __iomem *cb_fn_base;	/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;	/* Media selection timer. */
	struct timer_list rx_oom_timer;	/* Rx skb allocation retry timer */
	int options;			/* User-settable misc. driver options. */
	unsigned int media_override:4,	/* Passed-in media type. */
		default_media:4,	/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,		/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		flow_ctrl:1,		/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,	/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,		/* Wake-on-LAN is enabled */
		pm_state_valid:1,	/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,
		large_frames:1,		/* accept large frames */
		handling_irq:1;		/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;		/* From Wn3_Options. */
	u16 capabilities, info1, info2;	/* Various, from EEPROM. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[2];		/* MII device addresses. */
	u16 deferred;			/* Resend these interrupts when we
					 * bale from the ISR */
	u16 io_size;			/* Size of PCI region (for release_region) */

	/* Serialises access to hardware other than MII and variables below.
	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
	spinlock_t lock;

	spinlock_t mii_lock;		/* Serialises access to MII */
	struct mii_if_info mii;		/* MII lib hooks/info */
	spinlock_t window_lock;		/* Serialises access to windowed regs */
	int window;			/* Register window (cached; -1 = unknown) */
};

/* Select register window 'window' on the chip, skipping the SelectWindow
 * command when the cached vp->window already matches.  Caller must hold
 * window_lock (or otherwise serialise window accesses). */
static void window_set(struct vortex_private *vp, int window)
{
	if (window != vp->window) {
		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
		vp->window = window;
	}
}

/* Generate window_read{8,16,32}() / window_write{8,16,32}() accessors
 * that select the requested window and touch the register while holding
 * window_lock with interrupts disabled. */
#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	unsigned long flags;						\
	u ## size ret;							\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	ret = ioread ## size(vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
	return ret;							\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	unsigned long flags;						\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)

#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp)							\
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp)							\
	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))

/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
 */
enum xcvr_types {
	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
};

/* Media selection table: indexed by xcvr_types; 'next' chains the
 * autoselect probe order. */
static const struct media_table {
	char *name;
	unsigned int media_bits:16,	/* Bits to set in Wn4_Media register. */
		mask:8,			/* The transceiver-present bit in Wn3_Config.*/
		next:8;			/* The media type to try next. */
	int wait;			/* Time before we check media status. */
} media_tbl[] = {
	{ "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
	{ "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
	{ "undefined", 0, 0x80, XCVR_10baseT, 10000},
	{ "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
	{ "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
	{ "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
	{ "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
	{ "undefined", 0, 0x01, XCVR_10baseT, 10000},
	{ "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
	{ "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
	{ "Default", 0, 0xFF, XCVR_10baseT, 10000},
};

/* ethtool stat names, in the same order as struct vortex_extra_stats. */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_deferred" },
	{ "tx_max_collisions" },
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "rx_bad_ssd" },
};

/* number of ETHTOOL_GSTATS u64's */
#define VORTEX_NUM_STATS 5

static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
			 int chip_idx, int card_idx);
static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
static void rx_oom_timer(unsigned long arg);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
				     struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
static struct net_device_stats *vortex_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
static void vortex_tx_timeout(struct net_device *dev);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);

/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

static int vortex_cards_found;

module_param(debug, int, 0);
module_param(global_options, int, 0);
module_param_array(options, int, NULL, 0);
module_param(global_full_duplex, int, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param_array(hw_checksums, int, NULL, 0);
module_param_array(flow_ctrl, int, NULL, 0);
module_param(global_enable_wol, int, 0);
module_param_array(enable_wol, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(compaq_ioaddr, int, 0);
module_param(compaq_irq, int, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
module_param(global_use_mmio, int, 0);
module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset"); MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; local_irq_save(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); local_irq_restore(flags); } #endif #ifdef CONFIG_PM static int vortex_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); if (!ndev || !netif_running(ndev)) return 0; netif_device_detach(ndev); vortex_down(ndev, 1); return 0; } static int vortex_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); int err; if (!ndev || !netif_running(ndev)) return 0; err = vortex_up(ndev); if (err) return err; netif_device_attach(ndev); return 0; } static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, .thaw = vortex_resume, .poweroff = vortex_suspend, .restore = vortex_resume, }; #define VORTEX_PM_OPS (&vortex_pm_ops) #else /* !CONFIG_PM */ #define VORTEX_PM_OPS NULL #endif /* !CONFIG_PM */ #ifdef CONFIG_EISA static struct eisa_device_id vortex_eisa_ids[] = { { "TCM5920", CH_3C592 }, { "TCM5970", CH_3C597 }, { "" } }; MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); static int __init vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; edev = to_eisa_device(device); if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) return -EBUSY; ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE); if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 
0xC88) >> 12, edev->id.driver_data, vortex_cards_found)) { release_region(edev->base_addr, VORTEX_TOTAL_SIZE); return -ENODEV; } vortex_cards_found++; return 0; } static int vortex_eisa_remove(struct device *device) { struct eisa_device *edev; struct net_device *dev; struct vortex_private *vp; void __iomem *ioaddr; edev = to_eisa_device(device); dev = eisa_get_drvdata(edev); if (!dev) { pr_err("vortex_eisa_remove called for Compaq device!\n"); BUG(); } vp = netdev_priv(dev); ioaddr = vp->ioaddr; unregister_netdev(dev); iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); release_region(edev->base_addr, VORTEX_TOTAL_SIZE); free_netdev(dev); return 0; } static struct eisa_driver vortex_eisa_driver = { .id_table = vortex_eisa_ids, .driver = { .name = "3c59x", .probe = vortex_eisa_probe, .remove = vortex_eisa_remove } }; #endif /* CONFIG_EISA */ /* returns count found (>= 0), or negative on error */ static int __init vortex_eisa_init(void) { int eisa_found = 0; int orig_cards_found = vortex_cards_found; #ifdef CONFIG_EISA int err; err = eisa_driver_register (&vortex_eisa_driver); if (!err) { /* * Because of the way EISA bus is probed, we cannot assume * any device have been found when we exit from * eisa_driver_register (the bus root driver may not be * initialized yet). So we blindly assume something was * found, and let the sysfs magic happened... */ eisa_found = 1; } #endif /* Special code to work-around the Compaq PCI BIOS32 problem. 
*/ if (compaq_ioaddr) { vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE), compaq_irq, compaq_device_id, vortex_cards_found++); } return vortex_cards_found - orig_cards_found + eisa_found; } /* returns count (>= 0), or negative on error */ static int vortex_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc, unit, pci_bar; struct vortex_chip_info *vci; void __iomem *ioaddr; /* wake up and enable device */ rc = pci_enable_device(pdev); if (rc < 0) goto out; rc = pci_request_regions(pdev, DRV_NAME); if (rc < 0) { pci_disable_device(pdev); goto out; } unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { /* Determine the default if the user didn't override us */ vci = &vortex_info_tbl[ent->driver_data]; pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) pci_bar = use_mmio[unit] ? 1 : 0; else pci_bar = global_use_mmio ? 1 : 0; ioaddr = pci_iomap(pdev, pci_bar, 0); if (!ioaddr) /* If mapping fails, fall-back to BAR 0... 
*/ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; } rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); pci_release_regions(pdev); pci_disable_device(pdev); goto out; } vortex_cards_found++; out: return rc; } static const struct net_device_ops boomrang_netdev_ops = { .ndo_open = vortex_open, .ndo_stop = vortex_close, .ndo_start_xmit = boomerang_start_xmit, .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI .ndo_do_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_vortex, #endif }; static const struct net_device_ops vortex_netdev_ops = { .ndo_open = vortex_open, .ndo_stop = vortex_close, .ndo_start_xmit = vortex_start_xmit, .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI .ndo_do_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_vortex, #endif }; /* * Start up the PCI/EISA device which is described by *gendev. * Return 0 on success. 
* * NOTE: pdev can be NULL, for the case of a Compaq device */ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, int chip_idx, int card_idx) { struct vortex_private *vp; int option; unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ int i, step; struct net_device *dev; static int printed_version; int retval, print_info; struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; const char *print_name = "3c59x"; struct pci_dev *pdev = NULL; struct eisa_device *edev = NULL; if (!printed_version) { pr_info("%s", version); printed_version = 1; } if (gendev) { if ((pdev = DEVICE_PCI(gendev))) { print_name = pci_name(pdev); } if ((edev = DEVICE_EISA(gendev))) { print_name = dev_name(&edev->dev); } } dev = alloc_etherdev(sizeof(*vp)); retval = -ENOMEM; if (!dev) goto out; SET_NETDEV_DEV(dev, gendev); vp = netdev_priv(dev); option = global_options; /* The lower four bits are the media type. */ if (dev->mem_start) { /* * The 'options' param is passed in as the third arg to the * LILO 'ether=' argument for non-modular use */ option = dev->mem_start; } else if (card_idx < MAX_UNITS) { if (options[card_idx] >= 0) option = options[card_idx]; } if (option > 0) { if (option & 0x8000) vortex_debug = 7; if (option & 0x4000) vortex_debug = 2; if (option & 0x0400) vp->enable_wol = 1; } print_info = (vortex_debug > 1); if (print_info) pr_info("See Documentation/networking/vortex.txt\n"); pr_info("%s: 3Com %s %s at %p.\n", print_name, pdev ? "PCI" : "EISA", vci->name, ioaddr); dev->base_addr = (unsigned long)ioaddr; dev->irq = irq; dev->mtu = mtu; vp->ioaddr = ioaddr; vp->large_frames = mtu > 1500; vp->drv_flags = vci->drv_flags; vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 
1 : 0; vp->io_size = vci->io_size; vp->card_idx = card_idx; vp->window = -1; /* module list only for Compaq device */ if (gendev == NULL) { compaq_net_device = dev; } /* PCI-only startup logic */ if (pdev) { /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); if (vci->drv_flags & IS_VORTEX) { u8 pci_latency; u8 new_latency = 248; /* Check the PCI latency value. On the 3c590 series the latency timer must be set to the maximum value to avoid data corruption that occurs when the timer expires during a transfer. This bug exists the Vortex chip only. */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency < new_latency) { pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n", print_name, pci_latency, new_latency); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); } } } spin_lock_init(&vp->lock); spin_lock_init(&vp->mii_lock); spin_lock_init(&vp->window_lock); vp->gendev = gendev; vp->mii.dev = dev; vp->mii.mdio_read = mdio_read; vp->mii.mdio_write = mdio_write; vp->mii.phy_id_mask = 0x1f; vp->mii.reg_num_mask = 0x1f; /* Makes sure rings are at least 16 byte aligned. */ vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; /* if we are a PCI driver, we store info in pdev->driver_data * instead of a module list */ if (pdev) pci_set_drvdata(pdev, dev); if (edev) eisa_set_drvdata(edev, dev); vp->media_override = 7; if (option >= 0) { vp->media_override = ((option & 7) == 2) ? 0 : option & 15; if (vp->media_override != 7) vp->medialock = 1; vp->full_duplex = (option & 0x200) ? 1 : 0; vp->bus_master = (option & 16) ? 
1 : 0; } if (global_full_duplex > 0) vp->full_duplex = 1; if (global_enable_wol > 0) vp->enable_wol = 1; if (card_idx < MAX_UNITS) { if (full_duplex[card_idx] > 0) vp->full_duplex = 1; if (flow_ctrl[card_idx] > 0) vp->flow_ctrl = 1; if (enable_wol[card_idx] > 0) vp->enable_wol = 1; } vp->mii.force_media = vp->full_duplex; vp->options = option; /* Read the station address from the EEPROM. */ { int base; if (vci->drv_flags & EEPROM_8BIT) base = 0x230; else if (vci->drv_flags & EEPROM_OFFSET) base = EEPROM_Read + 0x30; else base = EEPROM_Read; for (i = 0; i < 0x40; i++) { int timer; window_write16(vp, base + i, 0, Wn0EepromCmd); /* Pause for at least 162 us. for the read to take place. */ for (timer = 10; timer >= 0; timer--) { udelay(162); if ((window_read16(vp, 0, Wn0EepromCmd) & 0x8000) == 0) break; } eeprom[i] = window_read16(vp, 0, Wn0EepromData); } } for (i = 0; i < 0x18; i++) checksum ^= eeprom[i]; checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ while (i < 0x21) checksum ^= eeprom[i++]; checksum = (checksum ^ (checksum >> 8)) & 0xff; } if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); if (print_info) pr_cont(" %pM", dev->dev_addr); /* Unfortunately an all zero eeprom passes the checksum and this gets found in the wild in failure cases. Crypto is hard 8) */ if (!is_valid_ether_addr(dev->dev_addr)) { retval = -EINVAL; pr_err("*** EEPROM MAC address is invalid.\n"); goto free_ring; /* With every pack */ } for (i = 0; i < 6; i++) window_write8(vp, dev->dev_addr[i], 2, i); if (print_info) pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (dev->irq <= 0 || dev->irq >= nr_irqs) pr_warning(" *** Warning: IRQ %d is unlikely to work! 
***\n", dev->irq); step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; if (print_info) { pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); } if (pdev && vci->drv_flags & HAS_CB_FNS) { unsigned short n; vp->cb_fn_base = pci_iomap(pdev, 2, 0); if (!vp->cb_fn_base) { retval = -ENOMEM; goto free_ring; } if (print_info) { pr_info("%s: CardBus functions mapped %16.16llx->%p\n", print_name, (unsigned long long)pci_resource_start(pdev, 2), vp->cb_fn_base); } n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; if (vp->drv_flags & INVERT_LED_PWR) n |= 0x10; if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; window_write16(vp, n, 2, Wn2_ResetOptions); if (vp->drv_flags & WNO_XCVR_PWR) { window_write16(vp, 0x0800, 0, 0); } } /* Extract our information from the EEPROM data. */ vp->info1 = eeprom[13]; vp->info2 = eeprom[15]; vp->capabilities = eeprom[16]; if (vp->info1 & 0x8000) { vp->full_duplex = 1; if (print_info) pr_info("Full duplex capable\n"); } { static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; unsigned int config; vp->available_media = window_read16(vp, 3, Wn3_Options); if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ vp->available_media = 0x40; config = window_read32(vp, 3, Wn3_Config); if (print_info) { pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", config, window_read16(vp, 3, Wn3_Options)); pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", 8 << RAM_SIZE(config), RAM_WIDTH(config) ? "word" : "byte", ram_split[RAM_SPLIT(config)], AUTOSELECT(config) ? "autoselect/" : "", XCVR(config) > XCVR_ExtMII ? 
"<invalid transceiver>" : media_tbl[XCVR(config)].name); } vp->default_media = XCVR(config); if (vp->default_media == XCVR_NWAY) vp->has_nway = 1; vp->autoselect = AUTOSELECT(config); } if (vp->media_override != 7) { pr_info("%s: Media override to transceiver type %d (%s).\n", print_name, vp->media_override, media_tbl[vp->media_override].name); dev->if_port = vp->media_override; } else dev->if_port = vp->default_media; if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { int phy, phy_idx = 0; mii_preamble_required++; if (vp->drv_flags & EXTRA_PREAMBLE) mii_preamble_required++; mdio_sync(vp, 32); mdio_read(dev, 24, MII_BMSR); for (phy = 0; phy < 32 && phy_idx < 1; phy++) { int mii_status, phyx; /* * For the 3c905CX we look at index 24 first, because it bogusly * reports an external PHY at all indices */ if (phy == 0) phyx = 24; else if (phy <= 24) phyx = phy - 1; else phyx = phy; mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status && mii_status != 0xffff) { vp->phys[phy_idx++] = phyx; if (print_info) { pr_info(" MII transceiver found at address %d, status %4x.\n", phyx, mii_status); } if ((mii_status & 0x0040) == 0) mii_preamble_required++; } } mii_preamble_required--; if (phy_idx == 0) { pr_warning(" ***WARNING*** No MII transceivers found!\n"); vp->phys[0] = 24; } else { vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); if (vp->full_duplex) { /* Only advertise the FD media types. */ vp->advertising &= ~0x02A0; mdio_write(dev, vp->phys[0], 4, vp->advertising); } } vp->mii.phy_id = vp->phys[0]; } if (vp->capabilities & CapBusMaster) { vp->full_bus_master_tx = 1; if (print_info) { pr_info(" Enabling bus-master transmits and %s receives.\n", (vp->info2 & 1) ? "early" : "whole-frame" ); } vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; vp->bus_master = 0; /* AKPM: vortex only */ } /* The 3c59x-specific entries in the device structure. 
*/ if (vp->full_bus_master_tx) { dev->netdev_ops = &boomrang_netdev_ops; /* Actually, it still should work with iommu. */ if (card_idx < MAX_UNITS && ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || hw_checksums[card_idx] == 1)) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; } } else dev->netdev_ops = &vortex_netdev_ops; if (print_info) { pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n", print_name, (dev->features & NETIF_F_SG) ? "en":"dis", (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); } dev->ethtool_ops = &vortex_ethtool_ops; dev->watchdog_timeo = (watchdog * HZ) / 1000; if (pdev) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp)); acpi_set_WOL(dev); } retval = register_netdev(dev); if (retval == 0) return 0; free_ring: pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: return retval; } static void issue_and_wait(struct net_device *dev, int cmd) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int i; iowrite16(cmd, ioaddr + EL3_CMD); for (i = 0; i < 2000; i++) { if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) return; } /* OK, that didn't work. Do it the slow way. One second */ for (i = 0; i < 100000; i++) { if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { if (vortex_debug > 1) pr_info("%s: command 0x%04x took %d usecs\n", dev->name, cmd, i * 10); return; } udelay(10); } pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); } static void vortex_set_duplex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); pr_info("%s: setting %s-duplex.\n", dev->name, (vp->full_duplex) ? "full" : "half"); /* Set the full-duplex bit. */ window_write16(vp, ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | (vp->large_frames ? 
0x40 : 0) | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), 3, Wn3_MAC_Ctrl); } static void vortex_check_media(struct net_device *dev, unsigned int init) { struct vortex_private *vp = netdev_priv(dev); unsigned int ok_to_print = 0; if (vortex_debug > 3) ok_to_print = 1; if (mii_check_media(&vp->mii, ok_to_print, init)) { vp->full_duplex = vp->mii.full_duplex; vortex_set_duplex(dev); } else if (init) { vortex_set_duplex(dev); } } static int vortex_up(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned int config; int i, mii_reg1, mii_reg5, err = 0; if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); err = pci_enable_device(VORTEX_PCI(vp)); if (err) { pr_warning("%s: Could not enable device\n", dev->name); goto err_out; } } /* Before initializing select the active media port. */ config = window_read32(vp, 3, Wn3_Config); if (vp->media_override != 7) { pr_info("%s: Media override to transceiver %d (%s).\n", dev->name, vp->media_override, media_tbl[vp->media_override].name); dev->if_port = vp->media_override; } else if (vp->autoselect) { if (vp->has_nway) { if (vortex_debug > 1) pr_info("%s: using NWAY device table, not %d\n", dev->name, dev->if_port); dev->if_port = XCVR_NWAY; } else { /* Find first available media type, starting with 100baseTx. */ dev->if_port = XCVR_100baseTx; while (! 
(vp->available_media & media_tbl[dev->if_port].mask)) dev->if_port = media_tbl[dev->if_port].next; if (vortex_debug > 1) pr_info("%s: first available media type: %s\n", dev->name, media_tbl[dev->if_port].name); } } else { dev->if_port = vp->default_media; if (vortex_debug > 1) pr_info("%s: using default media %s\n", dev->name, media_tbl[dev->if_port].name); } init_timer(&vp->timer); vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); vp->timer.data = (unsigned long)dev; vp->timer.function = vortex_timer; /* timer handler */ add_timer(&vp->timer); init_timer(&vp->rx_oom_timer); vp->rx_oom_timer.data = (unsigned long)dev; vp->rx_oom_timer.function = rx_oom_timer; if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", dev->name, media_tbl[dev->if_port].name); vp->full_duplex = vp->mii.force_media; config = BFINS(config, dev->if_port, 20, 4); if (vortex_debug > 6) pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); window_write32(vp, config, 3, Wn3_Config); if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); vp->mii.full_duplex = vp->full_duplex; vortex_check_media(dev, 1); } else vortex_set_duplex(dev); issue_and_wait(dev, TxReset); /* * Don't reset the PHY - that upsets autonegotiation during DHCP operations. */ issue_and_wait(dev, RxReset|0x04); iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); if (vortex_debug > 1) { pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); } /* Set the station address and mask in window 2 each time opened. 
*/
    for (i = 0; i < 6; i++)
        window_write8(vp, dev->dev_addr[i], 2, i);
    /* Clear the station-address mask registers (offsets 6..11). */
    for (; i < 12; i+=2)
        window_write16(vp, 0, 2, i);

    if (vp->cb_fn_base) {
        /* CardBus: fix up LED/MII power polarity in ResetOptions. */
        unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
        if (vp->drv_flags & INVERT_LED_PWR)
            n |= 0x10;
        if (vp->drv_flags & INVERT_MII_PWR)
            n |= 0x4000;
        window_write16(vp, n, 2, Wn2_ResetOptions);
    }

    if (dev->if_port == XCVR_10base2)
        /* Start the thinnet transceiver. We should really wait 50ms...*/
        iowrite16(StartCoax, ioaddr + EL3_CMD);
    if (dev->if_port != XCVR_NWAY) {
        window_write16(vp,
                   (window_read16(vp, 4, Wn4_Media) &
                    ~(Media_10TP|Media_SQE)) |
                   media_tbl[dev->if_port].media_bits,
                   4, Wn4_Media);
    }

    /* Switch to the stats window, and clear all stats by reading. */
    iowrite16(StatsDisable, ioaddr + EL3_CMD);
    for (i = 0; i < 10; i++)
        window_read8(vp, 6, i);
    window_read16(vp, 6, 10);
    window_read16(vp, 6, 12);
    /* New: On the Vortex we must also clear the BadSSD counter. */
    window_read8(vp, 4, 12);
    /* ..and on the Boomerang we enable the extra statistics bits. */
    window_write16(vp, 0x0040, 4, Wn4_NetDiag);

    if (vp->full_bus_master_rx) { /* Boomerang bus master. */
        vp->cur_rx = vp->dirty_rx = 0;
        /* Initialize the RxEarly register as recommended. */
        iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
        iowrite32(0x0020, ioaddr + PktStatus);
        iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
    }
    if (vp->full_bus_master_tx) {       /* Boomerang bus master Tx. */
        vp->cur_tx = vp->dirty_tx = 0;
        if (vp->drv_flags & IS_BOOMERANG)
            iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
        /* Clear the Rx, Tx rings. */
        for (i = 0; i < RX_RING_SIZE; i++)  /* AKPM: this is done in vortex_open, too */
            vp->rx_ring[i].status = 0;
        for (i = 0; i < TX_RING_SIZE; i++)
            vp->tx_skbuff[i] = NULL;
        iowrite32(0, ioaddr + DownListPtr);
    }
    /* Set receiver mode: presumably accept b-case and phys addr only.
     */
    set_rx_mode(dev);
    /* enable 802.1q tagged frames */
    set_8021q_mode(dev, 1);
    iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

    iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
    iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
    /* Allow status bits to be seen. */
    vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
        (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
        (vp->full_bus_master_rx ? UpComplete : RxComplete) |
        (vp->bus_master ? DMADone : 0);
    vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
        (vp->full_bus_master_rx ? 0 : RxComplete) |
        StatsFull | HostError | TxComplete | IntReq
        | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
    iowrite16(vp->status_enable, ioaddr + EL3_CMD);
    /* Ack all pending events, and set active indicator mask. */
    iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
         ioaddr + EL3_CMD);
    iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
    if (vp->cb_fn_base)         /* The PCMCIA people are idiots.  */
        iowrite32(0x8000, vp->cb_fn_base + 4);
    netif_start_queue (dev);
err_out:
    return err;
}

/*
 * vortex_open - ndo_open handler: grab the (shared) IRQ, populate the
 * boomerang Rx ring when this chip does bus-master Rx, then call
 * vortex_up() to program and enable the hardware.
 */
static int
vortex_open(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    int i;
    int retval;

    /* Use the now-standard shared IRQ implementation. */
    if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
                boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
        pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
        goto err;
    }

    if (vp->full_bus_master_rx) { /* Boomerang bus master. */
        if (vortex_debug > 2)
            pr_debug("%s: Filling in the Rx ring.\n", dev->name);
        for (i = 0; i < RX_RING_SIZE; i++) {
            struct sk_buff *skb;
            vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma +
                        sizeof(struct boom_rx_desc) * (i+1));
            vp->rx_ring[i].status = 0;  /* Clear complete bit.
*/
            vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
            skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
                         GFP_KERNEL);
            vp->rx_skbuff[i] = skb;
            if (skb == NULL)
                break;          /* Bad news! */

            skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
            vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
                        skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
        }
        if (i != RX_RING_SIZE) {
            /* Allocation failed part-way: free everything we did get. */
            int j;
            pr_emerg("%s: no memory for rx ring\n", dev->name);
            for (j = 0; j < i; j++) {
                if (vp->rx_skbuff[j]) {
                    dev_kfree_skb(vp->rx_skbuff[j]);
                    vp->rx_skbuff[j] = NULL;
                }
            }
            retval = -ENOMEM;
            goto err_free_irq;
        }
        /* Wrap the ring. */
        vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
    }

    retval = vortex_up(dev);
    if (!retval)
        goto out;

err_free_irq:
    free_irq(dev->irq, dev);
err:
    if (vortex_debug > 1)
        pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
out:
    return retval;
}

/*
 * vortex_timer - periodic media-selection/link-watch timer.  Checks the
 * current transceiver for link beat; when autoselecting and no link is
 * present, steps to the next available media port and reprograms the
 * chip.  Re-arms itself via mod_timer().
 */
static void
vortex_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    int next_tick = 60*HZ;
    int ok = 0;
    int media_status;

    if (vortex_debug > 2) {
        pr_debug("%s: Media selection timer tick happened, %s.\n",
               dev->name, media_tbl[dev->if_port].name);
        pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
    }

    media_status = window_read16(vp, 4, Wn4_Media);
    switch (dev->if_port) {
    case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
        if (media_status & Media_LnkBeat) {
            netif_carrier_on(dev);
            ok = 1;
            if (vortex_debug > 1)
                pr_debug("%s: Media %s has link beat, %x.\n",
                       dev->name, media_tbl[dev->if_port].name, media_status);
        } else {
            netif_carrier_off(dev);
            if (vortex_debug > 1) {
                pr_debug("%s: Media %s has no link beat, %x.\n",
                       dev->name, media_tbl[dev->if_port].name, media_status);
            }
        }
        break;
    case XCVR_MII: case XCVR_NWAY:
        {
            /* MII/NWAY media: delegate link detection to the PHY. */
            ok = 1;
            vortex_check_media(dev, 0);
        }
        break;
    default:                    /* Other media types handled by Tx timeouts.
     */
        if (vortex_debug > 1)
            pr_debug("%s: Media %s has no indication, %x.\n",
                   dev->name, media_tbl[dev->if_port].name, media_status);
        ok = 1;
    }

    if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
        next_tick = 5*HZ;

    if (vp->medialock)
        goto leave_media_alone;

    if (!ok) {
        /* No link on the current port: step through the media table to
         * the next available transceiver and reprogram the chip. */
        unsigned int config;

        spin_lock_irq(&vp->lock);

        do {
            dev->if_port = media_tbl[dev->if_port].next;
        } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
        if (dev->if_port == XCVR_Default) { /* Go back to default. */
            dev->if_port = vp->default_media;
            if (vortex_debug > 1)
                pr_debug("%s: Media selection failing, using default %s port.\n",
                       dev->name, media_tbl[dev->if_port].name);
        } else {
            if (vortex_debug > 1)
                pr_debug("%s: Media selection failed, now trying %s port.\n",
                       dev->name, media_tbl[dev->if_port].name);
            next_tick = media_tbl[dev->if_port].wait;
        }
        window_write16(vp,
                   (media_status & ~(Media_10TP|Media_SQE)) |
                   media_tbl[dev->if_port].media_bits,
                   4, Wn4_Media);

        config = window_read32(vp, 3, Wn3_Config);
        config = BFINS(config, dev->if_port, 20, 4);
        window_write32(vp, config, 3, Wn3_Config);

        iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
             ioaddr + EL3_CMD);
        if (vortex_debug > 1)
            pr_debug("wrote 0x%08x to Wn3_Config\n", config);
        /* AKPM: FIXME: Should reset Rx & Tx here.
P60 of 3c90xc.pdf */
        spin_unlock_irq(&vp->lock);
    }

leave_media_alone:
    if (vortex_debug > 2)
        pr_debug("%s: Media selection timer finished, %s.\n",
               dev->name, media_tbl[dev->if_port].name);

    mod_timer(&vp->timer, RUN_AT(next_tick));
    if (vp->deferred)
        /* The ISR deferred some interrupt sources; poke the chip so
         * the deferred events get replayed. */
        iowrite16(FakeIntr, ioaddr + EL3_CMD);
}

/*
 * vortex_tx_timeout - ndo_tx_timeout handler: print diagnostic state,
 * reset the transmitter, restart the Tx ring (bus-master variants) and
 * re-enable Tx.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;

    pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
           dev->name, ioread8(ioaddr + TxStatus),
           ioread16(ioaddr + EL3_STATUS));
    pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
            window_read16(vp, 4, Wn4_NetDiag),
            window_read16(vp, 4, Wn4_Media),
            ioread32(ioaddr + PktStatus),
            window_read16(vp, 4, Wn4_FIFODiag));
    /* Slight code bloat to be user friendly. */
    if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
        pr_err("%s: Transmitter encountered 16 collisions --"
               " network cable problem?\n", dev->name);
    if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
        pr_err("%s: Interrupt posted but not delivered --"
               " IRQ blocked by another device?\n", dev->name);
        /* Bad idea here.. but we might as well handle a few events. */
        {
            /*
             * Block interrupts because vortex_interrupt does a bare spin_lock()
             */
            unsigned long flags;
            local_irq_save(flags);
            if (vp->full_bus_master_tx)
                boomerang_interrupt(dev->irq, dev);
            else
                vortex_interrupt(dev->irq, dev);
            local_irq_restore(flags);
        }
    }

    if (vortex_debug > 0)
        dump_tx_ring(dev);

    issue_and_wait(dev, TxReset);

    dev->stats.tx_errors++;
    if (vp->full_bus_master_tx) {
        pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
        /* Re-point the idle download engine at the first unreaped
         * descriptor, if any work is still outstanding. */
        if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
            iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
                 ioaddr + DownListPtr);
        if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
            netif_wake_queue (dev);
        if (vp->drv_flags & IS_BOOMERANG)
            iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
        iowrite16(DownUnstall, ioaddr + EL3_CMD);
    } else {
        dev->stats.tx_dropped++;
        netif_wake_queue(dev);
    }
    /* Issue Tx Enable */
    iowrite16(TxEnable, ioaddr + EL3_CMD);
    dev->trans_start = jiffies; /* prevent tx timeout */
}

/*
 * Handle uncommon interrupt sources.  This is a separate routine to minimize
 * the cache impact.
 */
static void
vortex_error(struct net_device *dev, int status)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    int do_tx_reset = 0, reset_mask = 0;
    unsigned char tx_status = 0;

    if (vortex_debug > 2) {
        pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
    }

    if (status & TxComplete) {          /* Really "TxError" for us. */
        tx_status = ioread8(ioaddr + TxStatus);
        /* Presumably a tx-timeout. We must merely re-enable. */
        if (vortex_debug > 2 ||
            (tx_status != 0x88 && vortex_debug > 0)) {
            pr_err("%s: Transmit error, Tx status register %2.2x.\n",
                   dev->name, tx_status);
            if (tx_status == 0x82) {
                pr_err("Probably a duplex mismatch. See "
                       "Documentation/networking/vortex.txt\n");
            }
            dump_tx_ring(dev);
        }
        if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
        if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
        if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
        iowrite8(0, ioaddr + TxStatus);
        if (tx_status & 0x30) {         /* txJabber or txUnderrun */
            do_tx_reset = 1;
        } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) {   /* maxCollisions */
            do_tx_reset = 1;
            reset_mask = 0x0108;        /* Reset interface logic, but not download logic */
        } else {                        /* Merely re-enable the transmitter. */
            iowrite16(TxEnable, ioaddr + EL3_CMD);
        }
    }

    if (status & RxEarly)               /* Rx early is unused. */
        iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);

    if (status & StatsFull) {           /* Empty statistics. */
        static int DoneDidThat;
        if (vortex_debug > 4)
            pr_debug("%s: Updating stats.\n", dev->name);
        update_stats(ioaddr, dev);
        /* HACK: Disable statistics as an interrupt source. */
        /* This occurs when we have the wrong media type! */
        if (DoneDidThat == 0 &&
            ioread16(ioaddr + EL3_STATUS) & StatsFull) {
            pr_warning("%s: Updating statistics failed, disabling "
                   "stats as an interrupt source.\n", dev->name);
            iowrite16(SetIntrEnb |
                 (window_read16(vp, 5, 10) & ~StatsFull),
                 ioaddr + EL3_CMD);
            vp->intr_enable &= ~StatsFull;
            DoneDidThat++;
        }
    }
    if (status & IntReq) {      /* Restore all interrupt sources. */
        iowrite16(vp->status_enable, ioaddr + EL3_CMD);
        iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
    }
    if (status & HostError) {
        u16 fifo_diag;
        fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
        pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
               dev->name, fifo_diag);
        /* Adapter failure requires Tx/Rx reset and reinit. */
        if (vp->full_bus_master_tx) {
            int bus_status = ioread32(ioaddr + PktStatus);
            /* 0x80000000 PCI master abort. */
            /* 0x40000000 PCI target abort.
*/
            if (vortex_debug)
                pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);

            /* In this case, blow the card away */
            /* Must not enter D3 or we can't legally issue the reset! */
            vortex_down(dev, 0);
            issue_and_wait(dev, TotalReset | 0xff);
            vortex_up(dev);     /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
        } else if (fifo_diag & 0x0400)
            do_tx_reset = 1;
        if (fifo_diag & 0x3000) {
            /* Reset Rx fifo and upload logic */
            issue_and_wait(dev, RxReset|0x07);
            /* Set the Rx filter to the current state. */
            set_rx_mode(dev);
            /* enable 802.1q VLAN tagged frames */
            set_8021q_mode(dev, 1);
            iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
            iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
        }
    }

    if (do_tx_reset) {
        issue_and_wait(dev, TxReset|reset_mask);
        iowrite16(TxEnable, ioaddr + EL3_CMD);
        if (!vp->full_bus_master_tx)
            netif_wake_queue(dev);
    }
}

/*
 * vortex_start_xmit - ndo_start_xmit for the non-ring ("vortex") chips:
 * push the packet into the Tx FIFO by PIO, or hand it to the single
 * bus-master DMA channel, then drain the Tx status stack.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;

    /* Put out the doubleword header... */
    iowrite32(skb->len, ioaddr + TX_FIFO);
    if (vp->bus_master) {
        /* Set the bus-master controller to transfer the packet. */
        int len = (skb->len + 3) & ~3;
        vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
                        PCI_DMA_TODEVICE);
        spin_lock_irq(&vp->window_lock);
        window_set(vp, 7);
        iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
        iowrite16(len, ioaddr + Wn7_MasterLen);
        spin_unlock_irq(&vp->window_lock);
        vp->tx_skb = skb;
        iowrite16(StartDMADown, ioaddr + EL3_CMD);
        /* netif_wake_queue() will be called at the DMADone interrupt. */
    } else {
        /* ... and the packet rounded to a doubleword. */
        iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
        dev_kfree_skb (skb);
        if (ioread16(ioaddr + TxFree) > 1536) {
            netif_start_queue (dev);    /* AKPM: redundant? */
        } else {
            /* Interrupt us when the FIFO has room for max-sized packet. */
            netif_stop_queue(dev);
            iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
        }
    }

    /* Clear the Tx status stack. */
    {
        int tx_status;
        int i = 32;

        while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
            if (tx_status & 0x3C) {     /* A Tx-disabling error occurred. */
                if (vortex_debug > 2)
                    pr_debug("%s: Tx error, status %2.2x.\n",
                           dev->name, tx_status);
                if (tx_status & 0x04)  dev->stats.tx_fifo_errors++;
                if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
                if (tx_status & 0x30) {
                    issue_and_wait(dev, TxReset);
                }
                iowrite16(TxEnable, ioaddr + EL3_CMD);
            }
            iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
        }
    }
    return NETDEV_TX_OK;
}

/*
 * boomerang_start_xmit - ndo_start_xmit for the bus-master "boomerang"
 * chips: build a Tx descriptor (scatter-gather fragments when
 * DO_ZEROCOPY is enabled), link it into the ring under vp->lock and
 * kick the download engine.
 */
static netdev_tx_t
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    /* Calculate the next Tx descriptor entry. */
    int entry = vp->cur_tx % TX_RING_SIZE;
    struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
    unsigned long flags;

    if (vortex_debug > 6) {
        pr_debug("boomerang_start_xmit()\n");
        pr_debug("%s: Trying to send a packet, Tx index %d.\n",
               dev->name, vp->cur_tx);
    }

    /*
     * We can't allow a recursion from our interrupt handler back into the
     * tx routine, as they take the same spin lock, and that causes
     * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
     * a bit
     */
    if (vp->handling_irq)
        return NETDEV_TX_BUSY;

    if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
        if (vortex_debug > 0)
            pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
                   dev->name);
        netif_stop_queue(dev);
        return NETDEV_TX_BUSY;
    }

    vp->tx_skbuff[entry] = skb;

    vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
    if (skb->ip_summed != CHECKSUM_PARTIAL)
        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
    else
        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);

    if (!skb_shinfo(skb)->nr_frags) {
        /* Linear skb: a single fragment covers the whole packet. */
        vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
                                        skb->len, PCI_DMA_TODEVICE));
        vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
    } else {
        int i;

        vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
                                        skb_headlen(skb), PCI_DMA_TODEVICE));
        vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            vp->tx_ring[entry].frag[i+1].addr =
                    cpu_to_le32(pci_map_single(
                        VORTEX_PCI(vp),
                        (void *)skb_frag_address(frag),
                        skb_frag_size(frag), PCI_DMA_TODEVICE));

            if (i == skb_shinfo(skb)->nr_frags-1)
                    vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
            else
                    vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
        }
    }
#else
    vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
    vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
    vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif

    spin_lock_irqsave(&vp->lock, flags);
    /* Wait for the stall to complete.
*/
    issue_and_wait(dev, DownStall);
    prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
    if (ioread32(ioaddr + DownListPtr) == 0) {
        /* Download engine is idle: point it at the new descriptor. */
        iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc),
             ioaddr + DownListPtr);
        vp->queued_packet++;
    }

    vp->cur_tx++;
    if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
        netif_stop_queue (dev);
    } else {                    /* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
        /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
         * were selected, this would corrupt DN_COMPLETE. No?
         */
        prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
    }
    iowrite16(DownUnstall, ioaddr + EL3_CMD);
    spin_unlock_irqrestore(&vp->lock, flags);
    return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */

/*
 * This is the ISR for the vortex series chips.
 * full_bus_master_tx == 0 && full_bus_master_rx == 0
 */

static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr;
    int status;
    int work_done = max_interrupt_work;
    int handled = 0;

    ioaddr = vp->ioaddr;
    spin_lock(&vp->lock);

    status = ioread16(ioaddr + EL3_STATUS);

    if (vortex_debug > 6)
        pr_debug("vortex_interrupt(). status=0x%4x\n", status);

    if ((status & IntLatch) == 0)
        goto handler_exit;      /* No interrupt: shared IRQs cause this */
    handled = 1;

    if (status & IntReq) {
        /* Replay interrupt sources deferred by an earlier overload. */
        status |= vp->deferred;
        vp->deferred = 0;
    }

    if (status == 0xffff)       /* h/w no longer present (hotplug)?
     */
        goto handler_exit;

    if (vortex_debug > 4)
        pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
               dev->name, status, ioread8(ioaddr + Timer));

    spin_lock(&vp->window_lock);
    window_set(vp, 7);

    do {
        if (vortex_debug > 5)
            pr_debug("%s: In interrupt loop, status %4.4x.\n",
                   dev->name, status);
        if (status & RxComplete)
            vortex_rx(dev);

        if (status & TxAvailable) {
            if (vortex_debug > 5)
                pr_debug(" TX room bit was handled.\n");
            /* There's room in the FIFO for a full-sized packet. */
            iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
            netif_wake_queue (dev);
        }

        if (status & DMADone) {
            if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
                iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
                pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
                if (ioread16(ioaddr + TxFree) > 1536) {
                    /*
                     * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
                     * insufficient FIFO room, the TxAvailable test will succeed and call
                     * netif_wake_queue()
                     */
                    netif_wake_queue(dev);
                } else { /* Interrupt when FIFO has room for max-sized packet. */
                    iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
                    netif_stop_queue(dev);
                }
            }
        }
        /* Check for all uncommon interrupts at once. */
        if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
            if (status == 0xffff)
                break;
            if (status & RxEarly)
                vortex_rx(dev);
            /* vortex_error() may take vp->window_lock itself; drop it
             * around the call and restore our window afterwards. */
            spin_unlock(&vp->window_lock);
            vortex_error(dev, status);
            spin_lock(&vp->window_lock);
            window_set(vp, 7);
        }

        if (--work_done < 0) {
            pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
                   dev->name, status);
            /* Disable all pending interrupts. */
            do {
                vp->deferred |= status;
                iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
                     ioaddr + EL3_CMD);
                iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
            } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
            /* The timer will reenable interrupts.
*/
            mod_timer(&vp->timer, jiffies + 1*HZ);
            break;
        }
        /* Acknowledge the IRQ. */
        iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
    } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

    spin_unlock(&vp->window_lock);

    if (vortex_debug > 4)
        pr_debug("%s: exiting interrupt, status %4.4x.\n",
               dev->name, status);
handler_exit:
    spin_unlock(&vp->lock);
    return IRQ_RETVAL(handled);
}

/*
 * This is the ISR for the boomerang series chips.
 * full_bus_master_tx == 1 && full_bus_master_rx == 1
 */

static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr;
    int status;
    int work_done = max_interrupt_work;

    ioaddr = vp->ioaddr;

    /*
     * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
     * and boomerang_start_xmit
     */
    spin_lock(&vp->lock);
    /* handling_irq makes boomerang_start_xmit back off (same lock). */
    vp->handling_irq = 1;

    status = ioread16(ioaddr + EL3_STATUS);

    if (vortex_debug > 6)
        pr_debug("boomerang_interrupt. status=0x%4x\n", status);

    if ((status & IntLatch) == 0)
        goto handler_exit;      /* No interrupt: shared IRQs can cause this */

    if (status == 0xffff) {     /* h/w no longer present (hotplug)?
     */
        if (vortex_debug > 1)
            pr_debug("boomerang_interrupt(1): status = 0xffff\n");
        goto handler_exit;
    }

    if (status & IntReq) {
        /* Replay interrupt sources deferred by an earlier overload. */
        status |= vp->deferred;
        vp->deferred = 0;
    }

    if (vortex_debug > 4)
        pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
               dev->name, status, ioread8(ioaddr + Timer));
    do {
        if (vortex_debug > 5)
            pr_debug("%s: In interrupt loop, status %4.4x.\n",
                   dev->name, status);
        if (status & UpComplete) {
            iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
            if (vortex_debug > 5)
                pr_debug("boomerang_interrupt->boomerang_rx\n");
            boomerang_rx(dev);
        }

        if (status & DownComplete) {
            /* Reap completed Tx descriptors: unmap their buffers,
             * free the skbs and advance dirty_tx. */
            unsigned int dirty_tx = vp->dirty_tx;

            iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
            while (vp->cur_tx - dirty_tx > 0) {
                int entry = dirty_tx % TX_RING_SIZE;
#if 1   /* AKPM: the latter is faster, but cyclone-only */
                if (ioread32(ioaddr + DownListPtr) ==
                    vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
                    break;          /* It still hasn't been processed. */
#else
                if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
                    break;          /* It still hasn't been processed. */
#endif

                if (vp->tx_skbuff[entry]) {
                    struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
                    int i;
                    for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
                            pci_unmap_single(VORTEX_PCI(vp),
                                             le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
                                             le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
                                             PCI_DMA_TODEVICE);
#else
                    pci_unmap_single(VORTEX_PCI(vp),
                        le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
                    dev_kfree_skb_irq(skb);
                    vp->tx_skbuff[entry] = NULL;
                } else {
                    pr_debug("boomerang_interrupt: no skb!\n");
                }
                /* dev->stats.tx_packets++;  Counted below. */
                dirty_tx++;
            }
            vp->dirty_tx = dirty_tx;
            if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
                if (vortex_debug > 6)
                    pr_debug("boomerang_interrupt: wake queue\n");
                netif_wake_queue (dev);
            }
        }

        /* Check for all uncommon interrupts at once.
*/
        if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
            vortex_error(dev, status);

        if (--work_done < 0) {
            pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
                   dev->name, status);
            /* Disable all pending interrupts. */
            do {
                vp->deferred |= status;
                iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
                     ioaddr + EL3_CMD);
                iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
            } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
            /* The timer will reenable interrupts. */
            mod_timer(&vp->timer, jiffies + 1*HZ);
            break;
        }
        /* Acknowledge the IRQ. */
        iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
        if (vp->cb_fn_base)         /* The PCMCIA people are idiots.  */
            iowrite32(0x8000, vp->cb_fn_base + 4);

    } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);

    if (vortex_debug > 4)
        pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
handler_exit:
    vp->handling_irq = 0;
    spin_unlock(&vp->lock);
    return IRQ_HANDLED;
}

/*
 * vortex_rx - Rx path for the non-ring chips: pull each completed
 * packet out of the Rx FIFO (by PIO, or via the single bus-master DMA
 * channel when it is free) into a freshly allocated skb and hand it to
 * the stack.  Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    int i;
    short rx_status;

    if (vortex_debug > 5)
        pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
               ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
    while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
        if (rx_status & 0x4000) { /* Error, update stats. */
            unsigned char rx_error = ioread8(ioaddr + RxErrors);
            if (vortex_debug > 2)
                pr_debug(" Rx error: status %2.2x.\n", rx_error);
            dev->stats.rx_errors++;
            if (rx_error & 0x01)  dev->stats.rx_over_errors++;
            if (rx_error & 0x02)  dev->stats.rx_length_errors++;
            if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
            if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
            if (rx_error & 0x10)  dev->stats.rx_length_errors++;
        } else {
            /* The packet length: up to 4.5K!.
     */
            int pkt_len = rx_status & 0x1fff;
            struct sk_buff *skb;

            skb = netdev_alloc_skb(dev, pkt_len + 5);
            if (vortex_debug > 4)
                pr_debug("Receiving packet size %d status %4.4x.\n",
                       pkt_len, rx_status);
            if (skb != NULL) {
                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                /* 'skb_put()' points to the start of sk_buff data area. */
                if (vp->bus_master &&
                    ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
                    /* DMA channel free: let the chip copy the packet. */
                    dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
                                       pkt_len, PCI_DMA_FROMDEVICE);
                    iowrite32(dma, ioaddr + Wn7_MasterAddr);
                    iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                    iowrite16(StartDMAUp, ioaddr + EL3_CMD);
                    /* Busy-wait for the upload DMA to finish. */
                    while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
                        ;
                    pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
                } else {
                    ioread32_rep(ioaddr + RX_FIFO,
                                 skb_put(skb, pkt_len),
                                 (pkt_len + 3) >> 2);
                }
                iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                dev->stats.rx_packets++;
                /* Wait a limited time to go to next packet. */
                for (i = 200; i >= 0; i--)
                    if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
                        break;
                continue;
            } else if (vortex_debug > 0)
                pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
                    dev->name, pkt_len);
            dev->stats.rx_dropped++;
        }
        issue_and_wait(dev, RxDiscard);
    }

    return 0;
}

/*
 * boomerang_rx - Rx path for the bus-master chips: walk the Rx
 * descriptor ring, copy packets smaller than rx_copybreak into fresh
 * skbs, pass larger ones up directly, then refill the ring.
 * Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    int entry = vp->cur_rx % RX_RING_SIZE;
    void __iomem *ioaddr = vp->ioaddr;
    int rx_status;
    int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

    if (vortex_debug > 5)
        pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

    while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
        if (--rx_work_limit < 0)
            break;
        if (rx_status & RxDError) { /* Error, update stats.
*/
            unsigned char rx_error = rx_status >> 16;
            if (vortex_debug > 2)
                pr_debug(" Rx error: status %2.2x.\n", rx_error);
            dev->stats.rx_errors++;
            if (rx_error & 0x01)  dev->stats.rx_over_errors++;
            if (rx_error & 0x02)  dev->stats.rx_length_errors++;
            if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
            if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
            if (rx_error & 0x10)  dev->stats.rx_length_errors++;
        } else {
            /* The packet length: up to 4.5K!. */
            int pkt_len = rx_status & 0x1fff;
            struct sk_buff *skb;
            dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

            if (vortex_debug > 4)
                pr_debug("Receiving packet size %d status %4.4x.\n",
                       pkt_len, rx_status);

            /* Check if the packet is long enough to just accept without
               copying to a properly sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                /* 'skb_put()' points to the start of sk_buff data area. */
                memcpy(skb_put(skb, pkt_len),
                       vp->rx_skbuff[entry]->data,
                       pkt_len);
                pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                vp->rx_copy++;
            } else {
                /* Pass up the skbuff already on the Rx ring. */
                skb = vp->rx_skbuff[entry];
                vp->rx_skbuff[entry] = NULL;
                skb_put(skb, pkt_len);
                pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                vp->rx_nocopy++;
            }
            skb->protocol = eth_type_trans(skb, dev);
            {                   /* Use hardware checksum info. */
                int csum_bits = rx_status & 0xee000000;
                if (csum_bits &&
                    (csum_bits == (IPChksumValid | TCPChksumValid) ||
                     csum_bits == (IPChksumValid | UDPChksumValid))) {
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                    vp->rx_csumhits++;
                }
            }
            netif_rx(skb);
            dev->stats.rx_packets++;
        }
        entry = (++vp->cur_rx) % RX_RING_SIZE;
    }
    /* Refill the Rx ring buffers. */
    for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
        struct sk_buff *skb;
        entry = vp->dirty_rx % RX_RING_SIZE;
        if (vp->rx_skbuff[entry] == NULL) {
            skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
            if (skb == NULL) {
                /* Rate-limit the shortage warning to once per 10s. */
                static unsigned long last_jif;
                if (time_after(jiffies, last_jif + 10 * HZ)) {
                    pr_warning("%s: memory shortage\n", dev->name);
                    last_jif = jiffies;
                }
                if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
                    mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
                break;          /* Bad news! */
            }

            vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
            vp->rx_skbuff[entry] = skb;
        }
        vp->rx_ring[entry].status = 0;  /* Clear complete bit. */
        iowrite16(UpUnstall, ioaddr + EL3_CMD);
    }
    return 0;
}

/*
 * If we've hit a total OOM refilling the Rx ring we poll once a second
 * for some memory.  Otherwise there is no way to restart the rx process.
 */
static void
rx_oom_timer(unsigned long arg)
{
    struct net_device *dev = (struct net_device *)arg;
    struct vortex_private *vp = netdev_priv(dev);

    spin_lock_irq(&vp->lock);
    if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)    /* This test is redundant, but makes me feel good */
        boomerang_rx(dev);
    if (vortex_debug > 1) {
        pr_debug("%s: rx_oom_timer %s\n", dev->name,
            ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
    }
    spin_unlock_irq(&vp->lock);
}

/*
 * vortex_down - quiesce the hardware: stop the queue and both timers,
 * disable statistics, Rx and Tx, and clear the DMA list pointers.
 * @final_down: non-zero when closing for good (saves PCI state and arms
 * wake-on-LAN via acpi_set_WOL()); zero for a reset-and-reinit cycle.
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;

    netif_stop_queue (dev);

    del_timer_sync(&vp->rx_oom_timer);
    del_timer_sync(&vp->timer);

    /* Turn off statistics ASAP. We update dev->stats below. */
    iowrite16(StatsDisable, ioaddr + EL3_CMD);

    /* Disable the receiver and transmitter. */
    iowrite16(RxDisable, ioaddr + EL3_CMD);
    iowrite16(TxDisable, ioaddr + EL3_CMD);

    /* Disable receiving 802.1q tagged frames */
    set_8021q_mode(dev, 0);

    if (dev->if_port == XCVR_10base2)
        /* Turn off thinnet power. Green!
*/
        iowrite16(StopCoax, ioaddr + EL3_CMD);

    /* Mask all interrupt sources. */
    iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

    update_stats(ioaddr, dev);
    if (vp->full_bus_master_rx)
        iowrite32(0, ioaddr + UpListPtr);
    if (vp->full_bus_master_tx)
        iowrite32(0, ioaddr + DownListPtr);

    if (final_down && VORTEX_PCI(vp)) {
        vp->pm_state_valid = 1;
        pci_save_state(VORTEX_PCI(vp));
        acpi_set_WOL(dev);
    }
}

/*
 * vortex_close - ndo_stop handler: bring the hardware down, free the
 * IRQ and release every Rx/Tx ring buffer together with its DMA
 * mapping.  Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    int i;

    if (netif_device_present(dev))
        vortex_down(dev, 1);

    if (vortex_debug > 1) {
        pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
               dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
        pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
               " tx_queued %d Rx pre-checksummed %d.\n",
               dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
    }

#if DO_ZEROCOPY
    if (vp->rx_csumhits &&
        (vp->drv_flags & HAS_HWCKSM) == 0 &&
        (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
        pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
    }
#endif

    free_irq(dev->irq, dev);

    if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
        for (i = 0; i < RX_RING_SIZE; i++)
            if (vp->rx_skbuff[i]) {
                pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
                                    PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb(vp->rx_skbuff[i]);
                vp->rx_skbuff[i] = NULL;
            }
    }
    if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
        for (i = 0; i < TX_RING_SIZE; i++) {
            if (vp->tx_skbuff[i]) {
                struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
                int k;

                for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
                        pci_unmap_single(VORTEX_PCI(vp),
                                         le32_to_cpu(vp->tx_ring[i].frag[k].addr),
                                         le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
                                         PCI_DMA_TODEVICE);
#else
                pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
                dev_kfree_skb(skb);
                vp->tx_skbuff[i] = NULL;
            }
        }
    }

    return 0;
}

/*
 * dump_tx_ring - debug helper: print the Tx ring descriptors and the
 * hardware's download-list pointer.  Stalls the download engine around
 * the dump (and unstalls it afterwards unless it was already stalled)
 * so the ring is stable while being printed.
 */
static void
dump_tx_ring(struct net_device *dev)
{
    if (vortex_debug > 0) {
        struct vortex_private *vp = netdev_priv(dev);
        void __iomem *ioaddr = vp->ioaddr;

        if (vp->full_bus_master_tx) {
            int i;
            int stalled = ioread32(ioaddr + PktStatus) & 0x04;  /* Possible racy. But it's only debug stuff */

            pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
                    vp->full_bus_master_tx,
                    vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
                    vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
            pr_err(" Transmit list %8.8x vs. %p.\n",
                   ioread32(ioaddr + DownListPtr),
                   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
            issue_and_wait(dev, DownStall);
            for (i = 0; i < TX_RING_SIZE; i++) {
                unsigned int length;

#if DO_ZEROCOPY
                length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
                length = le32_to_cpu(vp->tx_ring[i].length);
#endif
                pr_err(" %d: @%p length %8.8x status %8.8x\n",
                       i, &vp->tx_ring[i], length,
                       le32_to_cpu(vp->tx_ring[i].status));
            }
            if (!stalled)
                iowrite16(DownUnstall, ioaddr + EL3_CMD);
        }
    }
}

/*
 * vortex_get_stats - ndo_get_stats handler: fold the chip's hardware
 * counters into dev->stats (under vp->lock) and return them.
 */
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
    struct vortex_private *vp = netdev_priv(dev);
    void __iomem *ioaddr = vp->ioaddr;
    unsigned long flags;

    if (netif_device_present(dev)) {    /* AKPM: Used to be netif_running */
        spin_lock_irqsave (&vp->lock, flags);
        update_stats(ioaddr, dev);
        spin_unlock_irqrestore (&vp->lock, flags);
    }
    return &dev->stats;
}

/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing the window setting from underneath us, but we must still guard against a race condition with a StatsUpdate interrupt updating the table. This is done by checking that the ASM (!) code generated uses atomic updates with '+='. */ static void update_stats(void __iomem *ioaddr, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. */ dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); dev->stats.tx_window_errors += window_read8(vp, 6, 4); dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); dev->stats.tx_packets += window_read8(vp, 6, 6); dev->stats.tx_packets += (window_read8(vp, 6, 9) & 0x30) << 4; /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. 
*/ dev->stats.rx_bytes += window_read16(vp, 6, 10); dev->stats.tx_bytes += window_read16(vp, 6, 12); /* Extra stats for get_ethtool_stats() */ vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2); vp->xstats.tx_single_collisions += window_read8(vp, 6, 3); vp->xstats.tx_deferred += window_read8(vp, 6, 8); vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12); dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { u8 up = window_read8(vp, 4, 13); dev->stats.rx_bytes += (up & 0x0f) << 16; dev->stats.tx_bytes += (up & 0xf0) << 12; } } static int vortex_nway_reset(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); return mii_nway_restart(&vp->mii); } static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); return mii_ethtool_gset(&vp->mii, cmd); } static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); return mii_ethtool_sset(&vp->mii, cmd); } static u32 vortex_get_msglevel(struct net_device *dev) { return vortex_debug; } static void vortex_set_msglevel(struct net_device *dev, u32 dbg) { vortex_debug = dbg; } static int vortex_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return VORTEX_NUM_STATS; default: return -EOPNOTSUPP; } } static void vortex_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned long flags; spin_lock_irqsave(&vp->lock, flags); update_stats(ioaddr, dev); spin_unlock_irqrestore(&vp->lock, flags); data[0] = vp->xstats.tx_deferred; data[1] = vp->xstats.tx_max_collisions; data[2] = vp->xstats.tx_multiple_collisions; data[3] = vp->xstats.tx_single_collisions; data[4] = vp->xstats.rx_bad_ssd; } static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 
*data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; default: WARN_ON(1); break; } } static void vortex_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct vortex_private *vp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); if (VORTEX_PCI(vp)) { strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)), sizeof(info->bus_info)); } else { if (VORTEX_EISA(vp)) strlcpy(info->bus_info, dev_name(vp->gendev), sizeof(info->bus_info)); else snprintf(info->bus_info, sizeof(info->bus_info), "EISA 0x%lx %d", dev->base_addr, dev->irq); } } static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return; wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (vp->enable_wol) wol->wolopts |= WAKE_MAGIC; } static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if (wol->wolopts & WAKE_MAGIC) vp->enable_wol = 1; else vp->enable_wol = 0; acpi_set_WOL(dev); return 0; } static const struct ethtool_ops vortex_ethtool_ops = { .get_drvinfo = vortex_get_drvinfo, .get_strings = vortex_get_strings, .get_msglevel = vortex_get_msglevel, .set_msglevel = vortex_set_msglevel, .get_ethtool_stats = vortex_get_ethtool_stats, .get_sset_count = vortex_get_sset_count, .get_settings = vortex_get_settings, .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, .get_wol = vortex_get_wol, .set_wol = vortex_set_wol, }; #ifdef CONFIG_PCI /* * Must power the device up to do MDIO operations */ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int err; struct vortex_private *vp = netdev_priv(dev); pci_power_t state = 0; if(VORTEX_PCI(vp)) state = VORTEX_PCI(vp)->current_state; /* The kernel 
core really should have pci_get_power_state() */ if(state != 0) pci_set_power_state(VORTEX_PCI(vp), PCI_D0); err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); if(state != 0) pci_set_power_state(VORTEX_PCI(vp), state); return err; } #endif /* Pre-Cyclone chips have no documented multicast filter, so the only multicast setting is to receive all multicast frames. At least the chip has a very clean way to set the mode, unlike many others. */ static void set_rx_mode(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int new_mode; if (dev->flags & IFF_PROMISC) { if (vortex_debug > 3) pr_notice("%s: Setting promiscuous mode.\n", dev->name); new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; } else new_mode = SetRxFilter | RxStation | RxBroadcast; iowrite16(new_mode, ioaddr + EL3_CMD); } #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) /* Setup the card so that it can receive frames with an 802.1q VLAN tag. 
Note that this must be done after each RxReset due to some backwards compatibility logic in the Cyclone and Tornado ASICs */ /* The Ethernet Type used for 802.1q tagged frames */ #define VLAN_ETHER_TYPE 0x8100 static void set_8021q_mode(struct net_device *dev, int enable) { struct vortex_private *vp = netdev_priv(dev); int mac_ctrl; if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { /* cyclone and tornado chipsets can recognize 802.1q * tagged frames and treat them correctly */ int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ if (enable) max_pkt_size += 4; /* 802.1Q VLAN tag */ window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); /* set VlanEtherType to let the hardware checksumming treat tagged frames correctly */ window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); } else { /* on older cards we have to enable large frames */ vp->large_frames = dev->mtu > 1500 || enable; mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); if (vp->large_frames) mac_ctrl |= 0x40; else mac_ctrl &= ~0x40; window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); } } #else static void set_8021q_mode(struct net_device *dev, int enable) { } #endif /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See the MII specifications or DP83840A data sheet for details. */ /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues. */ static void mdio_delay(struct vortex_private *vp) { window_read32(vp, 4, Wn4_PhysicalMgmt); } #define MDIO_SHIFT_CLK 0x01 #define MDIO_DIR_WRITE 0x04 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) #define MDIO_DATA_READ 0x02 #define MDIO_ENB_IN 0x00 /* Generate the preamble required for initial synchronization and a few older transceivers. 
*/ static void mdio_sync(struct vortex_private *vp, int bits) { /* Establish sync by sending at least 32 logic ones. */ while (-- bits >= 0) { window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } } static int mdio_read(struct net_device *dev, int phy_id, int location) { int i; struct vortex_private *vp = netdev_priv(dev); int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; unsigned int retval = 0; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the read command bits out. */ for (i = 14; i >= 0; i--) { int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); retval = (retval << 1) | ((window_read16(vp, 4, Wn4_PhysicalMgmt) & MDIO_DATA_READ) ? 1 : 0); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct vortex_private *vp = netdev_priv(dev); int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; int i; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Leave the interface idle. 
*/ for (i = 1; i >= 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); } /* ACPI: Advanced Configuration and Power Interface. */ /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ static void acpi_set_WOL(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; device_set_wakeup_enable(vp->gendev, vp->enable_wol); if (vp->enable_wol) { /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ window_write16(vp, 2, 7, 0x0c); /* The RxFilter must accept the WOL frames. */ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); iowrite16(RxEnable, ioaddr + EL3_CMD); if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) { pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp))); vp->enable_wol = 0; return; } if (VORTEX_PCI(vp)->current_state < PCI_D3hot) return; /* Change the power state to D3; RxEnable doesn't take effect. */ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); } } static void vortex_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct vortex_private *vp; if (!dev) { pr_err("vortex_remove_one called for Compaq device!\n"); BUG(); } vp = netdev_priv(dev); if (vp->cb_fn_base) pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); unregister_netdev(dev); if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); pci_disable_device(VORTEX_PCI(vp)); } /* Should really use issue_and_wait() here */ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 
0x04 : 0x14), vp->ioaddr + EL3_CMD); pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); pci_release_regions(pdev); free_netdev(dev); } static struct pci_driver vortex_driver = { .name = "3c59x", .probe = vortex_init_one, .remove = vortex_remove_one, .id_table = vortex_pci_tbl, .driver.pm = VORTEX_PM_OPS, }; static int vortex_have_pci; static int vortex_have_eisa; static int __init vortex_init(void) { int pci_rc, eisa_rc; pci_rc = pci_register_driver(&vortex_driver); eisa_rc = vortex_eisa_init(); if (pci_rc == 0) vortex_have_pci = 1; if (eisa_rc > 0) vortex_have_eisa = 1; return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; } static void __exit vortex_eisa_cleanup(void) { struct vortex_private *vp; void __iomem *ioaddr; #ifdef CONFIG_EISA /* Take care of the EISA devices */ eisa_driver_unregister(&vortex_eisa_driver); #endif if (compaq_net_device) { vp = netdev_priv(compaq_net_device); ioaddr = ioport_map(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); unregister_netdev(compaq_net_device); iowrite16(TotalReset, ioaddr + EL3_CMD); release_region(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); free_netdev(compaq_net_device); } } static void __exit vortex_cleanup(void) { if (vortex_have_pci) pci_unregister_driver(&vortex_driver); if (vortex_have_eisa) vortex_eisa_cleanup(); } module_init(vortex_init); module_exit(vortex_cleanup);
gpl-2.0
NamJa/surface3-kernel
drivers/misc/ics932s401.c
2335
13543
/* * A driver for the Integrated Circuits ICS932S401 * Copyright (C) 2008 IBM * * Author: Darrick J. Wong <darrick.wong@oracle.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; /* ICS932S401 registers */ #define ICS932S401_REG_CFG2 0x01 #define ICS932S401_CFG1_SPREAD 0x01 #define ICS932S401_REG_CFG7 0x06 #define ICS932S401_FS_MASK 0x07 #define ICS932S401_REG_VENDOR_REV 0x07 #define ICS932S401_VENDOR 1 #define ICS932S401_VENDOR_MASK 0x0F #define ICS932S401_REV 4 #define ICS932S401_REV_SHIFT 4 #define ICS932S401_REG_DEVICE 0x09 #define ICS932S401_DEVICE 11 #define ICS932S401_REG_CTRL 0x0A #define ICS932S401_MN_ENABLED 0x80 #define ICS932S401_CPU_ALT 0x04 #define ICS932S401_SRC_ALT 0x08 #define ICS932S401_REG_CPU_M_CTRL 0x0B #define ICS932S401_M_MASK 0x3F #define ICS932S401_REG_CPU_N_CTRL 0x0C #define ICS932S401_REG_CPU_SPREAD1 0x0D #define ICS932S401_REG_CPU_SPREAD2 0x0E #define ICS932S401_SPREAD_MASK 0x7FFF #define ICS932S401_REG_SRC_M_CTRL 0x0F #define ICS932S401_REG_SRC_N_CTRL 0x10 #define ICS932S401_REG_SRC_SPREAD1 0x11 #define 
ICS932S401_REG_SRC_SPREAD2 0x12 #define ICS932S401_REG_CPU_DIVISOR 0x13 #define ICS932S401_CPU_DIVISOR_SHIFT 4 #define ICS932S401_REG_PCISRC_DIVISOR 0x14 #define ICS932S401_SRC_DIVISOR_MASK 0x0F #define ICS932S401_PCI_DIVISOR_SHIFT 4 /* Base clock is 14.318MHz */ #define BASE_CLOCK 14318 #define NUM_REGS 21 #define NUM_MIRRORED_REGS 15 static int regs_to_copy[NUM_MIRRORED_REGS] = { ICS932S401_REG_CFG2, ICS932S401_REG_CFG7, ICS932S401_REG_VENDOR_REV, ICS932S401_REG_DEVICE, ICS932S401_REG_CTRL, ICS932S401_REG_CPU_M_CTRL, ICS932S401_REG_CPU_N_CTRL, ICS932S401_REG_CPU_SPREAD1, ICS932S401_REG_CPU_SPREAD2, ICS932S401_REG_SRC_M_CTRL, ICS932S401_REG_SRC_N_CTRL, ICS932S401_REG_SRC_SPREAD1, ICS932S401_REG_SRC_SPREAD2, ICS932S401_REG_CPU_DIVISOR, ICS932S401_REG_PCISRC_DIVISOR, }; /* How often do we reread sensors values? (In jiffies) */ #define SENSOR_REFRESH_INTERVAL (2 * HZ) /* How often do we reread sensor limit values? (In jiffies) */ #define LIMIT_REFRESH_INTERVAL (60 * HZ) struct ics932s401_data { struct attribute_group attrs; struct mutex lock; char sensors_valid; unsigned long sensors_last_updated; /* In jiffies */ u8 regs[NUM_REGS]; }; static int ics932s401_probe(struct i2c_client *client, const struct i2c_device_id *id); static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info); static int ics932s401_remove(struct i2c_client *client); static const struct i2c_device_id ics932s401_id[] = { { "ics932s401", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ics932s401_id); static struct i2c_driver ics932s401_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ics932s401", }, .probe = ics932s401_probe, .remove = ics932s401_remove, .id_table = ics932s401_id, .detect = ics932s401_detect, .address_list = normal_i2c, }; static struct ics932s401_data *ics932s401_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ics932s401_data *data = i2c_get_clientdata(client); unsigned long local_jiffies = jiffies; int i, temp; 
mutex_lock(&data->lock); if (time_before(local_jiffies, data->sensors_last_updated + SENSOR_REFRESH_INTERVAL) && data->sensors_valid) goto out; /* * Each register must be read as a word and then right shifted 8 bits. * Not really sure why this is; setting the "byte count programming" * register to 1 does not fix this problem. */ for (i = 0; i < NUM_MIRRORED_REGS; i++) { temp = i2c_smbus_read_word_data(client, regs_to_copy[i]); data->regs[regs_to_copy[i]] = temp >> 8; } data->sensors_last_updated = local_jiffies; data->sensors_valid = 1; out: mutex_unlock(&data->lock); return data; } static ssize_t show_spread_enabled(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD) return sprintf(buf, "1\n"); return sprintf(buf, "0\n"); } /* bit to cpu khz map */ static const int fs_speeds[] = { 266666, 133333, 200000, 166666, 333333, 100000, 400000, 0, }; /* clock divisor map */ static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16, 24, 40, 120}; /* Calculate CPU frequency from the M/N registers. 
*/ static int calculate_cpu_freq(struct ics932s401_data *data) { int m, n, freq; m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK; n = data->regs[ICS932S401_REG_CPU_N_CTRL]; /* Pull in bits 8 & 9 from the M register */ n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1; n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3; freq = BASE_CLOCK * (n + 8) / (m + 2); freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >> ICS932S401_CPU_DIVISOR_SHIFT]; return freq; } static ssize_t show_cpu_clock(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); return sprintf(buf, "%d\n", calculate_cpu_freq(data)); } static ssize_t show_cpu_clock_sel(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); int freq; if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) freq = calculate_cpu_freq(data); else { /* Freq is neatly wrapped up for us */ int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK; freq = fs_speeds[fid]; if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) { switch (freq) { case 166666: freq = 160000; break; case 333333: freq = 320000; break; } } } return sprintf(buf, "%d\n", freq); } /* Calculate SRC frequency from the M/N registers. 
*/ static int calculate_src_freq(struct ics932s401_data *data) { int m, n, freq; m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; n = data->regs[ICS932S401_REG_SRC_N_CTRL]; /* Pull in bits 8 & 9 from the M register */ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; freq = BASE_CLOCK * (n + 8) / (m + 2); freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] & ICS932S401_SRC_DIVISOR_MASK]; return freq; } static ssize_t show_src_clock(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); return sprintf(buf, "%d\n", calculate_src_freq(data)); } static ssize_t show_src_clock_sel(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); int freq; if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) freq = calculate_src_freq(data); else /* Freq is neatly wrapped up for us */ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT && data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT) freq = 96000; else freq = 100000; return sprintf(buf, "%d\n", freq); } /* Calculate PCI frequency from the SRC M/N registers. 
*/ static int calculate_pci_freq(struct ics932s401_data *data) { int m, n, freq; m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; n = data->regs[ICS932S401_REG_SRC_N_CTRL]; /* Pull in bits 8 & 9 from the M register */ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; freq = BASE_CLOCK * (n + 8) / (m + 2); freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >> ICS932S401_PCI_DIVISOR_SHIFT]; return freq; } static ssize_t show_pci_clock(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); return sprintf(buf, "%d\n", calculate_pci_freq(data)); } static ssize_t show_pci_clock_sel(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); int freq; if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) freq = calculate_pci_freq(data); else freq = 33333; return sprintf(buf, "%d\n", freq); } static ssize_t show_value(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_spread(struct device *dev, struct device_attribute *devattr, char *buf); static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL); static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL); static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL); static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL); static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL); static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL); static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL); static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL); static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL); static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL); static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL); static struct attribute *ics932s401_attr[] = { 
&dev_attr_spread_enabled.attr, &dev_attr_cpu_clock_selection.attr, &dev_attr_cpu_clock.attr, &dev_attr_src_clock_selection.attr, &dev_attr_src_clock.attr, &dev_attr_pci_clock_selection.attr, &dev_attr_pci_clock.attr, &dev_attr_usb_clock.attr, &dev_attr_ref_clock.attr, &dev_attr_cpu_spread.attr, &dev_attr_src_spread.attr, NULL }; static ssize_t show_value(struct device *dev, struct device_attribute *devattr, char *buf) { int x; if (devattr == &dev_attr_usb_clock) x = 48000; else if (devattr == &dev_attr_ref_clock) x = BASE_CLOCK; else BUG(); return sprintf(buf, "%d\n", x); } static ssize_t show_spread(struct device *dev, struct device_attribute *devattr, char *buf) { struct ics932s401_data *data = ics932s401_update_device(dev); int reg; unsigned long val; if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)) return sprintf(buf, "0%%\n"); if (devattr == &dev_attr_src_spread) reg = ICS932S401_REG_SRC_SPREAD1; else if (devattr == &dev_attr_cpu_spread) reg = ICS932S401_REG_CPU_SPREAD1; else BUG(); val = data->regs[reg] | (data->regs[reg + 1] << 8); val &= ICS932S401_SPREAD_MASK; /* Scale 0..2^14 to -0.5. 
*/ val = 500000 * val / 16384; return sprintf(buf, "-0.%lu%%\n", val); } /* Return 0 if detection is successful, -ENODEV otherwise */ static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int vendor, device, revision; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV); vendor >>= 8; revision = vendor >> ICS932S401_REV_SHIFT; vendor &= ICS932S401_VENDOR_MASK; if (vendor != ICS932S401_VENDOR) return -ENODEV; device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE); device >>= 8; if (device != ICS932S401_DEVICE) return -ENODEV; if (revision != ICS932S401_REV) dev_info(&adapter->dev, "Unknown revision %d\n", revision); strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); return 0; } static int ics932s401_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ics932s401_data *data; int err; data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->lock); dev_info(&client->dev, "%s chip found\n", client->name); /* Register sysfs hooks */ data->attrs.attrs = ics932s401_attr; err = sysfs_create_group(&client->dev.kobj, &data->attrs); if (err) goto exit_free; return 0; exit_free: kfree(data); exit: return err; } static int ics932s401_remove(struct i2c_client *client) { struct ics932s401_data *data = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &data->attrs); kfree(data); return 0; } module_i2c_driver(ics932s401_driver); MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>"); MODULE_DESCRIPTION("ICS932S401 driver"); MODULE_LICENSE("GPL"); /* IBM IntelliStation Z30 */ MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*"); MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*"); /* IBM x3650/x3550 */ MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*");
gpl-2.0
tohenk/android_kernel_samsung_smdk4x12
fs/attr.c
2335
6938
/* * linux/fs/attr.c * * Copyright (C) 1991, 1992 Linus Torvalds * changes by Thomas Schoebel-Theuer */ #include <linux/module.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/capability.h> #include <linux/fsnotify.h> #include <linux/fcntl.h> #include <linux/security.h> /** * inode_change_ok - check if attribute changes to an inode are allowed * @inode: inode to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr * in the given inode. This includes the normal unix access permission * checks, as well as checks for rlimits and others. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (current_fsuid() != inode->i_uid || attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (current_fsuid() != inode->i_uid || (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) && !capable(CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable(CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. 
*/ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; } EXPORT_SYMBOL(inode_change_ok); /** * inode_newsize_ok - may this inode be truncated to a given size * @inode: the inode to be truncated * @offset: the new size to assign to the inode * @Returns: 0 on success, -ve errno on failure * * inode_newsize_ok must be called with i_mutex held. * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ * when necessary. Caller must not proceed with inode size change if failure is * returned. @inode must be a file (not directory), with appropriate * permissions to allow truncate (inode_newsize_ok does NOT check these * conditions). */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { if (inode->i_size < offset) { unsigned long limit; limit = rlimit(RLIMIT_FSIZE); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) goto out_big; } else { /* * truncation of in-use swapfiles is disallowed - it would * cause subsequent swapout to scribble on the now-freed * blocks. */ if (IS_SWAPFILE(inode)) return -ETXTBSY; } return 0; out_sig: send_sig(SIGXFSZ, current, 0); out_big: return -EFBIG; } EXPORT_SYMBOL(inode_newsize_ok); /** * setattr_copy - copy simple metadata updates into the generic inode * @inode: the inode to be updated * @attr: the new attributes * * setattr_copy must be called with i_mutex held. * * setattr_copy updates the inode's metadata with that specified * in attr. Noticeably missing is inode size update, which is more complex * as it requires pagecache updates. * * The inode is not marked as dirty after this operation. The rationale is * that for "simple" filesystems, the struct inode is the inode storage. * The caller is free to mark the inode dirty afterwards if needed. 
*/ void setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } } EXPORT_SYMBOL(setattr_copy); int notify_change(struct dentry * dentry, struct iattr * attr) { struct inode *inode = dentry->d_inode; mode_t mode = inode->i_mode; int error; struct timespec now; unsigned int ia_valid = attr->ia_valid; if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } if ((ia_valid & ATTR_MODE)) { mode_t amode = attr->ia_mode; /* Flag setting protected by i_mutex */ if (is_sxid(amode)) inode->i_flags &= ~S_NOSEC; } now = current_fs_time(inode->i_sb); attr->ia_ctime = now; if (!(ia_valid & ATTR_ATIME_SET)) attr->ia_atime = now; if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; if (ia_valid & ATTR_KILL_PRIV) { attr->ia_valid &= ~ATTR_KILL_PRIV; ia_valid &= ~ATTR_KILL_PRIV; error = security_inode_need_killpriv(dentry); if (error > 0) error = security_inode_killpriv(dentry); if (error) return error; } /* * We now pass ATTR_KILL_S*ID to the lower level setattr function so * that the function has the ability to reinterpret a mode change * that's due to these bits. This adds an implicit restriction that * no function will ever call notify_change with both ATTR_MODE and * ATTR_KILL_S*ID set. 
 */
	if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) &&
	    (ia_valid & ATTR_MODE))
		BUG();

	/* Translate ATTR_KILL_SUID into an explicit mode change dropping
	 * the setuid bit, using the mode snapshotted above. */
	if (ia_valid & ATTR_KILL_SUID) {
		if (mode & S_ISUID) {
			ia_valid = attr->ia_valid |= ATTR_MODE;
			attr->ia_mode = (inode->i_mode & ~S_ISUID);
		}
	}
	/* setgid is only security-relevant together with group-execute. */
	if (ia_valid & ATTR_KILL_SGID) {
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			if (!(ia_valid & ATTR_MODE)) {
				ia_valid = attr->ia_valid |= ATTR_MODE;
				attr->ia_mode = inode->i_mode;
			}
			attr->ia_mode &= ~S_ISGID;
		}
	}
	/* Nothing left to change (the KILL bits turned out to be no-ops). */
	if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID)))
		return 0;

	error = security_inode_setattr(dentry, attr);
	if (error)
		return error;

	/* Size changes are serialized against in-flight I/O/allocations. */
	if (ia_valid & ATTR_SIZE)
		down_write(&dentry->d_inode->i_alloc_sem);

	if (inode->i_op->setattr)
		error = inode->i_op->setattr(dentry, attr);
	else
		error = simple_setattr(dentry, attr);

	if (ia_valid & ATTR_SIZE)
		up_write(&dentry->d_inode->i_alloc_sem);

	if (!error)
		fsnotify_change(dentry, ia_valid);

	return error;
}
EXPORT_SYMBOL(notify_change);
gpl-2.0
jeboo/kernel_JB_ZSLS6_i777
drivers/s390/cio/qdio_thinint.c
2335
6477
/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * Copyright 2000,2009 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static u8 *tiqdio_alsi;

/* indicator array; entry TIQDIO_SHARED_IND is the shared fallback */
struct indicator_t *q_indicators;

/* timestamp of the most recent adapter interrupt, for debugging */
static u64 last_ai_time;

/*
 * Returns addr for the device state change indicator: the first unused
 * non-shared slot, or the shared indicator once all 63 are taken.
 * Slot ownership is tracked via the per-slot use count.
 */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* Release an indicator slot; the slot index is recovered from the
 * address offset into the q_indicators array. */
static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

/* Add all input queues of this irq to the global thinint queue list and
 * prime the device state change indicator. */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}

/* Remove this irq's input queues from the thinint list, waiting out any
 * concurrent RCU readers (the interrupt handler) per queue. */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
		q = irq_ptr->input_qs[i];
		/* if establish triggered an error */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;
		mutex_lock(&tiq_list_lock);
		list_del_rcu(&q->entry);
		mutex_unlock(&tiq_list_lock);
		/* wait for concurrent list walkers before the queue goes away */
		synchronize_rcu();
	}
}

/* Atomically fetch-and-clear the shared indicator; returns 0 when no
 * queue currently uses the shared slot. */
static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @alsi: pointer to adapter local summary indicator
 * @data: NULL
 */
static void tiqdio_thinint_handler(void *alsi, void *data)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {

		/* only process queues from changed sets */
		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
			if (!si_used)
				continue;
		} else if (!*q->irq_ptr->dsci)
			continue;

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr->dsci))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}

/*
 * Issue a CHSC "set subchannel indicator" to bind (or, with @reset,
 * unbind) the adapter-local summary indicator and the per-subchannel
 * dsci to this subchannel.  Returns 0, -EIO on chsc failure, or a
 * translated CHSC response error.
 */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

/* Register the adapter interrupt handler on the qdio ISC; on failure
 * the ISC registration is rolled back and -ENOMEM returned. */
int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq
			   *irq_ptr)
{
	/* no-op for non-thinint subchannels */
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

/* Assign a device state change indicator to a thinint subchannel. */
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

/* Undo qdio_setup_thinint/qdio_establish_thinint for this subchannel. */
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
}
gpl-2.0
cipriancraciun/linux
lib/digsig.c
3359
5831
/*
 * Copyright (C) 2011 Nokia Corporation
 * Copyright (C) 2011 Intel Corporation
 *
 * Author:
 * Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *                 <dmitry.kasatkin@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: sign.c
 *	implements signature (RSA) verification
 *	pkcs decoding is based on LibTomCrypt code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>

/* shared SHA-1 tfm, allocated once at module init */
static struct crypto_shash *shash;

/*
 * Strip the EMSA-PKCS1-v1_5 padding (0x00 0x01 FF..FF 0x00) from a
 * decrypted signature block and copy the embedded message into @out.
 * Returns 0 on success, -EINVAL on malformed padding, -EOVERFLOW when
 * @out is too small (with *outlen set to the required size).
 */
static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
				   unsigned long msglen,
				   unsigned long modulus_bitlen,
				   unsigned char *out,
				   unsigned long *outlen)
{
	unsigned long modulus_len, ps_len, i;

	modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);

	/* test message size */
	if ((msglen > modulus_len) || (modulus_len < 11))
		return -EINVAL;

	/* separate encoded message */
	if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
		return -EINVAL;

	for (i = 2; i < modulus_len - 1; i++)
		if (msg[i] != 0xFF)
			break;

	/* separator check */
	if (msg[i] != 0)
		/* There was no octet with hexadecimal value 0x00
		   to separate ps from m.
*/ return -EINVAL; ps_len = i - 2; if (*outlen < (msglen - (2 + ps_len + 1))) { *outlen = msglen - (2 + ps_len + 1); return -EOVERFLOW; } *outlen = (msglen - (2 + ps_len + 1)); memcpy(out, &msg[2 + ps_len + 1], *outlen); return 0; } /* * RSA Signature verification with public key */ static int digsig_verify_rsa(struct key *key, const char *sig, int siglen, const char *h, int hlen) { int err = -EINVAL; unsigned long len; unsigned long mlen, mblen; unsigned nret, l; int head, i; unsigned char *out1 = NULL, *out2 = NULL; MPI in = NULL, res = NULL, pkey[2]; uint8_t *p, *datap, *endp; struct user_key_payload *ukp; struct pubkey_hdr *pkh; down_read(&key->sem); ukp = key->payload.data; if (ukp->datalen < sizeof(*pkh)) goto err1; pkh = (struct pubkey_hdr *)ukp->data; if (pkh->version != 1) goto err1; if (pkh->algo != PUBKEY_ALGO_RSA) goto err1; if (pkh->nmpi != 2) goto err1; datap = pkh->mpi; endp = ukp->data + ukp->datalen; err = -ENOMEM; for (i = 0; i < pkh->nmpi; i++) { unsigned int remaining = endp - datap; pkey[i] = mpi_read_from_buffer(datap, &remaining); if (!pkey[i]) goto err; datap += remaining; } mblen = mpi_get_nbits(pkey[0]); mlen = (mblen + 7)/8; if (mlen == 0) goto err; out1 = kzalloc(mlen, GFP_KERNEL); if (!out1) goto err; out2 = kzalloc(mlen, GFP_KERNEL); if (!out2) goto err; nret = siglen; in = mpi_read_from_buffer(sig, &nret); if (!in) goto err; res = mpi_alloc(mpi_get_nlimbs(in) * 2); if (!res) goto err; err = mpi_powm(res, in, pkey[1], pkey[0]); if (err) goto err; if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) { err = -EINVAL; goto err; } p = mpi_get_buffer(res, &l, NULL); if (!p) { err = -EINVAL; goto err; } len = mlen; head = len - l; memset(out1, 0, head); memcpy(out1 + head, p, l); err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); if (!err && len == hlen) err = memcmp(out2, h, hlen); err: mpi_free(in); mpi_free(res); kfree(out1); kfree(out2); while (--i >= 0) mpi_free(pkey[i]); err1: up_read(&key->sem); return err; } /** * 
digsig_verify() - digital signature verification with public key
 * @keyring:	keyring to search key in
 * @sig:	digital signature
 * @siglen:	length of the signature
 * @data:	data
 * @datalen:	length of the data
 * @return:	0 on success, -EINVAL otherwise
 *
 * Verifies data integrity against digital signature.
 * Currently only RSA is supported.
 * Normally hash of the content is used as a data for this function.
 *
 */
int digsig_verify(struct key *keyring, const char *sig, int siglen,
		  const char *data, int datalen)
{
	int err = -ENOMEM;
	struct signature_hdr *sh = (struct signature_hdr *)sig;
	struct shash_desc *desc = NULL;
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct key *key;
	char name[20];	/* 16 hex digits of the 64-bit keyid + NUL fits */

	/* signature must carry at least a header plus the 2-byte MPI size */
	if (siglen < sizeof(*sh) + 2)
		return -EINVAL;

	if (sh->algo != PUBKEY_ALGO_RSA)
		return -ENOTSUPP;

	/* key description is the big-endian keyid rendered as hex */
	sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid));

	if (keyring) {
		/* search in specific keyring */
		key_ref_t kref;

		kref = keyring_search(make_key_ref(keyring, 1UL),
						 &key_type_user,
						 name);
		if (IS_ERR(kref))
			key = ERR_PTR(PTR_ERR(kref));
		else
			key = key_ref_to_ptr(kref);
	} else {
		key = request_key(&key_type_user, name, NULL);
	}
	if (IS_ERR(key)) {
		pr_err("key not found, id: %s\n", name);
		return PTR_ERR(key);
	}

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc)
		goto err;

	desc->tfm = shash;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* hash = SHA1(data || signature header) */
	crypto_shash_init(desc);
	crypto_shash_update(desc, data, datalen);
	crypto_shash_update(desc, sig, sizeof(*sh));
	crypto_shash_final(desc, hash);

	kfree(desc);

	/* pass signature mpis address */
	err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh),
				hash, sizeof(hash));

err:
	key_put(key);

	/* collapse all failure modes to -EINVAL for callers */
	return err ?
	       -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(digsig_verify);

/* Allocate the module-wide SHA-1 shash used by digsig_verify(). */
static int __init digsig_init(void)
{
	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		pr_err("shash allocation failed\n");
		return PTR_ERR(shash);
	}
	return 0;
}

static void __exit digsig_cleanup(void)
{
	crypto_free_shash(shash);
}

module_init(digsig_init);
module_exit(digsig_cleanup);

MODULE_LICENSE("GPL");
gpl-2.0
D2005-devs/cafkernel2-old
net/ceph/osd_client.c
3359
57236
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

/* Ops whose payload is carried in the request's trailing pagelist. */
static int op_needs_trail(int op)
{
	switch (op) {
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY:
		return 1;
	default:
		return 0;
	}
}

/* Ops that address a byte range (offset/length) within an object. */
static int op_has_extent(int op)
{
	return (op == CEPH_OSD_OP_READ ||
		op == CEPH_OSD_OP_WRITE);
}

/*
 * Map a raw file extent onto an object extent: fills in the request's
 * snapid, the op's extent (for read/write), the page count/alignment,
 * and *bno/*plen with the object number and (possibly shortened) length.
 */
void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
			  struct ceph_file_layout *layout,
			  u64 snapid,
			  u64 off, u64 *plen, u64 *bno,
			  struct ceph_osd_request *req,
			  struct ceph_osd_req_op *op)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	u64 orig_len = *plen;
	u64 objoff, objlen;	/* extent in object */

	reqhead->snapid = cpu_to_le64(snapid);

	/* object extent?
	 */
	ceph_calc_file_object_mapping(layout, off, plen, bno,
				      &objoff, &objlen);
	/* the mapping may shorten the extent at an object boundary */
	if (*plen < orig_len)
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	if (op_has_extent(op->op)) {
		op->extent.offset = objoff;
		op->extent.length = objlen;
	}
	req->r_num_pages = calc_pages_for(off, *plen);
	req->r_page_alignment = off & ~PAGE_MASK;
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
	     *bno, objoff, objlen, req->r_num_pages);
}
EXPORT_SYMBOL(ceph_calc_raw_layout);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
*/ static void calc_layout(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, struct ceph_osd_request *req, struct ceph_osd_req_op *op) { u64 bno; ceph_calc_raw_layout(osdc, layout, vino.snap, off, plen, &bno, req, op); snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); } /* * requests */ void ceph_osdc_release_request(struct kref *kref) { struct ceph_osd_request *req = container_of(kref, struct ceph_osd_request, r_kref); if (req->r_request) ceph_msg_put(req->r_request); if (req->r_reply) ceph_msg_put(req->r_reply); if (req->r_con_filling_msg) { dout("release_request revoking pages %p from con %p\n", req->r_pages, req->r_con_filling_msg); ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); ceph_con_put(req->r_con_filling_msg); } if (req->r_own_pages) ceph_release_page_vector(req->r_pages, req->r_num_pages); #ifdef CONFIG_BLOCK if (req->r_bio) bio_put(req->r_bio); #endif ceph_put_snap_context(req->r_snapc); if (req->r_trail) { ceph_pagelist_release(req->r_trail); kfree(req->r_trail); } if (req->r_mempool) mempool_free(req, req->r_osdc->req_mempool); else kfree(req); } EXPORT_SYMBOL(ceph_osdc_release_request); static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail) { int i = 0; if (needs_trail) *needs_trail = 0; while (ops[i].op) { if (needs_trail && op_needs_trail(ops[i].op)) *needs_trail = 1; i++; } return i; } struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, int flags, struct ceph_snap_context *snapc, struct ceph_osd_req_op *ops, bool use_mempool, gfp_t gfp_flags, struct page **pages, struct bio *bio) { struct ceph_osd_request *req; struct ceph_msg *msg; int needs_trail; int num_op = get_num_ops(ops, &needs_trail); size_t msg_size = sizeof(struct ceph_osd_request_head); msg_size += num_op*sizeof(struct ceph_osd_op); if (use_mempool) { req = mempool_alloc(osdc->req_mempool, gfp_flags); memset(req, 0, 
sizeof(*req)); } else { req = kzalloc(sizeof(*req), gfp_flags); } if (req == NULL) return NULL; req->r_osdc = osdc; req->r_mempool = use_mempool; kref_init(&req->r_kref); init_completion(&req->r_completion); init_completion(&req->r_safe_completion); INIT_LIST_HEAD(&req->r_unsafe_item); INIT_LIST_HEAD(&req->r_linger_item); INIT_LIST_HEAD(&req->r_linger_osd); INIT_LIST_HEAD(&req->r_req_lru_item); req->r_flags = flags; WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); /* create reply message */ if (use_mempool) msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); else msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, OSD_OPREPLY_FRONT_LEN, gfp_flags, true); if (!msg) { ceph_osdc_put_request(req); return NULL; } req->r_reply = msg; /* allocate space for the trailing data */ if (needs_trail) { req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags); if (!req->r_trail) { ceph_osdc_put_request(req); return NULL; } ceph_pagelist_init(req->r_trail); } /* create request message; allow space for oid */ msg_size += MAX_OBJ_NAME_SIZE; if (snapc) msg_size += sizeof(u64) * snapc->num_snaps; if (use_mempool) msg = ceph_msgpool_get(&osdc->msgpool_op, 0); else msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true); if (!msg) { ceph_osdc_put_request(req); return NULL; } msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); memset(msg->front.iov_base, 0, msg->front.iov_len); req->r_request = msg; req->r_pages = pages; #ifdef CONFIG_BLOCK if (bio) { req->r_bio = bio; bio_get(req->r_bio); } #endif return req; } EXPORT_SYMBOL(ceph_osdc_alloc_request); static void osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, struct ceph_osd_req_op *src) { dst->op = cpu_to_le16(src->op); switch (dst->op) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: dst->extent.offset = cpu_to_le64(src->extent.offset); dst->extent.length = cpu_to_le64(src->extent.length); dst->extent.truncate_size = cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = 
cpu_to_le32(src->extent.truncate_seq); break; case CEPH_OSD_OP_GETXATTR: case CEPH_OSD_OP_SETXATTR: case CEPH_OSD_OP_CMPXATTR: BUG_ON(!req->r_trail); dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); dst->xattr.cmp_op = src->xattr.cmp_op; dst->xattr.cmp_mode = src->xattr.cmp_mode; ceph_pagelist_append(req->r_trail, src->xattr.name, src->xattr.name_len); ceph_pagelist_append(req->r_trail, src->xattr.val, src->xattr.value_len); break; case CEPH_OSD_OP_CALL: BUG_ON(!req->r_trail); dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); ceph_pagelist_append(req->r_trail, src->cls.class_name, src->cls.class_len); ceph_pagelist_append(req->r_trail, src->cls.method_name, src->cls.method_len); ceph_pagelist_append(req->r_trail, src->cls.indata, src->cls.indata_len); break; case CEPH_OSD_OP_ROLLBACK: dst->snap.snapid = cpu_to_le64(src->snap.snapid); break; case CEPH_OSD_OP_STARTSYNC: break; case CEPH_OSD_OP_NOTIFY: { __le32 prot_ver = cpu_to_le32(src->watch.prot_ver); __le32 timeout = cpu_to_le32(src->watch.timeout); BUG_ON(!req->r_trail); ceph_pagelist_append(req->r_trail, &prot_ver, sizeof(prot_ver)); ceph_pagelist_append(req->r_trail, &timeout, sizeof(timeout)); } case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: dst->watch.cookie = cpu_to_le64(src->watch.cookie); dst->watch.ver = cpu_to_le64(src->watch.ver); dst->watch.flag = src->watch.flag; break; default: pr_err("unrecognized osd opcode %d\n", dst->op); WARN_ON(1); break; } dst->payload_len = cpu_to_le32(src->payload_len); } /* * build new request AND message * */ void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, u64 *plen, struct ceph_osd_req_op *src_ops, struct ceph_snap_context *snapc, struct timespec *mtime, const char *oid, int oid_len) { struct ceph_msg *msg = req->r_request; struct ceph_osd_request_head *head; struct ceph_osd_req_op *src_op; 
struct ceph_osd_op *op; void *p; int num_op = get_num_ops(src_ops, NULL); size_t msg_size = sizeof(*head) + num_op*sizeof(*op); int flags = req->r_flags; u64 data_len = 0; int i; head = msg->front.iov_base; op = (void *)(head + 1); p = (void *)(op + num_op); req->r_snapc = ceph_get_snap_context(snapc); head->client_inc = cpu_to_le32(1); /* always, for now. */ head->flags = cpu_to_le32(flags); if (flags & CEPH_OSD_FLAG_WRITE) ceph_encode_timespec(&head->mtime, mtime); head->num_ops = cpu_to_le16(num_op); /* fill in oid */ head->object_len = cpu_to_le32(oid_len); memcpy(p, oid, oid_len); p += oid_len; src_op = src_ops; while (src_op->op) { osd_req_encode_op(req, op, src_op); src_op++; op++; } if (req->r_trail) data_len += req->r_trail->length; if (snapc) { head->snap_seq = cpu_to_le64(snapc->seq); head->num_snaps = cpu_to_le32(snapc->num_snaps); for (i = 0; i < snapc->num_snaps; i++) { put_unaligned_le64(snapc->snaps[i], p); p += sizeof(u64); } } if (flags & CEPH_OSD_FLAG_WRITE) { req->r_request->hdr.data_off = cpu_to_le16(off); req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len); } else if (data_len) { req->r_request->hdr.data_off = 0; req->r_request->hdr.data_len = cpu_to_le32(data_len); } req->r_request->page_alignment = req->r_page_alignment; BUG_ON(p > msg->front.iov_base + msg->front.iov_len); msg_size = p - msg->front.iov_base; msg->front.iov_len = msg_size; msg->hdr.front_len = cpu_to_le32(msg_size); return; } EXPORT_SYMBOL(ceph_osdc_build_request); /* * build new request AND message, calculate layout, and adjust file * extent as needed. * * if the file was recently truncated, we include information about its * old and new size so that the object can be updated appropriately. (we * avoid synchronously deleting truncated objects because it's slow.) * * if @do_sync, include a 'startsync' command so that the osd will flush * data quickly. 
*/ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, struct ceph_vino vino, u64 off, u64 *plen, int opcode, int flags, struct ceph_snap_context *snapc, int do_sync, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, bool use_mempool, int num_reply, int page_align) { struct ceph_osd_req_op ops[3]; struct ceph_osd_request *req; ops[0].op = opcode; ops[0].extent.truncate_seq = truncate_seq; ops[0].extent.truncate_size = truncate_size; ops[0].payload_len = 0; if (do_sync) { ops[1].op = CEPH_OSD_OP_STARTSYNC; ops[1].payload_len = 0; ops[2].op = 0; } else ops[1].op = 0; req = ceph_osdc_alloc_request(osdc, flags, snapc, ops, use_mempool, GFP_NOFS, NULL, NULL); if (!req) return NULL; /* calculate max write size */ calc_layout(osdc, vino, layout, off, plen, req, ops); req->r_file_layout = *layout; /* keep a copy */ /* in case it differs from natural (file) alignment that calc_layout filled in for us */ req->r_num_pages = calc_pages_for(page_align, *plen); req->r_page_alignment = page_align; ceph_osdc_build_request(req, off, plen, ops, snapc, mtime, req->r_oid, req->r_oid_len); return req; } EXPORT_SYMBOL(ceph_osdc_new_request); /* * We keep osd requests in an rbtree, sorted by ->r_tid. 
*/ static void __insert_request(struct ceph_osd_client *osdc, struct ceph_osd_request *new) { struct rb_node **p = &osdc->requests.rb_node; struct rb_node *parent = NULL; struct ceph_osd_request *req = NULL; while (*p) { parent = *p; req = rb_entry(parent, struct ceph_osd_request, r_node); if (new->r_tid < req->r_tid) p = &(*p)->rb_left; else if (new->r_tid > req->r_tid) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->r_node, parent, p); rb_insert_color(&new->r_node, &osdc->requests); } static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, u64 tid) { struct ceph_osd_request *req; struct rb_node *n = osdc->requests.rb_node; while (n) { req = rb_entry(n, struct ceph_osd_request, r_node); if (tid < req->r_tid) n = n->rb_left; else if (tid > req->r_tid) n = n->rb_right; else return req; } return NULL; } static struct ceph_osd_request * __lookup_request_ge(struct ceph_osd_client *osdc, u64 tid) { struct ceph_osd_request *req; struct rb_node *n = osdc->requests.rb_node; while (n) { req = rb_entry(n, struct ceph_osd_request, r_node); if (tid < req->r_tid) { if (!n->rb_left) return req; n = n->rb_left; } else if (tid > req->r_tid) { n = n->rb_right; } else { return req; } } return NULL; } /* * Resubmit requests pending on the given osd. */ static void __kick_osd_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd) { struct ceph_osd_request *req, *nreq; int err; dout("__kick_osd_requests osd%d\n", osd->o_osd); err = __reset_osd(osdc, osd); if (err == -EAGAIN) return; list_for_each_entry(req, &osd->o_requests, r_osd_item) { list_move(&req->r_req_lru_item, &osdc->req_unsent); dout("requeued %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); if (!req->r_linger) req->r_flags |= CEPH_OSD_FLAG_RETRY; } list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, r_linger_osd) { /* * reregister request prior to unregistering linger so * that r_osd is preserved. 
*/ BUG_ON(!list_empty(&req->r_req_lru_item)); __register_request(osdc, req); list_add(&req->r_req_lru_item, &osdc->req_unsent); list_add(&req->r_osd_item, &req->r_osd->o_requests); __unregister_linger_request(osdc, req); dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); } } static void kick_osd_requests(struct ceph_osd_client *osdc, struct ceph_osd *kickosd) { mutex_lock(&osdc->request_mutex); __kick_osd_requests(osdc, kickosd); mutex_unlock(&osdc->request_mutex); } /* * If the osd connection drops, we need to resubmit all requests. */ static void osd_reset(struct ceph_connection *con) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc; if (!osd) return; dout("osd_reset osd%d\n", osd->o_osd); osdc = osd->o_osdc; down_read(&osdc->map_sem); kick_osd_requests(osdc, osd); send_queued(osdc); up_read(&osdc->map_sem); } /* * Track open sessions with osds. */ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) { struct ceph_osd *osd; osd = kzalloc(sizeof(*osd), GFP_NOFS); if (!osd) return NULL; atomic_set(&osd->o_ref, 1); osd->o_osdc = osdc; INIT_LIST_HEAD(&osd->o_requests); INIT_LIST_HEAD(&osd->o_linger_requests); INIT_LIST_HEAD(&osd->o_osd_lru); osd->o_incarnation = 1; ceph_con_init(osdc->client->msgr, &osd->o_con); osd->o_con.private = osd; osd->o_con.ops = &osd_con_ops; osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; INIT_LIST_HEAD(&osd->o_keepalive_item); return osd; } static struct ceph_osd *get_osd(struct ceph_osd *osd) { if (atomic_inc_not_zero(&osd->o_ref)) { dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, atomic_read(&osd->o_ref)); return osd; } else { dout("get_osd %p FAIL\n", osd); return NULL; } } static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); if (atomic_dec_and_test(&osd->o_ref)) { struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; if (osd->o_authorizer) ac->ops->destroy_authorizer(ac, 
osd->o_authorizer); kfree(osd); } } /* * remove an osd from our map */ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__remove_osd %p\n", osd); BUG_ON(!list_empty(&osd->o_requests)); rb_erase(&osd->o_node, &osdc->osds); list_del_init(&osd->o_osd_lru); ceph_con_close(&osd->o_con); put_osd(osd); } static void remove_all_osds(struct ceph_osd_client *osdc) { dout("__remove_old_osds %p\n", osdc); mutex_lock(&osdc->request_mutex); while (!RB_EMPTY_ROOT(&osdc->osds)) { struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), struct ceph_osd, o_node); __remove_osd(osdc, osd); } mutex_unlock(&osdc->request_mutex); } static void __move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__move_osd_to_lru %p\n", osd); BUG_ON(!list_empty(&osd->o_osd_lru)); list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; } static void __remove_osd_from_lru(struct ceph_osd *osd) { dout("__remove_osd_from_lru %p\n", osd); if (!list_empty(&osd->o_osd_lru)) list_del_init(&osd->o_osd_lru); } static void remove_old_osds(struct ceph_osd_client *osdc) { struct ceph_osd *osd, *nosd; dout("__remove_old_osds %p\n", osdc); mutex_lock(&osdc->request_mutex); list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { if (time_before(jiffies, osd->lru_ttl)) break; __remove_osd(osdc, osd); } mutex_unlock(&osdc->request_mutex); } /* * reset osd connect */ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { struct ceph_osd_request *req; int ret = 0; dout("__reset_osd %p osd%d\n", osd, osd->o_osd); if (list_empty(&osd->o_requests) && list_empty(&osd->o_linger_requests)) { __remove_osd(osdc, osd); } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], &osd->o_con.peer_addr, sizeof(osd->o_con.peer_addr)) == 0 && !ceph_con_opened(&osd->o_con)) { dout(" osd addr hasn't changed and connection never opened," " letting msgr retry"); /* touch each r_stamp for 
handle_timeout()'s benfit */ list_for_each_entry(req, &osd->o_requests, r_osd_item) req->r_stamp = jiffies; ret = -EAGAIN; } else { ceph_con_close(&osd->o_con); ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); osd->o_incarnation++; } return ret; } static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) { struct rb_node **p = &osdc->osds.rb_node; struct rb_node *parent = NULL; struct ceph_osd *osd = NULL; dout("__insert_osd %p osd%d\n", new, new->o_osd); while (*p) { parent = *p; osd = rb_entry(parent, struct ceph_osd, o_node); if (new->o_osd < osd->o_osd) p = &(*p)->rb_left; else if (new->o_osd > osd->o_osd) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->o_node, parent, p); rb_insert_color(&new->o_node, &osdc->osds); } static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) { struct ceph_osd *osd; struct rb_node *n = osdc->osds.rb_node; while (n) { osd = rb_entry(n, struct ceph_osd, o_node); if (o < osd->o_osd) n = n->rb_left; else if (o > osd->o_osd) n = n->rb_right; else return osd; } return NULL; } static void __schedule_osd_timeout(struct ceph_osd_client *osdc) { schedule_delayed_work(&osdc->timeout_work, osdc->client->options->osd_keepalive_timeout * HZ); } static void __cancel_osd_timeout(struct ceph_osd_client *osdc) { cancel_delayed_work(&osdc->timeout_work); } /* * Register request, assign tid. If this is the first request, set up * the timeout event. 
*/ static void __register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { req->r_tid = ++osdc->last_tid; req->r_request->hdr.tid = cpu_to_le64(req->r_tid); dout("__register_request %p tid %lld\n", req, req->r_tid); __insert_request(osdc, req); ceph_osdc_get_request(req); osdc->num_requests++; if (osdc->num_requests == 1) { dout(" first request, scheduling timeout\n"); __schedule_osd_timeout(osdc); } } static void register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { mutex_lock(&osdc->request_mutex); __register_request(osdc, req); mutex_unlock(&osdc->request_mutex); } /* * called under osdc->request_mutex */ static void __unregister_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { dout("__unregister_request %p tid %lld\n", req, req->r_tid); rb_erase(&req->r_node, &osdc->requests); osdc->num_requests--; if (req->r_osd) { /* make sure the original request isn't in flight. */ ceph_con_revoke(&req->r_osd->o_con, req->r_request); list_del_init(&req->r_osd_item); if (list_empty(&req->r_osd->o_requests) && list_empty(&req->r_osd->o_linger_requests)) { dout("moving osd to %p lru\n", req->r_osd); __move_osd_to_lru(osdc, req->r_osd); } if (list_empty(&req->r_linger_item)) req->r_osd = NULL; } ceph_osdc_put_request(req); list_del_init(&req->r_req_lru_item); if (osdc->num_requests == 0) { dout(" no requests, canceling timeout\n"); __cancel_osd_timeout(osdc); } } /* * Cancel a previously queued request message */ static void __cancel_request(struct ceph_osd_request *req) { if (req->r_sent && req->r_osd) { ceph_con_revoke(&req->r_osd->o_con, req->r_request); req->r_sent = 0; } } static void __register_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { dout("__register_linger_request %p\n", req); list_add_tail(&req->r_linger_item, &osdc->req_linger); list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests); } static void __unregister_linger_request(struct ceph_osd_client *osdc, 
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	if (req->r_osd) {
		list_del_init(&req->r_linger_item);
		list_del_init(&req->r_linger_osd);

		/* idle osd (no plain or linger requests left) goes on lru */
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd if the request is still on the plain list */
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

/*
 * Drop a lingering request's registration, and the extra ref taken by
 * ceph_osdc_set_request_linger(), if it is still marked linger.
 */
void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

/*
 * Mark @req as lingering: it stays registered across osd map changes
 * until ceph_osdc_unregister_linger_request() is called.  Takes an
 * extra ref on the request.
 */
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	/* object name -> placement group */
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	pgid = reqhead->layout.ol_pgid;
	req->r_pgid = pgid;

	/* pg -> acting osd set; the primary, acting[0], serves requests */
	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	/* unchanged mapping (same primary, same acting set, already sent
	 * to the current connection incarnation)?  nothing to do. */
	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
	     req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	/* detach from the old osd, revoking any queued send */
	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		/* first request for this osd: create the session */
		err = -ENOMEM;
		req->r_osd = create_osd(osdc);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		req->r_osd->o_osd = o;
		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	/* refresh the header with the current map epoch and flags */
	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	/* remember which connection incarnation carried this send */
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("send_queued\n");
	mutex_lock(&osdc->request_mutex);
	/* _safe: __send_request() moves entries onto req_lru */
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
		__send_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req, *last_req = NULL;
	struct ceph_osd *osd;
	unsigned long timeout = osdc->client->options->osd_timeout * HZ;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	unsigned long last_stamp = 0;
	struct list_head slow_osds;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * reset osds that appear to be _really_ unresponsive.  this
	 * is a failsafe measure.. we really shouldn't be getting to
	 * this point if the system is working properly.  the monitors
	 * should mark the osd as failed and we should find out about
	 * it from an updated osd map.
	 */
	while (timeout && !list_empty(&osdc->req_lru)) {
		/* req_lru is ordered by r_stamp (oldest first) */
		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
				 r_req_lru_item);

		/* hasn't been long enough since we sent it? */
		if (time_before(jiffies, req->r_stamp + timeout))
			break;

		/* hasn't been long enough since it was acked? */
		if (req->r_request->ack_stamp == 0 ||
		    time_before(jiffies, req->r_request->ack_stamp + timeout))
			break;

		/* the loop must make progress each iteration */
		BUG_ON(req == last_req && req->r_stamp == last_stamp);
		last_req = req;
		last_stamp = req->r_stamp;

		osd = req->r_osd;
		BUG_ON(!osd);
		pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
			   req->r_tid, osd->o_osd);
		__kick_osd_requests(osdc, osd);
	}

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	mutex_unlock(&osdc->request_mutex);
	send_queued(osdc);
	up_read(&osdc->map_sem);
}

/*
 * Periodic work: drop idle osd connections.  Rescheduled every
 * osd_idle_ttl/4 seconds.
 */
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

/*
 * Run the "safe" (on-disk commit) callback and wake fsync waiters.
 */
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;
	s32 result;

	tid = le64_to_cpu(msg->hdr.tid);
	/* sanity-check the front length against the advertised op count */
	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	result = le32_to_cpu(rhead->result);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		ceph_con_put(con);
	}

	if (!req->r_got_reply) {
		/* first reply for this tid: record the result */
		unsigned bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		/* a successful read reports the byte count as its result */
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
	ceph_msg_dump(msg);
}

/*
 * Reset any osd that is no longer up, or whose address in the current
 * map differs from its open connection's peer address.
 */
static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		/* grab next first: __reset_osd() may unlink this node */
		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read; request_mutex is taken here.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;	/* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;	/* request a newer map */
		} else if (err > 0) {
			dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
			     req->r_osd ?
req->r_osd->o_osd : -1); if (!req->r_linger) req->r_flags |= CEPH_OSD_FLAG_RETRY; } } list_for_each_entry_safe(req, nreq, &osdc->req_linger, r_linger_item) { dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); err = __map_request(osdc, req, force_resend); if (err == 0) continue; /* no change and no osd was specified */ if (err < 0) continue; /* hrm! */ if (req->r_osd == NULL) { dout("tid %llu maps to no valid osd\n", req->r_tid); needmap++; /* request a newer map */ continue; } dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, req->r_osd ? req->r_osd->o_osd : -1); __unregister_linger_request(osdc, req); __register_request(osdc, req); } mutex_unlock(&osdc->request_mutex); if (needmap) { dout("%d requests for down osds, need new map\n", needmap); ceph_monc_request_next_osdmap(&osdc->client->monc); } } /* * Process updated osd map. * * The message contains any number of incremental and full maps, normally * indicating some sort of topology change in the cluster. Kick requests * off to different OSDs as needed. */ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) { void *p, *end, *next; u32 nr_maps, maplen; u32 epoch; struct ceph_osdmap *newmap = NULL, *oldmap; int err; struct ceph_fsid fsid; dout("handle_map have %u\n", osdc->osdmap ? 
osdc->osdmap->epoch : 0); p = msg->front.iov_base; end = p + msg->front.iov_len; /* verify fsid */ ceph_decode_need(&p, end, sizeof(fsid), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); if (ceph_check_fsid(osdc->client, &fsid) < 0) return; down_write(&osdc->map_sem); /* incremental maps */ ceph_decode_32_safe(&p, end, nr_maps, bad); dout(" %d inc maps\n", nr_maps); while (nr_maps > 0) { ceph_decode_need(&p, end, 2*sizeof(u32), bad); epoch = ceph_decode_32(&p); maplen = ceph_decode_32(&p); ceph_decode_need(&p, end, maplen, bad); next = p + maplen; if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { dout("applying incremental map %u len %d\n", epoch, maplen); newmap = osdmap_apply_incremental(&p, next, osdc->osdmap, osdc->client->msgr); if (IS_ERR(newmap)) { err = PTR_ERR(newmap); goto bad; } BUG_ON(!newmap); if (newmap != osdc->osdmap) { ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = newmap; } kick_requests(osdc, 0); reset_changed_osds(osdc); } else { dout("ignoring incremental map %u len %d\n", epoch, maplen); } p = next; nr_maps--; } if (newmap) goto done; /* full maps */ ceph_decode_32_safe(&p, end, nr_maps, bad); dout(" %d full maps\n", nr_maps); while (nr_maps) { ceph_decode_need(&p, end, 2*sizeof(u32), bad); epoch = ceph_decode_32(&p); maplen = ceph_decode_32(&p); ceph_decode_need(&p, end, maplen, bad); if (nr_maps > 1) { dout("skipping non-latest full map %u len %d\n", epoch, maplen); } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { dout("skipping full map %u len %d, " "older than our %u\n", epoch, maplen, osdc->osdmap->epoch); } else { int skipped_map = 0; dout("taking full map %u len %d\n", epoch, maplen); newmap = osdmap_decode(&p, p+maplen); if (IS_ERR(newmap)) { err = PTR_ERR(newmap); goto bad; } BUG_ON(!newmap); oldmap = osdc->osdmap; osdc->osdmap = newmap; if (oldmap) { if (oldmap->epoch + 1 < newmap->epoch) skipped_map = 1; ceph_osdmap_destroy(oldmap); } kick_requests(osdc, skipped_map); } p += maplen; nr_maps--; } done: 
downgrade_write(&osdc->map_sem); ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); /* * subscribe to subsequent osdmap updates if full to ensure * we find out when we are no longer full and stop returning * ENOSPC. */ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) ceph_monc_request_next_osdmap(&osdc->client->monc); send_queued(osdc); up_read(&osdc->map_sem); wake_up_all(&osdc->client->auth_wq); return; bad: pr_err("osdc handle_map corrupt msg\n"); ceph_msg_dump(msg); up_write(&osdc->map_sem); return; } /* * watch/notify callback event infrastructure * * These callbacks are used both for watch and notify operations. */ static void __release_event(struct kref *kref) { struct ceph_osd_event *event = container_of(kref, struct ceph_osd_event, kref); dout("__release_event %p\n", event); kfree(event); } static void get_event(struct ceph_osd_event *event) { kref_get(&event->kref); } void ceph_osdc_put_event(struct ceph_osd_event *event) { kref_put(&event->kref, __release_event); } EXPORT_SYMBOL(ceph_osdc_put_event); static void __insert_event(struct ceph_osd_client *osdc, struct ceph_osd_event *new) { struct rb_node **p = &osdc->event_tree.rb_node; struct rb_node *parent = NULL; struct ceph_osd_event *event = NULL; while (*p) { parent = *p; event = rb_entry(parent, struct ceph_osd_event, node); if (new->cookie < event->cookie) p = &(*p)->rb_left; else if (new->cookie > event->cookie) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, &osdc->event_tree); } static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc, u64 cookie) { struct rb_node **p = &osdc->event_tree.rb_node; struct rb_node *parent = NULL; struct ceph_osd_event *event = NULL; while (*p) { parent = *p; event = rb_entry(parent, struct ceph_osd_event, node); if (cookie < event->cookie) p = &(*p)->rb_left; else if (cookie > event->cookie) p = &(*p)->rb_right; else return event; } return NULL; } static void __remove_event(struct 
		   ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	/* RB_EMPTY_NODE means it was already removed (e.g. one-shot fired) */
	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

/*
 * Allocate and register a watch/notify event.  On success *pevent
 * holds the event with one ref owned by the caller (drop with
 * ceph_osdc_cancel_event() or ceph_osdc_put_event()).
 */
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   int one_shot, void *data,
			   struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = one_shot;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */
	init_completion(&event->completion);

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

/*
 * Unregister an event and drop the caller's reference.
 */
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);

/*
 * Deferred-work context: invoke the user callback outside the
 * message-dispatch path, then drop the notification's event ref.
 */
static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	complete(&event->completion);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}

/*
 * Process osd watch notifications
 */
void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		get_event(event);
		if (event->one_shot)
			__remove_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		/*
		 * NOTE(review): if queue_work() fails here, event_work is
		 * leaked (only the event itself is released below).
		 */
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	complete(&event->completion);
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

/*
 * Wait (interruptibly, up to @timeout seconds) for an event's callback
 * to have run; always drops the caller's event ref.
 */
int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
{
	int err;

	dout("wait_event %p\n", event);
	err = wait_for_completion_interruptible_timeout(&event->completion,
							timeout * HZ);
	ceph_osdc_put_event(event);
	if (err > 0)
		err = 0;	/* completed: report success */
	dout("wait_event %p returns %d\n", event, err);
	return err;
}
EXPORT_SYMBOL(ceph_osdc_wait_event);

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* attach the request's data payload to the outgoing message */
	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
	req->r_request->bio = req->r_bio;
#endif
	req->r_request->trail = req->r_trail;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __map_request(osdc, req, 0);
		if (rc < 0) {
			/* with nofail, leave it registered for a later kick */
			if (nofail) {
				dout("osdc_start_request failed map, "
				     " will retry %lld\n", req->r_tid);
				rc = 0;
			}
			goto out_unlock;
		}
		if (req->r_osd == NULL) {
			dout("send_request %p no up osds in pg\n", req);
			ceph_monc_request_next_osdmap(&osdc->client->monc);
		} else {
			__send_request(osdc, req);
		}
		rc = 0;
	}

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		/* interrupted: cancel and unregister the request */
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
*/ void ceph_osdc_sync(struct ceph_osd_client *osdc) { struct ceph_osd_request *req; u64 last_tid, next_tid = 0; mutex_lock(&osdc->request_mutex); last_tid = osdc->last_tid; while (1) { req = __lookup_request_ge(osdc, next_tid); if (!req) break; if (req->r_tid > last_tid) break; next_tid = req->r_tid + 1; if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) continue; ceph_osdc_get_request(req); mutex_unlock(&osdc->request_mutex); dout("sync waiting on tid %llu (last is %llu)\n", req->r_tid, last_tid); wait_for_completion(&req->r_safe_completion); mutex_lock(&osdc->request_mutex); ceph_osdc_put_request(req); } mutex_unlock(&osdc->request_mutex); dout("sync done (thru tid %llu)\n", last_tid); } EXPORT_SYMBOL(ceph_osdc_sync); /* * init, shutdown */ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) { int err; dout("init\n"); osdc->client = client; osdc->osdmap = NULL; init_rwsem(&osdc->map_sem); init_completion(&osdc->map_waiters); osdc->last_requested_map = 0; mutex_init(&osdc->request_mutex); osdc->last_tid = 0; osdc->osds = RB_ROOT; INIT_LIST_HEAD(&osdc->osd_lru); osdc->requests = RB_ROOT; INIT_LIST_HEAD(&osdc->req_lru); INIT_LIST_HEAD(&osdc->req_unsent); INIT_LIST_HEAD(&osdc->req_notarget); INIT_LIST_HEAD(&osdc->req_linger); osdc->num_requests = 0; INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); spin_lock_init(&osdc->event_lock); osdc->event_tree = RB_ROOT; osdc->event_count = 0; schedule_delayed_work(&osdc->osds_timeout_work, round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); err = -ENOMEM; osdc->req_mempool = mempool_create_kmalloc_pool(10, sizeof(struct ceph_osd_request)); if (!osdc->req_mempool) goto out; err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true, "osd_op"); if (err < 0) goto out_mempool; err = ceph_msgpool_init(&osdc->msgpool_op_reply, OSD_OPREPLY_FRONT_LEN, 10, true, "osd_op_reply"); if (err < 0) goto out_msgpool; 
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); if (IS_ERR(osdc->notify_wq)) { err = PTR_ERR(osdc->notify_wq); osdc->notify_wq = NULL; goto out_msgpool; } return 0; out_msgpool: ceph_msgpool_destroy(&osdc->msgpool_op); out_mempool: mempool_destroy(osdc->req_mempool); out: return err; } EXPORT_SYMBOL(ceph_osdc_init); void ceph_osdc_stop(struct ceph_osd_client *osdc) { flush_workqueue(osdc->notify_wq); destroy_workqueue(osdc->notify_wq); cancel_delayed_work_sync(&osdc->timeout_work); cancel_delayed_work_sync(&osdc->osds_timeout_work); if (osdc->osdmap) { ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = NULL; } remove_all_osds(osdc); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); ceph_msgpool_destroy(&osdc->msgpool_op_reply); } EXPORT_SYMBOL(ceph_osdc_stop); /* * Read some contiguous pages. If we cross a stripe boundary, shorten * *plen. Return number of bytes read, or error. */ int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, u32 truncate_seq, u64 truncate_size, struct page **pages, int num_pages, int page_align) { struct ceph_osd_request *req; int rc = 0; dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, vino.snap, off, *plen); req = ceph_osdc_new_request(osdc, layout, vino, off, plen, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 0, truncate_seq, truncate_size, NULL, false, 1, page_align); if (!req) return -ENOMEM; /* it may be a short read due to an object boundary */ req->r_pages = pages; dout("readpages final extent is %llu~%llu (%d pages align %d)\n", off, *plen, req->r_num_pages, page_align); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) rc = ceph_osdc_wait_request(osdc, req); ceph_osdc_put_request(req); dout("readpages result %d\n", rc); return rc; } EXPORT_SYMBOL(ceph_osdc_readpages); /* * do a synchronous write on N pages */ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, 
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages,
			 int flags, int do_sync, bool nofail)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);	/* writing to a snap is a bug */
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    flags | CEPH_OSD_FLAG_ONDISK |
					    CEPH_OSD_FLAG_WRITE,
				    snapc, do_sync,
				    truncate_seq, truncate_size, mtime,
				    nofail, 1, page_align);
	if (!req)
		return -ENOMEM;

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, nofail);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;	/* success reports the byte count written */
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);	/* drop the dispatch ref in every case */
}

/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		/* reply for a request we no longer track: skip the body */
		*skip = 1;
		m = NULL;
		pr_info("get_reply unknown tid %llu from osd%d\n", tid,
			osd->o_osd);
		goto out;
	}

	/* another connection was filling this reply; revoke it first */
	if (req->r_con_filling_msg) {
		dout("get_reply revoking msg %p from old con %p\n",
		     req->r_reply, req->r_con_filling_msg);
		ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
		ceph_con_put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}

	/* preallocated reply too small?  allocate a bigger one */
	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		int want = calc_pages_for(req->r_page_alignment, data_len);

		if (unlikely(req->r_num_pages < want)) {
			pr_warning("tid %lld reply has %d bytes %d pages, we"
				   " had only %d pages ready\n", tid, data_len,
				   want, req->r_num_pages);
			*skip = 1;
			ceph_msg_put(m);
			m = NULL;
			goto out;
		}
		/* receive the data payload directly into the request pages */
		m->pages = req->r_pages;
		m->nr_pages = req->r_num_pages;
		m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
		m->bio = req->r_bio;
#endif
	}
	*skip = 0;
	req->r_con_filling_msg = ceph_con_get(con);
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;

}

/*
 * Allocate (or look up) the message to receive an incoming frame into,
 * based on its type.
 */
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;	/* osd is being torn down; refuse the con ref */
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	int ret = 0;

	/* discard a cached authorizer if the caller wants a fresh one */
	if (force_new && o->o_authorizer) {
		ac->ops->destroy_authorizer(ac, o->o_authorizer);
		o->o_authorizer = NULL;
	}
	if (o->o_authorizer == NULL) {
		ret = ac->ops->create_authorizer(
			ac, CEPH_ENTITY_TYPE_OSD,
			&o->o_authorizer,
			&o->o_authorizer_buf,
			&o->o_authorizer_buf_len,
			&o->o_authorizer_reply_buf,
			&o->o_authorizer_reply_buf_len);
		if (ret)
			return ret;
	}

	*proto = ac->protocol;
	*buf = o->o_authorizer_buf;
	*len = o->o_authorizer_buf_len;
	*reply_buf = o->o_authorizer_reply_buf;
	*reply_len = o->o_authorizer_reply_buf_len;
	return 0;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	if (ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

	return ceph_monc_validate_auth(&osdc->client->monc);
}
/*
 * Connection callback table handed to the messenger for OSD sessions.
 * .fault (osd_reset) runs when a connection error is detected.
 */
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};
gpl-2.0