repo_name
string
path
string
copies
string
size
string
content
string
license
string
jianC/kernel_htc_lexikon-3.0
fs/hfsplus/super.c
2313
15722
/* * linux/fs/hfsplus/super.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * */ #include <linux/module.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/vfs.h> #include <linux/nls.h> static struct inode *hfsplus_alloc_inode(struct super_block *sb); static void hfsplus_destroy_inode(struct inode *inode); #include "hfsplus_fs.h" static int hfsplus_system_read_inode(struct inode *inode) { struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr; switch (inode->i_ino) { case HFSPLUS_EXT_CNID: hfsplus_inode_read_fork(inode, &vhdr->ext_file); inode->i_mapping->a_ops = &hfsplus_btree_aops; break; case HFSPLUS_CAT_CNID: hfsplus_inode_read_fork(inode, &vhdr->cat_file); inode->i_mapping->a_ops = &hfsplus_btree_aops; break; case HFSPLUS_ALLOC_CNID: hfsplus_inode_read_fork(inode, &vhdr->alloc_file); inode->i_mapping->a_ops = &hfsplus_aops; break; case HFSPLUS_START_CNID: hfsplus_inode_read_fork(inode, &vhdr->start_file); break; case HFSPLUS_ATTR_CNID: hfsplus_inode_read_fork(inode, &vhdr->attr_file); inode->i_mapping->a_ops = &hfsplus_btree_aops; break; default: return -EIO; } return 0; } struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) { struct hfs_find_data fd; struct inode *inode; int err; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list); mutex_init(&HFSPLUS_I(inode)->extents_lock); HFSPLUS_I(inode)->flags = 0; HFSPLUS_I(inode)->extent_state = 0; HFSPLUS_I(inode)->rsrc_inode = NULL; atomic_set(&HFSPLUS_I(inode)->opencnt, 0); if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || inode->i_ino == HFSPLUS_ROOT_CNID) { hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); if (!err) err = hfsplus_cat_read_inode(inode, &fd); hfs_find_exit(&fd); } 
else { err = hfsplus_system_read_inode(inode); } if (err) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } static int hfsplus_system_write_inode(struct inode *inode) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); struct hfsplus_vh *vhdr = sbi->s_vhdr; struct hfsplus_fork_raw *fork; struct hfs_btree *tree = NULL; switch (inode->i_ino) { case HFSPLUS_EXT_CNID: fork = &vhdr->ext_file; tree = sbi->ext_tree; break; case HFSPLUS_CAT_CNID: fork = &vhdr->cat_file; tree = sbi->cat_tree; break; case HFSPLUS_ALLOC_CNID: fork = &vhdr->alloc_file; break; case HFSPLUS_START_CNID: fork = &vhdr->start_file; break; case HFSPLUS_ATTR_CNID: fork = &vhdr->attr_file; tree = sbi->attr_tree; default: return -EIO; } if (fork->total_size != cpu_to_be64(inode->i_size)) { set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); inode->i_sb->s_dirt = 1; } hfsplus_inode_write_fork(inode, fork); if (tree) hfs_btree_write(tree); return 0; } static int hfsplus_write_inode(struct inode *inode, struct writeback_control *wbc) { dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); hfsplus_ext_write_extent(inode); if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || inode->i_ino == HFSPLUS_ROOT_CNID) return hfsplus_cat_write_inode(inode); else return hfsplus_system_write_inode(inode); } static void hfsplus_evict_inode(struct inode *inode) { dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (HFSPLUS_IS_RSRC(inode)) { HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL; iput(HFSPLUS_I(inode)->rsrc_inode); } } int hfsplus_sync_fs(struct super_block *sb, int wait) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); struct hfsplus_vh *vhdr = sbi->s_vhdr; int write_backup = 0; int error, error2; if (!wait) return 0; dprint(DBG_SUPER, "hfsplus_write_super\n"); sb->s_dirt = 0; /* * Explicitly write out the special metadata inodes. 
* * While these special inodes are marked as hashed and written * out peridocically by the flusher threads we redirty them * during writeout of normal inodes, and thus the life lock * prevents us from getting the latest state to disk. */ error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); if (!error) error = error2; error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping); if (!error) error = error2; mutex_lock(&sbi->vh_mutex); mutex_lock(&sbi->alloc_mutex); vhdr->free_blocks = cpu_to_be32(sbi->free_blocks); vhdr->next_cnid = cpu_to_be32(sbi->next_cnid); vhdr->folder_count = cpu_to_be32(sbi->folder_count); vhdr->file_count = cpu_to_be32(sbi->file_count); if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) { memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr)); write_backup = 1; } error2 = hfsplus_submit_bio(sb, sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, sbi->s_vhdr_buf, NULL, WRITE_SYNC); if (!error) error = error2; if (!write_backup) goto out; error2 = hfsplus_submit_bio(sb, sbi->part_start + sbi->sect_count - 2, sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC); if (!error) error2 = error; out: mutex_unlock(&sbi->alloc_mutex); mutex_unlock(&sbi->vh_mutex); if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags)) blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); return error; } static void hfsplus_write_super(struct super_block *sb) { if (!(sb->s_flags & MS_RDONLY)) hfsplus_sync_fs(sb, 1); else sb->s_dirt = 0; } static void hfsplus_put_super(struct super_block *sb) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); dprint(DBG_SUPER, "hfsplus_put_super\n"); if (!sb->s_fs_info) return; if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) { struct hfsplus_vh *vhdr = sbi->s_vhdr; vhdr->modify_date = hfsp_now2mt(); vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT); vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT); hfsplus_sync_fs(sb, 1); } hfs_btree_close(sbi->cat_tree); 
hfs_btree_close(sbi->ext_tree); iput(sbi->alloc_file); iput(sbi->hidden_dir); kfree(sbi->s_vhdr_buf); kfree(sbi->s_backup_vhdr_buf); unload_nls(sbi->nls); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = HFSPLUS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->total_blocks << sbi->fs_shift; buf->f_bfree = sbi->free_blocks << sbi->fs_shift; buf->f_bavail = buf->f_bfree; buf->f_files = 0xFFFFFFFF; buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_namelen = HFSPLUS_MAX_STRLEN; return 0; } static int hfsplus_remount(struct super_block *sb, int *flags, char *data) { if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) return 0; if (!(*flags & MS_RDONLY)) { struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr; int force = 0; if (!hfsplus_parse_options_remount(data, &force)) return -EINVAL; if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { printk(KERN_WARNING "hfs: filesystem was " "not cleanly unmounted, " "running fsck.hfsplus is recommended. 
" "leaving read-only.\n"); sb->s_flags |= MS_RDONLY; *flags |= MS_RDONLY; } else if (force) { /* nothing */ } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { printk(KERN_WARNING "hfs: filesystem is marked locked, " "leaving read-only.\n"); sb->s_flags |= MS_RDONLY; *flags |= MS_RDONLY; } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) { printk(KERN_WARNING "hfs: filesystem is " "marked journaled, " "leaving read-only.\n"); sb->s_flags |= MS_RDONLY; *flags |= MS_RDONLY; } } return 0; } static const struct super_operations hfsplus_sops = { .alloc_inode = hfsplus_alloc_inode, .destroy_inode = hfsplus_destroy_inode, .write_inode = hfsplus_write_inode, .evict_inode = hfsplus_evict_inode, .put_super = hfsplus_put_super, .write_super = hfsplus_write_super, .sync_fs = hfsplus_sync_fs, .statfs = hfsplus_statfs, .remount_fs = hfsplus_remount, .show_options = hfsplus_show_options, }; static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) { struct hfsplus_vh *vhdr; struct hfsplus_sb_info *sbi; hfsplus_cat_entry entry; struct hfs_find_data fd; struct inode *root, *inode; struct qstr str; struct nls_table *nls = NULL; int err; err = -EINVAL; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out; sb->s_fs_info = sbi; mutex_init(&sbi->alloc_mutex); mutex_init(&sbi->vh_mutex); hfsplus_fill_defaults(sbi); err = -EINVAL; if (!hfsplus_parse_options(data, sbi)) { printk(KERN_ERR "hfs: unable to parse mount options\n"); goto out_unload_nls; } /* temporarily use utf8 to correctly find the hidden dir below */ nls = sbi->nls; sbi->nls = load_nls("utf8"); if (!sbi->nls) { printk(KERN_ERR "hfs: unable to load nls for utf8\n"); goto out_unload_nls; } /* Grab the volume header */ if (hfsplus_read_wrapper(sb)) { if (!silent) printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n"); goto out_unload_nls; } vhdr = sbi->s_vhdr; /* Copy parts of the volume header into the superblock */ sb->s_magic = HFSPLUS_VOLHEAD_SIG; if 
(be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION || be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) { printk(KERN_ERR "hfs: wrong filesystem version\n"); goto out_free_vhdr; } sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); sbi->free_blocks = be32_to_cpu(vhdr->free_blocks); sbi->next_cnid = be32_to_cpu(vhdr->next_cnid); sbi->file_count = be32_to_cpu(vhdr->file_count); sbi->folder_count = be32_to_cpu(vhdr->folder_count); sbi->data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift; if (!sbi->data_clump_blocks) sbi->data_clump_blocks = 1; sbi->rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift; if (!sbi->rsrc_clump_blocks) sbi->rsrc_clump_blocks = 1; /* Set up operations so we can load metadata */ sb->s_op = &hfsplus_sops; sb->s_maxbytes = MAX_LFS_FILESIZE; if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) { printk(KERN_WARNING "hfs: Filesystem was " "not cleanly unmounted, " "running fsck.hfsplus is recommended. " "mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) { /* nothing */ } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) && !(sb->s_flags & MS_RDONLY)) { printk(KERN_WARNING "hfs: write access to " "a journaled filesystem is not supported, " "use the force option at your own risk, " "mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } /* Load metadata objects (B*Trees) */ sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); if (!sbi->ext_tree) { printk(KERN_ERR "hfs: failed to load extents file\n"); goto out_free_vhdr; } sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); if (!sbi->cat_tree) { printk(KERN_ERR "hfs: failed to load catalog file\n"); goto out_close_ext_tree; } inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); if (IS_ERR(inode)) 
{ printk(KERN_ERR "hfs: failed to load allocation file\n"); err = PTR_ERR(inode); goto out_close_cat_tree; } sbi->alloc_file = inode; /* Load the root directory */ root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID); if (IS_ERR(root)) { printk(KERN_ERR "hfs: failed to load root directory\n"); err = PTR_ERR(root); goto out_put_alloc_file; } str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; str.name = HFSP_HIDDENDIR_NAME; hfs_find_init(sbi->cat_tree, &fd); hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { hfs_find_exit(&fd); if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) goto out_put_root; inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_put_root; } sbi->hidden_dir = inode; } else hfs_find_exit(&fd); if (!(sb->s_flags & MS_RDONLY)) { /* * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused * all three are registered with Apple for our use */ vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); vhdr->modify_date = hfsp_now2mt(); be32_add_cpu(&vhdr->write_count, 1); vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); hfsplus_sync_fs(sb, 1); if (!sbi->hidden_dir) { mutex_lock(&sbi->vh_mutex); sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str, sbi->hidden_dir); mutex_unlock(&sbi->vh_mutex); hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY); } } sb->s_d_op = &hfsplus_dentry_operations; sb->s_root = d_alloc_root(root); if (!sb->s_root) { err = -ENOMEM; goto out_put_hidden_dir; } unload_nls(sbi->nls); sbi->nls = nls; return 0; out_put_hidden_dir: iput(sbi->hidden_dir); out_put_root: iput(root); out_put_alloc_file: iput(sbi->alloc_file); out_close_cat_tree: hfs_btree_close(sbi->cat_tree); out_close_ext_tree: hfs_btree_close(sbi->ext_tree); out_free_vhdr: kfree(sbi->s_vhdr_buf); kfree(sbi->s_backup_vhdr_buf); out_unload_nls: 
unload_nls(sbi->nls); unload_nls(nls); kfree(sbi); out: return err; } MODULE_AUTHOR("Brad Boyer"); MODULE_DESCRIPTION("Extended Macintosh Filesystem"); MODULE_LICENSE("GPL"); static struct kmem_cache *hfsplus_inode_cachep; static struct inode *hfsplus_alloc_inode(struct super_block *sb) { struct hfsplus_inode_info *i; i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL); return i ? &i->vfs_inode : NULL; } static void hfsplus_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode)); } static void hfsplus_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hfsplus_i_callback); } #define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info) static struct dentry *hfsplus_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super); } static struct file_system_type hfsplus_fs_type = { .owner = THIS_MODULE, .name = "hfsplus", .mount = hfsplus_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static void hfsplus_init_once(void *p) { struct hfsplus_inode_info *i = p; inode_init_once(&i->vfs_inode); } static int __init init_hfsplus_fs(void) { int err; hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache", HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN, hfsplus_init_once); if (!hfsplus_inode_cachep) return -ENOMEM; err = register_filesystem(&hfsplus_fs_type); if (err) kmem_cache_destroy(hfsplus_inode_cachep); return err; } static void __exit exit_hfsplus_fs(void) { unregister_filesystem(&hfsplus_fs_type); kmem_cache_destroy(hfsplus_inode_cachep); } module_init(init_hfsplus_fs) module_exit(exit_hfsplus_fs)
gpl-2.0
hafidzduddin/samsung_codina_kernel
drivers/staging/iio/adc/ad7887_ring.c
2313
5027
/* * Copyright 2010-2011 Analog Devices Inc. * Copyright (C) 2008 Jonathan Cameron * * Licensed under the GPL-2. * * ad7887_ring.c */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include "../iio.h" #include "../ring_generic.h" #include "../ring_sw.h" #include "../trigger.h" #include "../sysfs.h" #include "ad7887.h" int ad7887_scan_from_ring(struct ad7887_state *st, long mask) { struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring; int count = 0, ret; u16 *ring_data; if (!(ring->scan_mask & mask)) { ret = -EBUSY; goto error_ret; } ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), GFP_KERNEL); if (ring_data == NULL) { ret = -ENOMEM; goto error_ret; } ret = ring->access->read_last(ring, (u8 *) ring_data); if (ret) goto error_free_ring_data; /* for single channel scan the result is stored with zero offset */ if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1))) count = 1; ret = be16_to_cpu(ring_data[count]); error_free_ring_data: kfree(ring_data); error_ret: return ret; } /** * ad7887_ring_preenable() setup the parameters of the ring before enabling * * The complex nature of the setting of the nuber of bytes per datum is due * to this driver currently ensuring that the timestamp is stored at an 8 * byte boundary. 
**/ static int ad7887_ring_preenable(struct iio_dev *indio_dev) { struct ad7887_state *st = indio_dev->dev_data; struct iio_ring_buffer *ring = indio_dev->ring; st->d_size = ring->scan_count * st->chip_info->channel[0].scan_type.storagebits / 8; if (ring->scan_timestamp) { st->d_size += sizeof(s64); if (st->d_size % sizeof(s64)) st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); } if (indio_dev->ring->access->set_bytes_per_datum) indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, st->d_size); switch (ring->scan_mask) { case (1 << 0): st->ring_msg = &st->msg[AD7887_CH0]; break; case (1 << 1): st->ring_msg = &st->msg[AD7887_CH1]; /* Dummy read: push CH1 setting down to hardware */ spi_sync(st->spi, st->ring_msg); break; case ((1 << 1) | (1 << 0)): st->ring_msg = &st->msg[AD7887_CH0_CH1]; break; } return 0; } static int ad7887_ring_postdisable(struct iio_dev *indio_dev) { struct ad7887_state *st = indio_dev->dev_data; /* dummy read: restore default CH0 settin */ return spi_sync(st->spi, &st->msg[AD7887_CH0]); } /** * ad7887_trigger_handler() bh of trigger launched polling to ring buffer * * Currently there is no option in this driver to disable the saving of * timestamps within the ring. 
**/ static irqreturn_t ad7887_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->private_data; struct ad7887_state *st = iio_dev_get_devdata(indio_dev); struct iio_ring_buffer *ring = indio_dev->ring; s64 time_ns; __u8 *buf; int b_sent; unsigned int bytes = ring->scan_count * st->chip_info->channel[0].scan_type.storagebits / 8; buf = kzalloc(st->d_size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; b_sent = spi_sync(st->spi, st->ring_msg); if (b_sent) goto done; time_ns = iio_get_time_ns(); memcpy(buf, st->data, bytes); if (ring->scan_timestamp) memcpy(buf + st->d_size - sizeof(s64), &time_ns, sizeof(time_ns)); indio_dev->ring->access->store_to(indio_dev->ring, buf, time_ns); done: kfree(buf); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } static const struct iio_ring_setup_ops ad7887_ring_setup_ops = { .preenable = &ad7887_ring_preenable, .postenable = &iio_triggered_ring_postenable, .predisable = &iio_triggered_ring_predisable, .postdisable = &ad7887_ring_postdisable, }; int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) { int ret; indio_dev->ring = iio_sw_rb_allocate(indio_dev); if (!indio_dev->ring) { ret = -ENOMEM; goto error_ret; } /* Effectively select the ring buffer implementation */ indio_dev->ring->access = &ring_sw_access_funcs; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &ad7887_trigger_handler, IRQF_ONESHOT, indio_dev, "ad7887_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_sw_rb; } /* Ring buffer functions - here trigger setup related */ indio_dev->ring->setup_ops = &ad7887_ring_setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_RING_TRIGGERED; return 0; error_deallocate_sw_rb: iio_sw_rb_free(indio_dev->ring); error_ret: return ret; } void ad7887_ring_cleanup(struct iio_dev *indio_dev) { /* ensure that the trigger has been detached */ if (indio_dev->trig) { 
iio_put_trigger(indio_dev->trig);
		iio_trigger_dettach_poll_func(indio_dev->trig,
					      indio_dev->pollfunc);
	}
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}
gpl-2.0
CarbonROM/android_kernel_asus_fugu
sound/soc/pxa/ttc-dkb.c
2313
5066
/* * linux/sound/soc/pxa/ttc_dkb.c * * Copyright (C) 2012 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/jack.h> #include <asm/mach-types.h> #include <sound/pcm_params.h> #include "../codecs/88pm860x-codec.h" static struct snd_soc_jack hs_jack, mic_jack; static struct snd_soc_jack_pin hs_jack_pins[] = { { .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE, }, }; static struct snd_soc_jack_pin mic_jack_pins[] = { { .pin = "Headset Mic 2", .mask = SND_JACK_MICROPHONE, }, }; /* ttc machine dapm widgets */ static const struct snd_soc_dapm_widget ttc_dapm_widgets[] = { SND_SOC_DAPM_HP("Headset Stereophone", NULL), SND_SOC_DAPM_LINE("Lineout Out 1", NULL), SND_SOC_DAPM_LINE("Lineout Out 2", NULL), SND_SOC_DAPM_SPK("Ext Speaker", NULL), SND_SOC_DAPM_MIC("Ext Mic 1", NULL), SND_SOC_DAPM_MIC("Headset Mic 2", NULL), SND_SOC_DAPM_MIC("Ext Mic 3", NULL), }; /* ttc machine audio map */ static const struct snd_soc_dapm_route ttc_audio_map[] = { {"Headset Stereophone", NULL, "HS1"}, {"Headset Stereophone", NULL, "HS2"}, {"Ext Speaker", NULL, "LSP"}, {"Ext Speaker", NULL, "LSN"}, {"Lineout Out 1", NULL, "LINEOUT1"}, {"Lineout Out 2", NULL, "LINEOUT2"}, {"MIC1P", 
NULL, "Mic1 Bias"}, {"MIC1N", NULL, "Mic1 Bias"}, {"Mic1 Bias", NULL, "Ext Mic 1"}, {"MIC2P", NULL, "Mic1 Bias"}, {"MIC2N", NULL, "Mic1 Bias"}, {"Mic1 Bias", NULL, "Headset Mic 2"}, {"MIC3P", NULL, "Mic3 Bias"}, {"MIC3N", NULL, "Mic3 Bias"}, {"Mic3 Bias", NULL, "Ext Mic 3"}, }; static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; /* connected pins */ snd_soc_dapm_enable_pin(dapm, "Ext Speaker"); snd_soc_dapm_enable_pin(dapm, "Ext Mic 1"); snd_soc_dapm_enable_pin(dapm, "Ext Mic 3"); snd_soc_dapm_disable_pin(dapm, "Headset Mic 2"); snd_soc_dapm_disable_pin(dapm, "Headset Stereophone"); /* Headset jack detection */ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2, &hs_jack); snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins), hs_jack_pins); snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE, &mic_jack); snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins), mic_jack_pins); /* headphone, microphone detection & headset short detection */ pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE, SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2); pm860x_mic_jack_detect(codec, &hs_jack, SND_JACK_MICROPHONE); return 0; } /* ttc/td-dkb digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link ttc_pm860x_hifi_dai[] = { { .name = "88pm860x i2s", .stream_name = "audio playback", .codec_name = "88pm860x-codec", .platform_name = "mmp-pcm-audio", .cpu_dai_name = "pxa-ssp-dai.1", .codec_dai_name = "88pm860x-i2s", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .init = ttc_pm860x_init, }, }; /* ttc/td audio machine driver */ static struct snd_soc_card ttc_dkb_card = { .name = "ttc-dkb-hifi", .dai_link = ttc_pm860x_hifi_dai, .num_links = ARRAY_SIZE(ttc_pm860x_hifi_dai), .dapm_widgets = ttc_dapm_widgets, .num_dapm_widgets = 
ARRAY_SIZE(ttc_dapm_widgets), .dapm_routes = ttc_audio_map, .num_dapm_routes = ARRAY_SIZE(ttc_audio_map), }; static int ttc_dkb_probe(struct platform_device *pdev) { struct snd_soc_card *card = &ttc_dkb_card; int ret; card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } static int ttc_dkb_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } static struct platform_driver ttc_dkb_driver = { .driver = { .name = "ttc-dkb-audio", .owner = THIS_MODULE, }, .probe = ttc_dkb_probe, .remove = ttc_dkb_remove, }; module_platform_driver(ttc_dkb_driver); /* Module information */ MODULE_AUTHOR("Qiao Zhou, <zhouqiao@marvell.com>"); MODULE_DESCRIPTION("ALSA SoC TTC DKB"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ttc-dkb-audio");
gpl-2.0
bizcuite/android_kernel_samsung_smdk4412
fs/yaffs2/yaffs_mtdif1.c
2825
9866
/* * YAFFS: Yet another FFS. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * This module provides the interface between yaffs_nand.c and the * MTD API. This version is used when the MTD interface supports the * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17, * and we have small-page NAND device. * * These functions are invoked via function pointers in yaffs_nand.c. * This replaces functionality provided by functions in yaffs_mtdif.c * and the yaffs_tags compatability functions in yaffs_tagscompat.c that are * called in yaffs_mtdif.c when the function pointers are NULL. * We assume the MTD layer is performing ECC (use_nand_ecc is true). */ #include "yportenv.h" #include "yaffs_trace.h" #include "yaffs_guts.h" #include "yaffs_packedtags1.h" #include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */ #include "yaffs_linux.h" #include "linux/kernel.h" #include "linux/version.h" #include "linux/types.h" #include "linux/mtd/mtd.h" #ifndef CONFIG_YAFFS_9BYTE_TAGS # define YTAG1_SIZE 8 #else # define YTAG1_SIZE 9 #endif /* Write a chunk (page) of data to NAND. * * Caller always provides ExtendedTags data which are converted to a more * compact (packed) form for storage in NAND. A mini-ECC runs over the * contents of the tags meta-data; used to valid the tags when read. * * - Pack ExtendedTags to packed_tags1 form * - Compute mini-ECC for packed_tags1 * - Write data and packed tags to NAND. 
* * Note: Due to the use of the packed_tags1 meta-data which does not include * a full sequence number (as found in the larger packed_tags2 form) it is * necessary for Yaffs to re-write a chunk/page (just once) to mark it as * discarded and dirty. This is not ideal: newer NAND parts are supposed * to be written just once. When Yaffs performs this operation, this * function is called with a NULL data pointer -- calling MTD write_oob * without data is valid usage (2.6.17). * * Any underlying MTD error results in YAFFS_FAIL. * Returns YAFFS_OK or YAFFS_FAIL. */ int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk, const u8 * data, const struct yaffs_ext_tags *etags) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_bytes = dev->data_bytes_per_chunk; loff_t addr = ((loff_t) nand_chunk) * chunk_bytes; struct mtd_oob_ops ops; struct yaffs_packed_tags1 pt1; int retval; /* we assume that packed_tags1 and struct yaffs_tags are compatible */ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12); compile_time_assertion(sizeof(struct yaffs_tags) == 8); yaffs_pack_tags1(&pt1, etags); yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1); /* When deleting a chunk, the upper layer provides only skeletal * etags, one with is_deleted set. However, we need to update the * tags, not erase them completely. So we use the NAND write property * that only zeroed-bits stick and set tag bytes to all-ones and * zero just the (not) deleted bit. */ #ifndef CONFIG_YAFFS_9BYTE_TAGS if (etags->is_deleted) { memset(&pt1, 0xff, 8); /* clear delete status bit to indicate deleted */ pt1.deleted = 0; } #else ((u8 *) & pt1)[8] = 0xff; if (etags->is_deleted) { memset(&pt1, 0xff, 8); /* zero page_status byte to indicate deleted */ ((u8 *) & pt1)[8] = 0; } #endif memset(&ops, 0, sizeof(ops)); ops.mode = MTD_OOB_AUTO; ops.len = (data) ? 
chunk_bytes : 0; ops.ooblen = YTAG1_SIZE; ops.datbuf = (u8 *) data; ops.oobbuf = (u8 *) & pt1; retval = mtd->write_oob(mtd, addr, &ops); if (retval) { yaffs_trace(YAFFS_TRACE_MTD, "write_oob failed, chunk %d, mtd error %d", nand_chunk, retval); } return retval ? YAFFS_FAIL : YAFFS_OK; } /* Return with empty ExtendedTags but add ecc_result. */ static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval) { if (etags) { memset(etags, 0, sizeof(*etags)); etags->ecc_result = ecc_result; } return retval; } /* Read a chunk (page) from NAND. * * Caller expects ExtendedTags data to be usable even on error; that is, * all members except ecc_result and block_bad are zeroed. * * - Check ECC results for data (if applicable) * - Check for blank/erased block (return empty ExtendedTags if blank) * - Check the packed_tags1 mini-ECC (correct if necessary/possible) * - Convert packed_tags1 to ExtendedTags * - Update ecc_result and block_bad members to refect state. * * Returns YAFFS_OK or YAFFS_FAIL. */ int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk, u8 * data, struct yaffs_ext_tags *etags) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_bytes = dev->data_bytes_per_chunk; loff_t addr = ((loff_t) nand_chunk) * chunk_bytes; int eccres = YAFFS_ECC_RESULT_NO_ERROR; struct mtd_oob_ops ops; struct yaffs_packed_tags1 pt1; int retval; int deleted; memset(&ops, 0, sizeof(ops)); ops.mode = MTD_OOB_AUTO; ops.len = (data) ? chunk_bytes : 0; ops.ooblen = YTAG1_SIZE; ops.datbuf = data; ops.oobbuf = (u8 *) & pt1; /* Read page and oob using MTD. * Check status and determine ECC result. 
*/ retval = mtd->read_oob(mtd, addr, &ops); if (retval) { yaffs_trace(YAFFS_TRACE_MTD, "read_oob failed, chunk %d, mtd error %d", nand_chunk, retval); } switch (retval) { case 0: /* no error */ break; case -EUCLEAN: /* MTD's ECC fixed the data */ eccres = YAFFS_ECC_RESULT_FIXED; dev->n_ecc_fixed++; break; case -EBADMSG: /* MTD's ECC could not fix the data */ dev->n_ecc_unfixed++; /* fall into... */ default: rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0); etags->block_bad = (mtd->block_isbad) (mtd, addr); return YAFFS_FAIL; } /* Check for a blank/erased chunk. */ if (yaffs_check_ff((u8 *) & pt1, 8)) { /* when blank, upper layers want ecc_result to be <= NO_ERROR */ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK); } #ifndef CONFIG_YAFFS_9BYTE_TAGS /* Read deleted status (bit) then return it to it's non-deleted * state before performing tags mini-ECC check. pt1.deleted is * inverted. */ deleted = !pt1.deleted; pt1.deleted = 1; #else deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7); #endif /* Check the packed tags mini-ECC and correct if necessary/possible. */ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1); switch (retval) { case 0: /* no tags error, use MTD result */ break; case 1: /* recovered tags-ECC error */ dev->n_tags_ecc_fixed++; if (eccres == YAFFS_ECC_RESULT_NO_ERROR) eccres = YAFFS_ECC_RESULT_FIXED; break; default: /* unrecovered tags-ECC error */ dev->n_tags_ecc_unfixed++; return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL); } /* Unpack the tags to extended form and set ECC result. * [set should_be_ff just to keep yaffs_unpack_tags1 happy] */ pt1.should_be_ff = 0xFFFFFFFF; yaffs_unpack_tags1(etags, &pt1); etags->ecc_result = eccres; /* Set deleted state */ etags->is_deleted = deleted; return YAFFS_OK; } /* Mark a block bad. * * This is a persistant state. * Use of this function should be rare. * * Returns YAFFS_OK or YAFFS_FAIL. 
*/ int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk; int retval; yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no); retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no); return (retval) ? YAFFS_FAIL : YAFFS_OK; } /* Check any MTD prerequists. * * Returns YAFFS_OK or YAFFS_FAIL. */ static int nandmtd1_test_prerequists(struct mtd_info *mtd) { /* 2.6.18 has mtd->ecclayout->oobavail */ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */ int oobavail = mtd->ecclayout->oobavail; if (oobavail < YTAG1_SIZE) { yaffs_trace(YAFFS_TRACE_ERROR, "mtd device has only %d bytes for tags, need %d", oobavail, YTAG1_SIZE); return YAFFS_FAIL; } return YAFFS_OK; } /* Query for the current state of a specific block. * * Examine the tags of the first chunk of the block and return the state: * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use * - YAFFS_BLOCK_STATE_EMPTY, the block is clean * * Always returns YAFFS_OK. */ int nandmtd1_query_block(struct yaffs_dev *dev, int block_no, enum yaffs_block_state *state_ptr, u32 * seq_ptr) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_num = block_no * dev->param.chunks_per_block; loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk; struct yaffs_ext_tags etags; int state = YAFFS_BLOCK_STATE_DEAD; int seqnum = 0; int retval; /* We don't yet have a good place to test for MTD config prerequists. * Do it here as we are called during the initial scan. 
*/ if (nandmtd1_test_prerequists(mtd) != YAFFS_OK) return YAFFS_FAIL; retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags); etags.block_bad = (mtd->block_isbad) (mtd, addr); if (etags.block_bad) { yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "block %d is marked bad", block_no); state = YAFFS_BLOCK_STATE_DEAD; } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) { /* bad tags, need to look more closely */ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; } else if (etags.chunk_used) { state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; seqnum = etags.seq_number; } else { state = YAFFS_BLOCK_STATE_EMPTY; } *state_ptr = state; *seq_ptr = seqnum; /* query always succeeds */ return YAFFS_OK; }
gpl-2.0
Coolexe/shooteru-ics-crc-3.0.16-e733189
arch/arm/mach-s5pv210/mach-torbreck.c
2825
3393
/* linux/arch/arm/mach-s5pv210/mach-torbreck.c * * Copyright (c) 2010 aESOP Community * http://www.aesop.or.kr/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/serial_core.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/regs-serial.h> #include <plat/s5pv210.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/s5p-time.h> /* Following are default values for UCON, ULCON and UFCON UART registers */ #define TORBRECK_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define TORBRECK_ULCON_DEFAULT S3C2410_LCON_CS8 #define TORBRECK_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S5PV210_UFCON_TXTRIG4 | \ S5PV210_UFCON_RXTRIG4) static struct s3c2410_uartcfg torbreck_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = TORBRECK_UCON_DEFAULT, .ulcon = TORBRECK_ULCON_DEFAULT, .ufcon = TORBRECK_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon = TORBRECK_UCON_DEFAULT, .ulcon = TORBRECK_ULCON_DEFAULT, .ufcon = TORBRECK_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = TORBRECK_UCON_DEFAULT, .ulcon = TORBRECK_ULCON_DEFAULT, .ufcon = TORBRECK_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = TORBRECK_UCON_DEFAULT, .ulcon = TORBRECK_ULCON_DEFAULT, .ufcon = TORBRECK_UFCON_DEFAULT, }, }; static struct platform_device *torbreck_devices[] __initdata = { &s5pv210_device_iis0, &s3c_device_cfcon, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, &s3c_device_hsmmc3, &s3c_device_i2c0, &s3c_device_i2c1, &s3c_device_i2c2, 
&s3c_device_rtc, &s3c_device_wdt, }; static struct i2c_board_info torbreck_i2c_devs0[] __initdata = { /* To Be Updated */ }; static struct i2c_board_info torbreck_i2c_devs1[] __initdata = { /* To Be Updated */ }; static struct i2c_board_info torbreck_i2c_devs2[] __initdata = { /* To Be Updated */ }; static void __init torbreck_map_io(void) { s5p_init_io(NULL, 0, S5P_VA_CHIPID); s3c24xx_init_clocks(24000000); s3c24xx_init_uarts(torbreck_uartcfgs, ARRAY_SIZE(torbreck_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void __init torbreck_machine_init(void) { s3c_i2c0_set_platdata(NULL); s3c_i2c1_set_platdata(NULL); s3c_i2c2_set_platdata(NULL); i2c_register_board_info(0, torbreck_i2c_devs0, ARRAY_SIZE(torbreck_i2c_devs0)); i2c_register_board_info(1, torbreck_i2c_devs1, ARRAY_SIZE(torbreck_i2c_devs1)); i2c_register_board_info(2, torbreck_i2c_devs2, ARRAY_SIZE(torbreck_i2c_devs2)); platform_add_devices(torbreck_devices, ARRAY_SIZE(torbreck_devices)); } MACHINE_START(TORBRECK, "TORBRECK") /* Maintainer: Hyunchul Ko <ghcstop@gmail.com> */ .boot_params = S5P_PA_SDRAM + 0x100, .init_irq = s5pv210_init_irq, .map_io = torbreck_map_io, .init_machine = torbreck_machine_init, .timer = &s5p_timer, MACHINE_END
gpl-2.0
actzendaria/xgt
drivers/input/misc/adxl34x-spi.c
3081
3145
/* * ADLX345/346 Three-Axis Digital Accelerometers (SPI Interface) * * Enter bugs at http://blackfin.uclinux.org/ * * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. * Licensed under the GPL-2 or later. */ #include <linux/input.h> /* BUS_SPI */ #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/pm.h> #include <linux/types.h> #include "adxl34x.h" #define MAX_SPI_FREQ_HZ 5000000 #define MAX_FREQ_NO_FIFODELAY 1500000 #define ADXL34X_CMD_MULTB (1 << 6) #define ADXL34X_CMD_READ (1 << 7) #define ADXL34X_WRITECMD(reg) (reg & 0x3F) #define ADXL34X_READCMD(reg) (ADXL34X_CMD_READ | (reg & 0x3F)) #define ADXL34X_READMB_CMD(reg) (ADXL34X_CMD_READ | ADXL34X_CMD_MULTB \ | (reg & 0x3F)) static int adxl34x_spi_read(struct device *dev, unsigned char reg) { struct spi_device *spi = to_spi_device(dev); unsigned char cmd; cmd = ADXL34X_READCMD(reg); return spi_w8r8(spi, cmd); } static int adxl34x_spi_write(struct device *dev, unsigned char reg, unsigned char val) { struct spi_device *spi = to_spi_device(dev); unsigned char buf[2]; buf[0] = ADXL34X_WRITECMD(reg); buf[1] = val; return spi_write(spi, buf, sizeof(buf)); } static int adxl34x_spi_read_block(struct device *dev, unsigned char reg, int count, void *buf) { struct spi_device *spi = to_spi_device(dev); ssize_t status; reg = ADXL34X_READMB_CMD(reg); status = spi_write_then_read(spi, &reg, 1, buf, count); return (status < 0) ? 
status : 0; } static const struct adxl34x_bus_ops adxl34x_spi_bops = { .bustype = BUS_SPI, .write = adxl34x_spi_write, .read = adxl34x_spi_read, .read_block = adxl34x_spi_read_block, }; static int adxl34x_spi_probe(struct spi_device *spi) { struct adxl34x *ac; /* don't exceed max specified SPI CLK frequency */ if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { dev_err(&spi->dev, "SPI CLK %d Hz too fast\n", spi->max_speed_hz); return -EINVAL; } ac = adxl34x_probe(&spi->dev, spi->irq, spi->max_speed_hz > MAX_FREQ_NO_FIFODELAY, &adxl34x_spi_bops); if (IS_ERR(ac)) return PTR_ERR(ac); spi_set_drvdata(spi, ac); return 0; } static int adxl34x_spi_remove(struct spi_device *spi) { struct adxl34x *ac = spi_get_drvdata(spi); return adxl34x_remove(ac); } #ifdef CONFIG_PM_SLEEP static int adxl34x_spi_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct adxl34x *ac = spi_get_drvdata(spi); adxl34x_suspend(ac); return 0; } static int adxl34x_spi_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct adxl34x *ac = spi_get_drvdata(spi); adxl34x_resume(ac); return 0; } #endif static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend, adxl34x_spi_resume); static struct spi_driver adxl34x_driver = { .driver = { .name = "adxl34x", .owner = THIS_MODULE, .pm = &adxl34x_spi_pm, }, .probe = adxl34x_spi_probe, .remove = adxl34x_spi_remove, }; module_spi_driver(adxl34x_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer SPI Bus Driver"); MODULE_LICENSE("GPL");
gpl-2.0
AICP/android_kernel_asus_tf201
drivers/staging/rtl8712/os_intfs.c
3081
15068
/****************************************************************************** * os_intfs.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com>. 
* Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _OS_INTFS_C_ #include <linux/module.h> #include <linux/init.h> #include <linux/kthread.h> #include "osdep_service.h" #include "drv_types.h" #include "xmit_osdep.h" #include "recv_osdep.h" #include "rtl871x_ioctl.h" #include "usb_osintf.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("rtl871x wireless lan driver"); MODULE_AUTHOR("Larry Finger"); static char ifname[IFNAMSIZ] = "wlan%d"; /* module param defaults */ static int chip_version = RTL8712_2ndCUT; static int rfintfs = HWPI; static int lbkmode = RTL8712_AIR_TRX; static int hci = RTL8712_USB; static int ampdu_enable = 1;/*for enable tx_ampdu*/ /* The video_mode variable is for vedio mode.*/ /* It may be specify when inserting module with video_mode=1 parameter.*/ static int video_mode = 1; /* enable video mode*/ /*Ndis802_11Infrastructure; infra, ad-hoc, auto*/ static int network_mode = Ndis802_11IBSS; static int channel = 1;/*ad-hoc support requirement*/ static int wireless_mode = WIRELESS_11BG; static int vrtl_carrier_sense = AUTO_VCS; static int vcs_type = RTS_CTS; static int frag_thresh = 2346; static int preamble = PREAMBLE_LONG;/*long, short, auto*/ static int scan_mode = 1;/*active, passive*/ static int adhoc_tx_pwr = 1; static int soft_ap; static int smart_ps = 1; static int power_mgnt = PS_MODE_ACTIVE; static int radio_enable = 1; static int long_retry_lmt = 7; static int short_retry_lmt = 7; static int busy_thresh = 40; static int ack_policy = NORMAL_ACK; static int mp_mode; static int software_encrypt; static int software_decrypt; static int wmm_enable;/* default is set to disable the wmm.*/ static int uapsd_enable; static int uapsd_max_sp = NO_LIMIT; static int uapsd_acbk_en; static int uapsd_acbe_en; static int uapsd_acvi_en; static int uapsd_acvo_en; static int ht_enable = 1; static int cbw40_enable = 1; static int rf_config = RTL8712_RF_1T2R; /* 1T2R*/ static int 
low_power; /* mac address to use instead of the one stored in Efuse */ char *r8712_initmac; static char *initmac; /* if wifi_test = 1, driver will disable the turbo mode and pass it to * firmware private. */ static int wifi_test = 0; module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR); module_param(wifi_test, int, 0644); module_param(initmac, charp, 0644); module_param(video_mode, int, 0644); module_param(chip_version, int, 0644); module_param(rfintfs, int, 0644); module_param(lbkmode, int, 0644); module_param(hci, int, 0644); module_param(network_mode, int, 0644); module_param(channel, int, 0644); module_param(mp_mode, int, 0644); module_param(wmm_enable, int, 0644); module_param(vrtl_carrier_sense, int, 0644); module_param(vcs_type, int, 0644); module_param(busy_thresh, int, 0644); module_param(ht_enable, int, 0644); module_param(cbw40_enable, int, 0644); module_param(ampdu_enable, int, 0644); module_param(rf_config, int, 0644); module_param(power_mgnt, int, 0644); module_param(low_power, int, 0644); MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default"); MODULE_PARM_DESC(initmac, "MAC-Address, default: use FUSE"); static uint loadparam(struct _adapter *padapter, struct net_device *pnetdev); static int netdev_open(struct net_device *pnetdev); static int netdev_close(struct net_device *pnetdev); static uint loadparam(struct _adapter *padapter, struct net_device *pnetdev) { uint status = _SUCCESS; struct registry_priv *registry_par = &padapter->registrypriv; registry_par->chip_version = (u8)chip_version; registry_par->rfintfs = (u8)rfintfs; registry_par->lbkmode = (u8)lbkmode; registry_par->hci = (u8)hci; registry_par->network_mode = (u8)network_mode; memcpy(registry_par->ssid.Ssid, "ANY", 3); registry_par->ssid.SsidLength = 3; registry_par->channel = (u8)channel; registry_par->wireless_mode = (u8)wireless_mode; registry_par->vrtl_carrier_sense = (u8)vrtl_carrier_sense ; registry_par->vcs_type = (u8)vcs_type; registry_par->frag_thresh = 
(u16)frag_thresh; registry_par->preamble = (u8)preamble; registry_par->scan_mode = (u8)scan_mode; registry_par->adhoc_tx_pwr = (u8)adhoc_tx_pwr; registry_par->soft_ap = (u8)soft_ap; registry_par->smart_ps = (u8)smart_ps; registry_par->power_mgnt = (u8)power_mgnt; registry_par->radio_enable = (u8)radio_enable; registry_par->long_retry_lmt = (u8)long_retry_lmt; registry_par->short_retry_lmt = (u8)short_retry_lmt; registry_par->busy_thresh = (u16)busy_thresh; registry_par->ack_policy = (u8)ack_policy; registry_par->mp_mode = (u8)mp_mode; registry_par->software_encrypt = (u8)software_encrypt; registry_par->software_decrypt = (u8)software_decrypt; /*UAPSD*/ registry_par->wmm_enable = (u8)wmm_enable; registry_par->uapsd_enable = (u8)uapsd_enable; registry_par->uapsd_max_sp = (u8)uapsd_max_sp; registry_par->uapsd_acbk_en = (u8)uapsd_acbk_en; registry_par->uapsd_acbe_en = (u8)uapsd_acbe_en; registry_par->uapsd_acvi_en = (u8)uapsd_acvi_en; registry_par->uapsd_acvo_en = (u8)uapsd_acvo_en; registry_par->ht_enable = (u8)ht_enable; registry_par->cbw40_enable = (u8)cbw40_enable; registry_par->ampdu_enable = (u8)ampdu_enable; registry_par->rf_config = (u8)rf_config; registry_par->low_power = (u8)low_power; registry_par->wifi_test = (u8) wifi_test; r8712_initmac = initmac; return status; } static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p) { struct _adapter *padapter = (struct _adapter *)_netdev_priv(pnetdev); struct sockaddr *addr = p; if (padapter->bup == false) memcpy(pnetdev->dev_addr, addr->sa_data, ETH_ALEN); return 0; } static struct net_device_stats *r871x_net_get_stats(struct net_device *pnetdev) { struct _adapter *padapter = (struct _adapter *) _netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); struct recv_priv *precvpriv = &(padapter->recvpriv); padapter->stats.tx_packets = pxmitpriv->tx_pkts; padapter->stats.rx_packets = precvpriv->rx_pkts; padapter->stats.tx_dropped = pxmitpriv->tx_drop; padapter->stats.rx_dropped = 
precvpriv->rx_drop; padapter->stats.tx_bytes = pxmitpriv->tx_bytes; padapter->stats.rx_bytes = precvpriv->rx_bytes; return &padapter->stats; } static const struct net_device_ops rtl8712_netdev_ops = { .ndo_open = netdev_open, .ndo_stop = netdev_close, .ndo_start_xmit = r8712_xmit_entry, .ndo_set_mac_address = r871x_net_set_mac_address, .ndo_get_stats = r871x_net_get_stats, .ndo_do_ioctl = r871x_ioctl, }; struct net_device *r8712_init_netdev(void) { struct _adapter *padapter; struct net_device *pnetdev; pnetdev = alloc_etherdev(sizeof(struct _adapter)); if (!pnetdev) return NULL; if (dev_alloc_name(pnetdev, ifname) < 0) { strcpy(ifname, "wlan%d"); dev_alloc_name(pnetdev, ifname); } padapter = (struct _adapter *) _netdev_priv(pnetdev); padapter->pnetdev = pnetdev; printk(KERN_INFO "r8712u: register rtl8712_netdev_ops to" " netdev_ops\n"); pnetdev->netdev_ops = &rtl8712_netdev_ops; pnetdev->watchdog_timeo = HZ; /* 1 second timeout */ pnetdev->wireless_handlers = (struct iw_handler_def *) &r871x_handlers_def; /*step 2.*/ loadparam(padapter, pnetdev); netif_carrier_off(pnetdev); padapter->pid = 0; /* Initial the PID value used for HW PBC.*/ return pnetdev; } static u32 start_drv_threads(struct _adapter *padapter) { padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, padapter->pnetdev->name); if (IS_ERR(padapter->cmdThread) < 0) return _FAIL; return _SUCCESS; } void r8712_stop_drv_threads(struct _adapter *padapter) { /*Below is to termindate r8712_cmd_thread & event_thread...*/ up(&padapter->cmdpriv.cmd_queue_sema); if (padapter->cmdThread) _down_sema(&padapter->cmdpriv.terminate_cmdthread_sema); padapter->cmdpriv.cmd_seq = 1; } static void start_drv_timers(struct _adapter *padapter) { _set_timer(&padapter->mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer, 5000); _set_timer(&padapter->mlmepriv.wdg_timer, 2000); } static void stop_drv_timers(struct _adapter *padapter) { _cancel_timer_ex(&padapter->mlmepriv.assoc_timer); 
_cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl. sitesurvey_ctrl_timer); _cancel_timer_ex(&padapter->securitypriv.tkip_timer); _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer); _cancel_timer_ex(&padapter->mlmepriv.dhcp_timer); _cancel_timer_ex(&padapter->mlmepriv.wdg_timer); } static u8 init_default_value(struct _adapter *padapter) { u8 ret = _SUCCESS; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; /*xmit_priv*/ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense; pxmitpriv->vcs = pregistrypriv->vcs_type; pxmitpriv->vcs_type = pregistrypriv->vcs_type; pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; pxmitpriv->frag_len = pregistrypriv->frag_thresh; /*ht_priv*/ { int i; struct ht_priv *phtpriv = &pmlmepriv->htpriv; phtpriv->ampdu_enable = false;/*set to disabled*/ for (i = 0; i < 16; i++) phtpriv->baddbareq_issued[i] = false; } /*security_priv*/ psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt; psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt; psecuritypriv->binstallGrpkey = _FAIL; /*pwrctrl_priv*/ /*registry_priv*/ r8712_init_registrypriv_dev_network(padapter); r8712_update_registrypriv_dev_network(padapter); /*misc.*/ return ret; } u8 r8712_init_drv_sw(struct _adapter *padapter) { if ((r8712_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) return _FAIL; padapter->cmdpriv.padapter = padapter; if ((r8712_init_evt_priv(&padapter->evtpriv)) == _FAIL) return _FAIL; if (r8712_init_mlme_priv(padapter) == _FAIL) return _FAIL; _r8712_init_xmit_priv(&padapter->xmitpriv, padapter); _r8712_init_recv_priv(&padapter->recvpriv, padapter); memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv)); _init_timer(&(padapter->securitypriv.tkip_timer), padapter->pnetdev, r8712_use_tkipkey_handler, padapter); _r8712_init_sta_priv(&padapter->stapriv); 
padapter->stapriv.padapter = padapter; r8712_init_bcmc_stainfo(padapter); r8712_init_pwrctrl_priv(padapter); sema_init(&(padapter->pwrctrlpriv.pnp_pwr_mgnt_sema), 0); mp871xinit(padapter); if (init_default_value(padapter) != _SUCCESS) return _FAIL; r8712_InitSwLeds(padapter); return _SUCCESS; } u8 r8712_free_drv_sw(struct _adapter *padapter) { struct net_device *pnetdev = (struct net_device *)padapter->pnetdev; r8712_free_cmd_priv(&padapter->cmdpriv); r8712_free_evt_priv(&padapter->evtpriv); r8712_DeInitSwLeds(padapter); r8712_free_mlme_priv(&padapter->mlmepriv); r8712_free_io_queue(padapter); _free_xmit_priv(&padapter->xmitpriv); _r8712_free_sta_priv(&padapter->stapriv); _r8712_free_recv_priv(&padapter->recvpriv); mp871xdeinit(padapter); if (pnetdev) os_free_netdev(pnetdev); return _SUCCESS; } static void enable_video_mode(struct _adapter *padapter, int cbw40_value) { /* bit 8: * 1 -> enable video mode to 96B AP * 0 -> disable video mode to 96B AP * bit 9: * 1 -> enable 40MHz mode * 0 -> disable 40MHz mode * bit 10: * 1 -> enable STBC * 0 -> disable STBC */ u32 intcmd = 0xf4000500; /* enable bit8, bit10*/ if (cbw40_value) { /* if the driver supports the 40M bandwidth, * we can enable the bit 9.*/ intcmd |= 0x200; } r8712_fw_cmd(padapter, intcmd); } static int netdev_open(struct net_device *pnetdev) { struct _adapter *padapter = (struct _adapter *)_netdev_priv(pnetdev); if (padapter->bup == false) { padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bup = true; if (rtl871x_hal_init(padapter) != _SUCCESS) goto netdev_open_error; if (r8712_initmac == NULL) /* Use the mac address stored in the Efuse */ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); else { /* We have to inform f/w to use user-supplied MAC * address. */ msleep(200); r8712_setMacAddr_cmd(padapter, (u8 *)pnetdev->dev_addr); /* * The "myid" function will get the wifi mac address * from eeprompriv structure instead of netdev * structure. 
So, we have to overwrite the mac_addr * stored in the eeprompriv structure. In this case, * the real mac address won't be used anymore. So that, * the eeprompriv.mac_addr should store the mac which * users specify. */ memcpy(padapter->eeprompriv.mac_addr, pnetdev->dev_addr, ETH_ALEN); } if (start_drv_threads(padapter) != _SUCCESS) goto netdev_open_error; if (padapter->dvobjpriv.inirp_init == NULL) goto netdev_open_error; else padapter->dvobjpriv.inirp_init(padapter); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } if (!netif_queue_stopped(pnetdev)) netif_start_queue(pnetdev); else netif_wake_queue(pnetdev); if (video_mode) enable_video_mode(padapter, cbw40_enable); /* start driver mlme relation timer */ start_drv_timers(padapter); padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK); return 0; netdev_open_error: padapter->bup = false; netif_carrier_off(pnetdev); netif_stop_queue(pnetdev); return -1; } static int netdev_close(struct net_device *pnetdev) { struct _adapter *padapter = (struct _adapter *) _netdev_priv(pnetdev); /* Close LED*/ padapter->ledpriv.LedControlHandler(padapter, LED_CTL_POWER_OFF); msleep(200); /*s1.*/ if (pnetdev) { if (!netif_queue_stopped(pnetdev)) netif_stop_queue(pnetdev); } /*s2.*/ /*s2-1. issue disassoc_cmd to fw*/ r8712_disassoc_cmd(padapter); /*s2-2. indicate disconnect to os*/ r8712_ind_disconnect(padapter); /*s2-3.*/ r8712_free_assoc_resources(padapter); /*s2-4.*/ r8712_free_network_queue(padapter); /*Stop driver mlme relation timer*/ stop_drv_timers(padapter); return 0; } #include "mlme_osdep.h"
gpl-2.0
boa19861105/android_LP5.0.2_kernel_htc_dlxpul
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
4873
25642
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "../base.h" #include "reg.h" #include "def.h" #include "fw.h" #include "sw.h" static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv) { return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ? true : false; } static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp; if (enable) { tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); } else { tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); /* Reserved for fw extension. 
* 0x81[7] is used for mac0 status , * so don't write this reg here * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/ } } static void _rtl92d_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 blocksize = sizeof(u32); u8 *bufferptr = (u8 *) buffer; u32 *pu4BytePtr = (u32 *) buffer; u32 i, offset, blockCount, remainSize; blockCount = size / blocksize; remainSize = size % blocksize; for (i = 0; i < blockCount; i++) { offset = i * blocksize; rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset), *(pu4BytePtr + i)); } if (remainSize) { offset = blockCount * blocksize; bufferptr += offset; for (i = 0; i < remainSize; i++) { rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS + offset + i), *(bufferptr + i)); } } } static void _rtl92d_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 value8; u8 u8page = (u8) (page & 0x07); value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); _rtl92d_fw_block_write(hw, buffer, size); } static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen) { u32 fwlen = *pfwlen; u8 remain = (u8) (fwlen % 4); remain = (remain == 0) ? 
0 : (4 - remain); while (remain > 0) { pfwbuf[fwlen] = 0; fwlen++; remain--; } *pfwlen = fwlen; } static void _rtl92d_write_fw(struct ieee80211_hw *hw, enum version_8192d version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *bufferPtr = (u8 *) buffer; u32 pagenums, remainSize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) _rtl92d_fill_dummy(bufferPtr, &size); pagenums = size / FW_8192D_PAGE_SIZE; remainSize = size % FW_8192D_PAGE_SIZE; if (pagenums > 8) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Page numbers should not greater then 8\n"); } for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), FW_8192D_PAGE_SIZE); } if (remainSize) { offset = pagenums * FW_8192D_PAGE_SIZE; page = pagenums; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), remainSize); } } static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 counter = 0; u32 value32; do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "chksum report faill ! REG_MCUFWDL:0x%08x\n", value32); return -EIO; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "Checksum report OK ! 
REG_MCUFWDL:0x%08x\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); return 0; } void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1b_tmp; u8 delay = 100; /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) break; udelay(50); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } RT_ASSERT((delay > 0), "8051 reset failed!\n"); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "=====> 8051 reset success (%d)\n", delay); } static int _rtl92d_fw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 counter; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n"); /* polling for FW ready */ counter = 0; do { if (rtlhal->interfaceindex == 0) { if (rtl_read_byte(rtlpriv, FW_MAC0_READY) & MAC0_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC0_READY)); return 0; } udelay(5); } else { if (rtl_read_byte(rtlpriv, FW_MAC1_READY) & MAC1_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC1_READY)); return 0; } udelay(5); } } while (counter++ < POLLING_READY_TIMEOUT_COUNT); if (rtlhal->interfaceindex == 0) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC0_READY)); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC1_READY)); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! 
REG_MCUFWDL:0x%08ul\n", rtl_read_dword(rtlpriv, REG_MCUFWDL)); return -1; } int rtl92d_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *pfwheader; u8 *pfwdata; u32 fwsize; int err; enum version_8192d version = rtlhal->version; u8 value; u32 count; bool fw_downloaded = false, fwdl_in_process = false; unsigned long flags; if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) return 1; fwsize = rtlhal->fwsize; pfwheader = (u8 *) rtlhal->pfirmware; pfwdata = (u8 *) rtlhal->pfirmware; rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader); rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n", rtlhal->fw_version, rtlhal->fw_subversion, GET_FIRMWARE_HDR_SIGNATURE(pfwheader)); if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Shift 32 bytes for FW header!!\n"); pfwdata = pfwdata + 32; fwsize = fwsize - 32; } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; if (fw_downloaded) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); goto exit; } else if (fwdl_in_process) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); for (count = 0; count < 5000; count++) { udelay(500); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (fw_downloaded) goto exit; else if (!fwdl_in_process) break; else RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Wait for another mac download fw\n"); } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); value = 
rtl_read_byte(rtlpriv, 0x1f); value |= BIT(5); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); } else { value = rtl_read_byte(rtlpriv, 0x1f); value |= BIT(5); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); } /* If 8051 is running in RAM code, driver should * inform Fw to reset by itself, or it will cause * download Fw fail.*/ /* 8051 RAM code */ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { rtl92d_firmware_selfreset(hw); rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); } _rtl92d_enable_fw_download(hw, true); _rtl92d_write_fw(hw, version, pfwdata, fwsize); _rtl92d_enable_fw_download(hw, false); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); err = _rtl92d_fw_free_to_go(hw); /* download fw over,clear 0x1f[5] */ value = rtl_read_byte(rtlpriv, 0x1f); value &= (~BIT(5)); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fw is not ready to run!\n"); goto exit; } else { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n"); } exit: err = _rtl92d_fw_init(hw); return err; } static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 val_hmetfr; bool result = false; val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); if (((val_hmetfr >> boxnum) & BIT(0)) == 0) result = true; return result; } static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 boxnum; u16 box_reg = 0, box_extreg = 0; u8 u1b_tmp; bool isfw_read = false; u8 buf_index = 0; bool bwrite_sucess = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; u32 h2c_waitcounter = 0; unsigned long flag; u8 idx; if 
(ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Return as RF is off!!!\n"); return; } RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n"); while (true) { spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); if (rtlhal->h2c_setinprogress) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "H2C set in progress! Wait to set..element_id(%d)\n", element_id); while (rtlhal->h2c_setinprogress) { spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); h2c_waitcounter++; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Wait 100 us (%d times)...\n", h2c_waitcounter); udelay(100); if (h2c_waitcounter > 1000) return; spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); } spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); } else { rtlhal->h2c_setinprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); break; } } while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write H2C fail because no trigger for FW INT!\n"); break; } boxnum = rtlhal->last_hmeboxnum; switch (boxnum) { case 0: box_reg = REG_HMEBOX_0; box_extreg = REG_HMEBOX_EXT_0; break; case 1: box_reg = REG_HMEBOX_1; box_extreg = REG_HMEBOX_EXT_1; break; case 2: box_reg = REG_HMEBOX_2; box_extreg = REG_HMEBOX_EXT_2; break; case 3: box_reg = REG_HMEBOX_3; box_extreg = REG_HMEBOX_EXT_3; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); while (!isfw_read) { wait_h2c_limmit--; if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Waiting too long for FW read clear HMEBox(%d)!\n", boxnum); break; } udelay(10); isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Waiting for FW read clear HMEBox(%d)!!! 
0x1BF = %2x\n", boxnum, u1b_tmp); } if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n", boxnum); break; } memset(boxcontent, 0, sizeof(boxcontent)); memset(boxextcontent, 0, sizeof(boxextcontent)); boxcontent[0] = element_id; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Write element_id box_reg(%4x) = %2x\n", box_reg, element_id); switch (cmd_len) { case 1: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 1); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 2: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 2); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 3: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 3); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 4: boxcontent[0] |= (BIT(7)); memcpy(boxextcontent, cmdbuffer + buf_index, 2); memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2); for (idx = 0; idx < 2; idx++) rtl_write_byte(rtlpriv, box_extreg + idx, boxextcontent[idx]); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 5: boxcontent[0] |= (BIT(7)); memcpy(boxextcontent, cmdbuffer + buf_index, 2); memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3); for (idx = 0; idx < 2; idx++) rtl_write_byte(rtlpriv, box_extreg + idx, boxextcontent[idx]); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } bwrite_sucess = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) rtlhal->last_hmeboxnum = 0; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "pHalData->last_hmeboxnum = %d\n", rtlhal->last_hmeboxnum); } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); rtlhal->h2c_setinprogress = false; 
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); } void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) { u32 tmp_cmdbuf[2]; memset(tmp_cmdbuf, 0, 8); memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); return; } void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1_h2c_set_pwrmode[3] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1); SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode", u1_h2c_set_pwrmode, 3); rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); } static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; u8 idx = 0; unsigned long flags; struct sk_buff *pskb; ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); if (pskb) kfree_skb(pskb); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pdesc = &ring->desc[idx]; /* discard output from call below */ rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); return true; } #define BEACON_PG 0 /*->1 */ #define PSPOLL_PG 2 #define NULL_PG 3 #define PROBERSP_PG 4 /*->5 */ #define TOTAL_RESERVED_PKT_LEN 768 static u8 
reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 1 beacon */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 2 ps-poll */ 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 3 null */ 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 4 probe_resp */ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 
5 probe_resp */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; u8 u1RsvdPageLoc[3] = { 0 }; bool dlok = false; u8 *beacon; u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; /*--------------------------------------------------------- (1) beacon ---------------------------------------------------------*/ beacon = &reserved_page_packet[BEACON_PG * 128]; SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); SET_80211_HDR_ADDRESS3(beacon, mac->bssid); /*------------------------------------------------------- (2) ps-poll --------------------------------------------------------*/ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data ---------------------------------------------------------*/ nullfunc = &reserved_page_packet[NULL_PG * 128]; SET_80211_HDR_ADDRESS1(nullfunc, 
mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); /*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", u1RsvdPageLoc, 3); skb = dev_alloc_skb(totalpacketlen); if (!skb) { dlok = false; } else { memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); rtstatus = _rtl92d_cmd_send_packet(hw, skb); if (rtstatus) dlok = true; } if (dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE", u1RsvdPageLoc, 3); rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE, sizeof(u1RsvdPageLoc), u1RsvdPageLoc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!\n"); } void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { u8 u1_joinbssrpt_parm[1] = {0}; SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); }
gpl-2.0
SaberMod/android_kernel_lge_hammerhead-sts
drivers/staging/sbe-2t3e3/module.c
8201
5388
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/interrupt.h> #include "2t3e3.h" static void check_leds(unsigned long arg) { struct card *card = (struct card *)arg; struct channel *channel0 = &card->channels[0]; static int blinker; update_led(channel0, ++blinker); if (has_two_ports(channel0->pdev)) update_led(&card->channels[1], blinker); card->timer.expires = jiffies + HZ / 10; add_timer(&card->timer); } static void t3e3_remove_channel(struct channel *channel) { struct pci_dev *pdev = channel->pdev; struct net_device *dev = channel->dev; /* system hangs if board asserts irq while module is unloaded */ cpld_stop_intr(channel); free_irq(dev->irq, dev); dc_drop_descriptor_list(channel); unregister_hdlc_device(dev); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static int __devinit t3e3_init_channel(struct channel *channel, struct pci_dev *pdev, struct card *card) { struct net_device *dev; unsigned int val; int err; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, "SBE 2T3E3"); if (err) goto disable; dev = alloc_hdlcdev(channel); if (!dev) { printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n"); goto free_regions; } t3e3_sc_init(channel); dev_to_priv(dev) = channel; channel->pdev = pdev; channel->dev = dev; channel->card = card; channel->addr = pci_resource_start(pdev, 0); if (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1) channel->h.slot = 1; else channel->h.slot = 0; if 
(setup_device(dev, channel)) goto free_regions; pci_read_config_dword(channel->pdev, 0x40, &val); /* mask sleep mode */ pci_write_config_dword(channel->pdev, 0x40, val & 0x3FFFFFFF); pci_read_config_byte(channel->pdev, PCI_CACHE_LINE_SIZE, &channel->h.cache_size); pci_read_config_dword(channel->pdev, PCI_COMMAND, &channel->h.command); t3e3_init(channel); if (request_irq(dev->irq, &t3e3_intr, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); goto free_regions; } pci_set_drvdata(pdev, channel); return 0; free_regions: pci_release_regions(pdev); disable: pci_disable_device(pdev); return err; } static void __devexit t3e3_remove_card(struct pci_dev *pdev) { struct channel *channel0 = pci_get_drvdata(pdev); struct card *card = channel0->card; del_timer(&card->timer); if (has_two_ports(channel0->pdev)) { t3e3_remove_channel(&card->channels[1]); pci_dev_put(card->channels[1].pdev); } t3e3_remove_channel(channel0); kfree(card); } static int __devinit t3e3_init_card(struct pci_dev *pdev, const struct pci_device_id *ent) { /* pdev points to channel #0 */ struct pci_dev *pdev1 = NULL; struct card *card; int channels = 1, err; if (has_two_ports(pdev)) { while ((pdev1 = pci_get_subsys(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_2T3E3_P1, pdev1))) if (pdev1->bus == pdev->bus && pdev1->devfn == pdev->devfn + 8 /* next device on the same bus */) break; /* found the second channel */ if (!pdev1) { printk(KERN_ERR "SBE 2T3E3" ": Can't find the second channel\n"); return -EFAULT; } channels = 2; /* holds the reference for pdev1 */ } card = kzalloc(sizeof(struct card) + channels * sizeof(struct channel), GFP_KERNEL); if (!card) { printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n"); return -ENOBUFS; } spin_lock_init(&card->bootrom_lock); card->bootrom_addr = pci_resource_start(pdev, 0); err = t3e3_init_channel(&card->channels[0], pdev, card); if (err) goto free_card; if (channels == 2) { err = 
t3e3_init_channel(&card->channels[1], pdev1, card); if (err) { t3e3_remove_channel(&card->channels[0]); goto free_card; } } /* start LED timer */ init_timer(&card->timer); card->timer.function = check_leds; card->timer.expires = jiffies + HZ / 10; card->timer.data = (unsigned long)card; add_timer(&card->timer); return 0; free_card: kfree(card); return err; } static struct pci_device_id t3e3_pci_tbl[] __devinitdata = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_T3E3, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_2T3E3_P0, 0, 0, 0 }, /* channel 1 will be initialized after channel 0 */ { 0, } }; static struct pci_driver t3e3_pci_driver = { .name = "SBE T3E3", .id_table = t3e3_pci_tbl, .probe = t3e3_init_card, .remove = t3e3_remove_card, }; static int __init t3e3_init_module(void) { return pci_register_driver(&t3e3_pci_driver); } static void __exit t3e3_cleanup_module(void) { pci_unregister_driver(&t3e3_pci_driver); } module_init(t3e3_init_module); module_exit(t3e3_cleanup_module); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, t3e3_pci_tbl);
gpl-2.0
TamsuiCM11/android_kernel_sony_msm7x27a
drivers/char/tpm/tpm_atmel.c
9737
5943
/* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <leendert@watson.ibm.com> * Dave Safford <safford@watson.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * */ #include "tpm.h" #include "tpm_atmel.h" /* write status bits */ enum tpm_atmel_write_status { ATML_STATUS_ABORT = 0x01, ATML_STATUS_LASTBYTE = 0x04 }; /* read status bits */ enum tpm_atmel_read_status { ATML_STATUS_BUSY = 0x01, ATML_STATUS_DATA_AVAIL = 0x02, ATML_STATUS_REWRITE = 0x04, ATML_STATUS_READY = 0x08 }; static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) { u8 status, *hdr = buf; u32 size; int i; __be32 *native_size; /* start reading header */ if (count < 6) return -EIO; for (i = 0; i < 6; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(chip->dev, "error reading header\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); } /* size of the data received */ native_size = (__force __be32 *) (hdr + 2); size = be32_to_cpu(*native_size); if (count < size) { dev_err(chip->dev, "Recv size(%d) less than available space\n", size); for (; i < size; i++) { /* clear the waiting data anyway */ status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(chip->dev, "error reading data\n"); return -EIO; } } return -EIO; } /* read all the data available */ for (; i < size; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(chip->dev, "error reading data\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); } /* make sure 
data available is gone */ status = ioread8(chip->vendor.iobase + 1); if (status & ATML_STATUS_DATA_AVAIL) { dev_err(chip->dev, "data available is stuck\n"); return -EIO; } return size; } static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) { int i; dev_dbg(chip->dev, "tpm_atml_send:\n"); for (i = 0; i < count; i++) { dev_dbg(chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); iowrite8(buf[i], chip->vendor.iobase); } return count; } static void tpm_atml_cancel(struct tpm_chip *chip) { iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1); } static u8 tpm_atml_status(struct tpm_chip *chip) { return ioread8(chip->vendor.iobase + 1); } static const struct file_operations atmel_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpm_open, .read = tpm_read, .write = tpm_write, .release = tpm_release, }; static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel); static struct attribute* atmel_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, NULL, }; static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs }; static const struct tpm_vendor_specific tpm_atmel = { .recv = tpm_atml_recv, .send = tpm_atml_send, .cancel = tpm_atml_cancel, .status = tpm_atml_status, .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL, .req_complete_val = ATML_STATUS_DATA_AVAIL, .req_canceled = ATML_STATUS_READY, .attr_group = &atmel_attr_grp, .miscdev = { .fops = &atmel_ops, }, }; static struct platform_device *pdev; static void atml_plat_remove(void) { struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); if (chip) { if (chip->vendor.have_region) atmel_release_region(chip->vendor.base, chip->vendor.region_size); atmel_put_base_addr(chip->vendor.iobase); tpm_remove_hardware(chip->dev); platform_device_unregister(pdev); } } 
static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg) { return tpm_pm_suspend(&dev->dev, msg); } static int tpm_atml_resume(struct platform_device *dev) { return tpm_pm_resume(&dev->dev); } static struct platform_driver atml_drv = { .driver = { .name = "tpm_atmel", .owner = THIS_MODULE, }, .suspend = tpm_atml_suspend, .resume = tpm_atml_resume, }; static int __init init_atmel(void) { int rc = 0; void __iomem *iobase = NULL; int have_region, region_size; unsigned long base; struct tpm_chip *chip; rc = platform_driver_register(&atml_drv); if (rc) return rc; if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) { rc = -ENODEV; goto err_unreg_drv; } have_region = (atmel_request_region (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); if (IS_ERR(pdev)) { rc = PTR_ERR(pdev); goto err_rel_reg; } if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) { rc = -ENODEV; goto err_unreg_dev; } chip->vendor.iobase = iobase; chip->vendor.base = base; chip->vendor.have_region = have_region; chip->vendor.region_size = region_size; return 0; err_unreg_dev: platform_device_unregister(pdev); err_rel_reg: atmel_put_base_addr(iobase); if (have_region) atmel_release_region(base, region_size); err_unreg_drv: platform_driver_unregister(&atml_drv); return rc; } static void __exit cleanup_atmel(void) { platform_driver_unregister(&atml_drv); atml_plat_remove(); } module_init(init_atmel); module_exit(cleanup_atmel); MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
galaxyfreak/18.6-eagle-stock
drivers/uwb/drp-ie.c
11529
9767
/* * UWB DRP IE management. * * Copyright (C) 2005-2006 Intel Corporation * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/uwb.h> #include "uwb-internal.h" /* * Return the reason code for a reservations's DRP IE. */ int uwb_rsv_reason_code(struct uwb_rsv *rsv) { static const int reason_codes[] = { [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return reason_codes[rsv->state]; } /* * Return the reason code 
for a reservations's companion DRP IE . */ int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) { static const int companion_reason_codes[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return companion_reason_codes[rsv->state]; } /* * Return the status bit for a reservations's DRP IE. */ int uwb_rsv_status(struct uwb_rsv *rsv) { static const int statuses[] = { [UWB_RSV_STATE_O_INITIATED] = 0, [UWB_RSV_STATE_O_PENDING] = 0, [UWB_RSV_STATE_O_MODIFIED] = 1, [UWB_RSV_STATE_O_ESTABLISHED] = 1, [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, [UWB_RSV_STATE_T_ACCEPTED] = 1, [UWB_RSV_STATE_T_CONFLICT] = 0, [UWB_RSV_STATE_T_PENDING] = 0, [UWB_RSV_STATE_T_DENIED] = 0, [UWB_RSV_STATE_T_RESIZED] = 1, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, }; return statuses[rsv->state]; } /* * Return the status bit for a reservations's companion DRP IE . */ int uwb_rsv_companion_status(struct uwb_rsv *rsv) { static const int companion_statuses[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, }; return companion_statuses[rsv->state]; } /* * Allocate a DRP IE. * * To save having to free/allocate a DRP IE when its MAS changes, * enough memory is allocated for the maxiumum number of DRP * allocation fields. This gives an overhead per reservation of up to * (UWB_NUM_ZONES - 1) * 4 = 60 octets. 
 */
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
	struct uwb_ie_drp *drp_ie;

	/* Allocate header plus room for the maximum number of alloc fields. */
	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
			UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
			GFP_KERNEL);
	if (drp_ie) {
		drp_ie->hdr.element_id = UWB_IE_DRP;
	}
	return drp_ie;
}

/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 *
 * Zones with identical per-zone MAS patterns are coalesced into one
 * allocation field (zone bitmap + shared MAS bitmap), then the fields
 * are converted to little-endian for transmission and hdr.length is
 * set to the resulting payload size.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
		struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		/* tmp_bmp is shifted right each iteration so the current
		 * zone's MAS always occupy the low UWB_MAS_PER_ZONE bits. */
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			/* NOTE(review): raw cast of the bitmap word to __le16
			 * — assumes a little-endian layout of the low 16
			 * bitmap bits; confirm on big-endian targets. */
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found) {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}
	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}

/**
 * uwb_drp_ie_update - update a reservation's DRP IE
 * @rsv: the reservation
 *
 * Allocates the DRP IE on first use (freed when the reservation goes
 * to UWB_RSV_STATE_NONE), refreshes all of its control fields and MAS
 * allocation from the reservation, and, when the reservation is being
 * moved, mirrors the configuration into a companion DRP IE.
 *
 * Returns 0 on success or -ENOMEM if an IE allocation fails.
 */
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
	struct uwb_ie_drp *drp_ie;
	struct uwb_rsv_move *mv;
	int unsafe;

	if (rsv->state == UWB_RSV_STATE_NONE) {
		/* Reservation torn down: drop the cached IE. */
		kfree(rsv->drp_ie);
		rsv->drp_ie = NULL;
		return 0;
	}

	unsafe = rsv->mas.unsafe ? 1 : 0;

	if (rsv->drp_ie == NULL) {
		rsv->drp_ie = uwb_drp_ie_alloc();
		if (rsv->drp_ie == NULL)
			return -ENOMEM;
	}
	drp_ie = rsv->drp_ie;

	uwb_ie_drp_set_unsafe(drp_ie, unsafe);
	uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
	uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
	uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
	uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
	uwb_ie_drp_set_type(drp_ie, rsv->type);

	if (uwb_rsv_is_owner(rsv)) {
		/* Owner advertises the target's address in the IE. */
		switch (rsv->target.type) {
		case UWB_RSV_TARGET_DEV:
			drp_ie->dev_addr = rsv->target.dev->dev_addr;
			break;
		case UWB_RSV_TARGET_DEVADDR:
			drp_ie->dev_addr = rsv->target.devaddr;
			break;
		}
	} else
		drp_ie->dev_addr = rsv->owner->dev_addr;

	uwb_drp_ie_from_bm(drp_ie, &rsv->mas);

	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (mv->companion_drp_ie == NULL) {
			mv->companion_drp_ie = uwb_drp_ie_alloc();
			if (mv->companion_drp_ie == NULL)
				return -ENOMEM;
		}
		drp_ie = mv->companion_drp_ie;

		/* keep all the same configuration of the main drp_ie */
		/* (copies only the fixed header part, not the alloc fields) */
		memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));

		/* FIXME: handle properly the unsafe bit */
		uwb_ie_drp_set_unsafe(drp_ie, 1);
		uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
		uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));

		uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
	}

	rsv->ie_valid = true;

	return 0;
}

/*
 * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
 *
 * We are given a zone id and the MAS bitmap of bits that need to be set in
 * this zone. Note that this zone may already have bits set and this only
 * adds settings - we cannot simply assign the MAS bitmap contents to the
 * zone contents. We iterate over the bits (MAS) in the zone and set the
 * bits that are set in the given MAS bitmap.
*/ static void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) { int mas; u16 mas_mask; for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { mas_mask = 1 << mas; if (mas_bm & mas_mask) set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); } } /** * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap * @mas: MAS bitmap that will be populated to correspond to the * allocation fields in the DRP IE * @drp_ie: the DRP IE that contains the allocation fields. * * The input format is an array of MAS allocation fields (16 bit Zone * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section * 16.8.6. The output is a full 256 bit MAS bitmap. * * We go over all the allocation fields, for each allocation field we * know which zones are impacted. We iterate over all the zones * impacted and call a function that will set the correct MAS bits in * each zone. */ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) { int numallocs = (drp_ie->hdr.length - 4) / 4; const struct uwb_drp_alloc *alloc; int cnt; u16 zone_bm, mas_bm; u8 zone; u16 zone_mask; bitmap_zero(bm->bm, UWB_NUM_MAS); for (cnt = 0; cnt < numallocs; cnt++) { alloc = &drp_ie->allocs[cnt]; zone_bm = le16_to_cpu(alloc->zone_bm); mas_bm = le16_to_cpu(alloc->mas_bm); for (zone = 0; zone < UWB_NUM_ZONES; zone++) { zone_mask = 1 << zone; if (zone_bm & zone_mask) uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); } } }
gpl-2.0
vlcchina/vlc-player-dev
src/text/filesystem.c
10
5984
/***************************************************************************** * filesystem.c: Common file system helpers ***************************************************************************** * Copyright (C) 2005-2006 VLC authors and VideoLAN * Copyright © 2005-2008 Rémi Denis-Courmont * * Authors: Rémi Denis-Courmont <rem # videolan.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ /***************************************************************************** * Preamble *****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <vlc_common.h> #include <vlc_fs.h> #include <vlc_rand.h> #include <assert.h> #include <stdio.h> #include <errno.h> #include <sys/types.h> #include <fcntl.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif /** * Opens a FILE pointer. * @param filename file path, using UTF-8 encoding * @param mode fopen file open mode * @return NULL on error, an open FILE pointer on success. 
 */
FILE *vlc_fopen (const char *filename, const char *mode)
{
    int rwflags = 0, oflags = 0;

    /* Translate the stdio mode string into open(2) flags so the file is
     * opened through vlc_open() (UTF-8 path handling), then wrapped. */
    for (const char *ptr = mode; *ptr; ptr++)
    {
        switch (*ptr)
        {
            case 'r':
                rwflags = O_RDONLY;
                break;

            case 'a':
                rwflags = O_WRONLY;
                oflags |= O_CREAT | O_APPEND;
                break;

            case 'w':
                rwflags = O_WRONLY;
                oflags |= O_CREAT | O_TRUNC;
                break;

            case '+':
                rwflags = O_RDWR;
                break;

#ifdef O_BINARY
            /* Text/binary distinction only exists on platforms that
             * define O_BINARY (e.g. Windows). */
            case 'b':
                oflags = (oflags & ~O_TEXT) | O_BINARY;
                break;

            case 't':
                oflags = (oflags & ~O_BINARY) | O_TEXT;
                break;
#endif
        }
    }

    int fd = vlc_open (filename, rwflags | oflags, 0666);
    if (fd == -1)
        return NULL;

    FILE *stream = fdopen (fd, mode);
    if (stream == NULL)
        close (fd);  /* fdopen failed: don't leak the descriptor */

    return stream;
}

/* Default filter for vlc_loaddir(): accept every entry. */
static int dummy_select( const char *str )
{
    (void)str;
    return 1;
}

/**
 * Does the same as vlc_scandir(), but takes an open directory pointer
 * instead of a directory path.
 */
int vlc_loaddir( DIR *dir, char ***namelist,
                  int (*select)( const char * ),
                  int (*compar)( const char **, const char ** ) )
{
    assert (dir);

    if (select == NULL)
        select = dummy_select;

    char **tab = NULL;
    unsigned num = 0;

    rewinddir (dir);

    /* Read all entries, growing the table geometrically (16, 32, ...). */
    for (unsigned size = 0;;)
    {
        errno = 0;
        char *entry = vlc_readdir (dir);
        if (entry == NULL)
        {
            /* NULL with errno set means a read error, not end of dir. */
            if (errno)
                goto error;
            break;
        }

        if (!select (entry))
        {
            free (entry);
            continue;
        }

        if (num >= size)
        {
            size = size ? (2 * size) : 16;
            char **newtab = realloc (tab, sizeof (*tab) * (size));

            if (unlikely(newtab == NULL))
            {
                free (entry);
                goto error;
            }
            tab = newtab;
        }
        tab[num++] = entry;
    }

    if (compar != NULL)
        qsort (tab, num, sizeof (*tab),
               (int (*)( const void *, const void *))compar);
    *namelist = tab;
    return num;

error:
    /* Free every collected name and the (possibly NULL) table. */
    for (unsigned i = 0; i < num; i++)
        free (tab[i]);
    free (tab);
    return -1;
}

/**
 * Selects file entries from a directory, as GNU C scandir().
 *
 * @param dirname UTF-8 directory path
 * @param pointer [OUT] pointer set, on successful completion, to the address
 * of a table of UTF-8 filenames. All filenames must be freed with free().
 * The table itself must be freed with free() as well.
* * @return How many file names were selected (possibly 0), * or -1 in case of error. */ int vlc_scandir( const char *dirname, char ***namelist, int (*select)( const char * ), int (*compar)( const char **, const char ** ) ) { DIR *dir = vlc_opendir (dirname); int val = -1; if (dir != NULL) { val = vlc_loaddir (dir, namelist, select, compar); closedir (dir); } return val; } int vlc_mkstemp( char *template ) { static const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; static const int i_digits = sizeof(digits)/sizeof(*digits) - 1; /* */ assert( template ); /* Check template validity */ const size_t i_length = strlen( template ); char *psz_rand = &template[i_length-6]; if( i_length < 6 || strcmp( psz_rand, "XXXXXX" ) ) { errno = EINVAL; return -1; } /* */ for( int i = 0; i < 256; i++ ) { /* Create a pseudo random file name */ uint8_t pi_rand[6]; vlc_rand_bytes( pi_rand, sizeof(pi_rand) ); for( int j = 0; j < 6; j++ ) psz_rand[j] = digits[pi_rand[j] % i_digits]; /* */ int fd = vlc_open( template, O_CREAT | O_EXCL | O_RDWR, 0600 ); if( fd >= 0 ) return fd; if( errno != EEXIST ) return -1; } errno = EEXIST; return -1; }
gpl-2.0
abhishekmurthy/Calligra
stage/plugins/pageeffects/slidewipe/KPrSlideWipeFromBottomStrategy.cpp
10
2068
/* This file is part of the KDE project
   Copyright (C) 2008 Thorsten Zachmann <zachmann@kde.org>

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public License
   along with this library; see the file COPYING.LIB.  If not, write to
   the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
*/

#include "KPrSlideWipeFromBottomStrategy.h"
#include "KPrSlideWipeEffectFactory.h"

#include <QWidget>
#include <QPainter>

// Slide-wipe page transition where the new page slides in from the bottom
// edge while the old page is progressively uncovered from the top.
KPrSlideWipeFromBottomStrategy::KPrSlideWipeFromBottomStrategy()
    : KPrPageEffectStrategy( KPrSlideWipeEffectFactory::FromBottom, "slideWipe", "fromBottom", false )
{
}

KPrSlideWipeFromBottomStrategy::~KPrSlideWipeFromBottomStrategy()
{
}

// The animation frame number runs from 0 to the widget height, i.e. one
// frame per pixel row the new page has slid upward.
void KPrSlideWipeFromBottomStrategy::setup( const KPrPageEffect::Data &data, QTimeLine &timeLine )
{
    const int frames = data.m_widget->height();
    timeLine.setFrameRange( 0, frames );
}

// Paint one frame: the top (height - currPos) rows still show the old page,
// the bottom currPos rows show the top slice of the incoming page.
void KPrSlideWipeFromBottomStrategy::paintStep( QPainter &p, int currPos, const KPrPageEffect::Data &data )
{
    const int h = data.m_widget->height();
    const int w = data.m_widget->width();
    p.drawPixmap( QPoint( 0, 0 ), data.m_oldPage, QRect( 0, 0, w, h - currPos ) );
    p.drawPixmap( QPoint( 0, h - currPos ), data.m_newPage, QRect( 0, 0, w, currPos ) );
}

// Request a repaint of only the band that changes this frame (the bottom
// strip occupied by the incoming page).
void KPrSlideWipeFromBottomStrategy::next( const KPrPageEffect::Data &data )
{
    const int frame = data.m_timeLine.frameForTime( data.m_currentTime );
    data.m_widget->update( 0, data.m_widget->height() - frame, data.m_widget->width(), frame );
}
gpl-2.0
stevenmizuno/QGIS
src/providers/grass/qgsgrassvectormap.cpp
10
19954
/*************************************************************************** qgsgrassvectormap.cpp ------------------- begin : September, 2015 copyright : (C) 2015 by Radim Blazek email : radim.blazek@gmail.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QFileInfo> #include <QMessageBox> #include "qgslinestring.h" #include "qgspolygon.h" #include "qgspoint.h" #include "qgslogger.h" #include "qgsgeometry.h" #include "qgsgrass.h" #include "qgsgrassvectormap.h" #include "qgsgrassvectormaplayer.h" #include "qgsgrassundocommand.h" extern "C" { #include <grass/version.h> #if defined(_MSC_VER) && defined(M_PI_4) #undef M_PI_4 //avoid redefinition warning #endif #include <grass/gprojects.h> #include <grass/gis.h> #include <grass/dbmi.h> #include <grass/vector.h> } QgsGrassVectorMap::QgsGrassVectorMap( const QgsGrassObject &grassObject ) : mGrassObject( grassObject ) , mValid( false ) , mOpen( false ) , mFrozen( false ) , mIsEdited( false ) , mVersion( 0 ) , mIs3d( false ) , mOldNumLines( 0 ) { QgsDebugMsg( "grassObject = " + grassObject.toString() ); openMap(); mOpen = true; } QgsGrassVectorMap::~QgsGrassVectorMap() { QgsDebugMsg( "grassObject = " + mGrassObject.toString() ); // TODO close QgsGrass::vectDestroyMapStruct( mMap ); } int QgsGrassVectorMap::userCount() const { int count = 0; Q_FOREACH ( QgsGrassVectorMapLayer *layer, mLayers ) { count += layer->userCount(); } QgsDebugMsg( QString( "count = %1" ).arg( count ) ); return count; } bool QgsGrassVectorMap::open() { QgsDebugMsg( toString() ); if ( mOpen ) { 
QgsDebugMsg( "already open" ); return true; } lockOpenClose(); bool result = openMap(); mOpen = true; unlockOpenClose(); return result; } void QgsGrassVectorMap::close() { QgsDebugMsg( toString() ); if ( !mOpen ) { QgsDebugMsg( "is not open" ); return; } lockOpenClose(); closeAllIterators(); // blocking closeMap(); mOpen = false; unlockOpenClose(); } bool QgsGrassVectorMap::openMap() { // TODO: refresh layers (reopen) QgsDebugMsg( toString() ); QgsGrass::lock(); QgsGrass::setLocation( mGrassObject.gisdbase(), mGrassObject.location() ); // Find the vector const char *ms = G_find_vector2( mGrassObject.name().toUtf8().data(), mGrassObject.mapset().toUtf8().data() ); if ( !ms ) { QgsDebugMsg( "Cannot find GRASS vector" ); QgsGrass::unlock(); return false; } // Read the time of vector dir before Vect_open_old, because it may take long time (when the vector // could be owerwritten) QFileInfo di( mGrassObject.mapsetPath() + "/vector/" + mGrassObject.name() ); mLastModified = di.lastModified(); di.setFile( mGrassObject.mapsetPath() + "/vector/" + mGrassObject.name() + "/dbln" ); mLastAttributesModified = di.lastModified(); mMap = QgsGrass::vectNewMapStruct(); // Do we have topology and cidx (level2) int level = -1; G_TRY { Vect_set_open_level( 2 ); level = Vect_open_old_head( mMap, mGrassObject.name().toUtf8().data(), mGrassObject.mapset().toUtf8().data() ); Vect_close( mMap ); } G_CATCH( QgsGrass::Exception & e ) { QgsGrass::warning( e ); level = -1; } if ( level == -1 ) { QgsDebugMsg( "Cannot open GRASS vector head" ); QgsGrass::unlock(); return false; } else if ( level == 1 ) { QMessageBox::StandardButton ret = QMessageBox::question( nullptr, QStringLiteral( "Warning" ), QObject::tr( "GRASS vector map %1 does not have topology. Build topology?" 
).arg( mGrassObject.name() ), QMessageBox::Ok | QMessageBox::Cancel ); if ( ret == QMessageBox::Cancel ) { QgsGrass::unlock(); return false; } } // Open vector G_TRY { Vect_set_open_level( level ); Vect_open_old( mMap, mGrassObject.name().toUtf8().data(), mGrassObject.mapset().toUtf8().data() ); } G_CATCH( QgsGrass::Exception & e ) { QgsGrass::warning( QStringLiteral( "Cannot open GRASS vector: %1" ).arg( e.what() ) ); QgsGrass::unlock(); return false; } if ( level == 1 ) { G_TRY { Vect_build( mMap ); } G_CATCH( QgsGrass::Exception & e ) { QgsGrass::warning( QStringLiteral( "Cannot build topology: %1" ).arg( e.what() ) ); QgsGrass::unlock(); return false; } } QgsDebugMsg( "GRASS map successfully opened" ); mIs3d = Vect_is_3d( mMap ); QgsGrass::unlock(); mValid = true; return true; } bool QgsGrassVectorMap::startEdit() { QgsDebugMsg( toString() ); lockOpenClose(); closeAllIterators(); // blocking // TODO: Can it still happen? QgsGrassVectorMapStore singleton is used now. #if 0 // Check number of maps (the problem may appear if static variables are not shared - runtime linker) if ( mMaps.size() == 0 ) { QMessageBox::warning( 0, "Warning", "No maps opened in mMaps, probably problem in runtime linking, " "static variables are not shared by provider and plugin." ); return false; } #endif /* Close map */ mValid = false; QgsGrass::lock(); // Mapset must be set before Vect_close() QgsGrass::setMapset( mGrassObject.gisdbase(), mGrassObject.location(), mGrassObject.mapset() ); int level = -1; G_TRY { Vect_close( mMap ); Vect_set_open_level( 2 ); level = Vect_open_update( mMap, mGrassObject.name().toUtf8().data(), mGrassObject.mapset().toUtf8().data() ); if ( level < 2 ) { QgsDebugMsg( "Cannot open GRASS vector for update on level 2." 
); } } G_CATCH( QgsGrass::Exception & e ) { Q_UNUSED( e ); QgsDebugMsg( QString( "Cannot open GRASS vector for update: %1" ).arg( e.what() ) ); } if ( level < 2 ) { // reopen vector for reading G_TRY { Vect_set_open_level( 2 ); level = Vect_open_old( mMap, mGrassObject.name().toUtf8().data(), mGrassObject.mapset().toUtf8().data() ); if ( level < 2 ) { QgsDebugMsg( QString( "Cannot reopen GRASS vector: %1" ).arg( QgsGrass::errorMessage() ) ); } } G_CATCH( QgsGrass::Exception & e ) { Q_UNUSED( e ); QgsDebugMsg( QString( "Cannot reopen GRASS vector: %1" ).arg( e.what() ) ); } if ( level >= 2 ) { mValid = true; } QgsGrass::unlock(); unlockOpenClose(); return false; } Vect_set_category_index_update( mMap ); // Write history Vect_hist_command( mMap ); mOldNumLines = Vect_get_num_lines( mMap ); QgsDebugMsg( QString( "Vector successfully reopened for update mOldNumLines = %1" ).arg( mOldNumLines ) ); mIsEdited = true; mValid = true; printDebug(); QgsGrass::unlock(); unlockOpenClose(); emit dataChanged(); return true; } bool QgsGrassVectorMap::closeEdit( bool newMap ) { Q_UNUSED( newMap ); QgsDebugMsg( toString() ); if ( !mValid || !mIsEdited ) { return false; } // mValid = false; // close() is checking mValid lockOpenClose(); closeAllIterators(); // blocking QgsGrass::lock(); mOldLids.clear(); mNewLids.clear(); mOldGeometries.clear(); mNewCats.clear(); clearUndoCommands(); // Mapset must be set before Vect_close() QgsGrass::setMapset( mGrassObject.gisdbase(), mGrassObject.location(), mGrassObject.mapset() ); Vect_build_partial( mMap, GV_BUILD_NONE ); Vect_build( mMap ); // TODO? #if 0 // If a new map was created close the map and return if ( newMap ) { QgsDebugMsg( QString( "mLayers.size() = %1" ).arg( mLayers.size() ) ); mUpdate = false; // Map must be set as valid otherwise it is not closed and topo is not written mValid = true; // TODO refresh layers ? 
//closeLayer( mLayerId ); QgsGrass::unlock(); unlockOpenClose(); return true; } #endif mIsEdited = false; QgsGrass::unlock(); closeAllIterators(); // blocking closeMap(); openMap(); reloadLayers(); mVersion++; unlockOpenClose(); emit dataChanged(); QgsDebugMsg( "edit closed" ); return mValid; } void QgsGrassVectorMap::clearUndoCommands() { for ( auto it = mUndoCommands.constBegin(); it != mUndoCommands.constEnd(); ++it ) { Q_FOREACH ( QgsGrassUndoCommand *command, it.value() ) { delete command; } } mUndoCommands.clear(); } QgsGrassVectorMapLayer *QgsGrassVectorMap::openLayer( int field ) { QgsDebugMsg( QString( "%1 field = %2" ).arg( toString() ).arg( field ) ); // There are 2 locks on openLayer(), it must be locked when the map is being opened/closed/updated // but that lock must not block closeLayer() because close/update map closes first all iterators // which call closeLayer() and using single lock would result in dead lock. lockOpenCloseLayer(); lockOpenClose(); QgsGrassVectorMapLayer *layer = nullptr; // Check if this layer is already open Q_FOREACH ( QgsGrassVectorMapLayer *l, mLayers ) { if ( l->field() == field ) { QgsDebugMsg( "Layer exists" ); layer = l; if ( layer->userCount() == 0 ) { layer->load(); } } } if ( !layer ) { layer = new QgsGrassVectorMapLayer( this, field ); layer->load(); mLayers << layer; } layer->addUser(); unlockOpenClose(); unlockOpenCloseLayer(); return layer; } void QgsGrassVectorMap::reloadLayers() { Q_FOREACH ( QgsGrassVectorMapLayer *l, mLayers ) { l->load(); } } void QgsGrassVectorMap::closeLayer( QgsGrassVectorMapLayer *layer ) { if ( !layer ) { return; } QgsDebugMsg( QString( "Close layer %1 usersCount = %2" ).arg( toString() ).arg( layer->userCount() ) ); lockOpenCloseLayer(); layer->removeUser(); if ( layer->userCount() == 0 ) // No more users, free sources { QgsDebugMsg( "No more users -> clear" ); layer->clear(); } QgsDebugMsg( QString( "%1 map users" ).arg( userCount() ) ); if ( userCount() == 0 ) { QgsDebugMsg( "No more 
map users -> close" ); // Once was probably causing dead lock; move to QgsGrassVectorMapStore? close(); } QgsDebugMsg( "layer closed" ); unlockOpenCloseLayer(); } void QgsGrassVectorMap::closeMap() { QgsDebugMsg( toString() ); QgsGrass::lock(); if ( !mValid ) { QgsDebugMsg( "map is not valid" ); } else { // Mapset must be set before Vect_close() QgsGrass::setMapset( mGrassObject.gisdbase(), mGrassObject.location(), mGrassObject.mapset() ); G_TRY { Vect_close( mMap ); QgsDebugMsg( "map closed" ); } G_CATCH( QgsGrass::Exception & e ) { QgsDebugMsg( "Vect_close failed:" + QString( e.what() ) ); } } QgsGrass::vectDestroyMapStruct( mMap ); mMap = nullptr; mOldNumLines = 0; mValid = false; QgsGrass::unlock(); } void QgsGrassVectorMap::update() { QgsDebugMsg( toString() ); lockOpenClose(); closeAllIterators(); // blocking closeMap(); openMap(); reloadLayers(); unlockOpenClose(); emit dataChanged(); } bool QgsGrassVectorMap::mapOutdated() { QString dp = mGrassObject.mapsetPath() + "/vector/" + mGrassObject.name(); QFileInfo di( dp ); if ( mLastModified < di.lastModified() ) { // If the cidx file has been deleted, the map is currently being modified // by an external tool. Do not update until the cidx file has been recreated. 
if ( !QFileInfo::exists( dp + "/cidx" ) ) { QgsDebugMsg( "The map is being modified and is unavailable : " + mGrassObject.toString() ); return false; } QgsDebugMsg( "The map was modified : " + mGrassObject.toString() ); return true; } return false; } bool QgsGrassVectorMap::attributesOutdated() { QString dp = mGrassObject.mapsetPath() + "/vector/" + mGrassObject.name() + "/dbln"; QFileInfo di( dp ); if ( mLastAttributesModified < di.lastModified() ) { QgsDebugMsg( "The attributes of the layer were modified : " + mGrassObject.toString() ); return true; } return false; } int QgsGrassVectorMap::numLines() { return ( Vect_get_num_lines( mMap ) ); } int QgsGrassVectorMap::numAreas() { return ( Vect_get_num_areas( mMap ) ); } QString QgsGrassVectorMap::toString() { return mGrassObject.mapsetPath() + "/" + mGrassObject.name(); } void QgsGrassVectorMap::printDebug() { if ( !mValid || !mMap ) { QgsDebugMsg( "map not valid" ); return; } G_TRY { #ifdef QGISDEBUG int ncidx = Vect_cidx_get_num_fields( mMap ); QgsDebugMsg( QString( "ncidx = %1" ).arg( ncidx ) ); for ( int i = 0; i < ncidx; i++ ) { int layer = Vect_cidx_get_field_number( mMap, i ); int ncats = Vect_cidx_get_num_cats_by_index( mMap, i ); QgsDebugMsg( QString( "i = %1 layer = %2 ncats = %3" ).arg( i ).arg( layer ).arg( ncats ) ); } #endif } G_CATCH( QgsGrass::Exception & e ) { Q_UNUSED( e ) QgsDebugMsg( "Cannot read info from map: " + QString( e.what() ) ); } } void QgsGrassVectorMap::lockOpenClose() { QgsDebugMsg( "lockOpenClose" ); mOpenCloseMutex.lock(); } void QgsGrassVectorMap::unlockOpenClose() { QgsDebugMsg( "unlockOpenClose" ); mOpenCloseMutex.unlock(); } void QgsGrassVectorMap::lockOpenCloseLayer() { QgsDebugMsg( "lockOpenCloseLayer" ); mOpenCloseLayerMutex.lock(); } void QgsGrassVectorMap::unlockOpenCloseLayer() { QgsDebugMsg( "unlockOpenCloseLayer" ); mOpenCloseLayerMutex.unlock(); } void QgsGrassVectorMap::lockReadWrite() { if ( isEdited() ) { QgsDebugMsgLevel( "lockReadWrite", 3 ); 
mReadWriteMutex.lock(); } } void QgsGrassVectorMap::unlockReadWrite() { if ( isEdited() ) { QgsDebugMsgLevel( "unlockReadWrite", 3 ); mReadWriteMutex.unlock(); } } QgsAbstractGeometry *QgsGrassVectorMap::lineGeometry( int id ) { QgsDebugMsgLevel( QString( "id = %1" ).arg( id ), 3 ); if ( !Vect_line_alive( mMap, id ) ) // should not happen (update mode!)? { QgsDebugMsg( QString( "line %1 is dead" ).arg( id ) ); return nullptr; } struct line_pnts *points = Vect_new_line_struct(); int type = Vect_read_line( mMap, points, nullptr, id ); QgsDebugMsgLevel( QString( "type = %1 n_points = %2" ).arg( type ).arg( points->n_points ), 3 ); if ( points->n_points == 0 ) { Vect_destroy_line_struct( points ); return nullptr; } QgsPointSequence pointList; pointList.reserve( points->n_points ); for ( int i = 0; i < points->n_points; i++ ) { pointList << QgsPoint( is3d() ? QgsWkbTypes::PointZ : QgsWkbTypes::Point, points->x[i], points->y[i], points->z[i] ); } Vect_destroy_line_struct( points ); if ( type & GV_POINTS ) { return pointList.first().clone(); } else if ( type & GV_LINES ) { QgsLineString *line = new QgsLineString(); line->setPoints( pointList ); return line; } else if ( type & GV_FACE ) { QgsPolygon *polygon = new QgsPolygon(); QgsLineString *ring = new QgsLineString(); ring->setPoints( pointList ); polygon->setExteriorRing( ring ); return polygon; } QgsDebugMsg( QString( "unknown type = %1" ).arg( type ) ); return nullptr; } QgsAbstractGeometry *QgsGrassVectorMap::nodeGeometry( int id ) { QgsDebugMsgLevel( QString( "id = %1" ).arg( id ), 3 ); double x, y, z; Vect_get_node_coor( mMap, id, &x, &y, &z ); return new QgsPoint( is3d() ? 
QgsWkbTypes::PointZ : QgsWkbTypes::Point, x, y, z ); } QgsAbstractGeometry *QgsGrassVectorMap::areaGeometry( int id ) { QgsDebugMsgLevel( QString( "id = %1" ).arg( id ), 3 ); QgsPolygon *polygon = new QgsPolygon(); struct line_pnts *points = Vect_new_line_struct(); QgsDebugMsgLevel( QString( "points= %1" ).arg( ( quint64 )points ), 3 ); // Vect_get_area_points and Vect_get_isle_pointsis using static variable -> lock // TODO: Faster to lock the whole feature iterator? Maybe only for areas? QgsGrass::lock(); Vect_get_area_points( mMap, id, points ); QgsPointSequence pointList; pointList.reserve( points->n_points ); for ( int i = 0; i < points->n_points; i++ ) { pointList << QgsPoint( is3d() ? QgsWkbTypes::PointZ : QgsWkbTypes::Point, points->x[i], points->y[i], points->z[i] ); } QgsLineString *ring = new QgsLineString(); ring->setPoints( pointList ); polygon->setExteriorRing( ring ); int nIsles = Vect_get_area_num_isles( mMap, id ); for ( int i = 0; i < nIsles; i++ ) { pointList.clear(); int isle = Vect_get_area_isle( mMap, id, i ); Vect_get_isle_points( mMap, isle, points ); pointList.reserve( points->n_points ); for ( int i = 0; i < points->n_points; i++ ) { pointList << QgsPoint( is3d() ? QgsWkbTypes::PointZ : QgsWkbTypes::Point, points->x[i], points->y[i], points->z[i] ); } ring = new QgsLineString(); ring->setPoints( pointList ); polygon->addInteriorRing( ring ); } QgsGrass::unlock(); Vect_destroy_line_struct( points ); return polygon; } void QgsGrassVectorMap::closeAllIterators() { QgsDebugMsg( toString() ); // cancel and close all iterator // Iterators must be connected properly, otherwise may it result in dead lock! 
emit cancelIterators(); // non blocking emit closeIterators(); // blocking QgsDebugMsg( "iterators closed" ); } //------------------------------------ QgsGrassVectorMapStore ------------------------------------ QgsGrassVectorMapStore *QgsGrassVectorMapStore::sStore = nullptr; QgsGrassVectorMapStore *QgsGrassVectorMapStore::instance() { static QgsGrassVectorMapStore sInstance; if ( sStore ) { return sStore; } return &sInstance; } QgsGrassVectorMap *QgsGrassVectorMapStore::openMap( const QgsGrassObject &grassObject ) { QgsDebugMsg( "grassObject = " + grassObject.toString() ); mMutex.lock(); QgsGrassVectorMap *map = nullptr; // Check if this map is already open Q_FOREACH ( QgsGrassVectorMap *m, mMaps ) { if ( m->grassObject() == grassObject ) { QgsDebugMsg( "The map already exists" ); map = m; if ( !map->isOpen() ) { map->open(); } } } if ( !map ) { map = new QgsGrassVectorMap( grassObject ); mMaps << map; } mMutex.unlock(); return map; } QgsGrassVectorMap::TopoSymbol QgsGrassVectorMap::topoSymbol( int lid ) { int type = Vect_read_line( mMap, nullptr, nullptr, lid ); TopoSymbol symbol = TopoUndefined; if ( type == GV_POINT ) { symbol = TopoPoint; } else if ( type == GV_CENTROID ) { int area = Vect_get_centroid_area( mMap, lid ); if ( area == 0 ) symbol = TopoCentroidOut; else if ( area > 0 ) symbol = TopoCentroidIn; else symbol = TopoCentroidDupl; /* area < 0 */ } else if ( type == GV_LINE ) { symbol = TopoLine; } else if ( type == GV_BOUNDARY ) { int left, right; Vect_get_line_areas( mMap, lid, &left, &right ); if ( left != 0 && right != 0 ) { symbol = TopoBoundaryOk; } else if ( left == 0 && right == 0 ) { symbol = TopoBoundaryError; } else if ( left == 0 ) { symbol = TopoBoundaryErrorLeft; } else if ( right == 0 ) { symbol = TopoBoundaryErrorRight; } } QgsDebugMsgLevel( QString( "lid = %1 type = %2 symbol = %3" ).arg( lid ).arg( type ).arg( symbol ), 3 ); return symbol; }
gpl-2.0
ysy/openwrt
package/rt2860v2/files/rt2860v2/common/cmm_asic.c
10
125614
/* *************************************************************************** * Ralink Tech Inc. * 4F, No. 2 Technology 5th Rd. * Science-based Industrial Park * Hsin-chu, Taiwan, R.O.C. * * (c) Copyright 2002-2004, Ralink Technology, Inc. * * All rights reserved. Ralink's source code is an unpublished work and the * use of a copyright notice does not imply otherwise. This source code * contains confidential trade secret material of Ralink Tech. Any attemp * or participation in deciphering, decoding, reverse engineering or in any * way altering the source code is stricitly prohibited, unless the prior * written consent of Ralink Technology, Inc. is obtained. *************************************************************************** Module Name: cmm_asic.c Abstract: Functions used to communicate with ASIC Revision History: Who When What -------- ---------- ---------------------------------------------- */ #include "rt_config.h" #ifdef RT2880 /* Reset the RFIC setting to new series */ RTMP_RF_REGS RF2850RegTable[] = { /* ch R1 R2 R3(TX0~4=0) R4*/ {1, 0x98402ecc, 0x984c0786, 0x981ab455, 0x9800510b}, {2, 0x98402ecc, 0x984c0786, 0x981a8a55, 0x9800519f}, {3, 0x98402ecc, 0x984c078a, 0x981a8a55, 0x9800518b}, {4, 0x98402ecc, 0x984c078a, 0x981a8a55, 0x9800519f}, {5, 0x98402ecc, 0x984c078e, 0x981a8a55, 0x9800518b}, {6, 0x98402ecc, 0x984c078e, 0x981a8a55, 0x9800519f}, {7, 0x98402ecc, 0x984c0792, 0x981a8a55, 0x9800518b}, {8, 0x98402ecc, 0x984c0792, 0x981a8a55, 0x9800519f}, {9, 0x98402ecc, 0x984c0796, 0x981a8a55, 0x9800518b}, {10, 0x98402ecc, 0x984c0796, 0x981a8a55, 0x9800519f}, {11, 0x98402ecc, 0x984c079a, 0x981a8a55, 0x9800518b}, {12, 0x98402ecc, 0x984c079a, 0x981a8a55, 0x9800519f}, {13, 0x98402ecc, 0x984c079e, 0x981a8a55, 0x9800518b}, {14, 0x98402ecc, 0x984c07a2, 0x981a8a55, 0x98005193}, /* 802.11 UNI / HyperLan 2*/ {36, 0x98402ecc, 0x984c099a, 0x98198a55, 0x980ed1a3}, {38, 0x98402ecc, 0x984c099e, 0x98198a55, 0x980ed193}, {40, 0x98402ec8, 0x984c0682, 0x98198a55, 
0x980ed183}, {44, 0x98402ec8, 0x984c0682, 0x98198a55, 0x980ed1a3}, {46, 0x98402ec8, 0x984c0686, 0x98198a55, 0x980ed18b}, {48, 0x98402ec8, 0x984c0686, 0x98198a55, 0x980ed19b}, {52, 0x98402ec8, 0x984c068a, 0x98198a55, 0x980ed193}, {54, 0x98402ec8, 0x984c068a, 0x98198a55, 0x980ed1a3}, {56, 0x98402ec8, 0x984c068e, 0x98198a55, 0x980ed18b}, {60, 0x98402ec8, 0x984c0692, 0x98198a55, 0x980ed183}, {62, 0x98402ec8, 0x984c0692, 0x98198a55, 0x980ed193}, {64, 0x98402ec8, 0x984c0692, 0x98198a55, 0x980ed1a3}, /* Plugfest#4, Day4, change RFR3 left4th 9->5.*/ /* 802.11 HyperLan 2*/ {100, 0x98402ec8, 0x984c06b2, 0x981b8a55, 0x980ed783}, /* 2008.04.30 modified */ /* The system team has AN to improve the EVM value */ /* for channel 102 to 108 for the RT2850/RT2750 dual band solution.*/ {102, 0x98402ec8, 0x985c06b2, 0x985b8a55, 0x980ed793}, {104, 0x98402ec8, 0x985c06b2, 0x985b8a55, 0x980ed1a3}, {108, 0x98402ecc, 0x985c0a32, 0x985b8a55, 0x980ed193}, {110, 0x98402ecc, 0x984c0a36, 0x981b8a55, 0x980ed183}, {112, 0x98402ecc, 0x984c0a36, 0x981b8a55, 0x980ed19b}, {116, 0x98402ecc, 0x984c0a3a, 0x981b8a55, 0x980ed1a3}, {118, 0x98402ecc, 0x984c0a3e, 0x981b8a55, 0x980ed193}, {120, 0x98402ec4, 0x984c0382, 0x981b8a55, 0x980ed183}, {124, 0x98402ec4, 0x984c0382, 0x981b8a55, 0x980ed193}, {126, 0x98402ec4, 0x984c0382, 0x981b8a55, 0x980ed15b}, /* 0x980ed1bb->0x980ed15b required by Rory 20070927*/ {128, 0x98402ec4, 0x984c0382, 0x981b8a55, 0x980ed1a3}, {132, 0x98402ec4, 0x984c0386, 0x981b8a55, 0x980ed18b}, {134, 0x98402ec4, 0x984c0386, 0x981b8a55, 0x980ed193}, {136, 0x98402ec4, 0x984c0386, 0x981b8a55, 0x980ed19b}, {140, 0x98402ec4, 0x984c038a, 0x981b8a55, 0x980ed183}, /* 802.11 UNII*/ {149, 0x98402ec4, 0x984c038a, 0x981b8a55, 0x980ed1a7}, {151, 0x98402ec4, 0x984c038e, 0x981b8a55, 0x980ed187}, {153, 0x98402ec4, 0x984c038e, 0x981b8a55, 0x980ed18f}, {157, 0x98402ec4, 0x984c038e, 0x981b8a55, 0x980ed19f}, {159, 0x98402ec4, 0x984c038e, 0x981b8a55, 0x980ed1a7}, {161, 0x98402ec4, 0x984c0392, 0x981b8a55, 
/*
 * Tail of RF2850RegTable (the table definition begins earlier in the file).
 * Each entry is {channel, R1, R2, R3, R4}: RF register programming for one
 * channel.  Entries below cover the upper UNII band and Japan channels.
 */
0x980ed187},
	{165, 0x98402ec4, 0x984c0392, 0x981b8a55, 0x980ed197},
	{167, 0x98402ec4, 0x984c03d2, 0x981b9855, 0x9815531f},
	{169, 0x98402ec4, 0x984c03d2, 0x981b9855, 0x98155327},
	{171, 0x98402ec4, 0x984c03d6, 0x981b9855, 0x98155307},
	{173, 0x98402ec4, 0x984c03d6, 0x981b9855, 0x9815530f},

	/* Japan */
	{184, 0x95002ccc, 0x9500491e, 0x9509be55, 0x950c0a0b},
	{188, 0x95002ccc, 0x95004922, 0x9509be55, 0x950c0a13},
	{192, 0x95002ccc, 0x95004926, 0x9509be55, 0x950c0a1b},
	{196, 0x95002ccc, 0x9500492a, 0x9509be55, 0x950c0a23},
	{208, 0x95002ccc, 0x9500493a, 0x9509be55, 0x950c0a13},
	{212, 0x95002ccc, 0x9500493e, 0x9509be55, 0x950c0a1b},
	{216, 0x95002ccc, 0x95004982, 0x9509be55, 0x950c0a23},

	/* still lack of MMAC(Japan) ch 34,38,42,46 */
};

/* Number of entries in RF2850RegTable. */
UCHAR NUM_OF_2850_CHNL = (sizeof(RF2850RegTable) / sizeof(RTMP_RF_REGS));
#endif /* RT2880 */

/* Per-rate TX power adjustment encodings applied through BBP R1. */
#define MDSM_NORMAL_TX_POWER					0x00
#define MDSM_DROP_TX_POWER_BY_6dBm				0x01
#define MDSM_DROP_TX_POWER_BY_12dBm				0x02
#define MDSM_ADD_TX_POWER_BY_6dBm				0x03
#define MDSM_BBP_R1_STATIC_TX_POWER_CONTROL_MASK	0x03

#ifdef CONFIG_STA_SUPPORT
/*
 * Program the ASIC hardware rate auto-fallback registers from the software
 * rate-switch table.
 *
 * pRateTable: rate-switch table; byte 0 holds the entry count, the entries
 *             themselves start at element index 1.
 *
 * For each table entry i (i >= 1) the hardware fallback target is entry
 * i-1, i.e. the next lower rate in the table.  The per-MCS fallback fields
 * of HT_FBK_CFG0/1 (HT MCS 0-15), LG_FBK_CFG0/1 (CCK/OFDM) and - for the
 * AGS / RT2883 / RT3883 3-stream cases - TX_FBK_CFG_3S_0/1 (HT MCS 16-23)
 * are filled in accordingly and then written to the chip.
 */
VOID AsicUpdateAutoFallBackTable(
	IN PRTMP_ADAPTER pAd,
	IN PUCHAR pRateTable)
{
	UCHAR i;
	HT_FBK_CFG0_STRUC HtCfg0;
	HT_FBK_CFG1_STRUC HtCfg1;
	LG_FBK_CFG0_STRUC LgCfg0;
	LG_FBK_CFG1_STRUC LgCfg1;
#ifdef DOT11N_SS3_SUPPORT
	TX_FBK_CFG_3S_0_STRUC Ht3SSCfg0;
	TX_FBK_CFG_3S_1_STRUC Ht3SSCfg1;
#endif /* DOT11N_SS3_SUPPORT */
	PRTMP_TX_RATE_SWITCH pCurrTxRate, pNextTxRate;
#ifdef AGS_SUPPORT
	PRTMP_TX_RATE_SWITCH_AGS pCurrTxRate_AGS, pNextTxRate_AGS;
	BOOLEAN bUseAGS = FALSE;

	if (AGS_IS_USING(pAd, pRateTable))
	{
		DBGPRINT(RT_DEBUG_TRACE, ("%s: Use AGS\n", __FUNCTION__));
		bUseAGS = TRUE;
		Ht3SSCfg0.word = 0x1211100f;
		Ht3SSCfg1.word = 0x16151413;
	}
#endif /* AGS_SUPPORT */
#ifdef DOT11N_SS3_SUPPORT
	/* NOTE(review): Ht3SSCfg0/1 are initialized only here (RT3883) and in
	 * the AGS branch above, yet the register write at the bottom also runs
	 * when IS_RT2883() holds - on RT2883 without AGS that writes
	 * uninitialized stack data to TX_FBK_CFG_3S_0/1.  Confirm intended
	 * RT2883 defaults against the vendor tree. */
	if (IS_RT3883(pAd))
	{
		Ht3SSCfg0.word = 0x12111008;
		Ht3SSCfg1.word = 0x16151413;
	}
#endif /* DOT11N_SS3_SUPPORT */

	/* set to initial value */
	HtCfg0.word = 0x65432100;
	HtCfg1.word = 0xedcba980;
	LgCfg0.word = 0xedcba988;
	LgCfg1.word = 0x00002100;

#ifdef NEW_RATE_ADAPT_SUPPORT
	/* Use standard fallback if using new rate table */
	if (ADAPT_RATE_TABLE(pRateTable))
		goto skipUpdate;
#endif /* NEW_RATE_ADAPT_SUPPORT */

	/* Start the "next" (i.e. fallback) rate at entry 0; the +1 steps over
	 * the table's length header element. */
#ifdef AGS_SUPPORT
	if (bUseAGS)
	{
		pNextTxRate_AGS = (PRTMP_TX_RATE_SWITCH_AGS)pRateTable+1;
		pNextTxRate = (PRTMP_TX_RATE_SWITCH)pNextTxRate_AGS;
	}
	else
#endif /* AGS_SUPPORT */
	pNextTxRate = (PRTMP_TX_RATE_SWITCH)pRateTable+1;

	/* *pRateTable (byte 0) is the number of entries in the table. */
	for (i = 1; i < *((PUCHAR) pRateTable); i++)
	{
#ifdef AGS_SUPPORT
		if (bUseAGS)
		{
			pCurrTxRate_AGS = (PRTMP_TX_RATE_SWITCH_AGS)pRateTable+1+i;
			pCurrTxRate = (PRTMP_TX_RATE_SWITCH)pCurrTxRate_AGS;
		}
		else
#endif /* AGS_SUPPORT */
		pCurrTxRate = (PRTMP_TX_RATE_SWITCH)pRateTable+1+i;

		switch (pCurrTxRate->Mode)
		{
			case 0:	/* CCK: hardware defaults kept */
				break;
			case 1:	/* OFDM */
				{
					/* In the LG_FBK fields, codes 8-15 denote OFDM rates,
					 * 0-7 denote CCK; hence the +8 when the fallback rate
					 * is itself OFDM. */
					switch(pCurrTxRate->CurrMCS)
					{
						case 0:
							LgCfg0.field.OFDMMCS0FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 1:
							LgCfg0.field.OFDMMCS1FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 2:
							LgCfg0.field.OFDMMCS2FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 3:
							LgCfg0.field.OFDMMCS3FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 4:
							LgCfg0.field.OFDMMCS4FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 5:
							LgCfg0.field.OFDMMCS5FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 6:
							LgCfg0.field.OFDMMCS6FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
						case 7:
							LgCfg0.field.OFDMMCS7FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
							break;
					}
				}
				break;
#ifdef DOT11_N_SUPPORT
			case 2:	/* HT-MIX */
			case 3:	/* HT-GF */
				{
					/* Only program a fallback when the fallback entry is
					 * also HT and actually a different MCS. */
					if ((pNextTxRate->Mode >= MODE_HTMIX) && (pCurrTxRate->CurrMCS != pNextTxRate->CurrMCS))
					{
						if (pCurrTxRate->CurrMCS <= 15)
						{
							switch(pCurrTxRate->CurrMCS)
							{
								case 0:
									HtCfg0.field.HTMCS0FBK = pNextTxRate->CurrMCS;
									break;
								case 1:
									HtCfg0.field.HTMCS1FBK = pNextTxRate->CurrMCS;
									break;
								case 2:
									HtCfg0.field.HTMCS2FBK = pNextTxRate->CurrMCS;
									break;
								case 3:
									HtCfg0.field.HTMCS3FBK = pNextTxRate->CurrMCS;
									break;
								case 4:
									HtCfg0.field.HTMCS4FBK = pNextTxRate->CurrMCS;
									break;
								case 5:
									HtCfg0.field.HTMCS5FBK = pNextTxRate->CurrMCS;
									break;
								case 6:
									HtCfg0.field.HTMCS6FBK = pNextTxRate->CurrMCS;
									break;
								case 7:
									HtCfg0.field.HTMCS7FBK = pNextTxRate->CurrMCS;
									break;
								case 8:
									HtCfg1.field.HTMCS8FBK = pNextTxRate->CurrMCS;
									break;
								case 9:
									HtCfg1.field.HTMCS9FBK = pNextTxRate->CurrMCS;
									break;
								case 10:
									HtCfg1.field.HTMCS10FBK = pNextTxRate->CurrMCS;
									break;
								case 11:
									HtCfg1.field.HTMCS11FBK = pNextTxRate->CurrMCS;
									break;
								case 12:
									HtCfg1.field.HTMCS12FBK = pNextTxRate->CurrMCS;
									break;
								case 13:
									HtCfg1.field.HTMCS13FBK = pNextTxRate->CurrMCS;
									break;
								case 14:
									HtCfg1.field.HTMCS14FBK = pNextTxRate->CurrMCS;
									break;
								case 15:
									HtCfg1.field.HTMCS15FBK = pNextTxRate->CurrMCS;
									break;
							}
						}
						else
#ifdef AGS_SUPPORT
						/* 3-stream MCS 16-23 live in TX_FBK_CFG_3S_0/1. */
						if ((bUseAGS == TRUE) && (pCurrTxRate->CurrMCS >= 16) && (pCurrTxRate->CurrMCS <= 23))
						{
							switch(pCurrTxRate->CurrMCS)
							{
								case 16:
									Ht3SSCfg0.field.HTMCS16FBK = pNextTxRate->CurrMCS;
									break;
								case 17:
									Ht3SSCfg0.field.HTMCS17FBK = pNextTxRate->CurrMCS;
									break;
								case 18:
									Ht3SSCfg0.field.HTMCS18FBK = pNextTxRate->CurrMCS;
									break;
								case 19:
									Ht3SSCfg0.field.HTMCS19FBK = pNextTxRate->CurrMCS;
									break;
								case 20:
									Ht3SSCfg1.field.HTMCS20FBK = pNextTxRate->CurrMCS;
									break;
								case 21:
									Ht3SSCfg1.field.HTMCS21FBK = pNextTxRate->CurrMCS;
									break;
								case 22:
									Ht3SSCfg1.field.HTMCS22FBK = pNextTxRate->CurrMCS;
									break;
								case 23:
									Ht3SSCfg1.field.HTMCS23FBK = pNextTxRate->CurrMCS;
									break;
							}
						}
						else
#endif /* AGS_SUPPORT */
						DBGPRINT(RT_DEBUG_ERROR, ("AsicUpdateAutoFallBackTable: not support CurrMCS=%d\n", pCurrTxRate->CurrMCS));
					}
				}
				break;
#endif /* DOT11_N_SUPPORT */
		}

		/* Current entry becomes the fallback target of the next (faster)
		 * entry in the table. */
		pNextTxRate = pCurrTxRate;
	}

#ifdef AGS_SUPPORT
	if (bUseAGS == TRUE)
	{
		/* AGS cross-modulation fallback fixups. */
		Ht3SSCfg0.field.HTMCS16FBK = 0x8;	/* MCS 16 -> MCS 8 */
		HtCfg1.field.HTMCS8FBK = 0x0;	/* MCS 8 -> MCS 0 */
		LgCfg0.field.OFDMMCS2FBK = 0x3;	/* OFDM 12 -> CCK 11 */
		LgCfg0.field.OFDMMCS1FBK = 0x2;	/* OFDM 9 -> CCK 5.5 */
		LgCfg0.field.OFDMMCS0FBK = 0x2;	/* OFDM 6 -> CCK 5.5 */
	}
#endif /* AGS_SUPPORT */

#ifdef NEW_RATE_ADAPT_SUPPORT
skipUpdate:
#endif /* NEW_RATE_ADAPT_SUPPORT */
	/* Commit the fallback configuration to the chip. */
	RTMP_IO_WRITE32(pAd, HT_FBK_CFG0, HtCfg0.word);
	RTMP_IO_WRITE32(pAd, HT_FBK_CFG1, HtCfg1.word);
	RTMP_IO_WRITE32(pAd, LG_FBK_CFG0, LgCfg0.word);
	RTMP_IO_WRITE32(pAd, LG_FBK_CFG1, LgCfg1.word);

#ifdef DOT11N_SS3_SUPPORT
	if (IS_RT2883(pAd) || IS_RT3883(pAd)
#ifdef AGS_SUPPORT
		|| (bUseAGS == TRUE)
#endif /* AGS_SUPPORT */
	)
	{
		RTMP_IO_WRITE32(pAd, TX_FBK_CFG_3S_0, Ht3SSCfg0.word);
		RTMP_IO_WRITE32(pAd, TX_FBK_CFG_3S_1, Ht3SSCfg1.word);
		DBGPRINT(RT_DEBUG_TRACE, ("AsicUpdateAutoFallBackTable: Ht3SSCfg0=0x%x, Ht3SSCfg1=0x%x\n", Ht3SSCfg0.word, Ht3SSCfg1.word));
	}
#endif /* DOT11N_SS3_SUPPORT */
}
#endif /* CONFIG_STA_SUPPORT */

/*
	========================================================================

	Routine Description:
		Set MAC register value according operation mode.
		OperationMode AND bNonGFExist are for MM and GF Protection.
		If the MM or GF mask is not set, the corresponding arguments take
		no effect.

		Operation mode meaning:
		= 0    : Pure HT, no protection.
		= 0x01 : there may be non-HT devices in both the control and
		         extension channel, protection is optional in BSS.
		= 0x10 : No Transmission in 40M is protected.
		= 0x11 : Transmission in both 40M and 20M shall be protected.
		if (bNonGFExist) we should choose not to use GF.
		But still set correct ASIC registers.
	========================================================================
*/
VOID AsicUpdateProtect(
	IN PRTMP_ADAPTER pAd,
	IN USHORT OperationMode,
	IN UCHAR SetMask,
	IN BOOLEAN bDisableBGProtect,
	IN BOOLEAN bNonGFExist)
{
	PROT_CFG_STRUC ProtCfg, ProtCfg4;
	UINT32 Protect[6];	/* CCK, OFDM, MM20, MM40, GF20, GF40 protection words */
	USHORT offset;
	UCHAR i;
	UINT32 MacReg = 0;

#ifdef RALINK_ATE
	/* Never reprogram protection while in ATE (factory test) mode. */
	if (ATE_ON(pAd))
		return;
#endif /* RALINK_ATE */

#ifdef DOT11_N_SUPPORT
	if (!(pAd->CommonCfg.bHTProtect) && (OperationMode != 8))
	{
		return;
	}

#ifdef RT3883
	if (pAd->FlgCWC)
		RT3883_CWC_ProtectAdjust(pAd, &SetMask, &OperationMode);
	else
#endif /* RT3883 */
	if (pAd->BATable.numDoneOriginator)
	{
		/* enable the RTS/CTS to avoid channel collision */
		SetMask |= ALLN_SETPROTECT;
		OperationMode = 8;
	}
#endif /* DOT11_N_SUPPORT */

	/* Config ASIC RTS threshold register */
	RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg);
	MacReg &= 0xFF0000FF;

	/* If the user wants RtsThreshold disabled while AMSDU/Ralink aggregation
	   is enabled, program the RtsThreshold as 4096. */
	if ((
#ifdef DOT11_N_SUPPORT
		(pAd->CommonCfg.BACapability.field.AmsduEnable) ||
#endif /* DOT11_N_SUPPORT */
		(pAd->CommonCfg.bAggregationCapable == TRUE))
		&& pAd->CommonCfg.RtsThreshold == MAX_RTS_THRESHOLD)
	{
		MacReg |= (0x1000 << 8);
	}
	else
	{
		MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
	}
	RTMP_IO_WRITE32(pAd, TX_RTS_CFG, MacReg);

	/* Initial common protection settings */
	RTMPZeroMemory(Protect, sizeof(Protect));
	ProtCfg4.word = 0;
	ProtCfg.word = 0;
	ProtCfg.field.TxopAllowGF40 = 1;
	ProtCfg.field.TxopAllowGF20 = 1;
	ProtCfg.field.TxopAllowMM40 = 1;
	ProtCfg.field.TxopAllowMM20 = 1;
	ProtCfg.field.TxopAllowOfdm = 1;
	ProtCfg.field.TxopAllowCck = 1;
	ProtCfg.field.RTSThEn = 1;
	ProtCfg.field.ProtectNav = ASIC_SHORTNAV;

	/* update PHY mode and rate */
	if (pAd->OpMode == OPMODE_AP)
	{
		/* update PHY mode and rate */
		if (pAd->CommonCfg.Channel > 14)
			ProtCfg.field.ProtectRate = 0x4000;
		ProtCfg.field.ProtectRate |= pAd->CommonCfg.RtsRate;
	}
	else if (pAd->OpMode == OPMODE_STA)
	{
		/* Decide Protect Rate for Legacy packet */
		if (pAd->CommonCfg.Channel > 14)
		{
			ProtCfg.field.ProtectRate = 0x4000;	/* OFDM 6Mbps */
		}
		else
		{
			ProtCfg.field.ProtectRate = 0x0000;	/* CCK 1Mbps */
			if (pAd->CommonCfg.MinTxRate > RATE_11)
				ProtCfg.field.ProtectRate |= 0x4000;	/* OFDM 6Mbps */
		}
	}

	/* Handle legacy(B/G) protection */
	if (bDisableBGProtect)
	{
		/*ProtCfg.field.ProtectRate = pAd->CommonCfg.RtsRate;*/
		ProtCfg.field.ProtectCtrl = 0;
		Protect[0] = ProtCfg.word;
		Protect[1] = ProtCfg.word;
		pAd->FlgCtsEnabled = 0;	/* CTS-self is not used */
	}
	else
	{
		/*ProtCfg.field.ProtectRate = pAd->CommonCfg.RtsRate;*/
		ProtCfg.field.ProtectCtrl = 0;	/* CCK do not need to be protected */
		Protect[0] = ProtCfg.word;
		ProtCfg.field.ProtectCtrl = ASIC_CTS;	/* OFDM needs using CCK to protect */
		Protect[1] = ProtCfg.word;
		pAd->FlgCtsEnabled = 1;	/* CTS-self is used */
	}

#ifdef DOT11_N_SUPPORT
	/* Decide HT frame protection. */
	if ((SetMask & ALLN_SETPROTECT) != 0)
	{
		switch(OperationMode)
		{
			case 0x0:	/* NO PROTECT */
				/* 1. All STAs in the BSS are 20/40 MHz HT
				   2. in a 20/40MHz BSS
				   3. all STAs are 20MHz in a 20MHz BSS
				   Pure HT. no protection. */

				/* MM20_PROT_CFG:
				   PROT_TXOP(25:20) -- 010111
				   PROT_NAV(19:18)  -- 01 (Short NAV protection)
				   PROT_CTRL(17:16) -- 00 (None)
				   PROT_RATE(15:0)  -- 0x4004 (OFDM 24M) */
				Protect[2] = 0x01744004;

				/* MM40_PROT_CFG:
				   PROT_TXOP(25:20) -- 111111
				   PROT_NAV(19:18)  -- 01 (Short NAV protection)
				   PROT_CTRL(17:16) -- 00 (None)
				   PROT_RATE(15:0)  -- 0x4084 (duplicate OFDM 24M) */
				Protect[3] = 0x03f44084;

				/* CF20_PROT_CFG: same layout as MM20 (OFDM 24M, no ctrl). */
				Protect[4] = 0x01744004;

				/* CF40_PROT_CFG: same layout as MM40 (dup OFDM 24M, no ctrl). */
				Protect[5] = 0x03f44084;

				if (bNonGFExist)
				{
					/* PROT_NAV(19:18) -- 01 (Short NAV protection) */
					/* PROT_CTRL(17:16) -- 01 (RTS/CTS) */
					Protect[4] = 0x01754004;
					Protect[5] = 0x03f54084;
				}
				pAd->CommonCfg.IOTestParm.bRTSLongProtOn = FALSE;
				break;

			case 1:
				/* This is "HT non-member protection mode": there may be
				   non-HT STAs in my BSS. */
				ProtCfg.word = 0x01744004;	/* PROT_CTRL(17:16) : 0 (None) */
				ProtCfg4.word = 0x03f44084;	/* duplicate legacy 24M. BW set 1. */
				if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
				{
					/* ERP use Protection bit is set, use protection rate at Clause 18. */
					ProtCfg.word = 0x01740003;
					/* Don't duplicate RTS/CTS in CCK mode. 0x03f40083; */
					ProtCfg4.word = 0x03f40003;
				}
				/* Assign Protection method for 20&40 MHz packets */
				ProtCfg.field.ProtectCtrl = ASIC_RTS;
				ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
				ProtCfg4.field.ProtectCtrl = ASIC_RTS;
				ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
				Protect[2] = ProtCfg.word;
				Protect[3] = ProtCfg4.word;
				Protect[4] = ProtCfg.word;
				Protect[5] = ProtCfg4.word;
				pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
				break;

			case 2:
				/* If only HT STAs are in BSS and at least one is 20MHz,
				   only protect 40MHz packets. */
				ProtCfg.word = 0x01744004;	/* PROT_CTRL(17:16) : 0 (None) */
				ProtCfg4.word = 0x03f44084;	/* duplicate legacy 24M. BW set 1. */
				if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
				{
					/* ERP use Protection bit is set, use protection rate at Clause 18. */
					ProtCfg.word = 0x01740003;
					/* Don't duplicate RTS/CTS in CCK mode. 0x03f40083; */
					ProtCfg4.word = 0x03f40003;
				}
				/* Assign Protection method for 40MHz packets */
				ProtCfg4.field.ProtectCtrl = ASIC_RTS;
				ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
				Protect[2] = ProtCfg.word;
				Protect[3] = ProtCfg4.word;
				if (bNonGFExist)
				{
					ProtCfg.field.ProtectCtrl = ASIC_RTS;
					ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
				}
				Protect[4] = ProtCfg.word;
				Protect[5] = ProtCfg4.word;
				pAd->CommonCfg.IOTestParm.bRTSLongProtOn = FALSE;
				break;

			case 3:
				/* HT mixed mode. PROTECT ALL! */
				/* Assign Rate */
				ProtCfg.word = 0x01744004;	/* duplicate legacy 24M. BW set 1. */
				ProtCfg4.word = 0x03f44084;
				/* both 20MHz and 40MHz are protected. Whether use RTS or
				   CTS-to-self depends on the BG-protection flag. */
				if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
				{
					/* ERP use Protection bit is set, use protection rate at Clause 18. */
					ProtCfg.word = 0x01740003;
					/* Don't duplicate RTS/CTS in CCK mode. 0x03f40083 */
					ProtCfg4.word = 0x03f40003;
				}
				/* Assign Protection method for 20&40 MHz packets */
				ProtCfg.field.ProtectCtrl = ASIC_RTS;
				ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
				ProtCfg4.field.ProtectCtrl = ASIC_RTS;
				ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
				Protect[2] = ProtCfg.word;
				Protect[3] = ProtCfg4.word;
				Protect[4] = ProtCfg.word;
				Protect[5] = ProtCfg4.word;
				pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
				break;

			case 8:
				/* Special on for Atheros problem n chip. */
				ProtCfg.word = 0x01754004;	/* duplicate legacy 24M. BW set 1. */
				ProtCfg4.word = 0x03f54084;
				if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
				{
					/* ERP use Protection bit is set, use protection rate at Clause 18. */
					ProtCfg.word = 0x01750003;
					/* Don't duplicate RTS/CTS in CCK mode. 0x03f40083 */
					ProtCfg4.word = 0x03f50003;
				}
				Protect[2] = ProtCfg.word;	/* 0x01754004; */
				Protect[3] = ProtCfg4.word;	/* 0x03f54084; */
				Protect[4] = ProtCfg.word;	/* 0x01754004; */
				Protect[5] = ProtCfg4.word;	/* 0x03f54084; */
				pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
				break;
		}
	}
#endif /* DOT11_N_SUPPORT */

	/* Write each protection word selected by SetMask; the six registers
	 * are laid out contiguously starting at CCK_PROT_CFG. */
	offset = CCK_PROT_CFG;
	for (i = 0;i < 6;i++)
	{
		if ((SetMask & (1<< i)))
		{
			RTMP_IO_WRITE32(pAd, offset + i*4, Protect[i]);
		}
	}
}

/* Delegate BBP adjustment to the chip-specific hook. */
VOID AsicBBPAdjust(RTMP_ADAPTER *pAd)
{
	RTMP_CHIP_ASIC_BBP_ADJUST(pAd);
}

/*
	==========================================================================
	Description:

	IRQL = PASSIVE_LEVEL
	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicSwitchChannel(
	IN PRTMP_ADAPTER pAd,
	IN UCHAR Channel,
	IN BOOLEAN bScan)
{
#ifdef CONFIG_STA_SUPPORT
#ifdef CONFIG_PM
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
#endif /* CONFIG_PM */
#endif /* CONFIG_STA_SUPPORT */

#ifdef CONFIG_STA_SUPPORT
#ifdef CONFIG_PM
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	/* Resume the USB interface from selective suspend before touching RF. */
	if( (RTMP_Usb_AutoPM_Get_Interface(pObj->pUsb_Dev,pObj->intf)) == 1)
	{
		DBGPRINT(RT_DEBUG_TRACE, ("AsicSwitchChannel: autopm_resume success\n"));
		/* (continuation of AsicSwitchChannel: USB autopm resume result) */
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_SUSPEND);
	}
	else if ((RTMP_Usb_AutoPM_Get_Interface(pObj->pUsb_Dev,pObj->intf)) == (-1))
	{
		DBGPRINT(RT_DEBUG_ERROR, ("AsicSwitchChannel autopm_resume fail ------\n"));
		RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_SUSPEND);
		return;
	}
	else
		DBGPRINT(RT_DEBUG_TRACE, ("AsicSwitchChannel: autopm_resume do nothing \n"));
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
#endif /* CONFIG_PM */
#endif /* CONFIG_STA_SUPPORT */

#ifdef CONFIG_AP_SUPPORT
#ifdef AP_QLOAD_SUPPORT
	/* clear all statistics count for QBSS Load */
	QBSS_LoadStatusClear(pAd);
#endif /* AP_QLOAD_SUPPORT */
#endif /* CONFIG_AP_SUPPORT */

	/* The chip-specific hook performs the actual RF/BBP channel programming. */
	RTMP_CHIP_ASIC_SWITCH_CHANNEL(pAd, Channel, bScan);
}

/*
	==========================================================================
	Description:
		This function is required for 2421 only, and should not be used during
		site survey. It's only required after NIC decided to stay at a channel
		for a longer period.
		When this function is called, it's always after AsicSwitchChannel().

	IRQL = PASSIVE_LEVEL
	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicLockChannel(
	IN PRTMP_ADAPTER pAd,
	IN UCHAR Channel)
{
	/* Intentionally empty: no channel-lock work is needed here. */
}

/*
	==========================================================================
	Description:

	IRQL = PASSIVE_LEVEL
	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
/*
 * Cache the RF power-amplifier mode configuration: read RF_PA_MODE_CFG0/1
 * and unpack the 2-bit PA-mode code per rate into the pAd arrays
 * (CCK: 4 rates, OFDM: 8 rates, HT: 16 MCS).
 */
VOID InitRfPaModeTable(
	IN PRTMP_ADAPTER pAd)
{
	UINT32 mac_value;
	UCHAR bit, pa_value;

	RTMP_IO_READ32(pAd, RF_PA_MODE_CFG0, &mac_value);
	for (bit = 0; bit < 8; bit += 2)	/* CCK */
	{
		pa_value = (UCHAR)((mac_value >> bit) & (0x03));
		pAd->rf_pa_mode_over_cck[bit/2] = pa_value;
	}
	for (bit = 8; bit < 24; bit += 2)	/* OFDM */
	{
		pa_value = (UCHAR)((mac_value >> bit) & (0x03));
		pAd->rf_pa_mode_over_ofdm[(bit - 8)/2] = pa_value;
	}

	RTMP_IO_READ32(pAd, RF_PA_MODE_CFG1, &mac_value);
	for (bit = 0; bit < 32; bit += 2)	/* HT */
	{
		pa_value = (UCHAR)((mac_value >> bit) & (0x03));
		pAd->rf_pa_mode_over_ht[bit/2] = pa_value;
	}
}

/*
 * Fill *TxPwr with the five TX_PWR_CFG_0..4 register offset/value pairs
 * matching the current bandwidth (20/40MHz) and band (A/G), copied from the
 * pAd->Tx{20,40}MPwrCfg{A,G}Band arrays.  TxPwr must point to a buffer of
 * at least sizeof(CONFIGURATION_OF_TX_POWER_CONTROL_OVER_MAC) bytes.
 */
VOID AsicGetTxPowerOffset(
	IN PRTMP_ADAPTER pAd,
	IN PULONG TxPwr)
{
	CONFIGURATION_OF_TX_POWER_CONTROL_OVER_MAC CfgOfTxPwrCtrlOverMAC;

	DBGPRINT(RT_DEBUG_INFO, ("-->AsicGetTxPowerOffset\n"));

	NdisZeroMemory(&CfgOfTxPwrCtrlOverMAC, sizeof(CfgOfTxPwrCtrlOverMAC));

	/* MAC 0x1314, 0x1318, 0x131C, 0x1320 and 1324 */
	CfgOfTxPwrCtrlOverMAC.NumOfEntries = 5;

	if (pAd->CommonCfg.BBPCurrentBW == BW_40)
	{
		if (pAd->CommonCfg.CentralChannel > 14)
		{
			/* 40MHz, A band */
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].MACRegisterOffset = TX_PWR_CFG_0;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].RegisterValue = pAd->Tx40MPwrCfgABand[0];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].MACRegisterOffset = TX_PWR_CFG_1;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].RegisterValue = pAd->Tx40MPwrCfgABand[1];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].MACRegisterOffset = TX_PWR_CFG_2;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].RegisterValue = pAd->Tx40MPwrCfgABand[2];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].MACRegisterOffset = TX_PWR_CFG_3;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].RegisterValue = pAd->Tx40MPwrCfgABand[3];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].MACRegisterOffset = TX_PWR_CFG_4;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].RegisterValue = pAd->Tx40MPwrCfgABand[4];
		}
		else
		{
			/* 40MHz, G band */
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].MACRegisterOffset = TX_PWR_CFG_0;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].RegisterValue = pAd->Tx40MPwrCfgGBand[0];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].MACRegisterOffset = TX_PWR_CFG_1;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].RegisterValue = pAd->Tx40MPwrCfgGBand[1];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].MACRegisterOffset = TX_PWR_CFG_2;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].RegisterValue = pAd->Tx40MPwrCfgGBand[2];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].MACRegisterOffset = TX_PWR_CFG_3;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].RegisterValue = pAd->Tx40MPwrCfgGBand[3];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].MACRegisterOffset = TX_PWR_CFG_4;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].RegisterValue = pAd->Tx40MPwrCfgGBand[4];
		}
	}
	else
	{
		if (pAd->CommonCfg.CentralChannel > 14)
		{
			/* 20MHz, A band */
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].MACRegisterOffset = TX_PWR_CFG_0;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].RegisterValue = pAd->Tx20MPwrCfgABand[0];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].MACRegisterOffset = TX_PWR_CFG_1;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].RegisterValue = pAd->Tx20MPwrCfgABand[1];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].MACRegisterOffset = TX_PWR_CFG_2;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].RegisterValue = pAd->Tx20MPwrCfgABand[2];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].MACRegisterOffset = TX_PWR_CFG_3;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].RegisterValue = pAd->Tx20MPwrCfgABand[3];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].MACRegisterOffset = TX_PWR_CFG_4;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].RegisterValue = pAd->Tx20MPwrCfgABand[4];
		}
		else
		{
			/* 20MHz, G band */
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].MACRegisterOffset = TX_PWR_CFG_0;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].RegisterValue = pAd->Tx20MPwrCfgGBand[0];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].MACRegisterOffset = TX_PWR_CFG_1;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[1].RegisterValue = pAd->Tx20MPwrCfgGBand[1];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].MACRegisterOffset = TX_PWR_CFG_2;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[2].RegisterValue = pAd->Tx20MPwrCfgGBand[2];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].MACRegisterOffset = TX_PWR_CFG_3;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[3].RegisterValue = pAd->Tx20MPwrCfgGBand[3];
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].MACRegisterOffset = TX_PWR_CFG_4;
			CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[4].RegisterValue = pAd->Tx20MPwrCfgGBand[4];
		}
	}

	NdisCopyMemory(TxPwr, (UCHAR *)&CfgOfTxPwrCtrlOverMAC, sizeof(CfgOfTxPwrCtrlOverMAC));

	DBGPRINT(RT_DEBUG_INFO, ("<--AsicGetTxPowerOffset\n"));
}

/*
 * TSSI-based TX power compensation for external TX ALC.
 *
 * Every fourth call (pAd->Mlme.OneSecPeriodicRound % 4 == 0) reads BBP R49
 * (TSSI) and, when auto TX AGC is enabled for the current band, compares it
 * against the per-band Tssi{Minus,Plus}Boundary tables to determine how many
 * TxAgcStep units the TX power must be decreased/increased; on the other
 * calls the previously cached compensation is reused.  The resulting delta
 * is written to *pDeltaPwr and the raw compensation to *pAgcCompensate.
 * NOTE(review): pTotalDeltaPwr and pDeltaPowerByBbpR1 are not used by this
 * variant - presumably consumed by the temperature-sensor variant; confirm.
 */
VOID AsicGetAutoAgcOffsetForExternalTxAlc(
	IN PRTMP_ADAPTER pAd,
	IN PCHAR pDeltaPwr,
	IN PCHAR pTotalDeltaPwr,
	IN PCHAR pAgcCompensate,
	IN PCHAR pDeltaPowerByBbpR1)
{
	BBP_R49_STRUC BbpR49;
	BOOLEAN bAutoTxAgc = FALSE;
	UCHAR TssiRef, *pTssiMinusBoundary, *pTssiPlusBoundary, TxAgcStep, idx;
	PCHAR pTxAgcCompensate = NULL;
	CHAR DeltaPwr = 0;

	DBGPRINT(RT_DEBUG_INFO, ("-->%s\n", __FUNCTION__));

	BbpR49.byte = 0;

	/* TX power compensation for temperature variation based on TSSI. Try every 4 second */
	if (pAd->Mlme.OneSecPeriodicRound % 4 == 0)
	{
		if (pAd->CommonCfg.Channel <= 14)
		{
			/* bg channel */
			bAutoTxAgc = pAd->bAutoTxAgcG;
			TssiRef = pAd->TssiRefG;
			pTssiMinusBoundary = &pAd->TssiMinusBoundaryG[0];
			pTssiPlusBoundary = &pAd->TssiPlusBoundaryG[0];
			TxAgcStep = pAd->TxAgcStepG;
			pTxAgcCompensate = &pAd->TxAgcCompensateG;
		}
		else
		{
			/* a channel */
			bAutoTxAgc = pAd->bAutoTxAgcA;
			TssiRef = pAd->TssiRefA;
			pTssiMinusBoundary = &pAd->TssiMinusBoundaryA[0];
			pTssiPlusBoundary = &pAd->TssiPlusBoundaryA[0];
			TxAgcStep = pAd->TxAgcStepA;
			pTxAgcCompensate = &pAd->TxAgcCompensateA;
		}

		if (bAutoTxAgc)
		{
			RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R49, &BbpR49.byte);

			/* TSSI representation */
			if (IS_RT3071(pAd) || IS_RT3390(pAd) || IS_RT3090A(pAd) || IS_RT3572(pAd))	/* 5-bits */
			{
				BbpR49.byte = (BbpR49.byte & 0x1F);
			}

			/* (p) TssiPlusBoundaryG[0] = 0 = (m) TssiMinusBoundaryG[0] */
			/* compensate: +4 +3 +2 +1 0 -1 -2 -3 -4 * steps */
			/* step value is defined in pAd->TxAgcStepG for tx power value */
			/* [4]+1+[4] p4 p3 p2 p1 o1 m1 m2 m3 m4 */
			/* ex: 0x00 0x15 0x25 0x45 0x88 0xA0 0xB5 0xD0 0xF0
			   above value are examined in mass factory production */
			/* [4] [3] [2] [1] [0] [1] [2] [3] [4] */
			/* plus (+) is 0x00 ~ 0x45, minus (-) is 0xa0 ~ 0xf0 */
			/* if value is between p1 ~ o1 or o1 ~ s1, no need to adjust tx power */
			/* if value is 0xa5, tx power will be -= TxAgcStep*(2-1) */

			if (BbpR49.byte > pTssiMinusBoundary[1])
			{
				/* Reading is larger than the reference value */
				/* Check for how large we need to decrease the Tx power */
				for (idx = 1; idx < 5; idx++)
				{
					if (BbpR49.byte <= pTssiMinusBoundary[idx])	/* Found the range */
						break;
				}
				/* The index is the step we should decrease, idx = 0 means there is nothing to compensate */
#ifdef RT3883
				if (IS_RT3883(pAd))
				{
					if ((idx == 5) && ((BbpR49.byte) > pTssiMinusBoundary[4] + 8))
						idx += 1;
				}
#endif /* RT3883 */
				*pTxAgcCompensate = -(TxAgcStep * (idx-1));
				DeltaPwr += (*pTxAgcCompensate);
				DBGPRINT(RT_DEBUG_TRACE, ("-- Tx Power, BBP R49=%x, TssiRef=%x, TxAgcStep=%x, step = -%d\n", BbpR49.byte, TssiRef, TxAgcStep, idx-1));
			}
			else if (BbpR49.byte < pTssiPlusBoundary[1])
			{
				/* Reading is smaller than the reference value */
				/* Check for how large we need to increase the Tx power */
				for (idx = 1; idx < 5; idx++)
				{
					if (BbpR49.byte >= pTssiPlusBoundary[idx])	/* Found the range */
						break;
				}
#ifdef RT3883
				if (IS_RT3883(pAd) && (idx == 5))
				{
					if ((BbpR49.byte) < (pTssiPlusBoundary[4] - 16))
						idx += 2;
					else if ((BbpR49.byte) < (pTssiPlusBoundary[4] - 8))
						idx += 1;
				}
#endif /* RT3883 */
				/* The index is the step we should increase, idx = 0 means there is nothing to compensate */
				*pTxAgcCompensate = TxAgcStep * (idx-1);
				DeltaPwr += (*pTxAgcCompensate);
				DBGPRINT(RT_DEBUG_TRACE, ("++ Tx Power, BBP R49=%x, TssiRef=%x, TxAgcStep=%x, step = +%d\n", BbpR49.byte, TssiRef, TxAgcStep, idx-1));
			}
			else
			{
				*pTxAgcCompensate = 0;
				DBGPRINT(RT_DEBUG_TRACE, (" Tx Power, BBP R49=%x, TssiRef=%x, TxAgcStep=%x, step = +%d\n", BbpR49.byte, TssiRef, TxAgcStep, 0));
			}
		}
	}
	else
	{
		/* Off-cycle call: reuse the cached compensation for the band. */
		if (pAd->CommonCfg.Channel <= 14)
		{
			bAutoTxAgc = pAd->bAutoTxAgcG;
			pTxAgcCompensate = &pAd->TxAgcCompensateG;
		}
		else
		{
			bAutoTxAgc = pAd->bAutoTxAgcA;
			pTxAgcCompensate = &pAd->TxAgcCompensateA;
		}

		if (bAutoTxAgc)
			DeltaPwr += (*pTxAgcCompensate);
	}

#ifdef RT3883
	if (IS_RT3883(pAd) && (pAd->bTxPwrRangeExt))
		DeltaPwr -= 2;
#endif /* RT3883 */

	*pDeltaPwr = DeltaPwr;
	*pAgcCompensate = *pTxAgcCompensate;

	DBGPRINT(RT_DEBUG_INFO, ("<--%s\n", __FUNCTION__));
}

#ifdef RTMP_TEMPERATURE_COMPENSATION
/*
 * Build the per-band temperature-compensation lookup tables from EEPROM
 * step parameters and program BBP R47 / RF R27 for temperature reading.
 * Continues below.
 */
VOID InitLookupTable(
	IN PRTMP_ADAPTER pAd)
{
	int Idx, IdxTmp;
	int i;
	enum IEEE80211_BAND band;
	int band_nums =
1;	/* default: 2.4G only; extended below when the RFIC supports 5G */
	const int Offset = 7;	/* index of the reference (0-delta) table entry */
	EEPROM_WORD_STRUC WordStruct = {{0}};
	/* Defaults used when not overridden by EEPROM. */
	UCHAR PlusStepNum[IEEE80211_BAND_NUMS][8] = {{0, 1, 3, 2, 3, 3, 3, 2}, {0, 1, 3, 2, 3, 3, 3, 2}};
	UCHAR MinusStepNum[IEEE80211_BAND_NUMS][8] = {{1, 1, 1, 1, 1, 1, 0, 1}, {1, 1, 1, 1, 1, 1, 0, 1}};
	UCHAR Step[IEEE80211_BAND_NUMS] = {10, 10};
	UCHAR RFValue = 0, BbpValue = 0;

	DBGPRINT(RT_DEBUG_TRACE, ("==> InitLookupTable\n"));

	/* Read from EEPROM, as parameters for lookup table for G band */
	RT28xx_EEPROM_READ16(pAd, 0x6e, WordStruct.word);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] EEPROM 6e = %x\n", WordStruct.word));
	PlusStepNum[IEEE80211_BAND_2G][0] = (WordStruct.field.Byte0 & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][1] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][2] = (WordStruct.field.Byte1 & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][3] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

	RT28xx_EEPROM_READ16(pAd, 0x70, WordStruct.word);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] EEPROM 70 = %x\n", WordStruct.word));
	PlusStepNum[IEEE80211_BAND_2G][4] = (WordStruct.field.Byte0 & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][5] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][6] = (WordStruct.field.Byte1 & 0x0F);
	PlusStepNum[IEEE80211_BAND_2G][7] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

	RT28xx_EEPROM_READ16(pAd, 0x72, WordStruct.word);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] EEPROM 72 = %x\n", WordStruct.word));
	MinusStepNum[IEEE80211_BAND_2G][0] = (WordStruct.field.Byte0 & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][1] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][2] = (WordStruct.field.Byte1 & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][3] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

	RT28xx_EEPROM_READ16(pAd, 0x74, WordStruct.word);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4] EEPROM 74 = %x\n", WordStruct.word));
	MinusStepNum[IEEE80211_BAND_2G][4] = (WordStruct.field.Byte0 & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][5] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][6] = (WordStruct.field.Byte1 & 0x0F);
	MinusStepNum[IEEE80211_BAND_2G][7] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

	RT28xx_EEPROM_READ16(pAd, 0x76, WordStruct.word);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] EEPROM 76 = %x\n", WordStruct.word));
	pAd->TxPowerCtrl.TssiGain[IEEE80211_BAND_2G] = (WordStruct.field.Byte0 & 0x0F);
	Step[IEEE80211_BAND_2G] = (WordStruct.field.Byte0 >> 4);
	pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_2G] = (CHAR)WordStruct.field.Byte1;

	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] Plus = %u %u %u %u %u %u %u %u\n",
			PlusStepNum[IEEE80211_BAND_2G][0], PlusStepNum[IEEE80211_BAND_2G][1],
			PlusStepNum[IEEE80211_BAND_2G][2], PlusStepNum[IEEE80211_BAND_2G][3],
			PlusStepNum[IEEE80211_BAND_2G][4], PlusStepNum[IEEE80211_BAND_2G][5],
			PlusStepNum[IEEE80211_BAND_2G][6], PlusStepNum[IEEE80211_BAND_2G][7] ));
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] Minus = %u %u %u %u %u %u %u %u\n",
			MinusStepNum[IEEE80211_BAND_2G][0], MinusStepNum[IEEE80211_BAND_2G][1],
			MinusStepNum[IEEE80211_BAND_2G][2], MinusStepNum[IEEE80211_BAND_2G][3],
			MinusStepNum[IEEE80211_BAND_2G][4], MinusStepNum[IEEE80211_BAND_2G][5],
			MinusStepNum[IEEE80211_BAND_2G][6], MinusStepNum[IEEE80211_BAND_2G][7] ));
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4G] tssi gain/step = %u\n", pAd->TxPowerCtrl.TssiGain[IEEE80211_BAND_2G]));
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4] Step = %u\n", Step[IEEE80211_BAND_2G]));
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 2.4] RefTemp_2G = %d\n", pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_2G]));

#ifdef A_BAND_SUPPORT
	if (RFIC_IS_5G_BAND(pAd))
	{
		/* Read from EEPROM, as parameters for lookup table for A band */
		RT28xx_EEPROM_READ16(pAd, 0xd4, WordStruct.word);
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] EEPROM d4 = %x\n", WordStruct.word));
		PlusStepNum[IEEE80211_BAND_5G][0] = (WordStruct.field.Byte0 & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][1] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][2] = (WordStruct.field.Byte1 & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][3] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

		RT28xx_EEPROM_READ16(pAd, 0xd6, WordStruct.word);
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] EEPROM d6 = %x\n", WordStruct.word));
		PlusStepNum[IEEE80211_BAND_5G][4] = (WordStruct.field.Byte0 & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][5] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][6] = (WordStruct.field.Byte1 & 0x0F);
		PlusStepNum[IEEE80211_BAND_5G][7] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

		RT28xx_EEPROM_READ16(pAd, 0xd8, WordStruct.word);
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] EEPROM d8 = %x\n", WordStruct.word));
		MinusStepNum[IEEE80211_BAND_5G][0] = (WordStruct.field.Byte0 & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][1] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][2] = (WordStruct.field.Byte1 & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][3] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

		RT28xx_EEPROM_READ16(pAd, 0xda, WordStruct.word);
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] EEPROM da = %x\n", WordStruct.word));
		MinusStepNum[IEEE80211_BAND_5G][4] = (WordStruct.field.Byte0 & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][5] = (((WordStruct.field.Byte0 & 0xF0) >> 4) & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][6] = (WordStruct.field.Byte1 & 0x0F);
		MinusStepNum[IEEE80211_BAND_5G][7] = (((WordStruct.field.Byte1 & 0xF0) >> 4) & 0x0F);

		RT28xx_EEPROM_READ16(pAd, 0xdc, WordStruct.word);
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] EEPROM dc = %x\n", WordStruct.word));
		pAd->TxPowerCtrl.TssiGain[IEEE80211_BAND_5G] = (WordStruct.field.Byte0 & 0x0F);
		Step[IEEE80211_BAND_5G] = (WordStruct.field.Byte0 >> 4);
		pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_5G] = (CHAR)WordStruct.field.Byte1;

		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] Plus = %u %u %u %u %u %u %u %u\n",
				PlusStepNum[IEEE80211_BAND_5G][0], PlusStepNum[IEEE80211_BAND_5G][1],
				PlusStepNum[IEEE80211_BAND_5G][2], PlusStepNum[IEEE80211_BAND_5G][3],
				PlusStepNum[IEEE80211_BAND_5G][4], PlusStepNum[IEEE80211_BAND_5G][5],
				PlusStepNum[IEEE80211_BAND_5G][6], PlusStepNum[IEEE80211_BAND_5G][7] ));
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] Minus = %u %u %u %u %u %u %u %u\n",
				MinusStepNum[IEEE80211_BAND_5G][0], MinusStepNum[IEEE80211_BAND_5G][1],
				MinusStepNum[IEEE80211_BAND_5G][2], MinusStepNum[IEEE80211_BAND_5G][3],
				MinusStepNum[IEEE80211_BAND_5G][4], MinusStepNum[IEEE80211_BAND_5G][5],
				MinusStepNum[IEEE80211_BAND_5G][6], MinusStepNum[IEEE80211_BAND_5G][7] ));
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] tssi gain/step = %u\n", pAd->TxPowerCtrl.TssiGain[IEEE80211_BAND_5G]));
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] Step = %u\n", Step[IEEE80211_BAND_5G]));
		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation 5G] RefTemp_2G = %d\n", pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_5G]));

		band_nums = IEEE80211_BAND_NUMS;
	}
#endif /* A_BAND_SUPPORT */

	/* Expand the step counts into a 33-entry lookup table per band, with
	 * the reference temperature delta at index Offset (=7).
	 * NOTE(review): when all remaining step counts are zero, i can reach 8
	 * and the conditions below then index PlusStepNum/MinusStepNum[band][8]
	 * out of bounds before the i < 8 guard applies; confirm EEPROM contents
	 * make this unreachable. */
	for (band = IEEE80211_BAND_2G; band < band_nums; band++)
	{
		/* positive */
		i = 0;
		IdxTmp = 1;
		pAd->TxPowerCtrl.LookupTable[band][1 + Offset] = Step[band] / 2;
		pAd->TxPowerCtrl.LookupTable[band][0 + Offset] = pAd->TxPowerCtrl.LookupTable[band][1 + Offset] - Step[band];
		for (Idx = 2; Idx < 26;)/* Idx++ )*/
		{
			if (PlusStepNum[band][i] != 0 || i >= 8)
			{
				if (Idx >= IdxTmp + PlusStepNum[band][i] && i < 8)
				{
					pAd->TxPowerCtrl.LookupTable[band][Idx + Offset] =
							pAd->TxPowerCtrl.LookupTable[band][Idx - 1 + Offset] + (Step[band] - (i+1) + 1);
					IdxTmp = IdxTmp + PlusStepNum[band][i];
					i += 1;
				}
				else
				{
					pAd->TxPowerCtrl.LookupTable[band][Idx + Offset] =
							pAd->TxPowerCtrl.LookupTable[band][Idx - 1 + Offset] + (Step[band] - (i+1) + 1);
				}
				Idx++;
			}
			else
			{
				i += 1;
			}
		}

		/* negative */
		i = 0;
		IdxTmp = 1;
		for (Idx = 1; Idx < 8;)/* Idx++ )*/
		{
			if (MinusStepNum[band][i] != 0 || i >= 8)
			{
				if ((Idx + 1) >= IdxTmp + MinusStepNum[band][i] && i < 8)
				{
					pAd->TxPowerCtrl.LookupTable[band][-Idx + Offset] =
							pAd->TxPowerCtrl.LookupTable[band][-Idx + 1 + Offset] - (Step[band] + (i+1) - 1);
					IdxTmp = IdxTmp + MinusStepNum[band][i];
					i += 1;
				}
				else
				{
					pAd->TxPowerCtrl.LookupTable[band][-Idx + Offset] =
							pAd->TxPowerCtrl.LookupTable[band][-Idx + 1 + Offset] - (Step[band] + (i+1) - 1);
				}
				Idx++;
			}
			else
			{
				i += 1;
			}
		}

		DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation] Lookup table as below:\n"));
		for (Idx = 0; Idx < 33; Idx++)
		{
			DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation band(%d)] %d, %d\n", band, Idx - Offset, pAd->TxPowerCtrl.LookupTable[band][Idx]));
		}
	}

	/* Set BBP_R47 */
	RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R47, &BbpValue);
	/* bit3 = 0 */
	BbpValue = (BbpValue & 0xf7);
	/* bit7 = 1, bit4 = 0 */
	BbpValue = (BbpValue | 0x80);
	RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R47, BbpValue);

	/* Set RF_R27 */
	RT30xxReadRFRegister(pAd, RF_R27, &RFValue);
	/* Set [7:6] to 01. For method 2, it is set at initialization. */
	RFValue = (RFValue & 0x7f);
	RFValue = (RFValue | 0x40);
	DBGPRINT(RT_DEBUG_TRACE, ("[temp. compensation] Set RF_R27 to 0x%x\n", RFValue));
	RT30xxWriteRFRegister(pAd, RF_R27, RFValue);
}

/*
 * Temperature-sensor based auto AGC offset (lookup-table variant).
 * NOTE: this function continues beyond this excerpt.
 */
VOID AsicGetAutoAgcOffsetForTemperatureSensor(
	IN PRTMP_ADAPTER pAd,
	IN PCHAR pDeltaPwr,
	IN PCHAR pTotalDeltaPwr,
	IN PCHAR pAgcCompensate,
	IN PCHAR pDeltaPowerByBbpR1)
{
	RTMP_CHIP_CAP *pChipCap = &pAd->chipCap;
	const TX_POWER_TUNING_ENTRY_STRUCT *TxPowerTuningTable;
	TX_POWER_TUNING_ENTRY_STRUCT *TxPowerTuningTableEntry0 = NULL;	/* Ant0 */
	TX_POWER_TUNING_ENTRY_STRUCT *TxPowerTuningTableEntry1 = NULL;	/* Ant1 */
	BBP_R49_STRUC BbpR49;
	BOOLEAN bAutoTxAgc = FALSE;
	PCHAR pTxAgcCompensate = NULL;
	UCHAR RFValue = 0;
	CHAR TuningTableUpperBound = 0, TuningTableIndex0 = 0, TuningTableIndex1 = 0;
	INT CurrentTemp = 0;
	INT RefTemp;
	INT *LookupTable;
	INT LookupTableIndex = pAd->TxPowerCtrl.LookupTableIndex + TEMPERATURE_COMPENSATION_LOOKUP_TABLE_OFFSET;

	DBGPRINT(RT_DEBUG_INFO, ("-->%s\n", __FUNCTION__));

	BbpR49.byte = 0;
	*pTotalDeltaPwr = 0;

#ifdef A_BAND_SUPPORT
	if (pAd->CommonCfg.Channel > 14)
	{
		/* a band channel */
		bAutoTxAgc = pAd->bAutoTxAgcA;
		pTxAgcCompensate = &pAd->TxAgcCompensateA;
		TxPowerTuningTable = pChipCap->TxPowerTuningTable_5G;
		RefTemp = pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_5G];
		LookupTable = &pAd->TxPowerCtrl.LookupTable[IEEE80211_BAND_5G][0];
		TuningTableUpperBound = pChipCap->TxAlcTxPowerUpperBound_5G;
	}
	else
#endif /* A_BAND_SUPPORT */
	{
		/* bg band channel */
		bAutoTxAgc = pAd->bAutoTxAgcG;
		pTxAgcCompensate = &pAd->TxAgcCompensateG;
		TxPowerTuningTable = pChipCap->TxPowerTuningTable_2G;
		RefTemp = pAd->TxPowerCtrl.RefTemp[IEEE80211_BAND_2G];
		LookupTable = &pAd->TxPowerCtrl.LookupTable[IEEE80211_BAND_2G][0];
		TuningTableUpperBound = pChipCap->TxAlcTxPowerUpperBound_2G;
	}

	/* AutoTxAgc in EEPROM means temperature compensation enabled/disabled.
*/ if (bAutoTxAgc) { /* Current temperature */ RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R49, &BbpR49.byte); CurrentTemp = (CHAR)BbpR49.byte; DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] BBP_R49 = %02x, current temp = %d\n", BbpR49.byte, CurrentTemp)); DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] RefTemp = %d\n", RefTemp)); DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] index = %d\n", pAd->TxPowerCtrl.LookupTableIndex)); DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] f(%d)= %d\n", pAd->TxPowerCtrl.LookupTableIndex - 1, LookupTable[LookupTableIndex - 1])); DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] f(%d)= %d\n", pAd->TxPowerCtrl.LookupTableIndex, LookupTable[LookupTableIndex])); DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] f(%d)= %d\n", pAd->TxPowerCtrl.LookupTableIndex + 1, LookupTable[LookupTableIndex + 1])); if (CurrentTemp > RefTemp + LookupTable[LookupTableIndex + 1] + ((LookupTable[LookupTableIndex + 1] - LookupTable[LookupTableIndex]) >> 2) && LookupTableIndex < 32) { DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] ++\n")); LookupTableIndex++; pAd->TxPowerCtrl.LookupTableIndex++; } else if (CurrentTemp < RefTemp + LookupTable[LookupTableIndex] - ((LookupTable[LookupTableIndex] - LookupTable[LookupTableIndex - 1]) >> 2) && LookupTableIndex > 0) { DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] --\n")); LookupTableIndex--; pAd->TxPowerCtrl.LookupTableIndex--; } else { DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] ==\n")); } DBGPRINT(RT_DEBUG_INFO, ("[temp. 
compensation] idxTxPowerTable=%d, idxTxPowerTable2=%d, TuningTableUpperBound=%d\n", pAd->TxPowerCtrl.idxTxPowerTable + pAd->TxPowerCtrl.LookupTableIndex, pAd->TxPowerCtrl.idxTxPowerTable2 + pAd->TxPowerCtrl.LookupTableIndex, TuningTableUpperBound)); TuningTableIndex0 = pAd->TxPowerCtrl.idxTxPowerTable + pAd->TxPowerCtrl.LookupTableIndex #ifdef DOT11_N_SUPPORT + pAd->TxPower[pAd->CommonCfg.CentralChannel-1].Power; #else + pAd->TxPower[pAd->CommonCfg.Channel-1].Power; #endif /* DOT11_N_SUPPORT */ /* The boundary verification */ TuningTableIndex0 = (TuningTableIndex0 > TuningTableUpperBound) ? TuningTableUpperBound : TuningTableIndex0; TuningTableIndex0 = (TuningTableIndex0 < LOWERBOUND_TX_POWER_TUNING_ENTRY) ? LOWERBOUND_TX_POWER_TUNING_ENTRY : TuningTableIndex0; TxPowerTuningTableEntry0 = &TxPowerTuningTable[TuningTableIndex0 + TX_POWER_TUNING_ENTRY_OFFSET]; TuningTableIndex1 = pAd->TxPowerCtrl.idxTxPowerTable2 + pAd->TxPowerCtrl.LookupTableIndex #ifdef DOT11_N_SUPPORT + pAd->TxPower[pAd->CommonCfg.CentralChannel-1].Power2; #else + pAd->TxPower[pAd->CommonCfg.Channel-1].Power2; #endif /* DOT11_N_SUPPORT */ /* The boundary verification */ TuningTableIndex1 = (TuningTableIndex1 > TuningTableUpperBound) ? TuningTableUpperBound : TuningTableIndex1; TuningTableIndex1 = (TuningTableIndex1 < LOWERBOUND_TX_POWER_TUNING_ENTRY) ? LOWERBOUND_TX_POWER_TUNING_ENTRY : TuningTableIndex1; TxPowerTuningTableEntry1 = &TxPowerTuningTable[TuningTableIndex1 + TX_POWER_TUNING_ENTRY_OFFSET]; DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] (tx0)RF_TX_ALC = %x, MAC_PowerDelta = %d, TuningTableIndex = %d\n", TxPowerTuningTableEntry0->RF_TX_ALC, TxPowerTuningTableEntry0->MAC_PowerDelta, TuningTableIndex0)); DBGPRINT(RT_DEBUG_INFO, ("[temp. 
compensation] (tx1)RF_TX_ALC = %x, MAC_PowerDelta = %d, TuningTableIndex = %d\n", TxPowerTuningTableEntry1->RF_TX_ALC, TxPowerTuningTableEntry1->MAC_PowerDelta, TuningTableIndex1)); /* Update RF_R49 [0:5] */ RT30xxReadRFRegister(pAd, RF_R49, &RFValue); RFValue = ((RFValue & ~0x3F) | TxPowerTuningTableEntry0->RF_TX_ALC); if ((RFValue & 0x3F) > 0x27) /* The valid range of the RF R49 (<5:0>tx0_alc<5:0>) is 0x00~0x27 */ { RFValue = ((RFValue & ~0x3F) | 0x27); } DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] Update RF_R49[0:5] to 0x%x\n", TxPowerTuningTableEntry0->RF_TX_ALC)); RT30xxWriteRFRegister(pAd, RF_R49, RFValue); /* Update RF_R50 [0:5] */ RT30xxReadRFRegister(pAd, RF_R50, &RFValue); RFValue = ((RFValue & ~0x3F) | TxPowerTuningTableEntry1->RF_TX_ALC); if ((RFValue & 0x3F) > 0x27) /* The valid range of the RF R49 (<5:0>tx0_alc<5:0>) is 0x00~0x27 */ { RFValue = ((RFValue & ~0x3F) | 0x27); } DBGPRINT(RT_DEBUG_INFO, ("[temp. compensation] Update RF_R50[0:5] to 0x%x\n", TxPowerTuningTableEntry1->RF_TX_ALC)); RT30xxWriteRFRegister(pAd, RF_R50, RFValue); *pTotalDeltaPwr = TxPowerTuningTableEntry0->MAC_PowerDelta; } *pAgcCompensate = *pTxAgcCompensate; DBGPRINT(RT_DEBUG_INFO, ("<--%s\n", __FUNCTION__)); } #endif /* RTMP_TEMPERATURE_COMPENSATION */ #ifdef SINGLE_SKU VOID GetSingleSkuDeltaPower( IN PRTMP_ADAPTER pAd, IN PCHAR pTotalDeltaPower, INOUT PULONG pSingleSKUTotalDeltaPwr, INOUT PUCHAR pSingleSKUBbpR1Offset) { INT i, j; CHAR Value; CHAR MinValue = 127; UCHAR BbpR1 = 0; UCHAR TxPwrInEEPROM = 0xFF, CountryTxPwr = 0xFF, criterion; UCHAR AdjustMaxTxPwr[(MAX_TX_PWR_CONTROL_OVER_MAC_REGISTERS * 8)]; CONFIGURATION_OF_TX_POWER_CONTROL_OVER_MAC CfgOfTxPwrCtrlOverMAC = {0}; /* Get TX rate offset table which from EEPROM 0xDEh ~ 0xEFh */ RTMP_CHIP_ASIC_TX_POWER_OFFSET_GET(pAd, (PULONG)&CfgOfTxPwrCtrlOverMAC); /* Handle regulatory max. 
TX power constraint */ if (pAd->CommonCfg.Channel > 14) { TxPwrInEEPROM = ((pAd->CommonCfg.DefineMaxTxPwr & 0xFF00) >> 8); /* 5G band */ } else { TxPwrInEEPROM = (pAd->CommonCfg.DefineMaxTxPwr & 0x00FF); /* 2.4G band */ } CountryTxPwr = GetCuntryMaxTxPwr(pAd, pAd->CommonCfg.Channel); /* Use OFDM 6M as the criterion */ criterion = (UCHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[0].RegisterValue & 0x000F0000) >> 16); DBGPRINT(RT_DEBUG_TRACE, ("%s: criterion=%d, TxPwrInEEPROM=%d, CountryTxPwr=%d\n", __FUNCTION__, criterion, TxPwrInEEPROM, CountryTxPwr)); /* Adjust max. TX power according to the relationship of TX power in EEPROM */ for (i=0; i<CfgOfTxPwrCtrlOverMAC.NumOfEntries; i++) { if (i == 0) { for (j=0; j<8; j++) { Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); if (j < 4) { AdjustMaxTxPwr[i*8+j] = TxPwrInEEPROM + (Value - criterion) + 4; /* CCK has 4dBm larger than OFDM */ } else { AdjustMaxTxPwr[i*8+j] = TxPwrInEEPROM + (Value - criterion); } DBGPRINT(RT_DEBUG_TRACE, ("%s: offset = 0x%04X, i/j=%d/%d, (Default)Value=%d, %d\n", __FUNCTION__, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].MACRegisterOffset, i, j, Value, AdjustMaxTxPwr[i*8+j])); } } else { for (j=0; j<8; j++) { Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); AdjustMaxTxPwr[i*8+j] = TxPwrInEEPROM + (Value - criterion); DBGPRINT(RT_DEBUG_TRACE, ("%s: offset = 0x%04X, i/j=%d/%d, (Default)Value=%d, %d\n", __FUNCTION__, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].MACRegisterOffset, i, j, Value, AdjustMaxTxPwr[i*8+j])); } } } /* Adjust TX power according to the relationship */ for (i=0; i<CfgOfTxPwrCtrlOverMAC.NumOfEntries; i++) { if (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue != 0xffffffff) { for (j=0; j<8; j++) { Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); /* The TX power is larger than the regulatory, the power should be restrained */ if (AdjustMaxTxPwr[i*8+j] > 
CountryTxPwr) { Value = (AdjustMaxTxPwr[i*8+j] - CountryTxPwr); if (Value > 0xF) { /* The output power is larger than Country Regulatory over 15dBm, the origianl design has overflow case */ DBGPRINT(RT_DEBUG_ERROR,("%s: Value overflow - %d\n", __FUNCTION__, Value)); } *(pSingleSKUTotalDeltaPwr+i) = (*(pSingleSKUTotalDeltaPwr+i) & ~(0x0000000F << j*4)) | (Value << j*4); DBGPRINT(RT_DEBUG_TRACE, ("%s: offset = 0x%04X, i/j=%d/%d, (Exceed)Value=%d, %d\n", __FUNCTION__, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].MACRegisterOffset, i, j, Value, AdjustMaxTxPwr[i*8+j])); } else { DBGPRINT(RT_DEBUG_TRACE, ("%s: offset = 0x%04X, i/j=%d/%d, Value=%d, %d, no change\n", __FUNCTION__, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].MACRegisterOffset, i, j, Value, AdjustMaxTxPwr[i*8+j])); } } } } /* Calculate the min. TX power */ for(i=0; i<CfgOfTxPwrCtrlOverMAC.NumOfEntries; i++) { if (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue != 0xffffffff) { for (j=0; j<8; j++) { CHAR PwrChange; /* After Single SKU, each data rate offset power value is saved in TotalDeltaPwr[]. PwrChange will add SingleSKUDeltaPwr and TotalDeltaPwr[] for each data rate to calculate the final adjust output power value which is saved in MAC Reg. and BBP_R1. */ /* Value / TxPwr[] is get from eeprom 0xDEh ~ 0xEFh and increase or decrease the 20/40 Bandwidth Delta Value in eeprom 0x50h. */ Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); /* 0 ~ 15 */ /* Fix the corner case of Single SKU read eeprom offset 0xF0h ~ 0xFEh which for BBP Instruction configuration */ if (Value == 0xF) continue; /* Value_offset is current Pwr comapre with Country Regulation and need adjust delta value */ PwrChange = (CHAR)((*(pSingleSKUTotalDeltaPwr+i) >> j*4) & 0x0F); /* 0 ~ 15 */ PwrChange -= *pTotalDeltaPower; Value -= PwrChange; if (MinValue > Value) MinValue = Value; } } } RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R1, &BbpR1); /* Depend on the min. 
TX power to adjust and prevent the value of MAC_TX_PWR_CFG less than 0 */
	/* Pick the BBP R1 power-drop step that keeps every per-rate value >= 0. */
	if ((MinValue < 0) && (MinValue >= -6))
	{
		BbpR1 |= MDSM_DROP_TX_POWER_BY_6dBm;
		*pSingleSKUBbpR1Offset = 6;
	}
	else if ((MinValue < -6)&&(MinValue >= -12))
	{
		BbpR1 |= MDSM_DROP_TX_POWER_BY_12dBm;
		*pSingleSKUBbpR1Offset = 12;
	}
	else if (MinValue < -12)
	{
		/* More than 12 dBm below zero cannot be absorbed by BBP R1; clamp at the ASIC limit. */
		DBGPRINT(RT_DEBUG_WARN, ("%s: ASIC limit..\n", __FUNCTION__));

		BbpR1 |= MDSM_DROP_TX_POWER_BY_12dBm;
		*pSingleSKUBbpR1Offset = 12;
	}

	RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BbpR1);

	DBGPRINT(RT_DEBUG_TRACE, ("%s: <After BBP R1> TotalDeltaPower = %d dBm, BbpR1 = 0x%02X \n", __FUNCTION__, *pTotalDeltaPower, BbpR1));
}
#endif /* SINGLE_SKU */


/*
	Lower the TX power according to the percentage selected from the UI.

	The EEPROM calibration corresponds to maximum (100%) TX power; the UI
	percentage is mapped to a fixed dB reduction, split between the MAC
	registers (*pDeltaPwr) and BBP R1 (*pDeltaPowerByBbpR1).

	Parameters:
		pAd                - adapter control block
		Rssi               - current max RSSI (used only in STA auto-power mode)
		pDeltaPwr          - in/out: dB delta later applied via the MAC TX_PWR_CFG registers
		pDeltaPowerByBbpR1 - in/out: dB delta later applied via BBP R1 (6 dB steps)
*/
VOID AsicPercentageDeltaPower(
	IN		PRTMP_ADAPTER	pAd,
	IN		CHAR			Rssi,
	INOUT	PCHAR			pDeltaPwr,
	INOUT	PCHAR			pDeltaPowerByBbpR1)
{
	/*
		Calculate delta power based on the percentage specified from UI.
		E2PROM setting is calibrated for maximum TX power (i.e. 100%).
		We lower TX power here according to the percentage specified from UI.
	*/
	if (pAd->CommonCfg.TxPowerPercentage >= 100) /* AUTO TX POWER control */
	{
#ifdef CONFIG_STA_SUPPORT
		if ((pAd->OpMode == OPMODE_STA)
#ifdef P2P_SUPPORT
			&& (!P2P_GO_ON(pAd))
#endif /* P2P_SUPPORT */
			)
		{
			/* To patch high power issue with some APs, like Belkin N1.*/
			/* Very strong signal: back the MAC power off to avoid receiver saturation at the AP. */
			if (Rssi > -35)
			{
				*pDeltaPwr -= 12;
			}
			else if (Rssi > -40)
			{
				*pDeltaPwr -= 6;
			}
			else
				; /* normal link distance: no reduction */
		}
#endif /* CONFIG_STA_SUPPORT */
	}
	else if (pAd->CommonCfg.TxPowerPercentage > 90) /* 91 ~ 100% & AUTO, treat as 100% in terms of mW */
		;
	else if (pAd->CommonCfg.TxPowerPercentage > 60) /* 61 ~ 90%, treat as 75% in terms of mW DeltaPwr -= 1; */
	{
		*pDeltaPwr -= 1;
	}
	else if (pAd->CommonCfg.TxPowerPercentage > 30) /* 31 ~ 60%, treat as 50% in terms of mW DeltaPwr -= 3; */
	{
		*pDeltaPwr -= 3;
	}
	else if (pAd->CommonCfg.TxPowerPercentage > 15) /* 16 ~ 30%, treat as 25% in terms of mW DeltaPwr -= 6; */
	{
		*pDeltaPowerByBbpR1 -= 6; /* -6 dBm */
	}
	else if (pAd->CommonCfg.TxPowerPercentage > 9) /* 10 ~ 15%, treat as 12.5% in terms of mW DeltaPwr -= 9; */
{
	*pDeltaPowerByBbpR1 -= 6; /* -6 dBm */
	*pDeltaPwr -= 3;
}
else /* 0 ~ 9 %, treat as MIN(~3%) in terms of mW DeltaPwr -= 12; */
{
	*pDeltaPowerByBbpR1 -= 12; /* -12 dBm */
}
}


/*
	Fold the accumulated TX power delta into BBP R1.

	BBP R1 can only drop TX power in 6 dB steps (-6 or -12 dBm); the amount
	absorbed here is added back into *pTotalDeltaPower so that only the
	remainder (> -6 dBm) stays for the MAC TX_PWR_CFG registers.

	Parameters:
		pAd              - adapter control block
		pTotalDeltaPower - in/out: total delta in dBm; increased by the amount
		                   taken over by BBP R1
*/
VOID AsicCompensatePowerViaBBP(
	IN		PRTMP_ADAPTER		pAd,
	INOUT	PCHAR				pTotalDeltaPower)
{
	UCHAR	BbpR1 = 0;

	DBGPRINT(RT_DEBUG_INFO, ("%s: <Before BBP R1> TotalDeltaPower = %d dBm\n", __FUNCTION__, *pTotalDeltaPower));

	/* The BBP R1 controls the transmit power for all rates */
	RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R1, &BbpR1);
	BbpR1 &= ~MDSM_BBP_R1_STATIC_TX_POWER_CONTROL_MASK;

	if (*pTotalDeltaPower <= -12)
	{
		*pTotalDeltaPower += 12;
		BbpR1 |= MDSM_DROP_TX_POWER_BY_12dBm;

		RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BbpR1);

		DBGPRINT(RT_DEBUG_INFO, ("%s: Drop the transmit power by 12 dBm (BBP R1)\n", __FUNCTION__));
	}
	else if ((*pTotalDeltaPower <= -6) && (*pTotalDeltaPower > -12))
	{
		*pTotalDeltaPower += 6;
		BbpR1 |= MDSM_DROP_TX_POWER_BY_6dBm;

		RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BbpR1);

		DBGPRINT(RT_DEBUG_INFO, ("%s: Drop the transmit power by 6 dBm (BBP R1)\n", __FUNCTION__));
	}
	else
	{
		/* Control the transmit power by using the MAC only */
		RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BbpR1);
	}

	DBGPRINT(RT_DEBUG_INFO, ("%s: <After BBP R1> TotalDeltaPower = %d dBm, BbpR1 = 0x%02X \n", __FUNCTION__, *pTotalDeltaPower, BbpR1));
}

/*
	==========================================================================
	Description:
		Gives CCK TX rate 2 more dB TX power.
		This routine works only in LINK UP in INFRASTRUCTURE mode.

		calculate desired Tx power in RF R3.Tx0~5, should consider -
		0. if current radio is a noisy environment (pAd->DrsCounters.fNoisyEnvironment)
		1. TxPowerPercentage
		2. auto calibration based on TSSI feedback
		3. extra 2 db for CCK
		4.
-10 db upon very-short distance (AvgRSSI >= -40db) to AP NOTE: Since this routine requires the value of (pAd->DrsCounters.fNoisyEnvironment), it should be called AFTER MlmeDynamicTxRatSwitching() ========================================================================== */ VOID AsicAdjustTxPower( IN PRTMP_ADAPTER pAd) { INT i, j; CHAR Value; CHAR Rssi = -127; CHAR DeltaPwr = 0; CHAR TxAgcCompensate = 0; CHAR DeltaPowerByBbpR1 = 0; CHAR TotalDeltaPower = 0; /* (non-positive number) including the transmit power controlled by the MAC and the BBP R1 */ #ifdef RTMP_INTERNAL_TX_ALC #ifdef RT3352 CHAR TotalDeltaPower2 = 0, Value2 = 0; BOOLEAN bTX1 = FALSE; #endif /* RT3352 */ #endif /* RTMP_INTERNAL_TX_ALC */ CONFIGURATION_OF_TX_POWER_CONTROL_OVER_MAC CfgOfTxPwrCtrlOverMAC = {0}; #ifdef SINGLE_SKU CHAR TotalDeltaPowerOri = 0; UCHAR SingleSKUBbpR1Offset = 0; ULONG SingleSKUTotalDeltaPwr[MAX_TXPOWER_ARRAY_SIZE] = {0}; #endif /* SINGLE_SKU */ #ifdef CONFIG_STA_SUPPORT if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF)) return; if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE) || #ifdef RTMP_MAC_PCI (pAd->bPCIclkOff == TRUE) || RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF) || #endif /* RTMP_MAC_PCI */ RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_BSS_SCAN_IN_PROGRESS)) return; IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { if(INFRA_ON(pAd)) { Rssi = RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.AvgRssi0, pAd->StaCfg.RssiSample.AvgRssi1, pAd->StaCfg.RssiSample.AvgRssi2); } } #endif /* CONFIG_STA_SUPPORT */ /* Get Tx rate offset table which from EEPROM 0xDEh ~ 0xEFh */ RTMP_CHIP_ASIC_TX_POWER_OFFSET_GET(pAd, (PULONG)&CfgOfTxPwrCtrlOverMAC); /* Get temperature compensation delta power value */ RTMP_CHIP_ASIC_AUTO_AGC_OFFSET_GET( pAd, &DeltaPwr, &TotalDeltaPower, &TxAgcCompensate, &DeltaPowerByBbpR1); DBGPRINT(RT_DEBUG_INFO, ("%s: DeltaPwr=%d, TotalDeltaPower=%d, TxAgcCompensate=%d, DeltaPowerByBbpR1=%d\n", __FUNCTION__, DeltaPwr, TotalDeltaPower, TxAgcCompensate, DeltaPowerByBbpR1)); #ifdef 
RTMP_INTERNAL_TX_ALC #ifdef RT3352 if (IS_RT3352(pAd) && (pAd->TxPowerCtrl.bInternalTxALC == TRUE)) { TotalDeltaPower2 = pAd->TxPowerCtrl.TotalDeltaPower2; } if (IS_RT3352(pAd) && (pAd->TxPowerCtrl.bInternalTxALC == FALSE)) { /* Get delta power based on the percentage specified from UI */ AsicPercentageDeltaPower(pAd, Rssi, &DeltaPwr,&DeltaPowerByBbpR1); } #endif /* RT3352 */ #endif /* RTMP_INTERNAL_TX_ALC */ #ifndef RT3352 /* Get delta power based on the percentage specified from UI */ AsicPercentageDeltaPower(pAd, Rssi, &DeltaPwr,&DeltaPowerByBbpR1); #endif /* RT3352 */ /* The transmit power controlled by the BBP */ TotalDeltaPower += DeltaPowerByBbpR1; /* The transmit power controlled by the MAC */ TotalDeltaPower += DeltaPwr; #ifdef SINGLE_SKU if (pAd->CommonCfg.bSKUMode == TRUE) { /* Re calculate delta power while enabling Single SKU */ GetSingleSkuDeltaPower(pAd, &TotalDeltaPower, (PULONG)&SingleSKUTotalDeltaPwr, &SingleSKUBbpR1Offset); TotalDeltaPowerOri = TotalDeltaPower; } else #endif /* SINGLE_SKU */ { #ifndef RT3352 AsicCompensatePowerViaBBP(pAd, &TotalDeltaPower); #endif /* RT3352 */ } /* Power will be updated each 4 sec. 
*/ if (pAd->Mlme.OneSecPeriodicRound % 4 == 0) { #ifndef RT3352 /*****************************************************************************/ /* Set new Tx power for different Tx rates */ for (i=0; i < CfgOfTxPwrCtrlOverMAC.NumOfEntries; i++) { TX_POWER_CONTROL_OVER_MAC_ENTRY *pTxPwrEntry; ULONG reg_val; pTxPwrEntry = &CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i]; reg_val = pTxPwrEntry->RegisterValue; if (reg_val != 0xffffffff) { for (j=0; j<8; j++) { CHAR _upbound, _lowbound, t_pwr; BOOLEAN _bValid; _lowbound = 0; _bValid = TRUE; Value = (CHAR)((reg_val >> j*4) & 0x0F); #ifdef SINGLE_SKU if (pAd->CommonCfg.bSKUMode == TRUE) { TotalDeltaPower = SingleSKUBbpR1Offset + TotalDeltaPowerOri - (CHAR)((SingleSKUTotalDeltaPwr[i] >> j*4) & 0x0F); DBGPRINT(RT_DEBUG_INFO, ("%s: BbpR1Offset(%d) + TX ALC(%d) - SingleSKU[%d/%d](%d) = TotalDeltaPower(%d)\n", __FUNCTION__, SingleSKUBbpR1Offset, TotalDeltaPowerOri, i, j, (CHAR)((SingleSKUTotalDeltaPwr[i] >> j*4) & 0x0F), TotalDeltaPower)); } #endif /* SINGLE_SKU */ #if defined(RTMP_INTERNAL_TX_ALC) || defined(RTMP_TEMPERATURE_COMPENSATION) /* The upper bounds of MAC 0x1314 ~ 0x1324 are variable */ if ((pAd->TxPowerCtrl.bInternalTxALC == TRUE)^(pAd->chipCap.bTempCompTxALC == TRUE)) { switch (0x1314 + (i * 4)) { case 0x1314: _upbound = 0xe; break; case 0x1318: _upbound = (j <= 3) ? 0xc : 0xe; break; case 0x131C: _upbound = ((j == 0) || (j == 2) || (j == 3)) ? 0xc : 0xe; break; case 0x1320: _upbound = (j == 1) ? 0xe : 0xc; break; case 0x1324: _upbound = 0xc; break; default: { /* do nothing */ _bValid = FALSE; DBGPRINT(RT_DEBUG_ERROR, ("%s: Unknown register = 0x%x\n", __FUNCTION__, (0x1314 + (i * 4)))); } break; } } else #endif /* RTMP_INTERNAL_TX_ALC || RTMP_TEMPERATURE_COMPENSATION */ #ifdef RT3883 if (IS_RT3883(pAd)) _upbound = (pAd->NicConfig2.field.DynamicTxAgcControl) ? 
0xf : 0xc; else #endif /* RT3883 */ _upbound = 0xc; if (_bValid) { t_pwr = Value + TotalDeltaPower; if (t_pwr < _lowbound) Value = _lowbound; else if (t_pwr > _upbound) Value = _upbound; else Value = t_pwr; } /* Fill new value into the corresponding MAC offset */ pTxPwrEntry->RegisterValue = (reg_val & ~(0x0000000F << j*4)) | (Value << j*4); } RTMP_IO_WRITE32(pAd, pTxPwrEntry->MACRegisterOffset, pTxPwrEntry->RegisterValue); } } #else /* specific for RT3352 */ /*****************************************************************************/ /* Set new Tx power for different Tx rates */ for (i=0; i < CfgOfTxPwrCtrlOverMAC.NumOfEntries; i++) { if (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue != 0xffffffff) { for (j=0; j<8; j++) { Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); #ifdef RTMP_INTERNAL_TX_ALC #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (pAd->TxPowerCtrl.bInternalTxALC == TRUE)) { if (j & 0x00000001) /* j=1, 3, 5, 7 */ { /* TX1 ALC */ Value2 = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); /* 0 ~ 15 */ bTX1 = TRUE; } else /* j=0, 2, 4, 6 */ { /* TX0 ALC */ Value = (CHAR)((CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue >> j*4) & 0x0F); /* 0 ~ 15 */ bTX1 = FALSE; } } #endif /* RT3352 */ /* The upper bounds of the MAC 0x1314~0x1324 are variable when the STA uses the internal Tx ALC.*/ if ((pAd->TxPowerCtrl.bInternalTxALC == TRUE) && (pAd->Mlme.OneSecPeriodicRound % 4 == 0)) { switch (TX_PWR_CFG_0 + (i * 4)) { case TX_PWR_CFG_0: { if (bTX1 == FALSE) { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE) { Value = 0xE; } else { Value += TotalDeltaPower; } } #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (bTX1 == TRUE)) { /* TX1 ALC */ if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE) { Value2 = 0xE; } else { Value2 += TotalDeltaPower2; 
} } #endif /* RT3352 */ } break; case TX_PWR_CFG_1: { if (bTX1 == FALSE) { if ((j >= 0) && (j <= 3)) { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE)/* by HK 2011.04.06 */ { Value = 0xE;/* by HK 2011.04.06 */ } else { Value += TotalDeltaPower; } } else { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE) { Value = 0xE; } else { Value += TotalDeltaPower; } } } #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (bTX1 == TRUE)) { /* TX1 ALC */ if ((j >= 0) && (j <= 3)) { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE)/* by HK 2011.04.06 */ { Value2 = 0xE;/* by HK 2011.04.06 */ } else { Value2 += TotalDeltaPower2; } } else { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE) { Value2 = 0xE; } else { Value2 += TotalDeltaPower2; } } } #endif /* RT3352 */ } break; case TX_PWR_CFG_2: { if (bTX1 == FALSE) { if ((j == 0) || (j == 2) || (j == 3)) { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE)/* by HK 2011.04.06 */ { Value = 0xE;/* by HK 2011.04.06 */ } else { Value += TotalDeltaPower; } } else { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE) { Value = 0xE; } else { Value += TotalDeltaPower; } } } #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (bTX1 == TRUE)) { /* TX1 ALC */ if ((j == 0) || (j == 2) || (j == 3)) { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE)/* by HK 2011.04.06 */ { Value2 = 0xE;/* by HK 2011.04.06 */ } else { Value2 += TotalDeltaPower2; } } else { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE) { Value2 = 0xE; } else { Value2 += TotalDeltaPower2; } } } #endif /* RT3352 */ } break; case TX_PWR_CFG_3: { if (bTX1 == FALSE) { if ((j == 0) || (j == 2) || 
(j == 3) || ((j >= 4) && (j <= 7))) { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE)/* by HK 2011.04.06 */ { Value = 0xE;/* by HK 2011.04.06 */ } else { Value += TotalDeltaPower; } } else { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE) { Value = 0xE; } else { Value += TotalDeltaPower; } } } #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (bTX1 == TRUE)) { /* TX1 ALC */ if ((j == 0) || (j == 2) || (j == 3) || ((j >= 4) && (j <= 7))) { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE)/* by HK 2011.04.06 */ { Value2 = 0xE;/* by HK 2011.04.06 */ } else { Value2 += TotalDeltaPower2; } } else { if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE) { Value2 = 0xE; } else { Value2 += TotalDeltaPower2; } } } #endif /* RT3352 */ } break; case TX_PWR_CFG_4: { if (bTX1 == FALSE) { if ((Value + TotalDeltaPower) < 0) { Value = 0; } else if ((Value + TotalDeltaPower) > 0xE)/* by HK 2011.04.06 */ { Value = 0xE;/* by HK 2011.04.06 */ } else { Value += TotalDeltaPower; } } #ifdef RT3352 /* Tx power adjustment over MAC */ if (IS_RT3352(pAd) && (bTX1 == TRUE)) { /* TX1 ALC */ if ((Value2 + TotalDeltaPower2) < 0) { Value2 = 0; } else if ((Value2 + TotalDeltaPower2) > 0xE)/* by HK 2011.04.06 */ { Value2 = 0xE;/* by HK 2011.04.06 */ } else { Value2 += TotalDeltaPower2; } } #endif /* RT3352 */ } break; default: { /* do nothing*/ DBGPRINT(RT_DEBUG_ERROR, ("%s: unknown register = 0x%X\n", __FUNCTION__, (TX_PWR_CFG_0 + (i * 4)))); } break; } } else #endif /* RTMP_INTERNAL_TX_ALC */ { if ((Value + TotalDeltaPower) < 0) { Value = 0; /* min */ } else if ((Value + TotalDeltaPower) > 0xC) { Value = 0xC; /* max */ } else { Value += TotalDeltaPower; /* temperature compensation */ } } #ifdef RTMP_INTERNAL_TX_ALC #ifdef RT3352 /* fill new value to CSR offset */ /* Tx power adjustment over MAC */ if 
(IS_RT3352(pAd) && (pAd->TxPowerCtrl.bInternalTxALC == TRUE) && (pAd->Mlme.OneSecPeriodicRound % 4 == 0)) { if (bTX1 == TRUE) /* j=1, 3, 5, 7 */ { /* TX1 ALC */ CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue = (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue & ~(0x0000000F << j*4)) | (Value2 << j*4); } else /* j=0, 2, 4, 6 */ { /* TX0 ALC */ CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue = (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue & ~(0x0000000F << j*4)) | (Value << j*4); } } #else CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue = (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue & ~(0x0000000F << j*4)) | (Value << j*4); #endif /* RT3352 */ else #endif /* RTMP_INTERNAL_TX_ALC */ { /* TX0 ALC only */ CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue = (CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue & ~(0x0000000F << j*4)) | (Value << j*4); } } /* write tx power value to CSR */ /* TX_PWR_CFG_0 (8 tx rate) for TX power for OFDM 12M/18M TX power for OFDM 6M/9M TX power for CCK5.5M/11M TX power for CCK1M/2M */ /* TX_PWR_CFG_1 ~ TX_PWR_CFG_4 */ { /* RTMP_IO_WRITE32(pAd, TX_PWR_CFG_0 + i*4, TxPwr[i]);*/ #ifdef RTMP_INTERNAL_TX_ALC if ((pAd->TxPowerCtrl.bInternalTxALC == TRUE) && (pAd->Mlme.OneSecPeriodicRound % 4 == 0)) #endif /* RTMP_INTERNAL_TX_ALC */ RTMP_IO_WRITE32(pAd, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].MACRegisterOffset, CfgOfTxPwrCtrlOverMAC.TxPwrCtrlOverMAC[i].RegisterValue); } } } /*****************************************************************************/ #endif /* !RT3352 */ /* Extra set MAC registers to compensate Tx power if any */ RTMP_CHIP_ASIC_EXTRA_POWER_OVER_MAC(pAd); } } VOID AsicResetBBPAgent( IN PRTMP_ADAPTER pAd) { BBP_CSR_CFG_STRUC BbpCsr; /* Still need to find why BBP agent keeps busy, but in fact, hardware still function ok. Now clear busy first. 
	*/
	/* IF chipOps.AsicResetBbpAgent == NULL, run "else" part */
	RTMP_CHIP_ASIC_RESET_BBP_AGENT(pAd);
	/* NOTE(review): the macro above presumably expands to an "if" statement so
	   that the "else" below attaches to it - confirm against the chip-ops macro
	   definition before reformatting this construct. */
	else
	{
		DBGPRINT(RT_DEBUG_INFO, ("Reset BBP Agent busy bit.!! \n"));
		/* Generic fallback: clear the H2M_BBP_AGENT busy flag directly. */
		RTMP_IO_READ32(pAd, H2M_BBP_AGENT, &BbpCsr.word);
		BbpCsr.field.Busy = 0;
		RTMP_IO_WRITE32(pAd, H2M_BBP_AGENT, BbpCsr.word);
	}
}

#ifdef CONFIG_STA_SUPPORT
/*
	==========================================================================
	Description:
		Put PHY to sleep here, and set next wakeup timer. PHY does not wake up
		automatically. Instead, the MCU will issue a TwakeUpInterrupt to the
		host after the wakeup timer times out. The driver has to issue a
		separate command to wake the PHY up.

	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicSleepThenAutoWakeup(
	IN PRTMP_ADAPTER pAd,
	IN USHORT TbttNumToNextWakeUp)
{
	/* Delegate to the chip-specific STA sleep/auto-wakeup hook. */
	RTMP_STA_SLEEP_THEN_AUTO_WAKEUP(pAd, TbttNumToNextWakeUp);
}

/*
	==========================================================================
	Description:
		AsicForceWakeup() is used whenever manual wakeup is required
		AsicForceSleep() should only be used when not in INFRA BSS. When
		in INFRA BSS, we should use AsicSleepThenAutoWakeup() instead.
	==========================================================================
 */
VOID AsicForceSleep(
	IN PRTMP_ADAPTER pAd)
{
	/* No-op: the body is intentionally empty in the vendor source. */
}

/*
	==========================================================================
	Description:
		AsicForceWakeup() is used whenever Twakeup timer (set via AsicSleepThenAutoWakeup)
		expired.
	IRQL = PASSIVE_LEVEL
	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicForceWakeup(
	IN PRTMP_ADAPTER pAd,
	IN BOOLEAN bFromTx)
{
	DBGPRINT(RT_DEBUG_INFO, ("--> AsicForceWakeup \n"));
	/* Delegate to the chip-specific STA force-wakeup hook. */
	RTMP_STA_FORCE_WAKEUP(pAd, bFromTx);
}
#endif /* CONFIG_STA_SUPPORT */

/*
	==========================================================================
	Description:
		Set My BSSID

	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicSetBssid(
	IN PRTMP_ADAPTER pAd,
	IN PUCHAR pBssid)
{
	ULONG		Addr4;
#ifdef P2P_SUPPORT
	UINT32		regValue;
#endif /* P2P_SUPPORT */

	DBGPRINT(RT_DEBUG_TRACE, ("==============> AsicSetBssid %x:%x:%x:%x:%x:%x\n",
		pBssid[0],pBssid[1],pBssid[2],pBssid[3], pBssid[4],pBssid[5]));

	/* BSSID bytes 0..3 packed little-endian into MAC_BSSID_DW0 */
	Addr4 = (ULONG)(pBssid[0]) |
			(ULONG)(pBssid[1] << 8) |
			(ULONG)(pBssid[2] << 16) |
			(ULONG)(pBssid[3] << 24);
	RTMP_IO_WRITE32(pAd, MAC_BSSID_DW0, Addr4);

	Addr4 = 0;
	/* always one BSSID in STA mode*/
	/* BSSID bytes 4..5 go into the low half of MAC_BSSID_DW1. */
	Addr4 = (ULONG)(pBssid[4]) | (ULONG)(pBssid[5] << 8);
#ifdef P2P_SUPPORT
#ifdef P2P_ODD_MAC_ADJUST
	if ( (pAd->CurrentAddress[5] & 0x01 ) == 0x01 )
	{
		Addr4 |= (1 << 16 );
	}
#endif /* P2P_ODD_MAC_ADJUST */
#endif /* P2P_SUPPORT */
	RTMP_IO_WRITE32(pAd, MAC_BSSID_DW1, Addr4);

#ifdef P2P_SUPPORT
	if (P2P_INF_ON(pAd))
	{
		/* P2P active: reprogram the BSSID registers from our own MAC address. */
		PUCHAR pP2PBssid = &pAd->CurrentAddress[0];

		Addr4 = (ULONG)(pP2PBssid[0]) |
				(ULONG)(pP2PBssid[1] << 8) |
				(ULONG)(pP2PBssid[2] << 16) |
				(ULONG)(pP2PBssid[3] << 24);
		RTMP_IO_WRITE32(pAd, MAC_BSSID_DW0, Addr4);
		Addr4 = 0;
		/* always one BSSID in STA mode */
		Addr4 = (ULONG)(pP2PBssid[4]) | (ULONG)(pP2PBssid[5] << 8);
		RTMP_IO_WRITE32(pAd, MAC_BSSID_DW1, Addr4);

		/* Keep only the BSSID bytes, then set bit 16 of MAC_BSSID_DW1.
		   NOTE(review): bit-16 semantics not visible here - confirm against the
		   register description before changing. */
		RTMP_IO_READ32(pAd, MAC_BSSID_DW1, &regValue);
		regValue &= 0x0000FFFF;
		regValue |= (1 << 16);

		if (pAd->chipCap.MBSSIDMode == MBSSID_MODE0)
		{
			/* 2-BSSID mode requires an even last BSSID byte (unless the
			   P2P_ODD_MAC_ADJUST workaround is compiled in, which disables
			   this check via "&& FALSE"). */
			if ((pAd->CurrentAddress[5] % 2 != 0)
#ifdef P2P_SUPPORT
#ifdef P2P_ODD_MAC_ADJUST
				&& FALSE
#endif /* P2P_ODD_MAC_ADJUST */
#endif /* P2P_SUPPORT */
			)
				DBGPRINT(RT_DEBUG_ERROR, ("The 2-BSSID mode
is enabled, the BSSID byte5 MUST be the multiple of 2\n"));
		}
		else
		{
			/*set as 0/1 bit-21 of MAC_BSSID_DW1(offset: 0x1014) to disable/enable the new MAC address assignment. */
			regValue |= (1 << 21);
		}

		RTMP_IO_WRITE32(pAd, MAC_BSSID_DW1, regValue);
	}
#endif /* P2P_SUPPORT */
}

/*
	Initialize the multicast MAC-table entry (WCID 0) as a pseudo-associated
	station so multicast frames can be transmitted at the MLME rate.
*/
VOID AsicSetMcastWC(
	IN PRTMP_ADAPTER pAd)
{
	MAC_TABLE_ENTRY *pEntry = &pAd->MacTab.Content[MCAST_WCID];
	USHORT		offset;

	pEntry->Sst = SST_ASSOC;
	pEntry->Aid = MCAST_WCID;	/* Softap supports 1 BSSID and use WCID=0 as multicast Wcid index*/
	pEntry->PsMode = PWR_ACTIVE;
	pEntry->CurrTxRate = pAd->CommonCfg.MlmeRate;
	offset = MAC_WCID_BASE + BSS0Mcast_WCID * HW_WCID_ENTRY_SIZE;
	/* NOTE(review): "offset" is computed but never written to the hardware in
	   this function - presumably the WCID register write was removed or is
	   done elsewhere; confirm before cleaning up the unused local. */
}

/*
	==========================================================================
	Description:
		Clear the hardware WCID table entry (two 32-bit words zeroed) for the
		given WCID index.

	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicDelWcidTab(
	IN PRTMP_ADAPTER pAd,
	IN UCHAR Wcid)
{
	ULONG		Addr0 = 0x0, Addr1 = 0x0;
	ULONG		offset;

	DBGPRINT(RT_DEBUG_TRACE, ("AsicDelWcidTab==>Wcid = 0x%x\n",Wcid));
	offset = MAC_WCID_BASE + Wcid * HW_WCID_ENTRY_SIZE;
	RTMP_IO_WRITE32(pAd, offset, Addr0);
	offset += 4;
	RTMP_IO_WRITE32(pAd, offset, Addr1);
}

#ifdef DOT11_N_SUPPORT
/*
	==========================================================================
	Description:
		Enable Reverse Direction Grant: set TxRDGEn in TX_LINK_CFG and program
		the low byte (TXOP) of EDCA_AC0_CFG to 0x80.

	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicEnableRDG(
	IN PRTMP_ADAPTER pAd)
{
	TX_LINK_CFG_STRUC	TxLinkCfg;
	UINT32				Data = 0;

	RTMP_IO_READ32(pAd, TX_LINK_CFG, &TxLinkCfg.word);
	TxLinkCfg.field.TxRDGEn = 1;
	RTMP_IO_WRITE32(pAd, TX_LINK_CFG, TxLinkCfg.word);

	RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data);
	Data &= 0xFFFFFF00;
	Data |= 0x80;
	RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data);

	/*OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_AGGREGATION_INUSED);*/
}

/*
	==========================================================================
	Description:
		Disable Reverse Direction Grant and restore the AC0 TXOP.

	IRQL = DISPATCH_LEVEL

	==========================================================================
 */
VOID AsicDisableRDG(
	IN 
PRTMP_ADAPTER pAd) { TX_LINK_CFG_STRUC TxLinkCfg; UINT32 Data = 0; RTMP_IO_READ32(pAd, TX_LINK_CFG, &TxLinkCfg.word); TxLinkCfg.field.TxRDGEn = 0; RTMP_IO_WRITE32(pAd, TX_LINK_CFG, TxLinkCfg.word); RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data); Data &= 0xFFFFFF00; /*Data |= 0x20;*/ #ifndef WIFI_TEST /*if ( pAd->CommonCfg.bEnableTxBurst ) */ /* Data |= 0x60; for performance issue not set the TXOP to 0*/ #endif if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_DYNAMIC_BE_TXOP_ACTIVE) #ifdef DOT11_N_SUPPORT && (pAd->MacTab.fAnyStationMIMOPSDynamic == FALSE) #endif /* DOT11_N_SUPPORT */ ) { /* For CWC test, change txop from 0x30 to 0x20 in TxBurst mode*/ if (pAd->CommonCfg.bEnableTxBurst) Data |= 0x20; } RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data); } #endif /* DOT11_N_SUPPORT */ /* ========================================================================== Description: IRQL = PASSIVE_LEVEL IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicDisableSync( IN PRTMP_ADAPTER pAd) { BCN_TIME_CFG_STRUC csr; DBGPRINT(RT_DEBUG_TRACE, ("--->Disable TSF synchronization\n")); /* 2003-12-20 disable TSF and TBTT while NIC in power-saving have side effect*/ /* that NIC will never wakes up because TSF stops and no more */ /* TBTT interrupts*/ pAd->TbttTickCount = 0; RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr.word); csr.field.bBeaconGen = 0; csr.field.bTBTTEnable = 0; csr.field.TsfSyncMode = 0; csr.field.bTsfTicking = 0; RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr.word); } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicEnableBssSync( IN PRTMP_ADAPTER pAd) { BCN_TIME_CFG_STRUC csr; DBGPRINT(RT_DEBUG_TRACE, ("--->AsicEnableBssSync(INFRA mode)\n")); RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr.word); /* RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, 0x00000000);*/ #ifdef CONFIG_AP_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) { 
csr.field.BeaconInterval = pAd->CommonCfg.BeaconPeriod << 4; /* ASIC register in units of 1/16 TU*/ csr.field.bTsfTicking = 1; csr.field.TsfSyncMode = 3; /* sync TSF similar as in ADHOC mode?*/ csr.field.bBeaconGen = 1; /* AP should generate BEACON*/ csr.field.bTBTTEnable = 1; } #endif /* CONFIG_AP_SUPPORT */ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { csr.field.BeaconInterval = pAd->CommonCfg.BeaconPeriod << 4; /* ASIC register in units of 1/16 TU*/ csr.field.bTsfTicking = 1; csr.field.TsfSyncMode = 1; /* sync TSF in INFRASTRUCTURE mode*/ csr.field.bBeaconGen = 0; /* do NOT generate BEACON*/ csr.field.bTBTTEnable = 1; } #endif /* CONFIG_STA_SUPPORT */ RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr.word); } /* ========================================================================== Description: Note: BEACON frame in shared memory should be built ok before this routine can be called. Otherwise, a garbage frame maybe transmitted out every Beacon period. IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicEnableIbssSync( IN PRTMP_ADAPTER pAd) { BCN_TIME_CFG_STRUC csr9; PUCHAR ptr; UINT i; ULONG beaconBaseLocation = 0; USHORT beaconLen = (USHORT) pAd->BeaconTxWI.MPDUtotalByteCount; UINT8 TXWISize = pAd->chipCap.TXWISize; UINT32 longptr; #ifdef RT_BIG_ENDIAN TXWI_STRUC localTxWI; NdisMoveMemory((PUCHAR)&localTxWI, (PUCHAR)&pAd->BeaconTxWI, TXWISize); RTMPWIEndianChange(pAd, (PUCHAR)&localTxWI, TYPE_TXWI); beaconLen = (USHORT) localTxWI.MPDUtotalByteCount; #endif /* RT_BIG_ENDIAN */ DBGPRINT(RT_DEBUG_TRACE, ("--->AsicEnableIbssSync(MPDUtotalByteCount=%d, beaconLen=%d)\n", pAd->BeaconTxWI.MPDUtotalByteCount, beaconLen)); DBGPRINT(RT_DEBUG_TRACE, ("--->AsicEnableIbssSync(ADHOC mode. 
MPDUtotalByteCount = %d)\n", pAd->BeaconTxWI.MPDUtotalByteCount)); RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr9.word); csr9.field.bBeaconGen = 0; csr9.field.bTBTTEnable = 0; csr9.field.bTsfTicking = 0; RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr9.word); beaconBaseLocation = HW_BEACON_BASE0(pAd); #ifdef RTMP_MAC_PCI /* move BEACON TXD and frame content to on-chip memory*/ ptr = (PUCHAR)&pAd->BeaconTxWI; for (i=0; i < TXWISize; i+=4) { longptr = *ptr + (*(ptr+1)<<8) + (*(ptr+2)<<16) + (*(ptr+3)<<24); RTMP_CHIP_UPDATE_BEACON(pAd, HW_BEACON_BASE0(pAd) + i, longptr, 4); ptr += 4; } /* start right after the 16-byte TXWI field*/ ptr = pAd->BeaconBuf; for (i=0; i< beaconLen; i+=4) { longptr = *ptr + (*(ptr+1)<<8) + (*(ptr+2)<<16) + (*(ptr+3)<<24); RTMP_CHIP_UPDATE_BEACON(pAd, HW_BEACON_BASE0(pAd) + TXWISize + i, longptr, 4); ptr +=4; } #endif /* RTMP_MAC_PCI */ /* For Wi-Fi faily generated beacons between participating stations. */ /* Set TBTT phase adaptive adjustment step to 8us (default 16us)*/ /* don't change settings 2006-5- by Jerry*/ /*RTMP_IO_WRITE32(pAd, TBTT_SYNC_CFG, 0x00001010);*/ /* start sending BEACON*/ csr9.field.BeaconInterval = pAd->CommonCfg.BeaconPeriod << 4; /* ASIC register in units of 1/16 TU*/ csr9.field.bTsfTicking = 1; #ifdef IWSC_SUPPORT /* SYNC with nobody If Canon loses our Beacon over 5 seconds, Canon will delete us silently. 
*/ csr9.field.TsfSyncMode = 3; // sync TSF in IBSS mode #else /* IWSC_SUPPORT */ /* (STA ad-hoc mode) Upon the reception of BEACON frame from associated BSS, local TSF is updated with remote TSF only if the remote TSF is greater than local TSF */ csr9.field.TsfSyncMode = 2; /* sync TSF in IBSS mode*/ #endif /* !IWSC_SUPPORT */ csr9.field.bTBTTEnable = 1; csr9.field.bBeaconGen = 1; RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr9.word); } /* ========================================================================== Description: IRQL = PASSIVE_LEVEL IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicSetEdcaParm( IN PRTMP_ADAPTER pAd, IN PEDCA_PARM pEdcaParm) { EDCA_AC_CFG_STRUC Ac0Cfg, Ac1Cfg, Ac2Cfg, Ac3Cfg; AC_TXOP_CSR0_STRUC csr0; AC_TXOP_CSR1_STRUC csr1; AIFSN_CSR_STRUC AifsnCsr; CWMIN_CSR_STRUC CwminCsr; CWMAX_CSR_STRUC CwmaxCsr; int i; UINT32 MaxWcidNum = MAX_LEN_OF_MAC_TABLE; #ifdef MAC_REPEATER_SUPPORT if (pAd->ApCfg.bMACRepeaterEn) MaxWcidNum = MAX_MAC_TABLE_SIZE_WITH_REPEATER; #endif /* MAC_REPEATER_SUPPORT */ Ac0Cfg.word = 0; Ac1Cfg.word = 0; Ac2Cfg.word = 0; Ac3Cfg.word = 0; if ((pEdcaParm == NULL) || (pEdcaParm->bValid == FALSE)) { DBGPRINT(RT_DEBUG_TRACE,("AsicSetEdcaParm\n")); OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_WMM_INUSED); for (i=0; i < MaxWcidNum; i++) { if (IS_ENTRY_CLIENT(&pAd->MacTab.Content[i]) || IS_ENTRY_APCLI(&pAd->MacTab.Content[i])) CLIENT_STATUS_CLEAR_FLAG(&pAd->MacTab.Content[i], fCLIENT_STATUS_WMM_CAPABLE); } /*========================================================*/ /* MAC Register has a copy .*/ /*========================================================*/ /*#ifndef WIFI_TEST*/ if( pAd->CommonCfg.bEnableTxBurst ) { /* For CWC test, change txop from 0x30 to 0x20 in TxBurst mode*/ Ac0Cfg.field.AcTxop = 0x20; /* Suggest by John for TxBurst in HT Mode*/ } else Ac0Cfg.field.AcTxop = 0; /* QID_AC_BE*/ /*#else*/ /* Ac0Cfg.field.AcTxop = 0; QID_AC_BE*/ /*#endif */ Ac0Cfg.field.Cwmin = 
CW_MIN_IN_BITS; Ac0Cfg.field.Cwmax = CW_MAX_IN_BITS; Ac0Cfg.field.Aifsn = 2; RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Ac0Cfg.word); Ac1Cfg.field.AcTxop = 0; /* QID_AC_BK*/ Ac1Cfg.field.Cwmin = CW_MIN_IN_BITS; Ac1Cfg.field.Cwmax = CW_MAX_IN_BITS; Ac1Cfg.field.Aifsn = 2; RTMP_IO_WRITE32(pAd, EDCA_AC1_CFG, Ac1Cfg.word); if (pAd->CommonCfg.PhyMode == PHY_11B) { Ac2Cfg.field.AcTxop = 192; /* AC_VI: 192*32us ~= 6ms*/ Ac3Cfg.field.AcTxop = 96; /* AC_VO: 96*32us ~= 3ms*/ } else { Ac2Cfg.field.AcTxop = 96; /* AC_VI: 96*32us ~= 3ms*/ Ac3Cfg.field.AcTxop = 48; /* AC_VO: 48*32us ~= 1.5ms*/ } Ac2Cfg.field.Cwmin = CW_MIN_IN_BITS; Ac2Cfg.field.Cwmax = CW_MAX_IN_BITS; Ac2Cfg.field.Aifsn = 2; RTMP_IO_WRITE32(pAd, EDCA_AC2_CFG, Ac2Cfg.word); Ac3Cfg.field.Cwmin = CW_MIN_IN_BITS; Ac3Cfg.field.Cwmax = CW_MAX_IN_BITS; Ac3Cfg.field.Aifsn = 2; RTMP_IO_WRITE32(pAd, EDCA_AC3_CFG, Ac3Cfg.word); /*========================================================*/ /* DMA Register has a copy too.*/ /*========================================================*/ csr0.field.Ac0Txop = 0; /* QID_AC_BE*/ csr0.field.Ac1Txop = 0; /* QID_AC_BK*/ RTMP_IO_WRITE32(pAd, WMM_TXOP0_CFG, csr0.word); if (pAd->CommonCfg.PhyMode == PHY_11B) { csr1.field.Ac2Txop = 192; /* AC_VI: 192*32us ~= 6ms*/ csr1.field.Ac3Txop = 96; /* AC_VO: 96*32us ~= 3ms*/ } else { csr1.field.Ac2Txop = 96; /* AC_VI: 96*32us ~= 3ms*/ csr1.field.Ac3Txop = 48; /* AC_VO: 48*32us ~= 1.5ms*/ } RTMP_IO_WRITE32(pAd, WMM_TXOP1_CFG, csr1.word); CwminCsr.word = 0; CwminCsr.field.Cwmin0 = CW_MIN_IN_BITS; CwminCsr.field.Cwmin1 = CW_MIN_IN_BITS; CwminCsr.field.Cwmin2 = CW_MIN_IN_BITS; CwminCsr.field.Cwmin3 = CW_MIN_IN_BITS; RTMP_IO_WRITE32(pAd, WMM_CWMIN_CFG, CwminCsr.word); CwmaxCsr.word = 0; CwmaxCsr.field.Cwmax0 = CW_MAX_IN_BITS; CwmaxCsr.field.Cwmax1 = CW_MAX_IN_BITS; CwmaxCsr.field.Cwmax2 = CW_MAX_IN_BITS; CwmaxCsr.field.Cwmax3 = CW_MAX_IN_BITS; RTMP_IO_WRITE32(pAd, WMM_CWMAX_CFG, CwmaxCsr.word); RTMP_IO_WRITE32(pAd, WMM_AIFSN_CFG, 0x00002222); 
NdisZeroMemory(&pAd->CommonCfg.APEdcaParm, sizeof(EDCA_PARM)); } else { OPSTATUS_SET_FLAG(pAd, fOP_STATUS_WMM_INUSED); /*========================================================*/ /* MAC Register has a copy.*/ /*========================================================*/ /* Modify Cwmin/Cwmax/Txop on queue[QID_AC_VI], Recommend by Jerry 2005/07/27*/ /* To degrade our VIDO Queue's throughput for WiFi WMM S3T07 Issue.*/ /*pEdcaParm->Txop[QID_AC_VI] = pEdcaParm->Txop[QID_AC_VI] * 7 / 10; rt2860c need this */ Ac0Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BE]; Ac0Cfg.field.Cwmin= pEdcaParm->Cwmin[QID_AC_BE]; Ac0Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_BE]; Ac0Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BE]; /*+1;*/ Ac1Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BK]; Ac1Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_BK]; /*+2; */ Ac1Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_BK]; Ac1Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BK]; /*+1;*/ Ac2Cfg.field.AcTxop = (pEdcaParm->Txop[QID_AC_VI] * 6) / 10; #ifdef RTMP_RBUS_SUPPORT if(pAd->Antenna.field.TxPath == 1) { Ac2Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_VI] + 1; Ac2Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_VI] + 1; } else #endif { Ac2Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_VI]; Ac2Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_VI]; } /*sync with window 20110524*/ Ac2Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_VI] + 1; /* 5.2.27 T6 Pass Tx VI+BE, but will impack 5.2.27/28 T7. 
Tx VI*/ #ifdef INF_AMAZON_SE #ifdef CONFIG_AP_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) Ac2Cfg.field.Aifsn = 0x3; /*for WiFi WMM A1-T07.*/ #endif /* CONFIG_AP_SUPPORT */ #endif /* INF_AMAZON_SE */ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { /* Tuning for Wi-Fi WMM S06*/ if (pAd->CommonCfg.bWiFiTest && pEdcaParm->Aifsn[QID_AC_VI] == 10) Ac2Cfg.field.Aifsn -= 1; /* Tuning for TGn Wi-Fi 5.2.32*/ /* STA TestBed changes in this item: conexant legacy sta ==> broadcom 11n sta*/ if (STA_TGN_WIFI_ON(pAd) && pEdcaParm->Aifsn[QID_AC_VI] == 10) { Ac0Cfg.field.Aifsn = 3; Ac2Cfg.field.AcTxop = 5; } } #endif /* CONFIG_STA_SUPPORT */ Ac3Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_VO]; Ac3Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_VO]; Ac3Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_VO]; Ac3Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_VO]; /*#ifdef WIFI_TEST*/ if (pAd->CommonCfg.bWiFiTest) { if (Ac3Cfg.field.AcTxop == 102) { Ac0Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BE] ? pEdcaParm->Txop[QID_AC_BE] : 10; Ac0Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BE]-1; /* AIFSN must >= 1 */ Ac1Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BK]; Ac1Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BK]; Ac2Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_VI]; } /* End of if */ } /*#endif WIFI_TEST */ #ifdef CONFIG_STA_SUPPORT #ifdef RTMP_MAC_PCI /* STA TestBed changes in this item: for sta wifitest 5.2.32, 2011/04/11 */ /* just for 5390 5392 pci, 5370 5372 not need this patch */ if((IS_RT5390(pAd) || IS_RT5392(pAd)) && pEdcaParm->Aifsn[QID_AC_VI] == 10) { Ac0Cfg.field.AcTxop = 38; } #endif /* RTMP_MAC_PCI */ #endif /* CONFIG_STA_SUPPORT */ RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Ac0Cfg.word); RTMP_IO_WRITE32(pAd, EDCA_AC1_CFG, Ac1Cfg.word); RTMP_IO_WRITE32(pAd, EDCA_AC2_CFG, Ac2Cfg.word); RTMP_IO_WRITE32(pAd, EDCA_AC3_CFG, Ac3Cfg.word); /*========================================================*/ /* DMA Register has a copy too.*/ /*========================================================*/ csr0.field.Ac0Txop = 
Ac0Cfg.field.AcTxop; csr0.field.Ac1Txop = Ac1Cfg.field.AcTxop; RTMP_IO_WRITE32(pAd, WMM_TXOP0_CFG, csr0.word); csr1.field.Ac2Txop = Ac2Cfg.field.AcTxop; csr1.field.Ac3Txop = Ac3Cfg.field.AcTxop; RTMP_IO_WRITE32(pAd, WMM_TXOP1_CFG, csr1.word); CwminCsr.word = 0; CwminCsr.field.Cwmin0 = pEdcaParm->Cwmin[QID_AC_BE]; CwminCsr.field.Cwmin1 = pEdcaParm->Cwmin[QID_AC_BK]; CwminCsr.field.Cwmin2 = pEdcaParm->Cwmin[QID_AC_VI]; #ifdef CONFIG_AP_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) CwminCsr.field.Cwmin3 = pEdcaParm->Cwmin[QID_AC_VO]; #endif /* CONFIG_AP_SUPPORT */ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) CwminCsr.field.Cwmin3 = pEdcaParm->Cwmin[QID_AC_VO] - 1; /*for TGn wifi test*/ #endif /* CONFIG_STA_SUPPORT */ RTMP_IO_WRITE32(pAd, WMM_CWMIN_CFG, CwminCsr.word); CwmaxCsr.word = 0; CwmaxCsr.field.Cwmax0 = pEdcaParm->Cwmax[QID_AC_BE]; CwmaxCsr.field.Cwmax1 = pEdcaParm->Cwmax[QID_AC_BK]; CwmaxCsr.field.Cwmax2 = pEdcaParm->Cwmax[QID_AC_VI]; CwmaxCsr.field.Cwmax3 = pEdcaParm->Cwmax[QID_AC_VO]; RTMP_IO_WRITE32(pAd, WMM_CWMAX_CFG, CwmaxCsr.word); AifsnCsr.word = 0; AifsnCsr.field.Aifsn0 = Ac0Cfg.field.Aifsn; /*pEdcaParm->Aifsn[QID_AC_BE];*/ AifsnCsr.field.Aifsn1 = Ac1Cfg.field.Aifsn; /*pEdcaParm->Aifsn[QID_AC_BK];*/ #ifdef CONFIG_STA_SUPPORT #endif /* CONFIG_STA_SUPPORT */ AifsnCsr.field.Aifsn2 = Ac2Cfg.field.Aifsn; /*pEdcaParm->Aifsn[QID_AC_VI];*/ #ifdef INF_AMAZON_SE #ifdef CONFIG_AP_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) { AifsnCsr.field.Aifsn3 = Ac3Cfg.field.Aifsn; /*pEdcaParm->Aifsn[QID_AC_VO]*/ AifsnCsr.field.Aifsn2 = 0x2; /*pEdcaParm->Aifsn[QID_AC_VI]; for WiFi WMM A1-T07.*/ } #endif /* CONFIG_AP_SUPPORT */ #endif /* INF_AMAZON_SE */ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { /* Tuning for Wi-Fi WMM S06*/ if (pAd->CommonCfg.bWiFiTest && pEdcaParm->Aifsn[QID_AC_VI] == 10) AifsnCsr.field.Aifsn2 = Ac2Cfg.field.Aifsn - 4; /* Tuning for TGn Wi-Fi 5.2.32*/ /* STA TestBed changes in this item: connexant legacy sta ==> broadcom 11n sta*/ if 
(STA_TGN_WIFI_ON(pAd) && pEdcaParm->Aifsn[QID_AC_VI] == 10) { AifsnCsr.field.Aifsn0 = 3; AifsnCsr.field.Aifsn2 = 7; } if (INFRA_ON(pAd)) CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[BSSID_WCID], fCLIENT_STATUS_WMM_CAPABLE); } #endif /* CONFIG_STA_SUPPORT */ #ifdef CONFIG_AP_SUPPORT IF_DEV_CONFIG_OPMODE_ON_AP(pAd) AifsnCsr.field.Aifsn3 = Ac3Cfg.field.Aifsn; /*pEdcaParm->Aifsn[QID_AC_VO]*/ #endif /* CONFIG_AP_SUPPORT */ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { AifsnCsr.field.Aifsn3 = Ac3Cfg.field.Aifsn - 1; /*pEdcaParm->Aifsn[QID_AC_VO]; for TGn wifi test*/ } #endif /* CONFIG_STA_SUPPORT */ RTMP_IO_WRITE32(pAd, WMM_AIFSN_CFG, AifsnCsr.word); NdisMoveMemory(&pAd->CommonCfg.APEdcaParm, pEdcaParm, sizeof(EDCA_PARM)); if (!ADHOC_ON(pAd)) { DBGPRINT(RT_DEBUG_TRACE,("EDCA [#%d]: AIFSN CWmin CWmax TXOP(us) ACM\n", pEdcaParm->EdcaUpdateCount)); DBGPRINT(RT_DEBUG_TRACE,(" AC_BE %2d %2d %2d %4d %d\n", pEdcaParm->Aifsn[0], pEdcaParm->Cwmin[0], pEdcaParm->Cwmax[0], pEdcaParm->Txop[0]<<5, pEdcaParm->bACM[0])); DBGPRINT(RT_DEBUG_TRACE,(" AC_BK %2d %2d %2d %4d %d\n", pEdcaParm->Aifsn[1], pEdcaParm->Cwmin[1], pEdcaParm->Cwmax[1], pEdcaParm->Txop[1]<<5, pEdcaParm->bACM[1])); DBGPRINT(RT_DEBUG_TRACE,(" AC_VI %2d %2d %2d %4d %d\n", pEdcaParm->Aifsn[2], pEdcaParm->Cwmin[2], pEdcaParm->Cwmax[2], pEdcaParm->Txop[2]<<5, pEdcaParm->bACM[2])); DBGPRINT(RT_DEBUG_TRACE,(" AC_VO %2d %2d %2d %4d %d\n", pEdcaParm->Aifsn[3], pEdcaParm->Cwmin[3], pEdcaParm->Cwmax[3], pEdcaParm->Txop[3]<<5, pEdcaParm->bACM[3])); } } pAd->CommonCfg.RestoreBurstMode = Ac0Cfg.word; } /* ========================================================================== Description: IRQL = PASSIVE_LEVEL IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicSetSlotTime( IN PRTMP_ADAPTER pAd, IN BOOLEAN bUseShortSlotTime) { ULONG SlotTime; UINT32 RegValue = 0; #ifdef CONFIG_STA_SUPPORT if (pAd->CommonCfg.Channel > 14) bUseShortSlotTime = TRUE; #endif /* 
CONFIG_STA_SUPPORT */ if (bUseShortSlotTime && OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED)) return; else if ((!bUseShortSlotTime) && (!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED))) return; if (bUseShortSlotTime) OPSTATUS_SET_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED); else OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED); SlotTime = (bUseShortSlotTime)? 9 : 20; #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { /* force using short SLOT time for FAE to demo performance when TxBurst is ON*/ if (((pAd->StaActive.SupportedPhyInfo.bHtEnable == FALSE) && (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_WMM_INUSED))) #ifdef DOT11_N_SUPPORT || ((pAd->StaActive.SupportedPhyInfo.bHtEnable == TRUE) && (pAd->CommonCfg.BACapability.field.Policy == BA_NOTUSE)) #endif /* DOT11_N_SUPPORT */ ) { /* In this case, we will think it is doing Wi-Fi test*/ /* And we will not set to short slot when bEnableTxBurst is TRUE.*/ } else if (pAd->CommonCfg.bEnableTxBurst) { OPSTATUS_SET_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED); SlotTime = 9; } } #endif /* CONFIG_STA_SUPPORT */ /* For some reasons, always set it to short slot time.*/ /* ToDo: Should consider capability with 11B*/ #ifdef CONFIG_STA_SUPPORT IF_DEV_CONFIG_OPMODE_ON_STA(pAd) { if (pAd->StaCfg.BssType == BSS_ADHOC) { OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED); SlotTime = 20; } } #endif /* CONFIG_STA_SUPPORT */ RTMP_IO_READ32(pAd, BKOFF_SLOT_CFG, &RegValue); RegValue = RegValue & 0xFFFFFF00; RegValue |= SlotTime; RTMP_IO_WRITE32(pAd, BKOFF_SLOT_CFG, RegValue); } /* ======================================================================== Description: Add Shared key information into ASIC. Update shared key, TxMic and RxMic to Asic Shared key table Update its cipherAlg to Asic Shared key Mode. 
Return: ======================================================================== */ VOID AsicAddSharedKeyEntry( IN PRTMP_ADAPTER pAd, IN UCHAR BssIndex, IN UCHAR KeyIdx, IN PCIPHER_KEY pCipherKey) { ULONG offset; /*, csr0;*/ SHAREDKEY_MODE_STRUC csr1; UINT16 SharedKeyTableBase, SharedKeyModeBase; #ifdef RTMP_MAC_PCI INT i; #endif /* RTMP_MAC_PCI */ PUCHAR pKey = pCipherKey->Key; PUCHAR pTxMic = pCipherKey->TxMic; PUCHAR pRxMic = pCipherKey->RxMic; UCHAR CipherAlg = pCipherKey->CipherAlg; DBGPRINT(RT_DEBUG_TRACE, ("AsicAddSharedKeyEntry BssIndex=%d, KeyIdx=%d\n", BssIndex,KeyIdx)); /*============================================================================================*/ DBGPRINT(RT_DEBUG_TRACE,("AsicAddSharedKeyEntry: %s key #%d\n", CipherName[CipherAlg], BssIndex*4 + KeyIdx)); DBGPRINT_RAW(RT_DEBUG_TRACE, (" Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", pKey[0],pKey[1],pKey[2],pKey[3],pKey[4],pKey[5],pKey[6],pKey[7],pKey[8],pKey[9],pKey[10],pKey[11],pKey[12],pKey[13],pKey[14],pKey[15])); if (pRxMic) { DBGPRINT_RAW(RT_DEBUG_TRACE, (" Rx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", pRxMic[0],pRxMic[1],pRxMic[2],pRxMic[3],pRxMic[4],pRxMic[5],pRxMic[6],pRxMic[7])); } if (pTxMic) { DBGPRINT_RAW(RT_DEBUG_TRACE, (" Tx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", pTxMic[0],pTxMic[1],pTxMic[2],pTxMic[3],pTxMic[4],pTxMic[5],pTxMic[6],pTxMic[7])); } /*============================================================================================*/ /* fill key material - key + TX MIC + RX MIC*/ if (BssIndex >= 8) { SharedKeyTableBase = SHARED_KEY_TABLE_BASE_EXT; SharedKeyModeBase = SHARED_KEY_MODE_BASE_EXT; BssIndex -= 8; } else { SharedKeyTableBase = SHARED_KEY_TABLE_BASE; SharedKeyModeBase = SHARED_KEY_MODE_BASE; } #ifdef RTMP_MAC_PCI offset = SharedKeyTableBase + (4*BssIndex + KeyIdx)*HW_KEY_ENTRY_SIZE; for (i=0; i<MAX_LEN_OF_SHARE_KEY; i++) { RTMP_IO_WRITE8(pAd, offset + i, pKey[i]); } offset += 
MAX_LEN_OF_SHARE_KEY; if (pTxMic) { for (i=0; i<8; i++) { RTMP_IO_WRITE8(pAd, offset + i, pTxMic[i]); } } offset += 8; if (pRxMic) { for (i=0; i<8; i++) { RTMP_IO_WRITE8(pAd, offset + i, pRxMic[i]); } } #endif /* RTMP_MAC_PCI */ /* Update cipher algorithm. WSTA always use BSS0*/ RTMP_IO_READ32(pAd, SharedKeyModeBase + 4*(BssIndex/2), &csr1.word); DBGPRINT(RT_DEBUG_TRACE,("Read: SHARED_KEY_MODE_BASE at this Bss[%d] KeyIdx[%d]= 0x%x \n", BssIndex,KeyIdx, csr1.word)); if ((BssIndex%2) == 0) { if (KeyIdx == 0) csr1.field.Bss0Key0CipherAlg = CipherAlg; else if (KeyIdx == 1) csr1.field.Bss0Key1CipherAlg = CipherAlg; else if (KeyIdx == 2) csr1.field.Bss0Key2CipherAlg = CipherAlg; else csr1.field.Bss0Key3CipherAlg = CipherAlg; } else { if (KeyIdx == 0) csr1.field.Bss1Key0CipherAlg = CipherAlg; else if (KeyIdx == 1) csr1.field.Bss1Key1CipherAlg = CipherAlg; else if (KeyIdx == 2) csr1.field.Bss1Key2CipherAlg = CipherAlg; else csr1.field.Bss1Key3CipherAlg = CipherAlg; } DBGPRINT(RT_DEBUG_TRACE,("Write: SHARED_KEY_MODE_BASE at this Bss[%d] = 0x%x \n", BssIndex, csr1.word)); RTMP_IO_WRITE32(pAd, SharedKeyModeBase+4*(BssIndex/2), csr1.word); } /* IRQL = DISPATCH_LEVEL*/ VOID AsicRemoveSharedKeyEntry( IN PRTMP_ADAPTER pAd, IN UCHAR BssIndex, IN UCHAR KeyIdx) { /*ULONG SecCsr0;*/ SHAREDKEY_MODE_STRUC csr1; UINT16 SharedKeyModeBase; DBGPRINT(RT_DEBUG_TRACE,("AsicRemoveSharedKeyEntry: #%d \n", BssIndex*4 + KeyIdx)); if (BssIndex >= 8) { SharedKeyModeBase = SHARED_KEY_MODE_BASE_EXT; BssIndex -= 8; } else { SharedKeyModeBase = SHARED_KEY_MODE_BASE; } RTMP_IO_READ32(pAd, SharedKeyModeBase+4*(BssIndex/2), &csr1.word); if ((BssIndex%2) == 0) { if (KeyIdx == 0) csr1.field.Bss0Key0CipherAlg = 0; else if (KeyIdx == 1) csr1.field.Bss0Key1CipherAlg = 0; else if (KeyIdx == 2) csr1.field.Bss0Key2CipherAlg = 0; else csr1.field.Bss0Key3CipherAlg = 0; } else { if (KeyIdx == 0) csr1.field.Bss1Key0CipherAlg = 0; else if (KeyIdx == 1) csr1.field.Bss1Key1CipherAlg = 0; else if (KeyIdx == 2) 
csr1.field.Bss1Key2CipherAlg = 0; else csr1.field.Bss1Key3CipherAlg = 0; } DBGPRINT(RT_DEBUG_TRACE,("Write: SHARED_KEY_MODE_BASE at this Bss[%d] = 0x%x \n", BssIndex, csr1.word)); RTMP_IO_WRITE32(pAd, SharedKeyModeBase+4*(BssIndex/2), csr1.word); ASSERT(BssIndex < 8); ASSERT(KeyIdx < 4); } VOID AsicUpdateWCIDIVEIV( IN PRTMP_ADAPTER pAd, IN USHORT WCID, IN ULONG uIV, IN ULONG uEIV) { ULONG offset; offset = MAC_IVEIV_TABLE_BASE + (WCID * HW_IVEIV_ENTRY_SIZE); RTMP_IO_WRITE32(pAd, offset, uIV); RTMP_IO_WRITE32(pAd, offset + 4, uEIV); DBGPRINT(RT_DEBUG_TRACE, ("%s: wcid(%d) 0x%08lx, 0x%08lx \n", __FUNCTION__, WCID, uIV, uEIV)); } VOID AsicUpdateRxWCIDTable( IN PRTMP_ADAPTER pAd, IN USHORT WCID, IN PUCHAR pAddr) { ULONG offset; ULONG Addr; offset = MAC_WCID_BASE + (WCID * HW_WCID_ENTRY_SIZE); Addr = pAddr[0] + (pAddr[1] << 8) +(pAddr[2] << 16) +(pAddr[3] << 24); RTMP_IO_WRITE32(pAd, offset, Addr); Addr = pAddr[4] + (pAddr[5] << 8); RTMP_IO_WRITE32(pAd, offset + 4, Addr); } /* ======================================================================== Description: Add Client security information into ASIC WCID table and IVEIV table. Return: Note : The key table selection rule : 1. Wds-links and Mesh-links always use Pair-wise key table. 2. When the CipherAlg is TKIP, AES, SMS4 or the dynamic WEP is enabled, it needs to set key into Pair-wise Key Table. 3. The pair-wise key security mode is set NONE, it means as no security. 4. In STA Adhoc mode, it always use shared key table. 5. Otherwise, use shared key table ======================================================================== */ VOID AsicUpdateWcidAttributeEntry( IN PRTMP_ADAPTER pAd, IN UCHAR BssIdx, IN UCHAR KeyIdx, IN UCHAR CipherAlg, IN UINT8 Wcid, IN UINT8 KeyTabFlag) { WCID_ATTRIBUTE_STRUC WCIDAttri; USHORT offset; /* Initialize the content of WCID Attribue */ WCIDAttri.word = 0; /* The limitation of HW WCID table */ if (/*Wcid < 1 ||*/ Wcid > 254) { DBGPRINT(RT_DEBUG_WARN, ("%s: Wcid is invalid (%d). 
\n", __FUNCTION__, Wcid)); return; } /* Update the pairwise key security mode. Use bit10 and bit3~1 to indicate the pairwise cipher mode */ WCIDAttri.field.PairKeyModeExt = ((CipherAlg & 0x08) >> 3); WCIDAttri.field.PairKeyMode = (CipherAlg & 0x07); /* Update the MBSS index. Use bit11 and bit6~4 to indicate the BSS index */ WCIDAttri.field.BSSIdxExt = ((BssIdx & 0x08) >> 3); WCIDAttri.field.BSSIdx = (BssIdx & 0x07); #ifdef WAPI_SUPPORT /* Update WAPI related information */ if (CipherAlg == CIPHER_SMS4) { if (KeyTabFlag == SHAREDKEYTABLE) WCIDAttri.field.WAPI_MCBC = 1; WCIDAttri.field.WAPIKeyIdx = ((KeyIdx == 0) ? 0 : 1); } #endif /* WAPI_SUPPORT */ /* Assign Key Table selection */ WCIDAttri.field.KeyTab = KeyTabFlag; /* Update related information to ASIC */ offset = MAC_WCID_ATTRIBUTE_BASE + (Wcid * HW_WCID_ATTRI_SIZE); RTMP_IO_WRITE32(pAd, offset, WCIDAttri.word); DBGPRINT(RT_DEBUG_TRACE, ("%s : WCID #%d, KeyIndex #%d, Alg=%s\n", __FUNCTION__, Wcid, KeyIdx, CipherName[CipherAlg])); DBGPRINT(RT_DEBUG_TRACE, (" WCIDAttri = 0x%x \n", WCIDAttri.word)); } /* ======================================================================== Description: Add Pair-wise key material into ASIC. 
Update pairwise key, TxMic and RxMic to Asic Pair-wise key table Return: ======================================================================== */ VOID AsicAddPairwiseKeyEntry( IN PRTMP_ADAPTER pAd, IN UCHAR WCID, IN PCIPHER_KEY pCipherKey) { INT i; ULONG offset; PUCHAR pKey = pCipherKey->Key; PUCHAR pTxMic = pCipherKey->TxMic; PUCHAR pRxMic = pCipherKey->RxMic; UCHAR CipherAlg = pCipherKey->CipherAlg; #ifdef RTMP_MAC_PCI #ifdef SPECIFIC_BCN_BUF_SUPPORT unsigned long irqFlag = 0; #endif /* SPECIFIC_BCN_BUF_SUPPORT */ #endif /* RTMP_MAC_PCI */ #ifdef RTMP_MAC_PCI #ifdef SPECIFIC_BCN_BUF_SUPPORT RTMP_MAC_SHR_MSEL_LOCK(pAd, LOWER_SHRMEM, irqFlag); #endif /* SPECIFIC_BCN_BUF_SUPPORT */ #endif /* RTMP_MAC_PCI */ /* EKEY*/ offset = PAIRWISE_KEY_TABLE_BASE + (WCID * HW_KEY_ENTRY_SIZE); #ifdef RTMP_MAC_PCI for (i=0; i<MAX_LEN_OF_PEER_KEY; i++) { RTMP_IO_WRITE8(pAd, offset + i, pKey[i]); } #endif /* RTMP_MAC_PCI */ for (i=0; i<MAX_LEN_OF_PEER_KEY; i+=4) { UINT32 Value; RTMP_IO_READ32(pAd, offset + i, &Value); } offset += MAX_LEN_OF_PEER_KEY; /* MIC KEY*/ if (pTxMic) { #ifdef RTMP_MAC_PCI for (i=0; i<8; i++) { RTMP_IO_WRITE8(pAd, offset+i, pTxMic[i]); } #endif /* RTMP_MAC_PCI */ } offset += 8; if (pRxMic) { #ifdef RTMP_MAC_PCI for (i=0; i<8; i++) { RTMP_IO_WRITE8(pAd, offset+i, pRxMic[i]); } #endif /* RTMP_MAC_PCI */ } #ifdef RTMP_MAC_PCI #ifdef SPECIFIC_BCN_BUF_SUPPORT RTMP_MAC_SHR_MSEL_UNLOCK(pAd, LOWER_SHRMEM, irqFlag); #endif /* SPECIFIC_BCN_BUF_SUPPORT*/ #endif /* RTMP_MAC_PCI */ DBGPRINT(RT_DEBUG_TRACE,("AsicAddPairwiseKeyEntry: WCID #%d Alg=%s\n",WCID, CipherName[CipherAlg])); DBGPRINT(RT_DEBUG_TRACE,(" Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", pKey[0],pKey[1],pKey[2],pKey[3],pKey[4],pKey[5],pKey[6],pKey[7],pKey[8],pKey[9],pKey[10],pKey[11],pKey[12],pKey[13],pKey[14],pKey[15])); if (pRxMic) { DBGPRINT(RT_DEBUG_TRACE, (" Rx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 
pRxMic[0],pRxMic[1],pRxMic[2],pRxMic[3],pRxMic[4],pRxMic[5],pRxMic[6],pRxMic[7])); } if (pTxMic) { DBGPRINT(RT_DEBUG_TRACE, (" Tx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", pTxMic[0],pTxMic[1],pTxMic[2],pTxMic[3],pTxMic[4],pTxMic[5],pTxMic[6],pTxMic[7])); } } /* ======================================================================== Description: Remove Pair-wise key material from ASIC. Return: ======================================================================== */ VOID AsicRemovePairwiseKeyEntry( IN PRTMP_ADAPTER pAd, IN UCHAR Wcid) { /* Set the specific WCID attribute entry as OPEN-NONE */ AsicUpdateWcidAttributeEntry(pAd, BSS0, 0, CIPHER_NONE, Wcid, PAIRWISEKEYTABLE); DBGPRINT(RT_DEBUG_TRACE, ("%s : Wcid #%d \n", __FUNCTION__, Wcid)); } BOOLEAN AsicSendCommandToMcu( IN RTMP_ADAPTER *pAd, IN UCHAR Command, IN UCHAR Token, IN UCHAR Arg0, IN UCHAR Arg1, IN BOOLEAN in_atomic) { #ifdef RTMP_PCI_SUPPORT if (IS_PCI_INF(pAd)) in_atomic = TRUE; #endif /* RTMP_USB_SUPPORT */ if (pAd->chipOps.sendCommandToMcu) return pAd->chipOps.sendCommandToMcu(pAd, Command, Token, Arg0, Arg1, in_atomic); else return FALSE; } BOOLEAN AsicSendCommandToMcuBBP( IN PRTMP_ADAPTER pAd, IN UCHAR Command, IN UCHAR Token, IN UCHAR Arg0, IN UCHAR Arg1, IN BOOLEAN FlgIsNeedLocked) { if (pAd->chipOps.sendCommandToMcu) return pAd->chipOps.sendCommandToMcu(pAd, Command, Token, Arg0, Arg1, FlgIsNeedLocked); else return FALSE; } /* ======================================================================== Description: For 1x1 chipset : 2070 / 3070 / 3090 / 3370 / 3390 / 5370 / 5390 Usage : 1. Set Default Antenna as initialize 2. Antenna Diversity switching used 3. 
iwpriv command switch Antenna Return: ======================================================================== */ VOID AsicSetRxAnt( IN PRTMP_ADAPTER pAd, IN UCHAR Ant) { if (pAd->chipOps.SetRxAnt) pAd->chipOps.SetRxAnt(pAd, Ant); } VOID AsicTurnOffRFClk( IN PRTMP_ADAPTER pAd, IN UCHAR Channel) { if (pAd->chipOps.AsicRfTurnOff) { pAd->chipOps.AsicRfTurnOff(pAd); } else { #if defined(RT28xx) || defined(RT2880) || defined(RT2883) /* RF R2 bit 18 = 0*/ UINT32 R1 = 0, R2 = 0, R3 = 0; UCHAR index; RTMP_RF_REGS *RFRegTable; RFRegTable = RF2850RegTable; #endif /* defined(RT28xx) || defined(RT2880) || defined(RT2883) */ switch (pAd->RfIcType) { #if defined(RT28xx) || defined(RT2880) || defined(RT2883) #if defined(RT28xx) || defined(RT2880) case RFIC_2820: case RFIC_2850: case RFIC_2720: case RFIC_2750: #endif /* defined(RT28xx) || defined(RT2880) */ #ifdef RT2883 case RFIC_2853: #endif /* RT2883 */ for (index = 0; index < NUM_OF_2850_CHNL; index++) { if (Channel == RFRegTable[index].Channel) { R1 = RFRegTable[index].R1 & 0xffffdfff; R2 = RFRegTable[index].R2 & 0xfffbffff; R3 = RFRegTable[index].R3 & 0xfff3ffff; RTMP_RF_IO_WRITE32(pAd, R1); RTMP_RF_IO_WRITE32(pAd, R2); /* Program R1b13 to 1, R3/b18,19 to 0, R2b18 to 0. 
*/ /* Set RF R2 bit18=0, R3 bit[18:19]=0*/ /*if (pAd->StaCfg.bRadio == FALSE)*/ if (1) { RTMP_RF_IO_WRITE32(pAd, R3); DBGPRINT(RT_DEBUG_TRACE, ("AsicTurnOffRFClk#%d(RF=%d, ) , R2=0x%08x, R3 = 0x%08x \n", Channel, pAd->RfIcType, R2, R3)); } else DBGPRINT(RT_DEBUG_TRACE, ("AsicTurnOffRFClk#%d(RF=%d, ) , R2=0x%08x \n", Channel, pAd->RfIcType, R2)); break; } } break; #endif /* defined(RT28xx) || defined(RT2880) || defined(RT2883) */ default: DBGPRINT(RT_DEBUG_TRACE, ("AsicTurnOffRFClk#%d : Unkonwn RFIC=%d\n", Channel, pAd->RfIcType)); break; } } } #ifdef WAPI_SUPPORT VOID AsicUpdateWAPIPN( IN PRTMP_ADAPTER pAd, IN USHORT WCID, IN ULONG pn_low, IN ULONG pn_high) { if (IS_HW_WAPI_SUPPORT(pAd)) { ULONG offset; offset = WAPI_PN_TABLE_BASE + (WCID * WAPI_PN_ENTRY_SIZE); RTMP_IO_WRITE32(pAd, offset, pn_low); RTMP_IO_WRITE32(pAd, offset + 4, pn_high); } else { DBGPRINT(RT_DEBUG_WARN, ("%s : Not support HW_WAPI_PN_TABLE\n", __FUNCTION__)); } } #endif /* WAPI_SUPPORT */ #ifdef VCORECAL_SUPPORT VOID AsicVCORecalibration( IN PRTMP_ADAPTER pAd) { UCHAR RFValue = 0; UINT32 TxPinCfg = 0; UINT8 mode = pAd->chipCap.FlgIsVcoReCalMode; if (mode == VCO_CAL_DISABLE) return; #ifdef RT6352 if (pAd->bCalibrationDone == FALSE) return; #endif /* RT6352 */ #ifdef RTMP_INTERNAL_TX_ALC #ifdef RT5350 if (pAd->TxPowerCtrl.bInternalTxALC == TRUE) { UCHAR BbpR47 = 0; //TSSI_REPORT_SEL = 0 RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R47, &BbpR47); BbpR47 &= ~0x3; RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R47, BbpR47 ); } #endif /* RT5350 */ #endif /* RTMP_INTERNAL_TX_ALC */ RTMP_IO_READ32(pAd, TX_PIN_CFG, &TxPinCfg); TxPinCfg &= 0xFCFFFFF0; RTMP_IO_WRITE32(pAd, TX_PIN_CFG, TxPinCfg); switch (mode) { case VCO_CAL_MODE_1: RT30xxReadRFRegister(pAd, RF_R07, (PUCHAR)&RFValue); RFValue = RFValue | 0x01; /* bit 0=vcocal_en*/ RT30xxWriteRFRegister(pAd, RF_R07, (UCHAR)RFValue); break; case VCO_CAL_MODE_2: RT30xxReadRFRegister(pAd, RF_R03, (PUCHAR)&RFValue); RFValue = RFValue | 0x80; /* bit 7=vcocal_en*/ 
RT30xxWriteRFRegister(pAd, RF_R03, (UCHAR)RFValue); break; #ifdef RT6352 case VCO_CAL_MODE_3: RT635xWriteRFRegister(pAd, RF_BANK0, RF_R05, 0x40); RT635xWriteRFRegister(pAd, RF_BANK0, RF_R04, 0x0C); RT635xReadRFRegister(pAd, RF_BANK0, RF_R04, &RFValue); RFValue = RFValue | 0x80; /* bit 7=vcocal_en*/ RT635xWriteRFRegister(pAd, RF_BANK0, RF_R04, RFValue); break; #endif /* RT6352 */ default: return; } RtmpOsMsDelay(2); RTMP_IO_READ32(pAd, TX_PIN_CFG, &TxPinCfg); if (pAd->CommonCfg.Channel <= 14) { if ((pAd->Antenna.field.TxPath == 1) #ifdef GREENAP_SUPPORT || (pAd->ApCfg.bGreenAPActive == TRUE) /* avoid to corrupt GreenAP operation */ #endif /* GREENAP_SUPPORT */ ) TxPinCfg |= 0x2; else if (pAd->Antenna.field.TxPath == 2) TxPinCfg |= 0xA; else if (pAd->Antenna.field.TxPath == 3) TxPinCfg |= 0x0200000A; } else { if ((pAd->Antenna.field.TxPath == 1) #ifdef GREENAP_SUPPORT || (pAd->ApCfg.bGreenAPActive == TRUE) /* avoid to corrupt GreenAP operation */ #endif /* GREENAP_SUPPORT */ ) TxPinCfg |= 0x1; else if (pAd->Antenna.field.TxPath == 2) TxPinCfg |= 0x5; else if (pAd->Antenna.field.TxPath == 3) TxPinCfg |= 0x01000005; } RTMP_IO_WRITE32(pAd, TX_PIN_CFG, TxPinCfg); #ifdef TXBF_SUPPORT // Do a Divider Calibration and update BBP registers if (pAd->CommonCfg.RegTransmitSetting.field.ITxBfEn #ifdef DBG_CTRL_SUPPORT && (pAd->CommonCfg.DebugFlags & DBF_DISABLE_CAL)==0 #endif /* DBG_CTRL_SUPPORT */ ) { ITxBFDividerCalibration(pAd, 2, 0, NULL); } if (pAd->CommonCfg.ETxBfEnCond) { INT idx; for (idx = 1; idx < MAX_LEN_OF_MAC_TABLE; idx++) { MAC_TABLE_ENTRY *pEntry; pEntry = &pAd->MacTab.Content[idx]; if ((IS_ENTRY_CLIENT(pEntry)) && (pEntry->eTxBfEnCond)) { BOOLEAN Cancelled; RTMPCancelTimer(&pEntry->eTxBfProbeTimer, &Cancelled); pEntry->bfState = READY_FOR_SNDG0; eTxBFProbing(pAd, pEntry); } } } #endif // TXBF_SUPPORT // } #endif /* VCORECAL_SUPPORT */ #ifdef STREAM_MODE_SUPPORT // StreamModeRegVal - return MAC reg value for StreamMode setting UINT32 StreamModeRegVal( IN 
RTMP_ADAPTER *pAd) { UINT32 streamWord; switch (pAd->CommonCfg.StreamMode) { case 1: streamWord = 0x030000; break; case 2: streamWord = 0x0c0000; break; case 3: streamWord = 0x0f0000; break; default: streamWord = 0x0; break; } return streamWord; } /* ======================================================================== Description: configure the stream mode of specific MAC or all MAC and set to ASIC. Prameters: pAd --- pMacAddr --- bClear --- disable the stream mode for specific macAddr when (pMacAddr!=NULL) Return: ======================================================================== */ VOID AsicSetStreamMode( IN RTMP_ADAPTER *pAd, IN PUCHAR pMacAddr, IN INT chainIdx, IN BOOLEAN bEnabled) { UINT32 streamWord; UINT32 regAddr, regVal; if (!pAd->chipCap.FlgHwStreamMode) return; streamWord = StreamModeRegVal(pAd); if (!bEnabled) streamWord = 0; regAddr = TX_CHAIN_ADDR0_L + chainIdx * 4; RTMP_IO_WRITE32(pAd, regAddr, (UINT32)(pMacAddr[0]) | (UINT32)(pMacAddr[1] << 8) | (UINT32)(pMacAddr[2] << 16) | (UINT32)(pMacAddr[3] << 24)); RTMP_IO_READ32(pAd, regAddr + 4, &regVal); regVal &= (~0x000f0000); RTMP_IO_WRITE32(pAd, regAddr + 4, (regVal | streamWord) | (UINT32)(pMacAddr[4]) | (UINT32)(pMacAddr[5] << 8)); } VOID RtmpStreamModeInit( IN RTMP_ADAPTER *pAd) { int chainIdx; UCHAR *pMacAddr; if (pAd->chipCap.FlgHwStreamMode == FALSE) return; for (chainIdx = 0; chainIdx < STREAM_MODE_STA_NUM; chainIdx++) { pMacAddr = &pAd->CommonCfg.StreamModeMac[chainIdx][0]; AsicSetStreamMode(pAd, pMacAddr, chainIdx, TRUE); } } /* Enable the stream mode*/ /* Parameters*/ /* pAd: The adapter data structure*/ /* Return Value:*/ /* None*/ VOID AsicEnableStreamMode( IN PRTMP_ADAPTER pAd) { TX_CHAIN_ADDR0_L_STRUC TxChainAddr0L = {{0}}; TX_CHAIN_ADDR0_H_STRUC TxChainAddr0H = {{0}}; TX_CHAIN_ADDR1_H_STRUC TxChainAddr1H = {{0}}; TX_CHAIN_ADDR2_H_STRUC TxChainAddr2H = {{0}}; TX_CHAIN_ADDR3_H_STRUC TxChainAddr3H = {{0}}; DBGPRINT(RT_DEBUG_INFO, ("---> %s\n", __FUNCTION__)); /* Chain #0 for 
broadcast*/ TxChainAddr0L.field.TxChainAddr0L_Byte3 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte2 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte1 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte0 = 0xFF; RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR0_L, TxChainAddr0L.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR0_H, &TxChainAddr0H.word); TxChainAddr0H.field.TxChainAddr0H_Byte4 = 0xFF; TxChainAddr0H.field.TxChainAddr0H_Byte5 = 0xFF; TxChainAddr0H.field.TxChainSel0 = 0xF; /* Enable the stream mode for chain #0*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR0_H, TxChainAddr0H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR1_H, &TxChainAddr1H.word); TxChainAddr1H.field.TxChainSel0 = 0xF; /* Enable the stream mode for chain #1*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR1_H, TxChainAddr1H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR2_H, &TxChainAddr2H.word); TxChainAddr2H.field.TxChainSel0 = 0xF; /* Enable the stream mode for chain #2*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR2_H, TxChainAddr2H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR3_H, &TxChainAddr3H.word); TxChainAddr3H.field.TxChainSel0 = 0xF; /* Enable the stream mode for chain #3*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR3_H, TxChainAddr3H.word); DBGPRINT(RT_DEBUG_INFO, ("<--- %s\n", __FUNCTION__)); } /* Disable the stream mode*/ /* Parameters*/ /* pAd: The adapter data structure*/ /* Return Value:*/ /* None*/ VOID AsicDisableStreamMode( IN PRTMP_ADAPTER pAd) { TX_CHAIN_ADDR0_L_STRUC TxChainAddr0L = {{0}}; TX_CHAIN_ADDR0_H_STRUC TxChainAddr0H = {{0}}; TX_CHAIN_ADDR1_H_STRUC TxChainAddr1H = {{0}}; TX_CHAIN_ADDR2_H_STRUC TxChainAddr2H = {{0}}; TX_CHAIN_ADDR3_H_STRUC TxChainAddr3H = {{0}}; DBGPRINT(RT_DEBUG_INFO, ("---> %s\n", __FUNCTION__)); /* Chain #0 for broadcast*/ TxChainAddr0L.field.TxChainAddr0L_Byte3 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte2 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte1 = 0xFF; TxChainAddr0L.field.TxChainAddr0L_Byte0 = 0xFF; RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR0_L, TxChainAddr0L.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR0_H, 
&TxChainAddr0H.word); TxChainAddr0H.field.TxChainAddr0H_Byte4 = 0xFF; TxChainAddr0H.field.TxChainAddr0H_Byte5 = 0xFF; TxChainAddr0H.field.TxChainSel0 = 0x0; /* Disable the stream mode for chain #0*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR0_H, TxChainAddr0H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR1_H, &TxChainAddr1H.word); TxChainAddr1H.field.TxChainSel0 = 0x0; /* Disable the stream mode for chain #1*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR1_H, TxChainAddr1H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR2_H, &TxChainAddr2H.word); TxChainAddr2H.field.TxChainSel0 = 0x0; /* Disable the stream mode for chain #2*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR2_H, TxChainAddr2H.word); RTMP_IO_READ32(pAd, TX_CHAIN_ADDR3_H, &TxChainAddr3H.word); TxChainAddr3H.field.TxChainSel0 = 0x0; /* Disable the stream mode for chain #3*/ RTMP_IO_WRITE32(pAd, TX_CHAIN_ADDR3_H, TxChainAddr3H.word); DBGPRINT(RT_DEBUG_INFO, ("<--- %s\n", __FUNCTION__)); } #endif // STREAM_MODE_SUPPORT // #ifdef DOT11_N_SUPPORT /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicEnableRalinkBurstMode( IN PRTMP_ADAPTER pAd) { UINT32 Data = 0; RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data); pAd->CommonCfg.RestoreBurstMode = Data; Data &= 0xFFF00000; Data |= 0x86380; RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data); } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicDisableRalinkBurstMode( IN PRTMP_ADAPTER pAd) { UINT32 Data = 0; RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data); Data = pAd->CommonCfg.RestoreBurstMode; Data &= 0xFFFFFF00; if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_DYNAMIC_BE_TXOP_ACTIVE) #ifdef DOT11_N_SUPPORT && (pAd->MacTab.fAnyStationMIMOPSDynamic == FALSE) #endif // DOT11_N_SUPPORT // ) { if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RDG_ACTIVE)) Data |= 
0x80; else if (pAd->CommonCfg.bEnableTxBurst) Data |= 0x20; } RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data); } #endif // DOT11_N_SUPPORT // VOID RtmpUpdateFilterCoefficientControl( IN PRTMP_ADAPTER pAd, IN UCHAR Channel) { UCHAR BBPValue = 0; if (Channel == 14) { if (pAd->CommonCfg.PhyMode == PHY_11B) { /* when Channel==14 && Mode==CCK && BandWidth==20M, BBP R4 bit5=1 */ RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R4, &BBPValue); BBPValue |= 0x20; /* set bit5=1 */ RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R4, BBPValue); } else { RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R4, &BBPValue); BBPValue &= (~0x20); RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R4, BBPValue); } } } #ifdef WOW_SUPPORT #endif /* WOW_SUPPORT */ #ifdef MAC_APCLI_SUPPORT /* ========================================================================== Description: Set BSSID of Root AP IRQL = DISPATCH_LEVEL ========================================================================== */ VOID AsicSetApCliBssid( IN PRTMP_ADAPTER pAd, IN PUCHAR pBssid, IN UCHAR index) { UINT32 Addr4 = 0; DBGPRINT(RT_DEBUG_TRACE, ("===> AsicSetApCliBssid %x:%x:%x:%x:%x:%x\n", PRINT_MAC(pBssid))); Addr4 = (UINT32)(pBssid[0]) | (UINT32)(pBssid[1] << 8) | (UINT32)(pBssid[2] << 16) | (UINT32)(pBssid[3] << 24); RTMP_IO_WRITE32(pAd, MAC_APCLI_BSSID_DW0, Addr4); Addr4 = 0; Addr4 = (ULONG)(pBssid[4]) | (ULONG)(pBssid[5] << 8); /* Enable APCLI mode */ Addr4 |= 0x10000; RTMP_IO_WRITE32(pAd, MAC_APCLI_BSSID_DW1, Addr4); } #endif /* MAC_APCLI_SUPPORT */ #ifdef MICROWAVE_OVEN_SUPPORT VOID AsicMeasureFalseCCA( IN PRTMP_ADAPTER pAd ) { if (pAd->chipOps.AsicMeasureFalseCCA) pAd->chipOps.AsicMeasureFalseCCA(pAd); } VOID AsicMitigateMicrowave( IN PRTMP_ADAPTER pAd ) { if (pAd->chipOps.AsicMitigateMicrowave) pAd->chipOps.AsicMitigateMicrowave(pAd); } #endif /* MICROWAVE_OVEN_SUPPORT */
gpl-2.0
blue236/xen
tools/console/daemon/main.c
10
5183
/*\ * Copyright (C) International Business Machines Corp., 2005 * Author(s): Anthony Liguori <aliguori@us.ibm.com> * * Xen Console Daemon * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; under version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; If not, see <http://www.gnu.org/licenses/>. \*/ #include <getopt.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <unistd.h> #include <string.h> #include <signal.h> #include <sys/types.h> #include <sys/resource.h> #include "xenctrl.h" #include "utils.h" #include "io.h" int log_reload = 0; int log_guest = 0; int log_hv = 0; int log_time_hv = 0; int log_time_guest = 0; char *log_dir = NULL; int discard_overflowed_data = 1; static void handle_hup(int sig) { log_reload = 1; } static void usage(char *name) { printf("Usage: %s [-h] [-V] [-v] [-i] [--log=none|guest|hv|all] [--log-dir=DIR] [--pid-file=PATH] [-t, --timestamp=none|guest|hv|all] [-o, --overflow-data=discard|keep]\n", name); } static void version(char *name) { printf("Xen Console Daemon 3.0\n"); } static void increase_fd_limit(void) { /* * We require many file descriptors: * - per domain: pty master, pty slave, logfile and evtchn * - misc extra: hypervisor log, privcmd, gntdev, std... * * Allow a generous 1000 for misc, and calculate the maximum possible * number of fds which could be used. 
*/ unsigned min_fds = (DOMID_FIRST_RESERVED * 4) + 1000; struct rlimit lim, new = { min_fds, min_fds }; if (getrlimit(RLIMIT_NOFILE, &lim) < 0) { fprintf(stderr, "Failed to obtain fd limit: %s\n", strerror(errno)); exit(1); } /* Do we already have sufficient? Great! */ if (lim.rlim_cur >= min_fds) return; /* Try to increase our limit. */ if (setrlimit(RLIMIT_NOFILE, &new) < 0) syslog(LOG_WARNING, "Unable to increase fd limit from {%llu, %llu} to " "{%llu, %llu}: (%s) - May run out with lots of domains", (unsigned long long)lim.rlim_cur, (unsigned long long)lim.rlim_max, (unsigned long long)new.rlim_cur, (unsigned long long)new.rlim_max, strerror(errno)); } int main(int argc, char **argv) { const char *sopts = "hVvit:o:"; struct option lopts[] = { { "help", 0, 0, 'h' }, { "version", 0, 0, 'V' }, { "verbose", 0, 0, 'v' }, { "interactive", 0, 0, 'i' }, { "log", 1, 0, 'l' }, { "log-dir", 1, 0, 'r' }, { "pid-file", 1, 0, 'p' }, { "timestamp", 1, 0, 't' }, { "overflow-data", 1, 0, 'o'}, { 0 }, }; bool is_interactive = false; int ch; int syslog_option = LOG_CONS; int syslog_mask = LOG_MASK(LOG_WARNING)|LOG_MASK(LOG_ERR)|LOG_MASK(LOG_CRIT)|\ LOG_MASK(LOG_ALERT)|LOG_MASK(LOG_EMERG); int opt_ind = 0; char *pidfile = NULL; while ((ch = getopt_long(argc, argv, sopts, lopts, &opt_ind)) != -1) { switch (ch) { case 'h': usage(argv[0]); exit(0); case 'V': version(argv[0]); exit(0); case 'v': #ifndef __sun__ syslog_option |= LOG_PERROR; #endif syslog_mask |= LOG_MASK(LOG_NOTICE)|LOG_MASK(LOG_INFO)| \ LOG_MASK(LOG_DEBUG); break; case 'i': is_interactive = true; break; case 'l': if (!strcmp(optarg, "all")) { log_hv = 1; log_guest = 1; } else if (!strcmp(optarg, "hv")) { log_hv = 1; } else if (!strcmp(optarg, "guest")) { log_guest = 1; } break; case 'r': log_dir = strdup(optarg); break; case 'p': pidfile = strdup(optarg); break; case 't': if (!strcmp(optarg, "all")) { log_time_hv = 1; log_time_guest = 1; } else if (!strcmp(optarg, "hv")) { log_time_hv = 1; } else if (!strcmp(optarg, 
"guest")) { log_time_guest = 1; } else if (!strcmp(optarg, "none")) { log_time_guest = 0; log_time_hv = 0; } break; case 'o': if (!strcmp(optarg, "keep")) { discard_overflowed_data = 0; } else if (!strcmp(optarg, "discard")) { discard_overflowed_data = 1; } break; case '?': fprintf(stderr, "Try `%s --help' for more information\n", argv[0]); exit(EINVAL); } } if (!log_dir) { log_dir = strdup("/var/log/xen/console"); } if (geteuid() != 0) { fprintf(stderr, "%s requires root to run.\n", argv[0]); exit(EPERM); } signal(SIGHUP, handle_hup); openlog("xenconsoled", syslog_option, LOG_DAEMON); setlogmask(syslog_mask); increase_fd_limit(); if (!is_interactive) { daemonize(pidfile ? pidfile : "/var/run/xenconsoled.pid"); } if (!xen_setup()) exit(1); handle_io(); closelog(); free(log_dir); free(pidfile); return 0; } /* * Local variables: * c-file-style: "linux" * indent-tabs-mode: t * c-indent-level: 8 * c-basic-offset: 8 * tab-width: 8 * End: */
gpl-2.0
csrocha/mplayer-stereo
mp3lib/layer3.c
10
36718
/* * Modified for use with MPlayer, for details see the changelog at * http://svn.mplayerhq.hu/mplayer/trunk/ * $Id: layer3.c 31032 2010-04-12 10:56:17Z diego $ */ /* * Mpeg Layer-3 audio decoder * -------------------------- * copyright (c) 1995-1999 by Michael Hipp. * All rights reserved. See also 'README' * * Optimize-TODO: put short bands into the band-field without the stride * of 3 reals * Length-optimze: unify long and short band code where it is possible */ #include "mpg123.h" #if 0 #define L3_DEBUG 1 #endif #if 0 #define CUT_HF #endif #define REAL_MUL(x, y) ((x) * (y)) static real ispow[8207]; static real aa_ca[8],aa_cs[8]; static real COS1[12][6]; static real win[4][36]; static real win1[4][36]; static real gainpow2[256+118+4]; /* non static for external 3dnow functions */ real COS9[9]; static real COS6_1,COS6_2; real tfcos36[9]; static real tfcos12[3]; #define NEW_DCT9 #ifdef NEW_DCT9 static real cos9[3],cos18[3]; #endif struct bandInfoStruct { uint16_t longIdx[23]; uint8_t longDiff[22]; uint16_t shortIdx[14]; uint8_t shortDiff[13]; }; static int longLimit[9][23]; static int shortLimit[9][14]; static const struct bandInfoStruct bandInfo[9] = { /* MPEG 1.0 */ { {0,4,8,12,16,20,24,30,36,44,52,62,74, 90,110,134,162,196,238,288,342,418,576}, {4,4,4,4,4,4,6,6,8, 8,10,12,16,20,24,28,34,42,50,54, 76,158}, {0,4*3,8*3,12*3,16*3,22*3,30*3,40*3,52*3,66*3, 84*3,106*3,136*3,192*3}, {4,4,4,4,6,8,10,12,14,18,22,30,56} } , { {0,4,8,12,16,20,24,30,36,42,50,60,72, 88,106,128,156,190,230,276,330,384,576}, {4,4,4,4,4,4,6,6,6, 8,10,12,16,18,22,28,34,40,46,54, 54,192}, {0,4*3,8*3,12*3,16*3,22*3,28*3,38*3,50*3,64*3, 80*3,100*3,126*3,192*3}, {4,4,4,4,6,6,10,12,14,16,20,26,66} } , { {0,4,8,12,16,20,24,30,36,44,54,66,82,102,126,156,194,240,296,364,448,550,576} , {4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102, 26} , {0,4*3,8*3,12*3,16*3,22*3,30*3,42*3,58*3,78*3,104*3,138*3,180*3,192*3} , {4,4,4,4,6,8,12,16,20,26,34,42,12} } , /* MPEG 2.0 */ { 
{0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576}, {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } , {0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} , {4,4,4,6,6,8,10,14,18,26,32,42,18 } } , /* changed 19th value fropm 330 to 332 */ { {0,6,12,18,24,30,36,44,54,66,80,96,114,136,162,194,232,278,332,394,464,540,576}, {6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,54,62,70,76,36 } , {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,136*3,180*3,192*3} , {4,4,4,6,8,10,12,14,18,24,32,44,12 } } , { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576}, {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 }, {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,134*3,174*3,192*3}, {4,4,4,6,8,10,12,14,18,24,30,40,18 } } , /* MPEG 2.5 */ { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576} , {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54}, {0,12,24,36,54,78,108,144,186,240,312,402,522,576}, {4,4,4,6,8,10,12,14,18,24,30,40,18} }, { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576} , {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54}, {0,12,24,36,54,78,108,144,186,240,312,402,522,576}, {4,4,4,6,8,10,12,14,18,24,30,40,18} }, { {0,12,24,36,48,60,72,88,108,132,160,192,232,280,336,400,476,566,568,570,572,574,576}, {12,12,12,12,12,12,16,20,24,28,32,40,48,56,64,76,90,2,2,2,2,2}, {0, 24, 48, 72,108,156,216,288,372,480,486,492,498,576}, {8,8,8,12,16,20,24,28,36,2,2,2,26} } , }; static int mapbuf0[9][152]; static int mapbuf1[9][156]; static int mapbuf2[9][44]; static int *map[9][3]; static int *mapend[9][3]; static unsigned int n_slen2[512]; /* MPEG 2.0 slen for 'normal' mode */ static unsigned int i_slen2[256]; /* MPEG 2.0 slen for intensity stereo */ static real tan1_1[16],tan2_1[16],tan1_2[16],tan2_2[16]; static real pow1_1[2][16],pow2_1[2][16],pow1_2[2][16],pow2_2[2][16]; /* * init tables for layer-3 */ 
static void init_layer3(int down_sample_sblimit) { int i,j,k,l; for(i=-256;i<118+4;i++) { if(_has_mmx) gainpow2[i+256] = 16384.0 * pow((double)2.0,-0.25 * (double) (i+210) ); else gainpow2[i+256] = pow((double)2.0,-0.25 * (double) (i+210) ); } for(i=0;i<8207;i++) ispow[i] = pow((double)i,(double)4.0/3.0); for (i=0;i<8;i++) { static const double Ci[8]={-0.6,-0.535,-0.33,-0.185,-0.095,-0.041,-0.0142,-0.0037}; double sq=sqrt(1.0+Ci[i]*Ci[i]); aa_cs[i] = 1.0/sq; aa_ca[i] = Ci[i]/sq; } for(i=0;i<18;i++) { win[0][i] = win[1][i] = 0.5 * sin( M_PI / 72.0 * (double) (2*(i+0) +1) ) / cos ( M_PI * (double) (2*(i+0) +19) / 72.0 ); win[0][i+18] = win[3][i+18] = 0.5 * sin( M_PI / 72.0 * (double) (2*(i+18)+1) ) / cos ( M_PI * (double) (2*(i+18)+19) / 72.0 ); } for(i=0;i<6;i++) { win[1][i+18] = 0.5 / cos ( M_PI * (double) (2*(i+18)+19) / 72.0 ); win[3][i+12] = 0.5 / cos ( M_PI * (double) (2*(i+12)+19) / 72.0 ); win[1][i+24] = 0.5 * sin( M_PI / 24.0 * (double) (2*i+13) ) / cos ( M_PI * (double) (2*(i+24)+19) / 72.0 ); win[1][i+30] = win[3][i] = 0.0; win[3][i+6 ] = 0.5 * sin( M_PI / 24.0 * (double) (2*i+1) ) / cos ( M_PI * (double) (2*(i+6 )+19) / 72.0 ); } for(i=0;i<9;i++) COS9[i] = cos( M_PI / 18.0 * (double) i); for(i=0;i<9;i++) tfcos36[i] = 0.5 / cos ( M_PI * (double) (i*2+1) / 36.0 ); for(i=0;i<3;i++) tfcos12[i] = 0.5 / cos ( M_PI * (double) (i*2+1) / 12.0 ); COS6_1 = cos( M_PI / 6.0 * (double) 1); COS6_2 = cos( M_PI / 6.0 * (double) 2); #ifdef NEW_DCT9 cos9[0] = cos(1.0*M_PI/9.0); cos9[1] = cos(5.0*M_PI/9.0); cos9[2] = cos(7.0*M_PI/9.0); cos18[0] = cos(1.0*M_PI/18.0); cos18[1] = cos(11.0*M_PI/18.0); cos18[2] = cos(13.0*M_PI/18.0); #endif for(i=0;i<12;i++) { win[2][i] = 0.5 * sin( M_PI / 24.0 * (double) (2*i+1) ) / cos ( M_PI * (double) (2*i+7) / 24.0 ); for(j=0;j<6;j++) COS1[i][j] = cos( M_PI / 24.0 * (double) ((2*i+7)*(2*j+1)) ); } for(j=0;j<4;j++) { static const int len[4] = { 36,36,12,36 }; for(i=0;i<len[j];i+=2) win1[j][i] = + win[j][i]; for(i=1;i<len[j];i+=2) win1[j][i] = 
- win[j][i]; } for(i=0;i<16;i++) { double t = tan( (double) i * M_PI / 12.0 ); tan1_1[i] = t / (1.0+t); tan2_1[i] = 1.0 / (1.0 + t); tan1_2[i] = M_SQRT2 * t / (1.0+t); tan2_2[i] = M_SQRT2 / (1.0 + t); for(j=0;j<2;j++) { double base = pow(2.0,-0.25*(j+1.0)); double p1=1.0,p2=1.0; if(i > 0) { if( i & 1 ) p1 = pow(base,(i+1.0)*0.5); else p2 = pow(base,i*0.5); } pow1_1[j][i] = p1; pow2_1[j][i] = p2; pow1_2[j][i] = M_SQRT2 * p1; pow2_2[j][i] = M_SQRT2 * p2; } } for(j=0;j<9;j++) { const struct bandInfoStruct *bi = &bandInfo[j]; int *mp; int cb,lwin; const uint8_t *bdf; mp = map[j][0] = mapbuf0[j]; bdf = bi->longDiff; for(i=0,cb = 0; cb < 8 ; cb++,i+=*bdf++) { *mp++ = (*bdf) >> 1; *mp++ = i; *mp++ = 3; *mp++ = cb; } bdf = bi->shortDiff+3; for(cb=3;cb<13;cb++) { int l = (*bdf++) >> 1; for(lwin=0;lwin<3;lwin++) { *mp++ = l; *mp++ = i + lwin; *mp++ = lwin; *mp++ = cb; } i += 6*l; } mapend[j][0] = mp; mp = map[j][1] = mapbuf1[j]; bdf = bi->shortDiff+0; for(i=0,cb=0;cb<13;cb++) { int l = (*bdf++) >> 1; for(lwin=0;lwin<3;lwin++) { *mp++ = l; *mp++ = i + lwin; *mp++ = lwin; *mp++ = cb; } i += 6*l; } mapend[j][1] = mp; mp = map[j][2] = mapbuf2[j]; bdf = bi->longDiff; for(cb = 0; cb < 22 ; cb++) { *mp++ = (*bdf++) >> 1; *mp++ = cb; } mapend[j][2] = mp; } for(j=0;j<9;j++) { for(i=0;i<23;i++) { longLimit[j][i] = (bandInfo[j].longIdx[i] - 1 + 8) / 18 + 1; if(longLimit[j][i] > (down_sample_sblimit) ) longLimit[j][i] = down_sample_sblimit; } for(i=0;i<14;i++) { shortLimit[j][i] = (bandInfo[j].shortIdx[i] - 1) / 18 + 1; if(shortLimit[j][i] > (down_sample_sblimit) ) shortLimit[j][i] = down_sample_sblimit; } } for(i=0;i<5;i++) { for(j=0;j<6;j++) { for(k=0;k<6;k++) { int n = k + j * 6 + i * 36; i_slen2[n] = i|(j<<3)|(k<<6)|(3<<12); } } } for(i=0;i<4;i++) { for(j=0;j<4;j++) { for(k=0;k<4;k++) { int n = k + j * 4 + i * 16; i_slen2[n+180] = i|(j<<3)|(k<<6)|(4<<12); } } } for(i=0;i<4;i++) { for(j=0;j<3;j++) { int n = j + i * 3; i_slen2[n+244] = i|(j<<3) | (5<<12); n_slen2[n+500] = i|(j<<3) | 
(2<<12) | (1<<15); } } for(i=0;i<5;i++) { for(j=0;j<5;j++) { for(k=0;k<4;k++) { for(l=0;l<4;l++) { int n = l + k * 4 + j * 16 + i * 80; n_slen2[n] = i|(j<<3)|(k<<6)|(l<<9)|(0<<12); } } } } for(i=0;i<5;i++) { for(j=0;j<5;j++) { for(k=0;k<4;k++) { int n = k + j * 4 + i * 20; n_slen2[n+400] = i|(j<<3)|(k<<6)|(1<<12); } } } } /* * read additional side information (for MPEG 1 and MPEG 2) */ static int III_get_side_info(struct III_sideinfo *si,int stereo, int ms_stereo,int sfreq,int single,int lsf) { int ch, gr; int powdiff = (single == 3) ? 4 : 0; static const int tabs[2][5] = { { 2,9,5,3,4 } , { 1,8,1,2,9 } }; const int *tab = tabs[lsf]; si->main_data_begin = getbits(tab[1]); if (stereo == 1) si->private_bits = getbits_fast(tab[2]); else si->private_bits = getbits_fast(tab[3]); if(!lsf) { for (ch=0; ch<stereo; ch++) { si->ch[ch].gr[0].scfsi = -1; si->ch[ch].gr[1].scfsi = getbits_fast(4); } } for (gr=0; gr<tab[0]; gr++) { for (ch=0; ch<stereo; ch++) { register struct gr_info_s *gr_info = &(si->ch[ch].gr[gr]); gr_info->part2_3_length = getbits(12); gr_info->big_values = getbits(9); if(gr_info->big_values > 288) { fprintf(stderr,"big_values too large!\n"); gr_info->big_values = 288; } gr_info->pow2gain = gainpow2+256 - getbits_fast(8) + powdiff; if(ms_stereo) gr_info->pow2gain += 2; gr_info->scalefac_compress = getbits(tab[4]); if(get1bit()) { /* window switch flag */ int i; #ifdef L3_DEBUG if(2*gr_info->big_values > bandInfo[sfreq].shortIdx[12]) fprintf(stderr,"L3: BigValues too large, doesn't make sense %d %d\n",2*gr_info->big_values,bandInfo[sfreq].shortIdx[12]); #endif gr_info->block_type = getbits_fast(2); gr_info->mixed_block_flag = get1bit(); gr_info->table_select[0] = getbits_fast(5); gr_info->table_select[1] = getbits_fast(5); /* * table_select[2] not needed, because there is no region2, * but to satisfy some verifications tools we set it either. 
*/ gr_info->table_select[2] = 0; for(i=0;i<3;i++) gr_info->full_gain[i] = gr_info->pow2gain + (getbits_fast(3)<<3); if(gr_info->block_type == 0) { fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n"); return 0; } /* region_count/start parameters are implicit in this case. */ if(!lsf || gr_info->block_type == 2) gr_info->region1start = 36>>1; else { /* check this again for 2.5 and sfreq=8 */ if(sfreq == 8) gr_info->region1start = 108>>1; else gr_info->region1start = 54>>1; } gr_info->region2start = 576>>1; } else { int i,r0c,r1c; #ifdef L3_DEBUG if(2*gr_info->big_values > bandInfo[sfreq].longIdx[21]) fprintf(stderr,"L3: BigValues too large, doesn't make sense %d %d\n",2*gr_info->big_values,bandInfo[sfreq].longIdx[21]); #endif for (i=0; i<3; i++) gr_info->table_select[i] = getbits_fast(5); r0c = getbits_fast(4); r1c = getbits_fast(3); gr_info->region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ; if(r0c + r1c + 2 > 22) gr_info->region2start = 576>>1; else gr_info->region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1; gr_info->block_type = 0; gr_info->mixed_block_flag = 0; } if(!lsf) gr_info->preflag = get1bit(); gr_info->scalefac_scale = get1bit(); gr_info->count1table_select = get1bit(); } } return !0; } /* * read scalefactors */ static int III_get_scale_factors_1(int *scf,struct gr_info_s *gr_info) { static const unsigned char slen[2][16] = { {0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4}, {0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3} }; int numbits; int num0 = slen[0][gr_info->scalefac_compress]; int num1 = slen[1][gr_info->scalefac_compress]; if (gr_info->block_type == 2) { int i=18; numbits = (num0 + num1) * 18; if (gr_info->mixed_block_flag) { for (i=8;i;i--) *scf++ = getbits_fast(num0); i = 9; numbits -= num0; /* num0 * 17 + num1 * 18 */ } for (;i;i--) *scf++ = getbits_fast(num0); for (i = 18; i; i--) *scf++ = getbits_fast(num1); *scf++ = 0; *scf++ = 0; *scf++ = 0; /* short[13][0..2] = 0 */ } else { int i; int scfsi = 
gr_info->scfsi; if(scfsi < 0) { /* scfsi < 0 => granule == 0 */ for(i=11;i;i--) *scf++ = getbits_fast(num0); for(i=10;i;i--) *scf++ = getbits_fast(num1); numbits = (num0 + num1) * 10 + num0; *scf++ = 0; } else { numbits = 0; if(!(scfsi & 0x8)) { for (i=0;i<6;i++) *scf++ = getbits_fast(num0); numbits += num0 * 6; } else { scf += 6; } if(!(scfsi & 0x4)) { for (i=0;i<5;i++) *scf++ = getbits_fast(num0); numbits += num0 * 5; } else { scf += 5; } if(!(scfsi & 0x2)) { for(i=0;i<5;i++) *scf++ = getbits_fast(num1); numbits += num1 * 5; } else { scf += 5; } if(!(scfsi & 0x1)) { for (i=0;i<5;i++) *scf++ = getbits_fast(num1); numbits += num1 * 5; } else { scf += 5; } *scf++ = 0; /* no l[21] in original sources */ } } return numbits; } static int III_get_scale_factors_2(int *scf,struct gr_info_s *gr_info,int i_stereo) { unsigned char *pnt; int i,j; unsigned int slen; int n = 0; int numbits = 0; static unsigned char stab[3][6][4] = { { { 6, 5, 5,5 } , { 6, 5, 7,3 } , { 11,10,0,0} , { 7, 7, 7,0 } , { 6, 6, 6,3 } , { 8, 8,5,0} } , { { 9, 9, 9,9 } , { 9, 9,12,6 } , { 18,18,0,0} , {12,12,12,0 } , {12, 9, 9,6 } , { 15,12,9,0} } , { { 6, 9, 9,9 } , { 6, 9,12,6 } , { 15,18,0,0} , { 6,15,12,0 } , { 6,12, 9,6 } , { 6,18,9,0} } }; if(i_stereo) /* i_stereo AND second channel -> do_layer3() checks this */ slen = i_slen2[gr_info->scalefac_compress>>1]; else slen = n_slen2[gr_info->scalefac_compress]; gr_info->preflag = (slen>>15) & 0x1; n = 0; if( gr_info->block_type == 2 ) { n++; if(gr_info->mixed_block_flag) n++; } pnt = stab[n][(slen>>12)&0x7]; for(i=0;i<4;i++) { int num = slen & 0x7; slen >>= 3; if(num) { for(j=0;j<(int)(pnt[i]);j++) *scf++ = getbits_fast(num); numbits += pnt[i] * num; } else { for(j=0;j<(int)(pnt[i]);j++) *scf++ = 0; } } n = (n << 1) + 1; for(i=0;i<n;i++) *scf++ = 0; return numbits; } static int pretab1[22] = {0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,2,2,3,3,3,2,0}; static int pretab2[22] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; #define getbitoffset() ((-bitindex)&0x7) 
#define getbyte() (*wordpointer++) /* * Dequantize samples (includes huffman decoding) */ /* 24 is enough because tab13 has max. a 19 bit huffvector */ #define BITSHIFT ((sizeof(long)-1)*8) #define REFRESH_MASK \ while(num < BITSHIFT) { \ mask |= ((unsigned long)getbyte())<<(BITSHIFT-num); \ num += 8; \ part2remain -= 8; } static int III_dequantize_sample(real xr[SBLIMIT][SSLIMIT],int *scf, struct gr_info_s *gr_info,int sfreq,int part2bits) { int shift = 1 + gr_info->scalefac_scale; real *xrpnt = (real *) xr; int l[3],l3; int part2remain = gr_info->part2_3_length - part2bits; int *me; int num=getbitoffset(); long mask; /* we must split this, because for num==0 the shift is undefined if you do it in one step */ mask = ((unsigned long) getbits(num))<<BITSHIFT; mask <<= 8-num; part2remain -= num; { int bv = gr_info->big_values; int region1 = gr_info->region1start; int region2 = gr_info->region2start; l3 = ((576>>1)-bv)>>1; /* * we may lose the 'odd' bit here !! * check this later again */ if(bv <= region1) { l[0] = bv; l[1] = l[2] = 0; } else { l[0] = region1; if(bv <= region2) { l[1] = bv - l[0]; l[2] = 0; } else { l[1] = region2 - l[0]; l[2] = bv - region2; } } } if(gr_info->block_type == 2) { /* * decoding with short or mixed mode BandIndex table */ int i,max[4]; int step=0,lwin=3,cb=0; register real v = 0.0; register int *m,mc; if(gr_info->mixed_block_flag) { max[3] = -1; max[0] = max[1] = max[2] = 2; m = map[sfreq][0]; me = mapend[sfreq][0]; } else { max[0] = max[1] = max[2] = max[3] = -1; /* max[3] not really needed in this case */ m = map[sfreq][1]; me = mapend[sfreq][1]; } mc = 0; for(i=0;i<2;i++) { int lp = l[i]; struct newhuff *h = ht+gr_info->table_select[i]; for(;lp;lp--,mc--) { register int x,y; if( (!mc) ) { mc = *m++; xrpnt = ((real *) xr) + (*m++); lwin = *m++; cb = *m++; if(lwin == 3) { v = gr_info->pow2gain[(*scf++) << shift]; step = 1; } else { v = gr_info->full_gain[lwin][(*scf++) << shift]; step = 3; } } { register short *val = h->table; 
REFRESH_MASK; while((y=*val++)<0) { if (mask < 0) val -= y; num--; mask <<= 1; } x = y >> 4; y &= 0xf; } if(x == 15 && h->linbits) { max[lwin] = cb; REFRESH_MASK; x += ((unsigned long) mask) >> (BITSHIFT+8-h->linbits); num -= h->linbits+1; mask <<= h->linbits; if(mask < 0) *xrpnt = REAL_MUL(-ispow[x], v); else *xrpnt = REAL_MUL(ispow[x], v); mask <<= 1; } else if(x) { max[lwin] = cb; if(mask < 0) *xrpnt = REAL_MUL(-ispow[x], v); else *xrpnt = REAL_MUL(ispow[x], v); num--; mask <<= 1; } else *xrpnt = 0.0; xrpnt += step; if(y == 15 && h->linbits) { max[lwin] = cb; REFRESH_MASK; y += ((unsigned long) mask) >> (BITSHIFT+8-h->linbits); num -= h->linbits+1; mask <<= h->linbits; if(mask < 0) *xrpnt = REAL_MUL(-ispow[y], v); else *xrpnt = REAL_MUL(ispow[y], v); mask <<= 1; } else if(y) { max[lwin] = cb; if(mask < 0) *xrpnt = REAL_MUL(-ispow[y], v); else *xrpnt = REAL_MUL(ispow[y], v); num--; mask <<= 1; } else *xrpnt = 0.0; xrpnt += step; } } for(;l3 && (part2remain+num > 0);l3--) { struct newhuff *h = htc+gr_info->count1table_select; register short *val = h->table,a; REFRESH_MASK; while((a=*val++)<0) { if (mask < 0) val -= a; num--; mask <<= 1; } if(part2remain+num <= 0) { num -= part2remain+num; break; } for(i=0;i<4;i++) { if(!(i & 1)) { if(!mc) { mc = *m++; xrpnt = ((real *) xr) + (*m++); lwin = *m++; cb = *m++; if(lwin == 3) { v = gr_info->pow2gain[(*scf++) << shift]; step = 1; } else { v = gr_info->full_gain[lwin][(*scf++) << shift]; step = 3; } } mc--; } if( (a & (0x8>>i)) ) { max[lwin] = cb; if(part2remain+num <= 0) { break; } if(mask < 0) *xrpnt = -v; else *xrpnt = v; num--; mask <<= 1; } else *xrpnt = 0.0; xrpnt += step; } } if(lwin < 3) { /* short band? 
*/ while(1) { for(;mc > 0;mc--) { *xrpnt = 0.0; xrpnt += 3; /* short band -> step=3 */ *xrpnt = 0.0; xrpnt += 3; } if(m >= me) break; mc = *m++; xrpnt = ((real *) xr) + *m++; if(*m++ == 0) break; /* optimize: field will be set to zero at the end of the function */ m++; /* cb */ } } gr_info->maxband[0] = max[0]+1; gr_info->maxband[1] = max[1]+1; gr_info->maxband[2] = max[2]+1; gr_info->maxbandl = max[3]+1; { int rmax = max[0] > max[1] ? max[0] : max[1]; rmax = (rmax > max[2] ? rmax : max[2]) + 1; gr_info->maxb = rmax ? shortLimit[sfreq][rmax] : longLimit[sfreq][max[3]+1]; } } else { /* * decoding with 'long' BandIndex table (block_type != 2) */ int *pretab = gr_info->preflag ? pretab1 : pretab2; int i,max = -1; int cb = 0; int *m = map[sfreq][2]; register real v = 0.0; int mc = 0; /* * long hash table values */ for(i=0;i<3;i++) { int lp = l[i]; struct newhuff *h = ht+gr_info->table_select[i]; for(;lp;lp--,mc--) { int x,y; if(!mc) { mc = *m++; cb = *m++; #ifdef CUT_HF if(cb == 21) { fprintf(stderr,"c"); v = 0.0; } else #endif v = gr_info->pow2gain[((*scf++) + (*pretab++)) << shift]; } { register short *val = h->table; REFRESH_MASK; while((y=*val++)<0) { if (mask < 0) val -= y; num--; mask <<= 1; } x = y >> 4; y &= 0xf; } if (x == 15 && h->linbits) { max = cb; REFRESH_MASK; x += ((unsigned long) mask) >> (BITSHIFT+8-h->linbits); num -= h->linbits+1; mask <<= h->linbits; if(mask < 0) *xrpnt++ = REAL_MUL(-ispow[x], v); else *xrpnt++ = REAL_MUL(ispow[x], v); mask <<= 1; } else if(x) { max = cb; if(mask < 0) *xrpnt++ = REAL_MUL(-ispow[x], v); else *xrpnt++ = REAL_MUL(ispow[x], v); num--; mask <<= 1; } else *xrpnt++ = 0.0; if (y == 15 && h->linbits) { max = cb; REFRESH_MASK; y += ((unsigned long) mask) >> (BITSHIFT+8-h->linbits); num -= h->linbits+1; mask <<= h->linbits; if(mask < 0) *xrpnt++ = REAL_MUL(-ispow[y], v); else *xrpnt++ = REAL_MUL(ispow[y], v); mask <<= 1; } else if(y) { max = cb; if(mask < 0) *xrpnt++ = REAL_MUL(-ispow[y], v); else *xrpnt++ = 
REAL_MUL(ispow[y], v); num--; mask <<= 1; } else *xrpnt++ = 0.0; } } /* * short (count1table) values */ for(;l3 && (part2remain+num > 0);l3--) { struct newhuff *h = htc+gr_info->count1table_select; register short *val = h->table,a; REFRESH_MASK; while((a=*val++)<0) { if (mask < 0) val -= a; num--; mask <<= 1; } if(part2remain+num <= 0) { num -= part2remain+num; break; } for(i=0;i<4;i++) { if(!(i & 1)) { if(!mc) { mc = *m++; cb = *m++; #ifdef CUT_HF if(cb == 21) { fprintf(stderr,"c"); v = 0.0; } else #endif v = gr_info->pow2gain[((*scf++) + (*pretab++)) << shift]; } mc--; } if ( (a & (0x8>>i)) ) { max = cb; if(part2remain+num <= 0) { break; } if(mask < 0) *xrpnt++ = -v; else *xrpnt++ = v; num--; mask <<= 1; } else *xrpnt++ = 0.0; } } gr_info->maxbandl = max+1; gr_info->maxb = longLimit[sfreq][gr_info->maxbandl]; } part2remain += num; // backbits(num); bitindex -= num; wordpointer += (bitindex>>3); bitindex &= 0x7; num = 0; while(xrpnt < &xr[SBLIMIT][0]) *xrpnt++ = 0.0; while( part2remain > 16 ) { getbits(16); /* Dismiss stuffing Bits */ part2remain -= 16; } if(part2remain > 0) getbits(part2remain); else if(part2remain < 0) { fprintf(stderr,"mpg123: Can't rewind stream by %d bits!\n",-part2remain); return 1; /* -> error */ } return 0; } /* * III_stereo: calculate real channel values for Joint-I-Stereo-mode */ static void III_i_stereo(real xr_buf[2][SBLIMIT][SSLIMIT],int *scalefac, struct gr_info_s *gr_info,int sfreq,int ms_stereo,int lsf) { real (*xr)[SBLIMIT*SSLIMIT] = (real (*)[SBLIMIT*SSLIMIT] ) xr_buf; const struct bandInfoStruct *bi = &bandInfo[sfreq]; const real *tab1,*tab2; int tab; static const real *tabs[3][2][2] = { { { tan1_1,tan2_1 } , { tan1_2,tan2_2 } }, { { pow1_1[0],pow2_1[0] } , { pow1_2[0],pow2_2[0] } } , { { pow1_1[1],pow2_1[1] } , { pow1_2[1],pow2_2[1] } } }; tab = lsf + (gr_info->scalefac_compress & lsf); tab1 = tabs[tab][ms_stereo][0]; tab2 = tabs[tab][ms_stereo][1]; #if 0 if(lsf) { int p = gr_info->scalefac_compress & 0x1; if(ms_stereo) { tab1 
= pow1_2[p]; tab2 = pow2_2[p]; } else { tab1 = pow1_1[p]; tab2 = pow2_1[p]; } } else { if(ms_stereo) { tab1 = tan1_2; tab2 = tan2_2; } else { tab1 = tan1_1; tab2 = tan2_1; } } #endif // printf("III_i_st: tab1=%p tab2=%p tab=%d ms=%d \n", tab1, tab2, tab, ms_stereo); if (gr_info->block_type == 2) { int lwin,do_l = 0; if( gr_info->mixed_block_flag ) do_l = 1; for (lwin=0;lwin<3;lwin++) { /* process each window */ /* get first band with zero values */ int is_p,sb,idx,sfb = gr_info->maxband[lwin]; /* sfb is minimal 3 for mixed mode */ if(sfb > 3) do_l = 0; for(;sfb<12;sfb++) { is_p = scalefac[sfb*3+lwin-gr_info->mixed_block_flag]; /* scale: 0-15 */ if(is_p != 7) { real t1,t2; sb = bi->shortDiff[sfb]; idx = bi->shortIdx[sfb] + lwin; t1 = tab1[is_p]; t2 = tab2[is_p]; for (; sb > 0; sb--,idx+=3) { real v = xr[0][idx]; xr[0][idx] = REAL_MUL(v, t1); xr[1][idx] = REAL_MUL(v, t2); } } } #if 1 /* in the original: copy 10 to 11 , here: copy 11 to 12 maybe still wrong??? (copy 12 to 13?) */ is_p = scalefac[11*3+lwin-gr_info->mixed_block_flag]; /* scale: 0-15 */ sb = bi->shortDiff[12]; idx = bi->shortIdx[12] + lwin; #else is_p = scalefac[10*3+lwin-gr_info->mixed_block_flag]; /* scale: 0-15 */ sb = bi->shortDiff[11]; idx = bi->shortIdx[11] + lwin; #endif if(is_p != 7) { real t1,t2; t1 = tab1[is_p]; t2 = tab2[is_p]; for ( ; sb > 0; sb--,idx+=3 ) { real v = xr[0][idx]; xr[0][idx] = REAL_MUL(v, t1); xr[1][idx] = REAL_MUL(v, t2); } } } /* end for(lwin; .. ; . 
) */ /* also check l-part, if ALL bands in the three windows are 'empty' * and mode = mixed_mode */ if (do_l) { int sfb = gr_info->maxbandl; int idx = bi->longIdx[sfb]; for ( ; sfb<8; sfb++ ) { int sb = bi->longDiff[sfb]; int is_p = scalefac[sfb]; /* scale: 0-15 */ if(is_p != 7) { real t1,t2; t1 = tab1[is_p]; t2 = tab2[is_p]; for ( ; sb > 0; sb--,idx++) { real v = xr[0][idx]; xr[0][idx] = REAL_MUL(v, t1); xr[1][idx] = REAL_MUL(v, t2); } } else idx += sb; } } } else { /* ((gr_info->block_type != 2)) */ int sfb = gr_info->maxbandl; int is_p,idx = bi->longIdx[sfb]; /* hmm ... maybe the maxbandl stuff for i-stereo is buggy? */ if(sfb <= 21) { for ( ; sfb<21; sfb++) { int sb = bi->longDiff[sfb]; is_p = scalefac[sfb]; /* scale: 0-15 */ if(is_p != 7) { real t1,t2; t1 = tab1[is_p]; t2 = tab2[is_p]; for ( ; sb > 0; sb--,idx++) { real v = xr[0][idx]; xr[0][idx] = REAL_MUL(v, t1); xr[1][idx] = REAL_MUL(v, t2); } } else idx += sb; } is_p = scalefac[20]; if(is_p != 7) { /* copy l-band 20 to l-band 21 */ int sb; real t1 = tab1[is_p],t2 = tab2[is_p]; for ( sb = bi->longDiff[21]; sb > 0; sb--,idx++ ) { real v = xr[0][idx]; xr[0][idx] = REAL_MUL(v, t1); xr[1][idx] = REAL_MUL(v, t2); } } } /* end: if(sfb <= 21) */ } /* ... 
*/ } static void III_antialias(real xr[SBLIMIT][SSLIMIT],struct gr_info_s *gr_info) { int sblim; if(gr_info->block_type == 2) { if(!gr_info->mixed_block_flag) return; sblim = 1; } else { sblim = gr_info->maxb-1; } /* 31 alias-reduction operations between each pair of sub-bands */ /* with 8 butterflies between each pair */ { int sb; real *xr1=(real *) xr[1]; for(sb=sblim;sb;sb--,xr1+=10) { int ss; real *cs=aa_cs,*ca=aa_ca; real *xr2 = xr1; for(ss=7;ss>=0;ss--) { /* upper and lower butterfly inputs */ register real bu = *--xr2,bd = *xr1; *xr2 = (bu * (*cs) ) - (bd * (*ca) ); *xr1++ = (bd * (*cs++) ) + (bu * (*ca++) ); } } } } #include "dct64.c" #include "dct36.c" #include "dct12.c" #include "decod386.c" /* * III_hybrid */ static dct36_func_t dct36_func; static void III_hybrid(real fsIn[SBLIMIT][SSLIMIT],real tsOut[SSLIMIT][SBLIMIT], int ch,struct gr_info_s *gr_info) { real *tspnt = (real *) tsOut; static real block[2][2][SBLIMIT*SSLIMIT] = { { { 0, } } }; static int blc[2]={0,0}; real *rawout1,*rawout2; int bt; int sb = 0; { int b = blc[ch]; rawout1=block[b][ch]; b=-b+1; rawout2=block[b][ch]; blc[ch] = b; } if(gr_info->mixed_block_flag) { sb = 2; (*dct36_func)(fsIn[0],rawout1,rawout2,win[0],tspnt); (*dct36_func)(fsIn[1],rawout1+18,rawout2+18,win1[0],tspnt+1); rawout1 += 36; rawout2 += 36; tspnt += 2; } bt = gr_info->block_type; if(bt == 2) { for (; sb<gr_info->maxb; sb+=2,tspnt+=2,rawout1+=36,rawout2+=36) { dct12(fsIn[sb],rawout1,rawout2,win[2],tspnt); dct12(fsIn[sb+1],rawout1+18,rawout2+18,win1[2],tspnt+1); } } else { for (; sb<gr_info->maxb; sb+=2,tspnt+=2,rawout1+=36,rawout2+=36) { (*dct36_func)(fsIn[sb],rawout1,rawout2,win[bt],tspnt); (*dct36_func)(fsIn[sb+1],rawout1+18,rawout2+18,win1[bt],tspnt+1); } } for(;sb<SBLIMIT;sb++,tspnt++) { int i; for(i=0;i<SSLIMIT;i++) { tspnt[i*SBLIMIT] = *rawout1++; *rawout2++ = 0.0; } } } /* * main layer3 handler */ /* int do_layer3(struct frame *fr,int outmode,struct audio_info_struct *ai) */ static int do_layer3(struct frame 
*fr,int single){ int gr, ch, ss,clip=0; int scalefacs[2][39]; /* max 39 for short[13][3] mode, mixed: 38, long: 22 */ struct III_sideinfo sideinfo; int stereo = fr->stereo; int ms_stereo,i_stereo; int sfreq = fr->sampling_frequency; int stereo1,granules; // if (fr->error_protection) getbits(16); /* skip crc */ if(stereo == 1) { /* stream is mono */ stereo1 = 1; single = 0; } else if(single >= 0) /* stream is stereo, but force to mono */ stereo1 = 1; else stereo1 = 2; if(fr->mode == MPG_MD_JOINT_STEREO) { ms_stereo = (fr->mode_ext & 0x2)>>1; i_stereo = fr->mode_ext & 0x1; } else ms_stereo = i_stereo = 0; if(!III_get_side_info(&sideinfo,stereo,ms_stereo,sfreq,single,fr->lsf)) return -1; set_pointer(sideinfo.main_data_begin); granules = (fr->lsf) ? 1 : 2; for (gr=0;gr<granules;gr++){ DECLARE_ALIGNED(16, real, hybridIn[2][SBLIMIT][SSLIMIT]); DECLARE_ALIGNED(16, real, hybridOut[2][SSLIMIT][SBLIMIT]); { struct gr_info_s *gr_info = &(sideinfo.ch[0].gr[gr]); int part2bits; if(fr->lsf) part2bits = III_get_scale_factors_2(scalefacs[0],gr_info,0); else part2bits = III_get_scale_factors_1(scalefacs[0],gr_info); if(III_dequantize_sample(hybridIn[0], scalefacs[0],gr_info,sfreq,part2bits)) return clip; } if(stereo == 2) { struct gr_info_s *gr_info = &(sideinfo.ch[1].gr[gr]); int part2bits; if(fr->lsf) part2bits = III_get_scale_factors_2(scalefacs[1],gr_info,i_stereo); else part2bits = III_get_scale_factors_1(scalefacs[1],gr_info); if(III_dequantize_sample(hybridIn[1],scalefacs[1],gr_info,sfreq,part2bits)) return clip; if(ms_stereo) { int i; int maxb = sideinfo.ch[0].gr[gr].maxb; if(sideinfo.ch[1].gr[gr].maxb > maxb) maxb = sideinfo.ch[1].gr[gr].maxb; for(i=0;i<SSLIMIT*maxb;i++) { real tmp0 = ((real *)hybridIn[0])[i]; real tmp1 = ((real *)hybridIn[1])[i]; ((real *)hybridIn[0])[i] = tmp0 + tmp1; ((real *)hybridIn[1])[i] = tmp0 - tmp1; } } if(i_stereo) III_i_stereo(hybridIn,scalefacs[1],gr_info,sfreq,ms_stereo,fr->lsf); if(ms_stereo || i_stereo || (single == 3) ) { if(gr_info->maxb 
> sideinfo.ch[0].gr[gr].maxb) sideinfo.ch[0].gr[gr].maxb = gr_info->maxb; else gr_info->maxb = sideinfo.ch[0].gr[gr].maxb; } switch(single) { case 3: { register int i; register real *in0 = (real *) hybridIn[0],*in1 = (real *) hybridIn[1]; for(i=0;i<SSLIMIT*gr_info->maxb;i++,in0++) *in0 = (*in0 + *in1++); /* *0.5 done by pow-scale */ break; } case 1: { register int i; register real *in0 = (real *) hybridIn[0],*in1 = (real *) hybridIn[1]; for(i=0;i<SSLIMIT*gr_info->maxb;i++) *in0++ = *in1++; break; } } } // if(stereo == 2) for(ch=0;ch<stereo1;ch++) { struct gr_info_s *gr_info = &(sideinfo.ch[ch].gr[gr]); III_antialias(hybridIn[ch],gr_info); III_hybrid(hybridIn[ch], hybridOut[ch], ch,gr_info); } for(ss=0;ss<SSLIMIT;ss++) { if(single >= 0) { clip += (fr->synth_mono)(hybridOut[0][ss],pcm_sample,&pcm_point); } else { int p1 = pcm_point; clip += (fr->synth)(hybridOut[0][ss],0,pcm_sample,&p1); clip += (fr->synth)(hybridOut[1][ss],1,pcm_sample,&pcm_point); } } } return clip; }
gpl-2.0
artifexor/qemu
hw/sun4u.c
10
30664
/* * QEMU Sun4u/Sun4v System Emulator * * Copyright (c) 2005 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "hw.h" #include "pci/pci.h" #include "apb_pci.h" #include "pc.h" #include "serial.h" #include "nvram.h" #include "fdc.h" #include "net/net.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" #include "boards.h" #include "firmware_abi.h" #include "fw_cfg.h" #include "sysbus.h" #include "ide.h" #include "loader.h" #include "elf.h" #include "sysemu/blockdev.h" #include "exec/address-spaces.h" //#define DEBUG_IRQ //#define DEBUG_EBUS //#define DEBUG_TIMER #ifdef DEBUG_IRQ #define CPUIRQ_DPRINTF(fmt, ...) \ do { printf("CPUIRQ: " fmt , ## __VA_ARGS__); } while (0) #else #define CPUIRQ_DPRINTF(fmt, ...) #endif #ifdef DEBUG_EBUS #define EBUS_DPRINTF(fmt, ...) \ do { printf("EBUS: " fmt , ## __VA_ARGS__); } while (0) #else #define EBUS_DPRINTF(fmt, ...) #endif #ifdef DEBUG_TIMER #define TIMER_DPRINTF(fmt, ...) 
\ do { printf("TIMER: " fmt , ## __VA_ARGS__); } while (0) #else #define TIMER_DPRINTF(fmt, ...) #endif #define KERNEL_LOAD_ADDR 0x00404000 #define CMDLINE_ADDR 0x003ff000 #define PROM_SIZE_MAX (4 * 1024 * 1024) #define PROM_VADDR 0x000ffd00000ULL #define APB_SPECIAL_BASE 0x1fe00000000ULL #define APB_MEM_BASE 0x1ff00000000ULL #define APB_PCI_IO_BASE (APB_SPECIAL_BASE + 0x02000000ULL) #define PROM_FILENAME "openbios-sparc64" #define NVRAM_SIZE 0x2000 #define MAX_IDE_BUS 2 #define BIOS_CFG_IOPORT 0x510 #define FW_CFG_SPARC64_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) #define FW_CFG_SPARC64_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) #define FW_CFG_SPARC64_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) #define IVEC_MAX 0x30 #define TICK_MAX 0x7fffffffffffffffULL struct hwdef { const char * const default_cpu_model; uint16_t machine_id; uint64_t prom_addr; uint64_t console_serial_base; }; typedef struct EbusState { PCIDevice pci_dev; MemoryRegion bar0; MemoryRegion bar1; } EbusState; int DMA_get_channel_mode (int nchan) { return 0; } int DMA_read_memory (int nchan, void *buf, int pos, int size) { return 0; } int DMA_write_memory (int nchan, void *buf, int pos, int size) { return 0; } void DMA_hold_DREQ (int nchan) {} void DMA_release_DREQ (int nchan) {} void DMA_schedule(int nchan) {} void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit) { } void DMA_register_channel (int nchan, DMA_transfer_handler transfer_handler, void *opaque) { } static int fw_cfg_boot_set(void *opaque, const char *boot_device) { fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); return 0; } static int sun4u_NVRAM_set_params(M48t59State *nvram, uint16_t NVRAM_size, const char *arch, ram_addr_t RAM_size, const char *boot_devices, uint32_t kernel_image, uint32_t kernel_size, const char *cmdline, uint32_t initrd_image, uint32_t initrd_size, uint32_t NVRAM_image, int width, int height, int depth, const uint8_t *macaddr) { unsigned int i; uint32_t start, end; uint8_t image[0x1ff0]; struct OpenBIOS_nvpart_v1 
*part_header; memset(image, '\0', sizeof(image)); start = 0; // OpenBIOS nvram variables // Variable partition part_header = (struct OpenBIOS_nvpart_v1 *)&image[start]; part_header->signature = OPENBIOS_PART_SYSTEM; pstrcpy(part_header->name, sizeof(part_header->name), "system"); end = start + sizeof(struct OpenBIOS_nvpart_v1); for (i = 0; i < nb_prom_envs; i++) end = OpenBIOS_set_var(image, end, prom_envs[i]); // End marker image[end++] = '\0'; end = start + ((end - start + 15) & ~15); OpenBIOS_finish_partition(part_header, end - start); // free partition start = end; part_header = (struct OpenBIOS_nvpart_v1 *)&image[start]; part_header->signature = OPENBIOS_PART_FREE; pstrcpy(part_header->name, sizeof(part_header->name), "free"); end = 0x1fd0; OpenBIOS_finish_partition(part_header, end - start); Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, 0x80); for (i = 0; i < sizeof(image); i++) m48t59_write(nvram, i, image[i]); return 0; } static uint64_t sun4u_load_kernel(const char *kernel_filename, const char *initrd_filename, ram_addr_t RAM_size, uint64_t *initrd_size, uint64_t *initrd_addr, uint64_t *kernel_addr, uint64_t *kernel_entry) { int linux_boot; unsigned int i; long kernel_size; uint8_t *ptr; uint64_t kernel_top; linux_boot = (kernel_filename != NULL); kernel_size = 0; if (linux_boot) { int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_size = load_elf(kernel_filename, NULL, NULL, kernel_entry, kernel_addr, &kernel_top, 1, ELF_MACHINE, 0); if (kernel_size < 0) { *kernel_addr = KERNEL_LOAD_ADDR; *kernel_entry = KERNEL_LOAD_ADDR; kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, RAM_size - KERNEL_LOAD_ADDR, bswap_needed, TARGET_PAGE_SIZE); } if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, RAM_size - KERNEL_LOAD_ADDR); } if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* load initrd above kernel */ 
*initrd_size = 0; if (initrd_filename) { *initrd_addr = TARGET_PAGE_ALIGN(kernel_top); *initrd_size = load_image_targphys(initrd_filename, *initrd_addr, RAM_size - *initrd_addr); if ((int)*initrd_size < 0) { fprintf(stderr, "qemu: could not load initial ram disk '%s'\n", initrd_filename); exit(1); } } if (*initrd_size > 0) { for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) { ptr = rom_ptr(*kernel_addr + i); if (ldl_p(ptr + 8) == 0x48647253) { /* HdrS */ stl_p(ptr + 24, *initrd_addr + *kernel_addr); stl_p(ptr + 28, *initrd_size); break; } } } } return kernel_size; } void cpu_check_irqs(CPUSPARCState *env) { uint32_t pil = env->pil_in | (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ if (env->ivec_status & 0x20) { return; } /* check if TM or SM in SOFTINT are set setting these also causes interrupt 14 */ if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) { pil |= 1 << 14; } /* The bit corresponding to psrpil is (1<< psrpil), the next bit is (2 << psrpil). 
*/ if (pil < (2 << env->psrpil)){ if (env->interrupt_request & CPU_INTERRUPT_HARD) { CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n", env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); } return; } if (cpu_interrupts_enabled(env)) { unsigned int i; for (i = 15; i > env->psrpil; i--) { if (pil & (1 << i)) { int old_interrupt = env->interrupt_index; int new_interrupt = TT_EXTINT | i; if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) { CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d " "current %x >= pending %x\n", env->tl, cpu_tsptr(env)->tt, new_interrupt); } else if (old_interrupt != new_interrupt) { env->interrupt_index = new_interrupt; CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i, old_interrupt, new_interrupt); cpu_interrupt(env, CPU_INTERRUPT_HARD); } break; } } } else if (env->interrupt_request & CPU_INTERRUPT_HARD) { CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x " "current interrupt %x\n", pil, env->pil_in, env->softint, env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); } } static void cpu_kick_irq(SPARCCPU *cpu) { CPUSPARCState *env = &cpu->env; env->halted = 0; cpu_check_irqs(env); qemu_cpu_kick(CPU(cpu)); } static void cpu_set_ivec_irq(void *opaque, int irq, int level) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; if (level) { if (!(env->ivec_status & 0x20)) { CPUIRQ_DPRINTF("Raise IVEC IRQ %d\n", irq); env->halted = 0; env->interrupt_index = TT_IVEC; env->ivec_status |= 0x20; env->ivec_data[0] = (0x1f << 6) | irq; env->ivec_data[1] = 0; env->ivec_data[2] = 0; cpu_interrupt(env, CPU_INTERRUPT_HARD); } } else { if (env->ivec_status & 0x20) { CPUIRQ_DPRINTF("Lower IVEC IRQ %d\n", irq); env->ivec_status &= ~0x20; cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); } } } typedef struct ResetData { SPARCCPU *cpu; uint64_t prom_addr; } ResetData; void cpu_put_timer(QEMUFile *f, 
CPUTimer *s) { qemu_put_be32s(f, &s->frequency); qemu_put_be32s(f, &s->disabled); qemu_put_be64s(f, &s->disabled_mask); qemu_put_sbe64s(f, &s->clock_offset); qemu_put_timer(f, s->qtimer); } void cpu_get_timer(QEMUFile *f, CPUTimer *s) { qemu_get_be32s(f, &s->frequency); qemu_get_be32s(f, &s->disabled); qemu_get_be64s(f, &s->disabled_mask); qemu_get_sbe64s(f, &s->clock_offset); qemu_get_timer(f, s->qtimer); } static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu, QEMUBHFunc *cb, uint32_t frequency, uint64_t disabled_mask) { CPUTimer *timer = g_malloc0(sizeof (CPUTimer)); timer->name = name; timer->frequency = frequency; timer->disabled_mask = disabled_mask; timer->disabled = 1; timer->clock_offset = qemu_get_clock_ns(vm_clock); timer->qtimer = qemu_new_timer_ns(vm_clock, cb, cpu); return timer; } static void cpu_timer_reset(CPUTimer *timer) { timer->disabled = 1; timer->clock_offset = qemu_get_clock_ns(vm_clock); qemu_del_timer(timer->qtimer); } static void main_cpu_reset(void *opaque) { ResetData *s = (ResetData *)opaque; CPUSPARCState *env = &s->cpu->env; static unsigned int nr_resets; cpu_reset(CPU(s->cpu)); cpu_timer_reset(env->tick); cpu_timer_reset(env->stick); cpu_timer_reset(env->hstick); env->gregs[1] = 0; // Memory start env->gregs[2] = ram_size; // Memory size env->gregs[3] = 0; // Machine description XXX if (nr_resets++ == 0) { /* Power on reset */ env->pc = s->prom_addr + 0x20ULL; } else { env->pc = s->prom_addr + 0x40ULL; } env->npc = env->pc + 4; } static void tick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->tick; if (timer->disabled) { CPUIRQ_DPRINTF("tick_irq: softint disabled\n"); return; } else { CPUIRQ_DPRINTF("tick: fire\n"); } env->softint |= SOFTINT_TIMER; cpu_kick_irq(cpu); } static void stick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->stick; if (timer->disabled) { CPUIRQ_DPRINTF("stick_irq: softint disabled\n"); return; 
} else { CPUIRQ_DPRINTF("stick: fire\n"); } env->softint |= SOFTINT_STIMER; cpu_kick_irq(cpu); } static void hstick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->hstick; if (timer->disabled) { CPUIRQ_DPRINTF("hstick_irq: softint disabled\n"); return; } else { CPUIRQ_DPRINTF("hstick: fire\n"); } env->softint |= SOFTINT_STIMER; cpu_kick_irq(cpu); } static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency) { return muldiv64(cpu_ticks, get_ticks_per_sec(), frequency); } static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency) { return muldiv64(timer_ticks, frequency, get_ticks_per_sec()); } void cpu_tick_set_count(CPUTimer *timer, uint64_t count) { uint64_t real_count = count & ~timer->disabled_mask; uint64_t disabled_bit = count & timer->disabled_mask; int64_t vm_clock_offset = qemu_get_clock_ns(vm_clock) - cpu_to_timer_ticks(real_count, timer->frequency); TIMER_DPRINTF("%s set_count count=0x%016lx (%s) p=%p\n", timer->name, real_count, timer->disabled?"disabled":"enabled", timer); timer->disabled = disabled_bit ? 1 : 0; timer->clock_offset = vm_clock_offset; } uint64_t cpu_tick_get_count(CPUTimer *timer) { uint64_t real_count = timer_to_cpu_ticks( qemu_get_clock_ns(vm_clock) - timer->clock_offset, timer->frequency); TIMER_DPRINTF("%s get_count count=0x%016lx (%s) p=%p\n", timer->name, real_count, timer->disabled?"disabled":"enabled", timer); if (timer->disabled) real_count |= timer->disabled_mask; return real_count; } void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit) { int64_t now = qemu_get_clock_ns(vm_clock); uint64_t real_limit = limit & ~timer->disabled_mask; timer->disabled = (limit & timer->disabled_mask) ? 
1 : 0; int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) + timer->clock_offset; if (expires < now) { expires = now + 1; } TIMER_DPRINTF("%s set_limit limit=0x%016lx (%s) p=%p " "called with limit=0x%016lx at 0x%016lx (delta=0x%016lx)\n", timer->name, real_limit, timer->disabled?"disabled":"enabled", timer, limit, timer_to_cpu_ticks(now - timer->clock_offset, timer->frequency), timer_to_cpu_ticks(expires - now, timer->frequency)); if (!real_limit) { TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n", timer->name); qemu_del_timer(timer->qtimer); } else if (timer->disabled) { qemu_del_timer(timer->qtimer); } else { qemu_mod_timer(timer->qtimer, expires); } } static void isa_irq_handler(void *opaque, int n, int level) { static const int isa_irq_to_ivec[16] = { [1] = 0x29, /* keyboard */ [4] = 0x2b, /* serial */ [6] = 0x27, /* floppy */ [7] = 0x22, /* parallel */ [12] = 0x2a, /* mouse */ }; qemu_irq *irqs = opaque; int ivec; assert(n < 16); ivec = isa_irq_to_ivec[n]; EBUS_DPRINTF("Set ISA IRQ %d level %d -> ivec 0x%x\n", n, level, ivec); if (ivec) { qemu_set_irq(irqs[ivec], level); } } /* EBUS (Eight bit bus) bridge */ static ISABus * pci_ebus_init(PCIBus *bus, int devfn, qemu_irq *irqs) { qemu_irq *isa_irq; PCIDevice *pci_dev; ISABus *isa_bus; pci_dev = pci_create_simple(bus, devfn, "ebus"); isa_bus = DO_UPCAST(ISABus, qbus, qdev_get_child_bus(&pci_dev->qdev, "isa.0")); isa_irq = qemu_allocate_irqs(isa_irq_handler, irqs, 16); isa_bus_irqs(isa_bus, isa_irq); return isa_bus; } static int pci_ebus_init1(PCIDevice *pci_dev) { EbusState *s = DO_UPCAST(EbusState, pci_dev, pci_dev); isa_bus_new(&pci_dev->qdev, pci_address_space_io(pci_dev)); pci_dev->config[0x04] = 0x06; // command = bus master, pci mem pci_dev->config[0x05] = 0x00; pci_dev->config[0x06] = 0xa0; // status = fast back-to-back, 66MHz, no error pci_dev->config[0x07] = 0x03; // status = medium devsel pci_dev->config[0x09] = 0x00; // programming i/f pci_dev->config[0x0D] = 0x0a; // 
latency_timer isa_mmio_setup(&s->bar0, 0x1000000); pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); isa_mmio_setup(&s->bar1, 0x800000); pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1); return 0; } static void ebus_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = pci_ebus_init1; k->vendor_id = PCI_VENDOR_ID_SUN; k->device_id = PCI_DEVICE_ID_SUN_EBUS; k->revision = 0x01; k->class_id = PCI_CLASS_BRIDGE_OTHER; } static const TypeInfo ebus_info = { .name = "ebus", .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(EbusState), .class_init = ebus_class_init, }; typedef struct PROMState { SysBusDevice busdev; MemoryRegion prom; } PROMState; static uint64_t translate_prom_address(void *opaque, uint64_t addr) { hwaddr *base_addr = (hwaddr *)opaque; return addr + *base_addr - PROM_VADDR; } /* Boot PROM (OpenBIOS) */ static void prom_init(hwaddr addr, const char *bios_name) { DeviceState *dev; SysBusDevice *s; char *filename; int ret; dev = qdev_create(NULL, "openprom"); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); sysbus_mmio_map(s, 0, addr); /* load boot prom */ if (bios_name == NULL) { bios_name = PROM_FILENAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { ret = load_elf(filename, translate_prom_address, &addr, NULL, NULL, NULL, 1, ELF_MACHINE, 0); if (ret < 0 || ret > PROM_SIZE_MAX) { ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); } g_free(filename); } else { ret = -1; } if (ret < 0 || ret > PROM_SIZE_MAX) { fprintf(stderr, "qemu: could not load prom '%s'\n", bios_name); exit(1); } } static int prom_init1(SysBusDevice *dev) { PROMState *s = FROM_SYSBUS(PROMState, dev); memory_region_init_ram(&s->prom, "sun4u.prom", PROM_SIZE_MAX); vmstate_register_ram_global(&s->prom); memory_region_set_readonly(&s->prom, true); sysbus_init_mmio(dev, &s->prom); return 0; } static Property prom_properties[] = { {/* end of property list */}, }; static 
void prom_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = prom_init1; dc->props = prom_properties; } static const TypeInfo prom_info = { .name = "openprom", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(PROMState), .class_init = prom_class_init, }; typedef struct RamDevice { SysBusDevice busdev; MemoryRegion ram; uint64_t size; } RamDevice; /* System RAM */ static int ram_init1(SysBusDevice *dev) { RamDevice *d = FROM_SYSBUS(RamDevice, dev); memory_region_init_ram(&d->ram, "sun4u.ram", d->size); vmstate_register_ram_global(&d->ram); sysbus_init_mmio(dev, &d->ram); return 0; } static void ram_init(hwaddr addr, ram_addr_t RAM_size) { DeviceState *dev; SysBusDevice *s; RamDevice *d; /* allocate RAM */ dev = qdev_create(NULL, "memory"); s = SYS_BUS_DEVICE(dev); d = FROM_SYSBUS(RamDevice, s); d->size = RAM_size; qdev_init_nofail(dev); sysbus_mmio_map(s, 0, addr); } static Property ram_properties[] = { DEFINE_PROP_UINT64("size", RamDevice, size, 0), DEFINE_PROP_END_OF_LIST(), }; static void ram_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = ram_init1; dc->props = ram_properties; } static const TypeInfo ram_info = { .name = "memory", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(RamDevice), .class_init = ram_class_init, }; static SPARCCPU *cpu_devinit(const char *cpu_model, const struct hwdef *hwdef) { SPARCCPU *cpu; CPUSPARCState *env; ResetData *reset_info; uint32_t tick_frequency = 100*1000000; uint32_t stick_frequency = 100*1000000; uint32_t hstick_frequency = 100*1000000; if (cpu_model == NULL) { cpu_model = hwdef->default_cpu_model; } cpu = cpu_sparc_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find Sparc CPU definition\n"); exit(1); } env = &cpu->env; env->tick = cpu_timer_create("tick", cpu, tick_irq, tick_frequency, TICK_NPT_MASK); 
env->stick = cpu_timer_create("stick", cpu, stick_irq, stick_frequency, TICK_INT_DIS); env->hstick = cpu_timer_create("hstick", cpu, hstick_irq, hstick_frequency, TICK_INT_DIS); reset_info = g_malloc0(sizeof(ResetData)); reset_info->cpu = cpu; reset_info->prom_addr = hwdef->prom_addr; qemu_register_reset(main_cpu_reset, reset_info); return cpu; } static void sun4uv_init(MemoryRegion *address_space_mem, ram_addr_t RAM_size, const char *boot_devices, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, const struct hwdef *hwdef) { SPARCCPU *cpu; M48t59State *nvram; unsigned int i; uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; PCIBus *pci_bus, *pci_bus2, *pci_bus3; ISABus *isa_bus; qemu_irq *ivec_irqs, *pbm_irqs; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; void *fw_cfg; /* init CPUs */ cpu = cpu_devinit(cpu_model, hwdef); /* set up devices */ ram_init(0, RAM_size); prom_init(hwdef->prom_addr, bios_name); ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX); pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2, &pci_bus3, &pbm_irqs); pci_vga_init(pci_bus); // XXX Should be pci_bus3 isa_bus = pci_ebus_init(pci_bus, -1, pbm_irqs); i = 0; if (hwdef->console_serial_base) { serial_mm_init(address_space_mem, hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], DEVICE_BIG_ENDIAN); i++; } for(; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_isa_init(isa_bus, i, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(isa_bus, i, parallel_hds[i]); } } for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], "ne2k_pci", NULL); ide_drive_get(hd, MAX_IDE_BUS); pci_cmd646_ide_init(pci_bus, hd, 1); isa_create_simple(isa_bus, "i8042"); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(isa_bus, fd); nvram = m48t59_init_isa(isa_bus, 0x0074, 
NVRAM_SIZE, 59); initrd_size = 0; initrd_addr = 0; kernel_size = sun4u_load_kernel(kernel_filename, initrd_filename, ram_size, &initrd_size, &initrd_addr, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", RAM_size, boot_devices, kernel_addr, kernel_size, kernel_cmdline, initrd_addr, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, boot_devices[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } enum { sun4u_id = 0, sun4v_id = 64, niagara_id, }; static const struct hwdef hwdefs[] = { /* Sun4u generic PC-like machine */ { .default_cpu_model = "TI UltraSparc IIi", .machine_id = sun4u_id, .prom_addr = 0x1fff0000000ULL, .console_serial_base = 0, }, /* Sun4v generic PC-like machine */ { .default_cpu_model = "Sun UltraSparc T1", .machine_id = sun4v_id, .prom_addr = 0x1fff0000000ULL, .console_serial_base = 0, }, /* Sun4v generic Niagara machine */ { .default_cpu_model = "Sun UltraSparc T1", 
.machine_id = niagara_id, .prom_addr = 0xfff0000000ULL, .console_serial_base = 0xfff0c2c000ULL, }, }; /* Sun4u hardware initialisation */ static void sun4u_init(QEMUMachineInitArgs *args) { ram_addr_t RAM_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; const char *boot_devices = args->boot_device; sun4uv_init(get_system_memory(), RAM_size, boot_devices, kernel_filename, kernel_cmdline, initrd_filename, cpu_model, &hwdefs[0]); } /* Sun4v hardware initialisation */ static void sun4v_init(QEMUMachineInitArgs *args) { ram_addr_t RAM_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; const char *boot_devices = args->boot_device; sun4uv_init(get_system_memory(), RAM_size, boot_devices, kernel_filename, kernel_cmdline, initrd_filename, cpu_model, &hwdefs[1]); } /* Niagara hardware initialisation */ static void niagara_init(QEMUMachineInitArgs *args) { ram_addr_t RAM_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; const char *boot_devices = args->boot_device; sun4uv_init(get_system_memory(), RAM_size, boot_devices, kernel_filename, kernel_cmdline, initrd_filename, cpu_model, &hwdefs[2]); } static QEMUMachine sun4u_machine = { .name = "sun4u", .desc = "Sun4u platform", .init = sun4u_init, .max_cpus = 1, // XXX for now .is_default = 1, DEFAULT_MACHINE_OPTIONS, }; static QEMUMachine sun4v_machine = { .name = "sun4v", .desc = "Sun4v platform", .init = sun4v_init, .max_cpus = 1, // XXX for now DEFAULT_MACHINE_OPTIONS, }; static QEMUMachine niagara_machine = { .name = 
"Niagara", .desc = "Sun4v platform, Niagara", .init = niagara_init, .max_cpus = 1, // XXX for now DEFAULT_MACHINE_OPTIONS, }; static void sun4u_register_types(void) { type_register_static(&ebus_info); type_register_static(&prom_info); type_register_static(&ram_info); } static void sun4u_machine_init(void) { qemu_register_machine(&sun4u_machine); qemu_register_machine(&sun4v_machine); qemu_register_machine(&niagara_machine); } type_init(sun4u_register_types) machine_init(sun4u_machine_init);
gpl-2.0
nmacs/lm3s-uclinux
lib/libtommath/libtommath-0.42.0/bn_mp_cnt_lsb.c
10
1247
#include <tommath.h>
#ifdef BN_MP_CNT_LSB_C
/* LibTomMath, multiple-precision integer library -- Tom St Denis
 *
 * LibTomMath is a library that provides multiple-precision
 * integer arithmetic as well as number theoretic functionality.
 *
 * The library was designed directly after the MPI library by
 * Michael Fromberger but has been written from scratch with
 * additional optimizations in place.
 *
 * The library is free for all purposes without any express
 * guarantee it works.
 *
 * Tom St Denis, tomstdenis@gmail.com, http://libtom.org
 */

/* Nibble lookup table: lnz[v] is the index of the lowest set bit of the
 * 4-bit value v.  lnz[0] == 4 so that an all-zero nibble advances the
 * scan by a full nibble width (and the do/while below keeps going). */
static const int lnz[16] = {
   4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};

/* Counts the number of least-significant bits which are zero before the
 * first one bit, i.e. the index of the lowest set bit of a.
 * Returns 0 when a is zero (which has no set bit). */
int mp_cnt_lsb(mp_int *a)
{
   int x;
   mp_digit q, qq;

   /* easy out: zero has no lowest set bit */
   if (mp_iszero(a) == 1) {
      return 0;
   }

   /* scan lower digits until non-zero; x ends at the first digit that
    * contains a set bit (guaranteed to exist since a != 0) */
   for (x = 0; x < a->used && a->dp[x] == 0; x++);
   q = a->dp[x];
   /* each skipped digit contributes DIGIT_BIT zero bits */
   x *= DIGIT_BIT;

   /* now scan this digit until a 1 is found, one nibble at a time;
    * if bit 0 is already set the loop is skipped and x is exact */
   if ((q & 1) == 0) {
      do {
         qq  = q & 15;      /* low nibble */
         x  += lnz[qq];     /* 4 if nibble all-zero, else lowest-bit index */
         q >>= 4;
      } while (qq == 0);    /* stop once a nibble with a set bit was seen */
   }
   return x;
}
#endif

/* $Source$ */
/* $Revision: 0.41 $ */
/* $Date: 2007-04-18 09:58:18 +0000 $ */
gpl-2.0
ricardogsilva/QGIS
src/core/pal/layer.cpp
10
16605
/* * libpal - Automated Placement of Labels Library * * Copyright (C) 2008 Maxence Laurent, MIS-TIC, HEIG-VD * University of Applied Sciences, Western Switzerland * http://www.hes-so.ch * * Contact: * maxence.laurent <at> heig-vd <dot> ch * or * eric.taillard <at> heig-vd <dot> ch * * This file is part of libpal. * * libpal is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * libpal is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with libpal. If not, see <http://www.gnu.org/licenses/>. * */ #include "pal.h" #include "layer.h" #include "palexception.h" #include "internalexception.h" #include "feature.h" #include "geomfunction.h" #include "util.h" #include "qgslabelingengine.h" #include "qgslogger.h" #include <cmath> #include <vector> using namespace pal; Layer::Layer( QgsAbstractLabelProvider *provider, const QString &name, QgsPalLayerSettings::Placement arrangement, double defaultPriority, bool active, bool toLabel, Pal *pal, bool displayAll ) : mProvider( provider ) , mName( name ) , mPal( pal ) , mActive( active ) , mLabelLayer( toLabel ) , mDisplayAll( displayAll ) , mCentroidInside( false ) , mArrangement( arrangement ) , mMergeLines( false ) , mUpsidedownLabels( Upright ) { if ( defaultPriority < 0.0001 ) mDefaultPriority = 0.0001; else if ( defaultPriority > 1.0 ) mDefaultPriority = 1.0; else mDefaultPriority = defaultPriority; } Layer::~Layer() { mMutex.lock(); qDeleteAll( mObstacleParts ); mMutex.unlock(); } void Layer::setPriority( double priority ) { if ( priority >= 1.0 ) // low priority mDefaultPriority = 1.0; else if ( 
priority <= 0.0001 ) mDefaultPriority = 0.0001; // high priority else mDefaultPriority = priority; } bool Layer::registerFeature( QgsLabelFeature *lf ) { if ( lf->size().width() < 0 || lf->size().height() < 0 ) return false; QMutexLocker locker( &mMutex ); if ( mHashtable.contains( lf->id() ) ) { //A feature with this id already exists. Don't throw an exception as sometimes, //the same feature is added twice (dateline split with otf-reprojection) return false; } // assign label feature to this PAL layer lf->setLayer( this ); // Split MULTI GEOM and Collection in simple geometries bool addedFeature = false; double geom_size = -1, biggest_size = -1; std::unique_ptr<FeaturePart> biggestPart; // break the (possibly multi-part) geometry into simple geometries std::unique_ptr<QLinkedList<const GEOSGeometry *>> simpleGeometries( Util::unmulti( lf->geometry() ) ); if ( !simpleGeometries ) // unmulti() failed? { throw InternalException::UnknownGeometry(); } GEOSContextHandle_t geosctxt = QgsGeos::getGEOSHandler(); const bool featureGeomIsObstacleGeom = lf->obstacleSettings().obstacleGeometry().isNull(); while ( !simpleGeometries->isEmpty() ) { const GEOSGeometry *geom = simpleGeometries->takeFirst(); // ignore invalid geometries (e.g. polygons with self-intersecting rings) if ( GEOSisValid_r( geosctxt, geom ) != 1 ) // 0=invalid, 1=valid, 2=exception { continue; } const int type = GEOSGeomTypeId_r( geosctxt, geom ); if ( type != GEOS_POINT && type != GEOS_LINESTRING && type != GEOS_POLYGON ) { throw InternalException::UnknownGeometry(); } std::unique_ptr<FeaturePart> fpart = std::make_unique<FeaturePart>( lf, geom ); // ignore invalid geometries if ( ( type == GEOS_LINESTRING && fpart->nbPoints < 2 ) || ( type == GEOS_POLYGON && fpart->nbPoints < 3 ) ) { continue; } // polygons: reorder coordinates if ( type == GEOS_POLYGON && !GeomFunction::reorderPolygon( fpart->x, fpart->y ) ) { continue; } // is the feature well defined? 
TODO Check epsilon const bool labelWellDefined = ( lf->size().width() > 0.0000001 && lf->size().height() > 0.0000001 ); if ( lf->obstacleSettings().isObstacle() && featureGeomIsObstacleGeom ) { //if we are not labeling the layer, only insert it into the obstacle list and avoid an //unnecessary copy if ( mLabelLayer && labelWellDefined ) { addObstaclePart( new FeaturePart( *fpart ) ); } else { addObstaclePart( fpart.release() ); } } // feature has to be labeled? if ( !mLabelLayer || !labelWellDefined ) { //nothing more to do for this part continue; } if ( !lf->labelAllParts() && ( type == GEOS_POLYGON || type == GEOS_LINESTRING ) ) { if ( type == GEOS_LINESTRING ) geom_size = fpart->length(); else if ( type == GEOS_POLYGON ) geom_size = fpart->area(); if ( geom_size > biggest_size ) { biggest_size = geom_size; biggestPart = std::move( fpart ); } // don't add the feature part now, do it later } else { // feature part is ready! addFeaturePart( std::move( fpart ), lf->labelText() ); addedFeature = true; } } if ( lf->obstacleSettings().isObstacle() && !featureGeomIsObstacleGeom ) { //do the same for the obstacle geometry const QgsGeometry obstacleGeometry = lf->obstacleSettings().obstacleGeometry(); for ( auto it = obstacleGeometry.const_parts_begin(); it != obstacleGeometry.const_parts_end(); ++it ) { geos::unique_ptr geom = QgsGeos::asGeos( *it ); if ( !geom ) { QgsDebugMsg( QStringLiteral( "Obstacle geometry passed to PAL labeling engine could not be converted to GEOS! %1" ).arg( ( *it )->asWkt() ) ); continue; } // ignore invalid geometries (e.g. polygons with self-intersecting rings) if ( GEOSisValid_r( geosctxt, geom.get() ) != 1 ) // 0=invalid, 1=valid, 2=exception { // this shouldn't happen -- we have already checked this while registering the feature QgsDebugMsg( QStringLiteral( "Obstacle geometry passed to PAL labeling engine is not valid! 
%1" ).arg( ( *it )->asWkt() ) ); continue; } const int type = GEOSGeomTypeId_r( geosctxt, geom.get() ); if ( type != GEOS_POINT && type != GEOS_LINESTRING && type != GEOS_POLYGON ) { throw InternalException::UnknownGeometry(); } std::unique_ptr<FeaturePart> fpart = std::make_unique<FeaturePart>( lf, geom.get() ); // ignore invalid geometries if ( ( type == GEOS_LINESTRING && fpart->nbPoints < 2 ) || ( type == GEOS_POLYGON && fpart->nbPoints < 3 ) ) { continue; } // polygons: reorder coordinates if ( type == GEOS_POLYGON && !GeomFunction::reorderPolygon( fpart->x, fpart->y ) ) { continue; } mGeosObstacleGeometries.emplace_back( std::move( geom ) ); // feature part is ready! addObstaclePart( fpart.release() ); } } locker.unlock(); // if using only biggest parts... if ( ( !lf->labelAllParts() || lf->hasFixedPosition() ) && biggestPart ) { addFeaturePart( std::move( biggestPart ), lf->labelText() ); addedFeature = true; } // add feature to layer if we have added something if ( addedFeature ) { mHashtable.insert( lf->id(), lf ); } return addedFeature; // true if we've added something } void Layer::addFeaturePart( std::unique_ptr<FeaturePart> fpart, const QString &labelText ) { // add to hashtable with equally named feature parts if ( mMergeLines && !labelText.isEmpty() ) { mConnectedHashtable[ labelText ].append( fpart.get() ); } // add to list of layer's feature parts mFeatureParts.emplace_back( std::move( fpart ) ); } void Layer::addObstaclePart( FeaturePart *fpart ) { // add to list of layer's feature parts mObstacleParts.append( fpart ); } static FeaturePart *_findConnectedPart( FeaturePart *partCheck, const QVector<FeaturePart *> &otherParts ) { // iterate in the rest of the parts with the same label auto it = otherParts.constBegin(); while ( it != otherParts.constEnd() ) { if ( partCheck->isConnected( *it ) ) { // stop checking for other connected parts return *it; } ++it; } return nullptr; // no connected part found... 
} void Layer::joinConnectedFeatures() { // go through all label texts int connectedFeaturesId = 0; for ( auto it = mConnectedHashtable.constBegin(); it != mConnectedHashtable.constEnd(); ++it ) { QVector<FeaturePart *> partsToMerge = it.value(); // need to start with biggest parts first, to avoid merging in side branches before we've // merged the whole of the longest parts of the joined network std::sort( partsToMerge.begin(), partsToMerge.end(), []( FeaturePart * a, FeaturePart * b ) { return a->length() > b->length(); } ); // go one-by-one part, try to merge while ( partsToMerge.count() > 1 ) { connectedFeaturesId++; // part we'll be checking against other in this round FeaturePart *partToJoinTo = partsToMerge.takeFirst(); mConnectedFeaturesIds.insert( partToJoinTo->featureId(), connectedFeaturesId ); // loop through all other parts QVector< FeaturePart *> partsLeftToTryThisRound = partsToMerge; while ( !partsLeftToTryThisRound.empty() ) { if ( FeaturePart *otherPart = _findConnectedPart( partToJoinTo, partsLeftToTryThisRound ) ) { partsLeftToTryThisRound.removeOne( otherPart ); if ( partToJoinTo->mergeWithFeaturePart( otherPart ) ) { mConnectedFeaturesIds.insert( otherPart->featureId(), connectedFeaturesId ); // otherPart was merged into partToJoinTo, so now we completely delete the redundant feature part which was merged in partsToMerge.removeAll( otherPart ); const auto matchingPartIt = std::find_if( mFeatureParts.begin(), mFeatureParts.end(), [otherPart]( const std::unique_ptr< FeaturePart> &part ) { return part.get() == otherPart; } ); Q_ASSERT( matchingPartIt != mFeatureParts.end() ); mFeatureParts.erase( matchingPartIt ); } } else { // no candidate parts remain which we could possibly merge in break; } } } } mConnectedHashtable.clear(); // Expunge feature parts that are smaller than the minimum size required mFeatureParts.erase( std::remove_if( mFeatureParts.begin(), mFeatureParts.end(), []( const std::unique_ptr< FeaturePart > &part ) { if ( 
part->feature()->minimumSize() != 0.0 && part->length() < part->feature()->minimumSize() ) { return true; } return false; } ), mFeatureParts.end() ); } int Layer::connectedFeatureId( QgsFeatureId featureId ) const { return mConnectedFeaturesIds.value( featureId, -1 ); } void Layer::chopFeaturesAtRepeatDistance() { GEOSContextHandle_t geosctxt = QgsGeos::getGEOSHandler(); std::deque< std::unique_ptr< FeaturePart > > newFeatureParts; while ( !mFeatureParts.empty() ) { std::unique_ptr< FeaturePart > fpart = std::move( mFeatureParts.front() ); mFeatureParts.pop_front(); const GEOSGeometry *geom = fpart->geos(); double chopInterval = fpart->repeatDistance(); // whether we CAN chop bool canChop = false; double featureLen = 0; if ( chopInterval != 0. && GEOSGeomTypeId_r( geosctxt, geom ) == GEOS_LINESTRING ) { featureLen = fpart->length(); if ( featureLen > chopInterval ) canChop = true; } // whether we SHOULD chop bool shouldChop = canChop; int possibleSegments = 0; if ( canChop ) { // never chop into segments smaller than required for the actual label text chopInterval *= std::ceil( fpart->getLabelWidth() / fpart->repeatDistance() ); // now work out how many full segments we could chop this line into possibleSegments = static_cast< int >( std::floor( featureLen / chopInterval ) ); // ... and use this to work out the actual chop distance for this line. Otherwise, we risk the // situation of: // 1. Line length of 3cm // 2. Repeat distance of 2cm // 3. Label size is 1.5 cm // // 2cm 1cm // /--Label--/----/ // // i.e. 
the labels would be off center and gravitate toward line starts chopInterval = featureLen / possibleSegments; shouldChop = possibleSegments > 1; } if ( shouldChop ) { const GEOSCoordSequence *cs = GEOSGeom_getCoordSeq_r( geosctxt, geom ); // get number of points unsigned int n; GEOSCoordSeq_getSize_r( geosctxt, cs, &n ); // Read points std::vector<Point> points( n ); for ( unsigned int i = 0; i < n; ++i ) { #if GEOS_VERSION_MAJOR>3 || GEOS_VERSION_MINOR>=8 GEOSCoordSeq_getXY_r( geosctxt, cs, i, &points[i].x, &points[i].y ); #else GEOSCoordSeq_getX_r( geosctxt, cs, i, &points[i].x ); GEOSCoordSeq_getY_r( geosctxt, cs, i, &points[i].y ); #endif } // Cumulative length vector std::vector<double> len( n, 0 ); for ( unsigned int i = 1; i < n; ++i ) { const double dx = points[i].x - points[i - 1].x; const double dy = points[i].y - points[i - 1].y; len[i] = len[i - 1] + std::sqrt( dx * dx + dy * dy ); } // Walk along line unsigned int cur = 0; double lambda = 0; std::vector<Point> part; QList<FeaturePart *> repeatParts; repeatParts.reserve( possibleSegments ); for ( int segment = 0; segment < possibleSegments; segment++ ) { lambda += chopInterval; for ( ; cur < n && lambda > len[cur]; ++cur ) { part.push_back( points[cur] ); } if ( cur >= n ) { // Create final part GEOSCoordSequence *cooSeq = GEOSCoordSeq_create_r( geosctxt, static_cast< unsigned int >( part.size() ), 2 ); for ( unsigned int i = 0; i < part.size(); ++i ) { #if GEOS_VERSION_MAJOR>3 || GEOS_VERSION_MINOR>=8 GEOSCoordSeq_setXY_r( geosctxt, cooSeq, i, part[i].x, part[i].y ); #else GEOSCoordSeq_setX_r( geosctxt, cooSeq, i, part[i].x ); GEOSCoordSeq_setY_r( geosctxt, cooSeq, i, part[i].y ); #endif } GEOSGeometry *newgeom = GEOSGeom_createLineString_r( geosctxt, cooSeq ); std::unique_ptr< FeaturePart > newfpart = std::make_unique< FeaturePart >( fpart->feature(), newgeom ); repeatParts.push_back( newfpart.get() ); newFeatureParts.emplace_back( std::move( newfpart ) ); break; } const double c = ( lambda - len[cur 
- 1] ) / ( len[cur] - len[cur - 1] ); Point p; p.x = points[cur - 1].x + c * ( points[cur].x - points[cur - 1].x ); p.y = points[cur - 1].y + c * ( points[cur].y - points[cur - 1].y ); part.push_back( p ); GEOSCoordSequence *cooSeq = GEOSCoordSeq_create_r( geosctxt, static_cast< unsigned int >( part.size() ), 2 ); for ( std::size_t i = 0; i < part.size(); ++i ) { #if GEOS_VERSION_MAJOR>3 || GEOS_VERSION_MINOR>=8 GEOSCoordSeq_setXY_r( geosctxt, cooSeq, i, part[i].x, part[i].y ); #else GEOSCoordSeq_setX_r( geosctxt, cooSeq, static_cast< unsigned int >( i ), part[i].x ); GEOSCoordSeq_setY_r( geosctxt, cooSeq, static_cast< unsigned int >( i ), part[i].y ); #endif } GEOSGeometry *newgeom = GEOSGeom_createLineString_r( geosctxt, cooSeq ); std::unique_ptr< FeaturePart > newfpart = std::make_unique< FeaturePart >( fpart->feature(), newgeom ); repeatParts.push_back( newfpart.get() ); newFeatureParts.emplace_back( std::move( newfpart ) ); part.clear(); part.push_back( p ); } for ( FeaturePart *partPtr : repeatParts ) partPtr->setTotalRepeats( repeatParts.count() ); } else { newFeatureParts.emplace_back( std::move( fpart ) ); } } mFeatureParts = std::move( newFeatureParts ); } template class QgsGenericSpatialIndex<pal::FeaturePart>;
gpl-2.0
NamanG/coreboot2.0
src/mainboard/abit/be6-ii_v2_0/irq_tables.c
10
2417
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007 Uwe Hermann <uwe@hermann-uwe.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <arch/pirq_routing.h>

/*
 * PCI interrupt routing table ("$PIR") for this 440BX/PIIX4 board.
 * The table is copied into low memory at boot so the OS can learn how
 * the router's PIRQ link values (0x60-0x63 here) map onto each PCI
 * device's INTA#-INTD# pins; bitmap 0xdeb8 lists the ISA IRQs a link
 * may be routed to.
 *
 * NOTE(review): the Checksum byte (0x4b) is hand-maintained and must
 * make the entire structure sum to zero mod 256 -- re-verify it if any
 * field or entry is ever changed (TODO confirm it matches as-is).
 */
static const struct irq_routing_table intel_irq_routing_table = {
	PIRQ_SIGNATURE,		/* u32 signature "$PIR" */
	PIRQ_VERSION,		/* u16 version */
	32 + 16 * CONFIG_IRQ_SLOT_COUNT,/* Max. number of devices on the bus */
	0x00,			/* Interrupt router bus */
	(0x07 << 3) | 0x0,	/* Interrupt router device (PIIX4 at dev 7) */
	0x1c20,			/* IRQs devoted exclusively to PCI usage */
	0x8086,			/* Vendor */
	0x7000,			/* Device */
	0,			/* Miniport data */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* u8 rfu[11] */
	0x4b,			/* Checksum */
	{
		/* bus, dev|fn, {link, bitmap}, {link, bitmap}, {link, bitmap}, {link, bitmap}, slot, rfu */
		{0x00, (0x13 << 3) | 0x0, {{0x62, 0xdeb8}, {0x63, 0xdeb8}, {0x60, 0xdeb8}, {0x61, 0x0deb8}}, 0x1, 0x0},
		{0x00, (0x11 << 3) | 0x0, {{0x60, 0xdeb8}, {0x61, 0xdeb8}, {0x62, 0xdeb8}, {0x63, 0x0deb8}}, 0x2, 0x0},
		{0x00, (0x0f << 3) | 0x0, {{0x61, 0xdeb8}, {0x63, 0xdeb8}, {0x62, 0xdeb8}, {0x60, 0x0deb8}}, 0x3, 0x0},
		{0x00, (0x0d << 3) | 0x0, {{0x62, 0xdeb8}, {0x63, 0xdeb8}, {0x60, 0xdeb8}, {0x61, 0x0deb8}}, 0x4, 0x0},
		{0x00, (0x0b << 3) | 0x0, {{0x63, 0xdeb8}, {0x60, 0xdeb8}, {0x61, 0xdeb8}, {0x62, 0x0deb8}}, 0x5, 0x0},
		{0x00, (0x09 << 3) | 0x0, {{0x61, 0xdeb8}, {0x60, 0xdeb8}, {0x63, 0xdeb8}, {0x62, 0x0deb8}}, 0x6, 0x0},
		{0x00, (0x08 << 3) | 0x0, {{0x62, 0xdeb8}, {0x63, 0xdeb8}, {0x60, 0xdeb8}, {0x61, 0x0deb8}}, 0x7, 0x0},
		/* On-board functions (slot 0): dev 7 fn 1 and dev 1 fn 0 */
		{0x00, (0x07 << 3) | 0x1, {{0x60, 0xdeb8}, {0x61, 0xdeb8}, {0x62, 0xdeb8}, {0x63, 0x0deb8}}, 0x0, 0x0},
		{0x00, (0x01 << 3) | 0x0, {{0x60, 0xdeb8}, {0x61, 0xdeb8}, {0x62, 0xdeb8}, {0x63, 0x0deb8}}, 0x0, 0x0},
	}
};

/*
 * Copy the routing table to addr and return the first free address
 * after it; called from coreboot's table-writing path during boot.
 */
unsigned long write_pirq_routing_table(unsigned long addr)
{
	return copy_pirq_routing_table(addr, &intel_irq_routing_table);
}
gpl-2.0
hajuuk/R7000
ap/gpl/timemachine/netatalk-2.2.5/libatalk/adouble/ad_write.c
10
3708
/*
 * Copyright (c) 1990,1995 Regents of The University of Michigan.
 * All Rights Reserved.  See COPYRIGHT.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */

#include <atalk/adouble.h>
#include <string.h>
#include <sys/param.h>
#include <errno.h>

#ifndef MIN
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif /* ! MIN */

/* XXX: locking has to be checked before each stream of consecutive
 * ad_writes to prevent a lock in the middle from causing problems.
 */

/*
 * Positioned write on an AppleDouble fd wrapper.
 *
 * With pwrite(2) available this is a straight pass-through.  Without it,
 * the wrapper emulates pwrite by seeking only when the cached offset
 * (adf_off) differs from the requested one, then updating the cache by
 * however many bytes were actually written.
 * Returns the byte count written, or -1 (errno set by lseek/write).
 */
ssize_t adf_pwrite(struct ad_fd *ad_fd, const void *buf, size_t count, off_t offset)
{
    ssize_t cc;

#ifndef HAVE_PWRITE
    if ( ad_fd->adf_off != offset ) {
        if ( lseek( ad_fd->adf_fd, offset, SEEK_SET ) < 0 ) {
            return -1;
        }
        ad_fd->adf_off = offset;
    }
    cc = write( ad_fd->adf_fd, buf, count );
    if ( cc < 0 ) {
        return -1;
    }
    /* track the new file position so the next call can skip the seek */
    ad_fd->adf_off += cc;
#else
    cc = pwrite(ad_fd->adf_fd, buf, count, offset );
#endif
    return cc;
}

/*
 * Write buflen bytes to the data fork (ADEID_DFORK) or resource fork
 * (ADEID_RFORK) of an AppleDouble pair at offset off.
 *
 * end != 0 makes off relative to the end of the fork ("end is always 0"
 * for current callers, per the original comment).  Returns bytes
 * written, or -1 with errno set (EACCES for symlinks, marked by the
 * sentinel data fileno -2; any other eid is rejected with -1).
 */
ssize_t ad_write(struct adouble *ad, const u_int32_t eid, off_t off, const int end, const char *buf, const size_t buflen)
{
    struct stat st;
    ssize_t cc;

    if (ad_data_fileno(ad) == -2) {
        /* It's a symlink -- writing through it is refused outright */
        errno = EACCES;
        return -1;
    }

    if ( eid == ADEID_DFORK ) {
        if ( end ) {
            if ( fstat( ad_data_fileno(ad), &st ) < 0 ) {
                return( -1 );
            }
            /* convert end-relative offset to absolute */
            off = st.st_size - off;
        }
        cc = adf_pwrite(&ad->ad_data_fork, buf, buflen, off);
    } else if ( eid == ADEID_RFORK ) {
        off_t r_off;

        if ( end ) {
            /* NOTE(review): this stats the *data* fork fd to compute an
             * end-relative offset into the *resource* fork; check
             * whether ad_reso_fileno() was intended here -- TODO
             * confirm against upstream netatalk. */
            if ( fstat( ad_data_fileno(ad), &st ) < 0 ) {
                return( -1 );
            }
            off = st.st_size - off - ad_getentryoff(ad, eid);
        }
        /* resource-fork data lives at its entry offset within the file */
        r_off = ad_getentryoff(ad, eid) + off;
        cc = adf_pwrite(&ad->ad_resource_fork, buf, buflen, r_off);

        /* sync up our internal header buffer.  FIXME always false?
         * (r_off = entryoff + off, so with off >= 0 this condition can
         * never hold -- presumably dead code; verify before removing) */
        if (r_off < ad_getentryoff(ad, ADEID_RFORK)) {
            memcpy(ad->ad_data + r_off, buf, MIN(sizeof(ad->ad_data) - r_off, cc));
        }
        /* grow the cached resource-fork length if we extended it */
        if ( ad->ad_rlen < off + cc ) {
            ad->ad_rlen = off + cc;
        }
    } else {
        return -1; /* we don't know how to write if it's not a resource or data fork */
    }
    return( cc );
}

/*
 * Like ftruncate(2), but also works on systems where ftruncate cannot
 * extend a file: fall back to seeking to length-1 and writing one NUL
 * byte.  The caller sets the locks; ftruncate is undefined when the
 * file length is smaller than 'size'.
 * Returns 0 on success, -1 on failure (errno preserved from the
 * original ftruncate failure where possible).
 */
int sys_ftruncate(int fd, off_t length)
{
#ifndef HAVE_PWRITE
    off_t curpos;
#endif
    int err;
    struct stat st;
    char c = 0;

    if (!ftruncate(fd, length)) {
        return 0;
    }
    /* maybe ftruncate doesn't work if we try to extend the size;
     * remember its errno so later failures can re-report it */
    err = errno;

#ifndef HAVE_PWRITE
    /* we only care about the file pointer if we don't use pwrite */
    if ((off_t)-1 == (curpos = lseek(fd, 0, SEEK_CUR)) ) {
        errno = err;
        return -1;
    }
#endif

    if ( fstat( fd, &st ) < 0 ) {
        errno = err;
        return -1;
    }
    /* shrinking must have worked via ftruncate; the fallback below can
     * only extend, so give up if the file is still too long */
    if (st.st_size > length) {
        errno = err;
        return -1;
    }

    /* extend by writing a single byte at the last position */
    if (lseek(fd, length -1, SEEK_SET) != length -1) {
        errno = err;
        return -1;
    }
    if (1 != write( fd, &c, 1 )) {
        /* return the write errno */
        return -1;
    }

#ifndef HAVE_PWRITE
    /* restore the original file position for the seek-based I/O path */
    if (curpos != lseek(fd, curpos, SEEK_SET)) {
        errno = err;
        return -1;
    }
#endif
    return 0;
}

/* ------------------------ */
/* Truncate the resource fork to size bytes (offset by the fork's entry
 * position in the AppleDouble file) and update the cached length. */
int ad_rtruncate( struct adouble *ad, const off_t size)
{
    if ( sys_ftruncate( ad_reso_fileno(ad), size + ad->ad_eid[ ADEID_RFORK ].ade_off ) < 0 ) {
        return -1;
    }
    ad->ad_rlen = size;
    return 0;
}

/* Truncate the data fork to size bytes. */
int ad_dtruncate(struct adouble *ad, const off_t size)
{
    if (sys_ftruncate(ad_data_fileno(ad), size) < 0) {
        return -1;
    }
    return 0;
}
gpl-2.0
mericon/Xp_Kernel_LGH850
drivers/mtd/nand/mpc5121_nfc.c
522
20811
/* * Copyright 2004-2008 Freescale Semiconductor, Inc. * Copyright 2009 Semihalf. * * Approved as OSADL project by a majority of OSADL members and funded * by OSADL membership fees in 2009; for details see www.osadl.org. * * Based on original driver from Freescale Semiconductor * written by John Rigby <jrigby@freescale.com> on basis * of drivers/mtd/nand/mxc_nand.c. Reworked and extended * Piotr Ziecik <kosmo@semihalf.com>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. 
*/ #include <linux/module.h> #include <linux/clk.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/mpc5121.h> /* Addresses for NFC MAIN RAM BUFFER areas */ #define NFC_MAIN_AREA(n) ((n) * 0x200) /* Addresses for NFC SPARE BUFFER areas */ #define NFC_SPARE_BUFFERS 8 #define NFC_SPARE_LEN 0x40 #define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) /* MPC5121 NFC registers */ #define NFC_BUF_ADDR 0x1E04 #define NFC_FLASH_ADDR 0x1E06 #define NFC_FLASH_CMD 0x1E08 #define NFC_CONFIG 0x1E0A #define NFC_ECC_STATUS1 0x1E0C #define NFC_ECC_STATUS2 0x1E0E #define NFC_SPAS 0x1E10 #define NFC_WRPROT 0x1E12 #define NFC_NF_WRPRST 0x1E18 #define NFC_CONFIG1 0x1E1A #define NFC_CONFIG2 0x1E1C #define NFC_UNLOCKSTART_BLK0 0x1E20 #define NFC_UNLOCKEND_BLK0 0x1E22 #define NFC_UNLOCKSTART_BLK1 0x1E24 #define NFC_UNLOCKEND_BLK1 0x1E26 #define NFC_UNLOCKSTART_BLK2 0x1E28 #define NFC_UNLOCKEND_BLK2 0x1E2A #define NFC_UNLOCKSTART_BLK3 0x1E2C #define NFC_UNLOCKEND_BLK3 0x1E2E /* Bit Definitions: NFC_BUF_ADDR */ #define NFC_RBA_MASK (7 << 0) #define NFC_ACTIVE_CS_SHIFT 5 #define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT) /* Bit Definitions: NFC_CONFIG */ #define NFC_BLS_UNLOCKED (1 << 1) /* Bit Definitions: NFC_CONFIG1 */ #define NFC_ECC_4BIT (1 << 0) #define NFC_FULL_PAGE_DMA (1 << 1) #define NFC_SPARE_ONLY (1 << 2) #define NFC_ECC_ENABLE (1 << 3) #define NFC_INT_MASK (1 << 4) #define NFC_BIG_ENDIAN (1 << 5) #define NFC_RESET (1 << 6) #define NFC_CE (1 << 7) #define NFC_ONE_CYCLE (1 << 8) #define NFC_PPB_32 (0 << 9) #define NFC_PPB_64 (1 << 9) #define NFC_PPB_128 (2 << 9) #define NFC_PPB_256 (3 << 9) #define NFC_PPB_MASK (3 << 9) #define NFC_FULL_PAGE_INT (1 << 11) /* Bit Definitions: NFC_CONFIG2 
*/ #define NFC_COMMAND (1 << 0) #define NFC_ADDRESS (1 << 1) #define NFC_INPUT (1 << 2) #define NFC_OUTPUT (1 << 3) #define NFC_ID (1 << 4) #define NFC_STATUS (1 << 5) #define NFC_CMD_FAIL (1 << 15) #define NFC_INT (1 << 15) /* Bit Definitions: NFC_WRPROT */ #define NFC_WPC_LOCK_TIGHT (1 << 0) #define NFC_WPC_LOCK (1 << 1) #define NFC_WPC_UNLOCK (1 << 2) #define DRV_NAME "mpc5121_nfc" /* Timeouts */ #define NFC_RESET_TIMEOUT 1000 /* 1 ms */ #define NFC_TIMEOUT (HZ / 10) /* 1/10 s */ struct mpc5121_nfc_prv { struct mtd_info mtd; struct nand_chip chip; int irq; void __iomem *regs; struct clk *clk; wait_queue_head_t irq_waitq; uint column; int spareonly; void __iomem *csreg; struct device *dev; }; static void mpc5121_nfc_done(struct mtd_info *mtd); /* Read NFC register */ static inline u16 nfc_read(struct mtd_info *mtd, uint reg) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; return in_be16(prv->regs + reg); } /* Write NFC register */ static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; out_be16(prv->regs + reg, val); } /* Set bits in NFC register */ static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits) { nfc_write(mtd, reg, nfc_read(mtd, reg) | bits); } /* Clear bits in NFC register */ static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) { nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); } /* Invoke address cycle */ static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr) { nfc_write(mtd, NFC_FLASH_ADDR, addr); nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS); mpc5121_nfc_done(mtd); } /* Invoke command cycle */ static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd) { nfc_write(mtd, NFC_FLASH_CMD, cmd); nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND); mpc5121_nfc_done(mtd); } /* Send data from NFC buffers to NAND flash */ static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd) { 
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_INPUT); mpc5121_nfc_done(mtd); } /* Receive data from NAND flash */ static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT); mpc5121_nfc_done(mtd); } /* Receive ID from NAND flash */ static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_ID); mpc5121_nfc_done(mtd); } /* Receive status from NAND flash */ static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd) { nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); nfc_write(mtd, NFC_CONFIG2, NFC_STATUS); mpc5121_nfc_done(mtd); } /* NFC interrupt handler */ static irqreturn_t mpc5121_nfc_irq(int irq, void *data) { struct mtd_info *mtd = data; struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK); wake_up(&prv->irq_waitq); return IRQ_HANDLED; } /* Wait for operation complete */ static void mpc5121_nfc_done(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; int rv; if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) { nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK); rv = wait_event_timeout(prv->irq_waitq, (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT); if (!rv) dev_warn(prv->dev, "Timeout while waiting for interrupt.\n"); } nfc_clear(mtd, NFC_CONFIG2, NFC_INT); } /* Do address cycle(s) */ static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) { struct nand_chip *chip = mtd->priv; u32 pagemask = chip->pagemask; if (column != -1) { mpc5121_nfc_send_addr(mtd, column); if (mtd->writesize > 512) mpc5121_nfc_send_addr(mtd, column >> 8); } if (page != -1) { do { mpc5121_nfc_send_addr(mtd, page & 0xFF); page >>= 8; pagemask >>= 8; } while (pagemask); } } /* Control chip select signals */ static void mpc5121_nfc_select_chip(struct 
mtd_info *mtd, int chip) { if (chip < 0) { nfc_clear(mtd, NFC_CONFIG1, NFC_CE); return; } nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & NFC_ACTIVE_CS_MASK); nfc_set(mtd, NFC_CONFIG1, NFC_CE); } /* Init external chip select logic on ADS5121 board */ static int ads5121_chipselect_init(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; struct device_node *dn; dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld"); if (dn) { prv->csreg = of_iomap(dn, 0); of_node_put(dn); if (!prv->csreg) return -ENOMEM; /* CPLD Register 9 controls NAND /CE Lines */ prv->csreg += 9; return 0; } return -EINVAL; } /* Control chips select signal on ADS5121 board */ static void ads5121_select_chip(struct mtd_info *mtd, int chip) { struct nand_chip *nand = mtd->priv; struct mpc5121_nfc_prv *prv = nand->priv; u8 v; v = in_8(prv->csreg); v |= 0x0F; if (chip >= 0) { mpc5121_nfc_select_chip(mtd, 0); v &= ~(1 << chip); } else mpc5121_nfc_select_chip(mtd, -1); out_8(prv->csreg, v); } /* Read NAND Ready/Busy signal */ static int mpc5121_nfc_dev_ready(struct mtd_info *mtd) { /* * NFC handles ready/busy signal internally. Therefore, this function * always returns status as ready. */ return 1; } /* Write command to NAND flash */ static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command, int column, int page) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; prv->column = (column >= 0) ? column : 0; prv->spareonly = 0; switch (command) { case NAND_CMD_PAGEPROG: mpc5121_nfc_send_prog_page(mtd); break; /* * NFC does not support sub-page reads and writes, * so emulate them using full page transfers. 
*/ case NAND_CMD_READ0: column = 0; break; case NAND_CMD_READ1: prv->column += 256; command = NAND_CMD_READ0; column = 0; break; case NAND_CMD_READOOB: prv->spareonly = 1; command = NAND_CMD_READ0; column = 0; break; case NAND_CMD_SEQIN: mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page); column = 0; break; case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: case NAND_CMD_READID: case NAND_CMD_STATUS: break; default: return; } mpc5121_nfc_send_cmd(mtd, command); mpc5121_nfc_addr_cycle(mtd, column, page); switch (command) { case NAND_CMD_READ0: if (mtd->writesize > 512) mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART); mpc5121_nfc_send_read_page(mtd); break; case NAND_CMD_READID: mpc5121_nfc_send_read_id(mtd); break; case NAND_CMD_STATUS: mpc5121_nfc_send_read_status(mtd); if (chip->options & NAND_BUSWIDTH_16) prv->column = 1; else prv->column = 0; break; } } /* Copy data from/to NFC spare buffers. */ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, u8 *buffer, uint size, int wr) { struct nand_chip *nand = mtd->priv; struct mpc5121_nfc_prv *prv = nand->priv; uint o, s, sbsize, blksize; /* * NAND spare area is available through NFC spare buffers. * The NFC divides spare area into (page_size / 512) chunks. * Each chunk is placed into separate spare memory area, using * first (spare_size / num_of_chunks) bytes of the buffer. * * For NAND device in which the spare area is not divided fully * by the number of chunks, number of used bytes in each spare * buffer is rounded down to the nearest even number of bytes, * and all remaining bytes are added to the last used spare area. * * For more information read section 26.6.10 of MPC5121e * Microcontroller Reference Manual, Rev. 3. 
*/ /* Calculate number of valid bytes in each spare buffer */ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; while (size) { /* Calculate spare buffer number */ s = offset / sbsize; if (s > NFC_SPARE_BUFFERS - 1) s = NFC_SPARE_BUFFERS - 1; /* * Calculate offset to requested data block in selected spare * buffer and its size. */ o = offset - (s * sbsize); blksize = min(sbsize - o, size); if (wr) memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, buffer, blksize); else memcpy_fromio(buffer, prv->regs + NFC_SPARE_AREA(s) + o, blksize); buffer += blksize; offset += blksize; size -= blksize; }; } /* Copy data from/to NFC main and spare buffers */ static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len, int wr) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; uint c = prv->column; uint l; /* Handle spare area access */ if (prv->spareonly || c >= mtd->writesize) { /* Calculate offset from beginning of spare area */ if (c >= mtd->writesize) c -= mtd->writesize; prv->column += len; mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); return; } /* * Handle main area access - limit copy length to prevent * crossing main/spare boundary. 
*/ l = min((uint)len, mtd->writesize - c); prv->column += l; if (wr) memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); else memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); /* Handle crossing main/spare boundary */ if (l != len) { buf += l; len -= l; mpc5121_nfc_buf_copy(mtd, buf, len, wr); } } /* Read data from NFC buffers */ static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len) { mpc5121_nfc_buf_copy(mtd, buf, len, 0); } /* Write data to NFC buffers */ static void mpc5121_nfc_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); } /* Read byte from NFC buffers */ static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) { u8 tmp; mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp)); return tmp; } /* Read word from NFC buffers */ static u16 mpc5121_nfc_read_word(struct mtd_info *mtd) { u16 tmp; mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp)); return tmp; } /* * Read NFC configuration from Reset Config Word * * NFC is configured during reset in basis of information stored * in Reset Config Word. There is no other way to set NAND block * size, spare size and bus width. */ static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; struct mpc512x_reset_module *rm; struct device_node *rmnode; uint rcw_pagesize = 0; uint rcw_sparesize = 0; uint rcw_width; uint rcwh; uint romloc, ps; int ret = 0; rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); if (!rmnode) { dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' " "node in device tree!\n"); return -ENODEV; } rm = of_iomap(rmnode, 0); if (!rm) { dev_err(prv->dev, "Error mapping reset module node!\n"); ret = -EBUSY; goto out; } rcwh = in_be32(&rm->rcwhr); /* Bit 6: NFC bus width */ rcw_width = ((rcwh >> 6) & 0x1) ? 
2 : 1; /* Bit 7: NFC Page/Spare size */ ps = (rcwh >> 7) & 0x1; /* Bits [22:21]: ROM Location */ romloc = (rcwh >> 21) & 0x3; /* Decode RCW bits */ switch ((ps << 2) | romloc) { case 0x00: case 0x01: rcw_pagesize = 512; rcw_sparesize = 16; break; case 0x02: case 0x03: rcw_pagesize = 4096; rcw_sparesize = 128; break; case 0x04: case 0x05: rcw_pagesize = 2048; rcw_sparesize = 64; break; case 0x06: case 0x07: rcw_pagesize = 4096; rcw_sparesize = 218; break; } mtd->writesize = rcw_pagesize; mtd->oobsize = rcw_sparesize; if (rcw_width == 2) chip->options |= NAND_BUSWIDTH_16; dev_notice(prv->dev, "Configured for " "%u-bit NAND, page size %u " "with %u spare.\n", rcw_width * 8, rcw_pagesize, rcw_sparesize); iounmap(rm); out: of_node_put(rmnode); return ret; } /* Free driver resources */ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct mpc5121_nfc_prv *prv = chip->priv; if (prv->clk) clk_disable_unprepare(prv->clk); if (prv->csreg) iounmap(prv->csreg); } static int mpc5121_nfc_probe(struct platform_device *op) { struct device_node *rootnode, *dn = op->dev.of_node; struct clk *clk; struct device *dev = &op->dev; struct mpc5121_nfc_prv *prv; struct resource res; struct mtd_info *mtd; struct nand_chip *chip; unsigned long regs_paddr, regs_size; const __be32 *chips_no; int resettime = 0; int retval = 0; int rev, len; struct mtd_part_parser_data ppdata; /* * Check SoC revision. This driver supports only NFC * in MPC5121 revision 2 and MPC5123 revision 3. 
*/ rev = (mfspr(SPRN_SVR) >> 4) & 0xF; if ((rev != 2) && (rev != 3)) { dev_err(dev, "SoC revision %u is not supported!\n", rev); return -ENXIO; } prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); if (!prv) return -ENOMEM; mtd = &prv->mtd; chip = &prv->chip; mtd->priv = chip; chip->priv = prv; prv->dev = dev; /* Read NFC configuration from Reset Config Word */ retval = mpc5121_nfc_read_hw_config(mtd); if (retval) { dev_err(dev, "Unable to read NFC config!\n"); return retval; } prv->irq = irq_of_parse_and_map(dn, 0); if (prv->irq == NO_IRQ) { dev_err(dev, "Error mapping IRQ!\n"); return -EINVAL; } retval = of_address_to_resource(dn, 0, &res); if (retval) { dev_err(dev, "Error parsing memory region!\n"); return retval; } chips_no = of_get_property(dn, "chips", &len); if (!chips_no || len != sizeof(*chips_no)) { dev_err(dev, "Invalid/missing 'chips' property!\n"); return -EINVAL; } regs_paddr = res.start; regs_size = resource_size(&res); if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) { dev_err(dev, "Error requesting memory region!\n"); return -EBUSY; } prv->regs = devm_ioremap(dev, regs_paddr, regs_size); if (!prv->regs) { dev_err(dev, "Error mapping memory region!\n"); return -ENOMEM; } mtd->name = "MPC5121 NAND"; ppdata.of_node = dn; chip->dev_ready = mpc5121_nfc_dev_ready; chip->cmdfunc = mpc5121_nfc_command; chip->read_byte = mpc5121_nfc_read_byte; chip->read_word = mpc5121_nfc_read_word; chip->read_buf = mpc5121_nfc_read_buf; chip->write_buf = mpc5121_nfc_write_buf; chip->select_chip = mpc5121_nfc_select_chip; chip->bbt_options = NAND_BBT_USE_FLASH; chip->ecc.mode = NAND_ECC_SOFT; /* Support external chip-select logic on ADS5121 board */ rootnode = of_find_node_by_path("/"); if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { retval = ads5121_chipselect_init(mtd); if (retval) { dev_err(dev, "Chipselect init error!\n"); of_node_put(rootnode); return retval; } chip->select_chip = ads5121_select_chip; } of_node_put(rootnode); /* Enable 
NFC clock */ clk = devm_clk_get(dev, "ipg"); if (IS_ERR(clk)) { dev_err(dev, "Unable to acquire NFC clock!\n"); retval = PTR_ERR(clk); goto error; } retval = clk_prepare_enable(clk); if (retval) { dev_err(dev, "Unable to enable NFC clock!\n"); goto error; } prv->clk = clk; /* Reset NAND Flash controller */ nfc_set(mtd, NFC_CONFIG1, NFC_RESET); while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { if (resettime++ >= NFC_RESET_TIMEOUT) { dev_err(dev, "Timeout while resetting NFC!\n"); retval = -EINVAL; goto error; } udelay(1); } /* Enable write to NFC memory */ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); /* Enable write to all NAND pages */ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); /* * Setup NFC: * - Big Endian transfers, * - Interrupt after full page read/write. */ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | NFC_FULL_PAGE_INT); /* Set spare area size */ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); init_waitqueue_head(&prv->irq_waitq); retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME, mtd); if (retval) { dev_err(dev, "Error requesting IRQ!\n"); goto error; } /* Detect NAND chips */ if (nand_scan(mtd, be32_to_cpup(chips_no))) { dev_err(dev, "NAND Flash not found !\n"); retval = -ENXIO; goto error; } /* Set erase block size */ switch (mtd->erasesize / mtd->writesize) { case 32: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); break; case 64: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); break; case 128: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); break; case 256: nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); break; default: dev_err(dev, "Unsupported NAND flash!\n"); retval = -ENXIO; goto error; } dev_set_drvdata(dev, mtd); /* Register device in MTD */ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (retval) { dev_err(dev, "Error adding MTD device!\n"); goto error; } return 0; error: mpc5121_nfc_free(dev, mtd); return retval; } static int 
mpc5121_nfc_remove(struct platform_device *op) { struct device *dev = &op->dev; struct mtd_info *mtd = dev_get_drvdata(dev); nand_release(mtd); mpc5121_nfc_free(dev, mtd); return 0; } static struct of_device_id mpc5121_nfc_match[] = { { .compatible = "fsl,mpc5121-nfc", }, {}, }; static struct platform_driver mpc5121_nfc_driver = { .probe = mpc5121_nfc_probe, .remove = mpc5121_nfc_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = mpc5121_nfc_match, }, }; module_platform_driver(mpc5121_nfc_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); MODULE_LICENSE("GPL");
gpl-2.0
jazzsir/iamroot-linux-arm10c
drivers/cpufreq/speedstep-smi.c
778
11733
/* * Intel SpeedStep SMI driver. * * (C) 2003 Hiroshi Miura <miura@da-cha.org> * * Licensed under the terms of the GNU GPL License version 2. * */ /********************************************************************* * SPEEDSTEP - DEFINITIONS * *********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/io.h> #include <asm/ist.h> #include <asm/cpu_device_id.h> #include "speedstep-lib.h" /* speedstep system management interface port/command. * * These parameters are got from IST-SMI BIOS call. * If user gives it, these are used. * */ static int smi_port; static int smi_cmd; static unsigned int smi_sig; /* info about the processor */ static enum speedstep_processor speedstep_processor; /* * There are only two frequency states for each processor. Values * are in kHz for the time being. */ static struct cpufreq_frequency_table speedstep_freqs[] = { {SPEEDSTEP_HIGH, 0}, {SPEEDSTEP_LOW, 0}, {0, CPUFREQ_TABLE_END}, }; #define GET_SPEEDSTEP_OWNER 0 #define GET_SPEEDSTEP_STATE 1 #define SET_SPEEDSTEP_STATE 2 #define GET_SPEEDSTEP_FREQS 4 /* how often shall the SMI call be tried if it failed, e.g. because * of DMA activity going on? 
*/ #define SMI_TRIES 5 /** * speedstep_smi_ownership */ static int speedstep_smi_ownership(void) { u32 command, result, magic, dummy; u32 function = GET_SPEEDSTEP_OWNER; unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation"; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); pr_debug("trying to obtain ownership with command %x at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" : "=D" (result), "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic) : "memory" ); pr_debug("result is %x\n", result); return result; } /** * speedstep_smi_get_freqs - get SpeedStep preferred & current freq. * @low: the low frequency value is placed here * @high: the high frequency value is placed here * * Only available on later SpeedStep-enabled systems, returns false results or * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing * shows that the latter occurs if !(ist_info.event & 0xFFFF). */ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) { u32 command, result = 0, edi, high_mhz, low_mhz, dummy; u32 state = 0; u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { pr_debug("bug #1422 -- can't read freqs from BIOS\n"); return -ENODEV; } command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); pr_debug("trying to determine frequencies with command %x at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi), "=S" (dummy) : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) ); pr_debug("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); /* abort if results are obviously incorrect... 
*/ if ((high_mhz + low_mhz) < 600) return -EINVAL; *high = high_mhz * 1000; *low = low_mhz * 1000; return result; } /** * speedstep_get_state - set the SpeedStep state * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ static int speedstep_get_state(void) { u32 function = GET_SPEEDSTEP_STATE; u32 result, state, edi, command, dummy; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); pr_debug("trying to determine current setting with command %x " "at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" : "=a" (result), "=b" (state), "=D" (edi), "=c" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0), "D" (0) ); pr_debug("state is %x, result is %x\n", state, result); return state & 1; } /** * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ static void speedstep_set_state(unsigned int state) { unsigned int result = 0, command, new_state, dummy; unsigned long flags; unsigned int function = SET_SPEEDSTEP_STATE; unsigned int retry = 0; if (state > 0x1) return; /* Disable IRQs */ local_irq_save(flags); command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); pr_debug("trying to set frequency to state %u " "with command %x at port %x\n", state, command, smi_port); do { if (retry) { pr_debug("retry %u, previous result %u, waiting...\n", retry, result); mdelay(retry * 50); } retry++; __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" : "=b" (new_state), "=D" (result), "=c" (dummy), "=a" (dummy), "=d" (dummy), "=S" (dummy) : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) ); } while ((new_state != state) && (retry <= SMI_TRIES)); /* enable IRQs */ local_irq_restore(flags); if (new_state == state) pr_debug("change to %u MHz succeeded after %u tries " "with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); 
else printk(KERN_ERR "cpufreq: change to state %u " "failed with new_state %u and result %u\n", state, new_state, result); return; } /** * speedstep_target - set a new CPUFreq policy * @policy: new policy * @target_freq: new freq * @relation: * * Sets a new CPUFreq policy/freq. */ static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; struct cpufreq_freqs freqs; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; freqs.old = speedstep_freqs[speedstep_get_state()].frequency; freqs.new = speedstep_freqs[newstate].frequency; if (freqs.old == freqs.new) return 0; cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); speedstep_set_state(newstate); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } /** * speedstep_verify - verifies a new CPUFreq policy * @policy: new policy * * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. 
*/ static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } static int speedstep_cpu_init(struct cpufreq_policy *policy) { int result; unsigned int speed, state; unsigned int *low, *high; /* capability check */ if (policy->cpu != 0) return -ENODEV; result = speedstep_smi_ownership(); if (result) { pr_debug("fails in acquiring ownership of a SMI interface.\n"); return -EINVAL; } /* detect low and high frequency */ low = &speedstep_freqs[SPEEDSTEP_LOW].frequency; high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency; result = speedstep_smi_get_freqs(low, high); if (result) { /* fall back to speedstep_lib.c dection mechanism: * try both states out */ pr_debug("could not detect low and high frequencies " "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, low, high, NULL, &speedstep_set_state); if (result) { pr_debug("could not detect two different speeds" " -- aborting.\n"); return result; } else pr_debug("workaround worked.\n"); } /* get current speed setting */ state = speedstep_get_state(); speed = speedstep_freqs[state].frequency; pr_debug("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->cur = speed; result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) return result; cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); return 0; } static int speedstep_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static unsigned int speedstep_get(unsigned int cpu) { if (cpu) return -ENODEV; return speedstep_get_frequency(speedstep_processor); } static int speedstep_resume(struct cpufreq_policy *policy) { int result = speedstep_smi_ownership(); if (result) pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); return result; } static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver speedstep_driver = { .name = "speedstep-smi", .verify = speedstep_verify, .target = speedstep_target, .init = speedstep_cpu_init, .exit = speedstep_cpu_exit, .get = speedstep_get, .resume = speedstep_resume, .owner = THIS_MODULE, .attr = speedstep_attr, }; static const struct x86_cpu_id ss_smi_ids[] = { { X86_VENDOR_INTEL, 6, 0xb, }, { X86_VENDOR_INTEL, 6, 0x8, }, { X86_VENDOR_INTEL, 15, 2 }, {} }; #if 0 /* Not auto loaded currently */ MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids); #endif /** * speedstep_init - initializes the SpeedStep CPUFreq driver * * Initializes the SpeedStep support. Returns -ENODEV on unsupported * BIOS, -EINVAL on problems during initiatization, and zero on * success. 
*/ static int __init speedstep_init(void) { if (!x86_match_cpu(ss_smi_ids)) return -ENODEV; speedstep_processor = speedstep_detect_processor(); switch (speedstep_processor) { case SPEEDSTEP_CPU_PIII_T: case SPEEDSTEP_CPU_PIII_C: case SPEEDSTEP_CPU_PIII_C_EARLY: break; default: speedstep_processor = 0; } if (!speedstep_processor) { pr_debug("No supported Intel CPU detected.\n"); return -ENODEV; } pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " "event:0x%.8ulx, perf_level:0x%.8ulx.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); /* Error if no IST-SMI BIOS or no PARM sig= 'ISGE' aka 'Intel Speedstep Gate E' */ if ((ist_info.signature != 0x47534943) && ( (smi_port == 0) || (smi_cmd == 0))) return -ENODEV; if (smi_sig == 1) smi_sig = 0x47534943; else smi_sig = ist_info.signature; /* setup smi_port from MODLULE_PARM or BIOS */ if ((smi_port > 0xff) || (smi_port < 0)) return -EINVAL; else if (smi_port == 0) smi_port = ist_info.command & 0xff; if ((smi_cmd > 0xff) || (smi_cmd < 0)) return -EINVAL; else if (smi_cmd == 0) smi_cmd = (ist_info.command >> 16) & 0xff; return cpufreq_register_driver(&speedstep_driver); } /** * speedstep_exit - unregisters SpeedStep support * * Unregisters SpeedStep support. */ static void __exit speedstep_exit(void) { cpufreq_unregister_driver(&speedstep_driver); } module_param(smi_port, int, 0444); module_param(smi_cmd, int, 0444); module_param(smi_sig, uint, 0444); MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value " "-- Intel's default setting is 0xb2"); MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value " "-- Intel's default setting is 0x82"); MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the " "SMI interface."); MODULE_AUTHOR("Hiroshi Miura"); MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface."); MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit);
gpl-2.0
ParanoidAndroid/android_kernel_grouper
drivers/misc/mpu3050/compass/ak8975.c
778
6984
/* $License: Copyright (C) 2010 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ /** * @defgroup COMPASSDL (Motion Library - Accelerometer Driver Layer) * @brief Provides the interface to setup and handle an accelerometers * connected to the secondary I2C interface of the gyroscope. * * @{ * @file AK8975.c * @brief Magnetometer setup and handling methods for AKM 8975 compass. */ /* ------------------ */ /* - Include Files. 
- */ /* ------------------ */ #include <string.h> #ifdef __KERNEL__ #include <linux/module.h> #endif #include "mpu.h" #include "mlsl.h" #include "mlos.h" #include <log.h> #undef MPL_LOG_TAG #define MPL_LOG_TAG "MPL-compass" #define AK8975_REG_ST1 (0x02) #define AK8975_REG_HXL (0x03) #define AK8975_REG_ST2 (0x09) #define AK8975_REG_CNTL (0x0A) #define AK8975_CNTL_MODE_POWER_DOWN (0x00) #define AK8975_CNTL_MODE_SINGLE_MEASUREMENT (0x01) int ak8975_suspend(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { int result = ML_SUCCESS; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, AK8975_REG_CNTL, AK8975_CNTL_MODE_POWER_DOWN); MLOSSleep(1); /* wait at least 100us */ ERROR_CHECK(result); return result; } int ak8975_resume(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata) { int result = ML_SUCCESS; result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, AK8975_REG_CNTL, AK8975_CNTL_MODE_SINGLE_MEASUREMENT); ERROR_CHECK(result); return result; } int ak8975_read(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, unsigned char *data) { unsigned char regs[8]; unsigned char *stat = &regs[0]; unsigned char *stat2 = &regs[7]; int result = ML_SUCCESS; int status = ML_SUCCESS; result = MLSLSerialRead(mlsl_handle, pdata->address, AK8975_REG_ST1, 8, regs); ERROR_CHECK(result); /* * ST : data ready - * Measurement has been completed and data is ready to be read. */ if (*stat & 0x01) { memcpy(data, &regs[1], 6); status = ML_SUCCESS; } /* * ST2 : data error - * occurs when data read is started outside of a readable period; * data read would not be correct. * Valid in continuous measurement mode only. * In single measurement mode this error should not occour but we * stil account for it and return an error, since the data would be * corrupted. * DERR bit is self-clearing when ST2 register is read. 
*/ if (*stat2 & 0x04) status = ML_ERROR_COMPASS_DATA_ERROR; /* * ST2 : overflow - * the sum of the absolute values of all axis |X|+|Y|+|Z| < 2400uT. * This is likely to happen in presence of an external magnetic * disturbance; it indicates, the sensor data is incorrect and should * be ignored. * An error is returned. * HOFL bit clears when a new measurement starts. */ if (*stat2 & 0x08) status = ML_ERROR_COMPASS_DATA_OVERFLOW; /* * ST : overrun - * the previous sample was not fetched and lost. * Valid in continuous measurement mode only. * In single measurement mode this error should not occour and we * don't consider this condition an error. * DOR bit is self-clearing when ST2 or any meas. data register is * read. */ if (*stat & 0x02) { /* status = ML_ERROR_COMPASS_DATA_UNDERFLOW; */ status = ML_SUCCESS; } /* * trigger next measurement if: * - stat is non zero; * - if stat is zero and stat2 is non zero. * Won't trigger if data is not ready and there was no error. */ if (*stat != 0x00 || *stat2 != 0x00) { result = MLSLSerialWriteSingle(mlsl_handle, pdata->address, AK8975_REG_CNTL, AK8975_CNTL_MODE_SINGLE_MEASUREMENT); ERROR_CHECK(result); } return status; } static int ak8975_config(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config *data) { int result; if (!data->data) return ML_ERROR_INVALID_PARAMETER; switch (data->key) { case MPU_SLAVE_WRITE_REGISTERS: result = MLSLSerialWrite(mlsl_handle, pdata->address, data->len, (unsigned char *)data->data); ERROR_CHECK(result); break; case MPU_SLAVE_CONFIG_ODR_SUSPEND: case MPU_SLAVE_CONFIG_ODR_RESUME: case MPU_SLAVE_CONFIG_FSR_SUSPEND: case MPU_SLAVE_CONFIG_FSR_RESUME: case MPU_SLAVE_CONFIG_MOT_THS: case MPU_SLAVE_CONFIG_NMOT_THS: case MPU_SLAVE_CONFIG_MOT_DUR: case MPU_SLAVE_CONFIG_NMOT_DUR: case MPU_SLAVE_CONFIG_IRQ_SUSPEND: case MPU_SLAVE_CONFIG_IRQ_RESUME: default: return ML_ERROR_FEATURE_NOT_IMPLEMENTED; }; return ML_SUCCESS; } static int 
ak8975_get_config(void *mlsl_handle, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config *data) { int result; if (!data->data) return ML_ERROR_INVALID_PARAMETER; switch (data->key) { case MPU_SLAVE_READ_REGISTERS: { unsigned char *serial_data = (unsigned char *)data->data; result = MLSLSerialRead(mlsl_handle, pdata->address, serial_data[0], data->len - 1, &serial_data[1]); ERROR_CHECK(result); break; } case MPU_SLAVE_CONFIG_ODR_SUSPEND: case MPU_SLAVE_CONFIG_ODR_RESUME: case MPU_SLAVE_CONFIG_FSR_SUSPEND: case MPU_SLAVE_CONFIG_FSR_RESUME: case MPU_SLAVE_CONFIG_MOT_THS: case MPU_SLAVE_CONFIG_NMOT_THS: case MPU_SLAVE_CONFIG_MOT_DUR: case MPU_SLAVE_CONFIG_NMOT_DUR: case MPU_SLAVE_CONFIG_IRQ_SUSPEND: case MPU_SLAVE_CONFIG_IRQ_RESUME: default: return ML_ERROR_FEATURE_NOT_IMPLEMENTED; }; return ML_SUCCESS; } struct ext_slave_descr ak8975_descr = { /*.init = */ NULL, /*.exit = */ NULL, /*.suspend = */ ak8975_suspend, /*.resume = */ ak8975_resume, /*.read = */ ak8975_read, /*.config = */ ak8975_config, /*.get_config = */ ak8975_get_config, /*.name = */ "ak8975", /*.type = */ EXT_SLAVE_TYPE_COMPASS, /*.id = */ COMPASS_ID_AKM, /*.reg = */ 0x01, /*.len = */ 9, /*.endian = */ EXT_SLAVE_LITTLE_ENDIAN, /*.range = */ {9830, 4000} }; struct ext_slave_descr *ak8975_get_slave_descr(void) { return &ak8975_descr; } EXPORT_SYMBOL(ak8975_get_slave_descr); /** * @} */
gpl-2.0
zeroblade1984/MotoG2k15
arch/arm/mach-exynos/setup-usb-phy.c
2058
5211
/*
 * Copyright (C) 2011 Samsung Electronics Co.Ltd
 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <mach/regs-pmu.h>
#include <mach/regs-usb-phy.h>
#include <plat/cpu.h>
#include <plat/usb-phy.h>

/* Number of current users of the shared host (PHY1) block. */
static atomic_t host_usage;

/* Returns non-zero while the standard host PHY is powered up. */
static int exynos4_usb_host_phy_is_on(void)
{
	return (readl(EXYNOS4_PHYPWR) & PHY1_STD_ANALOG_POWERDOWN) ? 0 : 1;
}

/*
 * Program the PHY PLL reference-clock selector from the "xusbxti"
 * clock rate.  Supported rates differ between Exynos4210 and
 * Exynos4212/4412; unknown rates fall back to 24 MHz.
 */
static void exynos4210_usb_phy_clkset(struct platform_device *pdev)
{
	struct clk *xusbxti_clk;
	u32 phyclk;

	xusbxti_clk = clk_get(&pdev->dev, "xusbxti");
	if (xusbxti_clk && !IS_ERR(xusbxti_clk)) {
		if (soc_is_exynos4210()) {
			/* set clock frequency for PLL */
			phyclk = readl(EXYNOS4_PHYCLK) &
						~EXYNOS4210_CLKSEL_MASK;

			switch (clk_get_rate(xusbxti_clk)) {
			case 12 * MHZ:
				phyclk |= EXYNOS4210_CLKSEL_12M;
				break;
			case 48 * MHZ:
				phyclk |= EXYNOS4210_CLKSEL_48M;
				break;
			default:
			case 24 * MHZ:
				phyclk |= EXYNOS4210_CLKSEL_24M;
				break;
			}
			writel(phyclk, EXYNOS4_PHYCLK);
		} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
			/* set clock frequency for PLL */
			phyclk = readl(EXYNOS4_PHYCLK) &
						~EXYNOS4X12_CLKSEL_MASK;

			switch (clk_get_rate(xusbxti_clk)) {
			case 9600 * KHZ:
				phyclk |= EXYNOS4X12_CLKSEL_9600K;
				break;
			case 10 * MHZ:
				phyclk |= EXYNOS4X12_CLKSEL_10M;
				break;
			case 12 * MHZ:
				phyclk |= EXYNOS4X12_CLKSEL_12M;
				break;
			case 19200 * KHZ:
				phyclk |= EXYNOS4X12_CLKSEL_19200K;
				break;
			case 20 * MHZ:
				phyclk |= EXYNOS4X12_CLKSEL_20M;
				break;
			default:
			case 24 * MHZ:
				/* default reference clock */
				phyclk |= EXYNOS4X12_CLKSEL_24M;
				break;
			}
			writel(phyclk, EXYNOS4_PHYCLK);
		}
		clk_put(xusbxti_clk);
	}
}

/*
 * Power up and reset the device (OTG) PHY, PHY0.
 * Always returns 0.
 */
static int exynos4210_usb_phy0_init(struct platform_device *pdev)
{
	u32 rstcon;

	writel(readl(S5P_USBDEVICE_PHY_CONTROL) | S5P_USBDEVICE_PHY_ENABLE,
			S5P_USBDEVICE_PHY_CONTROL);

	exynos4210_usb_phy_clkset(pdev);

	/* set to normal PHY0 */
	writel((readl(EXYNOS4_PHYPWR) & ~PHY0_NORMAL_MASK), EXYNOS4_PHYPWR);

	/* reset PHY0 and Link */
	rstcon = readl(EXYNOS4_RSTCON) | PHY0_SWRST_MASK;
	writel(rstcon, EXYNOS4_RSTCON);
	udelay(10);

	rstcon &= ~PHY0_SWRST_MASK;
	writel(rstcon, EXYNOS4_RSTCON);

	return 0;
}

/* Power down the device (OTG) PHY, PHY0.  Always returns 0. */
static int exynos4210_usb_phy0_exit(struct platform_device *pdev)
{
	writel((readl(EXYNOS4_PHYPWR) | PHY0_ANALOG_POWERDOWN |
				PHY0_OTG_DISABLE), EXYNOS4_PHYPWR);

	writel(readl(S5P_USBDEVICE_PHY_CONTROL) & ~S5P_USBDEVICE_PHY_ENABLE,
			S5P_USBDEVICE_PHY_CONTROL);

	return 0;
}

/*
 * Power up and reset the host PHY (PHY1, standard USB + both HSIC
 * ports).  The "otg" clock is only needed while the PHY registers are
 * programmed and is released again before returning.
 *
 * Returns 0 on success or a negative errno from clock setup.
 */
static int exynos4210_usb_phy1_init(struct platform_device *pdev)
{
	struct clk *otg_clk;
	u32 rstcon;
	int err;

	atomic_inc(&host_usage);

	otg_clk = clk_get(&pdev->dev, "otg");
	if (IS_ERR(otg_clk)) {
		dev_err(&pdev->dev, "Failed to get otg clock\n");
		return PTR_ERR(otg_clk);
	}

	err = clk_enable(otg_clk);
	if (err) {
		clk_put(otg_clk);
		return err;
	}

	/*
	 * PHY already powered by a previous user: nothing to program,
	 * but still release the otg clock below.  (A bare "return 0"
	 * here used to leak the enabled clock and its reference.)
	 */
	if (exynos4_usb_host_phy_is_on())
		goto done;

	writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE,
			S5P_USBHOST_PHY_CONTROL);

	exynos4210_usb_phy_clkset(pdev);

	/* floating prevention logic: disable */
	writel((readl(EXYNOS4_PHY1CON) | FPENABLEN), EXYNOS4_PHY1CON);

	/* set to normal HSIC 0 and 1 of PHY1 */
	writel((readl(EXYNOS4_PHYPWR) & ~PHY1_HSIC_NORMAL_MASK),
			EXYNOS4_PHYPWR);

	/* set to normal standard USB of PHY1 */
	writel((readl(EXYNOS4_PHYPWR) & ~PHY1_STD_NORMAL_MASK), EXYNOS4_PHYPWR);

	/* reset all ports of both PHY and Link */
	rstcon = readl(EXYNOS4_RSTCON) | HOST_LINK_PORT_SWRST_MASK |
		PHY1_SWRST_MASK;
	writel(rstcon, EXYNOS4_RSTCON);
	udelay(10);

	rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
	writel(rstcon, EXYNOS4_RSTCON);
	udelay(80);

done:
	clk_disable(otg_clk);
	clk_put(otg_clk);

	return 0;
}

/*
 * Drop one user of the host PHY and power it down once the last user
 * is gone.  Returns 0 on success or a negative errno from clock setup.
 */
static int exynos4210_usb_phy1_exit(struct platform_device *pdev)
{
	struct clk *otg_clk;
	int err;

	if (atomic_dec_return(&host_usage) > 0)
		return 0;

	otg_clk = clk_get(&pdev->dev, "otg");
	if (IS_ERR(otg_clk)) {
		dev_err(&pdev->dev, "Failed to get otg clock\n");
		return PTR_ERR(otg_clk);
	}

	err = clk_enable(otg_clk);
	if (err) {
		clk_put(otg_clk);
		return err;
	}

	writel((readl(EXYNOS4_PHYPWR) | PHY1_STD_ANALOG_POWERDOWN),
			EXYNOS4_PHYPWR);

	writel(readl(S5P_USBHOST_PHY_CONTROL) & ~S5P_USBHOST_PHY_ENABLE,
			S5P_USBHOST_PHY_CONTROL);

	clk_disable(otg_clk);
	clk_put(otg_clk);

	return 0;
}

/* Platform entry point: initialize the PHY selected by @type. */
int s5p_usb_phy_init(struct platform_device *pdev, int type)
{
	if (type == USB_PHY_TYPE_DEVICE)
		return exynos4210_usb_phy0_init(pdev);
	else if (type == USB_PHY_TYPE_HOST)
		return exynos4210_usb_phy1_init(pdev);

	return -EINVAL;
}

/* Platform entry point: shut down the PHY selected by @type. */
int s5p_usb_phy_exit(struct platform_device *pdev, int type)
{
	if (type == USB_PHY_TYPE_DEVICE)
		return exynos4210_usb_phy0_exit(pdev);
	else if (type == USB_PHY_TYPE_HOST)
		return exynos4210_usb_phy1_exit(pdev);

	return -EINVAL;
}
gpl-2.0
Think-Silicon/linux-thinksilicon
drivers/isdn/hisax/diva.c
2314
34411
/* $Id: diva.c,v 1.33.2.6 2004/02/11 13:21:33 keil Exp $ * * low level stuff for Eicon.Diehl Diva Family ISDN cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * Thanks to Eicon Technology for documents and information * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "hscx.h" #include "ipac.h" #include "ipacx.h" #include "isdnl1.h" #include <linux/pci.h> #include <linux/isapnp.h> static const char *Diva_revision = "$Revision: 1.33.2.6 $"; #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) #define DIVA_HSCX_DATA 0 #define DIVA_HSCX_ADR 4 #define DIVA_ISA_ISAC_DATA 2 #define DIVA_ISA_ISAC_ADR 6 #define DIVA_ISA_CTRL 7 #define DIVA_IPAC_ADR 0 #define DIVA_IPAC_DATA 1 #define DIVA_PCI_ISAC_DATA 8 #define DIVA_PCI_ISAC_ADR 0xc #define DIVA_PCI_CTRL 0x10 /* SUB Types */ #define DIVA_ISA 1 #define DIVA_PCI 2 #define DIVA_IPAC_ISA 3 #define DIVA_IPAC_PCI 4 #define DIVA_IPACX_PCI 5 /* CTRL (Read) */ #define DIVA_IRQ_STAT 0x01 #define DIVA_EEPROM_SDA 0x02 /* CTRL (Write) */ #define DIVA_IRQ_REQ 0x01 #define DIVA_RESET 0x08 #define DIVA_EEPROM_CLK 0x40 #define DIVA_PCI_LED_A 0x10 #define DIVA_PCI_LED_B 0x20 #define DIVA_ISA_LED_A 0x20 #define DIVA_ISA_LED_B 0x40 #define DIVA_IRQ_CLR 0x80 /* Siemens PITA */ #define PITA_MISC_REG 0x1c #ifdef __BIG_ENDIAN #define PITA_PARA_SOFTRESET 0x00000001 #define PITA_SER_SOFTRESET 0x00000002 #define PITA_PARA_MPX_MODE 0x00000004 #define PITA_INT0_ENABLE 0x00000200 #else #define PITA_PARA_SOFTRESET 0x01000000 #define PITA_SER_SOFTRESET 0x02000000 #define PITA_PARA_MPX_MODE 0x04000000 #define PITA_INT0_ENABLE 0x00020000 #endif #define PITA_INT0_STATUS 0x02 static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char 
ret; byteout(ale, off); ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); insb(adr, data, size); } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { byteout(ale, off); byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); outsb(adr, data, size); } static inline u_char memreadreg(unsigned long adr, u_char off) { return (*((unsigned char *) (((unsigned int *)adr) + off))); } static inline void memwritereg(unsigned long adr, u_char off, u_char data) { register u_char *p; p = (unsigned char *)(((unsigned int *)adr) + off); *p = data; } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0, data, size); } static u_char ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset + 0x80)); } static void WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset | 0x80, value); } static void ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0x80, data, size); } static void WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0x80, data, size); } static u_char 
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (readreg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, offset + (hscx ? 0x40 : 0))); } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, offset + (hscx ? 0x40 : 0), value); } static u_char MemReadISAC_IPAC(struct IsdnCardState *cs, u_char offset) { return (memreadreg(cs->hw.diva.cfg_reg, offset + 0x80)); } static void MemWriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value) { memwritereg(cs->hw.diva.cfg_reg, offset | 0x80, value); } static void MemReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { while (size--) *data++ = memreadreg(cs->hw.diva.cfg_reg, 0x80); } static void MemWriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { while (size--) memwritereg(cs->hw.diva.cfg_reg, 0x80, *data++); } static u_char MemReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (memreadreg(cs->hw.diva.cfg_reg, offset + (hscx ? 0x40 : 0))); } static void MemWriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { memwritereg(cs->hw.diva.cfg_reg, offset + (hscx ? 0x40 : 0), value); } /* IO-Functions for IPACX type cards */ static u_char MemReadISAC_IPACX(struct IsdnCardState *cs, u_char offset) { return (memreadreg(cs->hw.diva.cfg_reg, offset)); } static void MemWriteISAC_IPACX(struct IsdnCardState *cs, u_char offset, u_char value) { memwritereg(cs->hw.diva.cfg_reg, offset, value); } static void MemReadISACfifo_IPACX(struct IsdnCardState *cs, u_char *data, int size) { while (size--) *data++ = memreadreg(cs->hw.diva.cfg_reg, 0); } static void MemWriteISACfifo_IPACX(struct IsdnCardState *cs, u_char *data, int size) { while (size--) memwritereg(cs->hw.diva.cfg_reg, 0, *data++); } static u_char MemReadHSCX_IPACX(struct IsdnCardState *cs, int hscx, u_char offset) { return (memreadreg(cs->hw.diva.cfg_reg, offset + (hscx ? 
IPACX_OFF_B2 : IPACX_OFF_B1))); } static void MemWriteHSCX_IPACX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { memwritereg(cs->hw.diva.cfg_reg, offset + (hscx ? IPACX_OFF_B2 : IPACX_OFF_B1), value); } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) readreg(cs->hw.diva.hscx_adr, \ cs->hw.diva.hscx, reg + (nr ? 0x40 : 0)) #define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.diva.hscx_adr, \ cs->hw.diva.hscx, reg + (nr ? 0x40 : 0), data) #define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.diva.hscx_adr, \ cs->hw.diva.hscx, (nr ? 0x40 : 0), ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.diva.hscx_adr, \ cs->hw.diva.hscx, (nr ? 0x40 : 0), ptr, cnt) #include "hscx_irq.c" static irqreturn_t diva_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val, sval; u_long flags; int cnt = 5; spin_lock_irqsave(&cs->lock, flags); while (((sval = bytein(cs->hw.diva.ctrl)) & DIVA_IRQ_REQ) && cnt) { val = readreg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_ISTA + 0x40); if (val) hscx_int_main(cs, val); val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_ISTA); if (val) isac_interrupt(cs, val); cnt--; } if (!cnt) printk(KERN_WARNING "Diva: IRQ LOOP\n"); writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK, 0xFF); writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK + 0x40, 0xFF); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_MASK, 0xFF); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_MASK, 0x0); writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK, 0x0); writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK + 0x40, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t diva_irq_ipac_isa(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char ista, val; u_long flags; int icnt = 5; spin_lock_irqsave(&cs->lock, flags); ista = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ISTA); Start_IPACISA: if 
(cs->debug & L1_DEB_IPAC) debugl1(cs, "IPAC ISTA %02X", ista); if (ista & 0x0f) { val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, HSCX_ISTA + 0x40); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) hscx_int_main(cs, val); } if (ista & 0x20) { val = 0xfe & readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_ISTA + 0x80); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ISTA); if ((ista & 0x3f) && icnt) { icnt--; goto Start_IPACISA; } if (!icnt) printk(KERN_WARNING "DIVA IPAC IRQ LOOP\n"); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xFF); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static inline void MemwaitforCEC(struct IsdnCardState *cs, int hscx) { int to = 50; while ((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x04) && to) { udelay(1); to--; } if (!to) printk(KERN_WARNING "HiSax: waitforCEC timeout\n"); } static inline void MemwaitforXFW(struct IsdnCardState *cs, int hscx) { int to = 50; while (((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) { udelay(1); to--; } if (!to) printk(KERN_WARNING "HiSax: waitforXFW timeout\n"); } static inline void MemWriteHSCXCMDR(struct IsdnCardState *cs, int hscx, u_char data) { MemwaitforCEC(cs, hscx); MemWriteHSCX(cs, hscx, HSCX_CMDR, data); } static void Memhscx_empty_fifo(struct BCState *bcs, int count) { u_char *ptr; struct IsdnCardState *cs = bcs->cs; int cnt; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "hscx_empty_fifo"); if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hscx_empty_fifo: incoming packet too large"); MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80); bcs->hw.hscx.rcvidx = 0; return; } ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx; cnt = count; while (cnt--) *ptr++ = 
memreadreg(cs->hw.diva.cfg_reg, bcs->hw.hscx.hscx ? 0x40 : 0); MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80); ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx; bcs->hw.hscx.rcvidx += count; if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "hscx_empty_fifo %c cnt %d", bcs->hw.hscx.hscx ? 'B' : 'A', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void Memhscx_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int more, count, cnt; int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 64 : 32; u_char *ptr, *p; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "hscx_fill_fifo"); if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0; if (bcs->tx_skb->len > fifo_size) { more = !0; count = fifo_size; } else count = bcs->tx_skb->len; cnt = count; MemwaitforXFW(cs, bcs->hw.hscx.hscx); p = ptr = bcs->tx_skb->data; skb_pull(bcs->tx_skb, count); bcs->tx_cnt -= count; bcs->hw.hscx.count += count; while (cnt--) memwritereg(cs->hw.diva.cfg_reg, bcs->hw.hscx.hscx ? 0x40 : 0, *p++); MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, more ? 0x8 : 0xa); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "hscx_fill_fifo %c cnt %d", bcs->hw.hscx.hscx ? 'B' : 'A', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx) { u_char r; struct BCState *bcs = cs->bcs + hscx; struct sk_buff *skb; int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 
64 : 32; int count; if (!test_bit(BC_FLG_INIT, &bcs->Flag)) return; if (val & 0x80) { /* RME */ r = MemReadHSCX(cs, hscx, HSCX_RSTA); if ((r & 0xf0) != 0xa0) { if (!(r & 0x80)) if (cs->debug & L1_DEB_WARN) debugl1(cs, "HSCX invalid frame"); if ((r & 0x40) && bcs->mode) if (cs->debug & L1_DEB_WARN) debugl1(cs, "HSCX RDO mode=%d", bcs->mode); if (!(r & 0x20)) if (cs->debug & L1_DEB_WARN) debugl1(cs, "HSCX CRC error"); MemWriteHSCXCMDR(cs, hscx, 0x80); } else { count = MemReadHSCX(cs, hscx, HSCX_RBCL) & ( test_bit(HW_IPAC, &cs->HW_Flags) ? 0x3f : 0x1f); if (count == 0) count = fifo_size; Memhscx_empty_fifo(bcs, count); if ((count = bcs->hw.hscx.rcvidx - 1) > 0) { if (cs->debug & L1_DEB_HSCX_FIFO) debugl1(cs, "HX Frame %d", count); if (!(skb = dev_alloc_skb(count))) printk(KERN_WARNING "HSCX: receive out of memory\n"); else { memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count); skb_queue_tail(&bcs->rqueue, skb); } } } bcs->hw.hscx.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } if (val & 0x40) { /* RPF */ Memhscx_empty_fifo(bcs, fifo_size); if (bcs->mode == L1_MODE_TRANS) { /* receive audio data */ if (!(skb = dev_alloc_skb(fifo_size))) printk(KERN_WARNING "HiSax: receive out of memory\n"); else { memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size); skb_queue_tail(&bcs->rqueue, skb); } bcs->hw.hscx.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } } if (val & 0x10) { /* XPR */ if (bcs->tx_skb) { if (bcs->tx_skb->len) { Memhscx_fill_fifo(bcs); return; } else { if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->hw.hscx.count; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_irq(bcs->tx_skb); bcs->hw.hscx.count = 0; bcs->tx_skb = NULL; } } if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { bcs->hw.hscx.count = 0; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); Memhscx_fill_fifo(bcs); } else { 
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); schedule_event(bcs, B_XMTBUFREADY); } } } static inline void Memhscx_int_main(struct IsdnCardState *cs, u_char val) { u_char exval; struct BCState *bcs; if (val & 0x01) { // EXB bcs = cs->bcs + 1; exval = MemReadHSCX(cs, 1, HSCX_EXIR); if (exval & 0x40) { if (bcs->mode == 1) Memhscx_fill_fifo(bcs); else { /* Here we lost an TX interrupt, so * restart transmitting the whole frame. */ if (bcs->tx_skb) { skb_push(bcs->tx_skb, bcs->hw.hscx.count); bcs->tx_cnt += bcs->hw.hscx.count; bcs->hw.hscx.count = 0; } MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01); if (cs->debug & L1_DEB_WARN) debugl1(cs, "HSCX B EXIR %x Lost TX", exval); } } else if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX B EXIR %x", exval); } if (val & 0xf8) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX B interrupt %x", val); Memhscx_interrupt(cs, val, 1); } if (val & 0x02) { // EXA bcs = cs->bcs; exval = MemReadHSCX(cs, 0, HSCX_EXIR); if (exval & 0x40) { if (bcs->mode == L1_MODE_TRANS) Memhscx_fill_fifo(bcs); else { /* Here we lost an TX interrupt, so * restart transmitting the whole frame. 
*/ if (bcs->tx_skb) { skb_push(bcs->tx_skb, bcs->hw.hscx.count); bcs->tx_cnt += bcs->hw.hscx.count; bcs->hw.hscx.count = 0; } MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01); if (cs->debug & L1_DEB_WARN) debugl1(cs, "HSCX A EXIR %x Lost TX", exval); } } else if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX A EXIR %x", exval); } if (val & 0x04) { // ICA exval = MemReadHSCX(cs, 0, HSCX_ISTA); if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX A interrupt %x", exval); Memhscx_interrupt(cs, exval, 0); } } static irqreturn_t diva_irq_ipac_pci(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char ista, val; int icnt = 5; u_char *cfg; u_long flags; spin_lock_irqsave(&cs->lock, flags); cfg = (u_char *) cs->hw.diva.pci_cfg; val = *cfg; if (!(val & PITA_INT0_STATUS)) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; /* other shared IRQ */ } *cfg = PITA_INT0_STATUS; /* Reset pending INT0 */ ista = memreadreg(cs->hw.diva.cfg_reg, IPAC_ISTA); Start_IPACPCI: if (cs->debug & L1_DEB_IPAC) debugl1(cs, "IPAC ISTA %02X", ista); if (ista & 0x0f) { val = memreadreg(cs->hw.diva.cfg_reg, HSCX_ISTA + 0x40); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) Memhscx_int_main(cs, val); } if (ista & 0x20) { val = 0xfe & memreadreg(cs->hw.diva.cfg_reg, ISAC_ISTA + 0x80); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = memreadreg(cs->hw.diva.cfg_reg, IPAC_ISTA); if ((ista & 0x3f) && icnt) { icnt--; goto Start_IPACPCI; } if (!icnt) printk(KERN_WARNING "DIVA IPAC PCI IRQ LOOP\n"); memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xFF); memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t diva_irq_ipacx_pci(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_char *cfg; u_long flags; spin_lock_irqsave(&cs->lock, flags); cfg = (u_char *) cs->hw.diva.pci_cfg; val = *cfg; if (!(val 
& PITA_INT0_STATUS)) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; // other shared IRQ } interrupt_ipacx(cs); // handler for chip *cfg = PITA_INT0_STATUS; // Reset PLX interrupt spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_diva(struct IsdnCardState *cs) { int bytecnt; if ((cs->subtyp == DIVA_IPAC_PCI) || (cs->subtyp == DIVA_IPACX_PCI)) { u_int *cfg = (unsigned int *)cs->hw.diva.pci_cfg; *cfg = 0; /* disable INT0/1 */ *cfg = 2; /* reset pending INT0 */ if (cs->hw.diva.cfg_reg) iounmap((void *)cs->hw.diva.cfg_reg); if (cs->hw.diva.pci_cfg) iounmap((void *)cs->hw.diva.pci_cfg); return; } else if (cs->subtyp != DIVA_IPAC_ISA) { del_timer(&cs->hw.diva.tl); if (cs->hw.diva.cfg_reg) byteout(cs->hw.diva.ctrl, 0); /* LED off, Reset */ } if ((cs->subtyp == DIVA_ISA) || (cs->subtyp == DIVA_IPAC_ISA)) bytecnt = 8; else bytecnt = 32; if (cs->hw.diva.cfg_reg) { release_region(cs->hw.diva.cfg_reg, bytecnt); } } static void iounmap_diva(struct IsdnCardState *cs) { if ((cs->subtyp == DIVA_IPAC_PCI) || (cs->subtyp == DIVA_IPACX_PCI)) { if (cs->hw.diva.cfg_reg) { iounmap((void *)cs->hw.diva.cfg_reg); cs->hw.diva.cfg_reg = 0; } if (cs->hw.diva.pci_cfg) { iounmap((void *)cs->hw.diva.pci_cfg); cs->hw.diva.pci_cfg = 0; } } return; } static void reset_diva(struct IsdnCardState *cs) { if (cs->subtyp == DIVA_IPAC_ISA) { writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_POTA2, 0x20); mdelay(10); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_POTA2, 0x00); mdelay(10); writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xc0); } else if (cs->subtyp == DIVA_IPAC_PCI) { unsigned int *ireg = (unsigned int *)(cs->hw.diva.pci_cfg + PITA_MISC_REG); *ireg = PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE; mdelay(10); *ireg = PITA_PARA_MPX_MODE; mdelay(10); memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xc0); } else if (cs->subtyp == DIVA_IPACX_PCI) { unsigned int *ireg = (unsigned int *)(cs->hw.diva.pci_cfg + PITA_MISC_REG); *ireg = 
PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE; mdelay(10); *ireg = PITA_PARA_MPX_MODE | PITA_SER_SOFTRESET; mdelay(10); MemWriteISAC_IPACX(cs, IPACX_MASK, 0xff); // Interrupts off } else { /* DIVA 2.0 */ cs->hw.diva.ctrl_reg = 0; /* Reset On */ byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg); mdelay(10); cs->hw.diva.ctrl_reg |= DIVA_RESET; /* Reset Off */ byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg); mdelay(10); if (cs->subtyp == DIVA_ISA) cs->hw.diva.ctrl_reg |= DIVA_ISA_LED_A; else { /* Workaround PCI9060 */ byteout(cs->hw.diva.pci_cfg + 0x69, 9); cs->hw.diva.ctrl_reg |= DIVA_PCI_LED_A; } byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg); } } #define DIVA_ASSIGN 1 static void diva_led_handler(struct IsdnCardState *cs) { int blink = 0; if ((cs->subtyp == DIVA_IPAC_ISA) || (cs->subtyp == DIVA_IPAC_PCI) || (cs->subtyp == DIVA_IPACX_PCI)) return; del_timer(&cs->hw.diva.tl); if (cs->hw.diva.status & DIVA_ASSIGN) cs->hw.diva.ctrl_reg |= (DIVA_ISA == cs->subtyp) ? DIVA_ISA_LED_A : DIVA_PCI_LED_A; else { cs->hw.diva.ctrl_reg ^= (DIVA_ISA == cs->subtyp) ? DIVA_ISA_LED_A : DIVA_PCI_LED_A; blink = 250; } if (cs->hw.diva.status & 0xf000) cs->hw.diva.ctrl_reg |= (DIVA_ISA == cs->subtyp) ? DIVA_ISA_LED_B : DIVA_PCI_LED_B; else if (cs->hw.diva.status & 0x0f00) { cs->hw.diva.ctrl_reg ^= (DIVA_ISA == cs->subtyp) ? DIVA_ISA_LED_B : DIVA_PCI_LED_B; blink = 500; } else cs->hw.diva.ctrl_reg &= ~((DIVA_ISA == cs->subtyp) ? 
DIVA_ISA_LED_B : DIVA_PCI_LED_B); byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg); if (blink) { init_timer(&cs->hw.diva.tl); cs->hw.diva.tl.expires = jiffies + ((blink * HZ) / 1000); add_timer(&cs->hw.diva.tl); } } static int Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_int *ireg; u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_diva(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_diva(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); reset_diva(cs); if (cs->subtyp == DIVA_IPACX_PCI) { ireg = (unsigned int *)cs->hw.diva.pci_cfg; *ireg = PITA_INT0_ENABLE; init_ipacx(cs, 3); // init chip and enable interrupts spin_unlock_irqrestore(&cs->lock, flags); return (0); } if (cs->subtyp == DIVA_IPAC_PCI) { ireg = (unsigned int *)cs->hw.diva.pci_cfg; *ireg = PITA_INT0_ENABLE; } inithscxisac(cs, 3); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); case (MDL_REMOVE | REQUEST): cs->hw.diva.status = 0; break; case (MDL_ASSIGN | REQUEST): cs->hw.diva.status |= DIVA_ASSIGN; break; case MDL_INFO_SETUP: if ((long)arg) cs->hw.diva.status |= 0x0200; else cs->hw.diva.status |= 0x0100; break; case MDL_INFO_CONN: if ((long)arg) cs->hw.diva.status |= 0x2000; else cs->hw.diva.status |= 0x1000; break; case MDL_INFO_REL: if ((long)arg) { cs->hw.diva.status &= ~0x2000; cs->hw.diva.status &= ~0x0200; } else { cs->hw.diva.status &= ~0x1000; cs->hw.diva.status &= ~0x0100; } break; } if ((cs->subtyp != DIVA_IPAC_ISA) && (cs->subtyp != DIVA_IPAC_PCI) && (cs->subtyp != DIVA_IPACX_PCI)) { spin_lock_irqsave(&cs->lock, flags); diva_led_handler(cs); spin_unlock_irqrestore(&cs->lock, flags); } return (0); } static int setup_diva_common(struct IsdnCardState *cs) { int bytecnt; u_char val; if ((cs->subtyp == DIVA_ISA) || (cs->subtyp == DIVA_IPAC_ISA)) bytecnt = 8; else bytecnt = 32; printk(KERN_INFO "Diva: %s card configured at %#lx IRQ %d\n", (cs->subtyp == 
DIVA_PCI) ? "PCI" : (cs->subtyp == DIVA_ISA) ? "ISA" : (cs->subtyp == DIVA_IPAC_ISA) ? "IPAC ISA" : (cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI", cs->hw.diva.cfg_reg, cs->irq); if ((cs->subtyp == DIVA_IPAC_PCI) || (cs->subtyp == DIVA_IPACX_PCI) || (cs->subtyp == DIVA_PCI)) printk(KERN_INFO "Diva: %s space at %#lx\n", (cs->subtyp == DIVA_PCI) ? "PCI" : (cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI", cs->hw.diva.pci_cfg); if ((cs->subtyp != DIVA_IPAC_PCI) && (cs->subtyp != DIVA_IPACX_PCI)) { if (!request_region(cs->hw.diva.cfg_reg, bytecnt, "diva isdn")) { printk(KERN_WARNING "HiSax: %s config port %lx-%lx already in use\n", "diva", cs->hw.diva.cfg_reg, cs->hw.diva.cfg_reg + bytecnt); iounmap_diva(cs); return (0); } } cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Diva_card_msg; setup_isac(cs); if (cs->subtyp == DIVA_IPAC_ISA) { cs->readisac = &ReadISAC_IPAC; cs->writeisac = &WriteISAC_IPAC; cs->readisacfifo = &ReadISACfifo_IPAC; cs->writeisacfifo = &WriteISACfifo_IPAC; cs->irq_func = &diva_irq_ipac_isa; val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ID); printk(KERN_INFO "Diva: IPAC version %x\n", val); } else if (cs->subtyp == DIVA_IPAC_PCI) { cs->readisac = &MemReadISAC_IPAC; cs->writeisac = &MemWriteISAC_IPAC; cs->readisacfifo = &MemReadISACfifo_IPAC; cs->writeisacfifo = &MemWriteISACfifo_IPAC; cs->BC_Read_Reg = &MemReadHSCX; cs->BC_Write_Reg = &MemWriteHSCX; cs->BC_Send_Data = &Memhscx_fill_fifo; cs->irq_func = &diva_irq_ipac_pci; val = memreadreg(cs->hw.diva.cfg_reg, IPAC_ID); printk(KERN_INFO "Diva: IPAC version %x\n", val); } else if (cs->subtyp == DIVA_IPACX_PCI) { cs->readisac = &MemReadISAC_IPACX; cs->writeisac = &MemWriteISAC_IPACX; cs->readisacfifo = &MemReadISACfifo_IPACX; cs->writeisacfifo = &MemWriteISACfifo_IPACX; cs->BC_Read_Reg = &MemReadHSCX_IPACX; cs->BC_Write_Reg = &MemWriteHSCX_IPACX; cs->BC_Send_Data = NULL; // function located in ipacx module 
cs->irq_func = &diva_irq_ipacx_pci; printk(KERN_INFO "Diva: IPACX Design Id: %x\n", MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F); } else { /* DIVA 2.0 */ cs->hw.diva.tl.function = (void *) diva_led_handler; cs->hw.diva.tl.data = (long) cs; init_timer(&cs->hw.diva.tl); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->irq_func = &diva_interrupt; ISACVersion(cs, "Diva:"); if (HscxVersion(cs, "Diva:")) { printk(KERN_WARNING "Diva: wrong HSCX versions check IO address\n"); release_io_diva(cs); return (0); } } return (1); } #ifdef CONFIG_ISA static int setup_diva_isa(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; u_char val; if (!card->para[1]) return (-1); /* card not found; continue search */ cs->hw.diva.ctrl_reg = 0; cs->hw.diva.cfg_reg = card->para[1]; val = readreg(cs->hw.diva.cfg_reg + DIVA_IPAC_ADR, cs->hw.diva.cfg_reg + DIVA_IPAC_DATA, IPAC_ID); printk(KERN_INFO "Diva: IPAC version %x\n", val); if ((val == 1) || (val == 2)) { cs->subtyp = DIVA_IPAC_ISA; cs->hw.diva.ctrl = 0; cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA; cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA; cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR; cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR; test_and_set_bit(HW_IPAC, &cs->HW_Flags); } else { cs->subtyp = DIVA_ISA; cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL; cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA; cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA; cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR; cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR; } cs->irq = card->para[0]; return (1); /* card found */ } #else /* if !CONFIG_ISA */ static int setup_diva_isa(struct IsdnCard *card) { return (-1); /* card not found; continue search */ } #endif /* CONFIG_ISA */ #ifdef __ISAPNP__ static struct isapnp_device_id diva_ids[] = { { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51), ISAPNP_VENDOR('G', 'D', 'I'), 
ISAPNP_FUNCTION(0x51), (unsigned long) "Diva picola" },
    { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
      ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x51),
      (unsigned long) "Diva picola" },
    { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
      ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
      (unsigned long) "Diva 2.0" },
    { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
      ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x71),
      (unsigned long) "Diva 2.0" },
    { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
      ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
      (unsigned long) "Diva 2.01" },
    { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
      ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0xA1),
      (unsigned long) "Diva 2.01" },
    { 0, }
};

/* Probe cursor into diva_ids and the last matched PnP card; kept in
 * file scope so a repeated probe resumes where the last one stopped. */
static struct isapnp_device_id *ipid = &diva_ids[0];
static struct pnp_card *pnp_c = NULL;

/*
 * Probe for an ISAPnP Diva card.  Walks diva_ids, activates the first
 * matching PnP device and fills card->para[] / cs->hw.diva from its
 * I/O and IRQ resources.  Function id 0xA1 (Diva 2.01) is IPAC based.
 * Returns 1 if a card was found, 0 on a fatal PnP error, and -1 to
 * continue searching on other buses.
 */
static int setup_diva_isapnp(struct IsdnCard *card)
{
    struct IsdnCardState *cs = card->cs;
    struct pnp_dev *pnp_d;

    if (!isapnp_present())
        return (-1);    /* card not found; continue search */

    while (ipid->card_vendor) {
        if ((pnp_c = pnp_find_card(ipid->card_vendor,
                                   ipid->card_device, pnp_c))) {
            pnp_d = NULL;
            if ((pnp_d = pnp_find_dev(pnp_c,
                                      ipid->vendor, ipid->function, pnp_d))) {
                int err;

                printk(KERN_INFO "HiSax: %s detected\n",
                       (char *)ipid->driver_data);
                /* cycle the device to get a clean resource state */
                pnp_disable_dev(pnp_d);
                err = pnp_activate_dev(pnp_d);
                if (err < 0) {
                    printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
                           __func__, err);
                    return (0);
                }
                card->para[1] = pnp_port_start(pnp_d, 0);
                card->para[0] = pnp_irq(pnp_d, 0);
                if (!card->para[0] || !card->para[1]) {
                    printk(KERN_ERR
                           "Diva PnP:some resources are missing %ld/%lx\n",
                           card->para[0], card->para[1]);
                    pnp_disable_dev(pnp_d);
                    return (0);
                }
                cs->hw.diva.cfg_reg = card->para[1];
                cs->irq = card->para[0];
                if (ipid->function == ISAPNP_FUNCTION(0xA1)) {
                    /* Diva 2.01: IPAC based */
                    cs->subtyp = DIVA_IPAC_ISA;
                    cs->hw.diva.ctrl = 0;
                    cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA;
                    cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA;
                    cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR;
                    cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR;
                    test_and_set_bit(HW_IPAC, &cs->HW_Flags);
                } else {
                    /* discrete ISAC + HSCX */
                    cs->subtyp = DIVA_ISA;
                    cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL;
                    cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA;
                    cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA;
                    cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR;
                    cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR;
                }
                return (1);     /* card found */
            } else {
                printk(KERN_ERR "Diva PnP: PnP error card found, no device\n");
                return (0);
            }
        }
        ipid++;
        pnp_c = NULL;
    }

    return (-1);    /* card not found; continue search */
}

#else   /* if !ISAPNP */

static int setup_diva_isapnp(struct IsdnCard *card)
{
    return (-1);    /* card not found; continue search */
}

#endif  /* ISAPNP */

#ifdef CONFIG_PCI
/* Last matched PCI device per model, kept across calls so a repeated
 * probe continues scanning after the previous hit. */
static struct pci_dev *dev_diva = NULL;
static struct pci_dev *dev_diva_u = NULL;
static struct pci_dev *dev_diva201 = NULL;
static struct pci_dev *dev_diva202 = NULL;

/*
 * Probe for a PCI Diva card (Diva 2.0, 2.0U, 2.01/IPAC, 2.02/IPACX).
 * Enables the device, records its IRQ and maps the 4K register windows
 * for the memory-mapped IPAC(X) variants.  Returns 1 if a card was
 * found, 0 on error, and -1 to continue searching.
 */
static int setup_diva_pci(struct IsdnCard *card)
{
    struct IsdnCardState *cs = card->cs;

    cs->subtyp = 0;
    if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
                                          PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
        if (pci_enable_device(dev_diva))
            return (0);
        cs->subtyp = DIVA_PCI;
        cs->irq = dev_diva->irq;
        cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
    } else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
                                                   PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
        if (pci_enable_device(dev_diva_u))
            return (0);
        cs->subtyp = DIVA_PCI;
        cs->irq = dev_diva_u->irq;
        cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
    } else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
                                                    PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
        if (pci_enable_device(dev_diva201))
            return (0);
        cs->subtyp = DIVA_IPAC_PCI;
        cs->irq = dev_diva201->irq;
        /* BAR 0: PCI config window, BAR 1: chip registers */
        cs->hw.diva.pci_cfg =
            (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
        cs->hw.diva.cfg_reg =
            (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
    } else if ((dev_diva202 =
hisax_find_pci_device(PCI_VENDOR_ID_EICON,
                      PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
        if (pci_enable_device(dev_diva202))
            return (0);
        cs->subtyp = DIVA_IPACX_PCI;
        cs->irq = dev_diva202->irq;
        /* BAR 0: PCI config window, BAR 1: chip registers */
        cs->hw.diva.pci_cfg =
            (ulong) ioremap(pci_resource_start(dev_diva202, 0), 4096);
        cs->hw.diva.cfg_reg =
            (ulong) ioremap(pci_resource_start(dev_diva202, 1), 4096);
    } else {
        return (-1);    /* card not found; continue search */
    }

    /* sanity checks; undo the ioremap()s above on failure */
    if (!cs->irq) {
        printk(KERN_WARNING "Diva: No IRQ for PCI card found\n");
        iounmap_diva(cs);
        return (0);
    }

    if (!cs->hw.diva.cfg_reg) {
        printk(KERN_WARNING "Diva: No IO-Adr for PCI card found\n");
        iounmap_diva(cs);
        return (0);
    }
    cs->irq_flags |= IRQF_SHARED;

    if ((cs->subtyp == DIVA_IPAC_PCI) ||
        (cs->subtyp == DIVA_IPACX_PCI)) {
        /* memory-mapped variants: port-style addresses unused */
        cs->hw.diva.ctrl = 0;
        cs->hw.diva.isac = 0;
        cs->hw.diva.hscx = 0;
        cs->hw.diva.isac_adr = 0;
        cs->hw.diva.hscx_adr = 0;
        test_and_set_bit(HW_IPAC, &cs->HW_Flags);
    } else {
        /* plain Diva 2.0(U): register offsets from the I/O base */
        cs->hw.diva.ctrl = cs->hw.diva.cfg_reg + DIVA_PCI_CTRL;
        cs->hw.diva.isac = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_DATA;
        cs->hw.diva.hscx = cs->hw.diva.cfg_reg + DIVA_HSCX_DATA;
        cs->hw.diva.isac_adr = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_ADR;
        cs->hw.diva.hscx_adr = cs->hw.diva.cfg_reg + DIVA_HSCX_ADR;
    }

    return (1);     /* card found */
}

#else   /* if !CONFIG_PCI */

static int setup_diva_pci(struct IsdnCard *card)
{
    return (-1);    /* card not found; continue search */
}

#endif  /* CONFIG_PCI */

/*
 * Top-level probe entry point for Diehl/Eicon Diva cards.  Tries ISA,
 * then ISAPnP, then PCI.  Each bus probe returns > 0 (found), 0 (fatal
 * error, abort immediately) or < 0 (not found, try the next bus).
 * On success the common setup is delegated to setup_diva_common().
 */
int setup_diva(struct IsdnCard *card)
{
    int rc, have_card = 0;
    struct IsdnCardState *cs = card->cs;
    char tmp[64];

    strcpy(tmp, Diva_revision);
    printk(KERN_INFO "HiSax: Eicon.Diehl Diva driver Rev. %s\n",
           HiSax_getrev(tmp));
    if (cs->typ != ISDN_CTYPE_DIEHLDIVA)
        return (0);
    cs->hw.diva.status = 0;

    rc = setup_diva_isa(card);
    if (!rc)
        return rc;
    if (rc > 0) {
        have_card = 1;
        goto ready;
    }

    rc = setup_diva_isapnp(card);
    if (!rc)
        return rc;
    if (rc > 0) {
        have_card = 1;
        goto ready;
    }

    rc = setup_diva_pci(card);
    if (!rc)
        return rc;
    if (rc > 0)
        have_card = 1;

ready:
    if (!have_card) {
        printk(KERN_WARNING "Diva: No ISA, ISAPNP or PCI card found\n");
        return (0);
    }

    return setup_diva_common(card->cs);
}
gpl-2.0
lg-devs/android_kernel_lge_msm8994
drivers/staging/rtl8187se/r8180_rtl8225z2.c
2314
22222
/*
 * This is part of the rtl8180-sa2400 driver
 * released under the GPL (See file COPYING for details).
 * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
 *
 * This files contains programming code for the rtl8225
 * radio frontend.
 *
 * *Many* thanks to Realtek Corp. for their great support!
 */

#include "r8180_hw.h"
#include "r8180_rtl8225.h"
#include "r8180_93cx6.h"

#include "ieee80211/dot11d.h"

/*
 * Bit-bang a 16-bit value into RF register 'adr' over the 3-wire GPIO
 * interface: take SW control of the RF pins, strobe the frame (4-bit
 * address in the low nibble, data in the upper 12 bits of bangdata),
 * then hand the pins back.  Two clocked writes per bit, MSB first.
 */
static void write_rtl8225(struct net_device *dev, u8 adr, u16 data)
{
    int i;
    u16 out, select;
    u8 bit;
    u32 bangdata = (data << 4) | (adr & 0xf);

    out = read_nic_word(dev, RFPinsOutput) & 0xfff3;

    write_nic_word(dev, RFPinsEnable,
                   (read_nic_word(dev, RFPinsEnable) | 0x7));

    select = read_nic_word(dev, RFPinsSelect);

    write_nic_word(dev, RFPinsSelect, select | 0x7 | SW_CONTROL_GPIO);

    force_pci_posting(dev);
    udelay(10);

    /* frame start: pulse enable */
    write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);

    force_pci_posting(dev);
    udelay(2);

    write_nic_word(dev, RFPinsOutput, out);

    force_pci_posting(dev);
    udelay(10);

    /* shift the 16 bits out, two per loop iteration (note inner i--) */
    for (i = 15; i >= 0; i--) {
        bit = (bangdata & (1 << i)) >> i;

        write_nic_word(dev, RFPinsOutput, bit | out);
        write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
        write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);

        i--;
        bit = (bangdata & (1 << i)) >> i;

        write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
        write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
        write_nic_word(dev, RFPinsOutput, bit | out);
    }

    /* frame end: raise enable again and release SW pin control */
    write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);

    force_pci_posting(dev);
    udelay(10);

    write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);
    write_nic_word(dev, RFPinsSelect, select | SW_CONTROL_GPIO);

    rtl8185_rf_pins_enable(dev);
}

/* AGC table, loaded into the OFDM PHY by rtl8225z2_rf_init() (128 entries) */
static const u8 rtl8225_agc[] = {
    0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e,
    0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96,
    0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e,
    0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86,
    0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e,
    0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36,
    0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e,
    0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26,
    0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e,
    0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16,
    0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
    0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06,
    0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
};

/* RF register 7 value per channel number (index 0 unused) */
static const u32 rtl8225_chan[] = {
    0,
    0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
    0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
};

/* OFDM gain triplets (regs 0x0b/0x1b/0x1d) for 2.4 GHz B/G mode */
static const u8 rtl8225z2_gain_bg[] = {
    0x23, 0x15, 0xa5, /* -82-1dBm */
    0x23, 0x15, 0xb5, /* -82-2dBm */
    0x23, 0x15, 0xc5, /* -82-3dBm */
    0x33, 0x15, 0xc5, /* -78dBm */
    0x43, 0x15, 0xc5, /* -74dBm */
    0x53, 0x15, 0xc5, /* -70dBm */
    0x63, 0x15, 0xc5, /* -66dBm */
};

/* OFDM gain triplets for A mode */
static const u8 rtl8225z2_gain_a[] = {
    0x13, 0x27, 0x5a, /* -82dBm */
    0x23, 0x23, 0x58, /* -82dBm */
    0x33, 0x1f, 0x56, /* -82dBm */
    0x43, 0x1b, 0x54, /* -78dBm */
    0x53, 0x17, 0x51, /* -74dBm */
    0x63, 0x24, 0x4f, /* -70dBm */
    0x73, 0x0f, 0x4c, /* -66dBm */
};

/* RX gain table, written to RF reg 2 for each index at init time */
static const u16 rtl8225z2_rxgain[] = {
    0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
    0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
    0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
    0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
    0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
    0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
    0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
    0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
    0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
    0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
    0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
    0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb
};

/*
 * Program the OFDM gain registers from the B/G or A triplet table,
 * selected by the current ieee80211 mode.
 */
void rtl8225z2_set_gain(struct
net_device *dev, short gain)
{
    const u8 *rtl8225_gain;
    struct r8180_priv *priv = ieee80211_priv(dev);
    u8 mode = priv->ieee80211->mode;

    if (mode == IEEE_B || mode == IEEE_G)
        rtl8225_gain = rtl8225z2_gain_bg;
    else
        rtl8225_gain = rtl8225z2_gain_a;

    /* each gain step is a triplet for OFDM regs 0x0b/0x1b/0x1d */
    write_phy_ofdm(dev, 0x0b, rtl8225_gain[gain * 3]);
    write_phy_ofdm(dev, 0x1b, rtl8225_gain[gain * 3 + 1]);
    write_phy_ofdm(dev, 0x1d, rtl8225_gain[gain * 3 + 2]);
    write_phy_ofdm(dev, 0x21, 0x37);
}

/*
 * Bit-bang a read of RF register 'adr' over the 3-wire GPIO bus:
 * clock out the 5-bit address (the R/W flag is raised after the third
 * bit), switch the data pin to HW control, then clock in 12 data bits.
 */
static u32 read_rtl8225(struct net_device *dev, u8 adr)
{
    u32 data2Write = ((u32)(adr & 0x1f)) << 27;
    u32 dataRead;
    u32 mask;
    u16 oval, oval2, oval3, tmp;
    int i;
    short bit, rw;
    u8 wLength = 6;
    u8 rLength = 12;
    u8 low2high = 0;

    oval = read_nic_word(dev, RFPinsOutput);
    oval2 = read_nic_word(dev, RFPinsEnable);
    oval3 = read_nic_word(dev, RFPinsSelect);

    write_nic_word(dev, RFPinsEnable, (oval2|0xf));
    write_nic_word(dev, RFPinsSelect, (oval3|0xf));

    dataRead = 0;

    oval &= ~0xf;

    write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN);
    udelay(4);

    write_nic_word(dev, RFPinsOutput, oval);
    udelay(5);

    rw = 0;

    mask = (low2high) ? 0x01 : (((u32)0x01)<<(32-1));
    for (i = 0; i < wLength/2; i++) {
        bit = ((data2Write&mask) != 0) ? 1 : 0;
        write_nic_word(dev, RFPinsOutput, bit | oval | rw);
        udelay(1);

        write_nic_word(dev, RFPinsOutput,
                       bit | oval | BB_HOST_BANG_CLK | rw);
        udelay(2);
        write_nic_word(dev, RFPinsOutput,
                       bit | oval | BB_HOST_BANG_CLK | rw);
        udelay(2);

        mask = (low2high) ? (mask<<1) : (mask>>1);

        if (i == 2) {
            /* address clocked out: raise R/W for the read phase */
            rw = BB_HOST_BANG_RW;
            write_nic_word(dev, RFPinsOutput,
                           bit | oval | BB_HOST_BANG_CLK | rw);
            udelay(2);
            write_nic_word(dev, RFPinsOutput, bit | oval | rw);
            udelay(2);
            break;
        }

        bit = ((data2Write&mask) != 0) ? 1 : 0;

        write_nic_word(dev, RFPinsOutput,
                       oval | bit | rw | BB_HOST_BANG_CLK);
        udelay(2);
        write_nic_word(dev, RFPinsOutput,
                       oval | bit | rw | BB_HOST_BANG_CLK);
        udelay(2);

        write_nic_word(dev, RFPinsOutput, oval | bit | rw);
        udelay(1);

        mask = (low2high) ? (mask<<1) : (mask>>1);
    }

    write_nic_word(dev, RFPinsOutput, rw|oval);
    udelay(2);

    mask = (low2high) ? 0x01 : (((u32)0x01) << (12-1));

    /*
     * We must set data pin to HW controlled, otherwise RF can't driver it
     * and value RF register won't be able to read back properly.
     */
    write_nic_word(dev, RFPinsEnable, (oval2 & (~0x01)));

    for (i = 0; i < rLength; i++) {
        write_nic_word(dev, RFPinsOutput, rw|oval);
        udelay(1);

        write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
        udelay(2);
        write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
        udelay(2);
        write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
        udelay(2);

        /* sample the data bit returned by the RF chip */
        tmp = read_nic_word(dev, RFPinsInput);

        dataRead |= (tmp & BB_HOST_BANG_CLK ? mask : 0);

        write_nic_word(dev, RFPinsOutput, (rw|oval));
        udelay(2);

        mask = (low2high) ? (mask<<1) : (mask>>1);
    }

    write_nic_word(dev, RFPinsOutput,
                   BB_HOST_BANG_EN | BB_HOST_BANG_RW | oval);
    udelay(2);

    write_nic_word(dev, RFPinsEnable, oval2);
    write_nic_word(dev, RFPinsSelect, oval3);   /* Set To SW Switch */
    write_nic_word(dev, RFPinsOutput, 0x3a0);

    return dataRead;
}

/*
 * Power the radio down: RF reg 4 to standby, then switch both
 * analog-parameter registers to their OFF values.
 */
void rtl8225z2_rf_close(struct net_device *dev)
{
    RF_WriteReg(dev, 0x4, 0x1f);

    force_pci_posting(dev);
    mdelay(1);

    rtl8180_set_anaparam(dev, RTL8225z2_ANAPARAM_OFF);
    rtl8185_set_anaparam2(dev, RTL8225z2_ANAPARAM2_OFF);
}

/*
 * Map dBm into Tx power index according to current HW model, for example,
 * RF and PA, and current wireless mode.
 */
s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
                 s32 PowerInDbm)
{
    bool bUseDefault = true;
    s8 TxPwrIdx = 0;

    /*
     * OFDM Power in dBm = Index * 0.5 + 0
     * CCK Power in dBm = Index * 0.25 + 13
     */
    s32 tmp = 0;

    if (WirelessMode == WIRELESS_MODE_G) {
        bUseDefault = false;
        tmp = (2 * PowerInDbm);

        if (tmp < 0)
            TxPwrIdx = 0;
        else if (tmp > 40) /* 40 means 20 dBm. */
            TxPwrIdx = 40;
        else
            TxPwrIdx = (s8)tmp;
    } else if (WirelessMode == WIRELESS_MODE_B) {
        bUseDefault = false;
        tmp = (4 * PowerInDbm) - 52;

        if (tmp < 0)
            TxPwrIdx = 0;
        else if (tmp > 28) /* 28 means 20 dBm. */
            TxPwrIdx = 28;
        else
            TxPwrIdx = (s8)tmp;
    }

    /*
     * TRUE if we want to use a default implementation.
     * We shall set it to FALSE when we have exact translation formula
     * for target IC. 070622, by rcnjko.
     */
    if (bUseDefault) {
        if (PowerInDbm < 0)
            TxPwrIdx = 0;
        else if (PowerInDbm > 35)
            TxPwrIdx = 35;
        else
            TxPwrIdx = (u8)PowerInDbm;
    }

    return TxPwrIdx;
}

/*
 * Program CCK and OFDM Tx AGC for channel 'ch' from the per-channel
 * power tables, optionally clamped by the 802.11d regulatory maximum.
 */
void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch)
{
    struct r8180_priv *priv = ieee80211_priv(dev);
    u8 max_cck_power_level;
    u8 max_ofdm_power_level;
    u8 min_ofdm_power_level;
    char cck_power_level = (char)(0xff & priv->chtxpwr[ch]);
    char ofdm_power_level = (char)(0xff & priv->chtxpwr_ofdm[ch]);

    if (IS_DOT11D_ENABLE(priv->ieee80211) &&
        IS_DOT11D_STATE_DONE(priv->ieee80211)) {
        /* clamp to the regulatory limit for the current country */
        u8 MaxTxPwrInDbm = DOT11D_GetMaxTxPwrInDbm(priv->ieee80211, ch);
        u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B, MaxTxPwrInDbm);
        u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G, MaxTxPwrInDbm);

        if (cck_power_level > CckMaxPwrIdx)
            cck_power_level = CckMaxPwrIdx;

        if (ofdm_power_level > OfdmMaxPwrIdx)
            ofdm_power_level = OfdmMaxPwrIdx;
    }

    /* NOTE(review): the three locals below are set but never used here */
    max_cck_power_level = 15;
    max_ofdm_power_level = 25;
    min_ofdm_power_level = 10;

    if (cck_power_level > 35)
        cck_power_level = 35;

    write_nic_byte(dev, CCK_TXAGC, cck_power_level);
    force_pci_posting(dev);
    mdelay(1);

    if (ofdm_power_level > 35)
        ofdm_power_level = 35;

    if (priv->up == 0) {
        write_phy_ofdm(dev, 2, 0x42);
        write_phy_ofdm(dev, 5, 0x00);
        write_phy_ofdm(dev, 6, 0x40);
        write_phy_ofdm(dev, 7, 0x00);
        write_phy_ofdm(dev, 8, 0x40);
    }

    write_nic_byte(dev, OFDM_TXAGC, ofdm_power_level);

    /* NOTE(review): first `if` is not chained with the following
     * if/else, so for levels <= 11 regs 0x07/0x09 are written twice
     * (0x5c then 0x54) — preserved from the original code; confirm
     * against vendor reference before changing. */
    if (ofdm_power_level <= 11) {
        write_phy_ofdm(dev, 0x07, 0x5c);
        write_phy_ofdm(dev, 0x09, 0x5c);
    }

    if (ofdm_power_level <= 17) {
        write_phy_ofdm(dev, 0x07, 0x54);
        write_phy_ofdm(dev, 0x09, 0x54);
    } else {
        write_phy_ofdm(dev, 0x07, 0x50);
        write_phy_ofdm(dev, 0x09, 0x50);
    }

    force_pci_posting(dev);
    mdelay(1);
}

/*
 * Tune to channel 'ch': set Tx power first, then write the channel
 * word to RF reg 7, re-writing once if the readback does not match.
 */
void rtl8225z2_rf_set_chan(struct net_device *dev, short ch)
{
    rtl8225z2_SetTXPowerLevel(dev, ch);

    RF_WriteReg(dev, 0x7, rtl8225_chan[ch]);

    if ((RF_ReadReg(dev, 0x7) & 0x0F80) != rtl8225_chan[ch])
        RF_WriteReg(dev, 0x7, rtl8225_chan[ch]);

    mdelay(1);

    force_pci_posting(dev);
    mdelay(10);
}

/* Put the RF control pins into a known state for host (PCI) control. */
static void rtl8225_host_pci_init(struct net_device *dev)
{
    write_nic_word(dev, RFPinsOutput, 0x480);

    rtl8185_rf_pins_enable(dev);

    write_nic_word(dev, RFPinsSelect, 0x88 | SW_CONTROL_GPIO);

    write_nic_byte(dev, GP_ENABLE, 0);

    force_pci_posting(dev);
    mdelay(200);

    /* bit 6 is for RF on/off detection */
    write_nic_word(dev, GP_ENABLE, 0xff & (~(1 << 6)));
}

/*
 * Full radio bring-up: pin init, RF register programming, RX gain and
 * AGC table loads, OFDM/CCK PHY configuration, then tune to channel 1.
 */
void rtl8225z2_rf_init(struct net_device *dev)
{
    struct r8180_priv *priv = ieee80211_priv(dev);
    int i;
    short channel = 1;
    u16 brsr;
    u32 data;

    priv->chan = channel;

    rtl8225_host_pci_init(dev);

    write_nic_dword(dev, RF_TIMING, 0x000a8008);

    /* NOTE(review): brsr is saved here but never restored below —
     * preserved from the original code */
    brsr = read_nic_word(dev, BRSR);

    write_nic_word(dev, BRSR, 0xffff);

    write_nic_dword(dev, RF_PARA, 0x100044);

    rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
    write_nic_byte(dev, CONFIG3, 0x44);
    rtl8180_set_mode(dev, EPROM_CMD_NORMAL);

    rtl8185_rf_pins_enable(dev);

    /* base RF register programming */
    write_rtl8225(dev, 0x0, 0x2bf); mdelay(1);
    write_rtl8225(dev, 0x1, 0xee0); mdelay(1);
    write_rtl8225(dev, 0x2, 0x44d); mdelay(1);
    write_rtl8225(dev, 0x3, 0x441); mdelay(1);
    write_rtl8225(dev, 0x4, 0x8c3); mdelay(1);
    write_rtl8225(dev, 0x5, 0xc72); mdelay(1);
    write_rtl8225(dev, 0x6, 0xe6); mdelay(1);
    write_rtl8225(dev, 0x7, rtl8225_chan[channel]); mdelay(1);
    write_rtl8225(dev, 0x8, 0x3f); mdelay(1);
    write_rtl8225(dev, 0x9, 0x335); mdelay(1);
    write_rtl8225(dev, 0xa, 0x9d4); mdelay(1);
    write_rtl8225(dev, 0xb, 0x7bb); mdelay(1);
    write_rtl8225(dev, 0xc, 0x850); mdelay(1);
    write_rtl8225(dev, 0xd, 0xcdf); mdelay(1);
    write_rtl8225(dev, 0xe, 0x2b); mdelay(1);
    write_rtl8225(dev, 0xf, 0x114); mdelay(100);

    write_rtl8225(dev, 0x0, 0x1b7);

    /* load the RX gain table (reg 1 = index, reg 2 = value) */
    for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
        write_rtl8225(dev, 0x1, i + 1);
        write_rtl8225(dev, 0x2, rtl8225z2_rxgain[i]);
    }

    write_rtl8225(dev, 0x3, 0x80);
    write_rtl8225(dev, 0x5, 0x4);

    write_rtl8225(dev, 0x0, 0xb7);
write_rtl8225(dev, 0x2, 0xc4d);

    /* FIXME!! rtl8187 we have to check if calibrarion
     * is successful and eventually cal. again (repeat
     * the two write on reg 2)
     */
    data = read_rtl8225(dev, 6);
    if (!(data & 0x00000080)) {
        /* retry the calibration sequence once */
        write_rtl8225(dev, 0x02, 0x0c4d);
        force_pci_posting(dev); mdelay(200);
        write_rtl8225(dev, 0x02, 0x044d);
        force_pci_posting(dev); mdelay(100);
        data = read_rtl8225(dev, 6);
        if (!(data & 0x00000080))
            DMESGW("RF Calibration Failed!!!!\n");
    }

    mdelay(200);

    write_rtl8225(dev, 0x0, 0x2bf);

    /* load the AGC table through OFDM regs 0xb (value) / 0xa (index) */
    for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
        write_phy_ofdm(dev, 0xb, rtl8225_agc[i]);
        mdelay(1);

        /* enable writing AGC table */
        write_phy_ofdm(dev, 0xa, i + 0x80);
        mdelay(1);
    }

    force_pci_posting(dev);
    mdelay(1);

    /* OFDM PHY register setup */
    write_phy_ofdm(dev, 0x00, 0x01); mdelay(1);
    write_phy_ofdm(dev, 0x01, 0x02); mdelay(1);
    write_phy_ofdm(dev, 0x02, 0x62); mdelay(1);
    write_phy_ofdm(dev, 0x03, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x04, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x05, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x06, 0x40); mdelay(1);
    write_phy_ofdm(dev, 0x07, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x08, 0x40); mdelay(1);
    write_phy_ofdm(dev, 0x09, 0xfe); mdelay(1);
    write_phy_ofdm(dev, 0x0a, 0x08); mdelay(1);
    write_phy_ofdm(dev, 0x0b, 0x80); mdelay(1);
    write_phy_ofdm(dev, 0x0c, 0x01); mdelay(1);
    write_phy_ofdm(dev, 0x0d, 0x43);
    write_phy_ofdm(dev, 0x0e, 0xd3); mdelay(1);
    write_phy_ofdm(dev, 0x0f, 0x38); mdelay(1);
    write_phy_ofdm(dev, 0x10, 0x84); mdelay(1);
    write_phy_ofdm(dev, 0x11, 0x07); mdelay(1);
    write_phy_ofdm(dev, 0x12, 0x20); mdelay(1);
    write_phy_ofdm(dev, 0x13, 0x20); mdelay(1);
    write_phy_ofdm(dev, 0x14, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x15, 0x40); mdelay(1);
    write_phy_ofdm(dev, 0x16, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x17, 0x40); mdelay(1);
    write_phy_ofdm(dev, 0x18, 0xef); mdelay(1);
    write_phy_ofdm(dev, 0x19, 0x19); mdelay(1);
    write_phy_ofdm(dev, 0x1a, 0x20); mdelay(1);
    write_phy_ofdm(dev, 0x1b, 0x15); mdelay(1);
    write_phy_ofdm(dev, 0x1c, 0x04); mdelay(1);
    write_phy_ofdm(dev, 0x1d, 0xc5); mdelay(1);
    write_phy_ofdm(dev, 0x1e, 0x95); mdelay(1);
    write_phy_ofdm(dev, 0x1f, 0x75); mdelay(1);
    write_phy_ofdm(dev, 0x20, 0x1f); mdelay(1);
    write_phy_ofdm(dev, 0x21, 0x17); mdelay(1);
    write_phy_ofdm(dev, 0x22, 0x16); mdelay(1);
    write_phy_ofdm(dev, 0x23, 0x80); mdelay(1); /* FIXME maybe not needed */
    write_phy_ofdm(dev, 0x24, 0x46); mdelay(1);
    write_phy_ofdm(dev, 0x25, 0x00); mdelay(1);
    write_phy_ofdm(dev, 0x26, 0x90); mdelay(1);
    write_phy_ofdm(dev, 0x27, 0x88); mdelay(1);

    rtl8225z2_set_gain(dev, 4);

    /* CCK PHY register setup */
    write_phy_cck(dev, 0x0, 0x98); mdelay(1);
    write_phy_cck(dev, 0x3, 0x20); mdelay(1);
    write_phy_cck(dev, 0x4, 0x7e); mdelay(1);
    write_phy_cck(dev, 0x5, 0x12); mdelay(1);
    write_phy_cck(dev, 0x6, 0xfc); mdelay(1);
    write_phy_cck(dev, 0x7, 0x78); mdelay(1);
    write_phy_cck(dev, 0x8, 0x2e); mdelay(1);
    write_phy_cck(dev, 0x10, 0x93); mdelay(1);
    write_phy_cck(dev, 0x11, 0x88); mdelay(1);
    write_phy_cck(dev, 0x12, 0x47); mdelay(1);
    write_phy_cck(dev, 0x13, 0xd0);
    write_phy_cck(dev, 0x19, 0x00);
    write_phy_cck(dev, 0x1a, 0xa0);
    write_phy_cck(dev, 0x1b, 0x08);
    write_phy_cck(dev, 0x40, 0x86); /* CCK Carrier Sense Threshold */
    write_phy_cck(dev, 0x41, 0x8d); mdelay(1);
    write_phy_cck(dev, 0x42, 0x15); mdelay(1);
    write_phy_cck(dev, 0x43, 0x18); mdelay(1);
    write_phy_cck(dev, 0x44, 0x36); mdelay(1);
    write_phy_cck(dev, 0x45, 0x35); mdelay(1);
    write_phy_cck(dev, 0x46, 0x2e); mdelay(1);
    write_phy_cck(dev, 0x47, 0x25); mdelay(1);
    write_phy_cck(dev, 0x48, 0x1c); mdelay(1);
    write_phy_cck(dev, 0x49, 0x12); mdelay(1);
    write_phy_cck(dev, 0x4a, 0x09); mdelay(1);
    write_phy_cck(dev, 0x4b, 0x04); mdelay(1);
    write_phy_cck(dev, 0x4c, 0x05); mdelay(1);

    write_nic_byte(dev, 0x5b, 0x0d); mdelay(1);

    rtl8225z2_SetTXPowerLevel(dev, channel);

    /* RX antenna default to A */
    write_phy_cck(dev, 0x11, 0x9b); mdelay(1);      /* B: 0xDB */
    write_phy_ofdm(dev, 0x26, 0x90); mdelay(1);     /* B: 0x10 */

    rtl8185_tx_antenna(dev, 0x03);                  /* B: 0x00 */

    /* switch to high-speed 3-wire
     * last digit. 2 for both cck and ofdm
     */
    write_nic_dword(dev, 0x94, 0x15c00002);
    rtl8185_rf_pins_enable(dev);

    rtl8225z2_rf_set_chan(dev, priv->chan);
}

/*
 * Reconfigure RF/PHY for the current ieee80211 mode: A-mode branch
 * versus B/G branch differ only in RF reg 5, RF_PARA, OFDM reg 0 and
 * the 3-wire speed word at 0x94.
 */
void rtl8225z2_rf_set_mode(struct net_device *dev)
{
    struct r8180_priv *priv = ieee80211_priv(dev);

    if (priv->ieee80211->mode == IEEE_A) {
        write_rtl8225(dev, 0x5, 0x1865);
        write_nic_dword(dev, RF_PARA, 0x10084);
        write_nic_dword(dev, RF_TIMING, 0xa8008);
        write_phy_ofdm(dev, 0x0, 0x0);
        write_phy_ofdm(dev, 0xa, 0x6);
        write_phy_ofdm(dev, 0xb, 0x99);
        write_phy_ofdm(dev, 0xf, 0x20);
        write_phy_ofdm(dev, 0x11, 0x7);

        rtl8225z2_set_gain(dev, 4);

        write_phy_ofdm(dev, 0x15, 0x40);
        write_phy_ofdm(dev, 0x17, 0x40);

        write_nic_dword(dev, 0x94, 0x10000000);
    } else {
        write_rtl8225(dev, 0x5, 0x1864);
        write_nic_dword(dev, RF_PARA, 0x10044);
        write_nic_dword(dev, RF_TIMING, 0xa8008);
        write_phy_ofdm(dev, 0x0, 0x1);
        write_phy_ofdm(dev, 0xa, 0x6);
        write_phy_ofdm(dev, 0xb, 0x99);
        write_phy_ofdm(dev, 0xf, 0x20);
        write_phy_ofdm(dev, 0x11, 0x7);

        rtl8225z2_set_gain(dev, 4);

        write_phy_ofdm(dev, 0x15, 0x40);
        write_phy_ofdm(dev, 0x17, 0x40);

        write_nic_dword(dev, 0x94, 0x04000002);
    }
}

/* polling limits for the power-state transitions below */
#define MAX_DOZE_WAITING_TIMES_85B 20
#define MAX_POLLING_24F_TIMES_87SE 10
#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5

/*
 * Switch the Zebra RF front end between On / Sleep / Off.  Unlocks the
 * config registers (CR9346 / CONFIG3) around the transition, drains
 * the Tx queues before powering down, and records the resulting state
 * in priv->eRFPowerState.  Returns false if a transition was already
 * in progress or the Tx queues could not be drained in time.
 */
bool SetZebraRFPowerState8185(struct net_device *dev,
                              RT_RF_POWER_STATE eRFPowerState)
{
    struct r8180_priv *priv = ieee80211_priv(dev);
    u8 btCR9346, btConfig3;
    bool bActionAllowed = true, bTurnOffBB = true;
    u8 u1bTmp;
    int i;
    bool bResult = true;
    u8 QueueID;

    if (priv->SetRFPowerStateInProgress == true)
        return false;

    priv->SetRFPowerStateInProgress = true;

    /* unlock config register writes */
    btCR9346 = read_nic_byte(dev, CR9346);
    write_nic_byte(dev, CR9346, (btCR9346 | 0xC0));

    btConfig3 = read_nic_byte(dev, CONFIG3);
    write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En));

    switch (eRFPowerState) {
    case eRfOn:
        write_nic_word(dev, 0x37C, 0x00EC);

        /* turn on AFE */
        write_nic_byte(dev, 0x54, 0x00);
        write_nic_byte(dev, 0x62, 0x00);

        /* turn on RF */
        RF_WriteReg(dev, 0x0, 0x009f); udelay(500);
RF_WriteReg(dev, 0x4, 0x0972); udelay(500);

        /* turn on RF again */
        RF_WriteReg(dev, 0x0, 0x009f); udelay(500);
        RF_WriteReg(dev, 0x4, 0x0972); udelay(500);

        /* turn on BB */
        write_phy_ofdm(dev, 0x10, 0x40);
        write_phy_ofdm(dev, 0x12, 0x40);

        /* Avoid power down at init time. */
        write_nic_byte(dev, CONFIG4, priv->RFProgType);

        u1bTmp = read_nic_byte(dev, 0x24E);
        write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6))));
        break;
    case eRfSleep:
        /* wait for all Tx queues to drain, bounded by the LPS limit */
        for (QueueID = 0, i = 0; QueueID < 6;) {
            if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
                QueueID++;
                continue;
            } else {
                priv->TxPollingTimes++;
                if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
                    bActionAllowed = false;
                    break;
                } else
                    udelay(10);
            }
        }

        if (bActionAllowed) {
            /* turn off BB RXIQ matrix to cut off rx signal */
            write_phy_ofdm(dev, 0x10, 0x00);
            write_phy_ofdm(dev, 0x12, 0x00);

            /* turn off RF */
            RF_WriteReg(dev, 0x4, 0x0000);
            RF_WriteReg(dev, 0x0, 0x0000);

            /* turn off AFE except PLL */
            write_nic_byte(dev, 0x62, 0xff);
            write_nic_byte(dev, 0x54, 0xec);

            mdelay(1);

            {
                /* poll 0x24F until the hardware reports idle */
                int i = 0;
                while (true) {
                    u8 tmp24F = read_nic_byte(dev, 0x24f);

                    if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
                        bTurnOffBB = true;
                        break;
                    } else {
                        udelay(10);
                        i++;
                        priv->TxPollingTimes++;

                        if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
                            bTurnOffBB = false;
                            break;
                        } else
                            udelay(10);
                    }
                }
            }

            if (bTurnOffBB) {
                /* turn off BB */
                u1bTmp = read_nic_byte(dev, 0x24E);
                write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));

                /* turn off AFE PLL */
                write_nic_byte(dev, 0x54, 0xFC);
                write_nic_word(dev, 0x37C, 0x00FC);
            }
        }
        break;
    case eRfOff:
        /* drain Tx queues, bounded by the doze limit */
        for (QueueID = 0, i = 0; QueueID < 6;) {
            if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
                QueueID++;
                continue;
            } else {
                udelay(10);
                i++;
            }

            if (i >= MAX_DOZE_WAITING_TIMES_85B)
                break;
        }

        /* turn off BB RXIQ matrix to cut off rx signal */
        write_phy_ofdm(dev, 0x10, 0x00);
        write_phy_ofdm(dev, 0x12, 0x00);

        /* turn off RF */
        RF_WriteReg(dev, 0x4, 0x0000);
        RF_WriteReg(dev, 0x0, 0x0000);

        /* turn off AFE except PLL */
        write_nic_byte(dev, 0x62, 0xff);
        write_nic_byte(dev, 0x54, 0xec);

        mdelay(1);

        {
            /* poll 0x24F until the hardware reports idle */
            int i = 0;
            while (true) {
                u8 tmp24F = read_nic_byte(dev, 0x24f);

                if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
                    bTurnOffBB = true;
                    break;
                } else {
                    bTurnOffBB = false;
                    udelay(10);
                    i++;
                }

                if (i > MAX_POLLING_24F_TIMES_87SE)
                    break;
            }
        }

        if (bTurnOffBB) {
            /* turn off BB */
            u1bTmp = read_nic_byte(dev, 0x24E);
            write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));

            /* turn off AFE PLL (80M) */
            write_nic_byte(dev, 0x54, 0xFC);
            write_nic_word(dev, 0x37C, 0x00FC);
        }
        break;
    }

    /* re-lock the config registers */
    btConfig3 &= ~(CONFIG3_PARM_En);
    write_nic_byte(dev, CONFIG3, btConfig3);

    btCR9346 &= ~(0xC0);
    write_nic_byte(dev, CR9346, btCR9346);

    if (bResult && bActionAllowed)
        priv->eRFPowerState = eRFPowerState;

    priv->SetRFPowerStateInProgress = false;

    return bResult && bActionAllowed;
}

/* Enter low-power sleep through the RF state machine. */
void rtl8225z4_rf_sleep(struct net_device *dev)
{
    MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS);
}

/* Wake the radio back up through the RF state machine. */
void rtl8225z4_rf_wakeup(struct net_device *dev)
{
    MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS);
}
gpl-2.0
W4TCH0UT/zz_lettuce
drivers/scsi/a2091.c
2314
6674
#include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zorro.h> #include <linux/module.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include "scsi.h" #include "wd33c93.h" #include "a2091.h" struct a2091_hostdata { struct WD33C93_hostdata wh; struct a2091_scsiregs *regs; }; static irqreturn_t a2091_intr(int irq, void *data) { struct Scsi_Host *instance = data; struct a2091_hostdata *hdata = shost_priv(instance); unsigned int status = hdata->regs->ISTR; unsigned long flags; if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) return IRQ_NONE; spin_lock_irqsave(instance->host_lock, flags); wd33c93_intr(instance); spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); /* don't allow DMA if the physical address is bad */ if (addr & A2091_XFER_MASK) { wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL); /* can't allocate memory; use PIO */ if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } /* get the physical address of the bounce buffer */ addr = virt_to_bus(wh->dma_bounce_buffer); /* the bounce buffer may not be in the first 16M of physmem */ if (addr & A2091_XFER_MASK) { /* we could use chipmem... 
maybe later */ kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } /* setup dma direction */ if (!dir_in) cntr |= CNTR_DDIR; /* remember direction */ wh->dma_dir = dir_in; regs->CNTR = cntr; /* setup DMA *physical* address */ regs->ACR = addr; if (dir_in) { /* invalidate any cache */ cache_clear(addr, cmd->SCp.this_residual); } else { /* push any dirty cache */ cache_push(addr, cmd->SCp.this_residual); } /* start DMA */ regs->ST_DMA = 1; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; if (!wh->dma_dir) cntr |= CNTR_DDIR; /* disable SCSI interrupts */ regs->CNTR = cntr; /* flush if we were reading */ if (wh->dma_dir) { regs->FLUSH = 1; while (!(regs->ISTR & ISTR_FE_FLG)) ; } /* clear a possible interrupt */ regs->CINT = 1; /* stop DMA */ regs->SP_DMA = 1; /* restore the CONTROL bits (minus the direction flag) */ regs->CNTR = CNTR_PDMD | CNTR_INTEN; /* copy from a bounce buffer, if necessary */ if (status && wh->dma_bounce_buffer) { if (wh->dma_dir) memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; } } static int a2091_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; /* FIXME perform bus-specific reset */ /* FIXME 2: kill this function, and let midlayer fall back to the same action, calling wd33c93_host_reset() */ spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); spin_unlock_irq(instance->host_lock); return SUCCESS; } static struct scsi_host_template a2091_scsi_template = { 
.module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", .show_info = wd33c93_show_info, .write_info = wd33c93_write_info, .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a2091_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *instance; int error; struct a2091_scsiregs *regs; wd33c93_regs wdregs; struct a2091_hostdata *hdata; if (!request_mem_region(z->resource.start, 256, "wd33c93")) return -EBUSY; instance = scsi_host_alloc(&a2091_scsi_template, sizeof(struct a2091_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); regs->DAWR = DAWR_A2091; wdregs.SASR = &regs->SASR; wdregs.SCMD = &regs->SCMD; hdata = shost_priv(instance); hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", instance); if (error) goto fail_irq; regs->CNTR = CNTR_PDMD | CNTR_INTEN; error = scsi_add_host(instance, NULL); if (error) goto fail_host; zorro_set_drvdata(z, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_alloc: release_mem_region(z->resource.start, 256); return error; } static void a2091_remove(struct zorro_dev *z) { struct Scsi_Host *instance = zorro_get_drvdata(z); struct a2091_hostdata *hdata = shost_priv(instance); hdata->regs->CNTR = 0; scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); scsi_host_put(instance); release_mem_region(z->resource.start, 
256); } static struct zorro_device_id a2091_zorro_tbl[] = { { ZORRO_PROD_CBM_A590_A2091_1 }, { ZORRO_PROD_CBM_A590_A2091_2 }, { 0 } }; MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); static struct zorro_driver a2091_driver = { .name = "a2091", .id_table = a2091_zorro_tbl, .probe = a2091_probe, .remove = a2091_remove, }; static int __init a2091_init(void) { return zorro_register_driver(&a2091_driver); } module_init(a2091_init); static void __exit a2091_exit(void) { zorro_unregister_driver(&a2091_driver); } module_exit(a2091_exit); MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); MODULE_LICENSE("GPL");
gpl-2.0
Silentlys/android_kernel_cyanogen_msm8916
drivers/staging/media/go7007/saa7134-go7007.c
2314
15997
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/usb.h> #include <linux/i2c.h> #include <asm/byteorder.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include "saa7134.h" #include "saa7134-reg.h" #include "go7007.h" #include "go7007-priv.h" /*#define GO7007_HPI_DEBUG*/ enum hpi_address { HPI_ADDR_VIDEO_BUFFER = 0xe4, HPI_ADDR_INIT_BUFFER = 0xea, HPI_ADDR_INTR_RET_VALUE = 0xee, HPI_ADDR_INTR_RET_DATA = 0xec, HPI_ADDR_INTR_STATUS = 0xf4, HPI_ADDR_INTR_WR_PARAM = 0xf6, HPI_ADDR_INTR_WR_INDEX = 0xf8, }; enum gpio_command { GPIO_COMMAND_RESET = 0x00, /* 000b */ GPIO_COMMAND_REQ1 = 0x04, /* 001b */ GPIO_COMMAND_WRITE = 0x20, /* 010b */ GPIO_COMMAND_REQ2 = 0x24, /* 011b */ GPIO_COMMAND_READ = 0x80, /* 100b */ GPIO_COMMAND_VIDEO = 0x84, /* 101b */ GPIO_COMMAND_IDLE = 0xA0, /* 110b */ GPIO_COMMAND_ADDR = 0xA4, /* 111b */ }; struct saa7134_go7007 { struct v4l2_subdev sd; struct saa7134_dev *dev; u8 *top; u8 *bottom; dma_addr_t top_dma; dma_addr_t bottom_dma; }; static inline struct saa7134_go7007 *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct 
saa7134_go7007, sd); } static const struct go7007_board_info board_voyager = { .flags = 0, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_VALID_ENABLE | GO7007_SENSOR_TV | GO7007_SENSOR_VBI, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .num_inputs = 1, .inputs = { { .name = "SAA7134", }, }, }; /********************* Driver for GPIO HPI interface *********************/ static int gpio_write(struct saa7134_dev *dev, u8 addr, u16 data) { saa_writeb(SAA7134_GPIO_GPMODE0, 0xff); /* Write HPI address */ saa_writeb(SAA7134_GPIO_GPSTATUS0, addr); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); /* Write low byte */ saa_writeb(SAA7134_GPIO_GPSTATUS0, data & 0xff); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); /* Write high byte */ saa_writeb(SAA7134_GPIO_GPSTATUS0, data >> 8); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); return 0; } static int gpio_read(struct saa7134_dev *dev, u8 addr, u16 *data) { saa_writeb(SAA7134_GPIO_GPMODE0, 0xff); /* Write HPI address */ saa_writeb(SAA7134_GPIO_GPSTATUS0, addr); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); saa_writeb(SAA7134_GPIO_GPMODE0, 0x00); /* Read low byte */ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_READ); saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); *data = saa_readb(SAA7134_GPIO_GPSTATUS0); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); /* Read high byte */ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_READ); saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); *data |= saa_readb(SAA7134_GPIO_GPSTATUS0) << 8; 
saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); return 0; } static int saa7134_go7007_interface_reset(struct go7007 *go) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev = saa->dev; u32 status; u16 intr_val, intr_data; int count = 20; saa_clearb(SAA7134_TS_PARALLEL, 0x80); /* Disable TS interface */ saa_writeb(SAA7134_GPIO_GPMODE2, 0xa4); saa_writeb(SAA7134_GPIO_GPMODE0, 0xff); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_RESET); msleep(1); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ2); msleep(10); saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); status = saa_readb(SAA7134_GPIO_GPSTATUS2); /*printk(KERN_DEBUG "status is %s\n", status & 0x40 ? "OK" : "not OK"); */ /* enter command mode...(?) */ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ2); do { saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); status = saa_readb(SAA7134_GPIO_GPSTATUS2); /*printk(KERN_INFO "gpio is %08x\n", saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2)); */ } while (--count > 0); /* Wait for an interrupt to indicate successful hardware reset */ if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x55aa) { printk(KERN_ERR "saa7134-go7007: unable to reset the GO7007\n"); return -1; } return 0; } static int saa7134_go7007_write_interrupt(struct go7007 *go, int addr, int data) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev = saa->dev; int i; u16 status_reg; #ifdef GO7007_HPI_DEBUG printk(KERN_DEBUG "saa7134-go7007: WriteInterrupt: %04x %04x\n", addr, data); #endif for (i = 0; i < 100; ++i) { gpio_read(dev, HPI_ADDR_INTR_STATUS, &status_reg); if (!(status_reg & 0x0010)) break; msleep(10); } if (i == 100) { printk(KERN_ERR 
"saa7134-go7007: device is hung, status reg = 0x%04x\n", status_reg); return -1; } gpio_write(dev, HPI_ADDR_INTR_WR_PARAM, data); gpio_write(dev, HPI_ADDR_INTR_WR_INDEX, addr); return 0; } static int saa7134_go7007_read_interrupt(struct go7007 *go) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev = saa->dev; /* XXX we need to wait if there is no interrupt available */ go->interrupt_available = 1; gpio_read(dev, HPI_ADDR_INTR_RET_VALUE, &go->interrupt_value); gpio_read(dev, HPI_ADDR_INTR_RET_DATA, &go->interrupt_data); #ifdef GO7007_HPI_DEBUG printk(KERN_DEBUG "saa7134-go7007: ReadInterrupt: %04x %04x\n", go->interrupt_value, go->interrupt_data); #endif return 0; } static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev, unsigned long status) { struct go7007 *go = video_get_drvdata(dev->empress_dev); struct saa7134_go7007 *saa = go->hpi_context; if (!vb2_is_streaming(&go->vidq)) return; if (0 != (status & 0x000f0000)) printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", (status >> 16) & 0x0f); if (status & 0x100000) { dma_sync_single_for_cpu(&dev->pci->dev, saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); } else { dma_sync_single_for_cpu(&dev->pci->dev, saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->top, PAGE_SIZE); saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); } } static int saa7134_go7007_stream_start(struct go7007 *go) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev = saa->dev; saa->top_dma = dma_map_page(&dev->pci->dev, virt_to_page(saa->top), 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&dev->pci->dev, saa->top_dma)) return -ENOMEM; saa->bottom_dma = dma_map_page(&dev->pci->dev, virt_to_page(saa->bottom), 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&dev->pci->dev, saa->bottom_dma)) { dma_unmap_page(&dev->pci->dev, saa->top_dma, 
PAGE_SIZE, DMA_FROM_DEVICE); return -ENOMEM; } saa_writel(SAA7134_VIDEO_PORT_CTRL0 >> 2, 0xA300B000); saa_writel(SAA7134_VIDEO_PORT_CTRL4 >> 2, 0x40000200); /* Set HPI interface for video */ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff); saa_writeb(SAA7134_GPIO_GPSTATUS0, HPI_ADDR_VIDEO_BUFFER); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR); saa_writeb(SAA7134_GPIO_GPMODE0, 0x00); /* Enable TS interface */ saa_writeb(SAA7134_TS_PARALLEL, 0xe6); /* Reset TS interface */ saa_setb(SAA7134_TS_SERIAL1, 0x01); saa_clearb(SAA7134_TS_SERIAL1, 0x01); /* Set up transfer block size */ saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 128 - 1); saa_writeb(SAA7134_TS_DMA0, (PAGE_SIZE >> 7) - 1); saa_writeb(SAA7134_TS_DMA1, 0); saa_writeb(SAA7134_TS_DMA2, 0); /* Enable video streaming mode */ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_VIDEO); saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); saa_writel(SAA7134_RS_PITCH(5), 128); saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_MAX); /* Enable TS FIFO */ saa_setl(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_TE5); /* Enable DMA IRQ */ saa_setl(SAA7134_IRQ1, SAA7134_IRQ1_INTE_RA2_1 | SAA7134_IRQ1_INTE_RA2_0); return 0; } static int saa7134_go7007_stream_stop(struct go7007 *go) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev; if (!saa) return -EINVAL; dev = saa->dev; if (!dev) return -EINVAL; /* Shut down TS FIFO */ saa_clearl(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_TE5); /* Disable DMA IRQ */ saa_clearl(SAA7134_IRQ1, SAA7134_IRQ1_INTE_RA2_1 | SAA7134_IRQ1_INTE_RA2_0); /* Disable TS interface */ saa_clearb(SAA7134_TS_PARALLEL, 0x80); dma_unmap_page(&dev->pci->dev, saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); dma_unmap_page(&dev->pci->dev, saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); return 0; } static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len) { struct saa7134_go7007 *saa = go->hpi_context; struct saa7134_dev *dev = 
saa->dev; u16 status_reg; int i; #ifdef GO7007_HPI_DEBUG printk(KERN_DEBUG "saa7134-go7007: DownloadBuffer " "sending %d bytes\n", len); #endif while (len > 0) { i = len > 64 ? 64 : len; saa_writeb(SAA7134_GPIO_GPMODE0, 0xff); saa_writeb(SAA7134_GPIO_GPSTATUS0, HPI_ADDR_INIT_BUFFER); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); while (i-- > 0) { saa_writeb(SAA7134_GPIO_GPSTATUS0, *data); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE); saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE); ++data; --len; } for (i = 0; i < 100; ++i) { gpio_read(dev, HPI_ADDR_INTR_STATUS, &status_reg); if (!(status_reg & 0x0002)) break; } if (i == 100) { printk(KERN_ERR "saa7134-go7007: device is hung, " "status reg = 0x%04x\n", status_reg); return -1; } } return 0; } static struct go7007_hpi_ops saa7134_go7007_hpi_ops = { .interface_reset = saa7134_go7007_interface_reset, .write_interrupt = saa7134_go7007_write_interrupt, .read_interrupt = saa7134_go7007_read_interrupt, .stream_start = saa7134_go7007_stream_start, .stream_stop = saa7134_go7007_stream_stop, .send_firmware = saa7134_go7007_send_firmware, }; MODULE_FIRMWARE("go7007/go7007tv.bin"); /* --------------------------------------------------------------------------*/ static int saa7134_go7007_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct saa7134_go7007 *saa = to_state(sd); struct saa7134_dev *dev = saa->dev; return saa7134_s_std_internal(dev, NULL, norm); } static int saa7134_go7007_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *query) { return saa7134_queryctrl(NULL, NULL, query); } static int saa7134_go7007_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct saa7134_go7007 *saa = to_state(sd); struct saa7134_dev *dev = saa->dev; return saa7134_s_ctrl_internal(dev, NULL, ctrl); } static int saa7134_go7007_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct saa7134_go7007 *saa = to_state(sd); struct 
saa7134_dev *dev = saa->dev; return saa7134_g_ctrl_internal(dev, NULL, ctrl); } /* --------------------------------------------------------------------------*/ static const struct v4l2_subdev_core_ops saa7134_go7007_core_ops = { .g_ctrl = saa7134_go7007_g_ctrl, .s_ctrl = saa7134_go7007_s_ctrl, .queryctrl = saa7134_go7007_queryctrl, .s_std = saa7134_go7007_s_std, }; static const struct v4l2_subdev_ops saa7134_go7007_sd_ops = { .core = &saa7134_go7007_core_ops, }; /* --------------------------------------------------------------------------*/ /********************* Add/remove functions *********************/ static int saa7134_go7007_init(struct saa7134_dev *dev) { struct go7007 *go; struct saa7134_go7007 *saa; struct v4l2_subdev *sd; printk(KERN_DEBUG "saa7134-go7007: probing new SAA713X board\n"); go = go7007_alloc(&board_voyager, &dev->pci->dev); if (go == NULL) return -ENOMEM; saa = kzalloc(sizeof(struct saa7134_go7007), GFP_KERNEL); if (saa == NULL) { kfree(go); return -ENOMEM; } go->board_id = GO7007_BOARDID_PCI_VOYAGER; snprintf(go->bus_info, sizeof(go->bus_info), "PCI:%s", pci_name(dev->pci)); strlcpy(go->name, saa7134_boards[dev->board].name, sizeof(go->name)); go->hpi_ops = &saa7134_go7007_hpi_ops; go->hpi_context = saa; saa->dev = dev; /* Init the subdevice interface */ sd = &saa->sd; v4l2_subdev_init(sd, &saa7134_go7007_sd_ops); v4l2_set_subdevdata(sd, saa); strncpy(sd->name, "saa7134-go7007", sizeof(sd->name)); /* Allocate a couple pages for receiving the compressed stream */ saa->top = (u8 *)get_zeroed_page(GFP_KERNEL); if (!saa->top) goto allocfail; saa->bottom = (u8 *)get_zeroed_page(GFP_KERNEL); if (!saa->bottom) goto allocfail; /* Boot the GO7007 */ if (go7007_boot_encoder(go, go->board_info->flags & GO7007_BOARD_USE_ONBOARD_I2C) < 0) goto allocfail; /* Do any final GO7007 initialization, then register the * V4L2 and ALSA interfaces */ if (go7007_register_encoder(go, go->board_info->num_i2c_devs) < 0) goto allocfail; /* Register the subdevice 
interface with the go7007 device */ if (v4l2_device_register_subdev(&go->v4l2_dev, sd) < 0) printk(KERN_INFO "saa7134-go7007: register subdev failed\n"); dev->empress_dev = &go->vdev; go->status = STATUS_ONLINE; return 0; allocfail: if (saa->top) free_page((unsigned long)saa->top); if (saa->bottom) free_page((unsigned long)saa->bottom); kfree(saa); kfree(go); return -ENOMEM; } static int saa7134_go7007_fini(struct saa7134_dev *dev) { struct go7007 *go; struct saa7134_go7007 *saa; if (NULL == dev->empress_dev) return 0; go = video_get_drvdata(dev->empress_dev); if (go->audio_enabled) go7007_snd_remove(go); saa = go->hpi_context; go->status = STATUS_SHUTDOWN; free_page((unsigned long)saa->top); free_page((unsigned long)saa->bottom); v4l2_device_unregister_subdev(&saa->sd); kfree(saa); video_unregister_device(&go->vdev); v4l2_device_put(&go->v4l2_dev); dev->empress_dev = NULL; return 0; } static struct saa7134_mpeg_ops saa7134_go7007_ops = { .type = SAA7134_MPEG_GO7007, .init = saa7134_go7007_init, .fini = saa7134_go7007_fini, .irq_ts_done = saa7134_go7007_irq_ts_done, }; static int __init saa7134_go7007_mod_init(void) { return saa7134_ts_register(&saa7134_go7007_ops); } static void __exit saa7134_go7007_mod_cleanup(void) { saa7134_ts_unregister(&saa7134_go7007_ops); } module_init(saa7134_go7007_mod_init); module_exit(saa7134_go7007_mod_cleanup); MODULE_LICENSE("GPL v2");
gpl-2.0
zarboz/s2w-VilleZ
drivers/gpu/drm/drm_lock.c
2826
10599
/** * \file drm_lock.c * IOCTLs for locking * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" static int drm_notifier(void *priv); static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); /** * Lock ioctl. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. 
*/ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) { DECLARE_WAITQUEUE(entry, current); struct drm_lock *lock = data; struct drm_master *master = file_priv->master; int ret = 0; ++file_priv->lock_count; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", task_pid_nr(current), lock->context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock->context, task_pid_nr(current), master->lock.hw_lock->lock, lock->flags); if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) if (lock->context < 0) return -EINVAL; add_wait_queue(&master->lock.lock_queue, &entry); spin_lock_bh(&master->lock.spinlock); master->lock.user_waiters++; spin_unlock_bh(&master->lock.spinlock); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (!master->lock.hw_lock) { /* Device has been unregistered */ send_sig(SIGTERM, current, 0); ret = -EINTR; break; } if (drm_lock_take(&master->lock, lock->context)) { master->lock.file_priv = file_priv; master->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } /* Contention */ mutex_unlock(&drm_global_mutex); schedule(); mutex_lock(&drm_global_mutex); if (signal_pending(current)) { ret = -EINTR; break; } } spin_lock_bh(&master->lock.spinlock); master->lock.user_waiters--; spin_unlock_bh(&master->lock.spinlock); __set_current_state(TASK_RUNNING); remove_wait_queue(&master->lock.lock_queue, &entry); DRM_DEBUG("%d %s\n", lock->context, ret ? 
"interrupted" : "has lock"); if (ret) return ret; /* don't set the block all signals on the master process for now * really probably not the correct answer but lets us debug xkb * xserver for now */ if (!file_priv->is_master) { sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); sigaddset(&dev->sigmask, SIGTSTP); sigaddset(&dev->sigmask, SIGTTIN); sigaddset(&dev->sigmask, SIGTTOU); dev->sigdata.context = lock->context; dev->sigdata.lock = master->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); } if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { DRM_DEBUG("%d waiting for DMA quiescent\n", lock->context); return -EBUSY; } } return 0; } /** * Unlock ioctl. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Transfer and free the lock. */ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_lock *lock = data; struct drm_master *master = file_priv->master; if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", task_pid_nr(current), lock->context); return -EINVAL; } atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); if (drm_lock_free(&master->lock, lock->context)) { /* FIXME: Should really bail out here. */ } unblock_all_signals(); return 0; } /** * Take the heavyweight lock. * * \param lock lock pointer. * \param context locking context. * \return one if the lock is held, or zero otherwise. * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 
*/ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); do { old = *lock; if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT; else { new = context | _DRM_LOCK_HELD | ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? _DRM_LOCK_CONT : 0); } prev = cmpxchg(lock, old, new); } while (prev != old); spin_unlock_bh(&lock_data->spinlock); if (_DRM_LOCKING_CONTEXT(old) == context) { if (old & _DRM_LOCK_HELD) { if (context != DRM_KERNEL_CONTEXT) { DRM_ERROR("%d holds heavyweight lock\n", context); } return 0; } } if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { /* Have lock */ return 1; } return 0; } /** * This takes a lock forcibly and hands it to context. Should ONLY be used * inside *_unlock to give lock to kernel before calling *_dma_schedule. * * \param dev DRM device. * \param lock lock pointer. * \param context locking context. * \return always one. * * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; prev = cmpxchg(lock, old, new); } while (prev != old); return 1; } /** * Free lock. * * \param dev DRM device. * \param lock lock. * \param context context. * * Resets the lock file pointer. * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. 
*/ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); if (lock_data->kernel_waiters != 0) { drm_lock_transfer(lock_data, 0); lock_data->idle_has_lock = 1; spin_unlock_bh(&lock_data->spinlock); return 1; } spin_unlock_bh(&lock_data->spinlock); do { old = *lock; new = _DRM_LOCKING_CONTEXT(old); prev = cmpxchg(lock, old, new); } while (prev != old); if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { DRM_ERROR("%d freed heavyweight lock held by %d\n", context, _DRM_LOCKING_CONTEXT(old)); return 1; } wake_up_interruptible(&lock_data->lock_queue); return 0; } /** * If we get here, it means that the process has called DRM_IOCTL_LOCK * without calling DRM_IOCTL_UNLOCK. * * If the lock is not held, then let the signal proceed as usual. If the lock * is held, then set the contended flag and keep the signal blocked. * * \param priv pointer to a drm_sigdata structure. * \return one if the signal should be delivered normally, or zero if the * signal should be blocked. */ static int drm_notifier(void *priv) { struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1; /* Otherwise, set flag to force call to drmUnlock */ do { old = s->lock->lock; new = old | _DRM_LOCK_CONT; prev = cmpxchg(&s->lock->lock, old, new); } while (prev != old); return 0; } /** * This function returns immediately and takes the hw lock * with the kernel context if it is free, otherwise it gets the highest priority when and if * it is eventually released. * * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held * by a blocked process. 
(In the latter case an explicit wait for the hardware lock would cause * a deadlock, which is why the "idlelock" was invented). * * This should be sufficient to wait for GPU idle without * having to worry about starvation. */ void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; spin_lock_bh(&lock_data->spinlock); lock_data->kernel_waiters++; if (!lock_data->idle_has_lock) { spin_unlock_bh(&lock_data->spinlock); ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); spin_lock_bh(&lock_data->spinlock); if (ret == 1) lock_data->idle_has_lock = 1; } spin_unlock_bh(&lock_data->spinlock); } void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); if (--lock_data->kernel_waiters == 0) { if (lock_data->idle_has_lock) { do { old = *lock; prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); } while (prev != old); wake_up_interruptible(&lock_data->lock_queue); lock_data->idle_has_lock = 0; } } spin_unlock_bh(&lock_data->spinlock); } int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; return (file_priv->lock_count && master->lock.hw_lock && _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); }
gpl-2.0
ihadzic/linux-vcrtcm
net/wireless/debugfs.c
4106
3277
/*
 * cfg80211 debugfs
 *
 * Copyright 2009	Luis R. Rodriguez <lrodriguez@atheros.com>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "core.h"
#include "debugfs.h"

/* Generic debugfs open: stash the wiphy pointer from the inode for read(). */
static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/*
 * Generate a read-only debugfs file ("<name>") that formats a single
 * wiphy field with scnprintf() into an on-stack buffer of @buflen bytes.
 * Expands to both the read handler and its file_operations table.
 */
#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)		\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct wiphy *wiphy= file->private_data;			\
	char buf[buflen];						\
	int res;							\
									\
	res = scnprintf(buf, buflen, fmt "\n", ##value);		\
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);	\
}									\
									\
static const struct file_operations name## _ops = {			\
	.read = name## _read,						\
	.open = cfg80211_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
		      wiphy->rts_threshold)
DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
		      wiphy->frag_threshold);
DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
		      wiphy->retry_short)
DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
		      wiphy->retry_long);

/*
 * Append one line for @chan to @buf at @offset: "<freq> Disabled" or
 * "<freq> HT40 <-/+ map>".  Returns the number of characters snprintf
 * reported (may exceed the remaining space; the caller's buffer is
 * bounded by the WARN_ON check).
 */
static int ht_print_chan(struct ieee80211_channel *chan,
			 char *buf, int buf_size, int offset)
{
	if (WARN_ON(offset > buf_size))
		return 0;

	if (chan->flags & IEEE80211_CHAN_DISABLED)
		return snprintf(buf + offset,
				buf_size - offset,
				"%d Disabled\n",
				chan->center_freq);

	/* A space means the corresponding HT40 side is NOT allowed. */
	return snprintf(buf + offset,
			buf_size - offset,
			"%d HT40 %c%c\n",
			chan->center_freq,
			(chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ?
				' ' : '-',
			(chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ?
				' ' : '+');
}

/* Dump the HT40 allow-map for every channel of every band of the wiphy. */
static ssize_t ht40allow_map_read(struct file *file,
				  char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;
	char *buf;
	unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
	enum ieee80211_band band;
	struct ieee80211_supported_band *sband;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cfg80211_mutex);

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++)
			offset += ht_print_chan(&sband->channels[i],
						buf, buf_size, offset);
	}

	mutex_unlock(&cfg80211_mutex);

	r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);

	kfree(buf);

	return r;
}

static const struct file_operations ht40allow_map_ops = {
	.read = ht40allow_map_read,
	.open = cfg80211_open_file_generic,
	.llseek = default_llseek,
};

/* Create one read-only file under the wiphy's debugfs directory. */
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);

/* Populate debugfs entries for a newly registered wiphy. */
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
	struct dentry *phyd = rdev->wiphy.debugfsdir;

	DEBUGFS_ADD(rts_threshold);
	DEBUGFS_ADD(fragmentation_threshold);
	DEBUGFS_ADD(short_retry_limit);
	DEBUGFS_ADD(long_retry_limit);
	DEBUGFS_ADD(ht40allow_map);
}
gpl-2.0
lucabe72/linux-reclaiming
arch/mips/pci/fixup-mpc30x.c
4618
1508
/*
 * fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
 *
 * Copyright (C) 2002,2004  Yoichi Yuasa <yuasa@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/pci.h>

#include <asm/vr41xx/mpc30x.h>

/* Per-PCI-function IRQs for the device in slot 30 (VRC4173 lines). */
static const int internal_func_irqs[] __initconst = {
	VRC4173_CASCADE_IRQ,
	VRC4173_AC97_IRQ,
	VRC4173_USB_IRQ,
};

/* Fixed slot -> IRQ mapping for the remaining devices on this board. */
static const int irq_tab_mpc30x[] __initconst = {
	[12] = VRC4173_PCMCIA1_IRQ,
	[13] = VRC4173_PCMCIA2_IRQ,
	[29] = MQ200_IRQ,
};

/* Route a device's interrupt: slot 30 is special-cased by PCI function. */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	if (slot != 30)
		return irq_tab_mpc30x[slot];

	/* Slot 30 hosts several functions, each with its own IRQ line. */
	return internal_func_irqs[PCI_FUNC(dev->devfn)];
}

/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* Nothing needed on this board. */
	return 0;
}
gpl-2.0
flar2/m7wl-Bulletproof
drivers/infiniband/hw/mthca/mthca_main.c
5642
35476
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* Without CONFIG_PCI_MSI the flag is a compile-time constant zero. */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

DEFINE_MUTEX(mthca_device_mutex);

/* Default sizing of the HCA resource profile; all overridable below. */
#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

static struct mthca_profile hca_profile = {
	.num_qp             = MTHCA_DEFAULT_NUM_QP,
	.rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
	.num_cq             = MTHCA_DEFAULT_NUM_CQ,
	.num_mcg            = MTHCA_DEFAULT_NUM_MCG,
	.num_mpt            = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt            = MTHCA_DEFAULT_NUM_MTT,
	.num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
	.fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
	.uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
		"maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
		 "maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
		 "number of memory translation table segments reserved for FMR");

static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

static char mthca_version[] __devinitdata =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/*
 * Optionally raise the PCI-X max memory read byte count / PCIe max read
 * request size above the BIOS defaults.  No-op unless the tune_pci module
 * parameter is set.  Returns 0 on success or -ENODEV on failure.
 */
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
			mthca_err(mdev, "Couldn't set PCI-X max read count, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	if (pci_is_pcie(mdev->pdev)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}

/*
 * Query device limits from firmware, sanity-check them against what the
 * driver and kernel can support, and translate them into mdev->limits
 * and mdev->device_cap_flags.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
				", aborting.\n", err);
		return err;
	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports      	= dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len  	= dev_lim->max_gids;
	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
					      (dev_lim->max_desc_sz -
					       sizeof (struct mthca_next_seg) -
					       (mthca_is_memfree(mdev) ?
						sizeof (struct mthca_arbel_ud_seg) :
						sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}

/*
 * Bring up a Tavor-family HCA (with HCA-attached DDR): enable the
 * device, query firmware/DDR/limits, build the resource profile and
 * run INIT_HCA.  Unwinds with SYS_DIS on any failure.
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev);
	if (err) {
		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command returned %d,"
			  " aborting.\n", err);
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
		goto err_disable;
	}

	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;	/* UAR context is Arbel-only */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev);

	return err;
}

/*
 * Allocate ICM for the firmware area, map it, and start the firmware
 * (MemFree/Arbel path).
 */
static int mthca_load_fw(struct mthca_dev *mdev)
{
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
	if (err) {
		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
		goto err_free;
	}
	err = mthca_RUN_FW(mdev);
	if (err) {
		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}

/*
 * Set up all ICM (interconnect context memory) tables for a MemFree HCA:
 * aux ICM, EQ/MTT/MPT/QP/EQP/RDB/CQ/SRQ/MCG context.  On failure the
 * goto chain unwinds in exact reverse order of allocation.
 */
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
		return err;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts *
					   mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) /
					   mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						    dev_lim->cqc_entry_sz,
						    mdev->limits.num_cqs,
						    mdev->limits.reserved_cqs,
						    0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}

/* Tear down everything mthca_init_icm() set up, in reverse order. */
static void mthca_free_icms(struct mthca_dev *mdev)
{

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}

/*
 * Bring up an Arbel/MemFree HCA: query FW, try ENABLE_LAM (absence of
 * HCA-attached memory means MemFree mode), load firmware, size the
 * profile, map ICM and run INIT_HCA.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	int err;

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
		return err;
	}

	err = mthca_ENABLE_LAM(mdev);
	if (err == -EAGAIN) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (err) {
		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
		goto err_stop_fw;
	}

	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;	/* UD AVs live in ICM on MemFree HCAs */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev);

	return err;
}

/* Shut the HCA down, undoing mthca_init_arbel()/mthca_init_tavor(). */
static void mthca_close_hca(struct mthca_dev *mdev)
{
	mthca_CLOSE_HCA(mdev, 0);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);

		mthca_UNMAP_FA(mdev);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev);
	} else
		mthca_SYS_DIS(mdev);
}

/*
 * Dispatch to the family-specific bring-up path, then read adapter
 * identity (INTA pin, revision, board id) via QUERY_ADAPTER.
 */
static int mthca_init_hca(struct mthca_dev *mdev)
{
	int err;
	struct mthca_adapter adapter;

	if (mthca_is_memfree(mdev))
		err = mthca_init_arbel(mdev);
	else
		err = mthca_init_tavor(mdev);

	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}

/*
 * Set up all software resource tables (UAR, PD, MR, EQ, CQ, SRQ, QP, AV,
 * MCG), switch commands to event mode and verify interrupt delivery
 * with a NOP command.  Failures unwind via the goto chain in reverse
 * order of setup.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP doubles as an interrupt-delivery self-test. */
	err = mthca_NOP(dev);
	if (err) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}

/* Request the three MSI-X vectors (completion, async, command EQs). */
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;

	entries[0].entry = 0;
	entries[1].entry = 1;
	entries[2].entry = 2;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}

/* Types of supported HCA */
enum { TAVOR, /* MT23108 */ ARBEL_COMPAT, /* MT25208 in Tavor compat mode */ ARBEL_NATIVE, /* MT25208 with extended features */ SINAI /* MT25204 */ }; #define MTHCA_FW_VER(major, minor, subminor) \ (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor)) static struct { u64 latest_fw; u32 flags; } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0), .flags = 0 }, [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200), .flags = MTHCA_FLAG_PCIE }, [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } }; static int __mthca_init_one(struct pci_dev *pdev, int hca_type) { int ddr_hidden = 0; int err; struct mthca_dev *mdev; printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, " "aborting.\n"); return err; } /* * Check for BARs. 
We expect 0: 1MB, 2: 8MB, 4: DDR (may not * be present) */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || pci_resource_len(pdev, 0) != 1 << 20) { dev_err(&pdev->dev, "Missing DCS, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing UAR, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) ddr_hidden = 1; err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, " "aborting.\n"); goto err_disable_pdev; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); goto err_free_res; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " "consistent PCI DMA mask.\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " "aborting.\n"); goto err_free_res; } } /* We can handle large RDMA requests, so allow larger segments. */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev); if (!mdev) { dev_err(&pdev->dev, "Device struct alloc failed, " "aborting.\n"); err = -ENOMEM; goto err_free_res; } mdev->pdev = pdev; mdev->mthca_flags = mthca_hca_table[hca_type].flags; if (ddr_hidden) mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left * the HCA in an undefined state. 
*/ err = mthca_reset(mdev); if (err) { mthca_err(mdev, "Failed to reset HCA, aborting.\n"); goto err_free_dev; } if (mthca_cmd_init(mdev)) { mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } err = mthca_tune_pci(mdev); if (err) goto err_cmd; err = mthca_init_hca(mdev); if (err) goto err_cmd; if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff), (int) (mthca_hca_table[hca_type].latest_fw >> 32), (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff, (int) (mthca_hca_table[hca_type].latest_fw & 0xffff)); mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); } if (msi_x && !mthca_enable_msi_x(mdev)) mdev->mthca_flags |= MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) { if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); } if (err) goto err_close; err = mthca_register_device(mdev); if (err) goto err_cleanup; err = mthca_create_agents(mdev); if (err) goto err_unregister; pci_set_drvdata(pdev, mdev); mdev->hca_type = hca_type; mdev->active = true; return 0; err_unregister: mthca_unregister_device(mdev); err_cleanup: mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); mthca_cleanup_uar_table(mdev); err_close: if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); mthca_close_hca(mdev); err_cmd: mthca_cmd_cleanup(mdev); err_free_dev: ib_dealloc_device(&mdev->ib_dev); err_free_res: pci_release_regions(pdev); err_disable_pdev: 
pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void __mthca_remove_one(struct pci_dev *pdev) { struct mthca_dev *mdev = pci_get_drvdata(pdev); int p; if (mdev) { mthca_free_agents(mdev); mthca_unregister_device(mdev); for (p = 1; p <= mdev->limits.num_ports; ++p) mthca_CLOSE_IB(mdev, p); mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); iounmap(mdev->kar); mthca_uar_free(mdev, &mdev->driver_uar); mthca_cleanup_uar_table(mdev); mthca_close_hca(mdev); mthca_cmd_cleanup(mdev); if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); ib_dealloc_device(&mdev->ib_dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } int __mthca_restart_one(struct pci_dev *pdev) { struct mthca_dev *mdev; int hca_type; mdev = pci_get_drvdata(pdev); if (!mdev) return -ENODEV; hca_type = mdev->hca_type; __mthca_remove_one(pdev); return __mthca_init_one(pdev, hca_type); } static int __devinit mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; mutex_lock(&mthca_device_mutex); printk_once(KERN_INFO "%s", mthca_version); if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { printk(KERN_ERR PFX "%s has invalid driver data %lx\n", pci_name(pdev), id->driver_data); mutex_unlock(&mthca_device_mutex); return -ENODEV; } ret = __mthca_init_one(pdev, id->driver_data); mutex_unlock(&mthca_device_mutex); return ret; } static void __devexit mthca_remove_one(struct pci_dev *pdev) { mutex_lock(&mthca_device_mutex); __mthca_remove_one(pdev); mutex_unlock(&mthca_device_mutex); } static struct pci_device_id mthca_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { 
PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { 0, } }; MODULE_DEVICE_TABLE(pci, mthca_pci_table); static struct pci_driver mthca_driver = { .name = DRV_NAME, .id_table = mthca_pci_table, .probe = mthca_init_one, .remove = __devexit_p(mthca_remove_one) }; static void __init __mthca_check_profile_val(const char *name, int *pval, int pval_default) { /* value must be positive and power of 2 */ int old_pval = *pval; if (old_pval <= 0) *pval = pval_default; else *pval = roundup_pow_of_two(old_pval); if (old_pval != *pval) { printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n", old_pval, name); printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval); } } #define mthca_check_profile_val(name, default) \ __mthca_check_profile_val(#name, &hca_profile.name, default) static void __init mthca_validate_profile(void) { mthca_check_profile_val(num_qp, MTHCA_DEFAULT_NUM_QP); mthca_check_profile_val(rdb_per_qp, MTHCA_DEFAULT_RDB_PER_QP); mthca_check_profile_val(num_cq, MTHCA_DEFAULT_NUM_CQ); mthca_check_profile_val(num_mcg, MTHCA_DEFAULT_NUM_MCG); mthca_check_profile_val(num_mpt, MTHCA_DEFAULT_NUM_MPT); mthca_check_profile_val(num_mtt, 
MTHCA_DEFAULT_NUM_MTT); mthca_check_profile_val(num_udav, MTHCA_DEFAULT_NUM_UDAV); mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS); if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) { printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n", hca_profile.fmr_reserved_mtts); printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n", hca_profile.num_mtt); hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2; printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n", hca_profile.fmr_reserved_mtts); } if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n", log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8)); log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); } } static int __init mthca_init(void) { int ret; mthca_validate_profile(); ret = mthca_catas_init(); if (ret) return ret; ret = pci_register_driver(&mthca_driver); if (ret < 0) { mthca_catas_cleanup(); return ret; } return 0; } static void __exit mthca_cleanup(void) { pci_unregister_driver(&mthca_driver); mthca_catas_cleanup(); } module_init(mthca_init); module_exit(mthca_cleanup);
gpl-2.0
upworkstar/AndroidAmazon
arch/powerpc/sysdev/uic.c
7690
8394
/* * arch/powerpc/sysdev/uic.c * * IBM PowerPC 4xx Universal Interrupt Controller * * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/device.h> #include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/dcr.h> #define NR_UIC_INTS 32 #define UIC_SR 0x0 #define UIC_ER 0x2 #define UIC_CR 0x3 #define UIC_PR 0x4 #define UIC_TR 0x5 #define UIC_MSR 0x6 #define UIC_VR 0x7 #define UIC_VCR 0x8 struct uic *primary_uic; struct uic { int index; int dcrbase; raw_spinlock_t lock; /* The remapper for this UIC */ struct irq_domain *irqhost; }; static void uic_unmask_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er, sr; sr = 1 << (31-src); raw_spin_lock_irqsave(&uic->lock, flags); /* ack level-triggered interrupts here */ if (irqd_is_level_type(d)) mtdcr(uic->dcrbase + UIC_SR, sr); er = mfdcr(uic->dcrbase + UIC_ER); er |= sr; mtdcr(uic->dcrbase + UIC_ER, er); raw_spin_unlock_irqrestore(&uic->lock, flags); } static void uic_mask_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er; raw_spin_lock_irqsave(&uic->lock, flags); er = mfdcr(uic->dcrbase + UIC_ER); er &= ~(1 << (31 - src)); mtdcr(uic->dcrbase + UIC_ER, er); raw_spin_unlock_irqrestore(&uic->lock, flags); } static void 
uic_ack_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); unsigned int src = irqd_to_hwirq(d); unsigned long flags; raw_spin_lock_irqsave(&uic->lock, flags); mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src)); raw_spin_unlock_irqrestore(&uic->lock, flags); } static void uic_mask_ack_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er, sr; sr = 1 << (31-src); raw_spin_lock_irqsave(&uic->lock, flags); er = mfdcr(uic->dcrbase + UIC_ER); er &= ~sr; mtdcr(uic->dcrbase + UIC_ER, er); /* On the UIC, acking (i.e. clearing the SR bit) * a level irq will have no effect if the interrupt * is still asserted by the device, even if * the interrupt is already masked. Therefore * we only ack the egde interrupts here, while * level interrupts are ack'ed after the actual * isr call in the uic_unmask_irq() */ if (!irqd_is_level_type(d)) mtdcr(uic->dcrbase + UIC_SR, sr); raw_spin_unlock_irqrestore(&uic->lock, flags); } static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct uic *uic = irq_data_get_irq_chip_data(d); unsigned int src = irqd_to_hwirq(d); unsigned long flags; int trigger, polarity; u32 tr, pr, mask; switch (flow_type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_NONE: uic_mask_irq(d); return 0; case IRQ_TYPE_EDGE_RISING: trigger = 1; polarity = 1; break; case IRQ_TYPE_EDGE_FALLING: trigger = 1; polarity = 0; break; case IRQ_TYPE_LEVEL_HIGH: trigger = 0; polarity = 1; break; case IRQ_TYPE_LEVEL_LOW: trigger = 0; polarity = 0; break; default: return -EINVAL; } mask = ~(1 << (31 - src)); raw_spin_lock_irqsave(&uic->lock, flags); tr = mfdcr(uic->dcrbase + UIC_TR); pr = mfdcr(uic->dcrbase + UIC_PR); tr = (tr & mask) | (trigger << (31-src)); pr = (pr & mask) | (polarity << (31-src)); mtdcr(uic->dcrbase + UIC_PR, pr); mtdcr(uic->dcrbase + UIC_TR, tr); raw_spin_unlock_irqrestore(&uic->lock, flags); return 0; } static struct irq_chip uic_irq_chip = { .name = 
"UIC", .irq_unmask = uic_unmask_irq, .irq_mask = uic_mask_irq, .irq_mask_ack = uic_mask_ack_irq, .irq_ack = uic_ack_irq, .irq_set_type = uic_set_irq_type, }; static int uic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct uic *uic = h->host_data; irq_set_chip_data(virq, uic); /* Despite the name, handle_level_irq() works for both level * and edge irqs on UIC. FIXME: check this is correct */ irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); /* Set default irq type */ irq_set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static struct irq_domain_ops uic_host_ops = { .map = uic_host_map, .xlate = irq_domain_xlate_twocell, }; void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_data *idata = irq_desc_get_irq_data(desc); struct uic *uic = irq_get_handler_data(virq); u32 msr; int src; int subvirq; raw_spin_lock(&desc->lock); if (irqd_is_level_type(idata)) chip->irq_mask(idata); else chip->irq_mask_ack(idata); raw_spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ goto uic_irq_ret; src = 32 - ffs(msr); subvirq = irq_linear_revmap(uic->irqhost, src); generic_handle_irq(subvirq); uic_irq_ret: raw_spin_lock(&desc->lock); if (irqd_is_level_type(idata)) chip->irq_ack(idata); if (!irqd_irq_disabled(idata) && chip->irq_unmask) chip->irq_unmask(idata); raw_spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) { struct uic *uic; const u32 *indexp, *dcrreg; int len; BUG_ON(! of_device_is_compatible(node, "ibm,uic")); uic = kzalloc(sizeof(*uic), GFP_KERNEL); if (! uic) return NULL; /* FIXME: panic? 
*/ raw_spin_lock_init(&uic->lock); indexp = of_get_property(node, "cell-index", &len); if (!indexp || (len != sizeof(u32))) { printk(KERN_ERR "uic: Device node %s has missing or invalid " "cell-index property\n", node->full_name); return NULL; } uic->index = *indexp; dcrreg = of_get_property(node, "dcr-reg", &len); if (!dcrreg || (len != 2*sizeof(u32))) { printk(KERN_ERR "uic: Device node %s has missing or invalid " "dcr-reg property\n", node->full_name); return NULL; } uic->dcrbase = *dcrreg; uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, uic); if (! uic->irqhost) return NULL; /* FIXME: panic? */ /* Start with all interrupts disabled, level and non-critical */ mtdcr(uic->dcrbase + UIC_ER, 0); mtdcr(uic->dcrbase + UIC_CR, 0); mtdcr(uic->dcrbase + UIC_TR, 0); /* Clear any pending interrupts, in case the firmware left some */ mtdcr(uic->dcrbase + UIC_SR, 0xffffffff); printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index, NR_UIC_INTS, uic->dcrbase); return uic; } void __init uic_init_tree(void) { struct device_node *np; struct uic *uic; const u32 *interrupts; /* First locate and initialize the top-level UIC */ for_each_compatible_node(np, NULL, "ibm,uic") { interrupts = of_get_property(np, "interrupts", NULL); if (!interrupts) break; } BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the * top-level interrupt controller */ primary_uic = uic_init_one(np); if (!primary_uic) panic("Unable to initialize primary UIC %s\n", np->full_name); irq_set_default_host(primary_uic->irqhost); of_node_put(np); /* The scan again for cascaded UICs */ for_each_compatible_node(np, NULL, "ibm,uic") { interrupts = of_get_property(np, "interrupts", NULL); if (interrupts) { /* Secondary UIC */ int cascade_virq; uic = uic_init_one(np); if (! 
uic) panic("Unable to initialize a secondary UIC %s\n", np->full_name); cascade_virq = irq_of_parse_and_map(np, 0); irq_set_handler_data(cascade_virq, uic); irq_set_chained_handler(cascade_virq, uic_irq_cascade); /* FIXME: setup critical cascade?? */ } } } /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ unsigned int uic_get_irq(void) { u32 msr; int src; BUG_ON(! primary_uic); msr = mfdcr(primary_uic->dcrbase + UIC_MSR); src = 32 - ffs(msr); return irq_linear_revmap(primary_uic->irqhost, src); }
gpl-2.0
GeeteshKhatavkar/gh0st_kernel_samsung_royxx
arch/powerpc/platforms/cell/qpace_setup.c
7690
3494
/* * linux/arch/powerpc/platforms/cell/qpace_setup.c * * Copyright (C) 1995 Linus Torvalds * Adapted from 'alpha' version by Gary Thomas * Modified by Cort Dougan (cort@cs.nmt.edu) * Modified by PPC64 Team, IBM Corp * Modified by Cell Team, IBM Deutschland Entwicklung GmbH * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/console.h> #include <linux/of_platform.h> #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/kexec.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/dma.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/cputable.h> #include <asm/irq.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/udbg.h> #include <asm/cell-regs.h> #include "interrupt.h" #include "pervasive.h" #include "ras.h" static void qpace_show_cpuinfo(struct seq_file *m) { struct device_node *root; const char *model = ""; root = of_find_node_by_path("/"); if (root) model = of_get_property(root, "model", NULL); seq_printf(m, "machine\t\t: CHRP %s\n", model); of_node_put(root); } static void qpace_progress(char *s, unsigned short hex) { printk("*** %04x : %s\n", hex, s ? 
s : ""); } static const struct of_device_id qpace_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .type = "spider", }, { .type = "axon", }, { .type = "plb5", }, { .type = "plb4", }, { .type = "opb", }, { .type = "ebc", }, {}, }; static int __init qpace_publish_devices(void) { int node; /* Publish OF platform devices for southbridge IOs */ of_platform_bus_probe(NULL, qpace_bus_ids, NULL); /* There is no device for the MIC memory controller, thus we create * a platform device for it to attach the EDAC driver to. */ for_each_online_node(node) { if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) continue; platform_device_register_simple("cbe-mic", node, NULL, 0); } return 0; } machine_subsys_initcall(qpace, qpace_publish_devices); static void __init qpace_setup_arch(void) { #ifdef CONFIG_SPU_BASE spu_priv1_ops = &spu_priv1_mmio_ops; spu_management_ops = &spu_management_of_ops; #endif cbe_regs_init(); #ifdef CONFIG_CBE_RAS cbe_ras_init(); #endif #ifdef CONFIG_SMP smp_init_cell(); #endif /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000; cbe_pervasive_init(); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif } static int __init qpace_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "IBM,QPACE")) return 0; hpte_init_native(); return 1; } define_machine(qpace) { .name = "QPACE", .probe = qpace_probe, .setup_arch = qpace_setup_arch, .show_cpuinfo = qpace_show_cpuinfo, .restart = rtas_restart, .power_off = rtas_power_off, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, .calibrate_decr = generic_calibrate_decr, .progress = qpace_progress, .init_IRQ = iic_init_IRQ, };
gpl-2.0
VanirAOSP/kernel_motorola_msm8226
drivers/net/phy/ste10Xp.c
8202
3594
/* * drivers/net/phy/ste10Xp.c * * Driver for STMicroelectronics STe10Xp PHYs * * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> * * Copyright (c) 2008 STMicroelectronics Limited * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> #define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */ #define MII_XIE 0x12 /* Interrupt Enable Register */ #define MII_XIE_DEFAULT_MASK 0x0070 /* ANE complete, Remote Fault, Link Down */ #define STE101P_PHY_ID 0x00061c50 #define STE100P_PHY_ID 0x1c040011 static int ste10Xp_config_init(struct phy_device *phydev) { int value, err; /* Software Reset PHY */ value = phy_read(phydev, MII_BMCR); if (value < 0) return value; value |= BMCR_RESET; err = phy_write(phydev, MII_BMCR, value); if (err < 0) return err; do { value = phy_read(phydev, MII_BMCR); } while (value & BMCR_RESET); return 0; } static int ste10Xp_config_intr(struct phy_device *phydev) { int err, value; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { /* Enable all STe101P interrupts (PR12) */ err = phy_write(phydev, MII_XIE, MII_XIE_DEFAULT_MASK); /* clear any pending interrupts */ if (err == 0) { value = phy_read(phydev, MII_XCIIS); if (value < 0) err = value; } } else err = phy_write(phydev, MII_XIE, 0); return err; } static int ste10Xp_ack_interrupt(struct phy_device *phydev) { int err = phy_read(phydev, MII_XCIIS); if (err < 0) return err; return 0; } static struct phy_driver ste101p_pdriver = { .phy_id = STE101P_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "STe101p", .features = PHY_BASIC_FEATURES | 
SUPPORTED_Pause, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = {.owner = THIS_MODULE,} }; static struct phy_driver ste100p_pdriver = { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, .name = "STe100p", .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = {.owner = THIS_MODULE,} }; static int __init ste10Xp_init(void) { int retval; retval = phy_driver_register(&ste100p_pdriver); if (retval < 0) return retval; return phy_driver_register(&ste101p_pdriver); } static void __exit ste10Xp_exit(void) { phy_driver_unregister(&ste100p_pdriver); phy_driver_unregister(&ste101p_pdriver); } module_init(ste10Xp_init); module_exit(ste10Xp_exit); static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = { { STE101P_PHY_ID, 0xfffffff0 }, { STE100P_PHY_ID, 0xffffffff }, { } }; MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl); MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL");
gpl-2.0
atilag/flatfish-kernel
arch/parisc/math-emu/dfsqrt.c
14090
5530
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/dfsqrt.c $Revision: 1.1 $ * * Purpose: * Double Floating-point Square Root * * External Interfaces: * dbl_fsqrt(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "dbl_float.h" /* * Double Floating-point Square Root */ /*ARGSUSED*/ unsigned int dbl_fsqrt( dbl_floating_point *srcptr, unsigned int *nullptr, dbl_floating_point *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, resultp1, resultp2; register unsigned int newbitp1, newbitp2, sump1, sump2; register int src_exponent; register boolean guardbit = FALSE, even_exponent; Dbl_copyfromptr(srcptr,srcp1,srcp2); /* * check source operand for NaN or infinity */ if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) { /* * is signaling NaN? */ if (Dbl_isone_signaling(srcp1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(srcp1); } /* * Return quiet NaN or positive infinity. 
* Fall through to negative test if negative infinity. */ if (Dbl_iszero_sign(srcp1) || Dbl_isnotzero_mantissa(srcp1,srcp2)) { Dbl_copytoptr(srcp1,srcp2,dstptr); return(NOEXCEPTION); } } /* * check for zero source operand */ if (Dbl_iszero_exponentmantissa(srcp1,srcp2)) { Dbl_copytoptr(srcp1,srcp2,dstptr); return(NOEXCEPTION); } /* * check for negative source operand */ if (Dbl_isone_sign(srcp1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_makequietnan(srcp1,srcp2); Dbl_copytoptr(srcp1,srcp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent > 0) { even_exponent = Dbl_hidden(srcp1); Dbl_clear_signexponent_set_hidden(srcp1); } else { /* normalize operand */ Dbl_clear_signexponent(srcp1); src_exponent++; Dbl_normalize(srcp1,srcp2,src_exponent); even_exponent = src_exponent & 1; } if (even_exponent) { /* exponent is even */ /* Add comment here. Explain why odd exponent needs correction */ Dbl_leftshiftby1(srcp1,srcp2); } /* * Add comment here. Explain following algorithm. * * Trust me, it works. 
* */ Dbl_setzero(resultp1,resultp2); Dbl_allp1(newbitp1) = 1 << (DBL_P - 32); Dbl_setzero_mantissap2(newbitp2); while (Dbl_isnotzero(newbitp1,newbitp2) && Dbl_isnotzero(srcp1,srcp2)) { Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,sump1,sump2); if(Dbl_isnotgreaterthan(sump1,sump2,srcp1,srcp2)) { Dbl_leftshiftby1(newbitp1,newbitp2); /* update result */ Dbl_addition(resultp1,resultp2,newbitp1,newbitp2, resultp1,resultp2); Dbl_subtract(srcp1,srcp2,sump1,sump2,srcp1,srcp2); Dbl_rightshiftby2(newbitp1,newbitp2); } else { Dbl_rightshiftby1(newbitp1,newbitp2); } Dbl_leftshiftby1(srcp1,srcp2); } /* correct exponent for pre-shift */ if (even_exponent) { Dbl_rightshiftby1(resultp1,resultp2); } /* check for inexact */ if (Dbl_isnotzero(srcp1,srcp2)) { if (!even_exponent && Dbl_islessthan(resultp1,resultp2,srcp1,srcp2)) { Dbl_increment(resultp1,resultp2); } guardbit = Dbl_lowmantissap2(resultp2); Dbl_rightshiftby1(resultp1,resultp2); /* now round result */ switch (Rounding_mode()) { case ROUNDPLUS: Dbl_increment(resultp1,resultp2); break; case ROUNDNEAREST: /* stickybit is always true, so guardbit * is enough to determine rounding */ if (guardbit) { Dbl_increment(resultp1,resultp2); } break; } /* increment result exponent by 1 if mantissa overflowed */ if (Dbl_isone_hiddenoverflow(resultp1)) src_exponent+=2; if (Is_inexacttrap_enabled()) { Dbl_set_exponent(resultp1, ((src_exponent-DBL_BIAS)>>1)+DBL_BIAS); Dbl_copytoptr(resultp1,resultp2,dstptr); return(INEXACTEXCEPTION); } else Set_inexactflag(); } else { Dbl_rightshiftby1(resultp1,resultp2); } Dbl_set_exponent(resultp1,((src_exponent-DBL_BIAS)>>1)+DBL_BIAS); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); }
gpl-2.0
gao-yan/pacemaker
crmd/utils.c
11
34773
/* * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <crm_internal.h> #include <sys/param.h> #include <sys/types.h> #include <sys/wait.h> #include <signal.h> #include <crm/crm.h> #include <crm/cib.h> #include <crm/attrd.h> #include <crm/msg_xml.h> #include <crm/common/xml.h> #include <crm/cluster.h> #include <crmd_fsa.h> #include <crmd_utils.h> #include <crmd_messages.h> /* A_DC_TIMER_STOP, A_DC_TIMER_START, * A_FINALIZE_TIMER_STOP, A_FINALIZE_TIMER_START * A_INTEGRATE_TIMER_STOP, A_INTEGRATE_TIMER_START */ void do_timer_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean timer_op_ok = TRUE; if (action & A_DC_TIMER_STOP) { timer_op_ok = crm_timer_stop(election_trigger); } else if (action & A_FINALIZE_TIMER_STOP) { timer_op_ok = crm_timer_stop(finalization_timer); } else if (action & A_INTEGRATE_TIMER_STOP) { timer_op_ok = crm_timer_stop(integration_timer); /* } else if(action & A_ELECTION_TIMEOUT_STOP) { */ /* timer_op_ok = crm_timer_stop(election_timeout); */ } /* dont start a timer that wasnt already running */ if (action & A_DC_TIMER_START && timer_op_ok) { crm_timer_start(election_trigger); if (AM_I_DC) { /* there can be only one */ 
register_fsa_input(cause, I_ELECTION, NULL); } } else if (action & A_FINALIZE_TIMER_START) { crm_timer_start(finalization_timer); } else if (action & A_INTEGRATE_TIMER_START) { crm_timer_start(integration_timer); /* } else if(action & A_ELECTION_TIMEOUT_START) { */ /* crm_timer_start(election_timeout); */ } } const char * get_timer_desc(fsa_timer_t * timer) { if (timer == election_trigger) { return "Election Trigger"; } else if (timer == shutdown_escalation_timer) { return "Shutdown Escalation"; } else if (timer == integration_timer) { return "Integration Timer"; } else if (timer == finalization_timer) { return "Finalization Timer"; } else if (timer == transition_timer) { return "New Transition Timer"; } else if (timer == wait_timer) { return "Wait Timer"; } else if (timer == recheck_timer) { return "PEngine Recheck Timer"; } return "Unknown Timer"; } gboolean crm_timer_popped(gpointer data) { fsa_timer_t *timer = (fsa_timer_t *) data; if (timer == wait_timer || timer == recheck_timer || timer == transition_timer || timer == finalization_timer || timer == election_trigger) { crm_info("%s (%s) just popped (%dms)", get_timer_desc(timer), fsa_input2string(timer->fsa_input), timer->period_ms); timer->counter++; } else { crm_err("%s (%s) just popped in state %s! 
(%dms)", get_timer_desc(timer), fsa_input2string(timer->fsa_input), fsa_state2string(fsa_state), timer->period_ms); } if (timer == election_trigger && election_trigger->counter > 5) { crm_notice("We appear to be in an election loop, something may be wrong"); crm_write_blackbox(0, NULL); election_trigger->counter = 0; } if (timer->repeat == FALSE) { crm_timer_stop(timer); /* make it _not_ go off again */ } if (timer->fsa_input == I_INTEGRATED) { crm_info("Welcomed: %d, Integrated: %d", crmd_join_phase_count(crm_join_welcomed), crmd_join_phase_count(crm_join_integrated)); if (crmd_join_phase_count(crm_join_welcomed) == 0) { /* If we don't even have ourself, start again */ register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION, NULL, NULL, __FUNCTION__); } else { register_fsa_input_before(C_TIMER_POPPED, timer->fsa_input, NULL); } } else if (timer == recheck_timer && fsa_state != S_IDLE) { crm_debug("Discarding %s event in state: %s", fsa_input2string(timer->fsa_input), fsa_state2string(fsa_state)); } else if (timer == finalization_timer && fsa_state != S_FINALIZE_JOIN) { crm_debug("Discarding %s event in state: %s", fsa_input2string(timer->fsa_input), fsa_state2string(fsa_state)); } else if (timer->fsa_input != I_NULL) { register_fsa_input(C_TIMER_POPPED, timer->fsa_input, NULL); } crm_trace("Triggering FSA: %s", __FUNCTION__); mainloop_set_trigger(fsa_source); return TRUE; } gboolean is_timer_started(fsa_timer_t * timer) { if (timer->period_ms > 0) { if (transition_timer->source_id == 0) { return FALSE; } else { return TRUE; } } return FALSE; } gboolean crm_timer_start(fsa_timer_t * timer) { const char *timer_desc = get_timer_desc(timer); if (timer->source_id == 0 && timer->period_ms > 0) { timer->source_id = g_timeout_add(timer->period_ms, timer->callback, (void *)timer); CRM_ASSERT(timer->source_id != 0); crm_debug("Started %s (%s:%dms), src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); } else if (timer->period_ms < 0) { 
crm_err("Tried to start %s (%s:%dms) with a -ve period", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms); } else { crm_debug("%s (%s:%dms) already running: src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); return FALSE; } return TRUE; } gboolean crm_timer_stop(fsa_timer_t * timer) { const char *timer_desc = get_timer_desc(timer); if (timer == NULL) { crm_err("Attempted to stop NULL timer"); return FALSE; } else if (timer->source_id != 0) { crm_trace("Stopping %s (%s:%dms), src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_trace("%s (%s:%dms) already stopped", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms); return FALSE; } return TRUE; } const char * fsa_input2string(enum crmd_fsa_input input) { const char *inputAsText = NULL; switch (input) { case I_NULL: inputAsText = "I_NULL"; break; case I_CIB_OP: inputAsText = "I_CIB_OP (unused)"; break; case I_CIB_UPDATE: inputAsText = "I_CIB_UPDATE"; break; case I_DC_TIMEOUT: inputAsText = "I_DC_TIMEOUT"; break; case I_ELECTION: inputAsText = "I_ELECTION"; break; case I_PE_CALC: inputAsText = "I_PE_CALC"; break; case I_RELEASE_DC: inputAsText = "I_RELEASE_DC"; break; case I_ELECTION_DC: inputAsText = "I_ELECTION_DC"; break; case I_ERROR: inputAsText = "I_ERROR"; break; case I_FAIL: inputAsText = "I_FAIL"; break; case I_INTEGRATED: inputAsText = "I_INTEGRATED"; break; case I_FINALIZED: inputAsText = "I_FINALIZED"; break; case I_NODE_JOIN: inputAsText = "I_NODE_JOIN"; break; case I_JOIN_OFFER: inputAsText = "I_JOIN_OFFER"; break; case I_JOIN_REQUEST: inputAsText = "I_JOIN_REQUEST"; break; case I_JOIN_RESULT: inputAsText = "I_JOIN_RESULT"; break; case I_NOT_DC: inputAsText = "I_NOT_DC"; break; case I_RECOVERED: inputAsText = "I_RECOVERED"; break; case I_RELEASE_FAIL: inputAsText = "I_RELEASE_FAIL"; break; case I_RELEASE_SUCCESS: 
inputAsText = "I_RELEASE_SUCCESS"; break; case I_RESTART: inputAsText = "I_RESTART"; break; case I_PE_SUCCESS: inputAsText = "I_PE_SUCCESS"; break; case I_ROUTER: inputAsText = "I_ROUTER"; break; case I_SHUTDOWN: inputAsText = "I_SHUTDOWN"; break; case I_STARTUP: inputAsText = "I_STARTUP"; break; case I_TE_SUCCESS: inputAsText = "I_TE_SUCCESS"; break; case I_STOP: inputAsText = "I_STOP"; break; case I_DC_HEARTBEAT: inputAsText = "I_DC_HEARTBEAT"; break; case I_WAIT_FOR_EVENT: inputAsText = "I_WAIT_FOR_EVENT"; break; case I_LRM_EVENT: inputAsText = "I_LRM_EVENT"; break; case I_PENDING: inputAsText = "I_PENDING"; break; case I_HALT: inputAsText = "I_HALT"; break; case I_TERMINATE: inputAsText = "I_TERMINATE"; break; case I_ILLEGAL: inputAsText = "I_ILLEGAL"; break; } if (inputAsText == NULL) { crm_err("Input %d is unknown", input); inputAsText = "<UNKNOWN_INPUT>"; } return inputAsText; } const char * fsa_state2string(enum crmd_fsa_state state) { const char *stateAsText = NULL; switch (state) { case S_IDLE: stateAsText = "S_IDLE"; break; case S_ELECTION: stateAsText = "S_ELECTION"; break; case S_INTEGRATION: stateAsText = "S_INTEGRATION"; break; case S_FINALIZE_JOIN: stateAsText = "S_FINALIZE_JOIN"; break; case S_NOT_DC: stateAsText = "S_NOT_DC"; break; case S_POLICY_ENGINE: stateAsText = "S_POLICY_ENGINE"; break; case S_RECOVERY: stateAsText = "S_RECOVERY"; break; case S_RELEASE_DC: stateAsText = "S_RELEASE_DC"; break; case S_PENDING: stateAsText = "S_PENDING"; break; case S_STOPPING: stateAsText = "S_STOPPING"; break; case S_TERMINATE: stateAsText = "S_TERMINATE"; break; case S_TRANSITION_ENGINE: stateAsText = "S_TRANSITION_ENGINE"; break; case S_STARTING: stateAsText = "S_STARTING"; break; case S_HALT: stateAsText = "S_HALT"; break; case S_ILLEGAL: stateAsText = "S_ILLEGAL"; break; } if (stateAsText == NULL) { crm_err("State %d is unknown", state); stateAsText = "<UNKNOWN_STATE>"; } return stateAsText; } const char * fsa_cause2string(enum crmd_fsa_cause cause) { 
const char *causeAsText = NULL; switch (cause) { case C_UNKNOWN: causeAsText = "C_UNKNOWN"; break; case C_STARTUP: causeAsText = "C_STARTUP"; break; case C_IPC_MESSAGE: causeAsText = "C_IPC_MESSAGE"; break; case C_HA_MESSAGE: causeAsText = "C_HA_MESSAGE"; break; case C_CCM_CALLBACK: causeAsText = "C_CCM_CALLBACK"; break; case C_TIMER_POPPED: causeAsText = "C_TIMER_POPPED"; break; case C_SHUTDOWN: causeAsText = "C_SHUTDOWN"; break; case C_HEARTBEAT_FAILED: causeAsText = "C_HEARTBEAT_FAILED"; break; case C_SUBSYSTEM_CONNECT: causeAsText = "C_SUBSYSTEM_CONNECT"; break; case C_LRM_OP_CALLBACK: causeAsText = "C_LRM_OP_CALLBACK"; break; case C_LRM_MONITOR_CALLBACK: causeAsText = "C_LRM_MONITOR_CALLBACK"; break; case C_CRMD_STATUS_CALLBACK: causeAsText = "C_CRMD_STATUS_CALLBACK"; break; case C_HA_DISCONNECT: causeAsText = "C_HA_DISCONNECT"; break; case C_FSA_INTERNAL: causeAsText = "C_FSA_INTERNAL"; break; case C_ILLEGAL: causeAsText = "C_ILLEGAL"; break; } if (causeAsText == NULL) { crm_err("Cause %d is unknown", cause); causeAsText = "<UNKNOWN_CAUSE>"; } return causeAsText; } const char * fsa_action2string(long long action) { const char *actionAsText = NULL; switch (action) { case A_NOTHING: actionAsText = "A_NOTHING"; break; case A_ELECTION_START: actionAsText = "A_ELECTION_START"; break; case A_DC_JOIN_FINAL: actionAsText = "A_DC_JOIN_FINAL"; break; case A_READCONFIG: actionAsText = "A_READCONFIG"; break; case O_RELEASE: actionAsText = "O_RELEASE"; break; case A_STARTUP: actionAsText = "A_STARTUP"; break; case A_STARTED: actionAsText = "A_STARTED"; break; case A_HA_CONNECT: actionAsText = "A_HA_CONNECT"; break; case A_HA_DISCONNECT: actionAsText = "A_HA_DISCONNECT"; break; case A_LRM_CONNECT: actionAsText = "A_LRM_CONNECT"; break; case A_LRM_EVENT: actionAsText = "A_LRM_EVENT"; break; case A_LRM_INVOKE: actionAsText = "A_LRM_INVOKE"; break; case A_LRM_DISCONNECT: actionAsText = "A_LRM_DISCONNECT"; break; case O_LRM_RECONNECT: actionAsText = "O_LRM_RECONNECT"; break; 
case A_CL_JOIN_QUERY: actionAsText = "A_CL_JOIN_QUERY"; break; case A_DC_TIMER_STOP: actionAsText = "A_DC_TIMER_STOP"; break; case A_DC_TIMER_START: actionAsText = "A_DC_TIMER_START"; break; case A_INTEGRATE_TIMER_START: actionAsText = "A_INTEGRATE_TIMER_START"; break; case A_INTEGRATE_TIMER_STOP: actionAsText = "A_INTEGRATE_TIMER_STOP"; break; case A_FINALIZE_TIMER_START: actionAsText = "A_FINALIZE_TIMER_START"; break; case A_FINALIZE_TIMER_STOP: actionAsText = "A_FINALIZE_TIMER_STOP"; break; case A_ELECTION_COUNT: actionAsText = "A_ELECTION_COUNT"; break; case A_ELECTION_VOTE: actionAsText = "A_ELECTION_VOTE"; break; case A_ELECTION_CHECK: actionAsText = "A_ELECTION_CHECK"; break; case A_CL_JOIN_ANNOUNCE: actionAsText = "A_CL_JOIN_ANNOUNCE"; break; case A_CL_JOIN_REQUEST: actionAsText = "A_CL_JOIN_REQUEST"; break; case A_CL_JOIN_RESULT: actionAsText = "A_CL_JOIN_RESULT"; break; case A_DC_JOIN_OFFER_ALL: actionAsText = "A_DC_JOIN_OFFER_ALL"; break; case A_DC_JOIN_OFFER_ONE: actionAsText = "A_DC_JOIN_OFFER_ONE"; break; case A_DC_JOIN_PROCESS_REQ: actionAsText = "A_DC_JOIN_PROCESS_REQ"; break; case A_DC_JOIN_PROCESS_ACK: actionAsText = "A_DC_JOIN_PROCESS_ACK"; break; case A_DC_JOIN_FINALIZE: actionAsText = "A_DC_JOIN_FINALIZE"; break; case A_MSG_PROCESS: actionAsText = "A_MSG_PROCESS"; break; case A_MSG_ROUTE: actionAsText = "A_MSG_ROUTE"; break; case A_RECOVER: actionAsText = "A_RECOVER"; break; case A_DC_RELEASE: actionAsText = "A_DC_RELEASE"; break; case A_DC_RELEASED: actionAsText = "A_DC_RELEASED"; break; case A_DC_TAKEOVER: actionAsText = "A_DC_TAKEOVER"; break; case A_SHUTDOWN: actionAsText = "A_SHUTDOWN"; break; case A_SHUTDOWN_REQ: actionAsText = "A_SHUTDOWN_REQ"; break; case A_STOP: actionAsText = "A_STOP "; break; case A_EXIT_0: actionAsText = "A_EXIT_0"; break; case A_EXIT_1: actionAsText = "A_EXIT_1"; break; case A_CCM_CONNECT: actionAsText = "A_CCM_CONNECT"; break; case A_CCM_DISCONNECT: actionAsText = "A_CCM_DISCONNECT"; break; case O_CIB_RESTART: 
actionAsText = "O_CIB_RESTART"; break; case A_CIB_START: actionAsText = "A_CIB_START"; break; case A_CIB_STOP: actionAsText = "A_CIB_STOP"; break; case A_TE_INVOKE: actionAsText = "A_TE_INVOKE"; break; case O_TE_RESTART: actionAsText = "O_TE_RESTART"; break; case A_TE_START: actionAsText = "A_TE_START"; break; case A_TE_STOP: actionAsText = "A_TE_STOP"; break; case A_TE_HALT: actionAsText = "A_TE_HALT"; break; case A_TE_CANCEL: actionAsText = "A_TE_CANCEL"; break; case A_PE_INVOKE: actionAsText = "A_PE_INVOKE"; break; case O_PE_RESTART: actionAsText = "O_PE_RESTART"; break; case A_PE_START: actionAsText = "A_PE_START"; break; case A_PE_STOP: actionAsText = "A_PE_STOP"; break; case A_NODE_BLOCK: actionAsText = "A_NODE_BLOCK"; break; case A_UPDATE_NODESTATUS: actionAsText = "A_UPDATE_NODESTATUS"; break; case A_LOG: actionAsText = "A_LOG "; break; case A_ERROR: actionAsText = "A_ERROR "; break; case A_WARN: actionAsText = "A_WARN "; break; /* Composite actions */ case A_DC_TIMER_START | A_CL_JOIN_QUERY: actionAsText = "A_DC_TIMER_START|A_CL_JOIN_QUERY"; break; } if (actionAsText == NULL) { crm_err("Action %.16llx is unknown", action); actionAsText = "<UNKNOWN_ACTION>"; } return actionAsText; } void fsa_dump_inputs(int log_level, const char *text, long long input_register) { if (input_register == A_NOTHING) { return; } if (text == NULL) { text = "Input register contents:"; } if (is_set(input_register, R_THE_DC)) { crm_trace("%s %.16llx (R_THE_DC)", text, R_THE_DC); } if (is_set(input_register, R_STARTING)) { crm_trace("%s %.16llx (R_STARTING)", text, R_STARTING); } if (is_set(input_register, R_SHUTDOWN)) { crm_trace("%s %.16llx (R_SHUTDOWN)", text, R_SHUTDOWN); } if (is_set(input_register, R_STAYDOWN)) { crm_trace("%s %.16llx (R_STAYDOWN)", text, R_STAYDOWN); } if (is_set(input_register, R_JOIN_OK)) { crm_trace("%s %.16llx (R_JOIN_OK)", text, R_JOIN_OK); } if (is_set(input_register, R_READ_CONFIG)) { crm_trace("%s %.16llx (R_READ_CONFIG)", text, R_READ_CONFIG); } if 
(is_set(input_register, R_INVOKE_PE)) { crm_trace("%s %.16llx (R_INVOKE_PE)", text, R_INVOKE_PE); } if (is_set(input_register, R_CIB_CONNECTED)) { crm_trace("%s %.16llx (R_CIB_CONNECTED)", text, R_CIB_CONNECTED); } if (is_set(input_register, R_PE_CONNECTED)) { crm_trace("%s %.16llx (R_PE_CONNECTED)", text, R_PE_CONNECTED); } if (is_set(input_register, R_TE_CONNECTED)) { crm_trace("%s %.16llx (R_TE_CONNECTED)", text, R_TE_CONNECTED); } if (is_set(input_register, R_LRM_CONNECTED)) { crm_trace("%s %.16llx (R_LRM_CONNECTED)", text, R_LRM_CONNECTED); } if (is_set(input_register, R_CIB_REQUIRED)) { crm_trace("%s %.16llx (R_CIB_REQUIRED)", text, R_CIB_REQUIRED); } if (is_set(input_register, R_PE_REQUIRED)) { crm_trace("%s %.16llx (R_PE_REQUIRED)", text, R_PE_REQUIRED); } if (is_set(input_register, R_TE_REQUIRED)) { crm_trace("%s %.16llx (R_TE_REQUIRED)", text, R_TE_REQUIRED); } if (is_set(input_register, R_REQ_PEND)) { crm_trace("%s %.16llx (R_REQ_PEND)", text, R_REQ_PEND); } if (is_set(input_register, R_PE_PEND)) { crm_trace("%s %.16llx (R_PE_PEND)", text, R_PE_PEND); } if (is_set(input_register, R_TE_PEND)) { crm_trace("%s %.16llx (R_TE_PEND)", text, R_TE_PEND); } if (is_set(input_register, R_RESP_PEND)) { crm_trace("%s %.16llx (R_RESP_PEND)", text, R_RESP_PEND); } if (is_set(input_register, R_CIB_DONE)) { crm_trace("%s %.16llx (R_CIB_DONE)", text, R_CIB_DONE); } if (is_set(input_register, R_HAVE_CIB)) { crm_trace("%s %.16llx (R_HAVE_CIB)", text, R_HAVE_CIB); } if (is_set(input_register, R_CIB_ASKED)) { crm_trace("%s %.16llx (R_CIB_ASKED)", text, R_CIB_ASKED); } if (is_set(input_register, R_MEMBERSHIP)) { crm_trace("%s %.16llx (R_MEMBERSHIP)", text, R_MEMBERSHIP); } if (is_set(input_register, R_PEER_DATA)) { crm_trace("%s %.16llx (R_PEER_DATA)", text, R_PEER_DATA); } if (is_set(input_register, R_IN_RECOVERY)) { crm_trace("%s %.16llx (R_IN_RECOVERY)", text, R_IN_RECOVERY); } } void fsa_dump_actions(long long action, const char *text) { if (is_set(action, A_READCONFIG)) { 
crm_trace("Action %.16llx (A_READCONFIG) %s", A_READCONFIG, text); } if (is_set(action, A_STARTUP)) { crm_trace("Action %.16llx (A_STARTUP) %s", A_STARTUP, text); } if (is_set(action, A_STARTED)) { crm_trace("Action %.16llx (A_STARTED) %s", A_STARTED, text); } if (is_set(action, A_HA_CONNECT)) { crm_trace("Action %.16llx (A_CONNECT) %s", A_HA_CONNECT, text); } if (is_set(action, A_HA_DISCONNECT)) { crm_trace("Action %.16llx (A_DISCONNECT) %s", A_HA_DISCONNECT, text); } if (is_set(action, A_LRM_CONNECT)) { crm_trace("Action %.16llx (A_LRM_CONNECT) %s", A_LRM_CONNECT, text); } if (is_set(action, A_LRM_EVENT)) { crm_trace("Action %.16llx (A_LRM_EVENT) %s", A_LRM_EVENT, text); } if (is_set(action, A_LRM_INVOKE)) { crm_trace("Action %.16llx (A_LRM_INVOKE) %s", A_LRM_INVOKE, text); } if (is_set(action, A_LRM_DISCONNECT)) { crm_trace("Action %.16llx (A_LRM_DISCONNECT) %s", A_LRM_DISCONNECT, text); } if (is_set(action, A_DC_TIMER_STOP)) { crm_trace("Action %.16llx (A_DC_TIMER_STOP) %s", A_DC_TIMER_STOP, text); } if (is_set(action, A_DC_TIMER_START)) { crm_trace("Action %.16llx (A_DC_TIMER_START) %s", A_DC_TIMER_START, text); } if (is_set(action, A_INTEGRATE_TIMER_START)) { crm_trace("Action %.16llx (A_INTEGRATE_TIMER_START) %s", A_INTEGRATE_TIMER_START, text); } if (is_set(action, A_INTEGRATE_TIMER_STOP)) { crm_trace("Action %.16llx (A_INTEGRATE_TIMER_STOP) %s", A_INTEGRATE_TIMER_STOP, text); } if (is_set(action, A_FINALIZE_TIMER_START)) { crm_trace("Action %.16llx (A_FINALIZE_TIMER_START) %s", A_FINALIZE_TIMER_START, text); } if (is_set(action, A_FINALIZE_TIMER_STOP)) { crm_trace("Action %.16llx (A_FINALIZE_TIMER_STOP) %s", A_FINALIZE_TIMER_STOP, text); } if (is_set(action, A_ELECTION_COUNT)) { crm_trace("Action %.16llx (A_ELECTION_COUNT) %s", A_ELECTION_COUNT, text); } if (is_set(action, A_ELECTION_VOTE)) { crm_trace("Action %.16llx (A_ELECTION_VOTE) %s", A_ELECTION_VOTE, text); } if (is_set(action, A_ELECTION_CHECK)) { crm_trace("Action %.16llx (A_ELECTION_CHECK) %s", 
A_ELECTION_CHECK, text); } if (is_set(action, A_CL_JOIN_ANNOUNCE)) { crm_trace("Action %.16llx (A_CL_JOIN_ANNOUNCE) %s", A_CL_JOIN_ANNOUNCE, text); } if (is_set(action, A_CL_JOIN_REQUEST)) { crm_trace("Action %.16llx (A_CL_JOIN_REQUEST) %s", A_CL_JOIN_REQUEST, text); } if (is_set(action, A_CL_JOIN_RESULT)) { crm_trace("Action %.16llx (A_CL_JOIN_RESULT) %s", A_CL_JOIN_RESULT, text); } if (is_set(action, A_DC_JOIN_OFFER_ALL)) { crm_trace("Action %.16llx (A_DC_JOIN_OFFER_ALL) %s", A_DC_JOIN_OFFER_ALL, text); } if (is_set(action, A_DC_JOIN_OFFER_ONE)) { crm_trace("Action %.16llx (A_DC_JOIN_OFFER_ONE) %s", A_DC_JOIN_OFFER_ONE, text); } if (is_set(action, A_DC_JOIN_PROCESS_REQ)) { crm_trace("Action %.16llx (A_DC_JOIN_PROCESS_REQ) %s", A_DC_JOIN_PROCESS_REQ, text); } if (is_set(action, A_DC_JOIN_PROCESS_ACK)) { crm_trace("Action %.16llx (A_DC_JOIN_PROCESS_ACK) %s", A_DC_JOIN_PROCESS_ACK, text); } if (is_set(action, A_DC_JOIN_FINALIZE)) { crm_trace("Action %.16llx (A_DC_JOIN_FINALIZE) %s", A_DC_JOIN_FINALIZE, text); } if (is_set(action, A_MSG_PROCESS)) { crm_trace("Action %.16llx (A_MSG_PROCESS) %s", A_MSG_PROCESS, text); } if (is_set(action, A_MSG_ROUTE)) { crm_trace("Action %.16llx (A_MSG_ROUTE) %s", A_MSG_ROUTE, text); } if (is_set(action, A_RECOVER)) { crm_trace("Action %.16llx (A_RECOVER) %s", A_RECOVER, text); } if (is_set(action, A_DC_RELEASE)) { crm_trace("Action %.16llx (A_DC_RELEASE) %s", A_DC_RELEASE, text); } if (is_set(action, A_DC_RELEASED)) { crm_trace("Action %.16llx (A_DC_RELEASED) %s", A_DC_RELEASED, text); } if (is_set(action, A_DC_TAKEOVER)) { crm_trace("Action %.16llx (A_DC_TAKEOVER) %s", A_DC_TAKEOVER, text); } if (is_set(action, A_SHUTDOWN)) { crm_trace("Action %.16llx (A_SHUTDOWN) %s", A_SHUTDOWN, text); } if (is_set(action, A_SHUTDOWN_REQ)) { crm_trace("Action %.16llx (A_SHUTDOWN_REQ) %s", A_SHUTDOWN_REQ, text); } if (is_set(action, A_STOP)) { crm_trace("Action %.16llx (A_STOP ) %s", A_STOP, text); } if (is_set(action, A_EXIT_0)) { 
crm_trace("Action %.16llx (A_EXIT_0) %s", A_EXIT_0, text); } if (is_set(action, A_EXIT_1)) { crm_trace("Action %.16llx (A_EXIT_1) %s", A_EXIT_1, text); } if (is_set(action, A_CCM_CONNECT)) { crm_trace("Action %.16llx (A_CCM_CONNECT) %s", A_CCM_CONNECT, text); } if (is_set(action, A_CCM_DISCONNECT)) { crm_trace("Action %.16llx (A_CCM_DISCONNECT) %s", A_CCM_DISCONNECT, text); } if (is_set(action, A_CIB_START)) { crm_trace("Action %.16llx (A_CIB_START) %s", A_CIB_START, text); } if (is_set(action, A_CIB_STOP)) { crm_trace("Action %.16llx (A_CIB_STOP) %s", A_CIB_STOP, text); } if (is_set(action, A_TE_INVOKE)) { crm_trace("Action %.16llx (A_TE_INVOKE) %s", A_TE_INVOKE, text); } if (is_set(action, A_TE_START)) { crm_trace("Action %.16llx (A_TE_START) %s", A_TE_START, text); } if (is_set(action, A_TE_STOP)) { crm_trace("Action %.16llx (A_TE_STOP) %s", A_TE_STOP, text); } if (is_set(action, A_TE_CANCEL)) { crm_trace("Action %.16llx (A_TE_CANCEL) %s", A_TE_CANCEL, text); } if (is_set(action, A_PE_INVOKE)) { crm_trace("Action %.16llx (A_PE_INVOKE) %s", A_PE_INVOKE, text); } if (is_set(action, A_PE_START)) { crm_trace("Action %.16llx (A_PE_START) %s", A_PE_START, text); } if (is_set(action, A_PE_STOP)) { crm_trace("Action %.16llx (A_PE_STOP) %s", A_PE_STOP, text); } if (is_set(action, A_NODE_BLOCK)) { crm_trace("Action %.16llx (A_NODE_BLOCK) %s", A_NODE_BLOCK, text); } if (is_set(action, A_UPDATE_NODESTATUS)) { crm_trace("Action %.16llx (A_UPDATE_NODESTATUS) %s", A_UPDATE_NODESTATUS, text); } if (is_set(action, A_LOG)) { crm_trace("Action %.16llx (A_LOG ) %s", A_LOG, text); } if (is_set(action, A_ERROR)) { crm_trace("Action %.16llx (A_ERROR ) %s", A_ERROR, text); } if (is_set(action, A_WARN)) { crm_trace("Action %.16llx (A_WARN ) %s", A_WARN, text); } } gboolean update_dc(xmlNode * msg) { char *last_dc = fsa_our_dc; const char *dc_version = NULL; const char *welcome_from = NULL; if (msg != NULL) { gboolean invalid = FALSE; dc_version = crm_element_value(msg, F_CRM_VERSION); 
welcome_from = crm_element_value(msg, F_CRM_HOST_FROM); CRM_CHECK(dc_version != NULL, return FALSE); CRM_CHECK(welcome_from != NULL, return FALSE); if (AM_I_DC && safe_str_neq(welcome_from, fsa_our_uname)) { invalid = TRUE; } else if (fsa_our_dc && safe_str_neq(welcome_from, fsa_our_dc)) { invalid = TRUE; } if (invalid) { CRM_CHECK(fsa_our_dc != NULL, crm_err("We have no DC")); if (AM_I_DC) { crm_err("Not updating DC to %s (%s): we are also a DC", welcome_from, dc_version); } else { crm_warn("New DC %s is not %s", welcome_from, fsa_our_dc); } register_fsa_action(A_CL_JOIN_QUERY | A_DC_TIMER_START); return FALSE; } } free(fsa_our_dc_version); fsa_our_dc_version = NULL; fsa_our_dc = NULL; /* Free'd as last_dc */ if (welcome_from != NULL) { fsa_our_dc = strdup(welcome_from); } if (dc_version != NULL) { fsa_our_dc_version = strdup(dc_version); } if (safe_str_eq(fsa_our_dc, last_dc)) { /* do nothing */ } else if (fsa_our_dc != NULL) { crm_node_t *dc_node = crm_get_peer(0, fsa_our_dc); crm_info("Set DC to %s (%s)", crm_str(fsa_our_dc), crm_str(fsa_our_dc_version)); crm_update_peer_expected(__FUNCTION__, dc_node, CRMD_JOINSTATE_MEMBER); } else if (last_dc != NULL) { crm_info("Unset DC. Was %s", crm_str(last_dc)); } free(last_dc); return TRUE; } #define STATUS_PATH_MAX 512 static void erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *xpath = user_data; do_crm_log_unlikely(rc == 0 ? 
LOG_DEBUG : LOG_NOTICE, "Deletion of \"%s\": %s (rc=%d)", xpath, pcmk_strerror(rc), rc); } void erase_status_tag(const char *uname, const char *tag, int options) { int rc = pcmk_ok; char xpath[STATUS_PATH_MAX]; int cib_opts = cib_quorum_override | cib_xpath | options; if (fsa_cib_conn && uname) { snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", uname, tag); crm_info("Deleting xpath: %s", xpath); rc = fsa_cib_conn->cmds->delete(fsa_cib_conn, xpath, NULL, cib_opts); fsa_register_cib_callback(rc, FALSE, strdup(xpath), erase_xpath_callback); } } crm_ipc_t *attrd_ipc = NULL; static void update_attrd_helper(const char *host, const char *name, const char *value, const char *user_name, gboolean is_remote_node, char command) { gboolean rc; int max = 5; if (attrd_ipc == NULL) { attrd_ipc = crm_ipc_new(T_ATTRD, 0); } do { if (crm_ipc_connected(attrd_ipc) == FALSE) { crm_ipc_close(attrd_ipc); crm_info("Connecting to attribute manager ... %d retries remaining", max); if (crm_ipc_connect(attrd_ipc) == FALSE) { crm_perror(LOG_INFO, "Connection to attribute manager failed"); } } rc = attrd_update_delegate(attrd_ipc, command, host, name, value, XML_CIB_TAG_STATUS, NULL, NULL, user_name, is_remote_node); if (rc == pcmk_ok) { break; } else if (rc != -EAGAIN && rc != -EALREADY) { crm_info("Disconnecting from attribute manager: %s (%d)", pcmk_strerror(rc), rc); crm_ipc_close(attrd_ipc); } sleep(5 - max); } while (max--); if (rc != pcmk_ok) { if (name) { crm_err("Could not send attrd %s update%s: %s (%d)", name, is_set(fsa_input_register, R_SHUTDOWN) ? " at shutdown" : "", pcmk_strerror(rc), rc); } else { crm_err("Could not send attrd refresh%s: %s (%d)", is_set(fsa_input_register, R_SHUTDOWN) ? 
" at shutdown" : "", pcmk_strerror(rc), rc); } if (is_set(fsa_input_register, R_SHUTDOWN)) { register_fsa_input(C_FSA_INTERNAL, I_FAIL, NULL); } } } void update_attrd(const char *host, const char *name, const char *value, const char *user_name, gboolean is_remote_node) { update_attrd_helper(host, name, value, user_name, is_remote_node, 'U'); } void update_attrd_remote_node_removed(const char *host, const char *user_name) { crm_trace("telling attrd to clear attributes for remote host %s", host); update_attrd_helper(host, NULL, NULL, user_name, TRUE, 'C'); } void crmd_peer_down(crm_node_t *peer, bool full) { if(full && peer->state == NULL) { crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_LOST, 0); crm_update_peer_proc(__FUNCTION__, peer, crm_proc_none, NULL); } crm_update_peer_join(__FUNCTION__, peer, crm_join_none); crm_update_peer_expected(__FUNCTION__, peer, CRMD_JOINSTATE_DOWN); }
gpl-2.0
OSCES/OSCES-FRAMEWORK
src/core/thirdparty/STM32F2/STM32F2xx_StdPeriph_Driver/src/stm32f2xx_iwdg.c
11
9403
/** ****************************************************************************** * @file stm32f2xx_iwdg.c * @author MCD Application Team * @version V1.1.2 * @date 05-March-2012 * @brief This file provides firmware functions to manage the following * functionalities of the Independent watchdog (IWDG) peripheral: * - Prescaler and Counter configuration * - IWDG activation * - Flag management * * @verbatim * * =================================================================== * IWDG features * =================================================================== * * The IWDG can be started by either software or hardware (configurable * through option byte). * * The IWDG is clocked by its own dedicated low-speed clock (LSI) and * thus stays active even if the main clock fails. * Once the IWDG is started, the LSI is forced ON and cannot be disabled * (LSI cannot be disabled too), and the counter starts counting down from * the reset value of 0xFFF. When it reaches the end of count value (0x000) * a system reset is generated. * The IWDG counter should be reloaded at regular intervals to prevent * an MCU reset. * * The IWDG is implemented in the VDD voltage domain that is still functional * in STOP and STANDBY mode (IWDG reset can wake-up from STANDBY). * * IWDGRST flag in RCC_CSR register can be used to inform when a IWDG * reset occurs. * * Min-max timeout value @32KHz (LSI): ~125us / ~32.7s * The IWDG timeout may vary due to LSI frequency dispersion. STM32F2xx * devices provide the capability to measure the LSI frequency (LSI clock * connected internally to TIM5 CH4 input capture). The measured value * can be used to have an IWDG timeout with an acceptable accuracy. * For more information, please refer to the STM32F2xx Reference manual * * * =================================================================== * How to use this driver * =================================================================== * 1. 
Enable write access to IWDG_PR and IWDG_RLR registers using * IWDG_WriteAccessCmd(IWDG_WriteAccess_Enable) function * * 2. Configure the IWDG prescaler using IWDG_SetPrescaler() function * * 3. Configure the IWDG counter value using IWDG_SetReload() function. * This value will be loaded in the IWDG counter each time the counter * is reloaded, then the IWDG will start counting down from this value. * * 4. Start the IWDG using IWDG_Enable() function, when the IWDG is used * in software mode (no need to enable the LSI, it will be enabled * by hardware) * * 5. Then the application program must reload the IWDG counter at regular * intervals during normal operation to prevent an MCU reset, using * IWDG_ReloadCounter() function. * * @endverbatim * ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT 2012 STMicroelectronics</center></h2> * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.st.com/software_license_agreement_liberty_v2 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f2xx_iwdg.h" /** @addtogroup STM32F2xx_StdPeriph_Driver * @{ */ /** @defgroup IWDG * @brief IWDG driver modules * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* KR register bit mask */ #define KR_KEY_RELOAD ((uint16_t)0xAAAA) #define KR_KEY_ENABLE ((uint16_t)0xCCCC) /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /* Private functions ---------------------------------------------------------*/ /** @defgroup IWDG_Private_Functions * @{ */ /** @defgroup IWDG_Group1 Prescaler and Counter configuration functions * @brief Prescaler and Counter configuration functions * @verbatim =============================================================================== Prescaler and Counter configuration functions =============================================================================== @endverbatim * @{ */ /** * @brief Enables or disables write access to IWDG_PR and IWDG_RLR registers. * @param IWDG_WriteAccess: new state of write access to IWDG_PR and IWDG_RLR registers. * This parameter can be one of the following values: * @arg IWDG_WriteAccess_Enable: Enable write access to IWDG_PR and IWDG_RLR registers * @arg IWDG_WriteAccess_Disable: Disable write access to IWDG_PR and IWDG_RLR registers * @retval None */ void IWDG_WriteAccessCmd(uint16_t IWDG_WriteAccess) { /* Check the parameters */ assert_param(IS_IWDG_WRITE_ACCESS(IWDG_WriteAccess)); IWDG->KR = IWDG_WriteAccess; } /** * @brief Sets IWDG Prescaler value. * @param IWDG_Prescaler: specifies the IWDG Prescaler value. 
* This parameter can be one of the following values: * @arg IWDG_Prescaler_4: IWDG prescaler set to 4 * @arg IWDG_Prescaler_8: IWDG prescaler set to 8 * @arg IWDG_Prescaler_16: IWDG prescaler set to 16 * @arg IWDG_Prescaler_32: IWDG prescaler set to 32 * @arg IWDG_Prescaler_64: IWDG prescaler set to 64 * @arg IWDG_Prescaler_128: IWDG prescaler set to 128 * @arg IWDG_Prescaler_256: IWDG prescaler set to 256 * @retval None */ void IWDG_SetPrescaler(uint8_t IWDG_Prescaler) { /* Check the parameters */ assert_param(IS_IWDG_PRESCALER(IWDG_Prescaler)); IWDG->PR = IWDG_Prescaler; } /** * @brief Sets IWDG Reload value. * @param Reload: specifies the IWDG Reload value. * This parameter must be a number between 0 and 0x0FFF. * @retval None */ void IWDG_SetReload(uint16_t Reload) { /* Check the parameters */ assert_param(IS_IWDG_RELOAD(Reload)); IWDG->RLR = Reload; } /** * @brief Reloads IWDG counter with value defined in the reload register * (write access to IWDG_PR and IWDG_RLR registers disabled). * @param None * @retval None */ void IWDG_ReloadCounter(void) { IWDG->KR = KR_KEY_RELOAD; } /** * @} */ /** @defgroup IWDG_Group2 IWDG activation function * @brief IWDG activation function * @verbatim =============================================================================== IWDG activation function =============================================================================== @endverbatim * @{ */ /** * @brief Enables IWDG (write access to IWDG_PR and IWDG_RLR registers disabled). * @param None * @retval None */ void IWDG_Enable(void) { IWDG->KR = KR_KEY_ENABLE; } /** * @} */ /** @defgroup IWDG_Group3 Flag management function * @brief Flag management function * @verbatim =============================================================================== Flag management function =============================================================================== @endverbatim * @{ */ /** * @brief Checks whether the specified IWDG flag is set or not. 
* @param IWDG_FLAG: specifies the flag to check. * This parameter can be one of the following values: * @arg IWDG_FLAG_PVU: Prescaler Value Update on going * @arg IWDG_FLAG_RVU: Reload Value Update on going * @retval The new state of IWDG_FLAG (SET or RESET). */ FlagStatus IWDG_GetFlagStatus(uint16_t IWDG_FLAG) { FlagStatus bitstatus = RESET; /* Check the parameters */ assert_param(IS_IWDG_FLAG(IWDG_FLAG)); if ((IWDG->SR & IWDG_FLAG) != (uint32_t)RESET) { bitstatus = SET; } else { bitstatus = RESET; } /* Return the flag status */ return bitstatus; } /** * @} */ /** * @} */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
gpl-2.0
anthonyryan1/xbmc
xbmc/peripherals/addons/PeripheralAddonTranslator.cpp
11
13055
/* * Copyright (C) 2015-2018 Team Kodi * This file is part of Kodi - https://kodi.tv * * SPDX-License-Identifier: GPL-2.0-or-later * See LICENSES/README.md for more information. */ #include "PeripheralAddonTranslator.h" #include "games/controllers/ControllerTranslator.h" #include "input/joysticks/JoystickUtils.h" #include <algorithm> #include <iterator> using namespace KODI; using namespace JOYSTICK; using namespace PERIPHERALS; // --- Helper function --------------------------------------------------------- JOYSTICK_DRIVER_SEMIAXIS_DIRECTION operator*(JOYSTICK_DRIVER_SEMIAXIS_DIRECTION dir, int i) { return static_cast<JOYSTICK_DRIVER_SEMIAXIS_DIRECTION>(static_cast<int>(dir) * i); } // --- CPeripheralAddonTranslator ---------------------------------------------- const char* CPeripheralAddonTranslator::TranslateError(const PERIPHERAL_ERROR error) { switch (error) { case PERIPHERAL_NO_ERROR: return "no error"; case PERIPHERAL_ERROR_FAILED: return "command failed"; case PERIPHERAL_ERROR_INVALID_PARAMETERS: return "invalid parameters"; case PERIPHERAL_ERROR_NOT_IMPLEMENTED: return "not implemented"; case PERIPHERAL_ERROR_NOT_CONNECTED: return "not connected"; case PERIPHERAL_ERROR_CONNECTION_FAILED: return "connection failed"; case PERIPHERAL_ERROR_UNKNOWN: default: return "unknown error"; } } PeripheralType CPeripheralAddonTranslator::TranslateType(PERIPHERAL_TYPE type) { switch (type) { case PERIPHERAL_TYPE_JOYSTICK: return PERIPHERAL_JOYSTICK; default: break; } return PERIPHERAL_UNKNOWN; } PERIPHERAL_TYPE CPeripheralAddonTranslator::TranslateType(PeripheralType type) { switch (type) { case PERIPHERAL_JOYSTICK: return PERIPHERAL_TYPE_JOYSTICK; default: break; } return PERIPHERAL_TYPE_UNKNOWN; } CDriverPrimitive CPeripheralAddonTranslator::TranslatePrimitive(const kodi::addon::DriverPrimitive& primitive) { CDriverPrimitive retVal; switch (primitive.Type()) { case JOYSTICK_DRIVER_PRIMITIVE_TYPE_BUTTON: { retVal = CDriverPrimitive(PRIMITIVE_TYPE::BUTTON, 
primitive.DriverIndex()); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_HAT_DIRECTION: { retVal = CDriverPrimitive(primitive.DriverIndex(), TranslateHatDirection(primitive.HatDirection())); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_SEMIAXIS: { retVal = CDriverPrimitive(primitive.DriverIndex(), primitive.Center(), TranslateSemiAxisDirection(primitive.SemiAxisDirection()), primitive.Range()); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_MOTOR: { retVal = CDriverPrimitive(PRIMITIVE_TYPE::MOTOR, primitive.DriverIndex()); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_KEY: { KEYBOARD::KeySymbol keycode = GAME::CControllerTranslator::TranslateKeysym(primitive.Keycode()); retVal = CDriverPrimitive(keycode); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_MOUSE_BUTTON: { retVal = CDriverPrimitive(TranslateMouseButton(primitive.MouseIndex())); break; } case JOYSTICK_DRIVER_PRIMITIVE_TYPE_RELPOINTER_DIRECTION: { retVal = CDriverPrimitive(TranslateRelPointerDirection(primitive.RelPointerDirection())); break; } default: break; } return retVal; } kodi::addon::DriverPrimitive CPeripheralAddonTranslator::TranslatePrimitive(const CDriverPrimitive& primitive) { kodi::addon::DriverPrimitive retVal; switch (primitive.Type()) { case PRIMITIVE_TYPE::BUTTON: { retVal = kodi::addon::DriverPrimitive::CreateButton(primitive.Index()); break; } case PRIMITIVE_TYPE::HAT: { retVal = kodi::addon::DriverPrimitive(primitive.Index(), TranslateHatDirection(primitive.HatDirection())); break; } case PRIMITIVE_TYPE::SEMIAXIS: { retVal = kodi::addon::DriverPrimitive(primitive.Index(), primitive.Center(), TranslateSemiAxisDirection(primitive.SemiAxisDirection()), primitive.Range()); break; } case PRIMITIVE_TYPE::MOTOR: { retVal = kodi::addon::DriverPrimitive::CreateMotor(primitive.Index()); break; } case PRIMITIVE_TYPE::KEY: { std::string keysym = GAME::CControllerTranslator::TranslateKeycode(primitive.Keycode()); retVal = kodi::addon::DriverPrimitive(keysym); break; } case PRIMITIVE_TYPE::MOUSE_BUTTON: { 
retVal = kodi::addon::DriverPrimitive::CreateMouseButton(TranslateMouseButton(primitive.MouseButton())); break; } case PRIMITIVE_TYPE::RELATIVE_POINTER: { retVal = kodi::addon::DriverPrimitive(TranslateRelPointerDirection(primitive.PointerDirection())); break; } default: break; } return retVal; } std::vector<JOYSTICK::CDriverPrimitive> CPeripheralAddonTranslator::TranslatePrimitives(const std::vector<kodi::addon::DriverPrimitive>& primitives) { std::vector<JOYSTICK::CDriverPrimitive> ret; std::transform(primitives.begin(), primitives.end(), std::back_inserter(ret), [](const kodi::addon::DriverPrimitive& primitive) { return CPeripheralAddonTranslator::TranslatePrimitive(primitive); }); return ret; } std::vector<kodi::addon::DriverPrimitive> CPeripheralAddonTranslator::TranslatePrimitives(const std::vector<JOYSTICK::CDriverPrimitive>& primitives) { std::vector<kodi::addon::DriverPrimitive> ret; std::transform(primitives.begin(), primitives.end(), std::back_inserter(ret), [](const JOYSTICK::CDriverPrimitive& primitive) { return CPeripheralAddonTranslator::TranslatePrimitive(primitive); }); return ret; } HAT_DIRECTION CPeripheralAddonTranslator::TranslateHatDirection(JOYSTICK_DRIVER_HAT_DIRECTION dir) { switch (dir) { case JOYSTICK_DRIVER_HAT_LEFT: return HAT_DIRECTION::LEFT; case JOYSTICK_DRIVER_HAT_RIGHT: return HAT_DIRECTION::RIGHT; case JOYSTICK_DRIVER_HAT_UP: return HAT_DIRECTION::UP; case JOYSTICK_DRIVER_HAT_DOWN: return HAT_DIRECTION::DOWN; default: break; } return HAT_DIRECTION::NONE; } JOYSTICK_DRIVER_HAT_DIRECTION CPeripheralAddonTranslator::TranslateHatDirection(HAT_DIRECTION dir) { switch (dir) { case HAT_DIRECTION::UP: return JOYSTICK_DRIVER_HAT_UP; case HAT_DIRECTION::DOWN: return JOYSTICK_DRIVER_HAT_DOWN; case HAT_DIRECTION::RIGHT: return JOYSTICK_DRIVER_HAT_RIGHT; case HAT_DIRECTION::LEFT: return JOYSTICK_DRIVER_HAT_LEFT; default: break; } return JOYSTICK_DRIVER_HAT_UNKNOWN; } HAT_STATE CPeripheralAddonTranslator::TranslateHatState(JOYSTICK_STATE_HAT 
state) { HAT_STATE translatedState = HAT_STATE::NONE; if (state & JOYSTICK_STATE_HAT_UP) translatedState |= HAT_STATE::UP; if (state & JOYSTICK_STATE_HAT_DOWN) translatedState |= HAT_STATE::DOWN; if (state & JOYSTICK_STATE_HAT_RIGHT) translatedState |= HAT_STATE::RIGHT; if (state & JOYSTICK_STATE_HAT_LEFT) translatedState |= HAT_STATE::LEFT; return translatedState; } SEMIAXIS_DIRECTION CPeripheralAddonTranslator::TranslateSemiAxisDirection(JOYSTICK_DRIVER_SEMIAXIS_DIRECTION dir) { switch (dir) { case JOYSTICK_DRIVER_SEMIAXIS_POSITIVE: return SEMIAXIS_DIRECTION::POSITIVE; case JOYSTICK_DRIVER_SEMIAXIS_NEGATIVE: return SEMIAXIS_DIRECTION::NEGATIVE; default: break; } return SEMIAXIS_DIRECTION::ZERO; } JOYSTICK_DRIVER_SEMIAXIS_DIRECTION CPeripheralAddonTranslator::TranslateSemiAxisDirection(SEMIAXIS_DIRECTION dir) { switch (dir) { case SEMIAXIS_DIRECTION::POSITIVE: return JOYSTICK_DRIVER_SEMIAXIS_POSITIVE; case SEMIAXIS_DIRECTION::NEGATIVE: return JOYSTICK_DRIVER_SEMIAXIS_NEGATIVE; default: break; } return JOYSTICK_DRIVER_SEMIAXIS_UNKNOWN; } MOUSE::BUTTON_ID CPeripheralAddonTranslator::TranslateMouseButton(JOYSTICK_DRIVER_MOUSE_INDEX button) { switch (button) { case JOYSTICK_DRIVER_MOUSE_INDEX_LEFT: return MOUSE::BUTTON_ID::LEFT; case JOYSTICK_DRIVER_MOUSE_INDEX_RIGHT: return MOUSE::BUTTON_ID::RIGHT; case JOYSTICK_DRIVER_MOUSE_INDEX_MIDDLE: return MOUSE::BUTTON_ID::MIDDLE; case JOYSTICK_DRIVER_MOUSE_INDEX_BUTTON4: return MOUSE::BUTTON_ID::BUTTON4; case JOYSTICK_DRIVER_MOUSE_INDEX_BUTTON5: return MOUSE::BUTTON_ID::BUTTON5; case JOYSTICK_DRIVER_MOUSE_INDEX_WHEEL_UP: return MOUSE::BUTTON_ID::WHEEL_UP; case JOYSTICK_DRIVER_MOUSE_INDEX_WHEEL_DOWN: return MOUSE::BUTTON_ID::WHEEL_DOWN; case JOYSTICK_DRIVER_MOUSE_INDEX_HORIZ_WHEEL_LEFT: return MOUSE::BUTTON_ID::HORIZ_WHEEL_LEFT; case JOYSTICK_DRIVER_MOUSE_INDEX_HORIZ_WHEEL_RIGHT: return MOUSE::BUTTON_ID::HORIZ_WHEEL_RIGHT; default: break; } return MOUSE::BUTTON_ID::UNKNOWN; } JOYSTICK_DRIVER_MOUSE_INDEX 
CPeripheralAddonTranslator::TranslateMouseButton(MOUSE::BUTTON_ID button) { switch (button) { case MOUSE::BUTTON_ID::LEFT: return JOYSTICK_DRIVER_MOUSE_INDEX_LEFT; case MOUSE::BUTTON_ID::RIGHT: return JOYSTICK_DRIVER_MOUSE_INDEX_RIGHT; case MOUSE::BUTTON_ID::MIDDLE: return JOYSTICK_DRIVER_MOUSE_INDEX_MIDDLE; case MOUSE::BUTTON_ID::BUTTON4: return JOYSTICK_DRIVER_MOUSE_INDEX_BUTTON4; case MOUSE::BUTTON_ID::BUTTON5: return JOYSTICK_DRIVER_MOUSE_INDEX_BUTTON5; case MOUSE::BUTTON_ID::WHEEL_UP: return JOYSTICK_DRIVER_MOUSE_INDEX_WHEEL_UP; case MOUSE::BUTTON_ID::WHEEL_DOWN: return JOYSTICK_DRIVER_MOUSE_INDEX_WHEEL_DOWN; case MOUSE::BUTTON_ID::HORIZ_WHEEL_LEFT: return JOYSTICK_DRIVER_MOUSE_INDEX_HORIZ_WHEEL_LEFT; case MOUSE::BUTTON_ID::HORIZ_WHEEL_RIGHT: return JOYSTICK_DRIVER_MOUSE_INDEX_HORIZ_WHEEL_RIGHT; default: break; } return JOYSTICK_DRIVER_MOUSE_INDEX_UNKNOWN; } RELATIVE_POINTER_DIRECTION CPeripheralAddonTranslator::TranslateRelPointerDirection(JOYSTICK_DRIVER_RELPOINTER_DIRECTION dir) { switch (dir) { case JOYSTICK_DRIVER_RELPOINTER_LEFT: return RELATIVE_POINTER_DIRECTION::LEFT; case JOYSTICK_DRIVER_RELPOINTER_RIGHT: return RELATIVE_POINTER_DIRECTION::RIGHT; case JOYSTICK_DRIVER_RELPOINTER_UP: return RELATIVE_POINTER_DIRECTION::UP; case JOYSTICK_DRIVER_RELPOINTER_DOWN: return RELATIVE_POINTER_DIRECTION::DOWN; default: break; } return RELATIVE_POINTER_DIRECTION::NONE; } JOYSTICK_DRIVER_RELPOINTER_DIRECTION CPeripheralAddonTranslator::TranslateRelPointerDirection(RELATIVE_POINTER_DIRECTION dir) { switch (dir) { case RELATIVE_POINTER_DIRECTION::UP: return JOYSTICK_DRIVER_RELPOINTER_UP; case RELATIVE_POINTER_DIRECTION::DOWN: return JOYSTICK_DRIVER_RELPOINTER_DOWN; case RELATIVE_POINTER_DIRECTION::RIGHT: return JOYSTICK_DRIVER_RELPOINTER_RIGHT; case RELATIVE_POINTER_DIRECTION::LEFT: return JOYSTICK_DRIVER_RELPOINTER_LEFT; default: break; } return JOYSTICK_DRIVER_RELPOINTER_UNKNOWN; } JOYSTICK::FEATURE_TYPE 
CPeripheralAddonTranslator::TranslateFeatureType(JOYSTICK_FEATURE_TYPE type) { switch (type) { case JOYSTICK_FEATURE_TYPE_SCALAR: return JOYSTICK::FEATURE_TYPE::SCALAR; case JOYSTICK_FEATURE_TYPE_ANALOG_STICK: return JOYSTICK::FEATURE_TYPE::ANALOG_STICK; case JOYSTICK_FEATURE_TYPE_ACCELEROMETER: return JOYSTICK::FEATURE_TYPE::ACCELEROMETER; case JOYSTICK_FEATURE_TYPE_MOTOR: return JOYSTICK::FEATURE_TYPE::MOTOR; case JOYSTICK_FEATURE_TYPE_RELPOINTER: return JOYSTICK::FEATURE_TYPE::RELPOINTER; case JOYSTICK_FEATURE_TYPE_ABSPOINTER: return JOYSTICK::FEATURE_TYPE::ABSPOINTER; case JOYSTICK_FEATURE_TYPE_WHEEL: return JOYSTICK::FEATURE_TYPE::WHEEL; case JOYSTICK_FEATURE_TYPE_THROTTLE: return JOYSTICK::FEATURE_TYPE::THROTTLE; case JOYSTICK_FEATURE_TYPE_KEY: return JOYSTICK::FEATURE_TYPE::KEY; default: break; } return JOYSTICK::FEATURE_TYPE::UNKNOWN; } JOYSTICK_FEATURE_TYPE CPeripheralAddonTranslator::TranslateFeatureType(JOYSTICK::FEATURE_TYPE type) { switch (type) { case JOYSTICK::FEATURE_TYPE::SCALAR: return JOYSTICK_FEATURE_TYPE_SCALAR; case JOYSTICK::FEATURE_TYPE::ANALOG_STICK: return JOYSTICK_FEATURE_TYPE_ANALOG_STICK; case JOYSTICK::FEATURE_TYPE::ACCELEROMETER: return JOYSTICK_FEATURE_TYPE_ACCELEROMETER; case JOYSTICK::FEATURE_TYPE::MOTOR: return JOYSTICK_FEATURE_TYPE_MOTOR; case JOYSTICK::FEATURE_TYPE::RELPOINTER: return JOYSTICK_FEATURE_TYPE_RELPOINTER; case JOYSTICK::FEATURE_TYPE::ABSPOINTER: return JOYSTICK_FEATURE_TYPE_ABSPOINTER; case JOYSTICK::FEATURE_TYPE::WHEEL: return JOYSTICK_FEATURE_TYPE_WHEEL; case JOYSTICK::FEATURE_TYPE::THROTTLE: return JOYSTICK_FEATURE_TYPE_THROTTLE; case JOYSTICK::FEATURE_TYPE::KEY: return JOYSTICK_FEATURE_TYPE_KEY; default: break; } return JOYSTICK_FEATURE_TYPE_UNKNOWN; } kodi::addon::DriverPrimitive CPeripheralAddonTranslator::Opposite(const kodi::addon::DriverPrimitive& primitive) { return kodi::addon::DriverPrimitive(primitive.DriverIndex(), primitive.Center() * -1, primitive.SemiAxisDirection() * -1, primitive.Range()); }
gpl-2.0
YelaSeamless/mysql
mysys/thr_mutex.c
11
13372
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* This makes a wrapper for mutex handling to make it easier to debug mutex */ #include <my_global.h> #if defined(TARGET_OS_LINUX) && !defined (__USE_UNIX98) #define __USE_UNIX98 /* To get rw locks under Linux */ #endif #if defined(THREAD) && defined(SAFE_MUTEX) #undef SAFE_MUTEX /* Avoid safe_mutex redefinitions */ #include "mysys_priv.h" #include "my_static.h" #include <m_string.h> #ifndef DO_NOT_REMOVE_THREAD_WRAPPERS /* Remove wrappers */ #undef pthread_mutex_t #undef pthread_mutex_init #undef pthread_mutex_lock #undef pthread_mutex_unlock #undef pthread_mutex_destroy #undef pthread_cond_wait #undef pthread_cond_timedwait #ifdef HAVE_NONPOSIX_PTHREAD_MUTEX_INIT #define pthread_mutex_init(a,b) my_pthread_mutex_init((a),(b)) #endif #endif /* DO_NOT_REMOVE_THREAD_WRAPPERS */ static pthread_mutex_t THR_LOCK_mutex; static ulong safe_mutex_count= 0; /* Number of mutexes created */ #ifdef SAFE_MUTEX_DETECT_DESTROY static struct st_safe_mutex_info_t *safe_mutex_root= NULL; #endif void safe_mutex_global_init(void) { pthread_mutex_init(&THR_LOCK_mutex,MY_MUTEX_INIT_FAST); } int safe_mutex_init(safe_mutex_t *mp, const pthread_mutexattr_t *attr __attribute__((unused)), const char *file, uint line) { bzero((char*) mp,sizeof(*mp)); 
pthread_mutex_init(&mp->global,MY_MUTEX_INIT_ERRCHK); pthread_mutex_init(&mp->mutex,attr); /* Mark that mutex is initialized */ mp->file= file; mp->line= line; #ifdef SAFE_MUTEX_DETECT_DESTROY /* Monitor the freeing of mutexes. This code depends on single thread init and destroy */ if ((mp->info= (safe_mutex_info_t *) malloc(sizeof(safe_mutex_info_t)))) { struct st_safe_mutex_info_t *info =mp->info; info->init_file= file; info->init_line= line; info->prev= NULL; info->next= NULL; pthread_mutex_lock(&THR_LOCK_mutex); if ((info->next= safe_mutex_root)) safe_mutex_root->prev= info; safe_mutex_root= info; safe_mutex_count++; pthread_mutex_unlock(&THR_LOCK_mutex); } #else thread_safe_increment(safe_mutex_count, &THR_LOCK_mutex); #endif /* SAFE_MUTEX_DETECT_DESTROY */ return 0; } int safe_mutex_lock(safe_mutex_t *mp, my_bool try_lock, const char *file, uint line) { int error; if (!mp->file) { fprintf(stderr, "safe_mutex: Trying to lock unitialized mutex at %s, line %d\n", file, line); fflush(stderr); abort(); } pthread_mutex_lock(&mp->global); if (mp->count > 0) { if (try_lock) { pthread_mutex_unlock(&mp->global); return EBUSY; } else if (pthread_equal(pthread_self(),mp->thread)) { fprintf(stderr, "safe_mutex: Trying to lock mutex at %s, line %d, when the" " mutex was already locked at %s, line %d in thread %s\n", file,line,mp->file, mp->line, my_thread_name()); fflush(stderr); abort(); } } pthread_mutex_unlock(&mp->global); /* If we are imitating trylock(), we need to take special precautions. - We cannot use pthread_mutex_lock() only since another thread can overtake this thread and take the lock before this thread causing pthread_mutex_trylock() to hang. In this case, we should just return EBUSY. Hence, we use pthread_mutex_trylock() to be able to return immediately. - We cannot just use trylock() and continue execution below, since this would generate an error and abort execution if the thread was overtaken and trylock() returned EBUSY . 
In this case, we instead just return EBUSY, since this is the expected behaviour of trylock(). */ if (try_lock) { error= pthread_mutex_trylock(&mp->mutex); if (error == EBUSY) return error; } else error= pthread_mutex_lock(&mp->mutex); if (error || (error=pthread_mutex_lock(&mp->global))) { fprintf(stderr,"Got error %d when trying to lock mutex at %s, line %d\n", error, file, line); fflush(stderr); abort(); } mp->thread= pthread_self(); if (mp->count++) { fprintf(stderr,"safe_mutex: Error in thread libray: Got mutex at %s, \ line %d more than 1 time\n", file,line); fflush(stderr); abort(); } mp->file= file; mp->line=line; pthread_mutex_unlock(&mp->global); return error; } int safe_mutex_unlock(safe_mutex_t *mp,const char *file, uint line) { int error; pthread_mutex_lock(&mp->global); if (mp->count == 0) { fprintf(stderr,"safe_mutex: Trying to unlock mutex that wasn't locked at %s, line %d\n Last used at %s, line: %d\n", file,line,mp->file ? mp->file : "",mp->line); fflush(stderr); abort(); } if (!pthread_equal(pthread_self(),mp->thread)) { fprintf(stderr,"safe_mutex: Trying to unlock mutex at %s, line %d that was locked by another thread at: %s, line: %d\n", file,line,mp->file,mp->line); fflush(stderr); abort(); } mp->thread= 0; mp->count--; #ifdef __WIN__ pthread_mutex_unlock(&mp->mutex); error=0; #else error=pthread_mutex_unlock(&mp->mutex); if (error) { fprintf(stderr,"safe_mutex: Got error: %d (%d) when trying to unlock mutex at %s, line %d\n", error, errno, file, line); fflush(stderr); abort(); } #endif /* __WIN__ */ pthread_mutex_unlock(&mp->global); return error; } int safe_cond_wait(pthread_cond_t *cond, safe_mutex_t *mp, const char *file, uint line) { int error; pthread_mutex_lock(&mp->global); if (mp->count == 0) { fprintf(stderr,"safe_mutex: Trying to cond_wait on a unlocked mutex at %s, line %d\n",file,line); fflush(stderr); abort(); } if (!pthread_equal(pthread_self(),mp->thread)) { fprintf(stderr,"safe_mutex: Trying to cond_wait on a mutex at %s, line 
%d that was locked by another thread at: %s, line: %d\n", file,line,mp->file,mp->line); fflush(stderr); abort(); } if (mp->count-- != 1) { fprintf(stderr,"safe_mutex: Count was %d on locked mutex at %s, line %d\n", mp->count+1, file, line); fflush(stderr); abort(); } pthread_mutex_unlock(&mp->global); error=pthread_cond_wait(cond,&mp->mutex); pthread_mutex_lock(&mp->global); if (error) { fprintf(stderr,"safe_mutex: Got error: %d (%d) when doing a safe_mutex_wait at %s, line %d\n", error, errno, file, line); fflush(stderr); abort(); } mp->thread=pthread_self(); if (mp->count++) { fprintf(stderr, "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d\n", mp->count-1, my_thread_dbug_id(), file, line); fflush(stderr); abort(); } mp->file= file; mp->line=line; pthread_mutex_unlock(&mp->global); return error; } int safe_cond_timedwait(pthread_cond_t *cond, safe_mutex_t *mp, const struct timespec *abstime, const char *file, uint line) { int error; pthread_mutex_lock(&mp->global); if (mp->count != 1 || !pthread_equal(pthread_self(),mp->thread)) { fprintf(stderr,"safe_mutex: Trying to cond_wait at %s, line %d on a not hold mutex\n",file,line); fflush(stderr); abort(); } mp->count--; /* Mutex will be released */ pthread_mutex_unlock(&mp->global); error=pthread_cond_timedwait(cond,&mp->mutex,abstime); #ifdef EXTRA_DEBUG if (error && (error != EINTR && error != ETIMEDOUT && error != ETIME)) { fprintf(stderr,"safe_mutex: Got error: %d (%d) when doing a safe_mutex_timedwait at %s, line %d\n", error, errno, file, line); } #endif pthread_mutex_lock(&mp->global); mp->thread=pthread_self(); if (mp->count++) { fprintf(stderr, "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d (error: %d (%d))\n", mp->count-1, my_thread_dbug_id(), file, line, error, error); fflush(stderr); abort(); } mp->file= file; mp->line=line; pthread_mutex_unlock(&mp->global); return error; } int safe_mutex_destroy(safe_mutex_t *mp, const char *file, uint line) { int 
error=0; if (!mp->file) { fprintf(stderr, "safe_mutex: Trying to destroy unitialized mutex at %s, line %d\n", file, line); fflush(stderr); abort(); } if (mp->count != 0) { fprintf(stderr,"safe_mutex: Trying to destroy a mutex that was locked at %s, line %d at %s, line %d\n", mp->file,mp->line, file, line); fflush(stderr); abort(); } #ifdef __WIN__ pthread_mutex_destroy(&mp->global); pthread_mutex_destroy(&mp->mutex); #else if (pthread_mutex_destroy(&mp->global)) error=1; if (pthread_mutex_destroy(&mp->mutex)) error=1; #endif mp->file= 0; /* Mark destroyed */ #ifdef SAFE_MUTEX_DETECT_DESTROY if (mp->info) { struct st_safe_mutex_info_t *info= mp->info; pthread_mutex_lock(&THR_LOCK_mutex); if (info->prev) info->prev->next = info->next; else safe_mutex_root = info->next; if (info->next) info->next->prev = info->prev; safe_mutex_count--; pthread_mutex_unlock(&THR_LOCK_mutex); free(info); mp->info= NULL; /* Get crash if double free */ } #else thread_safe_sub(safe_mutex_count, 1, &THR_LOCK_mutex); #endif /* SAFE_MUTEX_DETECT_DESTROY */ return error; } /* Free global resources and check that all mutex has been destroyed SYNOPSIS safe_mutex_end() file Print errors on this file NOTES We can't use DBUG_PRINT() here as we have in my_end() disabled DBUG handling before calling this function. In MySQL one may get one warning for a mutex created in my_thr_init.c This is ok, as this thread may not yet have been exited. 
*/ void safe_mutex_end(FILE *file __attribute__((unused))) { if (!safe_mutex_count) /* safetly */ pthread_mutex_destroy(&THR_LOCK_mutex); #ifdef SAFE_MUTEX_DETECT_DESTROY if (!file) return; if (safe_mutex_count) { fprintf(file, "Warning: Not destroyed mutex: %lu\n", safe_mutex_count); (void) fflush(file); } { struct st_safe_mutex_info_t *ptr; for (ptr= safe_mutex_root ; ptr ; ptr= ptr->next) { fprintf(file, "\tMutex initiated at line %4u in '%s'\n", ptr->init_line, ptr->init_file); (void) fflush(file); } } #endif /* SAFE_MUTEX_DETECT_DESTROY */ } #endif /* THREAD && SAFE_MUTEX */ #if defined(THREAD) && defined(MY_PTHREAD_FASTMUTEX) && !defined(SAFE_MUTEX) #include "mysys_priv.h" #include "my_static.h" #include <m_string.h> #include <m_ctype.h> #include <hash.h> #include <myisampack.h> #include <mysys_err.h> #include <my_sys.h> #undef pthread_mutex_t #undef pthread_mutex_init #undef pthread_mutex_lock #undef pthread_mutex_trylock #undef pthread_mutex_unlock #undef pthread_mutex_destroy #undef pthread_cond_wait #undef pthread_cond_timedwait ulong mutex_delay(ulong delayloops) { ulong i; volatile ulong j; j = 0; for (i = 0; i < delayloops * 50; i++) j += i; return(j); } #define MY_PTHREAD_FASTMUTEX_SPINS 8 #define MY_PTHREAD_FASTMUTEX_DELAY 4 static int cpu_count= 0; int my_pthread_fastmutex_init(my_pthread_fastmutex_t *mp, const pthread_mutexattr_t *attr) { if ((cpu_count > 1) && (attr == MY_MUTEX_INIT_FAST)) mp->spins= MY_PTHREAD_FASTMUTEX_SPINS; else mp->spins= 0; mp->rng_state= 1; return pthread_mutex_init(&mp->mutex, attr); } /** Park-Miller random number generator. A simple linear congruential generator that operates in multiplicative group of integers modulo n. x_{k+1} = (x_k g) mod n Popular pair of parameters: n = 2^32 − 5 = 4294967291 and g = 279470273. The period of the generator is about 2^31. Largest value that can be returned: 2147483646 (RAND_MAX) Reference: S. K. Park and K. W. Miller "Random number generators: good ones are hard to find" Commun. 
ACM, October 1988, Volume 31, No 10, pages 1192-1201. */ static double park_rng(my_pthread_fastmutex_t *mp) { mp->rng_state= ((my_ulonglong)mp->rng_state * 279470273U) % 4294967291U; return (mp->rng_state / 2147483647.0); } int my_pthread_fastmutex_lock(my_pthread_fastmutex_t *mp) { int res; uint i; uint maxdelay= MY_PTHREAD_FASTMUTEX_DELAY; for (i= 0; i < mp->spins; i++) { res= pthread_mutex_trylock(&mp->mutex); if (res == 0) return 0; if (res != EBUSY) return res; mutex_delay(maxdelay); maxdelay += park_rng(mp) * MY_PTHREAD_FASTMUTEX_DELAY + 1; } return pthread_mutex_lock(&mp->mutex); } void fastmutex_global_init(void) { #ifdef _SC_NPROCESSORS_CONF cpu_count= sysconf(_SC_NPROCESSORS_CONF); #endif } #endif /* defined(THREAD) && defined(MY_PTHREAD_FASTMUTEX) && !defined(SAFE_MUTEX) */
gpl-2.0
robbie-cao/FreeRTOS
FreeRTOS/Demo/CORTEX_A9_Cyclone_V_SoC_DK/Altera_Code/HardwareLibrary/alt_mmu.c
11
42927
/****************************************************************************** * * Copyright 2013 Altera Corporation. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * ******************************************************************************/ #include "alt_mmu.h" #include <string.h> #include <stdio.h> ///// // NOTE: To enable debugging output, delete the next line and uncomment the // line after. #define dprintf(...) // #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__) ///// // Generates the bitmask given the MSB and LSB of a register field. // NOTE: This is problematic for the BITMASK(31, 0) case. 
#define BITMASK(msb, lsb) (((1 << ((msb) - (lsb) + 1)) - 1) << (lsb)) // Calculates the array count statically #define ARRAY_COUNT(array) (sizeof(array) / sizeof(array[0])) // Minimum #define MIN(a, b) ((a) < (b) ? (a) : (b)) // Index into the pagetable given the virtual address. This is bits va[19:12] >> 12. #define ALT_MMU_PAGE_TABLE_INDEX(va) (((uintptr_t)(va) >> 12) & 0xff) ///// // This is the number of 1 MiB sections in the TTB1 table. #define ALT_MMU_TTB1_SECTION_COUNT 4096 ///// // System Control Register #define ALT_CPU_SCTLR_C_SET_MSK (1 << 2) #define ALT_CPU_SCTLR_M_SET_MSK (1 << 0) #define ALT_CPU_CONTEXTIDR_PROCID_SET_MSK (0xffffff << 8) #define ALT_CPU_CONTEXTIDR_ASID_SET_MSK (0x0000ff << 0) // Translation Table Base Register 0 (Process Specific; Changes on context switch) // (31 to 14 - (TTBCR.N)) #define ALT_CPU_TTBR0_TTB0BASEADDR_SET_MSK(ttbcr_n) BITMASK(31, 14 - (ttbcr_n)) #define ALT_CPU_TTBR0_IRGN_0_SET_MSK (1 << 6) #define ALT_CPU_TTBR0_NOS_SET_MSK (1 << 5) #define ALT_CPU_TTBR0_RGN_SET_MSK (3 << 3) #define ALT_CPU_TTBR0_IMP_SET_MSK (1 << 2) #define ALT_CPU_TTBR0_S_SET_MSK (1 << 1) #define ALT_CPU_TTBR0_IRGN_1_SET_MSK (1 << 0) #define ALT_CPU_TTBR0_RGN_NC (0 << 3) // RGN[1:0] = 00 #define ALT_CPU_TTBR0_RGN_WBA (1 << 3) // RGN[1:0] = 01 #define ALT_CPU_TTBR0_RGN_WT (2 << 3) // RGN[1:0] = 10 #define ALT_CPU_TTBR0_RGN_WB (3 << 3) // RGN[1:0] = 11 // NOTE: IRGN bits are reversed. TTBR0[6] is IRGN[0]; TTBR[0] is IRGN[1]. 
#define ALT_CPU_TTBR0_IRGN_NC (0 << 0 | 0 << 6) // IRGN[1:0] = 00 #define ALT_CPU_TTBR0_IRGN_WBA (0 << 0 | 1 << 6) // IRGN[1:0] = 01 #define ALT_CPU_TTBR0_IRGN_WT (1 << 0 | 0 << 6) // IRGN[1:0] = 10 #define ALT_CPU_TTBR0_IRGN_WB (1 << 0 | 1 << 6) // IRGN[1:0] = 11 // Translation Table Base Register 1 (OS and IO specific; Static) #define ALT_CPU_TTBR1_TTB1BASEADDR_SET_MSK (0x3ffffUL << 14) #define ALT_CPU_TTBR1_IRGN_0_SET_MSK (1 << 6) #define ALT_CPU_TTBR1_NOS_SET_MSK (1 << 5) #define ALT_CPU_TTBR1_RGN_SET_MSK (3 << 3) #define ALT_CPU_TTBR1_IMP_SET_MSK (1 << 2) #define ALT_CPU_TTBR1_S_SET_MSK (1 << 1) #define ALT_CPU_TTBR1_IRGN_1_SET_MSK (1 << 0) // Translation Table Base Control Register #define ALT_CPU_TTBCR_PD1_SET_MSK (1 << 5) #define ALT_CPU_TTBCR_PD0_SET_MSK (1 << 4) #define ALT_CPU_TTBCR_N_SET_MSK (7 << 0) #define ALT_CPU_TTBCR_N_VALUE_GET(value) (((value) << 0) & ALT_CPU_TTBCR_N_SET_MSK) ///// static inline __attribute__((always_inline)) uint32_t sctlr_get_helper(void) { // Read from SCTLR using CP15. // See ARMv7-A,R, section B4.1.30. uint32_t sctlr; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, sctlr, c1, c0, 0"); #else __asm("MRC p15, 0, %0, c1, c0, 0" : "=r" (sctlr)); #endif return sctlr; } static inline __attribute__((always_inline)) void sctlr_set_helper(uint32_t sctlr) { // Write to SCTLR using CP15. // See ARMv7-A,R, section B4.1.30. #ifdef __ARMCC_VERSION __asm("MCR p15, 0, sctlr, c1, c0, 0"); #else __asm("MCR p15, 0, %0, c1, c0, 0" : : "r" (sctlr)); #endif } /* __attribute__((always_inline)) uint32_t contextidr_get_helper(void) { // Read from CONTEXTIDR using CP15. // See ARMv7-A,R, section B4.1.36. uint32_t contextidr; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, contextidr, c13, c0, 1"); #else __asm("MRC p15, 0, %0, c13, c0, 1" : "=r" (contextidr)); #endif return contextidr; } */ static inline __attribute__((always_inline)) void contextidr_set_helper(uint32_t contextidr) { // Write to CONTEXTIDR using CP15. // See ARMv7-A,R, section B4.1.36. 
#ifdef __ARMCC_VERSION __asm("MCR p15, 0, contextidr, c13, c0, 1"); #else __asm("MCR p15, 0, %0, c13, c0, 1" : : "r" (contextidr)); #endif } /* __attribute__((always_inline)) uint32_t dacr_get_helper(void) { // Read from DACR using CP15. // See ARMv7-A,R, section B4.1.43. uint32_t dacr; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, dacr, c3, c0, 0"); #else __asm("MRC p15, 0, %0, c3, c0, 0" : "=r" (dacr)); #endif return dacr; } */ static inline __attribute__((always_inline)) void dacr_set_helper(uint32_t dacr) { // Write to DACR using CP15. // See ARMv7-A,R, section B4.1.43. #ifdef __ARMCC_VERSION __asm("MCR p15, 0, dacr, c3, c0, 0"); #else __asm("MCR p15, 0, %0, c3, c0, 0" : : "r" (dacr)); #endif } static inline __attribute__((always_inline)) uint32_t ttbcr_get_helper(void) { // Read from TTBCR using CP15. // See ARMv7-A,R, section B4.1.153. uint32_t ttbcr; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, ttbcr, c2, c0, 2"); #else __asm("MRC p15, 0, %0 , c2, c0, 2" : "=r" (ttbcr)); #endif return ttbcr; } static inline __attribute__((always_inline)) void ttbcr_set_helper(uint32_t ttbcr) { // Write to TTBCR using CP15. // See ARMv7-A,R, section B4.1.153. #ifdef __ARMCC_VERSION __asm("MCR p15, 0, ttbcr, c2, c0, 2"); #else __asm("MCR p15, 0, %0, c2, c0, 2" : : "r" (ttbcr)); #endif } static inline __attribute__((always_inline)) uint32_t ttbr0_get_helper(void) { // Read the TTBR0 using CP15. // See ARMv7-A,R, section B4.1.154. uint32_t ttbr0; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, ttbr0, c2, c0, 0"); #else __asm("MRC p15, 0, %0, c2, c0, 0" : "=r" (ttbr0)); #endif return ttbr0; } static inline __attribute__((always_inline)) void ttbr0_set_helper(uint32_t ttbr0) { // Write to TTBR0 using CP15. // See ARMv7-A,R, section B4.1.154. #ifdef __ARMCC_VERSION __asm("MCR p15, 0, ttbr0, c2, c0, 0"); #else __asm("MCR p15, 0, %0, c2, c0, 0" : : "r" (ttbr0)); #endif } static inline __attribute__((always_inline)) uint32_t ttbr1_get_helper(void) { // Read the TTBR1 using CP15. 
// See ARMv7-A,R, section B4.1.155. uint32_t ttbr1; #ifdef __ARMCC_VERSION __asm("MRC p15, 0, ttbr1, c2, c0, 1"); #else __asm("MRC p15, 0, %0, c2, c0, 1" : "=r" (ttbr1)); #endif return ttbr1; } static inline __attribute__((always_inline)) void ttbr1_set_helper(uint32_t ttbr1) { // Write to TTBR1 using CP15. // See ARMv7-A,R, section B4.1.155. #ifdef __ARMCC_VERSION __asm("MCR p15, 0, ttbr1, c2, c0, 1"); #else __asm("MCR p15, 0, %0, c2, c0, 1" : : "r" (ttbr1)); #endif } ///// ALT_STATUS_CODE alt_mmu_init(void) { return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_uninit(void) { return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_ttb1_init(uint32_t* ttb1) { uint32_t ttbcr = ttbcr_get_helper(); uint32_t ttbcr_n = ALT_CPU_TTBCR_N_VALUE_GET(ttbcr); // Verify ttb1 buffer alignment. if ((uintptr_t)ttb1 & ~ALT_CPU_TTBR0_TTB0BASEADDR_SET_MSK(ttbcr_n)) { // addr must align to 2^(14 - TTBCR.N) bytes. return ALT_E_BAD_ARG; } // The TTB1 size really depends on TTBCR.N value and if it will be used for // TTBR0 or TTBR1. The documentation just states that it should be 16 KiB. // See ARMv7-A,R, section B3.5.4. memset(ttb1, 0, ALT_MMU_TTB1_SIZE); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_ttb1_desc_set(uint32_t* ttb1, const void* va, const uint32_t desc) { bool supersection = 0; // Validate the [va] parameter alignment based on the entry [desc] is describing. // - Fault, Page Table, or section: 1 MiB. // - Supersection: 16 MiB // - Other: error. switch (ALT_MMU_TTB1_TYPE_GET(desc)) { case ALT_MMU_TTB1_TYPE_SET(0x2): // Section or Supersection sans Physical Address Extension // Check bit 18, which determines if it is a regular or super variant if (desc & (1 << 18)) { // Mark that we are describing a supersection. supersection = true; // Supersection: Check for 16 MiB alignment if ((uintptr_t)va & (ALT_MMU_SUPERSECTION_SIZE - 1)) { return ALT_E_BAD_ARG; } break; } else { // Section, fall through. 
} case ALT_MMU_TTB1_TYPE_SET(0x0): // Fault case ALT_MMU_TTB1_TYPE_SET(0x1): // Page Table // Section, Fault, or Page Table: check for 1 MiB alignment if ((uintptr_t)va & (ALT_MMU_SECTION_SIZE - 1)) { return ALT_E_BAD_ARG; } break; case ALT_MMU_TTB1_TYPE_SET(0x3): // Supersection with Physical Address Extension // The SoCFPGA does not support PAE. return ALT_E_BAD_ARG; } // The [va] looks good! Add entry into the TTB1. // TTB1 is indexed by va[31-N:20]. This function assumes N = 0. uint32_t index = (uintptr_t)va >> 20; if (supersection == false) { ttb1[index] = desc; } else { // Supersection needs the entry to be repeated 16x. for (int i = 0; i < 16; ++i) { ttb1[index + i] = desc; } } return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_ttb2_desc_set(const uint32_t* ttb1, const void* va, const uint32_t desc) { bool largepage = false; // Validate the [va] parameter alignment based on the entry [desc] is describing. // - Fault, Small Page: 4 KiB // - Large Page: 64 KiB switch (ALT_MMU_TTB2_TYPE_GET(desc)) { case ALT_MMU_TTB2_TYPE_SET(0x0): // Fault case ALT_MMU_TTB2_TYPE_SET(0x2): // Small Page, XN = 0 case ALT_MMU_TTB2_TYPE_SET(0x3): // Small Page, XN = 1 if ((uintptr_t)va & (ALT_MMU_SMALL_PAGE_SIZE - 1)) { return ALT_E_BAD_ARG; } break; case ALT_MMU_TTB2_TYPE_SET(0x1): // Large Page if ((uintptr_t)va & (ALT_MMU_LARGE_PAGE_SIZE - 1)) { return ALT_E_BAD_ARG; } largepage = true; break; } // The [va] looks good! Add entry into TTB1->TTB2. // Locate the TTB1 entry uint32_t ttb1_desc = ttb1[(uintptr_t)va >> 20]; // Verify that [ttb1_desc] is a pagetable. if (ALT_MMU_TTB1_TYPE_GET(ttb1_desc) != ALT_MMU_TTB1_TYPE_SET(0x1)) { return ALT_E_BAD_ARG; } // Locate TTB2 given [ttb1_desc] uint32_t * ttb2 = (uint32_t *)(ttb1_desc & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); // TTB2 is indexed by va[19:12]. uint32_t index = ALT_MMU_PAGE_TABLE_INDEX(va); if (largepage == false) { ttb2[index] = desc; } else { // Large page needs the entry to be repeated 16x. 
for (int i = 0; i < 16; ++i) { ttb2[index + i] = desc; } } return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_disable(void) { uint32_t sctlr = sctlr_get_helper(); if (sctlr & ALT_CPU_SCTLR_C_SET_MSK) { dprintf("WARN[MMU]: Data cache still active.\n"); } sctlr &= ~ALT_CPU_SCTLR_M_SET_MSK; sctlr_set_helper(sctlr); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_enable(void) { alt_mmu_tlb_invalidate(); uint32_t sctlr = sctlr_get_helper(); sctlr |= ALT_CPU_SCTLR_M_SET_MSK; sctlr_set_helper(sctlr); return ALT_E_SUCCESS; } void * alt_mmu_TTBR0_get(void) { uint32_t ttbcr = ttbcr_get_helper(); uint32_t ttbcr_n = ALT_CPU_TTBCR_N_VALUE_GET(ttbcr); uint32_t ttbr0 = ttbr0_get_helper(); return (void *)(ALT_CPU_TTBR0_TTB0BASEADDR_SET_MSK(ttbcr_n) & ttbr0); } ALT_STATUS_CODE alt_mmu_TTBR0_set(const void* addr) { uint32_t ttbcr = ttbcr_get_helper(); uint32_t ttbcr_n = ALT_CPU_TTBCR_N_VALUE_GET(ttbcr); if ((uintptr_t)addr & ~ALT_CPU_TTBR0_TTB0BASEADDR_SET_MSK(ttbcr_n)) { // addr must align to 2^(14 - TTBCR.N) bytes. return ALT_E_BAD_ARG; } // The Translation table must reside in Normal Memory, so pick the most // performant attributes. uint32_t ttbr0 = ALT_CPU_TTBR0_RGN_WBA // Translation table is WBA for outer cacheability | ALT_CPU_TTBR0_IRGN_WBA; // Translation table is WBA for inner cacheability ttbr0 &= ~ALT_CPU_TTBR0_TTB0BASEADDR_SET_MSK(ttbcr_n); ttbr0 |= (uint32_t)addr; ttbr0_set_helper(ttbr0); return ALT_E_SUCCESS; } void * alt_mmu_TTBR1_get(void) { uint32_t ttbr1 = ttbr1_get_helper(); return (void *)(ALT_CPU_TTBR1_TTB1BASEADDR_SET_MSK & ttbr1); } ALT_STATUS_CODE alt_mmu_TTBR1_set(const void* addr) { if ((uintptr_t)addr & ~ALT_CPU_TTBR1_TTB1BASEADDR_SET_MSK) { // addr must align to 16 KiB. 
return ALT_E_BAD_ARG; } uint32_t ttbr1 = ttbr1_get_helper(); ttbr1 &= ~ALT_CPU_TTBR1_TTB1BASEADDR_SET_MSK; ttbr1 |= (uint32_t)addr; ttbr1_set_helper(ttbr1); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_TTBCR_set(const bool enable_ttbr0_walk, const bool enable_ttbr1_walk, const uint32_t base_addr_width) { uint32_t ttbcr = 0; if (!enable_ttbr0_walk) { ttbcr |= ALT_CPU_TTBCR_PD0_SET_MSK; } if (!enable_ttbr1_walk) { ttbcr |= ALT_CPU_TTBCR_PD1_SET_MSK; } if (base_addr_width > 7) { return ALT_E_BAD_ARG; } ttbcr |= base_addr_width; ttbcr_set_helper(ttbcr); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_DACR_set(const ALT_MMU_DAP_t domain_ap[], const size_t num_elem) { if (num_elem > 16) { return ALT_E_BAD_ARG; } uint32_t dacr = 0; for (int i = 0; i < num_elem; ++i) { ALT_MMU_DAP_t ap = domain_ap[i]; switch (ap) { case ALT_MMU_DAP_NO_ACCESS: case ALT_MMU_DAP_CLIENT: case ALT_MMU_DAP_MANAGER: dacr |= ap << (i * 2); break; default: case ALT_MMU_DAP_RESERVED: return ALT_E_BAD_ARG; } } dacr_set_helper(dacr); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_CONTEXTIDR_set(const uint32_t procid, const uint32_t asid) { if (procid > 0x00ffffff) { return ALT_E_BAD_ARG; } if (asid > 0xff) { return ALT_E_BAD_ARG; } uint32_t contextidr = (procid << 8) | (asid << 0); contextidr_set_helper(contextidr); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_tlb_invalidate(void) { // Issue TLBIALL (TLB Invalidate All) // See ARMv7-A,R, section B4.1.135. uint32_t dummy = 0; #ifdef __ARMCC_VERSION __asm("MCR p15, 0, dummy, c8, c3, 0"); #else __asm("MCR p15, 0, %0, c8, c3, 0" : : "r" (dummy)); #endif // Ensure all TLB maintenance operations complete before returning. __asm("dsb"); return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_tlb_invalidate_is(void) { // Issue TLBIALLIS (TLB Invalidate All, Inner Shareable) // See ARMv7-A,R, section B4.1.138. 
uint32_t dummy = 0; #ifdef __ARMCC_VERSION __asm("MCR p15, 0, dummy, c8, c7, 0"); #else __asm("MCR p15, 0, %0, c8, c7, 0" : : "r" (dummy)); #endif // Ensure all TLB maintenance operations complete before returning. __asm("dsb"); return ALT_E_SUCCESS; } ///// // The #define value for PAGETABLE is designed to make the security check efficient. #define ALT_VREGION_1MIB (2) /* 2 */ #define ALT_VREGION_PAGETABLE_S ((int)ALT_MMU_TTB_NS_SECURE) /* 0 */ #define ALT_VREGION_PAGETABLE_NS ((int)ALT_MMU_TTB_NS_NON_SECURE) /* 1 */ static ALT_STATUS_CODE alt_vregion_mark_pagetable(char * vregion, ALT_MMU_TTB_NS_t security) { if (*vregion == ALT_VREGION_1MIB) { *vregion = (int)security; } else if (*vregion != (int)security) { return ALT_E_ERROR; } return ALT_E_SUCCESS; } static size_t alt_mmu_va_space_storage_required_internal(const ALT_MMU_MEM_REGION_t* mem_regions, const size_t num_mem_regions, char * vregion) { for (int i = 0; i < ALT_MMU_TTB1_SECTION_COUNT; ++i) { vregion[i] = ALT_VREGION_1MIB; } // For each region entry, mark the TTB1 as either fault, section, pagetable. // The total space required is the space required for the TTB1 (16 KiB) + pagetables * (1 KiB) for (int i = 0; i < num_mem_regions; ++i) { uintptr_t va = (uintptr_t)mem_regions[i].va; uintptr_t pa = (uintptr_t)mem_regions[i].pa; uint32_t size = mem_regions[i].size; ALT_MMU_TTB_NS_t security = mem_regions[i].security; // Verify [va] aligns to 4 KiB if (va & (ALT_MMU_SMALL_PAGE_SIZE - 1)) { return 0; } // Verify [pa] aligns to 4 KiB if (pa & (ALT_MMU_SMALL_PAGE_SIZE - 1)) { return 0; } // Verify [size] aligns to 4 KiB if (size & (ALT_MMU_SMALL_PAGE_SIZE - 1)) { return 0; } // Mark the regions at the start of an unaligned 1 MiB as pagetable. // Align the [va] to 1 MiB and subtract that from the [size] left to describe. if (va & (ALT_MMU_SECTION_SIZE - 1)) { // Pagetables must be either S or NS. If the pagetable was // previously marked as something different, the regions described // will not be implementable. 
if (alt_vregion_mark_pagetable(&vregion[va >> 20], security) != ALT_E_SUCCESS) { return 0; } uint32_t segment = MIN(ALT_MMU_SECTION_SIZE - (va & (ALT_MMU_SECTION_SIZE - 1)), size); va += segment; pa += segment; size -= segment; } // Skip each 1 MiB aligned segment of size 1 MiB. These regions require // pagetable if the PA is not 1 MiB aligned. // [pa] is not used after this point. if (pa & (ALT_MMU_SECTION_SIZE - 1)) { // PA is not 1 MiB aligned. Everything must use pagetables. while (size >= ALT_MMU_SECTION_SIZE) { // Pagetables must be either S or NS. If the pagetable was // previously marked as something different, the regions described // will not be implementable. if (alt_vregion_mark_pagetable(&vregion[va >> 20], security) != ALT_E_SUCCESS) { return 0; } va += ALT_MMU_SECTION_SIZE; // pa += ALT_MMU_SECTION_SIZE; size -= ALT_MMU_SECTION_SIZE; } } else { // PA is 1 MiB aligned. Sections or supersections can be used. while (size >= ALT_MMU_SECTION_SIZE) { va += ALT_MMU_SECTION_SIZE; // pa += ALT_MMU_SECTION_SIZE; size -= ALT_MMU_SECTION_SIZE; } } // The remainder should be a 1 MiB aligned segment of less than 1 MiB. Mark that region as pagetable. if (size) { // Pagetables must be either S or NS. If the pagetable was // previously marked as something different, the regions described // will not be implementable. 
if (alt_vregion_mark_pagetable(&vregion[va >> 20], security) != ALT_E_SUCCESS) { return 0; } } } // Calculate the size as 16 KiB (TTB1) + 1 KiB * (TTB2 or the number of pagetables) size_t reqsize = ALT_MMU_TTB1_SIZE; for (int i = 0; i < ALT_MMU_TTB1_SECTION_COUNT; ++i) { if (vregion[i] != ALT_VREGION_1MIB) { reqsize += ALT_MMU_TTB2_SIZE; } } return reqsize; } size_t alt_mmu_va_space_storage_required(const ALT_MMU_MEM_REGION_t* mem_regions, const size_t num_mem_regions) { char vregion[ALT_MMU_TTB1_SECTION_COUNT]; return alt_mmu_va_space_storage_required_internal(mem_regions, num_mem_regions, vregion); } /* static inline uint32_t alt_mmu_va_space_gen_fault(void) { return 0; } */ static inline uint32_t alt_mmu_va_space_gen_pagetable(uintptr_t pagetable, const ALT_MMU_MEM_REGION_t * mem) { if (mem->attributes == ALT_MMU_ATTR_FAULT) { return 0; } return ALT_MMU_TTB1_TYPE_SET(0x1) | ALT_MMU_TTB1_PAGE_TBL_NS_SET(mem->security) | ALT_MMU_TTB1_PAGE_TBL_DOMAIN_SET(0) | ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_SET(pagetable >> 10); } static inline uint32_t alt_mmu_va_space_gen_section(uintptr_t pa, const ALT_MMU_MEM_REGION_t * mem) { if (mem->attributes == ALT_MMU_ATTR_FAULT) { return 0; } int tex = (mem->attributes >> 4) & 0x7; int c = (mem->attributes >> 1) & 0x1; int b = (mem->attributes >> 0) & 0x1; return ALT_MMU_TTB1_TYPE_SET(0x2) | ALT_MMU_TTB1_SECTION_B_SET(b) | ALT_MMU_TTB1_SECTION_C_SET(c) | ALT_MMU_TTB1_SECTION_XN_SET(mem->execute) | ALT_MMU_TTB1_SECTION_DOMAIN_SET(0) | ALT_MMU_TTB1_SECTION_AP_SET(mem->access) | ALT_MMU_TTB1_SECTION_TEX_SET(tex) | ALT_MMU_TTB1_SECTION_S_SET(mem->shareable) | ALT_MMU_TTB1_SECTION_NG_SET(0) | ALT_MMU_TTB1_SECTION_NS_SET(mem->security) | ALT_MMU_TTB1_SECTION_BASE_ADDR_SET(pa >> 20); } static inline uint32_t alt_mmu_va_space_gen_supersection(uintptr_t pa, const ALT_MMU_MEM_REGION_t * mem) { if (mem->attributes == ALT_MMU_ATTR_FAULT) { return 0; } int tex = (mem->attributes >> 4) & 0x7; int c = (mem->attributes >> 1) & 0x1; int b = 
(mem->attributes >> 0) & 0x1; return ALT_MMU_TTB1_TYPE_SET(0x2) | (1 << 18) // bit 18 marks section as being super. | ALT_MMU_TTB1_SUPERSECTION_B_SET(b) | ALT_MMU_TTB1_SUPERSECTION_C_SET(c) | ALT_MMU_TTB1_SUPERSECTION_XN_SET(mem->execute) | ALT_MMU_TTB1_SUPERSECTION_DOMAIN_SET(0) | ALT_MMU_TTB1_SUPERSECTION_AP_SET(mem->access) | ALT_MMU_TTB1_SUPERSECTION_TEX_SET(tex) | ALT_MMU_TTB1_SUPERSECTION_S_SET(mem->shareable) | ALT_MMU_TTB1_SUPERSECTION_NG_SET(0) | ALT_MMU_TTB1_SUPERSECTION_NS_SET(mem->security) | ALT_MMU_TTB1_SUPERSECTION_BASE_ADDR_SET(pa >> 24); } static inline uint32_t alt_mmu_va_space_gen_smallpage(uintptr_t pa, const ALT_MMU_MEM_REGION_t * mem) { if (mem->attributes == ALT_MMU_ATTR_FAULT) { return 0; } int tex = (mem->attributes >> 4) & 0x7; int c = (mem->attributes >> 1) & 0x1; int b = (mem->attributes >> 0) & 0x1; // NS bit (mem->security) is ignored as it is set in TTB1. return ALT_MMU_TTB2_TYPE_SET(0x2) | ALT_MMU_TTB2_SMALL_PAGE_XN_SET(mem->execute) | ALT_MMU_TTB2_SMALL_PAGE_B_SET(b) | ALT_MMU_TTB2_SMALL_PAGE_C_SET(c) | ALT_MMU_TTB2_SMALL_PAGE_AP_SET(mem->access) | ALT_MMU_TTB2_SMALL_PAGE_TEX_SET(tex) | ALT_MMU_TTB2_SMALL_PAGE_S_SET(mem->shareable) | ALT_MMU_TTB2_SMALL_PAGE_NG_SET(0) | ALT_MMU_TTB2_SMALL_PAGE_BASE_ADDR_SET(pa >> 12); } static inline uint32_t alt_mmu_va_space_gen_largepage(uintptr_t pa, const ALT_MMU_MEM_REGION_t * mem) { if (mem->attributes == ALT_MMU_ATTR_FAULT) { return 0; } int tex = (mem->attributes >> 4) & 0x7; int c = (mem->attributes >> 1) & 0x1; int b = (mem->attributes >> 0) & 0x1; // NS bit (mem->security) is ignored as it is set in TTB1. 
return ALT_MMU_TTB2_TYPE_SET(0x1) | ALT_MMU_TTB2_LARGE_PAGE_B_SET(b) | ALT_MMU_TTB2_LARGE_PAGE_C_SET(c) | ALT_MMU_TTB2_LARGE_PAGE_AP_SET(mem->access) | ALT_MMU_TTB2_LARGE_PAGE_S_SET(mem->shareable) | ALT_MMU_TTB2_LARGE_PAGE_NG_SET(0) | ALT_MMU_TTB2_LARGE_PAGE_TEX_SET(tex) | ALT_MMU_TTB2_LARGE_PAGE_XN_SET(mem->execute) | ALT_MMU_TTB2_LARGE_PAGE_BASE_ADDR_SET(pa >> 16); } static ALT_STATUS_CODE alt_mmu_ttb2_init(uint32_t * ttb2) { // For TTB2 (page tables), the page table base address in TTB1 is // bits[31:10]. Thus it must be 2^10 byte aligned or 1 KiB. // Source: ARMv7-A,R, section B3.5.1. if ((uintptr_t)ttb2 & ((1 << 10) - 1)) { return ALT_E_BAD_ARG; } memset(ttb2, 0, ALT_MMU_TTB2_SIZE); return ALT_E_SUCCESS; } ///// ALT_STATUS_CODE alt_mmu_va_space_create(uint32_t** ttb1, const ALT_MMU_MEM_REGION_t* mem_regions, const size_t num_mem_regions, alt_mmu_ttb_alloc_t ttb_alloc, void * ttb_alloc_context) { char vregion[ALT_MMU_TTB1_SECTION_COUNT]; size_t reqsize = alt_mmu_va_space_storage_required_internal(mem_regions, num_mem_regions, vregion); if (reqsize == 0) { return ALT_E_ERROR; } char * memory = ttb_alloc(reqsize, ttb_alloc_context); size_t allocated = 0; // Verify allocation if (memory == NULL) { return ALT_E_ERROR; } // Verify alignment // For TTBR0, the translation table must be aligned to 2^x bytes, where // x = (14 - TTBCR.N). Because VA space sets TTBCR.N = 0, x = 14, and the // table must be aligned to 2^14 or 16 KiB. // Source: ARMv7-A,R, section B4.1.154. // For TTB2 (page tables), the page table base address in TTB1 is // bits[31:10]. Thus it must be 2^10 byte aligned or 1 KiB. // Source: ARMv7-A,R, section B3.5.1. // The stricter of the two alignment is 16 KiB. if ((uintptr_t)memory & ((1 << 14) - 1)) { return ALT_E_BAD_ARG; } // "allocate" space for the TTB1. 
if (allocated + ALT_MMU_TTB1_SIZE > reqsize) { return ALT_E_ERROR; } *ttb1 = (uint32_t *)memory; allocated += ALT_MMU_TTB1_SIZE; if (alt_mmu_ttb1_init(*ttb1) != ALT_E_SUCCESS) { return ALT_E_ERROR; } // "allocate" space for each pagetable in [vregion] for (int i = 0; i < ALT_MMU_TTB1_SECTION_COUNT; ++i) { if (vregion[i] != ALT_VREGION_1MIB) { if (allocated + ALT_MMU_TTB2_SIZE > reqsize) { return ALT_E_ERROR; } uint32_t * pagetable = (uint32_t *)(memory + allocated); allocated += ALT_MMU_TTB2_SIZE; alt_mmu_ttb2_init(pagetable); ALT_MMU_MEM_REGION_t mem_region; mem_region.attributes = ALT_MMU_ATTR_STRONG; // Any non-FAULT will work. mem_region.security = (ALT_MMU_TTB_NS_t)vregion[i]; uint32_t desc = alt_mmu_va_space_gen_pagetable((uintptr_t)pagetable, &mem_region); (*ttb1)[i] = desc; } } // The allocated size should match the requested size. If not, this means // that the regions descriptor changed between calling // alt_mmu_va_space_storage_required() and alt_mmu_va_space_create(). if (reqsize != allocated) { return ALT_E_ERROR; } // Iterate through all region descriptors for (size_t i = 0; i < num_mem_regions; ++i) { uintptr_t va = (uintptr_t)mem_regions[i].va; uintptr_t pa = (uintptr_t)mem_regions[i].pa; uint32_t size = mem_regions[i].size; // Determine the va/pa relative alignment: 4 KiB, 64 KiB, 1 MiB, 16 MiB uint32_t alignopt[] = { ALT_MMU_SUPERSECTION_SIZE, ALT_MMU_SECTION_SIZE, ALT_MMU_LARGE_PAGE_SIZE }; // Relative alignment of [va] and [pa]. int relalign = ALT_MMU_SMALL_PAGE_SIZE; for (int j = 0; j < ARRAY_COUNT(alignopt); ++j) { if ( (va & (alignopt[j] - 1)) == (pa & (alignopt[j] - 1)) ) { relalign = alignopt[j]; break; } } // Page the 1 MiB unaligned segment of [va]. Areas requiring page tables // should already have those page tables created previously in this // function. 
if (va & (ALT_MMU_SECTION_SIZE - 1)) { // This is the size of the memory segment after paging which will cause the [va] to align to a 1 MiB, // or up to the size of the region being processed, whichever is smaller. uint32_t segsize = MIN(ALT_MMU_SECTION_SIZE - (va & (ALT_MMU_SECTION_SIZE - 1)), size); if (relalign >= ALT_MMU_LARGE_PAGE_SIZE) { // Because of the 64 KiB relative alignment, try to use large pages. // Use small pages until [va] is 64KiB aligned. while (((va & (ALT_MMU_LARGE_PAGE_SIZE - 1)) != 0) && (segsize >= ALT_MMU_SMALL_PAGE_SIZE)) { uint32_t desc = alt_mmu_va_space_gen_smallpage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); // Detect if an existing non-fault region has already been created. // We cannot detect if a fault region is requested and a region description is already a fault, // which it is by default. if (pagetable[ptindex] != 0) { return ALT_E_ERROR; } pagetable[ptindex] = desc; va += ALT_MMU_SMALL_PAGE_SIZE; pa += ALT_MMU_SMALL_PAGE_SIZE; segsize -= ALT_MMU_SMALL_PAGE_SIZE; size -= ALT_MMU_SMALL_PAGE_SIZE; } // Use large pages for the rest of the 64 KiB aligned areas. while (segsize >= ALT_MMU_LARGE_PAGE_SIZE) { uint32_t desc = alt_mmu_va_space_gen_largepage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); for (int j = 0; j < 16; ++j) { if (pagetable[ptindex + j] != 0) { return ALT_E_ERROR; } pagetable[ptindex + j] = desc; } va += ALT_MMU_LARGE_PAGE_SIZE; pa += ALT_MMU_LARGE_PAGE_SIZE; segsize -= ALT_MMU_LARGE_PAGE_SIZE; size -= ALT_MMU_LARGE_PAGE_SIZE; } // There is a chance that the segment is so small that it does cause the progress to align to the 1 MiB. // If this is the case, page out the rest of segsize using small pages, and the remaining size to be 0. 
while (segsize >= ALT_MMU_SMALL_PAGE_SIZE) { uint32_t desc = alt_mmu_va_space_gen_smallpage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); if (pagetable[ptindex] != 0) { return ALT_E_ERROR; } pagetable[ptindex] = desc; va += ALT_MMU_SMALL_PAGE_SIZE; pa += ALT_MMU_SMALL_PAGE_SIZE; segsize -= ALT_MMU_SMALL_PAGE_SIZE; size -= ALT_MMU_SMALL_PAGE_SIZE; } } else { // No large pages possible, Use small pages only. while (segsize >= ALT_MMU_SMALL_PAGE_SIZE) { uint32_t desc = alt_mmu_va_space_gen_smallpage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); if (pagetable[ptindex] != 0) { return ALT_E_ERROR; } pagetable[ptindex] = desc; va += ALT_MMU_SMALL_PAGE_SIZE; pa += ALT_MMU_SMALL_PAGE_SIZE; segsize -= ALT_MMU_SMALL_PAGE_SIZE; size -= ALT_MMU_SMALL_PAGE_SIZE; } } } // Page each the larger 1 MiB aligned, 1 MiB sized segments. // If [va] and [pa] are relatively 16 MiB aligned and the size remaining // to be described is greater than 16 MiB, use supersections. // If [va] and [pa] are relatively 1 MiB aligned and the size remaining // to be described is greater than 1 MiB, use sections. // Otherwise use pagetables for everything remaining. if ( (relalign >= ALT_MMU_SUPERSECTION_SIZE) && (size >= ALT_MMU_SUPERSECTION_SIZE)) { // Attempt to use supersections. This may not always be possible. 
// Use regular sections for the areas before supersections that does not align to 16 MiB while (((va & (ALT_MMU_SUPERSECTION_SIZE - 1)) != 0) && (size >= ALT_MMU_SECTION_SIZE)) { uint32_t desc = alt_mmu_va_space_gen_section(pa, &mem_regions[i]); if ((*ttb1)[va >> 20] != 0) { return ALT_E_ERROR; } (*ttb1)[va >> 20] = desc; va += ALT_MMU_SECTION_SIZE; pa += ALT_MMU_SECTION_SIZE; size -= ALT_MMU_SECTION_SIZE; } // Use supersections for the 16 MiB aligned areas while (size >= ALT_MMU_SUPERSECTION_SIZE) { uint32_t desc = alt_mmu_va_space_gen_supersection(pa, &mem_regions[i]); for (int j = 0; j < 16; ++j) { if ((*ttb1)[(va >> 20) + j] != 0) { return ALT_E_ERROR; } (*ttb1)[(va >> 20) + j] = desc; } va += ALT_MMU_SUPERSECTION_SIZE; pa += ALT_MMU_SUPERSECTION_SIZE; size -= ALT_MMU_SUPERSECTION_SIZE; } // Use regular sections for the areas after supersections that does not align to 16 MiB. while (size >= ALT_MMU_SECTION_SIZE) { uint32_t desc = alt_mmu_va_space_gen_section(pa, &mem_regions[i]); if ((*ttb1)[va >> 20] != 0) { return ALT_E_ERROR; } (*ttb1)[va >> 20] = desc; va += ALT_MMU_SECTION_SIZE; pa += ALT_MMU_SECTION_SIZE; size -= ALT_MMU_SECTION_SIZE; } } else if ( (relalign >= ALT_MMU_SECTION_SIZE) && (size >= ALT_MMU_SECTION_SIZE)) { // No supersection possible. Use regular sections only. while (size >= ALT_MMU_SECTION_SIZE) { uint32_t desc = alt_mmu_va_space_gen_section(pa, &mem_regions[i]); if ((*ttb1)[va >> 20] != 0) { return ALT_E_ERROR; } (*ttb1)[va >> 20] = desc; va += ALT_MMU_SECTION_SIZE; pa += ALT_MMU_SECTION_SIZE; size -= ALT_MMU_SECTION_SIZE; } } // The remainder should be [va] 1 MiB aligned segment not able to use // sections or supersections. Mark that region as pagetable. // Use large pages if it is suitable. 
if ((relalign >= ALT_MMU_LARGE_PAGE_SIZE) && (size >= ALT_MMU_LARGE_PAGE_SIZE)) { while (size >= ALT_MMU_LARGE_PAGE_SIZE) { uint32_t desc = alt_mmu_va_space_gen_largepage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); for (int j = 0; j < 16; ++j) { if (pagetable[ptindex + j] != 0) { return ALT_E_ERROR; } pagetable[ptindex + j] = desc; } va += ALT_MMU_LARGE_PAGE_SIZE; pa += ALT_MMU_LARGE_PAGE_SIZE; size -= ALT_MMU_LARGE_PAGE_SIZE; } } while (size >= ALT_MMU_SMALL_PAGE_SIZE) { uint32_t desc = alt_mmu_va_space_gen_smallpage(pa, &mem_regions[i]); uint32_t * pagetable = (uint32_t *)((*ttb1)[va >> 20] & ALT_MMU_TTB1_PAGE_TBL_BASE_ADDR_MASK); uint32_t ptindex = ALT_MMU_PAGE_TABLE_INDEX(va); if (pagetable[ptindex] != 0) { return ALT_E_ERROR; } pagetable[ptindex] = desc; va += ALT_MMU_SMALL_PAGE_SIZE; pa += ALT_MMU_SMALL_PAGE_SIZE; size -= ALT_MMU_SMALL_PAGE_SIZE; } } // for (size_t i = 0; i < num_mem_regions; ++i) return ALT_E_SUCCESS; } ALT_STATUS_CODE alt_mmu_va_space_enable(const uint32_t * ttb1) { ALT_STATUS_CODE status = ALT_E_SUCCESS; // Set TTBCR to use N=0 if (status == ALT_E_SUCCESS) { status = alt_mmu_TTBCR_set(true, true, 0); if (status != ALT_E_SUCCESS) { dprintf("DEBUG[MMU:VA]: Failure on line %d.\n", __LINE__); } } // Set TTBR0 to use ttb1 if (status == ALT_E_SUCCESS) { status = alt_mmu_TTBR0_set(ttb1); if (status != ALT_E_SUCCESS) { dprintf("DEBUG[MMU:VA]: Failure on line %d.\n", __LINE__); } } // Configure DACRs to be client domain. 
if (status == ALT_E_SUCCESS) { ALT_MMU_DAP_t domain_ap[16]; for (int i = 0; i < 16; ++i) { domain_ap[i] = ALT_MMU_DAP_CLIENT; } status = alt_mmu_DACR_set(domain_ap, 16); if (status != ALT_E_SUCCESS) { dprintf("DEBUG[MMU:VA]: Failure on line %d.\n", __LINE__); } } // Enable MMU (implicitly invalidates TLBs) if (status == ALT_E_SUCCESS) { status = alt_mmu_enable(); if (status != ALT_E_SUCCESS) { dprintf("DEBUG[MMU:VA]: Failure on line %d.\n", __LINE__); } } return status; }
gpl-2.0
kuba160/tf300t-kernel
drivers/video/tegra/dc/hdmi.c
11
75273
/* * drivers/video/tegra/dc/hdmi.c * * Copyright (C) 2010 Google, Inc. * Author: Erik Gilling <konkers@android.com> * * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/spinlock.h> #ifdef CONFIG_SWITCH #include <linux/switch.h> #endif #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/device.h> #include <mach/clk.h> #include <mach/dc.h> #include <mach/fb.h> #include <linux/nvhost.h> #include <mach/hdmi-audio.h> #include <video/tegrafb.h> #include "dc_reg.h" #include "dc_priv.h" #include "hdmi_reg.h" #include "hdmi.h" #include "edid.h" #include "nvhdcp.h" #include <mach/board-cardhu-misc.h> /* datasheet claims this will always be 216MHz */ #define HDMI_AUDIOCLK_FREQ 216000000 #define HDMI_REKEY_DEFAULT 56 #define HDMI_ELD_RESERVED1_INDEX 1 #define HDMI_ELD_RESERVED2_INDEX 3 #define HDMI_ELD_VER_INDEX 0 #define HDMI_ELD_BASELINE_LEN_INDEX 2 #define HDMI_ELD_CEA_VER_MNL_INDEX 4 #define HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX 5 #define HDMI_ELD_AUD_SYNC_DELAY_INDEX 6 #define HDMI_ELD_SPK_ALLOC_INDEX 7 #define HDMI_ELD_PORT_ID_INDEX 8 #define HDMI_ELD_MANF_NAME_INDEX 16 #define HDMI_ELD_PRODUCT_CODE_INDEX 18 #define HDMI_ELD_MONITOR_NAME_INDEX 20 /* These two values need to be cross checked in case of addition/removal from tegra_dc_hdmi_aspect_ratios[] 
*/ #define TEGRA_DC_HDMI_MIN_ASPECT_RATIO_PERCENT 80 #define TEGRA_DC_HDMI_MAX_ASPECT_RATIO_PERCENT 320 /* Percentage equivalent of standard aspect ratios accurate upto two decimal digits */ static int tegra_dc_hdmi_aspect_ratios[] = { /* 3:2 */ 150, /* 4:3 */ 133, /* 4:5 */ 80, /* 5:4 */ 125, /* 9:5 */ 180, /* 16:5 */ 320, /* 16:9 */ 178, /* 16:10 */ 160, /* 19:10 */ 190, /* 25:16 */ 156, /* 64:35 */ 183, /* 72:35 */ 206 }; struct tegra_dc_hdmi_data { struct tegra_dc *dc; struct tegra_edid *edid; struct tegra_edid_hdmi_eld eld; struct tegra_nvhdcp *nvhdcp; struct delayed_work work; struct resource *base_res; void __iomem *base; struct clk *clk; struct clk *disp1_clk; struct clk *disp2_clk; struct clk *hda_clk; struct clk *hda2codec_clk; struct clk *hda2hdmi_clk; #ifdef CONFIG_SWITCH struct switch_dev hpd_switch; #endif spinlock_t suspend_lock; bool suspended; bool eld_retrieved; bool clk_enabled; unsigned audio_freq; unsigned audio_source; bool audio_inject_null; bool dvi; }; struct tegra_dc_hdmi_data *dc_hdmi; const struct fb_videomode tegra_dc_hdmi_supported_modes[] = { /* 1280x720p 60hz: EIA/CEA-861-B Format 4 */ { .xres = 1280, .yres = 720, .pixclock = KHZ2PICOS(74250), .hsync_len = 40, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 220, /* h_back_porch */ .upper_margin = 20, /* v_back_porch */ .right_margin = 110, /* h_front_porch */ .lower_margin = 5, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1280x720p 60hz: EIA/CEA-861-B Format 4 (Stereo)*/ { .xres = 1280, .yres = 720, .pixclock = KHZ2PICOS(74250), .hsync_len = 40, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 220, /* h_back_porch */ .upper_margin = 20, /* v_back_porch */ .right_margin = 110, /* h_front_porch */ .lower_margin = 5, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED | #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT FB_VMODE_STEREO_FRAME_PACK, #else FB_VMODE_STEREO_LEFT_RIGHT, #endif 
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */ { .xres = 720, .yres = 480, .pixclock = KHZ2PICOS(27000), .hsync_len = 62, /* h_sync_width */ .vsync_len = 6, /* v_sync_width */ .left_margin = 60, /* h_back_porch */ .upper_margin = 30, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 9, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 640x480p 60hz: EIA/CEA-861-B Format 1 */ { .xres = 640, .yres = 480, .pixclock = KHZ2PICOS(25200), .hsync_len = 96, /* h_sync_width */ .vsync_len = 2, /* v_sync_width */ .left_margin = 48, /* h_back_porch */ .upper_margin = 33, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 10, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */ { .xres = 720, .yres = 576, .pixclock = KHZ2PICOS(27000), .hsync_len = 64, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 68, /* h_back_porch */ .upper_margin = 39, /* v_back_porch */ .right_margin = 12, /* h_front_porch */ .lower_margin = 5, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 1920x1080p 23.98/24hz: EIA/CEA-861-B Format 32 (Stereo)*/ { .xres = 1920, .yres = 1080, .pixclock = KHZ2PICOS(74250), .hsync_len = 44, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 148, /* h_back_porch */ .upper_margin = 36, /* v_back_porch */ .right_margin = 638, /* h_front_porch */ .lower_margin = 4, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED | #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT FB_VMODE_STEREO_FRAME_PACK, #else FB_VMODE_STEREO_LEFT_RIGHT, #endif .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1920x1080p 30Hz EIA/CEA-861-B Format 34 */ { .xres = 1920, .yres = 1080, .pixclock = KHZ2PICOS(74250), .hsync_len = 44, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 148, /* h_back_porch */ .upper_margin = 36, /* v_back_porch 
*/ .right_margin = 88, /* h_front_porch */ .lower_margin = 4, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1920x1080p 59.94/60hz CVT */ { .xres = 1920, .yres = 1080, .pixclock = KHZ2PICOS(138500), .hsync_len = 32, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 80, /* h_back_porch */ .upper_margin = 23, /* v_back_porch */ .right_margin = 48, /* h_front_porch */ .lower_margin = 3, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_VERT_HIGH_ACT, }, /* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */ { .xres = 1920, .yres = 1080, .pixclock = KHZ2PICOS(148500), .hsync_len = 44, /* h_sync_width */ .vsync_len = 5, /* v_sync_width */ .left_margin = 148, /* h_back_porch */ .upper_margin = 36, /* v_back_porch */ .right_margin = 88, /* h_front_porch */ .lower_margin = 4, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* * Few VGA/SVGA modes to support monitors with lower * resolutions or to support HDMI<->DVI connection */ /* 640x480p 75hz */ { .xres = 640, .yres = 480, .pixclock = KHZ2PICOS(31500), .hsync_len = 96, /* h_sync_width */ .vsync_len = 2, /* v_sync_width */ .left_margin = 48, /* h_back_porch */ .upper_margin = 32, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 720x400p 59hz */ { .xres = 720, .yres = 400, .pixclock = KHZ2PICOS(35500), .hsync_len = 72, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 108, /* h_back_porch */ .upper_margin = 42, /* v_back_porch */ .right_margin = 36, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_VERT_HIGH_ACT, }, /* 800x600p 60hz */ { .xres = 800, .yres = 600, .pixclock = KHZ2PICOS(40000), .hsync_len = 128, /* h_sync_width */ .vsync_len = 4, /* v_sync_width */ .left_margin = 88, /* 
h_back_porch */ .upper_margin = 23, /* v_back_porch */ .right_margin = 40, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 800x600p 75hz */ { .xres = 800, .yres = 600, .pixclock = KHZ2PICOS(49500), .hsync_len = 80, /* h_sync_width */ .vsync_len = 2, /* v_sync_width */ .left_margin = 160, /* h_back_porch */ .upper_margin = 21, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1024x768p 60hz */ { .xres = 1024, .yres = 768, .pixclock = KHZ2PICOS(65000), .hsync_len = 136, /* h_sync_width */ .vsync_len = 6, /* v_sync_width */ .left_margin = 160, /* h_back_porch */ .upper_margin = 29, /* v_back_porch */ .right_margin = 24, /* h_front_porch */ .lower_margin = 3, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 1024x768p 75hz */ { .xres = 1024, .yres = 768, .pixclock = KHZ2PICOS(78800), .hsync_len = 96, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 176, /* h_back_porch */ .upper_margin = 28, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = 0, }, /* 1152x864p 75hz */ { .xres = 1152, .yres = 864, .pixclock = KHZ2PICOS(108000), .hsync_len = 128, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 256, /* h_back_porch */ .upper_margin = 32, /* v_back_porch */ .right_margin = 64, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1280x800p 60hz */ { .xres = 1280, .yres = 800, .pixclock = KHZ2PICOS(83460), .hsync_len = 136, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 200, /* h_back_porch */ .upper_margin = 24, /* v_back_porch */ .right_margin = 64, /* h_front_porch 
*/ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_VERT_HIGH_ACT, }, /* 1280x960p 60hz */ { .xres = 1280, .yres = 960, .pixclock = KHZ2PICOS(108000), .hsync_len = 136, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 216, /* h_back_porch */ .upper_margin = 30, /* v_back_porch */ .right_margin = 80, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_VERT_HIGH_ACT, }, /* 1280x1024p 60hz */ { .xres = 1280, .yres = 1024, .pixclock = KHZ2PICOS(108000), .hsync_len = 112, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 248, /* h_back_porch */ .upper_margin = 38, /* v_back_porch */ .right_margin = 48, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1280x1024p 75hz */ { .xres = 1280, .yres = 1024, .pixclock = KHZ2PICOS(135000), .hsync_len = 144, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 248, /* h_back_porch */ .upper_margin = 38, /* v_back_porch */ .right_margin = 16, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, /* 1368x768p 60hz */ { .xres = 1368, .yres = 768, .pixclock = KHZ2PICOS(85860), .hsync_len = 144, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 216, /* h_back_porch */ .upper_margin = 23, /* v_back_porch */ .right_margin = 72, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_VERT_HIGH_ACT, }, /* 1440x900p 60hz */ { .xres = 1440, .yres = 900, .pixclock = KHZ2PICOS(106470), .hsync_len = 152, /* h_sync_width */ .vsync_len = 3, /* v_sync_width */ .left_margin = 232, /* h_back_porch */ .upper_margin = 28, /* v_back_porch */ .right_margin = 80, /* h_front_porch */ .lower_margin = 1, /* v_front_porch */ .vmode = 
	/* continuation of the 1440x900p 60hz entry opened above */
	FB_VMODE_NONINTERLACED,
	.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/*
	 * Field legend for the timing entries below:
	 * hsync_len/vsync_len = sync widths, left/upper margins = back
	 * porches, right/lower margins = front porches.
	 */

	/* 1600x1200p 60hz */
	{
		.xres = 1600, .yres = 1200,
		.pixclock = KHZ2PICOS(162000),
		.hsync_len = 192, .vsync_len = 3,
		.left_margin = 304, .upper_margin = 46,
		.right_margin = 64, .lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1600x1200p 75hz */
	{
		.xres = 1600, .yres = 1200,
		.pixclock = KHZ2PICOS(202500),
		.hsync_len = 192, .vsync_len = 3,
		.left_margin = 304, .upper_margin = 46,
		.right_margin = 64, .lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1680x1050p 59.94/60hz */
	{
		.xres = 1680, .yres = 1050,
		.pixclock = KHZ2PICOS(147140),
		.hsync_len = 184, .vsync_len = 3,
		.left_margin = 288, .upper_margin = 33,
		.right_margin = 104, .lower_margin = 1,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
};

/* CVT timing representation of VESA modes */
const struct fb_videomode tegra_dc_hdmi_supported_cvt_modes[] = {

	/* 640x480p 60hz */
	{
		.refresh = 60, .xres = 640, .yres = 480,
		.pixclock = KHZ2PICOS(23750),
		.hsync_len = 64, .vsync_len = 4,
		.left_margin = 80, .upper_margin = 17,
		.right_margin = 16, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 640x480p 75hz */
	{
		.refresh = 75, .xres = 640, .yres = 480,
		.pixclock = KHZ2PICOS(30750),
		.hsync_len = 64, .vsync_len = 4,
		.left_margin = 88, .upper_margin = 21,
		.right_margin = 24, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 720x400p 59hz */
	{
		.refresh = 59, .xres = 720, .yres = 400,
		.pixclock = KHZ2PICOS(22000),
		.hsync_len = 64, .vsync_len = 10,
		.left_margin = 88, .upper_margin = 14,
		.right_margin = 24, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 800x600p 60hz */
	{
		.refresh = 60, .xres = 800, .yres = 600,
		.pixclock = KHZ2PICOS(38250),
		.hsync_len = 80, .vsync_len = 4,
		.left_margin = 112, .upper_margin = 21,
		.right_margin = 32, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 800x600p 75hz */
	{
		.refresh = 75, .xres = 800, .yres = 600,
		.pixclock = KHZ2PICOS(49000),
		.hsync_len = 80, .vsync_len = 4,
		.left_margin = 120, .upper_margin = 26,
		.right_margin = 40, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1024x768p 60hz */
	{
		.refresh = 60, .xres = 1024, .yres = 768,
		.pixclock = KHZ2PICOS(63500),
		.hsync_len = 104, .vsync_len = 4,
		.left_margin = 152, .upper_margin = 27,
		.right_margin = 48, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1024x768p 75hz */
	{
		.refresh = 75, .xres = 1024, .yres = 768,
		.pixclock = KHZ2PICOS(82000),
		.hsync_len = 104, .vsync_len = 4,
		.left_margin = 168, .upper_margin = 34,
		.right_margin = 64, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1152x864p 75hz */
	{
		.refresh = 75, .xres = 1152, .yres = 864,
		.pixclock = KHZ2PICOS(104500),
		.hsync_len = 120, .vsync_len = 10,
		.left_margin = 192, .upper_margin = 38,
		.right_margin = 72, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x720p 60hz */
	{
		.refresh = 60, .xres = 1280, .yres = 720,
		.pixclock = KHZ2PICOS(74250),
		.hsync_len = 40, .vsync_len = 5,
		.left_margin = 220, .upper_margin = 20,
		.right_margin = 110, .lower_margin = 5,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x800p 60hz */
	{
		.refresh = 60, .xres = 1280, .yres = 800,
		.pixclock = KHZ2PICOS(83500),
		.hsync_len = 128, .vsync_len = 6,
		.left_margin = 200, .upper_margin = 28,
		.right_margin = 72, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x960p 60hz */
	{
		.refresh = 60, .xres = 1280, .yres = 960,
		.pixclock = KHZ2PICOS(101250),
		.hsync_len = 128, .vsync_len = 4,
		.left_margin = 208, .upper_margin = 33,
		.right_margin = 80, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x1024p 60hz */
	{
		.refresh = 60, .xres = 1280, .yres = 1024,
		.pixclock = KHZ2PICOS(109000),
		.hsync_len = 136, .vsync_len = 7,
		.left_margin = 216, .upper_margin = 36,
		.right_margin = 80, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1280x1024p 75hz */
	{
		.refresh = 75, .xres = 1280, .yres = 1024,
		.pixclock = KHZ2PICOS(138750),
		.hsync_len = 136, .vsync_len = 7,
		.left_margin = 224, .upper_margin = 45,
		.right_margin = 88, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1368x768p 60hz */
	{
		.refresh = 60, .xres = 1368, .yres = 768,
		.pixclock = KHZ2PICOS(85250),
		.hsync_len = 136, .vsync_len = 10,
		.left_margin = 208, .upper_margin = 27,
		.right_margin = 72, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1440x900p 60hz */
	{
		.refresh = 60, .xres = 1440, .yres = 900,
		.pixclock = KHZ2PICOS(106500),
		.hsync_len = 152, .vsync_len = 6,
		.left_margin = 232, .upper_margin = 31,
		.right_margin = 80, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1600x1200p 60hz */
	{
		.refresh = 60, .xres = 1600, .yres = 1200,
		.pixclock = KHZ2PICOS(161000),
		.hsync_len = 168, .vsync_len = 4,
		.left_margin = 280, .upper_margin = 42,
		.right_margin = 112, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1600x1200p 75hz */
	{
		.refresh = 75, .xres = 1600, .yres = 1200,
		.pixclock = KHZ2PICOS(204750),
		.hsync_len = 168, .vsync_len = 4,
		.left_margin = 288, .upper_margin = 52,
		.right_margin = 120, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},

	/* 1680x1050p 59.94/60hz */
	{
		.refresh = 60, .xres = 1680, .yres = 1050,
		.pixclock = KHZ2PICOS(140000),
		.hsync_len = 168, .vsync_len = 10,
		.left_margin = 272, .upper_margin = 36,
		.right_margin = 104, .lower_margin = 3,
		.vmode = FB_VMODE_NONINTERLACED,
		.sync = FB_SYNC_VERT_HIGH_ACT,
	},
};

/* table of electrical settings, must be in ascending order of pclk. */
struct tdms_config {
	int pclk;		/* upper pixel-clock bound this entry covers */
	u32 pll0;
	u32 pll1;
	u32 pe_current;		/* pre-emphasis */
	u32 drive_current;
};

#ifndef CONFIG_ARCH_TEGRA_2x_SOC
const struct tdms_config tdms_config[] = {
	{ /* 480p modes */
	.pclk = 27000000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
		PE_CURRENT1(PE_CURRENT_0_0_mA) |
		PE_CURRENT2(PE_CURRENT_0_0_mA) |
		PE_CURRENT3(PE_CURRENT_0_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
	{ /* 720p modes */
	.pclk = 74250000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
		PE_CURRENT1(PE_CURRENT_5_0_mA) |
		PE_CURRENT2(PE_CURRENT_5_0_mA) |
		PE_CURRENT3(PE_CURRENT_5_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
	{ /* 1080p modes */
	.pclk = INT_MAX,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(3) | SOR_PLL_TX_REG_LOAD(0),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	/* raw register values; named-constant equivalents kept below for reference */
	.pe_current = PE_CURRENT0(0xf) | PE_CURRENT1(0xf) |
		PE_CURRENT2(0xf) | PE_CURRENT3(0xf),
	.drive_current = DRIVE_CURRENT_LANE0(0x0f) | DRIVE_CURRENT_LANE1(0x0f) |
		DRIVE_CURRENT_LANE2(0x0f) | DRIVE_CURRENT_LANE3(0x0f),
/*	.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
		PE_CURRENT1(PE_CURRENT_5_0_mA) |
		PE_CURRENT2(PE_CURRENT_5_0_mA) |
		PE_CURRENT3(PE_CURRENT_5_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), */
	},
};
#else /* CONFIG_ARCH_TEGRA_2x_SOC */
const struct tdms_config tdms_config[] = {
	{ /* 480p modes */
	.pclk = 27000000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
	.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
		PE_CURRENT1(PE_CURRENT_0_0_mA) |
		PE_CURRENT2(PE_CURRENT_0_0_mA) |
		PE_CURRENT3(PE_CURRENT_0_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
	{ /* 720p modes */
	.pclk = 74250000,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
		PE_CURRENT1(PE_CURRENT_6_0_mA) |
		PE_CURRENT2(PE_CURRENT_6_0_mA) |
		PE_CURRENT3(PE_CURRENT_6_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
	{ /* 1080p modes */
	.pclk = INT_MAX,
	.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL |
		SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3),
	.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
	.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
		PE_CURRENT1(PE_CURRENT_6_0_mA) |
		PE_CURRENT2(PE_CURRENT_6_0_mA) |
		PE_CURRENT3(PE_CURRENT_6_0_mA),
	.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
		DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
};
#endif

/*
 * HDMI audio clock regeneration parameters for one sample rate:
 * for a given TMDS pixel clock, the N/CTS pair and AVAL used by the
 * ACR packets / audio engine. Tables are terminated by a zeroed entry
 * (pix_clock == 0); the trailing field defaults to 0 in the terminator.
 */
struct tegra_hdmi_audio_config {
	unsigned pix_clock;
	unsigned n;
	unsigned cts;
	unsigned aval;
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
	{25200000,	4096,	25200,	24000},
	{27000000,	4096,	27000,	24000},
	{74250000,	4096,	74250,	24000},
	{148500000,	4096,	148500,	24000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
	{25200000,	5880,	26250,	25000},
	{27000000,	5880,	28125,	25000},
	{74250000,	4704,	61875,	20000},
	{148500000,	4704,	123750,	20000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
	{25200000,	6144,	25200,	24000},
	{27000000,	6144,	27000,	24000},
	{74250000,	6144,	74250,	24000},
	{148500000,	6144,	148500,	24000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
	{25200000,	11760,	26250,	25000},
	{27000000,	11760,	28125,	25000},
	{74250000,	9408,	61875,	20000},
	{148500000,	9408,	123750,	20000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
	{25200000,	12288,	25200,	24000},
	{27000000,	12288,	27000,	24000},
	{74250000,	12288,	74250,	24000},
	{148500000,	12288,	148500,	24000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
	{25200000,	23520,	26250,	25000},
	{27000000,	23520,	28125,	25000},
	{74250000,	18816,	61875,	20000},
	{148500000,	18816,	123750,	20000},
	{0,		0,	0},
};

const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
	{25200000,	24576,	25200,	24000},
	{27000000,	24576,	27000,	24000},
	{74250000,	24576,	74250,	24000},
	{148500000,	24576,	148500,	24000},
	{0,		0,	0},
};

/* Look up the audio config for (sample rate, pixel clock); NULL if no match. */
static const struct tegra_hdmi_audio_config
*tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock)
{ const struct tegra_hdmi_audio_config *table; switch (audio_freq) { case AUDIO_FREQ_32K: table = tegra_hdmi_audio_32k; break; case AUDIO_FREQ_44_1K: table = tegra_hdmi_audio_44_1k; break; case AUDIO_FREQ_48K: table = tegra_hdmi_audio_48k; break; case AUDIO_FREQ_88_2K: table = tegra_hdmi_audio_88_2k; break; case AUDIO_FREQ_96K: table = tegra_hdmi_audio_96k; break; case AUDIO_FREQ_176_4K: table = tegra_hdmi_audio_176_4k; break; case AUDIO_FREQ_192K: table = tegra_hdmi_audio_192k; break; default: return NULL; } while (table->pix_clock) { if (table->pix_clock == pix_clock) return table; table++; } return NULL; } unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi, unsigned long reg) { unsigned long ret; ret = readl(hdmi->base + reg * 4); trace_printk("readl %p=%#08lx\n", hdmi->base + reg * 4, ret); return ret; } void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi, unsigned long val, unsigned long reg) { trace_printk("writel %p=%#08lx\n", hdmi->base + reg * 4, val); writel(val, hdmi->base + reg * 4); } static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi, unsigned long reg, unsigned long clr, unsigned long set) { unsigned long val = tegra_hdmi_readl(hdmi, reg); val &= ~clr; val |= set; tegra_hdmi_writel(hdmi, val, reg); } #ifdef CONFIG_DEBUG_FS static int dbg_hdmi_show(struct seq_file *s, void *unused) { struct tegra_dc_hdmi_data *hdmi = s->private; #define DUMP_REG(a) do { \ seq_printf(s, "%-32s\t%03x\t%08lx\n", \ #a, a, tegra_hdmi_readl(hdmi, a)); \ } while (0) tegra_dc_io_start(hdmi->dc); clk_enable(hdmi->clk); DUMP_REG(HDMI_CTXSW); DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); DUMP_REG(HDMI_NV_PDISP_SOR_STATE1); DUMP_REG(HDMI_NV_PDISP_SOR_STATE2); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); 
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); #undef DUMP_REG clk_disable(hdmi->clk); tegra_dc_io_end(hdmi->dc); return 0; } static int dbg_hdmi_open(struct inode *inode, struct file *file) { return single_open(file, dbg_hdmi_show, inode->i_private); } static const struct file_operations dbg_fops = { .open = dbg_hdmi_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *hdmidir; static void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi) { struct dentry *retval; hdmidir = debugfs_create_dir("tegra_hdmi", NULL); if (!hdmidir) return; retval = debugfs_create_file("regs", S_IRUGO, hdmidir, hdmi, &dbg_fops); if (!retval) goto free_out; return; free_out: debugfs_remove_recursive(hdmidir); hdmidir = NULL; return; } #else static inline void tegra_dc_hdmi_debug_create(struct tegra_dc_hdmi_data *hdmi) { } #endif #define PIXCLOCK_TOLERANCE 200 static int tegra_dc_calc_clock_per_frame(const struct fb_videomode *mode) { return (mode->left_margin + mode->xres + mode->right_margin + mode->hsync_len) * (mode->upper_margin + mode->yres + mode->lower_margin + mode->vsync_len); } static bool tegra_dc_hdmi_mode_equal(const struct fb_videomode *mode1, const struct fb_videomode *mode2) { int clock_per_frame = tegra_dc_calc_clock_per_frame(mode1); /* allows up to 1Hz of pixclock difference */ if (mode1->pixclock != mode2->pixclock) { return (mode1->xres == mode2->xres && mode1->yres == mode2->yres && mode1->vmode == mode2->vmode && (abs(PICOS2KHZ(mode1->pixclock) - PICOS2KHZ(mode2->pixclock)) * 1000 / clock_per_frame <= 1)); } else { return (mode1->xres == mode2->xres && mode1->yres == mode2->yres && mode1->vmode == mode2->vmode); } } static bool tegra_dc_hdmi_valid_pixclock(const struct tegra_dc *dc, const struct fb_videomode *mode) { unsigned max_pixclock = tegra_dc_get_out_max_pixclock(dc); if (max_pixclock) { /* this might look counter-intuitive, * but pixclock's unit is picos(not Khz) */ 
		return mode->pixclock >= max_pixclock;
	} else {
		return true;
	}
}

/* CVT-table equality: geometry, refresh and vmode must all match. */
static bool tegra_dc_cvt_mode_equal(const struct fb_videomode *mode1,
					const struct fb_videomode *mode2)
{
	return (mode1->xres == mode2->xres &&
		mode1->yres == mode2->yres &&
		mode1->refresh == mode2->refresh &&
		mode1->vmode == mode2->vmode);
}

/* Replace *mode with the matching entry from the CVT table, if any. */
static bool tegra_dc_reload_mode(struct fb_videomode *mode)
{
	int i = 0;
	for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_cvt_modes); i++) {
		const struct fb_videomode *cvt_mode =
				&tegra_dc_hdmi_supported_cvt_modes[i];
		if (tegra_dc_cvt_mode_equal(cvt_mode, mode)) {
			memcpy(mode, cvt_mode, sizeof(*mode));
			return true;
		}
	}
	return false;
}

/* Replace *mode with the matching entry from the CEA/VESA table, if any. */
static bool tegra_dc_reload_supported_mode(struct fb_videomode *mode)
{
	int i = 0;
	for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_modes); i++) {
		const struct fb_videomode *supported_mode =
				&tegra_dc_hdmi_supported_modes[i];
		if (tegra_dc_hdmi_mode_equal(supported_mode, mode)) {
			memcpy(mode, supported_mode, sizeof(*mode));
			return true;
		}
	}
	return false;
}

/* Accept only modes whose x/y aspect ratio is in the supported table. */
static bool tegra_dc_hdmi_valid_asp_ratio(const struct tegra_dc *dc,
					struct fb_videomode *mode)
{
	int count = 0;
	int m_aspratio = 0;
	int s_aspratio = 0;

	/* To check the aspect up to two decimal digits, calculate in % */
	m_aspratio = (mode->xres*100 / mode->yres);

	if ((m_aspratio < TEGRA_DC_HDMI_MIN_ASPECT_RATIO_PERCENT) ||
		(m_aspratio > TEGRA_DC_HDMI_MAX_ASPECT_RATIO_PERCENT))
		return false;

	/* Check from the table of supported aspect ratios, allow
	 * difference of 1% for second decimal digit calibration */
	for (count = 0; count < ARRAY_SIZE(tegra_dc_hdmi_aspect_ratios);
		count++) {
		s_aspratio = tegra_dc_hdmi_aspect_ratios[count];
		if ((m_aspratio == s_aspratio) ||
			(abs(m_aspratio - s_aspratio) == 1))
			return true;
	}

	return false;
}

/*
 * Filter callback for fb_monspecs update: returns true for modes this
 * head can drive, snapping the mode to a known CEA/VESA or CVT entry
 * and recomputing its refresh field on the way.
 */
static bool tegra_dc_hdmi_mode_filter(const struct tegra_dc *dc,
					struct fb_videomode *mode)
{
#if 0
	printk("fb_videomode mode, xres=%d, yres=%d, pixclock=%d, vmode=%d\n",
		mode->xres, mode->yres, mode->pixclock, mode->vmode);
	printk("left_margin=%d, right_margin=%d, upper_margin=%d, lower_margin=%d\n",
		mode->left_margin, mode->right_margin, mode->upper_margin,
		mode->lower_margin);
	printk("hsync_len=%d, vsync_len=%d, refresh=%d\n",
		mode->hsync_len, mode->vsync_len, mode->refresh);
#endif
	if (mode->vmode & FB_VMODE_INTERLACED)
		return false;

	/* Ignore modes with a 0 pixel clock */
	if (!mode->pixclock)
		return false;

#ifdef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
	if (PICOS2KHZ(mode->pixclock) > 74250)
		return false;
#endif

	/* Check if the mode's pixel clock is more than the max rate*/
	if (!tegra_dc_hdmi_valid_pixclock(dc, mode))
		return false;

	/* Check if the mode's aspect ratio is supported */
	if (!tegra_dc_hdmi_valid_asp_ratio(dc, mode))
		return false;

	/* since P1801 main display is HDMI, the resolution should fix to 1080p */
	if (tegra3_get_project_id()==TEGRA3_PROJECT_P1801 &&
		(mode->xres < 1920 || mode->yres < 1080))
		return false;

	/* Check some of DC's constraints */
	if (mode->hsync_len > 1 && mode->vsync_len > 1 &&
		mode->lower_margin + mode->vsync_len + mode->upper_margin > 1 &&
		mode->xres >= 16 && mode->yres >= 16) {

		if (mode->lower_margin == 1) {
			/* This might be the case for HDMI<->DVI
			 * where std VESA representation will not
			 * pass constraint V_FRONT_PORCH >=
			 * V_REF_TO_SYNC + 1.So reload mode in
			 * CVT timing standards.
			 */
			if (!tegra_dc_reload_mode(mode))
				return false;
		} else {
			if (!tegra_dc_reload_supported_mode(mode))
				return false;
		}

		mode->flag = FB_MODE_IS_DETAILED;
		mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000) /
				tegra_dc_calc_clock_per_frame(mode);
		return true;
	}

	return false;
}

/* Hot-plug detect state, delegated to the generic DC helper. */
static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc)
{
	return tegra_dc_hpd(dc);
}

/*
 * Apply the monitor's parsed EDID specs: record physical size, note
 * DVI-vs-HDMI, push the filtered mode list to the framebuffer, update
 * the hotplug switch, and announce the hotplug to the extension layer.
 */
void tegra_dc_hdmi_detect_config(struct tegra_dc *dc,
						struct fb_monspecs *specs)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* monitors like to lie about these but they are still useful for
	 * detecting aspect ratios
	 */
	dc->out->h_size = specs->max_x * 1000;
	dc->out->v_size = specs->max_y * 1000;

	hdmi->dvi = !(specs->misc & FB_MISC_HDMI);

	tegra_fb_update_monspecs(dc->fb, specs, tegra_dc_hdmi_mode_filter);
#ifdef CONFIG_SWITCH
	hdmi->hpd_switch.state = 0;
	if (tegra3_get_project_id()==TEGRA3_PROJECT_P1801) {
		/* do not tell userspace there is an HDMI plugged in */
		switch_set_state(&hdmi->hpd_switch, 0);
	} else {
		if (hdmi->dvi)
			switch_set_state(&hdmi->hpd_switch, 2);
		else
			switch_set_state(&hdmi->hpd_switch, 1);
	}
#endif
	dev_info(&dc->ndev->dev, "display detected\n");

	dc->connected = true;
	tegra_dc_ext_process_hotplug(dc->ndev->id);
}

/* This function is used to enable DC1 and HDMI for the purpose of testing.
*/ bool tegra_dc_hdmi_detect_test(struct tegra_dc *dc, unsigned char *edid_ptr) { int err; struct fb_monspecs specs; struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); if (!hdmi || !edid_ptr) { dev_err(&dc->ndev->dev, "HDMI test failed to get arguments.\n"); return false; } err = tegra_edid_get_monspecs_test(hdmi->edid, &specs, edid_ptr); if (err < 0) { /* Check if there's a hard-wired mode, if so, enable it */ if (dc->out->n_modes) tegra_dc_enable(dc); else { dev_err(&dc->ndev->dev, "error reading edid\n"); goto fail; } #ifdef CONFIG_SWITCH hdmi->hpd_switch.state = 0; switch_set_state(&hdmi->hpd_switch, 1); #endif dev_info(&dc->ndev->dev, "display detected\n"); dc->connected = true; tegra_dc_ext_process_hotplug(dc->ndev->id); } else { err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld); if (err < 0) { dev_err(&dc->ndev->dev, "error populating eld\n"); goto fail; } hdmi->eld_retrieved = true; tegra_dc_hdmi_detect_config(dc, &specs); } return true; fail: hdmi->eld_retrieved = false; #ifdef CONFIG_SWITCH switch_set_state(&hdmi->hpd_switch, 0); #endif if (hdmi->nvhdcp) { tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0); } return false; } EXPORT_SYMBOL(tegra_dc_hdmi_detect_test); static bool tegra_dc_hdmi_detect(struct tegra_dc *dc) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); struct fb_monspecs specs; int err; if (!tegra_dc_hdmi_hpd(dc)) goto fail; err = tegra_edid_get_monspecs(hdmi->edid, &specs); if (err < 0) { if (dc->out->n_modes) tegra_dc_enable(dc); else { dev_err(&dc->ndev->dev, "error reading edid\n"); goto fail; } #ifdef CONFIG_SWITCH hdmi->hpd_switch.state = 0; switch_set_state(&hdmi->hpd_switch, 1); #endif dev_info(&dc->ndev->dev, "display detected\n"); dc->connected = true; tegra_dc_ext_process_hotplug(dc->ndev->id); } else { err = tegra_edid_get_eld(hdmi->edid, &hdmi->eld); if (err < 0) { dev_err(&dc->ndev->dev, "error populating eld\n"); goto fail; } hdmi->eld_retrieved = true; tegra_dc_hdmi_detect_config(dc, &specs); } return true; fail: 
	/* shared failure path of tegra_dc_hdmi_detect(): forget any stale ELD,
	 * report "unplugged" to userspace (switch) and to HDCP */
	hdmi->eld_retrieved = false;
#ifdef CONFIG_SWITCH
	switch_set_state(&hdmi->hpd_switch, 0);
#endif
	if (hdmi->nvhdcp) {
		tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
	}
	return false;
}

/*
 * Deferred hotplug handler, queued from tegra_dc_hdmi_irq().
 * Powers the controller up, re-runs sink detection, and tears the
 * output back down (and clears the fb modelist) if no sink answered.
 */
static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
{
	struct tegra_dc_hdmi_data *hdmi =
		container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
	struct tegra_dc *dc = hdmi->dc;

	tegra_dc_enable(dc);
	msleep(5);	/* brief settle time after enabling before probing DDC/HPD */
	if (!tegra_dc_hdmi_detect(dc)) {
		tegra_dc_disable(dc);
		tegra_fb_update_monspecs(dc->fb, NULL, NULL);

		dc->connected = false;
		tegra_dc_ext_process_hotplug(dc->ndev->id);
	}
}

/*
 * HPD GPIO interrupt: debounce by (re)queueing the detect worker.
 * Delays differ per edge direction (100 ms when the cable appears
 * present, 30 ms otherwise).  Work is only queued while not suspended;
 * suspend_lock guards the suspended flag against the suspend/resume path.
 */
static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr)
{
	struct tegra_dc *dc = ptr;
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	unsigned long flags;

	spin_lock_irqsave(&hdmi->suspend_lock, flags);
	if (!hdmi->suspended) {
		/* collapse bursts of HPD edges into a single detect pass */
		__cancel_delayed_work(&hdmi->work);
		if (tegra_dc_hdmi_hpd(dc))
			queue_delayed_work(system_nrt_wq, &hdmi->work,
					   msecs_to_jiffies(100));
		else
			queue_delayed_work(system_nrt_wq, &hdmi->work,
					   msecs_to_jiffies(30));
	}
	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Suspend hook: pause HDCP first, then mark the driver suspended so the
 * HPD IRQ stops queueing detect work (see tegra_dc_hdmi_irq()).
 */
static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	unsigned long flags;

	if (hdmi->nvhdcp) {
		tegra_nvhdcp_suspend(hdmi->nvhdcp);
	}
	spin_lock_irqsave(&hdmi->suspend_lock, flags);
	hdmi->suspended = true;
	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
}

/*
 * Resume hook: clear the suspended flag and kick one detect pass so a
 * cable change that happened while asleep is noticed.  On the P1801
 * board the detect requeue and HDCP resume are deliberately skipped
 * (its main panel is the HDMI output) — presumably handled elsewhere;
 * NOTE(review): confirm against the board file.
 */
static void tegra_dc_hdmi_resume(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	unsigned long flags;

	spin_lock_irqsave(&hdmi->suspend_lock, flags);
	hdmi->suspended = false;

	if ( tegra3_get_project_id()!=TEGRA3_PROJECT_P1801 ) {
		if (tegra_dc_hdmi_hpd(dc))
			queue_delayed_work(system_nrt_wq, &hdmi->work,
					   msecs_to_jiffies(100));
		else
			queue_delayed_work(system_nrt_wq, &hdmi->work,
					   msecs_to_jiffies(30));
	}
	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);

	if ( tegra3_get_project_id()!=TEGRA3_PROJECT_P1801 ) {
		if (hdmi->nvhdcp) {
tegra_nvhdcp_resume(hdmi->nvhdcp); } } } #ifdef CONFIG_SWITCH static ssize_t underscan_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tegra_dc_hdmi_data *hdmi = container_of(dev_get_drvdata(dev), struct tegra_dc_hdmi_data, hpd_switch); if (hdmi->edid) return sprintf(buf, "%d\n", tegra_edid_underscan_supported(hdmi->edid)); else return 0; } static DEVICE_ATTR(underscan, S_IRUGO | S_IWUSR, underscan_show, NULL); #endif static int tegra_dc_hdmi_init(struct tegra_dc *dc) { struct tegra_dc_hdmi_data *hdmi; struct resource *res; struct resource *base_res; #ifdef CONFIG_SWITCH int ret; #endif void __iomem *base; struct clk *clk = NULL; struct clk *disp1_clk = NULL; struct clk *disp2_clk = NULL; int err; hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs"); if (!res) { dev_err(&dc->ndev->dev, "hdmi: no mem resource\n"); err = -ENOENT; goto err_free_hdmi; } base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name); if (!base_res) { dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n"); err = -EBUSY; goto err_free_hdmi; } base = ioremap(res->start, resource_size(res)); if (!base) { dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n"); err = -EBUSY; goto err_release_resource_reg; } clk = clk_get(&dc->ndev->dev, "hdmi"); if (IS_ERR_OR_NULL(clk)) { dev_err(&dc->ndev->dev, "hdmi: can't get clock\n"); err = -ENOENT; goto err_iounmap_reg; } disp1_clk = clk_get_sys("tegradc.0", NULL); if (IS_ERR_OR_NULL(disp1_clk)) { dev_err(&dc->ndev->dev, "hdmi: can't disp1 clock\n"); err = -ENOENT; goto err_put_clock; } disp2_clk = clk_get_sys("tegradc.1", NULL); if (IS_ERR_OR_NULL(disp2_clk)) { dev_err(&dc->ndev->dev, "hdmi: can't disp2 clock\n"); err = -ENOENT; goto err_put_clock; } #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) hdmi->hda_clk = clk_get_sys("tegra30-hda", "hda"); if (IS_ERR_OR_NULL(hdmi->hda_clk)) { dev_err(&dc->ndev->dev, "hdmi: can't 
get hda clock\n"); err = -ENOENT; goto err_put_clock; } hdmi->hda2codec_clk = clk_get_sys("tegra30-hda", "hda2codec"); if (IS_ERR_OR_NULL(hdmi->hda2codec_clk)) { dev_err(&dc->ndev->dev, "hdmi: can't get hda2codec clock\n"); err = -ENOENT; goto err_put_clock; } hdmi->hda2hdmi_clk = clk_get_sys("tegra30-hda", "hda2hdmi"); if (IS_ERR_OR_NULL(hdmi->hda2hdmi_clk)) { dev_err(&dc->ndev->dev, "hdmi: can't get hda2hdmi clock\n"); err = -ENOENT; goto err_put_clock; } #endif // remove mode filter for P1801. if (tegra3_get_project_id() == TEGRA3_PROJECT_P1801) { tegra_dc_hdmi_ops.mode_filter = NULL; } /* TODO: support non-hotplug */ if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq, IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&dc->ndev->dev), dc)) { dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n", gpio_to_irq(dc->out->hotplug_gpio)); err = -EBUSY; goto err_put_clock; } hdmi->edid = tegra_edid_create(dc->out->dcc_bus); if (IS_ERR_OR_NULL(hdmi->edid)) { dev_err(&dc->ndev->dev, "hdmi: can't create edid\n"); err = PTR_ERR(hdmi->edid); goto err_free_irq; } #ifdef CONFIG_TEGRA_NVHDCP if (tegra3_get_project_id() != TEGRA3_PROJECT_P1801) { hdmi->nvhdcp = tegra_nvhdcp_create(hdmi, dc->ndev->id, dc->out->dcc_bus); if (IS_ERR_OR_NULL(hdmi->nvhdcp)) { dev_err(&dc->ndev->dev, "hdmi: can't create nvhdcp\n"); err = PTR_ERR(hdmi->nvhdcp); goto err_edid_destroy; } } else hdmi->nvhdcp = NULL; #else hdmi->nvhdcp = NULL; #endif INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker); hdmi->dc = dc; hdmi->base = base; hdmi->base_res = base_res; hdmi->clk = clk; hdmi->disp1_clk = disp1_clk; hdmi->disp2_clk = disp2_clk; hdmi->suspended = false; hdmi->eld_retrieved= false; hdmi->clk_enabled = false; hdmi->audio_freq = 44100; hdmi->audio_source = AUTO; spin_lock_init(&hdmi->suspend_lock); #ifdef CONFIG_SWITCH hdmi->hpd_switch.name = "hdmi"; ret = switch_dev_register(&hdmi->hpd_switch); if (!ret) ret = device_create_file(hdmi->hpd_switch.dev, 
					 &dev_attr_underscan);
	BUG_ON(ret != 0);
#endif

	dc->out->depth = 24;

	tegra_dc_set_outdata(dc, hdmi);
	dc_hdmi = hdmi;

	if (hdmi->nvhdcp) {
		/* boards can select default content protection policy */
		if (dc->out->flags & TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND)
			tegra_nvhdcp_set_policy(hdmi->nvhdcp,
				TEGRA_NVHDCP_POLICY_ON_DEMAND);
		else
			tegra_nvhdcp_set_policy(hdmi->nvhdcp,
				TEGRA_NVHDCP_POLICY_ALWAYS_ON);
	}

	tegra_dc_hdmi_debug_create(hdmi);

	return 0;

	/* error unwind: labels run top-to-bottom, releasing resources in
	 * reverse order of acquisition */
#ifdef CONFIG_TEGRA_NVHDCP
err_edid_destroy:
	tegra_edid_destroy(hdmi->edid);
#endif
err_free_irq:
	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
err_put_clock:
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* HDA clock fields may be unset/IS_ERR depending on how far
	 * acquisition got, hence the guards */
	if (!IS_ERR_OR_NULL(hdmi->hda2hdmi_clk))
		clk_put(hdmi->hda2hdmi_clk);
	if (!IS_ERR_OR_NULL(hdmi->hda2codec_clk))
		clk_put(hdmi->hda2codec_clk);
	if (!IS_ERR_OR_NULL(hdmi->hda_clk))
		clk_put(hdmi->hda_clk);
#endif
	if (!IS_ERR_OR_NULL(disp2_clk))
		clk_put(disp2_clk);
	if (!IS_ERR_OR_NULL(disp1_clk))
		clk_put(disp1_clk);
	if (!IS_ERR_OR_NULL(clk))
		clk_put(clk);
err_iounmap_reg:
	iounmap(base);
err_release_resource_reg:
	release_resource(base_res);
err_free_hdmi:
	kfree(hdmi);
	return err;
}

/*
 * Tear down everything tegra_dc_hdmi_init() created: IRQ, pending
 * detect work, switch device, register mapping, clocks, EDID and HDCP.
 */
static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* free the IRQ before cancelling work so no new work gets queued */
	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
	cancel_delayed_work_sync(&hdmi->work);
#ifdef CONFIG_SWITCH
	switch_dev_unregister(&hdmi->hpd_switch);
#endif
	iounmap(hdmi->base);
	release_resource(hdmi->base_res);
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	clk_put(hdmi->hda2hdmi_clk);
	clk_put(hdmi->hda2codec_clk);
	clk_put(hdmi->hda_clk);
#endif
	clk_put(hdmi->clk);
	clk_put(hdmi->disp1_clk);
	clk_put(hdmi->disp2_clk);
	tegra_edid_destroy(hdmi->edid);
	if (hdmi->nvhdcp) {
		tegra_nvhdcp_destroy(hdmi->nvhdcp);
	}

	kfree(hdmi);
}

/*
 * Program the AUDIO_FS window registers for each supported sample rate.
 * Writes a low/high bound around the expected cycles-per-sample value
 * (eight_half) with a rate-dependent tolerance (delta).
 */
static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
	int i;
	unsigned freqs[] = {
		32000, 44100, 48000, 88200, 96000, 176400, 192000,
	};

	for (i = 0; i <
ARRAY_SIZE(freqs); i++) { unsigned f = freqs[i]; unsigned eight_half; unsigned delta;; if (f > 96000) delta = 2; else if (f > 48000) delta = 6; else delta = 9; eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) | AUDIO_FS_HIGH(eight_half + delta), HDMI_NV_PDISP_AUDIO_FS(i)); } } #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) static void tegra_dc_hdmi_setup_eld_buff(struct tegra_dc *dc) { int i; int j; u8 tmp; struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); /* program ELD stuff */ for (i = 0; i < HDMI_ELD_MONITOR_NAME_INDEX; i++) { switch (i) { case HDMI_ELD_VER_INDEX: tmp = (hdmi->eld.eld_ver << 3); tegra_hdmi_writel(hdmi, (i << 8) | tmp, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); break; case HDMI_ELD_BASELINE_LEN_INDEX: break; case HDMI_ELD_CEA_VER_MNL_INDEX: tmp = (hdmi->eld.cea_edid_ver << 5); tmp |= (hdmi->eld.mnl & 0x1f); tegra_hdmi_writel(hdmi, (i << 8) | tmp, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); break; case HDMI_ELD_SAD_CNT_CON_TYP_SAI_HDCP_INDEX: tmp = (hdmi->eld.sad_count << 4); tmp |= (hdmi->eld.conn_type & 0xC); tmp |= (hdmi->eld.support_ai & 0x2); tmp |= (hdmi->eld.support_hdcp & 0x1); tegra_hdmi_writel(hdmi, (i << 8) | tmp, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); break; case HDMI_ELD_AUD_SYNC_DELAY_INDEX: tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.aud_synch_delay), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); break; case HDMI_ELD_SPK_ALLOC_INDEX: tegra_hdmi_writel(hdmi, (i << 8) | (hdmi->eld.spk_alloc), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); break; case HDMI_ELD_PORT_ID_INDEX: for (j = 0; j < 8;j++) { tegra_hdmi_writel(hdmi, ((i +j) << 8) | (hdmi->eld.port_id[j]), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); } break; case HDMI_ELD_MANF_NAME_INDEX: for (j = 0; j < 2;j++) { tegra_hdmi_writel(hdmi, ((i +j) << 8) | (hdmi->eld.manufacture_id[j]), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); } break; case HDMI_ELD_PRODUCT_CODE_INDEX: for (j = 0; j < 2;j++) { tegra_hdmi_writel(hdmi, ((i +j) << 8) 
| (hdmi->eld.product_id[j]), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); } break; } } for (j = 0; j < hdmi->eld.mnl;j++) { tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX) << 8) | (hdmi->eld.monitor_name[j]), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); } for (j = 0; j < hdmi->eld.sad_count;j++) { tegra_hdmi_writel(hdmi, ((j + HDMI_ELD_MONITOR_NAME_INDEX + hdmi->eld.mnl) << 8) | (hdmi->eld.sad[j]), HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR_0); } /* set presence andvalid bit */ tegra_hdmi_writel(hdmi, 3, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0); } #endif static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc, unsigned audio_freq, unsigned audio_source) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); const struct tegra_hdmi_audio_config *config; unsigned long audio_n; #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) unsigned long reg_addr = 0; #endif unsigned a_source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; if (HDA == audio_source) a_source = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; else if (SPDIF == audio_source) a_source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) if (hdmi->audio_inject_null) a_source |= AUDIO_CNTRL0_INJECT_NULLSMPL; tegra_hdmi_writel(hdmi,a_source, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0); tegra_hdmi_writel(hdmi, AUDIO_CNTRL0_ERROR_TOLERANCE(6) | AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0), HDMI_NV_PDISP_AUDIO_CNTRL0); #else tegra_hdmi_writel(hdmi, AUDIO_CNTRL0_ERROR_TOLERANCE(6) | AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) | a_source, HDMI_NV_PDISP_AUDIO_CNTRL0); #endif config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk); if (!config) { dev_err(&dc->ndev->dev, "hdmi: can't set audio to %d at %d pix_clock", audio_freq, dc->mode.pclk); return -EINVAL; } tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL); audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE | AUDIO_N_VALUE(config->n - 1); tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N); tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, 
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts), HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1), HDMI_NV_PDISP_HDMI_SPARE); audio_n &= ~AUDIO_N_RESETF; tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N); #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) switch (audio_freq) { case AUDIO_FREQ_32K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320_0; break; case AUDIO_FREQ_44_1K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441_0; break; case AUDIO_FREQ_48K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480_0; break; case AUDIO_FREQ_88_2K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882_0; break; case AUDIO_FREQ_96K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960_0; break; case AUDIO_FREQ_176_4K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764_0; break; case AUDIO_FREQ_192K: reg_addr = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920_0; break; } tegra_hdmi_writel(hdmi, config->aval, reg_addr); #endif tegra_dc_hdmi_setup_audio_fs_tables(dc); return 0; } int tegra_hdmi_setup_audio_freq_source(unsigned audio_freq, unsigned audio_source) { struct tegra_dc_hdmi_data *hdmi = dc_hdmi; if (!hdmi) return -EAGAIN; /* check for know freq */ if (AUDIO_FREQ_32K == audio_freq || AUDIO_FREQ_44_1K== audio_freq || AUDIO_FREQ_48K== audio_freq || AUDIO_FREQ_88_2K== audio_freq || AUDIO_FREQ_96K== audio_freq || AUDIO_FREQ_176_4K== audio_freq || AUDIO_FREQ_192K== audio_freq) { /* If we can program HDMI, then proceed */ if (hdmi->clk_enabled) tegra_dc_hdmi_setup_audio(hdmi->dc, audio_freq,audio_source); /* Store it for using it in enable */ hdmi->audio_freq = audio_freq; hdmi->audio_source = audio_source; } else return -EINVAL; return 0; } EXPORT_SYMBOL(tegra_hdmi_setup_audio_freq_source); #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) int tegra_hdmi_audio_null_sample_inject(bool on) { struct tegra_dc_hdmi_data *hdmi = dc_hdmi; unsigned int val = 0; if (!hdmi) return -EAGAIN; if (hdmi->audio_inject_null != on) { 
hdmi->audio_inject_null = on; if (hdmi->clk_enabled) { val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0); val &= ~AUDIO_CNTRL0_INJECT_NULLSMPL; if (on) val |= AUDIO_CNTRL0_INJECT_NULLSMPL; tegra_hdmi_writel(hdmi,val, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0_0); } } return 0; } EXPORT_SYMBOL(tegra_hdmi_audio_null_sample_inject); int tegra_hdmi_setup_hda_presence() { struct tegra_dc_hdmi_data *hdmi = dc_hdmi; if (!hdmi) return -EAGAIN; if (hdmi->clk_enabled && hdmi->eld_retrieved) { /* If HDA_PRESENCE is already set reset it */ if (tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0)) tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0); tegra_dc_hdmi_setup_eld_buff(hdmi->dc); } else return -ENODEV; return 0; } EXPORT_SYMBOL(tegra_hdmi_setup_hda_presence); #endif static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg, u8 type, u8 version, void *data, int len) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); u32 subpack[2]; /* extra byte for zero padding of subpack */ int i; u8 csum; /* first byte of data is the checksum */ csum = type + version + len - 1; for (i = 1; i < len; i++) csum +=((u8 *)data)[i]; ((u8 *)data)[0] = 0x100 - csum; tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) | INFOFRAME_HEADER_VERSION(version) | INFOFRAME_HEADER_LEN(len - 1), header_reg); /* The audio inforame only has one set of subpack registers. The hdmi * block pads the rest of the data as per the spec so we have to fixup * the length before filling in the subpacks. 
*/ if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER) len = 6; /* each subpack 7 bytes devided into: * subpack_low - bytes 0 - 3 * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00) */ for (i = 0; i < len; i++) { int subpack_idx = i % 7; if (subpack_idx == 0) memset(subpack, 0x0, sizeof(subpack)); ((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i]; if (subpack_idx == 6 || (i + 1 == len)) { int reg = header_reg + 1 + (i / 7) * 2; tegra_hdmi_writel(hdmi, subpack[0], reg); tegra_hdmi_writel(hdmi, subpack[1], reg + 1); } } } static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); struct hdmi_avi_infoframe avi; if (dvi) { tegra_hdmi_writel(hdmi, 0x0, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); return; } memset(&avi, 0x0, sizeof(avi)); avi.r = HDMI_AVI_R_SAME; if ((dc->mode.h_active == 720) && ((dc->mode.v_active == 480) || (dc->mode.v_active == 576))) tegra_dc_writel(dc, 0x00101010, DC_DISP_BORDER_COLOR); else tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR); if (dc->mode.v_active == 480) { if (dc->mode.h_active == 640) { avi.m = HDMI_AVI_M_4_3; avi.vic = 1; } else { avi.m = HDMI_AVI_M_16_9; avi.vic = 3; } } else if (dc->mode.v_active == 576) { /* CEC modes 17 and 18 differ only by the pysical size of the * screen so we have to calculation the physical aspect * ratio. 
4 * 10 / 3 is 13 */ if ((dc->out->h_size * 10) / dc->out->v_size > 14) { avi.m = HDMI_AVI_M_16_9; avi.vic = 18; } else { avi.m = HDMI_AVI_M_4_3; avi.vic = 17; } } else if (dc->mode.v_active == 720 || (dc->mode.v_active == 1470 && dc->mode.stereo_mode)) { /* VIC for both 720p and 720p 3D mode */ avi.m = HDMI_AVI_M_16_9; if (dc->mode.h_front_porch == 110) avi.vic = 4; /* 60 Hz */ else avi.vic = 19; /* 50 Hz */ } else if (dc->mode.v_active == 1080 || (dc->mode.v_active == 2205 && dc->mode.stereo_mode)) { /* VIC for both 1080p and 1080p 3D mode */ avi.m = HDMI_AVI_M_16_9; if (dc->mode.h_front_porch == 88) { if (dc->mode.pclk > 74250000) avi.vic = 16; /* 60 Hz */ else avi.vic = 34; /* 30 Hz */ } else if (dc->mode.h_front_porch == 528) { if (dc->mode.pclk > 74250000) avi.vic = 31; /* 50 Hz */ else avi.vic = 33; /* 25 Hz */ } else avi.vic = 32; /* 24 Hz */ } else { avi.m = HDMI_AVI_M_16_9; avi.vic = 0; } tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION, &avi, sizeof(avi)); tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); } static void tegra_dc_hdmi_setup_stereo_infoframe(struct tegra_dc *dc) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); struct hdmi_stereo_infoframe stereo; u32 val; if (!dc->mode.stereo_mode) { val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); val &= ~GENERIC_CTRL_ENABLE; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); return; } memset(&stereo, 0x0, sizeof(stereo)); stereo.regid0 = 0x03; stereo.regid1 = 0x0c; stereo.regid2 = 0x00; stereo.hdmi_video_format = 2; /* 3D_Structure present */ #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT stereo._3d_structure = 0; /* frame packing */ #else stereo._3d_structure = 8; /* side-by-side (half) */ stereo._3d_ext_data = 0; /* something which fits into 00XX bit req */ #endif tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_GENERIC_HEADER, HDMI_INFOFRAME_TYPE_VENDOR, 
HDMI_VENDOR_VERSION, &stereo, 6); val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); val |= GENERIC_CTRL_ENABLE; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); } static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); struct hdmi_audio_infoframe audio; if (dvi) { tegra_hdmi_writel(hdmi, 0x0, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); return; } memset(&audio, 0x0, sizeof(audio)); audio.cc = HDMI_AUDIO_CC_2; tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, HDMI_INFOFRAME_TYPE_AUDIO, HDMI_AUDIO_VERSION, &audio, sizeof(audio)); tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); } static void tegra_dc_hdmi_setup_tdms(struct tegra_dc_hdmi_data *hdmi, const struct tdms_config *tc) { tegra_hdmi_writel(hdmi, tc->pll0, HDMI_NV_PDISP_SOR_PLL0); tegra_hdmi_writel(hdmi, tc->pll1, HDMI_NV_PDISP_SOR_PLL1); tegra_hdmi_writel(hdmi, tc->pe_current, HDMI_NV_PDISP_PE_CURRENT); tegra_hdmi_writel(hdmi, tc->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); } static void tegra_dc_hdmi_enable(struct tegra_dc *dc) { struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc); int pulse_start; int dispclk_div_8_2; int retries; int rekey; int err; unsigned long val; unsigned i; unsigned long oldrate; /* enbale power, clocks, resets, etc. */ /* The upstream DC needs to be clocked for accesses to HDMI to not * hard lock the system. Because we don't know if HDMI is conencted * to disp1 or disp2 we need to enable both until we set the DC mux. */ clk_enable(hdmi->disp1_clk); clk_enable(hdmi->disp2_clk); #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) /* Enabling HDA clocks before asserting HDA PD and ELDV bits */ clk_enable(hdmi->hda_clk); clk_enable(hdmi->hda2codec_clk); clk_enable(hdmi->hda2hdmi_clk); #endif /* back off multiplier before attaching to parent at new rate. 
*/ if(tegra3_get_project_id()!=TEGRA3_PROJECT_P1801) { oldrate = clk_get_rate(hdmi->clk); clk_set_rate(hdmi->clk, oldrate / 2); } tegra_dc_setup_clk(dc, hdmi->clk); clk_set_rate(hdmi->clk, dc->mode.pclk); clk_enable(hdmi->clk); if(tegra3_get_project_id()!=TEGRA3_PROJECT_P1801) { tegra_periph_reset_assert(hdmi->clk); mdelay(1); tegra_periph_reset_deassert(hdmi->clk); } /* TODO: copy HDCP keys from KFUSE to HDMI */ /* Program display timing registers: handled by dc */ /* program HDMI registers and SOR sequencer */ tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS); tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, DC_DISP_DISP_COLOR_CONTROL); /* video_preamble uses h_pulse2 */ pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width + dc->mode.h_back_porch - 10; tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); tegra_dc_writel(dc, PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | PULSE_LAST_END_A, DC_DISP_H_PULSE2_CONTROL); tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8), DC_DISP_H_PULSE2_POSITION_A); tegra_hdmi_writel(hdmi, VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) | VSYNC_WINDOW_ENABLE, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); if ((dc->mode.h_active == 720) && ((dc->mode.v_active == 480) || (dc->mode.v_active == 576))) tegra_hdmi_writel(hdmi, (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) | ARM_VIDEO_RANGE_FULL, HDMI_NV_PDISP_INPUT_CONTROL); else tegra_hdmi_writel(hdmi, (dc->ndev->id ? 
HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) | ARM_VIDEO_RANGE_LIMITED, HDMI_NV_PDISP_INPUT_CONTROL); clk_disable(hdmi->disp1_clk); clk_disable(hdmi->disp2_clk); dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4; tegra_hdmi_writel(hdmi, SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) | SOR_REFCLK_DIV_FRAC(dispclk_div_8_2), HDMI_NV_PDISP_SOR_REFCLK); hdmi->clk_enabled = true; if(tegra3_get_project_id()==TEGRA3_PROJECT_P1801) hdmi->dvi = 1; if (!hdmi->dvi) { err = tegra_dc_hdmi_setup_audio(dc, hdmi->audio_freq, hdmi->audio_source); if (err < 0) hdmi->dvi = true; } #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) if (hdmi->eld_retrieved) tegra_dc_hdmi_setup_eld_buff(dc); #endif rekey = HDMI_REKEY_DEFAULT; val = HDMI_CTRL_REKEY(rekey); val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width + dc->mode.h_back_porch + dc->mode.h_front_porch - rekey - 18) / 32); if (!hdmi->dvi) val |= HDMI_CTRL_ENABLE; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL); if (hdmi->dvi) tegra_hdmi_writel(hdmi, 0x0, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); else tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); tegra_dc_hdmi_setup_avi_infoframe(dc, hdmi->dvi); tegra_dc_hdmi_setup_audio_infoframe(dc, hdmi->dvi); tegra_dc_hdmi_setup_stereo_infoframe(dc); /* TMDS CONFIG */ for (i = 0; i < ARRAY_SIZE(tdms_config); i++) { if (dc->mode.pclk <= tdms_config[i].pclk) { tegra_dc_hdmi_setup_tdms(hdmi, &tdms_config[i]); break; } } tegra_hdmi_writel(hdmi, SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8), HDMI_NV_PDISP_SOR_SEQ_CTL); val = SOR_SEQ_INST_WAIT_TIME(1) | SOR_SEQ_INST_WAIT_UNITS_VSYNC | SOR_SEQ_INST_HALT | SOR_SEQ_INST_PIN_A_LOW | SOR_SEQ_INST_PIN_B_LOW | SOR_SEQ_INST_DRIVE_PWM_OUT_LO; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0); tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8); val = 0x1c800; val &= ~SOR_CSTM_ROTCLK(~0); val |= SOR_CSTM_ROTCLK(2); tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM); tegra_dc_writel(dc, 
DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND); tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); /* start SOR */ tegra_hdmi_writel(hdmi, SOR_PWR_NORMAL_STATE_PU | SOR_PWR_NORMAL_START_NORMAL | SOR_PWR_SAFE_STATE_PD | SOR_PWR_SETTING_NEW_TRIGGER, HDMI_NV_PDISP_SOR_PWR); tegra_hdmi_writel(hdmi, SOR_PWR_NORMAL_STATE_PU | SOR_PWR_NORMAL_START_NORMAL | SOR_PWR_SAFE_STATE_PD | SOR_PWR_SETTING_NEW_DONE, HDMI_NV_PDISP_SOR_PWR); retries = 1000; do { BUG_ON(--retries < 0); val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR); } while (val & SOR_PWR_SETTING_NEW_PENDING); val = SOR_STATE_ASY_CRCMODE_COMPLETE | SOR_STATE_ASY_OWNER_HEAD0 | SOR_STATE_ASY_SUBOWNER_BOTH | SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A | SOR_STATE_ASY_DEPOL_POS; if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC) val |= SOR_STATE_ASY_HSYNCPOL_NEG; else val |= SOR_STATE_ASY_HSYNCPOL_POS; if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC) val |= SOR_STATE_ASY_VSYNCPOL_NEG; else val |= SOR_STATE_ASY_VSYNCPOL_POS; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE2); val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL; tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0); tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED, HDMI_NV_PDISP_SOR_STATE1); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS); tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE, DC_CMD_DISPLAY_POWER_CONTROL); tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND); tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); if (hdmi->nvhdcp) { tegra_nvhdcp_set_plug(hdmi->nvhdcp, 1); } } static void tegra_dc_hdmi_disable(struct 
tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);

	/* tell HDCP the link is going away before powering anything down */
	if (hdmi->nvhdcp) {
		tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
	}

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	/* clear HDA presence/valid bits so the audio side stops using us */
	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE_0);
	/* sleep 1ms before disabling clocks to ensure HDA gets the interrupt */
	msleep(1);
	clk_disable(hdmi->hda2hdmi_clk);
	clk_disable(hdmi->hda2codec_clk);
	clk_disable(hdmi->hda_clk);
#endif
	/* hold the block in reset, then gate its clock and drop DVFS floor */
	tegra_periph_reset_assert(hdmi->clk);
	hdmi->clk_enabled = false;
	clk_disable(hdmi->clk);
	tegra_dvfs_set_rate(hdmi->clk, 0);
}

/* tegra_dc output-ops vtable for the HDMI path.
 * NOTE: .mode_filter is cleared at runtime for P1801 boards in
 * tegra_dc_hdmi_init(), so this struct is intentionally non-const. */
struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
	.init = tegra_dc_hdmi_init,
	.destroy = tegra_dc_hdmi_destroy,
	.enable = tegra_dc_hdmi_enable,
	.disable = tegra_dc_hdmi_disable,
	.detect = tegra_dc_hdmi_detect,
	.suspend = tegra_dc_hdmi_suspend,
	.resume = tegra_dc_hdmi_resume,
	.mode_filter = tegra_dc_hdmi_mode_filter,
};

/*
 * Hand out the sink's EDID data for this DC head, or ERR_PTR(-ENODEV)
 * for non-HDMI outputs.  Pair with tegra_dc_put_edid().
 */
struct tegra_dc_edid *tegra_dc_get_edid(struct tegra_dc *dc)
{
	struct tegra_dc_hdmi_data *hdmi;

	/* TODO: Support EDID on non-HDMI devices */
	if (dc->out->type != TEGRA_DC_OUT_HDMI)
		return ERR_PTR(-ENODEV);

	hdmi = tegra_dc_get_outdata(dc);

	return tegra_edid_get_data(hdmi->edid);
}
EXPORT_SYMBOL(tegra_dc_get_edid);

/* Release EDID data obtained from tegra_dc_get_edid(). */
void tegra_dc_put_edid(struct tegra_dc_edid *edid)
{
	tegra_edid_put_data(edid);
}
EXPORT_SYMBOL(tegra_dc_put_edid);
gpl-2.0
XiphosSystemsCorp/busybox
miscutils/beep.c
267
3013
/* vi: set sw=4 ts=4: */ /* * beep implementation for busybox * * Copyright (C) 2009 Bernhard Reutner-Fischer * * Licensed under GPLv2 or later, see file LICENSE in this source tree. * */ //usage:#define beep_trivial_usage //usage: "-f FREQ -l LEN -d DELAY -r COUNT -n" //usage:#define beep_full_usage "\n\n" //usage: " -f Frequency in Hz" //usage: "\n -l Length in ms" //usage: "\n -d Delay in ms" //usage: "\n -r Repetitions" //usage: "\n -n Start new tone" #include "libbb.h" #include <linux/kd.h> #ifndef CLOCK_TICK_RATE # define CLOCK_TICK_RATE 1193180 #endif /* defaults */ #ifndef CONFIG_FEATURE_BEEP_FREQ # define FREQ (4000) #else # define FREQ (CONFIG_FEATURE_BEEP_FREQ) #endif #ifndef CONFIG_FEATURE_BEEP_LENGTH_MS # define LENGTH (30) #else # define LENGTH (CONFIG_FEATURE_BEEP_LENGTH_MS) #endif #define DELAY (0) #define REPETITIONS (1) int beep_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int beep_main(int argc, char **argv) { int speaker = get_console_fd_or_die(); unsigned tickrate_div_freq = tickrate_div_freq; /* for compiler */ unsigned length = length; unsigned delay = delay; unsigned rep = rep; int c; c = 'n'; while (c != -1) { if (c == 'n') { tickrate_div_freq = CLOCK_TICK_RATE / FREQ; length = LENGTH; delay = DELAY; rep = REPETITIONS; } c = getopt(argc, argv, "f:l:d:r:n"); /* TODO: -s, -c: * pipe stdin to stdout, but also beep after each line (-s) or char (-c) */ switch (c) { case 'f': /* TODO: what "-f 0" should do? */ tickrate_div_freq = (unsigned)CLOCK_TICK_RATE / xatou(optarg); continue; case 'l': length = xatou(optarg); continue; case 'd': /* TODO: * -d N, -D N * specify a delay of N milliseconds between repetitions. * -d specifies that this delay should only occur between beeps, * that is, it should not occur after the last repetition. 
* -D indicates that the delay should occur after every repetition */ delay = xatou(optarg); continue; case 'r': rep = xatou(optarg); continue; case 'n': case -1: break; default: bb_show_usage(); } while (rep) { //bb_info_msg("rep[%d] freq=%d, length=%d, delay=%d", rep, freq, length, delay); xioctl(speaker, KIOCSOUND, (void*)(uintptr_t)tickrate_div_freq); usleep(1000 * length); ioctl(speaker, KIOCSOUND, (void*)0); if (--rep) usleep(1000 * delay); } } if (ENABLE_FEATURE_CLEAN_UP) close(speaker); return EXIT_SUCCESS; } /* * so, e.g. Beethoven's 9th symphony "Ode an die Freude" would be * something like: a=$((220*3)) b=$((247*3)) c=$((262*3)) d=$((294*3)) e=$((329*3)) f=$((349*3)) g=$((392*3)) #./beep -f$d -l200 -r2 -n -f$e -l100 -d 10 -n -f$c -l400 -f$g -l200 ./beep -f$e -l200 -r2 \ -n -d 100 -f$f -l200 \ -n -f$g -l200 -r2 \ -n -f$f -l200 \ -n -f$e -l200 \ -n -f$d -l200 \ -n -f$c -l200 -r2 \ -n -f$d -l200 \ -n -f$e -l200 \ -n -f$e -l400 \ -n -f$d -l100 \ -n -f$d -l200 \ */
gpl-2.0
somya-anand/y2038
drivers/regulator/max8998.c
523
24288
/* * max8998.c - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/max8998.h> #include <linux/mfd/max8998-private.h> struct max8998_data { struct device *dev; struct max8998_dev *iodev; int num_regulators; u8 buck1_vol[4]; /* voltages for selection */ u8 buck2_vol[2]; unsigned int buck1_idx; /* index to last changed voltage */ /* value in a set */ unsigned int buck2_idx; }; struct voltage_map_desc { int min; int max; int step; }; /* Voltage maps in uV*/ static const struct voltage_map_desc ldo23_voltage_map_desc = { .min = 800000, .step = 50000, .max = 1300000, }; static const struct voltage_map_desc ldo456711_voltage_map_desc = { .min = 1600000, .step = 100000, .max = 3600000, }; static const struct voltage_map_desc ldo8_voltage_map_desc = { .min = 3000000, .step = 100000, 
.max = 3600000, }; static const struct voltage_map_desc ldo9_voltage_map_desc = { .min = 2800000, .step = 100000, .max = 3100000, }; static const struct voltage_map_desc ldo10_voltage_map_desc = { .min = 950000, .step = 50000, .max = 1300000, }; static const struct voltage_map_desc ldo1213_voltage_map_desc = { .min = 800000, .step = 100000, .max = 3300000, }; static const struct voltage_map_desc ldo1415_voltage_map_desc = { .min = 1200000, .step = 100000, .max = 3300000, }; static const struct voltage_map_desc ldo1617_voltage_map_desc = { .min = 1600000, .step = 100000, .max = 3600000, }; static const struct voltage_map_desc buck12_voltage_map_desc = { .min = 750000, .step = 25000, .max = 1525000, }; static const struct voltage_map_desc buck3_voltage_map_desc = { .min = 1600000, .step = 100000, .max = 3600000, }; static const struct voltage_map_desc buck4_voltage_map_desc = { .min = 800000, .step = 100000, .max = 2300000, }; static const struct voltage_map_desc *ldo_voltage_map[] = { NULL, NULL, &ldo23_voltage_map_desc, /* LDO2 */ &ldo23_voltage_map_desc, /* LDO3 */ &ldo456711_voltage_map_desc, /* LDO4 */ &ldo456711_voltage_map_desc, /* LDO5 */ &ldo456711_voltage_map_desc, /* LDO6 */ &ldo456711_voltage_map_desc, /* LDO7 */ &ldo8_voltage_map_desc, /* LDO8 */ &ldo9_voltage_map_desc, /* LDO9 */ &ldo10_voltage_map_desc, /* LDO10 */ &ldo456711_voltage_map_desc, /* LDO11 */ &ldo1213_voltage_map_desc, /* LDO12 */ &ldo1213_voltage_map_desc, /* LDO13 */ &ldo1415_voltage_map_desc, /* LDO14 */ &ldo1415_voltage_map_desc, /* LDO15 */ &ldo1617_voltage_map_desc, /* LDO16 */ &ldo1617_voltage_map_desc, /* LDO17 */ &buck12_voltage_map_desc, /* BUCK1 */ &buck12_voltage_map_desc, /* BUCK2 */ &buck3_voltage_map_desc, /* BUCK3 */ &buck4_voltage_map_desc, /* BUCK4 */ }; static int max8998_get_enable_register(struct regulator_dev *rdev, int *reg, int *shift) { int ldo = rdev_get_id(rdev); switch (ldo) { case MAX8998_LDO2 ... 
MAX8998_LDO5: *reg = MAX8998_REG_ONOFF1; *shift = 3 - (ldo - MAX8998_LDO2); break; case MAX8998_LDO6 ... MAX8998_LDO13: *reg = MAX8998_REG_ONOFF2; *shift = 7 - (ldo - MAX8998_LDO6); break; case MAX8998_LDO14 ... MAX8998_LDO17: *reg = MAX8998_REG_ONOFF3; *shift = 7 - (ldo - MAX8998_LDO14); break; case MAX8998_BUCK1 ... MAX8998_BUCK4: *reg = MAX8998_REG_ONOFF1; *shift = 7 - (ldo - MAX8998_BUCK1); break; case MAX8998_EN32KHZ_AP ... MAX8998_ENVICHG: *reg = MAX8998_REG_ONOFF4; *shift = 7 - (ldo - MAX8998_EN32KHZ_AP); break; case MAX8998_ESAFEOUT1 ... MAX8998_ESAFEOUT2: *reg = MAX8998_REG_CHGR2; *shift = 7 - (ldo - MAX8998_ESAFEOUT1); break; default: return -EINVAL; } return 0; } static int max8998_ldo_is_enabled(struct regulator_dev *rdev) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; int ret, reg, shift = 8; u8 val; ret = max8998_get_enable_register(rdev, &reg, &shift); if (ret) return ret; ret = max8998_read_reg(i2c, reg, &val); if (ret) return ret; return val & (1 << shift); } static int max8998_ldo_enable(struct regulator_dev *rdev) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; int reg, shift = 8, ret; ret = max8998_get_enable_register(rdev, &reg, &shift); if (ret) return ret; return max8998_update_reg(i2c, reg, 1<<shift, 1<<shift); } static int max8998_ldo_disable(struct regulator_dev *rdev) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; int reg, shift = 8, ret; ret = max8998_get_enable_register(rdev, &reg, &shift); if (ret) return ret; return max8998_update_reg(i2c, reg, 0, 1<<shift); } static int max8998_get_voltage_register(struct regulator_dev *rdev, int *_reg, int *_shift, int *_mask) { int ldo = rdev_get_id(rdev); struct max8998_data *max8998 = rdev_get_drvdata(rdev); int reg, shift = 0, mask = 0xff; switch (ldo) { case MAX8998_LDO2 ... 
MAX8998_LDO3: reg = MAX8998_REG_LDO2_LDO3; mask = 0xf; if (ldo == MAX8998_LDO2) shift = 4; else shift = 0; break; case MAX8998_LDO4 ... MAX8998_LDO7: reg = MAX8998_REG_LDO4 + (ldo - MAX8998_LDO4); break; case MAX8998_LDO8 ... MAX8998_LDO9: reg = MAX8998_REG_LDO8_LDO9; mask = 0xf; if (ldo == MAX8998_LDO8) shift = 4; else shift = 0; break; case MAX8998_LDO10 ... MAX8998_LDO11: reg = MAX8998_REG_LDO10_LDO11; if (ldo == MAX8998_LDO10) { shift = 5; mask = 0x7; } else { shift = 0; mask = 0x1f; } break; case MAX8998_LDO12 ... MAX8998_LDO17: reg = MAX8998_REG_LDO12 + (ldo - MAX8998_LDO12); break; case MAX8998_BUCK1: reg = MAX8998_REG_BUCK1_VOLTAGE1 + max8998->buck1_idx; break; case MAX8998_BUCK2: reg = MAX8998_REG_BUCK2_VOLTAGE1 + max8998->buck2_idx; break; case MAX8998_BUCK3: reg = MAX8998_REG_BUCK3; break; case MAX8998_BUCK4: reg = MAX8998_REG_BUCK4; break; default: return -EINVAL; } *_reg = reg; *_shift = shift; *_mask = mask; return 0; } static int max8998_get_voltage_sel(struct regulator_dev *rdev) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; int reg, shift = 0, mask, ret; u8 val; ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask); if (ret) return ret; ret = max8998_read_reg(i2c, reg, &val); if (ret) return ret; val >>= shift; val &= mask; return val; } static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev, unsigned selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; int reg, shift = 0, mask, ret; ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask); if (ret) return ret; ret = max8998_update_reg(i2c, reg, selector<<shift, mask<<shift); return ret; } static inline void buck1_gpio_set(int gpio1, int gpio2, int v) { gpio_set_value(gpio1, v & 0x1); gpio_set_value(gpio2, (v >> 1) & 0x1); } static inline void buck2_gpio_set(int gpio, int v) { gpio_set_value(gpio, v & 0x1); } static int max8998_set_voltage_buck_sel(struct 
regulator_dev *rdev, unsigned selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct max8998_platform_data *pdata = dev_get_platdata(max8998->iodev->dev); struct i2c_client *i2c = max8998->iodev->i2c; int buck = rdev_get_id(rdev); int reg, shift = 0, mask, ret, j; static u8 buck1_last_val; ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask); if (ret) return ret; switch (buck) { case MAX8998_BUCK1: dev_dbg(max8998->dev, "BUCK1, selector:%d, buck1_vol1:%d, buck1_vol2:%d\n" "buck1_vol3:%d, buck1_vol4:%d\n", selector, max8998->buck1_vol[0], max8998->buck1_vol[1], max8998->buck1_vol[2], max8998->buck1_vol[3]); if (gpio_is_valid(pdata->buck1_set1) && gpio_is_valid(pdata->buck1_set2)) { /* check if requested voltage */ /* value is already defined */ for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) { if (max8998->buck1_vol[j] == selector) { max8998->buck1_idx = j; buck1_gpio_set(pdata->buck1_set1, pdata->buck1_set2, j); goto buck1_exit; } } if (pdata->buck_voltage_lock) return -EINVAL; /* no predefine regulator found */ max8998->buck1_idx = (buck1_last_val % 2) + 2; dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n", max8998->buck1_idx); max8998->buck1_vol[max8998->buck1_idx] = selector; ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask); ret = max8998_write_reg(i2c, reg, selector); buck1_gpio_set(pdata->buck1_set1, pdata->buck1_set2, max8998->buck1_idx); buck1_last_val++; buck1_exit: dev_dbg(max8998->dev, "%s: SET1:%d, SET2:%d\n", i2c->name, gpio_get_value(pdata->buck1_set1), gpio_get_value(pdata->buck1_set2)); break; } else { ret = max8998_write_reg(i2c, reg, selector); } break; case MAX8998_BUCK2: dev_dbg(max8998->dev, "BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n", selector, max8998->buck2_vol[0], max8998->buck2_vol[1]); if (gpio_is_valid(pdata->buck2_set3)) { /* check if requested voltage */ /* value is already defined */ for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) { if (max8998->buck2_vol[j] == selector) { 
max8998->buck2_idx = j; buck2_gpio_set(pdata->buck2_set3, j); goto buck2_exit; } } if (pdata->buck_voltage_lock) return -EINVAL; max8998_get_voltage_register(rdev, &reg, &shift, &mask); ret = max8998_write_reg(i2c, reg, selector); max8998->buck2_vol[max8998->buck2_idx] = selector; buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx); buck2_exit: dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name, gpio_get_value(pdata->buck2_set3)); } else { ret = max8998_write_reg(i2c, reg, selector); } break; case MAX8998_BUCK3: case MAX8998_BUCK4: ret = max8998_update_reg(i2c, reg, selector<<shift, mask<<shift); break; } return ret; } static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev, unsigned int old_selector, unsigned int new_selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; const struct voltage_map_desc *desc; int buck = rdev_get_id(rdev); u8 val = 0; int difference, ret; if (buck < MAX8998_BUCK1 || buck > MAX8998_BUCK4) return -EINVAL; desc = ldo_voltage_map[buck]; /* Voltage stabilization */ ret = max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val); if (ret) return ret; /* lp3974 hasn't got ENRAMP bit - ramp is assumed as true */ /* MAX8998 has ENRAMP bit implemented, so test it*/ if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP)) return 0; difference = (new_selector - old_selector) * desc->step / 1000; if (difference > 0) return DIV_ROUND_UP(difference, (val & 0x0f) + 1); return 0; } static struct regulator_ops max8998_ldo_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .is_enabled = max8998_ldo_is_enabled, .enable = max8998_ldo_enable, .disable = max8998_ldo_disable, .get_voltage_sel = max8998_get_voltage_sel, .set_voltage_sel = max8998_set_voltage_ldo_sel, }; static struct regulator_ops max8998_buck_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .is_enabled = 
max8998_ldo_is_enabled, .enable = max8998_ldo_enable, .disable = max8998_ldo_disable, .get_voltage_sel = max8998_get_voltage_sel, .set_voltage_sel = max8998_set_voltage_buck_sel, .set_voltage_time_sel = max8998_set_voltage_buck_time_sel, }; static struct regulator_ops max8998_others_ops = { .is_enabled = max8998_ldo_is_enabled, .enable = max8998_ldo_enable, .disable = max8998_ldo_disable, }; static struct regulator_desc regulators[] = { { .name = "LDO2", .id = MAX8998_LDO2, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO3", .id = MAX8998_LDO3, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO4", .id = MAX8998_LDO4, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO5", .id = MAX8998_LDO5, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO6", .id = MAX8998_LDO6, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO7", .id = MAX8998_LDO7, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO8", .id = MAX8998_LDO8, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO9", .id = MAX8998_LDO9, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO10", .id = MAX8998_LDO10, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO11", .id = MAX8998_LDO11, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO12", .id = MAX8998_LDO12, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO13", .id = MAX8998_LDO13, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO14", .id = MAX8998_LDO14, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO15", .id = MAX8998_LDO15, .ops = 
&max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO16", .id = MAX8998_LDO16, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO17", .id = MAX8998_LDO17, .ops = &max8998_ldo_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "BUCK1", .id = MAX8998_BUCK1, .ops = &max8998_buck_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "BUCK2", .id = MAX8998_BUCK2, .ops = &max8998_buck_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "BUCK3", .id = MAX8998_BUCK3, .ops = &max8998_buck_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "BUCK4", .id = MAX8998_BUCK4, .ops = &max8998_buck_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "EN32KHz-AP", .id = MAX8998_EN32KHZ_AP, .ops = &max8998_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "EN32KHz-CP", .id = MAX8998_EN32KHZ_CP, .ops = &max8998_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "ENVICHG", .id = MAX8998_ENVICHG, .ops = &max8998_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "ESAFEOUT1", .id = MAX8998_ESAFEOUT1, .ops = &max8998_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "ESAFEOUT2", .id = MAX8998_ESAFEOUT2, .ops = &max8998_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, } }; static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev, struct max8998_platform_data *pdata, struct device_node *pmic_np) { int gpio; gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 0); if (!gpio_is_valid(gpio)) { dev_err(iodev->dev, "invalid buck1 gpio[0]: %d\n", gpio); return -EINVAL; } pdata->buck1_set1 = gpio; gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 1); if (!gpio_is_valid(gpio)) { dev_err(iodev->dev, "invalid buck1 gpio[1]: %d\n", gpio); return -EINVAL; } pdata->buck1_set2 = gpio; gpio = of_get_named_gpio(pmic_np, 
"max8998,pmic-buck2-dvs-gpio", 0); if (!gpio_is_valid(gpio)) { dev_err(iodev->dev, "invalid buck 2 gpio: %d\n", gpio); return -EINVAL; } pdata->buck2_set3 = gpio; return 0; } static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev, struct max8998_platform_data *pdata) { struct device_node *pmic_np = iodev->dev->of_node; struct device_node *regulators_np, *reg_np; struct max8998_regulator_data *rdata; unsigned int i; int ret; regulators_np = of_get_child_by_name(pmic_np, "regulators"); if (!regulators_np) { dev_err(iodev->dev, "could not find regulators sub-node\n"); return -EINVAL; } /* count the number of regulators to be supported in pmic */ pdata->num_regulators = of_get_child_count(regulators_np); rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * pdata->num_regulators, GFP_KERNEL); if (!rdata) { of_node_put(regulators_np); return -ENOMEM; } pdata->regulators = rdata; for (i = 0; i < ARRAY_SIZE(regulators); ++i) { reg_np = of_get_child_by_name(regulators_np, regulators[i].name); if (!reg_np) continue; rdata->id = regulators[i].id; rdata->initdata = of_get_regulator_init_data( iodev->dev, reg_np); rdata->reg_node = reg_np; ++rdata; } pdata->num_regulators = rdata - pdata->regulators; of_node_put(reg_np); of_node_put(regulators_np); ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); if (ret) return -EINVAL; if (of_find_property(pmic_np, "max8998,pmic-buck-voltage-lock", NULL)) pdata->buck_voltage_lock = true; ret = of_property_read_u32(pmic_np, "max8998,pmic-buck1-default-dvs-idx", &pdata->buck1_default_idx); if (!ret && pdata->buck1_default_idx >= 4) { pdata->buck1_default_idx = 0; dev_warn(iodev->dev, "invalid value for default dvs index, using 0 instead\n"); } ret = of_property_read_u32(pmic_np, "max8998,pmic-buck2-default-dvs-idx", &pdata->buck2_default_idx); if (!ret && pdata->buck2_default_idx >= 2) { pdata->buck2_default_idx = 0; dev_warn(iodev->dev, "invalid value for default dvs index, using 0 instead\n"); } ret = 
of_property_read_u32_array(pmic_np, "max8998,pmic-buck1-dvs-voltage", pdata->buck1_voltage, ARRAY_SIZE(pdata->buck1_voltage)); if (ret) { dev_err(iodev->dev, "buck1 voltages not specified\n"); return -EINVAL; } ret = of_property_read_u32_array(pmic_np, "max8998,pmic-buck2-dvs-voltage", pdata->buck2_voltage, ARRAY_SIZE(pdata->buck2_voltage)); if (ret) { dev_err(iodev->dev, "buck2 voltages not specified\n"); return -EINVAL; } return 0; } static int max8998_pmic_probe(struct platform_device *pdev) { struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct max8998_platform_data *pdata = iodev->pdata; struct regulator_config config = { }; struct regulator_dev *rdev; struct max8998_data *max8998; struct i2c_client *i2c; int i, ret; unsigned int v; if (!pdata) { dev_err(pdev->dev.parent, "No platform init data supplied\n"); return -ENODEV; } if (IS_ENABLED(CONFIG_OF) && iodev->dev->of_node) { ret = max8998_pmic_dt_parse_pdata(iodev, pdata); if (ret) return ret; } max8998 = devm_kzalloc(&pdev->dev, sizeof(struct max8998_data), GFP_KERNEL); if (!max8998) return -ENOMEM; max8998->dev = &pdev->dev; max8998->iodev = iodev; max8998->num_regulators = pdata->num_regulators; platform_set_drvdata(pdev, max8998); i2c = max8998->iodev->i2c; max8998->buck1_idx = pdata->buck1_default_idx; max8998->buck2_idx = pdata->buck2_default_idx; /* NOTE: */ /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */ /* will be displayed */ /* Check if MAX8998 voltage selection GPIOs are defined */ if (gpio_is_valid(pdata->buck1_set1) && gpio_is_valid(pdata->buck1_set2)) { /* Check if SET1 is not equal to 0 */ if (!pdata->buck1_set1) { dev_err(&pdev->dev, "MAX8998 SET1 GPIO defined as 0 !\n"); WARN_ON(!pdata->buck1_set1); return -EIO; } /* Check if SET2 is not equal to 0 */ if (!pdata->buck1_set2) { dev_err(&pdev->dev, "MAX8998 SET2 GPIO defined as 0 !\n"); WARN_ON(!pdata->buck1_set2); return -EIO; } gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1"); 
gpio_direction_output(pdata->buck1_set1, max8998->buck1_idx & 0x1); gpio_request(pdata->buck1_set2, "MAX8998 BUCK1_SET2"); gpio_direction_output(pdata->buck1_set2, (max8998->buck1_idx >> 1) & 0x1); /* Set predefined values for BUCK1 registers */ for (v = 0; v < ARRAY_SIZE(pdata->buck1_voltage); ++v) { i = 0; while (buck12_voltage_map_desc.min + buck12_voltage_map_desc.step*i < pdata->buck1_voltage[v]) i++; max8998->buck1_vol[v] = i; ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1 + v, i); if (ret) return ret; } } if (gpio_is_valid(pdata->buck2_set3)) { /* Check if SET3 is not equal to 0 */ if (!pdata->buck2_set3) { dev_err(&pdev->dev, "MAX8998 SET3 GPIO defined as 0 !\n"); WARN_ON(!pdata->buck2_set3); return -EIO; } gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3"); gpio_direction_output(pdata->buck2_set3, max8998->buck2_idx & 0x1); /* Set predefined values for BUCK2 registers */ for (v = 0; v < ARRAY_SIZE(pdata->buck2_voltage); ++v) { i = 0; while (buck12_voltage_map_desc.min + buck12_voltage_map_desc.step*i < pdata->buck2_voltage[v]) i++; max8998->buck2_vol[v] = i; ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1 + v, i); if (ret) return ret; } } for (i = 0; i < pdata->num_regulators; i++) { const struct voltage_map_desc *desc; int id = pdata->regulators[i].id; int index = id - MAX8998_LDO2; desc = ldo_voltage_map[id]; if (desc && regulators[index].ops != &max8998_others_ops) { int count = (desc->max - desc->min) / desc->step + 1; regulators[index].n_voltages = count; regulators[index].min_uV = desc->min; regulators[index].uV_step = desc->step; } config.dev = max8998->dev; config.of_node = pdata->regulators[i].reg_node; config.init_data = pdata->regulators[i].initdata; config.driver_data = max8998; rdev = devm_regulator_register(&pdev->dev, &regulators[index], &config); if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); dev_err(max8998->dev, "regulator %s init failed (%d)\n", regulators[index].name, ret); return ret; } } return 0; } static const struct 
platform_device_id max8998_pmic_id[] = { { "max8998-pmic", TYPE_MAX8998 }, { "lp3974-pmic", TYPE_LP3974 }, { } }; MODULE_DEVICE_TABLE(platform, max8998_pmic_id); static struct platform_driver max8998_pmic_driver = { .driver = { .name = "max8998-pmic", .owner = THIS_MODULE, }, .probe = max8998_pmic_probe, .id_table = max8998_pmic_id, }; static int __init max8998_pmic_init(void) { return platform_driver_register(&max8998_pmic_driver); } subsys_initcall(max8998_pmic_init); static void __exit max8998_pmic_cleanup(void) { platform_driver_unregister(&max8998_pmic_driver); } module_exit(max8998_pmic_cleanup); MODULE_DESCRIPTION("MAXIM 8998 voltage regulator driver"); MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
junmuzi/linux
drivers/iommu/fsl_pamu_domain.c
523
28175
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright (C) 2013 Freescale Semiconductor, Inc. * Author: Varun Sethi <varun.sethi@freescale.com> * */ #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ #include "fsl_pamu_domain.h" #include <sysdev/fsl_pci.h> /* * Global spinlock that needs to be held while * configuring PAMU. */ static DEFINE_SPINLOCK(iommu_lock); static struct kmem_cache *fsl_pamu_domain_cache; static struct kmem_cache *iommu_devinfo_cache; static DEFINE_SPINLOCK(device_domain_lock); static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom) { return container_of(dom, struct fsl_dma_domain, iommu_domain); } static int __init iommu_init_mempool(void) { fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", sizeof(struct fsl_dma_domain), 0, SLAB_HWCACHE_ALIGN, NULL); if (!fsl_pamu_domain_cache) { pr_debug("Couldn't create fsl iommu_domain cache\n"); return -ENOMEM; } iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", sizeof(struct device_domain_info), 0, SLAB_HWCACHE_ALIGN, NULL); if (!iommu_devinfo_cache) { pr_debug("Couldn't create devinfo cache\n"); kmem_cache_destroy(fsl_pamu_domain_cache); return -ENOMEM; } return 0; } static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) { u32 win_cnt = dma_domain->win_cnt; struct dma_window *win_ptr = &dma_domain->win_arr[0]; struct iommu_domain_geometry *geom; 
geom = &dma_domain->iommu_domain.geometry; if (!win_cnt || !dma_domain->geom_size) { pr_debug("Number of windows/geometry not configured for the domain\n"); return 0; } if (win_cnt > 1) { u64 subwin_size; dma_addr_t subwin_iova; u32 wnd; subwin_size = dma_domain->geom_size >> ilog2(win_cnt); subwin_iova = iova & ~(subwin_size - 1); wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); win_ptr = &dma_domain->win_arr[wnd]; } if (win_ptr->valid) return win_ptr->paddr + (iova & (win_ptr->size - 1)); return 0; } static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) { struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; int i, ret; unsigned long rpn, flags; for (i = 0; i < dma_domain->win_cnt; i++) { if (sub_win_ptr[i].valid) { rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; spin_lock_irqsave(&iommu_lock, flags); ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, sub_win_ptr[i].size, ~(u32)0, rpn, dma_domain->snoop_id, dma_domain->stash_id, (i > 0) ? 1 : 0, sub_win_ptr[i].prot); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { pr_debug("SPAACE configuration failed for liodn %d\n", liodn); return ret; } } } return ret; } static int map_win(int liodn, struct fsl_dma_domain *dma_domain) { int ret; struct dma_window *wnd = &dma_domain->win_arr[0]; phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); ret = pamu_config_ppaace(liodn, wnd_addr, wnd->size, ~(u32)0, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, dma_domain->stash_id, 0, wnd->prot); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) pr_debug("PAACE configuration failed for liodn %d\n", liodn); return ret; } /* Map the DMA window corresponding to the LIODN */ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) { if (dma_domain->win_cnt > 1) return map_subwins(liodn, dma_domain); else return map_win(liodn, dma_domain); } /* Update window/subwindow mapping for the LIODN */ 
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) { int ret; struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); if (dma_domain->win_cnt > 1) { ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, wnd->size, ~(u32)0, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, dma_domain->stash_id, (wnd_nr > 0) ? 1 : 0, wnd->prot); if (ret) pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn); } else { phys_addr_t wnd_addr; wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; ret = pamu_config_ppaace(liodn, wnd_addr, wnd->size, ~(u32)0, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, dma_domain->stash_id, 0, wnd->prot); if (ret) pr_debug("Window reconfiguration failed for liodn %d\n", liodn); } spin_unlock_irqrestore(&iommu_lock, flags); return ret; } static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, u32 val) { int ret = 0, i; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); if (!dma_domain->win_arr) { pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn); spin_unlock_irqrestore(&iommu_lock, flags); return -EINVAL; } for (i = 0; i < dma_domain->win_cnt; i++) { ret = pamu_update_paace_stash(liodn, i, val); if (ret) { pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn); spin_unlock_irqrestore(&iommu_lock, flags); return ret; } } spin_unlock_irqrestore(&iommu_lock, flags); return ret; } /* Set the geometry parameters for a LIODN */ static int pamu_set_liodn(int liodn, struct device *dev, struct fsl_dma_domain *dma_domain, struct iommu_domain_geometry *geom_attr, u32 win_cnt) { phys_addr_t window_addr, window_size; phys_addr_t subwin_size; int ret = 0, i; u32 omi_index = ~(u32)0; unsigned long flags; /* * Configure the omi_index at the geometry setup time. * This is a static value which depends on the type of * device and would not change thereafter. 
*/ get_ome_index(&omi_index, dev); window_addr = geom_attr->aperture_start; window_size = dma_domain->geom_size; spin_lock_irqsave(&iommu_lock, flags); ret = pamu_disable_liodn(liodn); if (!ret) ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 0, dma_domain->snoop_id, dma_domain->stash_id, win_cnt, 0); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); return ret; } if (win_cnt > 1) { subwin_size = window_size >> ilog2(win_cnt); for (i = 0; i < win_cnt; i++) { spin_lock_irqsave(&iommu_lock, flags); ret = pamu_disable_spaace(liodn, i); if (!ret) ret = pamu_config_spaace(liodn, win_cnt, i, subwin_size, omi_index, 0, dma_domain->snoop_id, dma_domain->stash_id, 0, 0); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { pr_debug("SPAACE configuration failed for liodn %d\n", liodn); return ret; } } } return ret; } static int check_size(u64 size, dma_addr_t iova) { /* * Size must be a power of two and at least be equal * to PAMU page size. 
*/ if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { pr_debug("Size too small or not a power of two\n"); return -EINVAL; } /* iova must be page size aligned */ if (iova & (size - 1)) { pr_debug("Address is not aligned with window size\n"); return -EINVAL; } return 0; } static struct fsl_dma_domain *iommu_alloc_dma_domain(void) { struct fsl_dma_domain *domain; domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); if (!domain) return NULL; domain->stash_id = ~(u32)0; domain->snoop_id = ~(u32)0; domain->win_cnt = pamu_get_max_subwin_cnt(); domain->geom_size = 0; INIT_LIST_HEAD(&domain->devices); spin_lock_init(&domain->domain_lock); return domain; } static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) { unsigned long flags; list_del(&info->link); spin_lock_irqsave(&iommu_lock, flags); if (win_cnt > 1) pamu_free_subwins(info->liodn); pamu_disable_liodn(info->liodn); spin_unlock_irqrestore(&iommu_lock, flags); spin_lock_irqsave(&device_domain_lock, flags); info->dev->archdata.iommu_domain = NULL; kmem_cache_free(iommu_devinfo_cache, info); spin_unlock_irqrestore(&device_domain_lock, flags); } static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) { struct device_domain_info *info, *tmp; unsigned long flags; spin_lock_irqsave(&dma_domain->domain_lock, flags); /* Remove the device from the domain device list */ list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { if (!dev || (info->dev == dev)) remove_device_ref(info, dma_domain->win_cnt); } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); } static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev) { struct device_domain_info *info, *old_domain_info; unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); /* * Check here if the device is already attached to domain or not. * If the device is already attached to a domain detach it. 
*/ old_domain_info = dev->archdata.iommu_domain; if (old_domain_info && old_domain_info->domain != dma_domain) { spin_unlock_irqrestore(&device_domain_lock, flags); detach_device(dev, old_domain_info->domain); spin_lock_irqsave(&device_domain_lock, flags); } info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC); info->dev = dev; info->liodn = liodn; info->domain = dma_domain; list_add(&info->link, &dma_domain->devices); /* * In case of devices with multiple LIODNs just store * the info for the first LIODN as all * LIODNs share the same domain */ if (!dev->archdata.iommu_domain) dev->archdata.iommu_domain = info; spin_unlock_irqrestore(&device_domain_lock, flags); } static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); if (iova < domain->geometry.aperture_start || iova > domain->geometry.aperture_end) return 0; return get_phys_addr(dma_domain, iova); } static bool fsl_pamu_capable(enum iommu_cap cap) { return cap == IOMMU_CAP_CACHE_COHERENCY; } static void fsl_pamu_domain_free(struct iommu_domain *domain) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); /* remove all the devices from the device list */ detach_device(NULL, dma_domain); dma_domain->enabled = 0; dma_domain->mapped = 0; kmem_cache_free(fsl_pamu_domain_cache, dma_domain); } static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) { struct fsl_dma_domain *dma_domain; if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; dma_domain = iommu_alloc_dma_domain(); if (!dma_domain) { pr_debug("dma_domain allocation failed\n"); return NULL; } /* defaul geometry 64 GB i.e. maximum system address */ dma_domain->iommu_domain. 
geometry.aperture_start = 0; dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; dma_domain->iommu_domain.geometry.force_aperture = true; return &dma_domain->iommu_domain; } /* Configure geometry settings for all LIODNs associated with domain */ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, struct iommu_domain_geometry *geom_attr, u32 win_cnt) { struct device_domain_info *info; int ret = 0; list_for_each_entry(info, &dma_domain->devices, link) { ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, geom_attr, win_cnt); if (ret) break; } return ret; } /* Update stash destination for all LIODNs associated with the domain */ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) { struct device_domain_info *info; int ret = 0; list_for_each_entry(info, &dma_domain->devices, link) { ret = update_liodn_stash(info->liodn, dma_domain, val); if (ret) break; } return ret; } /* Update domain mappings for all LIODNs associated with the domain */ static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) { struct device_domain_info *info; int ret = 0; list_for_each_entry(info, &dma_domain->devices, link) { ret = update_liodn(info->liodn, dma_domain, wnd_nr); if (ret) break; } return ret; } static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) { struct device_domain_info *info; int ret = 0; list_for_each_entry(info, &dma_domain->devices, link) { if (dma_domain->win_cnt == 1 && dma_domain->enabled) { ret = pamu_disable_liodn(info->liodn); if (!ret) dma_domain->enabled = 0; } else { ret = pamu_disable_spaace(info->liodn, wnd_nr); } } return ret; } static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); if (!dma_domain->win_arr) { pr_debug("Number of windows not configured\n"); 
spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return; } if (wnd_nr >= dma_domain->win_cnt) { pr_debug("Invalid window index\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return; } if (dma_domain->win_arr[wnd_nr].valid) { ret = disable_domain_win(dma_domain, wnd_nr); if (!ret) { dma_domain->win_arr[wnd_nr].valid = 0; dma_domain->mapped--; } } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); } static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); struct dma_window *wnd; int pamu_prot = 0; int ret; unsigned long flags; u64 win_size; if (prot & IOMMU_READ) pamu_prot |= PAACE_AP_PERMS_QUERY; if (prot & IOMMU_WRITE) pamu_prot |= PAACE_AP_PERMS_UPDATE; spin_lock_irqsave(&dma_domain->domain_lock, flags); if (!dma_domain->win_arr) { pr_debug("Number of windows not configured\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -ENODEV; } if (wnd_nr >= dma_domain->win_cnt) { pr_debug("Invalid window index\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); if (size > win_size) { pr_debug("Invalid window size\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } if (dma_domain->win_cnt == 1) { if (dma_domain->enabled) { pr_debug("Disable the window before updating the mapping\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EBUSY; } ret = check_size(size, domain->geometry.aperture_start); if (ret) { pr_debug("Aperture start not aligned to the size\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } } wnd = &dma_domain->win_arr[wnd_nr]; if (!wnd->valid) { wnd->paddr = paddr; wnd->size = size; wnd->prot = pamu_prot; ret = update_domain_mapping(dma_domain, wnd_nr); if (!ret) { wnd->valid = 1; dma_domain->mapped++; } } else { pr_debug("Disable 
the window before updating the mapping\n"); ret = -EBUSY; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; } /* * Attach the LIODN to the DMA domain and configure the geometry * and window mappings. */ static int handle_attach_device(struct fsl_dma_domain *dma_domain, struct device *dev, const u32 *liodn, int num) { unsigned long flags; struct iommu_domain *domain = &dma_domain->iommu_domain; int ret = 0; int i; spin_lock_irqsave(&dma_domain->domain_lock, flags); for (i = 0; i < num; i++) { /* Ensure that LIODN value is valid */ if (liodn[i] >= PAACE_NUMBER_ENTRIES) { pr_debug("Invalid liodn %d, attach device failed for %s\n", liodn[i], dev->of_node->full_name); ret = -EINVAL; break; } attach_device(dma_domain, liodn[i], dev); /* * Check if geometry has already been configured * for the domain. If yes, set the geometry for * the LIODN. */ if (dma_domain->win_arr) { u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; ret = pamu_set_liodn(liodn[i], dev, dma_domain, &domain->geometry, win_cnt); if (ret) break; if (dma_domain->mapped) { /* * Create window/subwindow mapping for * the LIODN. */ ret = map_liodn(liodn[i], dma_domain); if (ret) break; } } } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; } static int fsl_pamu_attach_device(struct iommu_domain *domain, struct device *dev) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); const u32 *liodn; u32 liodn_cnt; int len, ret = 0; struct pci_dev *pdev = NULL; struct pci_controller *pci_ctl; /* * Use LIODN of the PCI controller while attaching a * PCI device. */ if (dev_is_pci(dev)) { pdev = to_pci_dev(dev); pci_ctl = pci_bus_to_host(pdev->bus); /* * make dev point to pci controller device * so we can get the LIODN programmed by * u-boot. 
*/ dev = pci_ctl->parent; } liodn = of_get_property(dev->of_node, "fsl,liodn", &len); if (liodn) { liodn_cnt = len / sizeof(u32); ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); } else { pr_debug("missing fsl,liodn property at %s\n", dev->of_node->full_name); ret = -EINVAL; } return ret; } static void fsl_pamu_detach_device(struct iommu_domain *domain, struct device *dev) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); const u32 *prop; int len; struct pci_dev *pdev = NULL; struct pci_controller *pci_ctl; /* * Use LIODN of the PCI controller while detaching a * PCI device. */ if (dev_is_pci(dev)) { pdev = to_pci_dev(dev); pci_ctl = pci_bus_to_host(pdev->bus); /* * make dev point to pci controller device * so we can get the LIODN programmed by * u-boot. */ dev = pci_ctl->parent; } prop = of_get_property(dev->of_node, "fsl,liodn", &len); if (prop) detach_device(dev, dma_domain); else pr_debug("missing fsl,liodn property at %s\n", dev->of_node->full_name); } static int configure_domain_geometry(struct iommu_domain *domain, void *data) { struct iommu_domain_geometry *geom_attr = data; struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); dma_addr_t geom_size; unsigned long flags; geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1; /* * Sanity check the geometry size. Also, we do not support * DMA outside of the geometry. 
*/ if (check_size(geom_size, geom_attr->aperture_start) || !geom_attr->force_aperture) { pr_debug("Invalid PAMU geometry attributes\n"); return -EINVAL; } spin_lock_irqsave(&dma_domain->domain_lock, flags); if (dma_domain->enabled) { pr_debug("Can't set geometry attributes as domain is active\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EBUSY; } /* Copy the domain geometry information */ memcpy(&domain->geometry, geom_attr, sizeof(struct iommu_domain_geometry)); dma_domain->geom_size = geom_size; spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return 0; } /* Set the domain stash attribute */ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) { struct pamu_stash_attribute *stash_attr = data; unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); memcpy(&dma_domain->dma_stash, stash_attr, sizeof(struct pamu_stash_attribute)); dma_domain->stash_id = get_stash_id(stash_attr->cache, stash_attr->cpu); if (dma_domain->stash_id == ~(u32)0) { pr_debug("Invalid stash attributes\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } ret = update_domain_stash(dma_domain, dma_domain->stash_id); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; } /* Configure domain dma state i.e. enable/disable DMA */ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) { struct device_domain_info *info; unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); if (enable && !dma_domain->mapped) { pr_debug("Can't enable DMA domain without valid mapping\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -ENODEV; } dma_domain->enabled = enable; list_for_each_entry(info, &dma_domain->devices, link) { ret = (enable) ? 
pamu_enable_liodn(info->liodn) : pamu_disable_liodn(info->liodn); if (ret) pr_debug("Unable to set dma state for liodn %d", info->liodn); } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return 0; } static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, enum iommu_attr attr_type, void *data) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); int ret = 0; switch (attr_type) { case DOMAIN_ATTR_GEOMETRY: ret = configure_domain_geometry(domain, data); break; case DOMAIN_ATTR_FSL_PAMU_STASH: ret = configure_domain_stash(dma_domain, data); break; case DOMAIN_ATTR_FSL_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; default: pr_debug("Unsupported attribute type\n"); ret = -EINVAL; break; } return ret; } static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, enum iommu_attr attr_type, void *data) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); int ret = 0; switch (attr_type) { case DOMAIN_ATTR_FSL_PAMU_STASH: memcpy(data, &dma_domain->dma_stash, sizeof(struct pamu_stash_attribute)); break; case DOMAIN_ATTR_FSL_PAMU_ENABLE: *(int *)data = dma_domain->enabled; break; case DOMAIN_ATTR_FSL_PAMUV1: *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; break; default: pr_debug("Unsupported attribute type\n"); ret = -EINVAL; break; } return ret; } static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; group = iommu_group_get(dev); if (!group) group = iommu_group_alloc(); return group; } static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) { u32 version; /* Check the PCI controller version number by readding BRR1 register */ version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); version &= PCI_FSL_BRR1_VER; /* If PCI controller version is >= 0x204 we can partition endpoints */ return version >= 0x204; } /* Get iommu group information from peer devices or devices on the parent bus */ static struct iommu_group 
*get_shared_pci_device_group(struct pci_dev *pdev) { struct pci_dev *tmp; struct iommu_group *group; struct pci_bus *bus = pdev->bus; /* * Traverese the pci bus device list to get * the shared iommu group. */ while (bus) { list_for_each_entry(tmp, &bus->devices, bus_list) { if (tmp == pdev) continue; group = iommu_group_get(&tmp->dev); if (group) return group; } bus = bus->parent; } return NULL; } static struct iommu_group *get_pci_device_group(struct pci_dev *pdev) { struct pci_controller *pci_ctl; bool pci_endpt_partioning; struct iommu_group *group = NULL; pci_ctl = pci_bus_to_host(pdev->bus); pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl); /* We can partition PCIe devices so assign device group to the device */ if (pci_endpt_partioning) { group = pci_device_group(&pdev->dev); /* * PCIe controller is not a paritionable entity * free the controller device iommu_group. */ if (pci_ctl->parent->iommu_group) iommu_group_remove_device(pci_ctl->parent); } else { /* * All devices connected to the controller will share the * PCI controllers device group. If this is the first * device to be probed for the pci controller, copy the * device group information from the PCI controller device * node and remove the PCI controller iommu group. * For subsequent devices, the iommu group information can * be obtained from sibling devices (i.e. from the bus_devices * link list). */ if (pci_ctl->parent->iommu_group) { group = get_device_iommu_group(pci_ctl->parent); iommu_group_remove_device(pci_ctl->parent); } else { group = get_shared_pci_device_group(pdev); } } if (!group) group = ERR_PTR(-ENODEV); return group; } static struct iommu_group *fsl_pamu_device_group(struct device *dev) { struct iommu_group *group = ERR_PTR(-ENODEV); int len; /* * For platform devices we allocate a separate group for * each of the devices. 
*/ if (dev_is_pci(dev)) group = get_pci_device_group(to_pci_dev(dev)); else if (of_get_property(dev->of_node, "fsl,liodn", &len)) group = get_device_iommu_group(dev); return group; } static int fsl_pamu_add_device(struct device *dev) { struct iommu_group *group; group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return PTR_ERR(group); iommu_group_put(group); return 0; } static void fsl_pamu_remove_device(struct device *dev) { iommu_group_remove_device(dev); } static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ if (dma_domain->enabled) { pr_debug("Can't set geometry attributes as domain is active\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EBUSY; } /* Ensure that the geometry has been set for the domain */ if (!dma_domain->geom_size) { pr_debug("Please configure geometry before setting the number of windows\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } /* * Ensure we have valid window count i.e. it should be less than * maximum permissible limit and should be a power of two. */ if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) { pr_debug("Invalid window count\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, w_count > 1 ? 
w_count : 0); if (!ret) { kfree(dma_domain->win_arr); dma_domain->win_arr = kcalloc(w_count, sizeof(*dma_domain->win_arr), GFP_ATOMIC); if (!dma_domain->win_arr) { spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -ENOMEM; } dma_domain->win_cnt = w_count; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; } static u32 fsl_pamu_get_windows(struct iommu_domain *domain) { struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); return dma_domain->win_cnt; } static const struct iommu_ops fsl_pamu_ops = { .capable = fsl_pamu_capable, .domain_alloc = fsl_pamu_domain_alloc, .domain_free = fsl_pamu_domain_free, .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, .domain_window_enable = fsl_pamu_window_enable, .domain_window_disable = fsl_pamu_window_disable, .domain_get_windows = fsl_pamu_get_windows, .domain_set_windows = fsl_pamu_set_windows, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, .domain_get_attr = fsl_pamu_get_domain_attr, .add_device = fsl_pamu_add_device, .remove_device = fsl_pamu_remove_device, .device_group = fsl_pamu_device_group, }; int __init pamu_domain_init(void) { int ret = 0; ret = iommu_init_mempool(); if (ret) return ret; bus_set_iommu(&platform_bus_type, &fsl_pamu_ops); bus_set_iommu(&pci_bus_type, &fsl_pamu_ops); return ret; }
gpl-2.0
koying/buildroot-linux-kernel-m3-pivos
arch/arm/mach-pxa/e740.c
779
5504
/* * Hardware definitions for the Toshiba eseries PDAs * * Copyright (c) 2003 Ian Molton <spyro@f2s.com> * * This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/mfd/t7l66xb.h> #include <video/w100fb.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <mach/pxa25x.h> #include <mach/eseries-gpio.h> #include <mach/udc.h> #include <mach/irda.h> #include <mach/irqs.h> #include <mach/audio.h> #include "generic.h" #include "eseries.h" #include "clock.h" #include "devices.h" /* ------------------------ e740 video support --------------------------- */ static struct w100_gen_regs e740_lcd_regs = { .lcd_format = 0x00008023, .lcdd_cntl1 = 0x0f000000, .lcdd_cntl2 = 0x0003ffff, .genlcd_cntl1 = 0x00ffff03, .genlcd_cntl2 = 0x003c0f03, .genlcd_cntl3 = 0x000143aa, }; static struct w100_mode e740_lcd_mode = { .xres = 240, .yres = 320, .left_margin = 20, .right_margin = 28, .upper_margin = 9, .lower_margin = 8, .crtc_ss = 0x80140013, .crtc_ls = 0x81150110, .crtc_gs = 0x80050005, .crtc_vpos_gs = 0x000a0009, .crtc_rev = 0x0040010a, .crtc_dclk = 0xa906000a, .crtc_gclk = 0x80050108, .crtc_goe = 0x80050108, .pll_freq = 57, .pixclk_divider = 4, .pixclk_divider_rotated = 4, .pixclk_src = CLK_SRC_XTAL, .sysclk_divider = 1, .sysclk_src = CLK_SRC_PLL, .crtc_ps1_active = 0x41060010, }; static struct w100_gpio_regs e740_w100_gpio_info = { .init_data1 = 0x21002103, .gpio_dir1 = 0xffffdeff, .gpio_oe1 = 0x03c00643, .init_data2 = 0x003f003f, .gpio_dir2 = 0xffffffff, .gpio_oe2 = 0x000000ff, }; static struct w100fb_mach_info e740_fb_info = { .modelist = &e740_lcd_mode, .num_modes = 1, .regs = &e740_lcd_regs, .gpio = &e740_w100_gpio_info, .xtal_freq = 14318000, 
.xtal_dbl = 1, }; static struct resource e740_fb_resources[] = { [0] = { .start = 0x0c000000, .end = 0x0cffffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device e740_fb_device = { .name = "w100fb", .id = -1, .dev = { .platform_data = &e740_fb_info, }, .num_resources = ARRAY_SIZE(e740_fb_resources), .resource = e740_fb_resources, }; /* --------------------------- MFP Pin config -------------------------- */ static unsigned long e740_pin_config[] __initdata = { /* Chip selects */ GPIO15_nCS_1, /* CS1 - Flash */ GPIO79_nCS_3, /* CS3 - IMAGEON */ GPIO80_nCS_4, /* CS4 - TMIO */ /* Clocks */ GPIO12_32KHz, /* BTUART */ GPIO42_BTUART_RXD, GPIO43_BTUART_TXD, GPIO44_BTUART_CTS, /* TMIO controller */ GPIO19_GPIO, /* t7l66xb #PCLR */ GPIO45_GPIO, /* t7l66xb #SUSPEND (NOT BTUART!) */ /* UDC */ GPIO13_GPIO, GPIO3_GPIO, /* IrDA */ GPIO38_GPIO | MFP_LPM_DRIVE_HIGH, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, /* Audio power control */ GPIO16_GPIO, /* AC97 codec AVDD2 supply (analogue power) */ GPIO40_GPIO, /* Mic amp power */ GPIO41_GPIO, /* Headphone amp power */ /* PC Card */ GPIO8_GPIO, /* CD0 */ GPIO44_GPIO, /* CD1 */ GPIO11_GPIO, /* IRQ0 */ GPIO6_GPIO, /* IRQ1 */ GPIO27_GPIO, /* RST0 */ GPIO24_GPIO, /* RST1 */ GPIO20_GPIO, /* PWR0 */ GPIO23_GPIO, /* PWR1 */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO52_nPCE_1, GPIO53_nPCE_2, GPIO54_nPSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, /* wakeup */ GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, }; /* -------------------- e740 t7l66xb parameters -------------------- */ static struct t7l66xb_platform_data e740_t7l66xb_info = { .irq_base = IRQ_BOARD_START, .enable = &eseries_tmio_enable, .suspend = &eseries_tmio_suspend, .resume = &eseries_tmio_resume, }; static struct platform_device e740_t7l66xb_device = { .name = "t7l66xb", .id = -1, .dev = { .platform_data = &e740_t7l66xb_info, }, .num_resources = 2, .resource = eseries_tmio_resources, }; /* 
----------------------------------------------------------------------- */ static struct platform_device *devices[] __initdata = { &e740_fb_device, &e740_t7l66xb_device, }; static void __init e740_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(e740_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); eseries_register_clks(); clk_add_alias("CLK_CK48M", e740_t7l66xb_device.name, "UDCCLK", &pxa25x_device_udc.dev), eseries_get_tmio_gpios(); platform_add_devices(devices, ARRAY_SIZE(devices)); pxa_set_udc_info(&e7xx_udc_mach_info); pxa_set_ac97_info(NULL); pxa_set_ficp_info(&e7xx_ficp_platform_data); } MACHINE_START(E740, "Toshiba e740") /* Maintainer: Ian Molton (spyro@f2s.com) */ .phys_io = 0x40000000, .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, .boot_params = 0xa0000100, .map_io = pxa_map_io, .init_irq = pxa25x_init_irq, .fixup = eseries_fixup, .init_machine = e740_init, .timer = &pxa_timer, MACHINE_END
gpl-2.0
kingklick/kk-incredible-kernel
arch/sparc/kernel/pmc.c
779
2047
/* pmc - Driver implementation for power management functions * of Power Management Controller (PMC) on SPARCstation-Voyager. * * Copyright (c) 2002 Eric Brower (ebrower@usa.net) */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/oplib.h> #include <asm/uaccess.h> #include <asm/auxio.h> /* Debug * * #define PMC_DEBUG_LED * #define PMC_NO_IDLE */ #define PMC_OBPNAME "SUNW,pmc" #define PMC_DEVNAME "pmc" #define PMC_IDLE_REG 0x00 #define PMC_IDLE_ON 0x01 static u8 __iomem *regs; #define pmc_readb(offs) (sbus_readb(regs+offs)) #define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs)) /* * CPU idle callback function * See .../arch/sparc/kernel/process.c */ static void pmc_swift_idle(void) { #ifdef PMC_DEBUG_LED set_auxio(0x00, AUXIO_LED); #endif pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG); #ifdef PMC_DEBUG_LED set_auxio(AUXIO_LED, 0x00); #endif } static int __devinit pmc_probe(struct of_device *op, const struct of_device_id *match) { regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), PMC_OBPNAME); if (!regs) { printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME); return -ENODEV; } #ifndef PMC_NO_IDLE /* Assign power management IDLE handler */ pm_idle = pmc_swift_idle; #endif printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); return 0; } static struct of_device_id __initdata pmc_match[] = { { .name = PMC_OBPNAME, }, {}, }; MODULE_DEVICE_TABLE(of, pmc_match); static struct of_platform_driver pmc_driver = { .name = "pmc", .match_table = pmc_match, .probe = pmc_probe, }; static int __init pmc_init(void) { return of_register_driver(&pmc_driver, &of_bus_type); } /* This driver is not critical to the boot process * and is easiest to ioremap when SBus is already * initialized, so we install ourselves thusly: */ __initcall(pmc_init);
gpl-2.0
herophj/linux_kerner_2_6
arch/mips/pmc-sierra/yosemite/irq.c
779
4671
/* * Copyright (C) 2003 PMC-Sierra Inc. * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Second level Interrupt handlers for the PMC-Sierra Titan/Yosemite board */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/timex.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/bitops.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/system.h> #include <asm/titan_dep.h> /* Hypertransport specific */ #define IRQ_ACK_BITS 0x00000000 /* Ack bits */ #define HYPERTRANSPORT_INTA 0x78 /* INTA# */ #define HYPERTRANSPORT_INTB 0x79 /* INTB# */ #define HYPERTRANSPORT_INTC 0x7a /* INTC# */ #define HYPERTRANSPORT_INTD 0x7b /* INTD# */ extern void titan_mailbox_irq(void); #ifdef CONFIG_HYPERTRANSPORT /* * Handle hypertransport & SMP interrupts. The interrupt lines are scarce. * For interprocessor interrupts, the best thing to do is to use the INTMSG * register. We use the same external interrupt line, i.e. INTB3 and monitor * another status bit */ static void ll_ht_smp_irq_handler(int irq) { u32 status = OCD_READ(RM9000x2_OCD_INTP0STATUS4); /* Ack all the bits that correspond to the interrupt sources */ if (status != 0) OCD_WRITE(RM9000x2_OCD_INTP0STATUS4, IRQ_ACK_BITS); status = OCD_READ(RM9000x2_OCD_INTP1STATUS4); if (status != 0) OCD_WRITE(RM9000x2_OCD_INTP1STATUS4, IRQ_ACK_BITS); #ifdef CONFIG_HT_LEVEL_TRIGGER /* * Level Trigger Mode only. Send the HT EOI message back to the source. 
*/ switch (status) { case 0x1000000: OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA); break; case 0x2000000: OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB); break; case 0x4000000: OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC); break; case 0x8000000: OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD); break; case 0x0000001: /* PLX */ OCD_WRITE(RM9000x2_OCD_HTEOI, 0x20); OCD_WRITE(IRQ_CLEAR_REG, IRQ_ACK_BITS); break; case 0xf000000: OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA); OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB); OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC); OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD); break; } #endif /* CONFIG_HT_LEVEL_TRIGGER */ do_IRQ(irq); } #endif asmlinkage void plat_irq_dispatch(void) { unsigned int cause = read_c0_cause(); unsigned int status = read_c0_status(); unsigned int pending = cause & status; if (pending & STATUSF_IP7) { do_IRQ(7); } else if (pending & STATUSF_IP2) { #ifdef CONFIG_HYPERTRANSPORT ll_ht_smp_irq_handler(2); #else do_IRQ(2); #endif } else if (pending & STATUSF_IP3) { do_IRQ(3); } else if (pending & STATUSF_IP4) { do_IRQ(4); } else if (pending & STATUSF_IP5) { #ifdef CONFIG_SMP titan_mailbox_irq(); #else do_IRQ(5); #endif } else if (pending & STATUSF_IP6) { do_IRQ(4); } } /* * Initialize the next level interrupt handler */ void __init arch_init_irq(void) { clear_c0_status(ST0_IM); mips_cpu_irq_init(); rm7k_cpu_irq_init(); rm9k_cpu_irq_init(); #ifdef CONFIG_GDB_CONSOLE register_gdb_console(); #endif }
gpl-2.0
Napstar-xda/Huawei-Ideos-X6-Kernel
arch/sparc/kernel/stacktrace.c
779
1478
#include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> #include "kstack.h" static void __save_stack_trace(struct thread_info *tp, struct stack_trace *trace, bool skip_sched) { unsigned long ksp, fp; if (tp == current_thread_info()) { stack_trace_flush(); __asm__ __volatile__("mov %%fp, %0" : "=r" (ksp)); } else { ksp = tp->ksp; } fp = ksp + STACK_BIAS; do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; if (!kstack_valid(tp, fp)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); if (kstack_is_trap_frame(tp, regs)) { if (!(regs->tstate & TSTATE_PRIV)) break; pc = regs->tpc; fp = regs->u_regs[UREG_I6] + STACK_BIAS; } else { pc = sf->callers_pc; fp = (unsigned long)sf->fp + STACK_BIAS; } if (trace->skip > 0) trace->skip--; else if (!skip_sched || !in_sched_functions(pc)) trace->entries[trace->nr_entries++] = pc; } while (trace->nr_entries < trace->max_entries); } void save_stack_trace(struct stack_trace *trace) { __save_stack_trace(current_thread_info(), trace, false); } EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { struct thread_info *tp = task_thread_info(tsk); __save_stack_trace(tp, trace, true); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
gpl-2.0
gasseluk/htc-vision-kernel-ics
arch/arm/mach-s3c64xx/mach-mini6410.c
2059
8261
/* linux/arch/arm/mach-s3c64xx/mach-mini6410.c * * Copyright 2010 Darius Augulis <augulis.darius@gmail.com> * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/dm9000.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/serial_core.h> #include <linux/types.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/map.h> #include <mach/regs-fb.h> #include <mach/regs-gpio.h> #include <mach/regs-modem.h> #include <mach/regs-srom.h> #include <mach/s3c6410.h> #include <plat/adc.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/fb.h> #include <plat/nand.h> #include <plat/regs-serial.h> #include <plat/ts.h> #include <video/platform_lcd.h> #define UCON S3C2410_UCON_DEFAULT #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB) #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE) static struct s3c2410_uartcfg mini6410_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [3] = { .hwport = 3, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, }; /* DM9000AEP 10/100 ethernet controller */ static struct resource mini6410_dm9k_resource[] = { [0] = { .start = S3C64XX_PA_XM0CSN1, .end = S3C64XX_PA_XM0CSN1 + 1, .flags = IORESOURCE_MEM }, [1] = { .start = S3C64XX_PA_XM0CSN1 + 4, .end = S3C64XX_PA_XM0CSN1 + 5, .flags = 
IORESOURCE_MEM }, [2] = { .start = S3C_EINT(7), .end = S3C_EINT(7), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL } }; static struct dm9000_plat_data mini6410_dm9k_pdata = { .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), }; static struct platform_device mini6410_device_eth = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(mini6410_dm9k_resource), .resource = mini6410_dm9k_resource, .dev = { .platform_data = &mini6410_dm9k_pdata, }, }; static struct mtd_partition mini6410_nand_part[] = { [0] = { .name = "uboot", .size = SZ_1M, .offset = 0, }, [1] = { .name = "kernel", .size = SZ_2M, .offset = SZ_1M, }, [2] = { .name = "rootfs", .size = MTDPART_SIZ_FULL, .offset = SZ_1M + SZ_2M, }, }; static struct s3c2410_nand_set mini6410_nand_sets[] = { [0] = { .name = "nand", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(mini6410_nand_part), .partitions = mini6410_nand_part, }, }; static struct s3c2410_platform_nand mini6410_nand_info = { .tacls = 25, .twrph0 = 55, .twrph1 = 40, .nr_sets = ARRAY_SIZE(mini6410_nand_sets), .sets = mini6410_nand_sets, }; static struct s3c_fb_pd_win mini6410_fb_win[] = { { .win_mode = { /* 4.3" 480x272 */ .left_margin = 3, .right_margin = 2, .upper_margin = 1, .lower_margin = 1, .hsync_len = 40, .vsync_len = 1, .xres = 480, .yres = 272, }, .max_bpp = 32, .default_bpp = 16, }, { .win_mode = { /* 7.0" 800x480 */ .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, }, }; static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &mini6410_fb_win[0], .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; static void mini6410_lcd_power_set(struct plat_lcd_data *pd, unsigned int power) { if (power) gpio_direction_output(S3C64XX_GPE(0), 1); else gpio_direction_output(S3C64XX_GPE(0), 0); } static struct 
plat_lcd_data mini6410_lcd_power_data = { .set_power = mini6410_lcd_power_set, }; static struct platform_device mini6410_lcd_powerdev = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &mini6410_lcd_power_data, }; static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 2, }; static struct platform_device *mini6410_devices[] __initdata = { &mini6410_device_eth, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_ohci, &s3c_device_nand, &s3c_device_fb, &mini6410_lcd_powerdev, &s3c_device_adc, &s3c_device_ts, }; static void __init mini6410_map_io(void) { u32 tmp; s3c64xx_init_io(NULL, 0); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(mini6410_uartcfgs, ARRAY_SIZE(mini6410_uartcfgs)); /* set the LCD type */ tmp = __raw_readl(S3C64XX_SPCON); tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK; tmp |= S3C64XX_SPCON_LCD_SEL_RGB; __raw_writel(tmp, S3C64XX_SPCON); /* remove the LCD bypass */ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON); tmp &= ~MIFPCON_LCD_BYPASS; __raw_writel(tmp, S3C64XX_MODEM_MIFPCON); } /* * mini6410_features string * * 0-9 LCD configuration * */ static char mini6410_features_str[12] __initdata = "0"; static int __init mini6410_features_setup(char *str) { if (str) strlcpy(mini6410_features_str, str, sizeof(mini6410_features_str)); return 1; } __setup("mini6410=", mini6410_features_setup); #define FEATURE_SCREEN (1 << 0) struct mini6410_features_t { int done; int lcd_index; }; static void mini6410_parse_features( struct mini6410_features_t *features, const char *features_str) { const char *fp = features_str; features->done = 0; features->lcd_index = 0; while (*fp) { char f = *fp++; switch (f) { case '0'...'9': /* tft screen */ if (features->done & FEATURE_SCREEN) { printk(KERN_INFO "MINI6410: '%c' ignored, " "screen type already set\n", f); } else { int li = f - '0'; if (li >= ARRAY_SIZE(mini6410_fb_win)) printk(KERN_INFO "MINI6410: '%c' out " "of range LCD mode\n", f); else { 
features->lcd_index = li; } } features->done |= FEATURE_SCREEN; break; } } } static void __init mini6410_machine_init(void) { u32 cs1; struct mini6410_features_t features = { 0 }; printk(KERN_INFO "MINI6410: Option string mini6410=%s\n", mini6410_features_str); /* Parse the feature string */ mini6410_parse_features(&features, mini6410_features_str); mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index]; printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n", mini6410_lcd_pdata.win[0]->win_mode.xres, mini6410_lcd_pdata.win[0]->win_mode.yres); s3c_nand_set_platdata(&mini6410_nand_info); s3c_fb_set_platdata(&mini6410_lcd_pdata); s3c24xx_ts_set_platdata(&s3c_ts_platform); /* configure nCS1 width to 16 bits */ cs1 = __raw_readl(S3C64XX_SROM_BW) & ~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT); cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) | (1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) | (1 << S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) << S3C64XX_SROM_BW__NCS1__SHIFT; __raw_writel(cs1, S3C64XX_SROM_BW); /* set timing for nCS1 suitable for ethernet chip */ __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); gpio_request(S3C64XX_GPF(15), "LCD power"); gpio_request(S3C64XX_GPE(0), "LCD power"); platform_add_devices(mini6410_devices, ARRAY_SIZE(mini6410_devices)); } MACHINE_START(MINI6410, "MINI6410") /* Maintainer: Darius Augulis <augulis.darius@gmail.com> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = mini6410_map_io, .init_machine = mini6410_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
LoliTeam/android_kernel_jiayu_s3_h560
drivers/input/tablet/kbtab.c
2571
5228
#include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/usb/input.h> #include <asm/unaligned.h> /* * Version Information * v0.0.1 - Original, extremely basic version, 2.4.xx only * v0.0.2 - Updated, works with 2.5.62 and 2.4.20; * - added pressure-threshold modules param code from * Alex Perry <alex.perry@ieee.org> */ #define DRIVER_VERSION "v0.0.2" #define DRIVER_AUTHOR "Josh Myer <josh@joshisanerd.com>" #define DRIVER_DESC "USB KB Gear JamStudio Tablet driver" #define DRIVER_LICENSE "GPL" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); #define USB_VENDOR_ID_KBGEAR 0x084e static int kb_pressure_click = 0x10; module_param(kb_pressure_click, int, 0); MODULE_PARM_DESC(kb_pressure_click, "pressure threshold for clicks"); struct kbtab { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_device *usbdev; struct usb_interface *intf; struct urb *irq; char phys[32]; }; static void kbtab_irq(struct urb *urb) { struct kbtab *kbtab = urb->context; unsigned char *data = kbtab->data; struct input_dev *dev = kbtab->dev; int pressure; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&kbtab->intf->dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(&kbtab->intf->dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } input_report_key(dev, BTN_TOOL_PEN, 1); input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3])); /*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/ input_report_key(dev, BTN_RIGHT, data[0] & 0x02); pressure = data[5]; if (kb_pressure_click == -1) input_report_abs(dev, ABS_PRESSURE, pressure); else input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 
1 : 0); input_sync(dev); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&kbtab->intf->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static struct usb_device_id kbtab_ids[] = { { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, { } }; MODULE_DEVICE_TABLE(usb, kbtab_ids); static int kbtab_open(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); kbtab->irq->dev = kbtab->usbdev; if (usb_submit_urb(kbtab->irq, GFP_KERNEL)) return -EIO; return 0; } static void kbtab_close(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); usb_kill_urb(kbtab->irq); } static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct kbtab *kbtab; struct input_dev *input_dev; int error = -ENOMEM; kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) goto fail1; kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma); if (!kbtab->data) goto fail1; kbtab->irq = usb_alloc_urb(0, GFP_KERNEL); if (!kbtab->irq) goto fail2; kbtab->usbdev = dev; kbtab->intf = intf; kbtab->dev = input_dev; usb_make_path(dev, kbtab->phys, sizeof(kbtab->phys)); strlcat(kbtab->phys, "/input0", sizeof(kbtab->phys)); input_dev->name = "KB Gear Tablet"; input_dev->phys = kbtab->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, kbtab); input_dev->open = kbtab_open; input_dev->close = kbtab_close; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, 
ABS_PRESSURE, 0, 0xff, 0, 0); endpoint = &intf->cur_altsetting->endpoint[0].desc; usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, kbtab_irq, kbtab, endpoint->bInterval); kbtab->irq->transfer_dma = kbtab->data_dma; kbtab->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(kbtab->dev); if (error) goto fail3; usb_set_intfdata(intf, kbtab); return 0; fail3: usb_free_urb(kbtab->irq); fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma); fail1: input_free_device(input_dev); kfree(kbtab); return error; } static void kbtab_disconnect(struct usb_interface *intf) { struct kbtab *kbtab = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); input_unregister_device(kbtab->dev); usb_free_urb(kbtab->irq); usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma); kfree(kbtab); } static struct usb_driver kbtab_driver = { .name = "kbtab", .probe = kbtab_probe, .disconnect = kbtab_disconnect, .id_table = kbtab_ids, }; module_usb_driver(kbtab_driver);
gpl-2.0
chaosmaster/android_kernel_amazon_ford
arch/arm/mach-imx/clk-pfd.c
2571
3125
/* * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/err.h> #include "clk.h" /** * struct clk_pfd - IMX PFD clock * @clk_hw: clock source * @reg: PFD register address * @idx: the index of PFD encoded in the register * * PFD clock found on i.MX6 series. Each register for PFD has 4 clk_pfd * data encoded, and member idx is used to specify the one. And each * register has SET, CLR and TOG registers at offset 0x4 0x8 and 0xc. */ struct clk_pfd { struct clk_hw hw; void __iomem *reg; u8 idx; }; #define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw) #define SET 0x4 #define CLR 0x8 #define OTG 0xc static int clk_pfd_enable(struct clk_hw *hw) { struct clk_pfd *pfd = to_clk_pfd(hw); writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR); return 0; } static void clk_pfd_disable(struct clk_hw *hw) { struct clk_pfd *pfd = to_clk_pfd(hw); writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET); } static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pfd *pfd = to_clk_pfd(hw); u64 tmp = parent_rate; u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f; tmp *= 18; do_div(tmp, frac); return tmp; } static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { u64 tmp = *prate; u8 frac; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = tmp; if (frac < 12) frac = 12; else if (frac > 35) frac = 35; tmp = *prate; tmp *= 18; do_div(tmp, frac); return tmp; } static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct 
clk_pfd *pfd = to_clk_pfd(hw); u64 tmp = parent_rate; u8 frac; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = tmp; if (frac < 12) frac = 12; else if (frac > 35) frac = 35; writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR); writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET); return 0; } static const struct clk_ops clk_pfd_ops = { .enable = clk_pfd_enable, .disable = clk_pfd_disable, .recalc_rate = clk_pfd_recalc_rate, .round_rate = clk_pfd_round_rate, .set_rate = clk_pfd_set_rate, }; struct clk *imx_clk_pfd(const char *name, const char *parent_name, void __iomem *reg, u8 idx) { struct clk_pfd *pfd; struct clk *clk; struct clk_init_data init; pfd = kzalloc(sizeof(*pfd), GFP_KERNEL); if (!pfd) return ERR_PTR(-ENOMEM); pfd->reg = reg; pfd->idx = idx; init.name = name; init.ops = &clk_pfd_ops; init.flags = 0; init.parent_names = &parent_name; init.num_parents = 1; pfd->hw.init = &init; clk = clk_register(NULL, &pfd->hw); if (IS_ERR(clk)) kfree(pfd); return clk; }
gpl-2.0
croniccorey/OnePlus2-Kernel
net/netfilter/nf_conntrack_proto_udplite.c
3083
11110
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * (C) 2007 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/udp.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> #include <net/checksum.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_log.h> enum udplite_conntrack { UDPLITE_CT_UNREPLIED, UDPLITE_CT_REPLIED, UDPLITE_CT_MAX }; static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = { [UDPLITE_CT_UNREPLIED] = 30*HZ, [UDPLITE_CT_REPLIED] = 180*HZ, }; static int udplite_net_id __read_mostly; struct udplite_net { struct nf_proto_net pn; unsigned int timeouts[UDPLITE_CT_MAX]; }; static inline struct udplite_net *udplite_pernet(struct net *net) { return net_generic(net, udplite_net_id); } static bool udplite_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { const struct udphdr *hp; struct udphdr _hdr; hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (hp == NULL) return false; tuple->src.u.udp.port = hp->source; tuple->dst.u.udp.port = hp->dest; return true; } static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->src.u.udp.port = orig->dst.u.udp.port; tuple->dst.u.udp.port = orig->src.u.udp.port; return true; } /* Print out the per-protocol part of the tuple. 
*/ static int udplite_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { return seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.udp.port), ntohs(tuple->dst.u.udp.port)); } static unsigned int *udplite_get_timeouts(struct net *net) { return udplite_pernet(net)->timeouts; } /* Returns verdict for packet, and may modify conntracktype */ static int udplite_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { /* If we've seen traffic both ways, this is some kind of UDP stream. Extend timeout. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDPLITE_CT_REPLIED]); /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDPLITE_CT_UNREPLIED]); } return NF_ACCEPT; } /* Called when a new connection for this protocol found. */ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { return true; } static int udplite_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { unsigned int udplen = skb->len - dataoff; const struct udphdr *hdr; struct udphdr _hdr; unsigned int cscov; /* Header is too small? 
*/ hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (hdr == NULL) { if (LOG_INVALID(net, IPPROTO_UDPLITE)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udplite: short packet "); return -NF_ACCEPT; } cscov = ntohs(hdr->len); if (cscov == 0) cscov = udplen; else if (cscov < sizeof(*hdr) || cscov > udplen) { if (LOG_INVALID(net, IPPROTO_UDPLITE)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udplite: invalid checksum coverage "); return -NF_ACCEPT; } /* UDPLITE mandates checksums */ if (!hdr->check) { if (LOG_INVALID(net, IPPROTO_UDPLITE)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udplite: checksum missing "); return -NF_ACCEPT; } /* Checksum invalid? Ignore. */ if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, pf)) { if (LOG_INVALID(net, IPPROTO_UDPLITE)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udplite: bad UDPLite checksum "); return -NF_ACCEPT; } return NF_ACCEPT; } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeouts = data; struct udplite_net *un = udplite_pernet(net); /* set default timeouts for UDPlite. 
*/ timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED]; timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED]; if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) { timeouts[UDPLITE_CT_UNREPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ; } if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) { timeouts[UDPLITE_CT_REPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ; } return 0; } static int udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) || nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, htonl(timeouts[UDPLITE_CT_REPLIED] / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = { [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 }, [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table udplite_sysctl_table[] = { { .procname = "nf_conntrack_udplite_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_udplite_timeout_stream", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #endif /* CONFIG_SYSCTL */ static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn, struct udplite_net *un) { #ifdef CONFIG_SYSCTL if (pn->ctl_table) return 0; pn->ctl_table = kmemdup(udplite_sysctl_table, sizeof(udplite_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED]; pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED]; #endif return 0; } static int udplite_init_net(struct net *net, u_int16_t proto) { struct udplite_net *un = udplite_pernet(net); struct nf_proto_net *pn = &un->pn; if (!pn->users) { int i; for (i = 
0 ; i < UDPLITE_CT_MAX; i++) un->timeouts[i] = udplite_timeouts[i]; } return udplite_kmemdup_sysctl_table(pn, un); } static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = { .l3proto = PF_INET, .l4proto = IPPROTO_UDPLITE, .name = "udplite", .pkt_to_tuple = udplite_pkt_to_tuple, .invert_tuple = udplite_invert_tuple, .print_tuple = udplite_print_tuple, .packet = udplite_packet, .get_timeouts = udplite_get_timeouts, .new = udplite_new, .error = udplite_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = udplite_timeout_nlattr_to_obj, .obj_to_nlattr = udplite_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDPLITE_MAX, .nla_policy = udplite_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &udplite_net_id, .init_net = udplite_init_net, }; static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = { .l3proto = PF_INET6, .l4proto = IPPROTO_UDPLITE, .name = "udplite", .pkt_to_tuple = udplite_pkt_to_tuple, .invert_tuple = udplite_invert_tuple, .print_tuple = udplite_print_tuple, .packet = udplite_packet, .get_timeouts = udplite_get_timeouts, .new = udplite_new, .error = udplite_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = udplite_timeout_nlattr_to_obj, .obj_to_nlattr = udplite_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDPLITE_MAX, .nla_policy = 
udplite_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &udplite_net_id, .init_net = udplite_init_net, }; static int udplite_net_init(struct net *net) { int ret = 0; ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite4); if (ret < 0) { pr_err("nf_conntrack_udplite4: pernet registration failed.\n"); goto out; } ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite6); if (ret < 0) { pr_err("nf_conntrack_udplite6: pernet registration failed.\n"); goto cleanup_udplite4; } return 0; cleanup_udplite4: nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4); out: return ret; } static void udplite_net_exit(struct net *net) { nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite6); nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4); } static struct pernet_operations udplite_net_ops = { .init = udplite_net_init, .exit = udplite_net_exit, .id = &udplite_net_id, .size = sizeof(struct udplite_net), }; static int __init nf_conntrack_proto_udplite_init(void) { int ret; ret = register_pernet_subsys(&udplite_net_ops); if (ret < 0) goto out_pernet; ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); if (ret < 0) goto out_udplite4; ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite6); if (ret < 0) goto out_udplite6; return 0; out_udplite6: nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); out_udplite4: unregister_pernet_subsys(&udplite_net_ops); out_pernet: return ret; } static void __exit nf_conntrack_proto_udplite_exit(void) { nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); unregister_pernet_subsys(&udplite_net_ops); } module_init(nf_conntrack_proto_udplite_init); module_exit(nf_conntrack_proto_udplite_exit); MODULE_LICENSE("GPL");
gpl-2.0
ajopanoor/popcorn
arch/arm/mach-msm/clock-pcom.c
3595
3153
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/ctype.h> #include <linux/stddef.h> #include <mach/clk.h> #include "proc_comm.h" #include "clock.h" #include "clock-pcom.h" /* * glue for the proc_comm interface */ int pc_clk_enable(unsigned id) { int rc = msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } void pc_clk_disable(unsigned id) { msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL); } int pc_clk_reset(unsigned id, enum clk_reset_action action) { int rc; if (action == CLK_RESET_ASSERT) rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL); else rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } int pc_clk_set_rate(unsigned id, unsigned rate) { /* The rate _might_ be rounded off to the nearest KHz value by the * remote function. So a return value of 0 doesn't necessarily mean * that the exact rate was set successfully. */ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } int pc_clk_set_min_rate(unsigned id, unsigned rate) { int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate); if (rc < 0) return rc; else return (int)id < 0 ? 
-EINVAL : 0; } int pc_clk_set_max_rate(unsigned id, unsigned rate) { int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } int pc_clk_set_flags(unsigned id, unsigned flags) { int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags); if (rc < 0) return rc; else return (int)id < 0 ? -EINVAL : 0; } unsigned pc_clk_get_rate(unsigned id) { if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL)) return 0; else return id; } unsigned pc_clk_is_enabled(unsigned id) { if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL)) return 0; else return id; } long pc_clk_round_rate(unsigned id, unsigned rate) { /* Not really supported; pc_clk_set_rate() does rounding on it's own. */ return rate; } static bool pc_clk_is_local(unsigned id) { return false; } struct clk_ops clk_ops_pcom = { .enable = pc_clk_enable, .disable = pc_clk_disable, .auto_off = pc_clk_disable, .reset = pc_clk_reset, .set_rate = pc_clk_set_rate, .set_min_rate = pc_clk_set_min_rate, .set_max_rate = pc_clk_set_max_rate, .set_flags = pc_clk_set_flags, .get_rate = pc_clk_get_rate, .is_enabled = pc_clk_is_enabled, .round_rate = pc_clk_round_rate, .is_local = pc_clk_is_local, };
gpl-2.0
psndna88/AGNI-pureSTOCK
net/ipv4/netfilter/nf_nat_tftp.c
3851
1363
/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/udp.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <linux/netfilter/nf_conntrack_tftp.h> MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); MODULE_DESCRIPTION("TFTP NAT helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_nat_tftp"); static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, struct nf_conntrack_expect *exp) { const struct nf_conn *ct = exp->master; exp->saved_proto.udp.port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; if (nf_ct_expect_related(exp) != 0) return NF_DROP; return NF_ACCEPT; } static void __exit nf_nat_tftp_fini(void) { rcu_assign_pointer(nf_nat_tftp_hook, NULL); synchronize_rcu(); } static int __init nf_nat_tftp_init(void) { BUG_ON(nf_nat_tftp_hook != NULL); rcu_assign_pointer(nf_nat_tftp_hook, help); return 0; } module_init(nf_nat_tftp_init); module_exit(nf_nat_tftp_fini);
gpl-2.0
Nothing-Dev/android_kernel_motorola_msm8610
drivers/scsi/libsas/sas_discover.c
4363
13991
/*
 * Serial Attached SCSI (SAS) Discover process
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"

#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"

/* ---------- Basic task processing for discovery purposes ---------- */

/* Initialize the type-specific members of a freshly allocated domain_device. */
void sas_init_dev(struct domain_device *dev)
{
	switch (dev->dev_type) {
	case SAS_END_DEV:
		break;
	case EDGE_DEV:
	case FANOUT_DEV:
		INIT_LIST_HEAD(&dev->ex_dev.children);
		mutex_init(&dev->ex_dev.cmd_mutex);
		break;
	case SATA_DEV:
	case SATA_PM:
	case SATA_PM_PORT:
	case SATA_PENDING:
		INIT_LIST_HEAD(&dev->sata_dev.children);
		break;
	default:
		break;
	}
}

/* ---------- Domain device discovery ---------- */

/**
 * sas_get_port_device -- Discover devices which caused port creation
 * @port: pointer to struct sas_port of interest
 *
 * Devices directly attached to a HA port, have no parent.  This is
 * how we know they are (domain) "root" devices.  All other devices
 * do, and should have their "parent" pointer set appropriately as
 * soon as a child device is discovered.
 */
static int sas_get_port_device(struct asd_sas_port *port)
{
	struct asd_sas_phy *phy;
	struct sas_rphy *rphy;
	struct domain_device *dev;
	int rc = -ENODEV;

	dev = sas_alloc_device();
	if (!dev)
		return -ENOMEM;

	spin_lock_irq(&port->phy_list_lock);
	if (list_empty(&port->phy_list)) {
		spin_unlock_irq(&port->phy_list_lock);
		sas_put_device(dev);
		return -ENODEV;
	}
	/* Snapshot the identification frame from the first phy on the port. */
	phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
	spin_lock(&phy->frame_rcvd_lock);
	memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
					     (size_t)phy->frame_rcvd_size));
	spin_unlock(&phy->frame_rcvd_lock);
	spin_unlock_irq(&port->phy_list_lock);

	/*
	 * First byte 0x34 with SATA OOB presumably marks a device-to-host
	 * FIS, i.e. a directly attached SATA device -- NOTE(review): the
	 * magic signature values below come from the SATA PM signature;
	 * confirm against the SATA spec if touching this.
	 */
	if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
		struct dev_to_host_fis *fis =
			(struct dev_to_host_fis *) dev->frame_rcvd;
		if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
		    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
		    && (fis->device & ~0x10) == 0)
			dev->dev_type = SATA_PM;
		else
			dev->dev_type = SATA_DEV;
		dev->tproto = SAS_PROTOCOL_SATA;
	} else {
		/* Otherwise it is a SAS IDENTIFY address frame. */
		struct sas_identify_frame *id =
			(struct sas_identify_frame *) dev->frame_rcvd;
		dev->dev_type = id->dev_type;
		dev->iproto = id->initiator_bits;
		dev->tproto = id->target_bits;
	}

	sas_init_dev(dev);

	dev->port = port;
	switch (dev->dev_type) {
	case SATA_DEV:
		rc = sas_ata_init(dev);
		if (rc) {
			rphy = NULL;
			break;
		}
		/* fall through */
	case SAS_END_DEV:
		rphy = sas_end_device_alloc(port->port);
		break;
	case EDGE_DEV:
		rphy = sas_expander_alloc(port->port,
					  SAS_EDGE_EXPANDER_DEVICE);
		break;
	case FANOUT_DEV:
		rphy = sas_expander_alloc(port->port,
					  SAS_FANOUT_EXPANDER_DEVICE);
		break;
	default:
		printk("ERROR: Unidentified device type %d\n", dev->dev_type);
		rphy = NULL;
		break;
	}

	if (!rphy) {
		sas_put_device(dev);
		return rc;
	}

	rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
	memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
	sas_fill_in_rphy(dev, rphy);
	sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
	port->port_dev = dev;
	dev->linkrate = port->linkrate;
	dev->min_linkrate = port->linkrate;
	dev->max_linkrate = port->linkrate;
	dev->pathways = port->num_phys;
	memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
	memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
	memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
	port->disc.max_level = 0;
	sas_device_set_phy(dev, port->port);

	dev->rphy = rphy;
	get_device(&dev->rphy->dev);

	/* SATA and SSP end devices are probed later from the disco_list. */
	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
		list_add_tail(&dev->disco_list_node, &port->disco_list);
	else {
		spin_lock_irq(&port->dev_list_lock);
		list_add_tail(&dev->dev_list_node, &port->dev_list);
		spin_unlock_irq(&port->dev_list_lock);
	}

	spin_lock_irq(&port->phy_list_lock);
	list_for_each_entry(phy, &port->phy_list, port_phy_el)
		sas_phy_set_target(phy, dev);
	spin_unlock_irq(&port->phy_list_lock);

	return 0;
}

/* ---------- Discover and Revalidate ---------- */

/*
 * Tell the LLDD about a newly found device; on success a device
 * reference is taken which sas_notify_lldd_dev_gone() drops.
 */
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
	int res = 0;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (i->dft->lldd_dev_found) {
		res = i->dft->lldd_dev_found(dev);
		if (res) {
			printk("sas: driver on pcidev %s cannot handle "
			       "device %llx, error:%d\n",
			       dev_name(sas_ha->dev),
			       SAS_ADDR(dev->sas_addr), res);
		}
		kref_get(&dev->kref);
	}
	return res;
}

/* Counterpart of sas_notify_lldd_dev_found(); drops the extra reference. */
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (i->dft->lldd_dev_gone) {
		i->dft->lldd_dev_gone(dev);
		sas_put_device(dev);
	}
}

/* DISCE_PROBE work: move discovered devices onto dev_list and add rphys. */
static void sas_probe_devices(struct work_struct *work)
{
	struct domain_device *dev, *n;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;

	clear_bit(DISCE_PROBE, &port->disc.pending);

	/* devices must be domain members before link recovery and probe */
	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
		spin_lock_irq(&port->dev_list_lock);
		list_add_tail(&dev->dev_list_node, &port->dev_list);
		spin_unlock_irq(&port->dev_list_lock);
	}

	sas_probe_sata(port);

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
		int err;

		err = sas_rphy_add(dev->rphy);
		if (err)
			sas_fail_probe(dev, __func__, err);
		else
			list_del_init(&dev->disco_list_node);
	}
}

/**
 * sas_discover_end_dev -- discover an end device (SSP, etc)
 * @end: pointer to domain device of interest
 *
 * See comment in sas_discover_sata().
 */
int sas_discover_end_dev(struct domain_device *dev)
{
	int res;

	res = sas_notify_lldd_dev_found(dev);
	if (res)
		return res;
	sas_discover_event(dev->port, DISCE_PROBE);

	return 0;
}

/* ---------- Device registration and unregistration ---------- */

/* kref release callback: final teardown once the last reference is gone. */
void sas_free_device(struct kref *kref)
{
	struct domain_device *dev = container_of(kref, typeof(*dev), kref);

	put_device(&dev->rphy->dev);
	dev->rphy = NULL;

	if (dev->parent)
		sas_put_device(dev->parent);

	sas_port_put_phy(dev->phy);
	dev->phy = NULL;

	/* remove the phys and ports, everything else should be gone */
	if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
		kfree(dev->ex_dev.ex_phy);

	if (dev_is_sata(dev) && dev->sata_dev.ap) {
		ata_sas_port_destroy(dev->sata_dev.ap);
		dev->sata_dev.ap = NULL;
	}

	kfree(dev);
}

/* Unlink @dev from its port/parent and drop the list's reference. */
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
	sas_notify_lldd_dev_gone(dev);
	if (!dev->parent)
		dev->port->port_dev = NULL;
	else
		list_del_init(&dev->siblings);

	spin_lock_irq(&port->dev_list_lock);
	list_del_init(&dev->dev_list_node);
	spin_unlock_irq(&port->dev_list_lock);
	sas_put_device(dev);
}

/* DISCE_DESTRUCT work: tear down devices queued on the destroy_list. */
static void sas_destruct_devices(struct work_struct *work)
{
	struct domain_device *dev, *n;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;

	clear_bit(DISCE_DESTRUCT, &port->disc.pending);

	list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
		list_del_init(&dev->disco_list_node);

		sas_remove_children(&dev->rphy->dev);
		sas_rphy_delete(dev->rphy);
		sas_unregister_common_dev(port, dev);
	}
}

/*
 * Unregister one device: either free it directly (never probed) or
 * queue it for asynchronous destruction via DISCE_DESTRUCT.
 */
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
	if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
	    !list_empty(&dev->disco_list_node)) {
		/* this rphy never saw sas_rphy_add */
		list_del_init(&dev->disco_list_node);
		sas_rphy_free(dev->rphy);
		sas_unregister_common_dev(port, dev);
		return;
	}

	if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
		sas_rphy_unlink(dev->rphy);
		list_move_tail(&dev->disco_list_node, &port->destroy_list);
		sas_discover_event(dev->port, DISCE_DESTRUCT);
	}
}

/* Unregister every device on the port (registered and still-discovering). */
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
{
	struct domain_device *dev, *n;

	list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
		if (gone)
			set_bit(SAS_DEV_GONE, &dev->state);
		sas_unregister_dev(port, dev);
	}

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
		sas_unregister_dev(port, dev);

	port->port->rphy = NULL;
}

/* Record (and pin) the phy through which @dev was last seen. */
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
{
	struct sas_ha_struct *ha;
	struct sas_phy *new_phy;

	if (!dev)
		return;

	ha = dev->port->ha;
	new_phy = sas_port_get_phy(port);

	/* pin and record last seen phy */
	spin_lock_irq(&ha->phy_port_lock);
	if (new_phy) {
		sas_port_put_phy(dev->phy);
		dev->phy = new_phy;
	}
	spin_unlock_irq(&ha->phy_port_lock);
}

/* ---------- Discovery and Revalidation ---------- */

/**
 * sas_discover_domain -- discover the domain
 * @port: port to the domain of interest
 *
 * NOTE: this process _must_ quit (return) as soon as any connection
 * errors are encountered.  Connection recovery is done elsewhere.
 * Discover process only interrogates devices in order to discover the
 * domain.
 */
static void sas_discover_domain(struct work_struct *work)
{
	struct domain_device *dev;
	int error = 0;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;

	clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);

	if (port->port_dev)
		return;

	error = sas_get_port_device(port);
	if (error)
		return;
	dev = port->port_dev;

	SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
		    task_pid_nr(current));

	switch (dev->dev_type) {
	case SAS_END_DEV:
		error = sas_discover_end_dev(dev);
		break;
	case EDGE_DEV:
	case FANOUT_DEV:
		error = sas_discover_root_expander(dev);
		break;
	case SATA_DEV:
	case SATA_PM:
#ifdef CONFIG_SCSI_SAS_ATA
		error = sas_discover_sata(dev);
		break;
#else
		SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
		/* Fall through */
#endif
	default:
		error = -ENXIO;
		SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
		break;
	}

	/* On any failure, undo sas_get_port_device()'s registrations. */
	if (error) {
		sas_rphy_free(dev->rphy);
		list_del_init(&dev->disco_list_node);
		spin_lock_irq(&port->dev_list_lock);
		list_del_init(&dev->dev_list_node);
		spin_unlock_irq(&port->dev_list_lock);
		sas_put_device(dev);
		port->port_dev = NULL;
	}

	SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
		    task_pid_nr(current), error);
}

/* DISCE_REVALIDATE_DOMAIN work: re-walk the expander topology. */
static void sas_revalidate_domain(struct work_struct *work)
{
	int res = 0;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;
	struct sas_ha_struct *ha = port->ha;

	/* prevent revalidation from finding sata links in recovery */
	mutex_lock(&ha->disco_mutex);
	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
		SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
			    port->id, task_pid_nr(current));
		goto out;
	}

	clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);

	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
		    task_pid_nr(current));

	if (port->port_dev)
		res = sas_ex_revalidate_domain(port->port_dev);

	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
		    port->id, task_pid_nr(current), res);
 out:
	mutex_unlock(&ha->disco_mutex);
}

/* ---------- Events ---------- */

static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
	/* chained work is not subject to SA_HA_DRAINING or
	 * SAS_HA_REGISTERED, because it is either submitted in the
	 * workqueue, or known to be submitted from a context that is
	 * not racing against draining
	 */
	scsi_queue_work(ha->core.shost, &sw->work);
}

/* Queue @sw once per pending @event bit, under the ha state lock. */
static void sas_chain_event(int event, unsigned long *pending,
			    struct sas_work *sw,
			    struct sas_ha_struct *ha)
{
	if (!test_and_set_bit(event, pending)) {
		unsigned long flags;

		spin_lock_irqsave(&ha->state_lock, flags);
		sas_chain_work(ha, sw);
		spin_unlock_irqrestore(&ha->state_lock, flags);
	}
}

/* Kick one of the per-port discovery work items (tolerates a NULL port). */
int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
	struct sas_discovery *disc;

	if (!port)
		return 0;
	disc = &port->disc;

	BUG_ON(ev >= DISC_NUM_EVENTS);

	sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);

	return 0;
}

/**
 * sas_init_disc -- initialize the discovery struct in the port
 * @port: pointer to struct port
 *
 * Called when the ports are being initialized.
 */
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
{
	int i;

	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
		[DISCE_PROBE] = sas_probe_devices,
		[DISCE_DESTRUCT] = sas_destruct_devices,
	};

	disc->pending = 0;
	for (i = 0; i < DISC_NUM_EVENTS; i++) {
		INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
		disc->disc_work[i].port = port;
	}
}
gpl-2.0
SynnyG/android_kernel_xiaomi_cancro
drivers/staging/iio/dac/max517.c
4875
7507
/* * max517.c - Support for Maxim MAX517, MAX518 and MAX519 * * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/err.h> #include "../iio.h" #include "../sysfs.h" #include "dac.h" #include "max517.h" #define MAX517_DRV_NAME "max517" /* Commands */ #define COMMAND_CHANNEL0 0x00 #define COMMAND_CHANNEL1 0x01 /* for MAX518 and MAX519 */ #define COMMAND_PD 0x08 /* Power Down */ enum max517_device_ids { ID_MAX517, ID_MAX518, ID_MAX519, }; struct max517_data { struct iio_dev *indio_dev; struct i2c_client *client; unsigned short vref_mv[2]; }; /* * channel: bit 0: channel 1 * bit 1: channel 2 * (this way, it's possible to set both channels at once) */ static ssize_t max517_set_value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, int channel) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct max517_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; u8 outbuf[4]; /* 1x or 2x command + value */ int outbuf_size = 0; int res; long val; res = strict_strtol(buf, 10, &val); if (res) return res; if (val < 0 || val > 255) return -EINVAL; if (channel & 1) { outbuf[outbuf_size++] = 
COMMAND_CHANNEL0; outbuf[outbuf_size++] = val; } if (channel & 2) { outbuf[outbuf_size++] = COMMAND_CHANNEL1; outbuf[outbuf_size++] = val; } /* * At this point, there are always 1 or 2 two-byte commands in * outbuf. With 2 commands, the device can set two outputs * simultaneously, latching the values upon the end of the I2C * transfer. */ res = i2c_master_send(client, outbuf, outbuf_size); if (res < 0) return res; return count; } static ssize_t max517_set_value_1(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return max517_set_value(dev, attr, buf, count, 1); } static IIO_DEV_ATTR_OUT_RAW(1, max517_set_value_1, 0); static ssize_t max517_set_value_2(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return max517_set_value(dev, attr, buf, count, 2); } static IIO_DEV_ATTR_OUT_RAW(2, max517_set_value_2, 1); static ssize_t max517_set_value_both(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return max517_set_value(dev, attr, buf, count, 3); } static IIO_DEVICE_ATTR_NAMED(out_voltage1and2_raw, out_voltage1&2_raw, S_IWUSR, NULL, max517_set_value_both, -1); static ssize_t max517_show_scale(struct device *dev, struct device_attribute *attr, char *buf, int channel) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct max517_data *data = iio_priv(indio_dev); /* Corresponds to Vref / 2^(bits) */ unsigned int scale_uv = (data->vref_mv[channel - 1] * 1000) >> 8; return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000); } static ssize_t max517_show_scale1(struct device *dev, struct device_attribute *attr, char *buf) { return max517_show_scale(dev, attr, buf, 1); } static IIO_DEVICE_ATTR(out_voltage1_scale, S_IRUGO, max517_show_scale1, NULL, 0); static ssize_t max517_show_scale2(struct device *dev, struct device_attribute *attr, char *buf) { return max517_show_scale(dev, attr, buf, 2); } static IIO_DEVICE_ATTR(out_voltage2_scale, S_IRUGO, max517_show_scale2, 
NULL, 0); /* On MAX517 variant, we have one output */ static struct attribute *max517_attributes[] = { &iio_dev_attr_out_voltage1_raw.dev_attr.attr, &iio_dev_attr_out_voltage1_scale.dev_attr.attr, NULL }; static struct attribute_group max517_attribute_group = { .attrs = max517_attributes, }; /* On MAX518 and MAX519 variant, we have two outputs */ static struct attribute *max518_attributes[] = { &iio_dev_attr_out_voltage1_raw.dev_attr.attr, &iio_dev_attr_out_voltage1_scale.dev_attr.attr, &iio_dev_attr_out_voltage2_raw.dev_attr.attr, &iio_dev_attr_out_voltage2_scale.dev_attr.attr, &iio_dev_attr_out_voltage1and2_raw.dev_attr.attr, NULL }; static struct attribute_group max518_attribute_group = { .attrs = max518_attributes, }; #ifdef CONFIG_PM_SLEEP static int max517_suspend(struct device *dev) { u8 outbuf = COMMAND_PD; return i2c_master_send(to_i2c_client(dev), &outbuf, 1); } static int max517_resume(struct device *dev) { u8 outbuf = 0; return i2c_master_send(to_i2c_client(dev), &outbuf, 1); } static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume); #define MAX517_PM_OPS (&max517_pm_ops) #else #define MAX517_PM_OPS NULL #endif static const struct iio_info max517_info = { .attrs = &max517_attribute_group, .driver_module = THIS_MODULE, }; static const struct iio_info max518_info = { .attrs = &max518_attribute_group, .driver_module = THIS_MODULE, }; static int max517_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max517_data *data; struct iio_dev *indio_dev; struct max517_platform_data *platform_data = client->dev.platform_data; int err; indio_dev = iio_allocate_device(sizeof(*data)); if (indio_dev == NULL) { err = -ENOMEM; goto exit; } data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; /* establish that the iio_dev is a child of the i2c device */ indio_dev->dev.parent = &client->dev; /* reduced attribute set for MAX517 */ if (id->driver_data == ID_MAX517) indio_dev->info = &max517_info; else 
indio_dev->info = &max518_info; indio_dev->modes = INDIO_DIRECT_MODE; /* * Reference voltage on MAX518 and default is 5V, else take vref_mv * from platform_data */ if (id->driver_data == ID_MAX518 || !platform_data) { data->vref_mv[0] = data->vref_mv[1] = 5000; /* mV */ } else { data->vref_mv[0] = platform_data->vref_mv[0]; data->vref_mv[1] = platform_data->vref_mv[1]; } err = iio_device_register(indio_dev); if (err) goto exit_free_device; dev_info(&client->dev, "DAC registered\n"); return 0; exit_free_device: iio_free_device(indio_dev); exit: return err; } static int max517_remove(struct i2c_client *client) { iio_free_device(i2c_get_clientdata(client)); return 0; } static const struct i2c_device_id max517_id[] = { { "max517", ID_MAX517 }, { "max518", ID_MAX518 }, { "max519", ID_MAX519 }, { } }; MODULE_DEVICE_TABLE(i2c, max517_id); static struct i2c_driver max517_driver = { .driver = { .name = MAX517_DRV_NAME, .pm = MAX517_PM_OPS, }, .probe = max517_probe, .remove = max517_remove, .id_table = max517_id, }; module_i2c_driver(max517_driver); MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC"); MODULE_LICENSE("GPL");
gpl-2.0
S3neos/android_kernel_samsung_s3ve3g
arch/x86/crypto/crc32c-intel.c
4875
5156
/*
 * Using hardware provided CRC32 instruction to accelerate the CRC32 disposal.
 * CRC32C polynomial:0x1EDC6F41(BE)/0x82F63B78(LE)
 * CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2A: Instruction Set Reference, A-M
 *
 * Copyright (C) 2008 Intel Corporation
 * Authors: Austin Zhang <austin_zhang@linux.intel.com>
 *          Kent Liu <kent.liu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>

#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#define CHKSUM_BLOCK_SIZE	1
#define CHKSUM_DIGEST_SIZE	4

/* process one machine word (4 or 8 bytes) per CRC32 instruction */
#define SCALE_F	sizeof(unsigned long)

#ifdef CONFIG_X86_64
/* REX.W prefix selects the 64-bit operand form of the crc32 instruction */
#define REX_PRE "0x48, "
#else
#define REX_PRE
#endif

/*
 * Byte-at-a-time tail loop.  The .byte sequence is the raw encoding of
 * "crc32b %cl, %esi" (SSE4.2), emitted literally so the file assembles
 * even with toolchains that do not know the mnemonic.
 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
{
	while (length--) {
		__asm__ __volatile__(
			".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
			:"=S"(crc)
			:"0"(crc), "c"(*data)
		);
		data++;
	}

	return crc;
}

/*
 * Word-at-a-time main loop (crc32 on full unsigned longs), then the
 * byte helper mops up the remainder.
 */
static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
{
	unsigned int iquotient = len / SCALE_F;
	unsigned int iremainder = len % SCALE_F;
	unsigned long *ptmp = (unsigned long *)p;

	while (iquotient--) {
		__asm__ __volatile__(
			".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
			:"=S"(crc)
			:"0"(crc), "c"(*ptmp)
		);
		ptmp++;
	}

	if (iremainder)
		crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp,
				 iremainder);

	return crc;
}

/*
 * Setting the seed allows arbitrary accumulators and flexible XOR policy
 * If your algorithm starts with ~0, then XOR with ~0 before you set
 * the seed.
 */
static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
			unsigned int keylen)
{
	u32 *mctx = crypto_shash_ctx(hash);

	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	*mctx = le32_to_cpup((__le32 *)key);
	return 0;
}

/* Start a digest from the tfm-wide seed (default ~0, see cra_init). */
static int crc32c_intel_init(struct shash_desc *desc)
{
	u32 *mctx = crypto_shash_ctx(desc->tfm);
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = *mctx;

	return 0;
}

static int crc32c_intel_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = crc32c_intel_le_hw(*crcp, data, len);
	return 0;
}

/* Final CRC is bit-inverted and emitted little-endian, per CRC32C. */
static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
				u8 *out)
{
	*(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
	return 0;
}

static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out);
}

static int crc32c_intel_final(struct shash_desc *desc, u8 *out)
{
	u32 *crcp = shash_desc_ctx(desc);

	*(__le32 *)out = ~cpu_to_le32p(crcp);
	return 0;
}

static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data,
			       unsigned int len, u8 *out)
{
	return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
				    out);
}

/* Default seed is all-ones, the conventional CRC32C starting value. */
static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
{
	u32 *key = crypto_tfm_ctx(tfm);

	*key = ~0;

	return 0;
}

static struct shash_alg alg = {
	.setkey			=	crc32c_intel_setkey,
	.init			=	crc32c_intel_init,
	.update			=	crc32c_intel_update,
	.final			=	crc32c_intel_final,
	.finup			=	crc32c_intel_finup,
	.digest			=	crc32c_intel_digest,
	.descsize		=	sizeof(u32),
	.digestsize		=	CHKSUM_DIGEST_SIZE,
	.base			=	{
		.cra_name		=	"crc32c",
		.cra_driver_name	=	"crc32c-intel",
		.cra_priority		=	200,
		.cra_blocksize		=	CHKSUM_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(u32),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	crc32c_intel_cra_init,
	}
};

static const struct x86_cpu_id crc32c_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XMM4_2),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);

static int __init crc32c_intel_mod_init(void)
{
	/* only register when the CPU advertises SSE4.2 */
	if (!x86_match_cpu(crc32c_cpu_id))
		return -ENODEV;
	return crypto_register_shash(&alg);
}

static void __exit crc32c_intel_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(crc32c_intel_mod_init);
module_exit(crc32c_intel_mod_fini);

MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("crc32c");
MODULE_ALIAS("crc32c-intel");
gpl-2.0
mustermaxmueller/android_kernel_sony_msm8974_togari_5.x
sound/ppc/pmac.c
5643
38204
/* * PMac DBDMA lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * code based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <asm/irq.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include "pmac.h" #include <sound/pcm_params.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> /* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */ static int awacs_freqs[8] = { 44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350 }; /* fixed frequency table for tumbler */ static int tumbler_freqs[1] = { 44100 }; /* * we will allocate a single 'emergency' dbdma cmd block to use if the * tx status comes up "DEAD". This happens on some PowerComputing Pmac * clones, either owing to a bug in dbdma or some interaction between * IDE and sound. However, this measure would deal with DEAD status if * it appeared elsewhere. 
*/ static struct pmac_dbdma emergency_dbdma; static int emergency_in_use; /* * allocate DBDMA command arrays */ static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size) { unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1); rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize, &rec->dma_base, GFP_KERNEL); if (rec->space == NULL) return -ENOMEM; rec->size = size; memset(rec->space, 0, rsize); rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space); rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space); return 0; } static void snd_pmac_dbdma_free(struct snd_pmac *chip, struct pmac_dbdma *rec) { if (rec->space) { unsigned int rsize = sizeof(struct dbdma_cmd) * (rec->size + 1); dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base); } } /* * pcm stuff */ /* * look up frequency table */ unsigned int snd_pmac_rate_index(struct snd_pmac *chip, struct pmac_stream *rec, unsigned int rate) { int i, ok, found; ok = rec->cur_freqs; if (rate > chip->freq_table[0]) return 0; found = 0; for (i = 0; i < chip->num_freqs; i++, ok >>= 1) { if (! (ok & 1)) continue; found = i; if (rate >= chip->freq_table[i]) break; } return found; } /* * check whether another stream is active */ static inline int another_stream(int stream) { return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; } /* * allocate buffers */ static int snd_pmac_pcm_hw_params(struct snd_pcm_substream *subs, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params)); } /* * release buffers */ static int snd_pmac_pcm_hw_free(struct snd_pcm_substream *subs) { snd_pcm_lib_free_pages(subs); return 0; } /* * get a stream of the opposite direction */ static struct pmac_stream *snd_pmac_get_stream(struct snd_pmac *chip, int stream) { switch (stream) { case SNDRV_PCM_STREAM_PLAYBACK: return &chip->playback; case SNDRV_PCM_STREAM_CAPTURE: return &chip->capture; default: snd_BUG(); return NULL; } } /* * wait while run status is on */ static inline void snd_pmac_wait_ack(struct pmac_stream *rec) { int timeout = 50000; while ((in_le32(&rec->dma->status) & RUN) && timeout-- > 0) udelay(1); } /* * set the format and rate to the chip. * call the lowlevel function if defined (e.g. for AWACS). */ static void snd_pmac_pcm_set_format(struct snd_pmac *chip) { /* set up frequency and format */ out_le32(&chip->awacs->control, chip->control_mask | (chip->rate_index << 8)); out_le32(&chip->awacs->byteswap, chip->format == SNDRV_PCM_FORMAT_S16_LE ? 
1 : 0); if (chip->set_format) chip->set_format(chip); } /* * stop the DMA transfer */ static inline void snd_pmac_dma_stop(struct pmac_stream *rec) { out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16); snd_pmac_wait_ack(rec); } /* * set the command pointer address */ static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd) { out_le32(&rec->dma->cmdptr, cmd->addr); } /* * start the DMA */ static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status) { out_le32(&rec->dma->control, status | (status << 16)); } /* * prepare playback/capture stream */ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { int i; volatile struct dbdma_cmd __iomem *cp; struct snd_pcm_runtime *runtime = subs->runtime; int rate_index; long offset; struct pmac_stream *astr; rec->dma_size = snd_pcm_lib_buffer_bytes(subs); rec->period_size = snd_pcm_lib_period_bytes(subs); rec->nperiods = rec->dma_size / rec->period_size; rec->cur_period = 0; rate_index = snd_pmac_rate_index(chip, rec, runtime->rate); /* set up constraints */ astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; astr->cur_freqs = 1 << rate_index; astr->cur_formats = 1 << runtime->format; chip->rate_index = rate_index; chip->format = runtime->format; /* We really want to execute a DMA stop command, after the AWACS * is initialized. * For reasons I don't understand, it stops the hissing noise * common to many PowerBook G3 systems and random noise otherwise * captured on iBook2's about every third time. -ReneR */ spin_lock_irq(&chip->reg_lock); snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); spin_unlock_irq(&chip->reg_lock); mdelay(5); spin_lock_irq(&chip->reg_lock); /* continuous DMA memory type doesn't provide the physical address, * so we need to resolve the address here... 
	 */
	offset = runtime->dma_addr;
	/* one OUTPUT_MORE/INPUT_MORE command per period of the buffer */
	for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) {
		st_le32(&cp->phy_addr, offset);
		st_le16(&cp->req_count, rec->period_size);
		/*st_le16(&cp->res_count, 0);*/
		st_le16(&cp->xfer_status, 0);
		offset += rec->period_size;
	}
	/* make loop */
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, rec->cmd.addr);

	snd_pmac_dma_stop(rec);
	snd_pmac_dma_set_command(rec, &rec->cmd);
	spin_unlock_irq(&chip->reg_lock);

	return 0;
}

/*
 * PCM trigger/stop
 *
 * START/RESUME: patch the per-period command opcodes for the right
 * direction and kick the channel.  STOP/SUSPEND: halt the channel and
 * overwrite every command with DBDMA_STOP.
 */
static int snd_pmac_pcm_trigger(struct snd_pmac *chip, struct pmac_stream *rec,
				struct snd_pcm_substream *subs, int cmd)
{
	volatile struct dbdma_cmd __iomem *cp;
	int i, command;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rec->running)
			return -EBUSY;
		command = (subs->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			   OUTPUT_MORE : INPUT_MORE) + INTR_ALWAYS;
		spin_lock(&chip->reg_lock);
		snd_pmac_beep_stop(chip);	/* beep shares the output DMA */
		snd_pmac_pcm_set_format(chip);
		for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
			out_le16(&cp->command, command);
		snd_pmac_dma_set_command(rec, &rec->cmd);
		(void)in_le32(&rec->dma->status);	/* read back to flush */
		snd_pmac_dma_run(rec, RUN|WAKE);
		rec->running = 1;
		spin_unlock(&chip->reg_lock);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		spin_lock(&chip->reg_lock);
		rec->running = 0;
		/*printk(KERN_DEBUG "stopped!!\n");*/
		snd_pmac_dma_stop(rec);
		for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
			out_le16(&cp->command, DBDMA_STOP);
		spin_unlock(&chip->reg_lock);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * return the current pointer
 *
 * Position = completed periods plus, when the current command is
 * ACTIVE (or DEAD), the bytes it has already transferred
 * (period_size - res_count).
 */
inline
static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip,
					      struct pmac_stream *rec,
					      struct snd_pcm_substream *subs)
{
	int count = 0;

#if 1 /* hmm.. how can we get the current dma pointer?? */
	int stat;
	volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];
	stat = ld_le16(&cp->xfer_status);
	if (stat & (ACTIVE|DEAD)) {
		/* res_count = bytes this command has NOT yet transferred */
		count = in_le16(&cp->res_count);
		if (count)
			count = rec->period_size - count;
	}
#endif
	count += rec->cur_period * rec->period_size;
	/*printk(KERN_DEBUG "pointer=%d\n", count);*/
	return bytes_to_frames(subs->runtime, count);
}

/*
 * playback
 *
 * Thin snd_pcm_ops wrappers binding the shared helpers to the
 * playback stream.
 */

static int snd_pmac_playback_prepare(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_prepare(chip, &chip->playback, subs);
}

static int snd_pmac_playback_trigger(struct snd_pcm_substream *subs,
				     int cmd)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_trigger(chip, &chip->playback, subs, cmd);
}

static snd_pcm_uframes_t snd_pmac_playback_pointer(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_pointer(chip, &chip->playback, subs);
}

/*
 * capture
 *
 * Same wrappers for the capture stream.
 */

static int snd_pmac_capture_prepare(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_prepare(chip, &chip->capture, subs);
}

static int snd_pmac_capture_trigger(struct snd_pcm_substream *subs,
				    int cmd)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_trigger(chip, &chip->capture, subs, cmd);
}

static snd_pcm_uframes_t snd_pmac_capture_pointer(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_pointer(chip, &chip->capture, subs);
}

/*
 * Handle DEAD DMA transfers:
 * if the TX status comes up "DEAD" - reported on some Power Computing machines
 * we need to re-start the dbdma - but from a different physical start address
 * and with a different transfer length.  It would get very messy to do this
 * with the normal dbdma_cmd blocks - we would have to re-write the buffer start
 * addresses each time.
 * So, we will keep a single dbdma_cmd block which can be fiddled with.
 * When DEAD status is first reported the content of the faulted dbdma block is
 * copied into the emergency buffer and we note that the buffer is in use.
 * we then bump the start physical address by the amount that was successfully
 * output before it died.
 * On any subsequent DEAD result we just do the bump-ups (we know that we are
 * already using the emergency dbdma_cmd).
 * CHECK: this just tries to "do it". It is possible that we should abandon
 * xfers when the number of residual bytes gets below a certain value - I can
 * see that this might cause a loop-forever if a too small transfer causes
 * DEAD status. However this is a TODO for now - we'll see what gets reported.
 * When we get a successful transfer result with the emergency buffer we just
 * pretend that it completed using the original dmdma_cmd and carry on.  The
 * 'next_cmd' field will already point back to the original loop of blocks.
 */
static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
					  volatile struct dbdma_cmd __iomem *cp)
{
	unsigned short req, res ;
	unsigned int phy ;

	/* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */

	/* to clear DEAD status we must first clear RUN
	   set it to quiescent to be on the safe side */
	(void)in_le32(&rec->dma->status);
	out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);

	if (!emergency_in_use) { /* new problem: copy the faulted block aside */
		memcpy((void *)emergency_dbdma.cmds, (void *)cp,
		       sizeof(struct dbdma_cmd));
		emergency_in_use = 1;
		/* reset the original block so it looks fresh when the
		 * loop returns to it after the emergency command ran */
		st_le16(&cp->xfer_status, 0);
		st_le16(&cp->req_count, rec->period_size);
		cp = emergency_dbdma.cmds;
	}

	/* now bump the values to reflect the amount
	   we haven't yet shifted */
	req = ld_le16(&cp->req_count);
	res = ld_le16(&cp->res_count);
	phy = ld_le32(&cp->phy_addr);
	phy += (req - res);	/* skip the bytes that already made it out */
	st_le16(&cp->req_count, res);
	st_le16(&cp->res_count, 0);
	st_le16(&cp->xfer_status, 0);
	st_le32(&cp->phy_addr, phy);

	/* branch back into the normal loop at the next period's command */
	st_le32(&cp->cmd_dep, rec->cmd.addr
		+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));

	st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);

	/* point at our patched up command block */
	out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);

	/* we must re-start the controller */
	(void)in_le32(&rec->dma->status);
	/* should complete clearing the DEAD status */
	out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
}

/*
 * update playback/capture pointer from interrupts
 *
 * Walks the command list from the current period, retiring every
 * completed command, recycling it for the next loop iteration and
 * notifying the PCM core (period_elapsed is called with the register
 * lock dropped).  DEAD commands are handed to the patch-up path above.
 */
static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
{
	volatile struct dbdma_cmd __iomem *cp;
	int c;
	int stat;

	spin_lock(&chip->reg_lock);
	if (rec->running) {
		for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */

			if (emergency_in_use)   /* already using DEAD xfer? */
				cp = emergency_dbdma.cmds;
			else
				cp = &rec->cmd.cmds[rec->cur_period];

			stat = ld_le16(&cp->xfer_status);

			if (stat & DEAD) {
				snd_pmac_pcm_dead_xfer(rec, cp);
				break; /* this block is still going */
			}

			if (emergency_in_use)
				emergency_in_use = 0 ; /* done that */

			if (!
(stat & ACTIVE)) break; /*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/ st_le16(&cp->xfer_status, 0); st_le16(&cp->req_count, rec->period_size); /*st_le16(&cp->res_count, 0);*/ rec->cur_period++; if (rec->cur_period >= rec->nperiods) { rec->cur_period = 0; } spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(rec->substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } /* * hw info */ static struct snd_pcm_hardware snd_pmac_playback = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; static struct snd_pcm_hardware snd_pmac_capture = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; #if 0 // NYI static int snd_pmac_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); int i, freq_table[8], num_freqs; if (! 
rec) return -EINVAL; num_freqs = 0; for (i = chip->num_freqs - 1; i >= 0; i--) { if (rec->cur_freqs & (1 << i)) freq_table[num_freqs++] = chip->freq_table[i]; } return snd_interval_list(hw_param_interval(params, rule->var), num_freqs, freq_table, 0); } static int snd_pmac_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); if (! rec) return -EINVAL; return snd_mask_refine_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), rec->cur_formats); } #endif // NYI static int snd_pmac_pcm_open(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; int i; /* look up frequency table and fill bit mask */ runtime->hw.rates = 0; for (i = 0; i < chip->num_freqs; i++) if (chip->freqs_ok & (1 << i)) runtime->hw.rates |= snd_pcm_rate_to_rate_bit(chip->freq_table[i]); /* check for minimum and maximum rates */ for (i = 0; i < chip->num_freqs; i++) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_max = chip->freq_table[i]; break; } } for (i = chip->num_freqs - 1; i >= 0; i--) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_min = chip->freq_table[i]; break; } } runtime->hw.formats = chip->formats_ok; if (chip->can_capture) { if (! chip->can_duplex) runtime->hw.info |= SNDRV_PCM_INFO_HALF_DUPLEX; runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX; } runtime->private_data = rec; rec->substream = subs; #if 0 /* FIXME: still under development.. 
*/ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_pmac_hw_rule_rate, chip, rec->stream, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, snd_pmac_hw_rule_format, chip, rec->stream, -1); #endif runtime->hw.periods_max = rec->cmd.size - 1; /* constraints to fix choppy sound */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); return 0; } static int snd_pmac_pcm_close(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct pmac_stream *astr; snd_pmac_dma_stop(rec); astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; /* reset constraints */ astr->cur_freqs = chip->freqs_ok; astr->cur_formats = chip->formats_ok; return 0; } static int snd_pmac_playback_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_playback; return snd_pmac_pcm_open(chip, &chip->playback, subs); } static int snd_pmac_capture_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_capture; return snd_pmac_pcm_open(chip, &chip->capture, subs); } static int snd_pmac_playback_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->playback, subs); } static int snd_pmac_capture_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->capture, subs); } /* */ static struct snd_pcm_ops snd_pmac_playback_ops = { .open = snd_pmac_playback_open, .close = snd_pmac_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_playback_prepare, .trigger = snd_pmac_playback_trigger, .pointer = snd_pmac_playback_pointer, }; static struct snd_pcm_ops snd_pmac_capture_ops = { .open = snd_pmac_capture_open, .close = snd_pmac_capture_close, .ioctl 
= snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_capture_prepare, .trigger = snd_pmac_capture_trigger, .pointer = snd_pmac_capture_pointer, }; int __devinit snd_pmac_pcm_new(struct snd_pmac *chip) { struct snd_pcm *pcm; int err; int num_captures = 1; if (! chip->can_capture) num_captures = 0; err = snd_pcm_new(chip->card, chip->card->driver, 0, 1, num_captures, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_pmac_playback_ops); if (chip->can_capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_pmac_capture_ops); pcm->private_data = chip; pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->formats_ok = SNDRV_PCM_FMTBIT_S16_BE; if (chip->can_byte_swap) chip->formats_ok |= SNDRV_PCM_FMTBIT_S16_LE; chip->playback.cur_formats = chip->formats_ok; chip->capture.cur_formats = chip->formats_ok; chip->playback.cur_freqs = chip->freqs_ok; chip->capture.cur_freqs = chip->freqs_ok; /* preallocate 64k buffer */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &chip->pdev->dev, 64 * 1024, 64 * 1024); return 0; } static void snd_pmac_dbdma_reset(struct snd_pmac *chip) { out_le32(&chip->playback.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->playback); out_le32(&chip->capture.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->capture); } /* * handling beep */ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long addr, int speed) { struct pmac_stream *rec = &chip->playback; snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->req_count, bytes); st_le16(&chip->extra_dma.cmds->xfer_status, 0); st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr); st_le32(&chip->extra_dma.cmds->phy_addr, addr); st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS); out_le32(&chip->awacs->control, (in_le32(&chip->awacs->control) & 
~0x1f00) | (speed << 8)); out_le32(&chip->awacs->byteswap, 0); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); } void snd_pmac_beep_dma_stop(struct snd_pmac *chip) { snd_pmac_dma_stop(&chip->playback); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_pcm_set_format(chip); /* reset format */ } /* * interrupt handlers */ static irqreturn_t snd_pmac_tx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->playback); return IRQ_HANDLED; } static irqreturn_t snd_pmac_rx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->capture); return IRQ_HANDLED; } static irqreturn_t snd_pmac_ctrl_intr(int irq, void *devid) { struct snd_pmac *chip = devid; int ctrl = in_le32(&chip->awacs->control); /*printk(KERN_DEBUG "pmac: control interrupt.. 0x%x\n", ctrl);*/ if (ctrl & MASK_PORTCHG) { /* do something when headphone is plugged/unplugged? */ if (chip->update_automute) chip->update_automute(chip, 1); } if (ctrl & MASK_CNTLERR) { int err = (in_le32(&chip->awacs->codec_stat) & MASK_ERRCODE) >> 16; if (err && chip->model <= PMAC_SCREAMER) snd_printk(KERN_DEBUG "error %x\n", err); } /* Writing 1s to the CNTLERR and PORTCHG bits clears them... 
*/ out_le32(&chip->awacs->control, ctrl); return IRQ_HANDLED; } /* * a wrapper to feature call for compatibility */ static void snd_pmac_sound_feature(struct snd_pmac *chip, int enable) { if (ppc_md.feature_call) ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable); } /* * release resources */ static int snd_pmac_free(struct snd_pmac *chip) { /* stop sounds */ if (chip->initialized) { snd_pmac_dbdma_reset(chip); /* disable interrupts from awacs interface */ out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff); } if (chip->node) snd_pmac_sound_feature(chip, 0); /* clean up mixer if any */ if (chip->mixer_free) chip->mixer_free(chip); snd_pmac_detach_beep(chip); /* release resources */ if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (chip->tx_irq >= 0) free_irq(chip->tx_irq, (void*)chip); if (chip->rx_irq >= 0) free_irq(chip->rx_irq, (void*)chip); snd_pmac_dbdma_free(chip, &chip->playback.cmd); snd_pmac_dbdma_free(chip, &chip->capture.cmd); snd_pmac_dbdma_free(chip, &chip->extra_dma); snd_pmac_dbdma_free(chip, &emergency_dbdma); if (chip->macio_base) iounmap(chip->macio_base); if (chip->latch_base) iounmap(chip->latch_base); if (chip->awacs) iounmap(chip->awacs); if (chip->playback.dma) iounmap(chip->playback.dma); if (chip->capture.dma) iounmap(chip->capture.dma); if (chip->node) { int i; for (i = 0; i < 3; i++) { if (chip->requested & (1 << i)) release_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i])); } } if (chip->pdev) pci_dev_put(chip->pdev); of_node_put(chip->node); kfree(chip); return 0; } /* * free the device */ static int snd_pmac_dev_free(struct snd_device *device) { struct snd_pmac *chip = device->device_data; return snd_pmac_free(chip); } /* * check the machine support byteswap (little-endian) */ static void __devinit detect_byte_swap(struct snd_pmac *chip) { struct device_node *mio; /* if seems that Keylargo can't byte-swap */ for (mio = chip->node->parent; mio; mio = mio->parent) { if 
(strcmp(mio->name, "mac-io") == 0) { if (of_device_is_compatible(mio, "Keylargo")) chip->can_byte_swap = 0; break; } } /* it seems the Pismo & iBook can't byte-swap in hardware. */ if (of_machine_is_compatible("PowerBook3,1") || of_machine_is_compatible("PowerBook2,1")) chip->can_byte_swap = 0 ; if (of_machine_is_compatible("PowerBook2,1")) chip->can_duplex = 0; } /* * detect a sound chip */ static int __devinit snd_pmac_detect(struct snd_pmac *chip) { struct device_node *sound; struct device_node *dn; const unsigned int *prop; unsigned int l; struct macio_chip* macio; if (!machine_is(powermac)) return -ENODEV; chip->subframe = 0; chip->revision = 0; chip->freqs_ok = 0xff; /* all ok */ chip->model = PMAC_AWACS; chip->can_byte_swap = 1; chip->can_duplex = 1; chip->can_capture = 1; chip->num_freqs = ARRAY_SIZE(awacs_freqs); chip->freq_table = awacs_freqs; chip->pdev = NULL; chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */ /* check machine type */ if (of_machine_is_compatible("AAPL,3400/2400") || of_machine_is_compatible("AAPL,3500")) chip->is_pbook_3400 = 1; else if (of_machine_is_compatible("PowerBook1,1") || of_machine_is_compatible("AAPL,PowerBook1998")) chip->is_pbook_G3 = 1; chip->node = of_find_node_by_name(NULL, "awacs"); sound = of_node_get(chip->node); /* * powermac G3 models have a node called "davbus" * with a child called "sound". */ if (!chip->node) chip->node = of_find_node_by_name(NULL, "davbus"); /* * if we didn't find a davbus device, try 'i2s-a' since * this seems to be what iBooks have */ if (! chip->node) { chip->node = of_find_node_by_name(NULL, "i2s-a"); if (chip->node && chip->node->parent && chip->node->parent->parent) { if (of_device_is_compatible(chip->node->parent->parent, "K2-Keylargo")) chip->is_k2 = 1; } } if (! chip->node) return -ENODEV; if (!sound) { sound = of_find_node_by_name(NULL, "sound"); while (sound && sound->parent != chip->node) sound = of_find_node_by_name(sound, "sound"); } if (! 
sound) { of_node_put(chip->node); chip->node = NULL; return -ENODEV; } prop = of_get_property(sound, "sub-frame", NULL); if (prop && *prop < 16) chip->subframe = *prop; prop = of_get_property(sound, "layout-id", NULL); if (prop) { /* partly deprecate snd-powermac, for those machines * that have a layout-id property for now */ printk(KERN_INFO "snd-powermac no longer handles any " "machines with a layout-id property " "in the device-tree, use snd-aoa.\n"); of_node_put(sound); of_node_put(chip->node); chip->node = NULL; return -ENODEV; } /* This should be verified on older screamers */ if (of_device_is_compatible(sound, "screamer")) { chip->model = PMAC_SCREAMER; // chip->can_byte_swap = 0; /* FIXME: check this */ } if (of_device_is_compatible(sound, "burgundy")) { chip->model = PMAC_BURGUNDY; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "daca")) { chip->model = PMAC_DACA; chip->can_capture = 0; /* no capture */ chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "tumbler")) { chip->model = PMAC_TUMBLER; chip->can_capture = of_machine_is_compatible("PowerMac4,2") || of_machine_is_compatible("PowerBook3,2") || of_machine_is_compatible("PowerBook3,3") || of_machine_is_compatible("PowerBook4,1") || of_machine_is_compatible("PowerBook4,2") || of_machine_is_compatible("PowerBook4,3"); chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "snapper")) { chip->model = PMAC_SNAPPER; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } prop = of_get_property(sound, "device-id", NULL); if 
(prop) chip->device_id = *prop; dn = of_find_node_by_name(NULL, "perch"); chip->has_iic = (dn != NULL); of_node_put(dn); /* We need the PCI device for DMA allocations, let's use a crude method * for now ... */ macio = macio_find(chip->node, macio_unknown); if (macio == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio !\n"); else { struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) { struct device_node *np = pci_device_to_OF_node(pdev); if (np && np == macio->of_node) { chip->pdev = pdev; break; } } } if (chip->pdev == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio PCI" " device !\n"); detect_byte_swap(chip); /* look for a property saying what sample rates are available */ prop = of_get_property(sound, "sample-rates", &l); if (! prop) prop = of_get_property(sound, "output-frame-rates", &l); if (prop) { int i; chip->freqs_ok = 0; for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; for (i = 0; i < chip->num_freqs; ++i) { if (r == chip->freq_table[i]) { chip->freqs_ok |= (1 << i); break; } } } } else { /* assume only 44.1khz */ chip->freqs_ok = 1; } of_node_put(sound); return 0; } #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute */ static int pmac_auto_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->auto_mute; return 0; } static int pmac_auto_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (ucontrol->value.integer.value[0] != chip->auto_mute) { chip->auto_mute = !!ucontrol->value.integer.value[0]; if (chip->update_automute) chip->update_automute(chip, 1); return 1; } return 0; } static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (chip->detect_headphone) 
ucontrol->value.integer.value[0] = chip->detect_headphone(chip); else ucontrol->value.integer.value[0] = 0; return 0; } static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Auto Mute Switch", .info = snd_pmac_boolean_mono_info, .get = pmac_auto_mute_get, .put = pmac_auto_mute_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Detection", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_pmac_boolean_mono_info, .get = pmac_hp_detect_get, }, }; int __devinit snd_pmac_add_automute(struct snd_pmac *chip) { int err; chip->auto_mute = 1; err = snd_ctl_add(chip->card, snd_ctl_new1(&auto_mute_controls[0], chip)); if (err < 0) { printk(KERN_ERR "snd-powermac: Failed to add automute control\n"); return err; } chip->hp_detect_ctl = snd_ctl_new1(&auto_mute_controls[1], chip); return snd_ctl_add(chip->card, chip->hp_detect_ctl); } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* * create and detect a pmac chip record */ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) { struct snd_pmac *chip; struct device_node *np; int i, err; unsigned int irq; unsigned long ctrl_addr, txdma_addr, rxdma_addr; static struct snd_device_ops ops = { .dev_free = snd_pmac_dev_free, }; *chip_return = NULL; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->card = card; spin_lock_init(&chip->reg_lock); chip->irq = chip->tx_irq = chip->rx_irq = -1; chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK; chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE; if ((err = snd_pmac_detect(chip)) < 0) goto __error; if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->capture.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->extra_dma, 2) < 0 || snd_pmac_dbdma_alloc(chip, &emergency_dbdma, 2) < 0) { err = -ENOMEM; goto __error; } np = chip->node; chip->requested = 0; if (chip->is_k2) { static char *rnames[] = { 
"Sound Control", "Sound DMA" }; for (i = 0; i < 2; i ++) { if (of_address_to_resource(np->parent, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = txdma_addr + 0x100; } else { static char *rnames[] = { "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; for (i = 0; i < 3; i ++) { if (of_address_to_resource(np, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = chip->rsrc[2].start; } chip->awacs = ioremap(ctrl_addr, 0x1000); chip->playback.dma = ioremap(txdma_addr, 0x100); chip->capture.dma = ioremap(rxdma_addr, 0x100); if (chip->model <= PMAC_BURGUNDY) { irq = irq_of_parse_and_map(np, 0); if (request_irq(irq, snd_pmac_ctrl_intr, 0, "PMac", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->irq = irq; } irq = irq_of_parse_and_map(np, 1); if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->tx_irq = irq; irq = irq_of_parse_and_map(np, 2); if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; 
} chip->rx_irq = irq; snd_pmac_sound_feature(chip, 1); /* reset & enable interrupts */ if (chip->model <= PMAC_BURGUNDY) out_le32(&chip->awacs->control, chip->control_mask); /* Powerbooks have odd ways of enabling inputs such as an expansion-bay CD or sound from an internal modem or a PC-card modem. */ if (chip->is_pbook_3400) { /* Enable CD and PC-card sound inputs. */ /* This is done by reading from address * f301a000, + 0x10 to enable the expansion-bay * CD sound input, + 0x80 to enable the PC-card * sound input. The 0x100 enables the SCSI bus * terminator power. */ chip->latch_base = ioremap (0xf301a000, 0x1000); in_8(chip->latch_base + 0x190); } else if (chip->is_pbook_G3) { struct device_node* mio; for (mio = chip->node->parent; mio; mio = mio->parent) { if (strcmp(mio->name, "mac-io") == 0) { struct resource r; if (of_address_to_resource(mio, 0, &r) == 0) chip->macio_base = ioremap(r.start, 0x40); break; } } /* Enable CD sound input. */ /* The relevant bits for writing to this byte are 0x8f. * I haven't found out what the 0x80 bit does. * For the 0xf bits, writing 3 or 7 enables the CD * input, any other value disables it. Values * 1, 3, 5, 7 enable the microphone. Values 0, 2, * 4, 6, 8 - f enable the input from the modem. */ if (chip->macio_base) out_8(chip->macio_base + 0x37, 3); } /* Reset dbdma channels */ snd_pmac_dbdma_reset(chip); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) goto __error; *chip_return = chip; return 0; __error: snd_pmac_free(chip); return err; } /* * sleep notify for powerbook */ #ifdef CONFIG_PM /* * Save state when going to sleep, restore it afterwards. 
 */
/*
 * Suspend: move the card to D3hot, let the codec-specific hook save
 * its own state, suspend all PCM streams, stop any beep under the
 * register lock, mask our interrupts and power the sound cell down.
 */
void snd_pmac_suspend(struct snd_pmac *chip)
{
	unsigned long flags;

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
	if (chip->suspend)
		chip->suspend(chip);
	snd_pcm_suspend_all(chip->pcm);
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_pmac_beep_stop(chip);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	/* irq fields are -1 when never requested, so guard each one */
	if (chip->irq >= 0)
		disable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		disable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		disable_irq(chip->rx_irq);
	snd_pmac_sound_feature(chip, 0);
}

/*
 * Resume: power the sound cell back up, let the codec restore itself,
 * re-enable the PowerBook-specific CD sound inputs, reprogram the
 * format/rate registers and unmask our interrupts.
 */
void snd_pmac_resume(struct snd_pmac *chip)
{
	snd_pmac_sound_feature(chip, 1);
	if (chip->resume)
		chip->resume(chip);
	/* enable CD sound input */
	if (chip->macio_base && chip->is_pbook_G3)
		out_8(chip->macio_base + 0x37, 3);
	else if (chip->is_pbook_3400)
		in_8(chip->latch_base + 0x190);	/* read has the side effect */

	snd_pmac_pcm_set_format(chip);

	if (chip->irq >= 0)
		enable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		enable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		enable_irq(chip->rx_irq);

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
}

#endif /* CONFIG_PM */
gpl-2.0
Gabriel85/android_kernel_sony_apq8064
sound/ppc/pmac.c
5643
38204
/* * PMac DBDMA lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * code based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <asm/irq.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include "pmac.h" #include <sound/pcm_params.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> /* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */ static int awacs_freqs[8] = { 44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350 }; /* fixed frequency table for tumbler */ static int tumbler_freqs[1] = { 44100 }; /* * we will allocate a single 'emergency' dbdma cmd block to use if the * tx status comes up "DEAD". This happens on some PowerComputing Pmac * clones, either owing to a bug in dbdma or some interaction between * IDE and sound. However, this measure would deal with DEAD status if * it appeared elsewhere. 
*/ static struct pmac_dbdma emergency_dbdma; static int emergency_in_use; /* * allocate DBDMA command arrays */ static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size) { unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1); rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize, &rec->dma_base, GFP_KERNEL); if (rec->space == NULL) return -ENOMEM; rec->size = size; memset(rec->space, 0, rsize); rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space); rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space); return 0; } static void snd_pmac_dbdma_free(struct snd_pmac *chip, struct pmac_dbdma *rec) { if (rec->space) { unsigned int rsize = sizeof(struct dbdma_cmd) * (rec->size + 1); dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base); } } /* * pcm stuff */ /* * look up frequency table */ unsigned int snd_pmac_rate_index(struct snd_pmac *chip, struct pmac_stream *rec, unsigned int rate) { int i, ok, found; ok = rec->cur_freqs; if (rate > chip->freq_table[0]) return 0; found = 0; for (i = 0; i < chip->num_freqs; i++, ok >>= 1) { if (! (ok & 1)) continue; found = i; if (rate >= chip->freq_table[i]) break; } return found; } /* * check whether another stream is active */ static inline int another_stream(int stream) { return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; } /* * allocate buffers */ static int snd_pmac_pcm_hw_params(struct snd_pcm_substream *subs, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params)); } /* * release buffers */ static int snd_pmac_pcm_hw_free(struct snd_pcm_substream *subs) { snd_pcm_lib_free_pages(subs); return 0; } /* * get a stream of the opposite direction */ static struct pmac_stream *snd_pmac_get_stream(struct snd_pmac *chip, int stream) { switch (stream) { case SNDRV_PCM_STREAM_PLAYBACK: return &chip->playback; case SNDRV_PCM_STREAM_CAPTURE: return &chip->capture; default: snd_BUG(); return NULL; } } /* * wait while run status is on */ static inline void snd_pmac_wait_ack(struct pmac_stream *rec) { int timeout = 50000; while ((in_le32(&rec->dma->status) & RUN) && timeout-- > 0) udelay(1); } /* * set the format and rate to the chip. * call the lowlevel function if defined (e.g. for AWACS). */ static void snd_pmac_pcm_set_format(struct snd_pmac *chip) { /* set up frequency and format */ out_le32(&chip->awacs->control, chip->control_mask | (chip->rate_index << 8)); out_le32(&chip->awacs->byteswap, chip->format == SNDRV_PCM_FORMAT_S16_LE ? 
1 : 0); if (chip->set_format) chip->set_format(chip); } /* * stop the DMA transfer */ static inline void snd_pmac_dma_stop(struct pmac_stream *rec) { out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16); snd_pmac_wait_ack(rec); } /* * set the command pointer address */ static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd) { out_le32(&rec->dma->cmdptr, cmd->addr); } /* * start the DMA */ static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status) { out_le32(&rec->dma->control, status | (status << 16)); } /* * prepare playback/capture stream */ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { int i; volatile struct dbdma_cmd __iomem *cp; struct snd_pcm_runtime *runtime = subs->runtime; int rate_index; long offset; struct pmac_stream *astr; rec->dma_size = snd_pcm_lib_buffer_bytes(subs); rec->period_size = snd_pcm_lib_period_bytes(subs); rec->nperiods = rec->dma_size / rec->period_size; rec->cur_period = 0; rate_index = snd_pmac_rate_index(chip, rec, runtime->rate); /* set up constraints */ astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; astr->cur_freqs = 1 << rate_index; astr->cur_formats = 1 << runtime->format; chip->rate_index = rate_index; chip->format = runtime->format; /* We really want to execute a DMA stop command, after the AWACS * is initialized. * For reasons I don't understand, it stops the hissing noise * common to many PowerBook G3 systems and random noise otherwise * captured on iBook2's about every third time. -ReneR */ spin_lock_irq(&chip->reg_lock); snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); spin_unlock_irq(&chip->reg_lock); mdelay(5); spin_lock_irq(&chip->reg_lock); /* continuous DMA memory type doesn't provide the physical address, * so we need to resolve the address here... 
*/ offset = runtime->dma_addr; for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) { st_le32(&cp->phy_addr, offset); st_le16(&cp->req_count, rec->period_size); /*st_le16(&cp->res_count, 0);*/ st_le16(&cp->xfer_status, 0); offset += rec->period_size; } /* make loop */ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); st_le32(&cp->cmd_dep, rec->cmd.addr); snd_pmac_dma_stop(rec); snd_pmac_dma_set_command(rec, &rec->cmd); spin_unlock_irq(&chip->reg_lock); return 0; } /* * PCM trigger/stop */ static int snd_pmac_pcm_trigger(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs, int cmd) { volatile struct dbdma_cmd __iomem *cp; int i, command; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (rec->running) return -EBUSY; command = (subs->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUTPUT_MORE : INPUT_MORE) + INTR_ALWAYS; spin_lock(&chip->reg_lock); snd_pmac_beep_stop(chip); snd_pmac_pcm_set_format(chip); for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) out_le16(&cp->command, command); snd_pmac_dma_set_command(rec, &rec->cmd); (void)in_le32(&rec->dma->status); snd_pmac_dma_run(rec, RUN|WAKE); rec->running = 1; spin_unlock(&chip->reg_lock); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: spin_lock(&chip->reg_lock); rec->running = 0; /*printk(KERN_DEBUG "stopped!!\n");*/ snd_pmac_dma_stop(rec); for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) out_le16(&cp->command, DBDMA_STOP); spin_unlock(&chip->reg_lock); break; default: return -EINVAL; } return 0; } /* * return the current pointer */ inline static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { int count = 0; #if 1 /* hmm.. how can we get the current dma pointer?? 
*/ int stat; volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period]; stat = ld_le16(&cp->xfer_status); if (stat & (ACTIVE|DEAD)) { count = in_le16(&cp->res_count); if (count) count = rec->period_size - count; } #endif count += rec->cur_period * rec->period_size; /*printk(KERN_DEBUG "pointer=%d\n", count);*/ return bytes_to_frames(subs->runtime, count); } /* * playback */ static int snd_pmac_playback_prepare(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_prepare(chip, &chip->playback, subs); } static int snd_pmac_playback_trigger(struct snd_pcm_substream *subs, int cmd) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_trigger(chip, &chip->playback, subs, cmd); } static snd_pcm_uframes_t snd_pmac_playback_pointer(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_pointer(chip, &chip->playback, subs); } /* * capture */ static int snd_pmac_capture_prepare(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_prepare(chip, &chip->capture, subs); } static int snd_pmac_capture_trigger(struct snd_pcm_substream *subs, int cmd) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_trigger(chip, &chip->capture, subs, cmd); } static snd_pcm_uframes_t snd_pmac_capture_pointer(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_pointer(chip, &chip->capture, subs); } /* * Handle DEAD DMA transfers: * if the TX status comes up "DEAD" - reported on some Power Computing machines * we need to re-start the dbdma - but from a different physical start address * and with a different transfer length. It would get very messy to do this * with the normal dbdma_cmd blocks - we would have to re-write the buffer start * addresses each time. 
So, we will keep a single dbdma_cmd block which can be * fiddled with. * When DEAD status is first reported the content of the faulted dbdma block is * copied into the emergency buffer and we note that the buffer is in use. * we then bump the start physical address by the amount that was successfully * output before it died. * On any subsequent DEAD result we just do the bump-ups (we know that we are * already using the emergency dbdma_cmd). * CHECK: this just tries to "do it". It is possible that we should abandon * xfers when the number of residual bytes gets below a certain value - I can * see that this might cause a loop-forever if a too small transfer causes * DEAD status. However this is a TODO for now - we'll see what gets reported. * When we get a successful transfer result with the emergency buffer we just * pretend that it completed using the original dmdma_cmd and carry on. The * 'next_cmd' field will already point back to the original loop of blocks. */ static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec, volatile struct dbdma_cmd __iomem *cp) { unsigned short req, res ; unsigned int phy ; /* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */ /* to clear DEAD status we must first clear RUN set it to quiescent to be on the safe side */ (void)in_le32(&rec->dma->status); out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); if (!emergency_in_use) { /* new problem */ memcpy((void *)emergency_dbdma.cmds, (void *)cp, sizeof(struct dbdma_cmd)); emergency_in_use = 1; st_le16(&cp->xfer_status, 0); st_le16(&cp->req_count, rec->period_size); cp = emergency_dbdma.cmds; } /* now bump the values to reflect the amount we haven't yet shifted */ req = ld_le16(&cp->req_count); res = ld_le16(&cp->res_count); phy = ld_le32(&cp->phy_addr); phy += (req - res); st_le16(&cp->req_count, res); st_le16(&cp->res_count, 0); st_le16(&cp->xfer_status, 0); st_le32(&cp->phy_addr, phy); st_le32(&cp->cmd_dep, rec->cmd.addr + sizeof(struct 
dbdma_cmd)*((rec->cur_period+1)%rec->nperiods)); st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS); /* point at our patched up command block */ out_le32(&rec->dma->cmdptr, emergency_dbdma.addr); /* we must re-start the controller */ (void)in_le32(&rec->dma->status); /* should complete clearing the DEAD status */ out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); } /* * update playback/capture pointer from interrupts */ static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec) { volatile struct dbdma_cmd __iomem *cp; int c; int stat; spin_lock(&chip->reg_lock); if (rec->running) { for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */ if (emergency_in_use) /* already using DEAD xfer? */ cp = emergency_dbdma.cmds; else cp = &rec->cmd.cmds[rec->cur_period]; stat = ld_le16(&cp->xfer_status); if (stat & DEAD) { snd_pmac_pcm_dead_xfer(rec, cp); break; /* this block is still going */ } if (emergency_in_use) emergency_in_use = 0 ; /* done that */ if (! 
(stat & ACTIVE)) break; /*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/ st_le16(&cp->xfer_status, 0); st_le16(&cp->req_count, rec->period_size); /*st_le16(&cp->res_count, 0);*/ rec->cur_period++; if (rec->cur_period >= rec->nperiods) { rec->cur_period = 0; } spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(rec->substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } /* * hw info */ static struct snd_pcm_hardware snd_pmac_playback = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; static struct snd_pcm_hardware snd_pmac_capture = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; #if 0 // NYI static int snd_pmac_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); int i, freq_table[8], num_freqs; if (! 
rec) return -EINVAL; num_freqs = 0; for (i = chip->num_freqs - 1; i >= 0; i--) { if (rec->cur_freqs & (1 << i)) freq_table[num_freqs++] = chip->freq_table[i]; } return snd_interval_list(hw_param_interval(params, rule->var), num_freqs, freq_table, 0); } static int snd_pmac_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); if (! rec) return -EINVAL; return snd_mask_refine_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), rec->cur_formats); } #endif // NYI static int snd_pmac_pcm_open(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; int i; /* look up frequency table and fill bit mask */ runtime->hw.rates = 0; for (i = 0; i < chip->num_freqs; i++) if (chip->freqs_ok & (1 << i)) runtime->hw.rates |= snd_pcm_rate_to_rate_bit(chip->freq_table[i]); /* check for minimum and maximum rates */ for (i = 0; i < chip->num_freqs; i++) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_max = chip->freq_table[i]; break; } } for (i = chip->num_freqs - 1; i >= 0; i--) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_min = chip->freq_table[i]; break; } } runtime->hw.formats = chip->formats_ok; if (chip->can_capture) { if (! chip->can_duplex) runtime->hw.info |= SNDRV_PCM_INFO_HALF_DUPLEX; runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX; } runtime->private_data = rec; rec->substream = subs; #if 0 /* FIXME: still under development.. 
*/ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_pmac_hw_rule_rate, chip, rec->stream, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, snd_pmac_hw_rule_format, chip, rec->stream, -1); #endif runtime->hw.periods_max = rec->cmd.size - 1; /* constraints to fix choppy sound */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); return 0; } static int snd_pmac_pcm_close(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct pmac_stream *astr; snd_pmac_dma_stop(rec); astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; /* reset constraints */ astr->cur_freqs = chip->freqs_ok; astr->cur_formats = chip->formats_ok; return 0; } static int snd_pmac_playback_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_playback; return snd_pmac_pcm_open(chip, &chip->playback, subs); } static int snd_pmac_capture_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_capture; return snd_pmac_pcm_open(chip, &chip->capture, subs); } static int snd_pmac_playback_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->playback, subs); } static int snd_pmac_capture_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->capture, subs); } /* */ static struct snd_pcm_ops snd_pmac_playback_ops = { .open = snd_pmac_playback_open, .close = snd_pmac_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_playback_prepare, .trigger = snd_pmac_playback_trigger, .pointer = snd_pmac_playback_pointer, }; static struct snd_pcm_ops snd_pmac_capture_ops = { .open = snd_pmac_capture_open, .close = snd_pmac_capture_close, .ioctl 
= snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_capture_prepare, .trigger = snd_pmac_capture_trigger, .pointer = snd_pmac_capture_pointer, }; int __devinit snd_pmac_pcm_new(struct snd_pmac *chip) { struct snd_pcm *pcm; int err; int num_captures = 1; if (! chip->can_capture) num_captures = 0; err = snd_pcm_new(chip->card, chip->card->driver, 0, 1, num_captures, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_pmac_playback_ops); if (chip->can_capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_pmac_capture_ops); pcm->private_data = chip; pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->formats_ok = SNDRV_PCM_FMTBIT_S16_BE; if (chip->can_byte_swap) chip->formats_ok |= SNDRV_PCM_FMTBIT_S16_LE; chip->playback.cur_formats = chip->formats_ok; chip->capture.cur_formats = chip->formats_ok; chip->playback.cur_freqs = chip->freqs_ok; chip->capture.cur_freqs = chip->freqs_ok; /* preallocate 64k buffer */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &chip->pdev->dev, 64 * 1024, 64 * 1024); return 0; } static void snd_pmac_dbdma_reset(struct snd_pmac *chip) { out_le32(&chip->playback.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->playback); out_le32(&chip->capture.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->capture); } /* * handling beep */ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long addr, int speed) { struct pmac_stream *rec = &chip->playback; snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->req_count, bytes); st_le16(&chip->extra_dma.cmds->xfer_status, 0); st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr); st_le32(&chip->extra_dma.cmds->phy_addr, addr); st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS); out_le32(&chip->awacs->control, (in_le32(&chip->awacs->control) & 
~0x1f00) | (speed << 8)); out_le32(&chip->awacs->byteswap, 0); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); } void snd_pmac_beep_dma_stop(struct snd_pmac *chip) { snd_pmac_dma_stop(&chip->playback); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_pcm_set_format(chip); /* reset format */ } /* * interrupt handlers */ static irqreturn_t snd_pmac_tx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->playback); return IRQ_HANDLED; } static irqreturn_t snd_pmac_rx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->capture); return IRQ_HANDLED; } static irqreturn_t snd_pmac_ctrl_intr(int irq, void *devid) { struct snd_pmac *chip = devid; int ctrl = in_le32(&chip->awacs->control); /*printk(KERN_DEBUG "pmac: control interrupt.. 0x%x\n", ctrl);*/ if (ctrl & MASK_PORTCHG) { /* do something when headphone is plugged/unplugged? */ if (chip->update_automute) chip->update_automute(chip, 1); } if (ctrl & MASK_CNTLERR) { int err = (in_le32(&chip->awacs->codec_stat) & MASK_ERRCODE) >> 16; if (err && chip->model <= PMAC_SCREAMER) snd_printk(KERN_DEBUG "error %x\n", err); } /* Writing 1s to the CNTLERR and PORTCHG bits clears them... 
*/ out_le32(&chip->awacs->control, ctrl); return IRQ_HANDLED; } /* * a wrapper to feature call for compatibility */ static void snd_pmac_sound_feature(struct snd_pmac *chip, int enable) { if (ppc_md.feature_call) ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable); } /* * release resources */ static int snd_pmac_free(struct snd_pmac *chip) { /* stop sounds */ if (chip->initialized) { snd_pmac_dbdma_reset(chip); /* disable interrupts from awacs interface */ out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff); } if (chip->node) snd_pmac_sound_feature(chip, 0); /* clean up mixer if any */ if (chip->mixer_free) chip->mixer_free(chip); snd_pmac_detach_beep(chip); /* release resources */ if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (chip->tx_irq >= 0) free_irq(chip->tx_irq, (void*)chip); if (chip->rx_irq >= 0) free_irq(chip->rx_irq, (void*)chip); snd_pmac_dbdma_free(chip, &chip->playback.cmd); snd_pmac_dbdma_free(chip, &chip->capture.cmd); snd_pmac_dbdma_free(chip, &chip->extra_dma); snd_pmac_dbdma_free(chip, &emergency_dbdma); if (chip->macio_base) iounmap(chip->macio_base); if (chip->latch_base) iounmap(chip->latch_base); if (chip->awacs) iounmap(chip->awacs); if (chip->playback.dma) iounmap(chip->playback.dma); if (chip->capture.dma) iounmap(chip->capture.dma); if (chip->node) { int i; for (i = 0; i < 3; i++) { if (chip->requested & (1 << i)) release_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i])); } } if (chip->pdev) pci_dev_put(chip->pdev); of_node_put(chip->node); kfree(chip); return 0; } /* * free the device */ static int snd_pmac_dev_free(struct snd_device *device) { struct snd_pmac *chip = device->device_data; return snd_pmac_free(chip); } /* * check the machine support byteswap (little-endian) */ static void __devinit detect_byte_swap(struct snd_pmac *chip) { struct device_node *mio; /* if seems that Keylargo can't byte-swap */ for (mio = chip->node->parent; mio; mio = mio->parent) { if 
(strcmp(mio->name, "mac-io") == 0) { if (of_device_is_compatible(mio, "Keylargo")) chip->can_byte_swap = 0; break; } } /* it seems the Pismo & iBook can't byte-swap in hardware. */ if (of_machine_is_compatible("PowerBook3,1") || of_machine_is_compatible("PowerBook2,1")) chip->can_byte_swap = 0 ; if (of_machine_is_compatible("PowerBook2,1")) chip->can_duplex = 0; } /* * detect a sound chip */ static int __devinit snd_pmac_detect(struct snd_pmac *chip) { struct device_node *sound; struct device_node *dn; const unsigned int *prop; unsigned int l; struct macio_chip* macio; if (!machine_is(powermac)) return -ENODEV; chip->subframe = 0; chip->revision = 0; chip->freqs_ok = 0xff; /* all ok */ chip->model = PMAC_AWACS; chip->can_byte_swap = 1; chip->can_duplex = 1; chip->can_capture = 1; chip->num_freqs = ARRAY_SIZE(awacs_freqs); chip->freq_table = awacs_freqs; chip->pdev = NULL; chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */ /* check machine type */ if (of_machine_is_compatible("AAPL,3400/2400") || of_machine_is_compatible("AAPL,3500")) chip->is_pbook_3400 = 1; else if (of_machine_is_compatible("PowerBook1,1") || of_machine_is_compatible("AAPL,PowerBook1998")) chip->is_pbook_G3 = 1; chip->node = of_find_node_by_name(NULL, "awacs"); sound = of_node_get(chip->node); /* * powermac G3 models have a node called "davbus" * with a child called "sound". */ if (!chip->node) chip->node = of_find_node_by_name(NULL, "davbus"); /* * if we didn't find a davbus device, try 'i2s-a' since * this seems to be what iBooks have */ if (! chip->node) { chip->node = of_find_node_by_name(NULL, "i2s-a"); if (chip->node && chip->node->parent && chip->node->parent->parent) { if (of_device_is_compatible(chip->node->parent->parent, "K2-Keylargo")) chip->is_k2 = 1; } } if (! chip->node) return -ENODEV; if (!sound) { sound = of_find_node_by_name(NULL, "sound"); while (sound && sound->parent != chip->node) sound = of_find_node_by_name(sound, "sound"); } if (! 
sound) { of_node_put(chip->node); chip->node = NULL; return -ENODEV; } prop = of_get_property(sound, "sub-frame", NULL); if (prop && *prop < 16) chip->subframe = *prop; prop = of_get_property(sound, "layout-id", NULL); if (prop) { /* partly deprecate snd-powermac, for those machines * that have a layout-id property for now */ printk(KERN_INFO "snd-powermac no longer handles any " "machines with a layout-id property " "in the device-tree, use snd-aoa.\n"); of_node_put(sound); of_node_put(chip->node); chip->node = NULL; return -ENODEV; } /* This should be verified on older screamers */ if (of_device_is_compatible(sound, "screamer")) { chip->model = PMAC_SCREAMER; // chip->can_byte_swap = 0; /* FIXME: check this */ } if (of_device_is_compatible(sound, "burgundy")) { chip->model = PMAC_BURGUNDY; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "daca")) { chip->model = PMAC_DACA; chip->can_capture = 0; /* no capture */ chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "tumbler")) { chip->model = PMAC_TUMBLER; chip->can_capture = of_machine_is_compatible("PowerMac4,2") || of_machine_is_compatible("PowerBook3,2") || of_machine_is_compatible("PowerBook3,3") || of_machine_is_compatible("PowerBook4,1") || of_machine_is_compatible("PowerBook4,2") || of_machine_is_compatible("PowerBook4,3"); chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "snapper")) { chip->model = PMAC_SNAPPER; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } prop = of_get_property(sound, "device-id", NULL); if 
(prop) chip->device_id = *prop; dn = of_find_node_by_name(NULL, "perch"); chip->has_iic = (dn != NULL); of_node_put(dn); /* We need the PCI device for DMA allocations, let's use a crude method * for now ... */ macio = macio_find(chip->node, macio_unknown); if (macio == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio !\n"); else { struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) { struct device_node *np = pci_device_to_OF_node(pdev); if (np && np == macio->of_node) { chip->pdev = pdev; break; } } } if (chip->pdev == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio PCI" " device !\n"); detect_byte_swap(chip); /* look for a property saying what sample rates are available */ prop = of_get_property(sound, "sample-rates", &l); if (! prop) prop = of_get_property(sound, "output-frame-rates", &l); if (prop) { int i; chip->freqs_ok = 0; for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; for (i = 0; i < chip->num_freqs; ++i) { if (r == chip->freq_table[i]) { chip->freqs_ok |= (1 << i); break; } } } } else { /* assume only 44.1khz */ chip->freqs_ok = 1; } of_node_put(sound); return 0; } #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute */ static int pmac_auto_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->auto_mute; return 0; } static int pmac_auto_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (ucontrol->value.integer.value[0] != chip->auto_mute) { chip->auto_mute = !!ucontrol->value.integer.value[0]; if (chip->update_automute) chip->update_automute(chip, 1); return 1; } return 0; } static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (chip->detect_headphone) 
ucontrol->value.integer.value[0] = chip->detect_headphone(chip); else ucontrol->value.integer.value[0] = 0; return 0; } static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Auto Mute Switch", .info = snd_pmac_boolean_mono_info, .get = pmac_auto_mute_get, .put = pmac_auto_mute_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Detection", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_pmac_boolean_mono_info, .get = pmac_hp_detect_get, }, }; int __devinit snd_pmac_add_automute(struct snd_pmac *chip) { int err; chip->auto_mute = 1; err = snd_ctl_add(chip->card, snd_ctl_new1(&auto_mute_controls[0], chip)); if (err < 0) { printk(KERN_ERR "snd-powermac: Failed to add automute control\n"); return err; } chip->hp_detect_ctl = snd_ctl_new1(&auto_mute_controls[1], chip); return snd_ctl_add(chip->card, chip->hp_detect_ctl); } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* * create and detect a pmac chip record */ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) { struct snd_pmac *chip; struct device_node *np; int i, err; unsigned int irq; unsigned long ctrl_addr, txdma_addr, rxdma_addr; static struct snd_device_ops ops = { .dev_free = snd_pmac_dev_free, }; *chip_return = NULL; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->card = card; spin_lock_init(&chip->reg_lock); chip->irq = chip->tx_irq = chip->rx_irq = -1; chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK; chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE; if ((err = snd_pmac_detect(chip)) < 0) goto __error; if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->capture.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->extra_dma, 2) < 0 || snd_pmac_dbdma_alloc(chip, &emergency_dbdma, 2) < 0) { err = -ENOMEM; goto __error; } np = chip->node; chip->requested = 0; if (chip->is_k2) { static char *rnames[] = { 
"Sound Control", "Sound DMA" }; for (i = 0; i < 2; i ++) { if (of_address_to_resource(np->parent, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = txdma_addr + 0x100; } else { static char *rnames[] = { "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; for (i = 0; i < 3; i ++) { if (of_address_to_resource(np, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = chip->rsrc[2].start; } chip->awacs = ioremap(ctrl_addr, 0x1000); chip->playback.dma = ioremap(txdma_addr, 0x100); chip->capture.dma = ioremap(rxdma_addr, 0x100); if (chip->model <= PMAC_BURGUNDY) { irq = irq_of_parse_and_map(np, 0); if (request_irq(irq, snd_pmac_ctrl_intr, 0, "PMac", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->irq = irq; } irq = irq_of_parse_and_map(np, 1); if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->tx_irq = irq; irq = irq_of_parse_and_map(np, 2); if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; 
} chip->rx_irq = irq; snd_pmac_sound_feature(chip, 1); /* reset & enable interrupts */ if (chip->model <= PMAC_BURGUNDY) out_le32(&chip->awacs->control, chip->control_mask); /* Powerbooks have odd ways of enabling inputs such as an expansion-bay CD or sound from an internal modem or a PC-card modem. */ if (chip->is_pbook_3400) { /* Enable CD and PC-card sound inputs. */ /* This is done by reading from address * f301a000, + 0x10 to enable the expansion-bay * CD sound input, + 0x80 to enable the PC-card * sound input. The 0x100 enables the SCSI bus * terminator power. */ chip->latch_base = ioremap (0xf301a000, 0x1000); in_8(chip->latch_base + 0x190); } else if (chip->is_pbook_G3) { struct device_node* mio; for (mio = chip->node->parent; mio; mio = mio->parent) { if (strcmp(mio->name, "mac-io") == 0) { struct resource r; if (of_address_to_resource(mio, 0, &r) == 0) chip->macio_base = ioremap(r.start, 0x40); break; } } /* Enable CD sound input. */ /* The relevant bits for writing to this byte are 0x8f. * I haven't found out what the 0x80 bit does. * For the 0xf bits, writing 3 or 7 enables the CD * input, any other value disables it. Values * 1, 3, 5, 7 enable the microphone. Values 0, 2, * 4, 6, 8 - f enable the input from the modem. */ if (chip->macio_base) out_8(chip->macio_base + 0x37, 3); } /* Reset dbdma channels */ snd_pmac_dbdma_reset(chip); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) goto __error; *chip_return = chip; return 0; __error: snd_pmac_free(chip); return err; } /* * sleep notify for powerbook */ #ifdef CONFIG_PM /* * Save state when going to sleep, restore it afterwards. 
 */

/*
 * Suspend the chip: quiesce the driver, silence the hardware and mask
 * its interrupt sources so nothing fires while the machine sleeps.
 *
 * Order matters: the power state is announced first, then the
 * model-specific suspend hook runs, PCM streams are suspended, the beep
 * generator is stopped under reg_lock, and only then are the IRQ lines
 * disabled and the sound feature powered off.
 */
void snd_pmac_suspend(struct snd_pmac *chip)
{
	unsigned long flags;

	/* Tell the control layer we are heading to D3hot. */
	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
	/* Model-specific suspend hook (set up at probe time), if any. */
	if (chip->suspend)
		chip->suspend(chip);
	snd_pcm_suspend_all(chip->pcm);
	/* Beep state is shared with the interrupt path; stop it locked. */
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_pmac_beep_stop(chip);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	/* Mask all three interrupt lines (ctrl/tx/rx) if they were set up. */
	if (chip->irq >= 0)
		disable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		disable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		disable_irq(chip->rx_irq);
	/* Finally power down the sound hardware. */
	snd_pmac_sound_feature(chip, 0);
}

/*
 * Resume the chip: mirror of snd_pmac_suspend(), executed in reverse —
 * power the hardware back up, re-run the model hook, re-enable the odd
 * PowerBook input paths, restore the PCM format, unmask IRQs and report
 * D0 to the control layer.
 */
void snd_pmac_resume(struct snd_pmac *chip)
{
	snd_pmac_sound_feature(chip, 1);
	/* Model-specific resume hook, if any. */
	if (chip->resume)
		chip->resume(chip);
	/* enable CD sound input */
	if (chip->macio_base && chip->is_pbook_G3)
		/* PowerBook G3: write 3 to mac-io reg 0x37 (see probe code). */
		out_8(chip->macio_base + 0x37, 3);
	else if (chip->is_pbook_3400)
		/* PowerBook 3400: read-to-enable latch (see probe code). */
		in_8(chip->latch_base + 0x190);

	snd_pmac_pcm_set_format(chip);

	/* Unmask the interrupt lines that were disabled on suspend. */
	if (chip->irq >= 0)
		enable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		enable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		enable_irq(chip->rx_irq);

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
}

#endif /* CONFIG_PM */
gpl-2.0
supersonicninja/HW01EKERNEL
drivers/net/wireless/hostap/hostap_plx.c
8971
16771
#define PRISM2_PLX /* Host AP driver's support for PC Cards on PCI adapters using PLX9052 is * based on: * - Host AP driver patch from james@madingley.org * - linux-wlan-ng driver, Copyright (C) AbsoluteValue Systems, Inc. */ #include <linux/module.h> #include <linux/init.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #include "hostap_wlan.h" static char *dev_info = "hostap_plx"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PLX)."); MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)"); MODULE_LICENSE("GPL"); static int ignore_cis; module_param(ignore_cis, int, 0444); MODULE_PARM_DESC(ignore_cis, "Do not verify manfid information in CIS"); /* struct local_info::hw_priv */ struct hostap_plx_priv { void __iomem *attr_mem; unsigned int cor_offset; }; #define PLX_MIN_ATTR_LEN 512 /* at least 2 x 256 is needed for CIS */ #define COR_SRESET 0x80 #define COR_LEVLREQ 0x40 #define COR_ENABLE_FUNC 0x01 /* PCI Configuration Registers */ #define PLX_PCIIPR 0x3d /* PCI Interrupt Pin */ /* Local Configuration Registers */ #define PLX_INTCSR 0x4c /* Interrupt Control/Status Register */ #define PLX_INTCSR_PCI_INTEN BIT(6) /* PCI Interrupt Enable */ #define PLX_CNTRL 0x50 #define PLX_CNTRL_SERIAL_EEPROM_PRESENT BIT(28) #define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = { PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), PLXDEV(0x126c, 0x8030, "Nortel emobility"), PLXDEV(0x1562, 0x0001, "Symbol LA-4123"), PLXDEV(0x1385, 0x4100, "Netgear MA301"), PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"), PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"), 
PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"), PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"), PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"), PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"), PLXDEV(0x16ab, 0x1103, "Longshine 8031"), PLXDEV(0x16ec, 0x3685, "US Robotics USR2415"), PLXDEV(0xec80, 0xec00, "Belkin F5D6000"), { 0 } }; /* Array of known Prism2/2.5 PC Card manufactured ids. If your card's manfid * is not listed here, you will need to add it here to get the driver * initialized. */ static struct prism2_plx_manfid { u16 manfid1, manfid2; } prism2_plx_known_manfids[] = { { 0x000b, 0x7110 } /* D-Link DWL-650 Rev. P1 */, { 0x000b, 0x7300 } /* Philips 802.11b WLAN PCMCIA */, { 0x0101, 0x0777 } /* 3Com AirConnect PCI 777A */, { 0x0126, 0x8000 } /* Proxim RangeLAN */, { 0x0138, 0x0002 } /* Compaq WL100 */, { 0x0156, 0x0002 } /* Intersil Prism II Ref. Design (and others) */, { 0x026f, 0x030b } /* Buffalo WLI-CF-S11G */, { 0x0274, 0x1612 } /* Linksys WPC11 Ver 2.5 */, { 0x0274, 0x1613 } /* Linksys WPC11 Ver 3 */, { 0x028a, 0x0002 } /* D-Link DRC-650 */, { 0x0250, 0x0002 } /* Samsung SWL2000-N */, { 0xc250, 0x0002 } /* EMTAC A2424i */, { 0xd601, 0x0002 } /* Z-Com XI300 */, { 0xd601, 0x0005 } /* Zcomax XI-325H 200mW */, { 0, 0} }; #ifdef PRISM2_IO_DEBUG static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); outb(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inb(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v); 
spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); outw(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inw(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outsw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc); outsw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } static inline void hfa384x_insw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc); insw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } #define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb_debug(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw_debug(dev, (a)) #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc)) #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc)) #else /* PRISM2_IO_DEBUG */ 
#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a)) #define HFA384X_INB(a) inb(dev->base_addr + (a)) #define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a)) #define HFA384X_INW(a) inw(dev->base_addr + (a)) #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc) #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc) #endif /* PRISM2_IO_DEBUG */ static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_INSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) *((char *) pos) = HFA384X_INB(d_off); return 0; } static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_OUTSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) HFA384X_OUTB(*((char *) pos), d_off); return 0; } /* FIX: This might change at some point.. 
*/ #include "hostap_hw.c" static void prism2_plx_cor_sreset(local_info_t *local) { unsigned char corsave; struct hostap_plx_priv *hw_priv = local->hw_priv; printk(KERN_DEBUG "%s: Doing reset via direct COR access.\n", dev_info); /* Set sreset bit of COR and clear it after hold time */ if (hw_priv->attr_mem == NULL) { /* TMD7160 - COR at card's first I/O addr */ corsave = inb(hw_priv->cor_offset); outb(corsave | COR_SRESET, hw_priv->cor_offset); mdelay(2); outb(corsave & ~COR_SRESET, hw_priv->cor_offset); mdelay(2); } else { /* PLX9052 */ corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset); writeb(corsave | COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(2); writeb(corsave & ~COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(2); } } static void prism2_plx_genesis_reset(local_info_t *local, int hcr) { unsigned char corsave; struct hostap_plx_priv *hw_priv = local->hw_priv; if (hw_priv->attr_mem == NULL) { /* TMD7160 - COR at card's first I/O addr */ corsave = inb(hw_priv->cor_offset); outb(corsave | COR_SRESET, hw_priv->cor_offset); mdelay(10); outb(hcr, hw_priv->cor_offset + 2); mdelay(10); outb(corsave & ~COR_SRESET, hw_priv->cor_offset); mdelay(10); } else { /* PLX9052 */ corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset); writeb(corsave | COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(10); writeb(hcr, hw_priv->attr_mem + hw_priv->cor_offset + 2); mdelay(10); writeb(corsave & ~COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(10); } } static struct prism2_helper_functions prism2_plx_funcs = { .card_present = NULL, .cor_sreset = prism2_plx_cor_sreset, .genesis_reset = prism2_plx_genesis_reset, .hw_type = HOSTAP_HW_PLX, }; static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len, unsigned int *cor_offset, unsigned int *cor_index) { #define CISTPL_CONFIG 0x1A #define CISTPL_MANFID 0x20 #define CISTPL_END 0xFF #define CIS_MAX_LEN 256 u8 *cis; int i, pos; unsigned int rmsz, rasz, manfid1, manfid2; 
struct prism2_plx_manfid *manfid; cis = kmalloc(CIS_MAX_LEN, GFP_KERNEL); if (cis == NULL) return -ENOMEM; /* read CIS; it is in even offsets in the beginning of attr_mem */ for (i = 0; i < CIS_MAX_LEN; i++) cis[i] = readb(attr_mem + 2 * i); printk(KERN_DEBUG "%s: CIS: %02x %02x %02x %02x %02x %02x ...\n", dev_info, cis[0], cis[1], cis[2], cis[3], cis[4], cis[5]); /* set reasonable defaults for Prism2 cards just in case CIS parsing * fails */ *cor_offset = 0x3e0; *cor_index = 0x01; manfid1 = manfid2 = 0; pos = 0; while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) { if (pos + 2 + cis[pos + 1] > CIS_MAX_LEN) goto cis_error; switch (cis[pos]) { case CISTPL_CONFIG: if (cis[pos + 1] < 2) goto cis_error; rmsz = (cis[pos + 2] & 0x3c) >> 2; rasz = cis[pos + 2] & 0x03; if (4 + rasz + rmsz > cis[pos + 1]) goto cis_error; *cor_index = cis[pos + 3] & 0x3F; *cor_offset = 0; for (i = 0; i <= rasz; i++) *cor_offset += cis[pos + 4 + i] << (8 * i); printk(KERN_DEBUG "%s: cor_index=0x%x " "cor_offset=0x%x\n", dev_info, *cor_index, *cor_offset); if (*cor_offset > attr_len) { printk(KERN_ERR "%s: COR offset not within " "attr_mem\n", dev_info); kfree(cis); return -1; } break; case CISTPL_MANFID: if (cis[pos + 1] < 4) goto cis_error; manfid1 = cis[pos + 2] + (cis[pos + 3] << 8); manfid2 = cis[pos + 4] + (cis[pos + 5] << 8); printk(KERN_DEBUG "%s: manfid=0x%04x, 0x%04x\n", dev_info, manfid1, manfid2); break; } pos += cis[pos + 1] + 2; } if (pos >= CIS_MAX_LEN || cis[pos] != CISTPL_END) goto cis_error; for (manfid = prism2_plx_known_manfids; manfid->manfid1 != 0; manfid++) if (manfid1 == manfid->manfid1 && manfid2 == manfid->manfid2) { kfree(cis); return 0; } printk(KERN_INFO "%s: unknown manfid 0x%04x, 0x%04x - assuming this is" " not supported card\n", dev_info, manfid1, manfid2); goto fail; cis_error: printk(KERN_WARNING "%s: invalid CIS data\n", dev_info); fail: kfree(cis); if (ignore_cis) { printk(KERN_INFO "%s: ignore_cis parameter set - ignoring " "errors during CIS 
verification\n", dev_info); return 0; } return -1; } static int prism2_plx_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned int pccard_ioaddr, plx_ioaddr; unsigned long pccard_attr_mem; unsigned int pccard_attr_len; void __iomem *attr_mem = NULL; unsigned int cor_offset = 0, cor_index = 0; u32 reg; local_info_t *local = NULL; struct net_device *dev = NULL; struct hostap_interface *iface; static int cards_found /* = 0 */; int irq_registered = 0; int tmd7160; struct hostap_plx_priv *hw_priv; hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL); if (hw_priv == NULL) return -ENOMEM; if (pci_enable_device(pdev)) goto err_out_free; /* National Datacomm NCP130 based on TMD7160, not PLX9052. */ tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131); plx_ioaddr = pci_resource_start(pdev, 1); pccard_ioaddr = pci_resource_start(pdev, tmd7160 ? 2 : 3); if (tmd7160) { /* TMD7160 */ attr_mem = NULL; /* no access to PC Card attribute memory */ printk(KERN_INFO "TMD7160 PCI/PCMCIA adapter: io=0x%x, " "irq=%d, pccard_io=0x%x\n", plx_ioaddr, pdev->irq, pccard_ioaddr); cor_offset = plx_ioaddr; cor_index = 0x04; outb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, plx_ioaddr); mdelay(1); reg = inb(plx_ioaddr); if (reg != (cor_index | COR_LEVLREQ | COR_ENABLE_FUNC)) { printk(KERN_ERR "%s: Error setting COR (expected=" "0x%02x, was=0x%02x)\n", dev_info, cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, reg); goto fail; } } else { /* PLX9052 */ pccard_attr_mem = pci_resource_start(pdev, 2); pccard_attr_len = pci_resource_len(pdev, 2); if (pccard_attr_len < PLX_MIN_ATTR_LEN) goto fail; attr_mem = ioremap(pccard_attr_mem, pccard_attr_len); if (attr_mem == NULL) { printk(KERN_ERR "%s: cannot remap attr_mem\n", dev_info); goto fail; } printk(KERN_INFO "PLX9052 PCI/PCMCIA adapter: " "mem=0x%lx, plx_io=0x%x, irq=%d, pccard_io=0x%x\n", pccard_attr_mem, plx_ioaddr, pdev->irq, pccard_ioaddr); if (prism2_plx_check_cis(attr_mem, pccard_attr_len, &cor_offset, &cor_index)) { 
printk(KERN_INFO "Unknown PC Card CIS - not a " "Prism2/2.5 card?\n"); goto fail; } printk(KERN_DEBUG "Prism2/2.5 PC Card detected in PLX9052 " "adapter\n"); /* Write COR to enable PC Card */ writeb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, attr_mem + cor_offset); /* Enable PCI interrupts if they are not already enabled */ reg = inl(plx_ioaddr + PLX_INTCSR); printk(KERN_DEBUG "PLX_INTCSR=0x%x\n", reg); if (!(reg & PLX_INTCSR_PCI_INTEN)) { outl(reg | PLX_INTCSR_PCI_INTEN, plx_ioaddr + PLX_INTCSR); if (!(inl(plx_ioaddr + PLX_INTCSR) & PLX_INTCSR_PCI_INTEN)) { printk(KERN_WARNING "%s: Could not enable " "Local Interrupts\n", dev_info); goto fail; } } reg = inl(plx_ioaddr + PLX_CNTRL); printk(KERN_DEBUG "PLX_CNTRL=0x%x (Serial EEPROM " "present=%d)\n", reg, (reg & PLX_CNTRL_SERIAL_EEPROM_PRESENT) != 0); /* should set PLX_PCIIPR to 0x01 (INTA#) if Serial EEPROM is * not present; but are there really such cards in use(?) */ } dev = prism2_init_local_data(&prism2_plx_funcs, cards_found, &pdev->dev); if (dev == NULL) goto fail; iface = netdev_priv(dev); local = iface->local; local->hw_priv = hw_priv; cards_found++; dev->irq = pdev->irq; dev->base_addr = pccard_ioaddr; hw_priv->attr_mem = attr_mem; hw_priv->cor_offset = cor_offset; pci_set_drvdata(pdev, dev); if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: request_irq failed\n", dev->name); goto fail; } else irq_registered = 1; if (prism2_hw_config(dev, 1)) { printk(KERN_DEBUG "%s: hardware initialization failed\n", dev_info); goto fail; } return hostap_hw_ready(dev); fail: if (irq_registered && dev) free_irq(dev->irq, dev); if (attr_mem) iounmap(attr_mem); pci_disable_device(pdev); prism2_free_local_data(dev); err_out_free: kfree(hw_priv); return -ENODEV; } static void prism2_plx_remove(struct pci_dev *pdev) { struct net_device *dev; struct hostap_interface *iface; struct hostap_plx_priv *hw_priv; dev = pci_get_drvdata(pdev); iface = netdev_priv(dev); hw_priv = 
iface->local->hw_priv; /* Reset the hardware, and ensure interrupts are disabled. */ prism2_plx_cor_sreset(iface->local); hfa384x_disable_interrupts(dev); if (hw_priv->attr_mem) iounmap(hw_priv->attr_mem); if (dev->irq) free_irq(dev->irq, dev); prism2_free_local_data(dev); kfree(hw_priv); pci_disable_device(pdev); } MODULE_DEVICE_TABLE(pci, prism2_plx_id_table); static struct pci_driver prism2_plx_driver = { .name = "hostap_plx", .id_table = prism2_plx_id_table, .probe = prism2_plx_probe, .remove = prism2_plx_remove, }; static int __init init_prism2_plx(void) { return pci_register_driver(&prism2_plx_driver); } static void __exit exit_prism2_plx(void) { pci_unregister_driver(&prism2_plx_driver); } module_init(init_prism2_plx); module_exit(exit_prism2_plx);
gpl-2.0
alexsh/Telegram
TMessagesProj/jni/boringssl/crypto/fipsmodule/rsa/padding.c
12
19217
/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 2005. */ /* ==================================================================== * Copyright (c) 2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). */ #include <openssl/rsa.h> #include <assert.h> #include <limits.h> #include <string.h> #include <openssl/bn.h> #include <openssl/digest.h> #include <openssl/err.h> #include <openssl/mem.h> #include <openssl/rand.h> #include <openssl/sha.h> #include "internal.h" #include "../../internal.h" #define RSA_PKCS1_PADDING_SIZE 11 int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { // See RFC 8017, section 9.2. if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY); return 0; } to[0] = 0; to[1] = 1; OPENSSL_memset(to + 2, 0xff, to_len - 3 - from_len); to[to_len - from_len - 1] = 0; OPENSSL_memcpy(to + to_len - from_len, from, from_len); return 1; } int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len) { // See RFC 8017, section 9.2. This is part of signature verification and thus // does not need to run in constant-time. if (from_len < 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } // Check the header. 
if (from[0] != 0 || from[1] != 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_BLOCK_TYPE_IS_NOT_01); return 0; } // Scan over padded data, looking for the 00. size_t pad; for (pad = 2 /* header */; pad < from_len; pad++) { if (from[pad] == 0x00) { break; } if (from[pad] != 0xff) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_FIXED_HEADER_DECRYPT); return 0; } } if (pad == from_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_NULL_BEFORE_BLOCK_MISSING); return 0; } if (pad < 2 /* header */ + 8) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_PAD_BYTE_COUNT); return 0; } // Skip over the 00. pad++; if (from_len - pad > max_out) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); return 0; } OPENSSL_memcpy(out, from + pad, from_len - pad); *out_len = from_len - pad; return 1; } static int rand_nonzero(uint8_t *out, size_t len) { if (!RAND_bytes(out, len)) { return 0; } for (size_t i = 0; i < len; i++) { while (out[i] == 0) { if (!RAND_bytes(out + i, 1)) { return 0; } } } return 1; } int RSA_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { // See RFC 8017, section 7.2.1. if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); return 0; } to[0] = 0; to[1] = 2; size_t padding_len = to_len - 3 - from_len; if (!rand_nonzero(to + 2, padding_len)) { return 0; } to[2 + padding_len] = 0; OPENSSL_memcpy(to + to_len - from_len, from, from_len); return 1; } int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len) { if (from_len == 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY); return 0; } // PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography // Standard", section 7.2.2. if (from_len < RSA_PKCS1_PADDING_SIZE) { // |from| is zero-padded to the size of the RSA modulus, a public value, so // this can be rejected in non-constant time. 
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } crypto_word_t first_byte_is_zero = constant_time_eq_w(from[0], 0); crypto_word_t second_byte_is_two = constant_time_eq_w(from[1], 2); crypto_word_t zero_index = 0, looking_for_index = CONSTTIME_TRUE_W; for (size_t i = 2; i < from_len; i++) { crypto_word_t equals0 = constant_time_is_zero_w(from[i]); zero_index = constant_time_select_w(looking_for_index & equals0, i, zero_index); looking_for_index = constant_time_select_w(equals0, 0, looking_for_index); } // The input must begin with 00 02. crypto_word_t valid_index = first_byte_is_zero; valid_index &= second_byte_is_two; // We must have found the end of PS. valid_index &= ~looking_for_index; // PS must be at least 8 bytes long, and it starts two bytes into |from|. valid_index &= constant_time_ge_w(zero_index, 2 + 8); // Skip the zero byte. zero_index++; // NOTE: Although this logic attempts to be constant time, the API contracts // of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it // impossible to completely avoid Bleichenbacher's attack. Consumers should // use |RSA_PADDING_NONE| and perform the padding check in constant-time // combined with a swap to a random session key or other mitigation. CONSTTIME_DECLASSIFY(&valid_index, sizeof(valid_index)); CONSTTIME_DECLASSIFY(&zero_index, sizeof(zero_index)); if (!valid_index) { OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); return 0; } const size_t msg_len = from_len - zero_index; if (msg_len > max_out) { // This shouldn't happen because this function is always called with // |max_out| as the key size and |from_len| is bounded by the key size. 
OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); return 0; } OPENSSL_memcpy(out, &from[zero_index], msg_len); *out_len = msg_len; return 1; } int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { if (from_len > to_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); return 0; } if (from_len < to_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } OPENSSL_memcpy(to, from, from_len); return 1; } static int PKCS1_MGF1(uint8_t *out, size_t len, const uint8_t *seed, size_t seed_len, const EVP_MD *md) { int ret = 0; EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); size_t md_len = EVP_MD_size(md); for (uint32_t i = 0; len > 0; i++) { uint8_t counter[4]; counter[0] = (uint8_t)(i >> 24); counter[1] = (uint8_t)(i >> 16); counter[2] = (uint8_t)(i >> 8); counter[3] = (uint8_t)i; if (!EVP_DigestInit_ex(&ctx, md, NULL) || !EVP_DigestUpdate(&ctx, seed, seed_len) || !EVP_DigestUpdate(&ctx, counter, sizeof(counter))) { goto err; } if (md_len <= len) { if (!EVP_DigestFinal_ex(&ctx, out, NULL)) { goto err; } out += md_len; len -= md_len; } else { uint8_t digest[EVP_MAX_MD_SIZE]; if (!EVP_DigestFinal_ex(&ctx, digest, NULL)) { goto err; } OPENSSL_memcpy(out, digest, len); len = 0; } } ret = 1; err: EVP_MD_CTX_cleanup(&ctx); return ret; } int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len, const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md) { if (md == NULL) { md = EVP_sha1(); } if (mgf1md == NULL) { mgf1md = md; } size_t mdlen = EVP_MD_size(md); if (to_len < 2 * mdlen + 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } size_t emlen = to_len - 1; if (from_len > emlen - 2 * mdlen - 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); return 0; } if (emlen < 2 * mdlen + 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } to[0] = 0; uint8_t *seed = to + 1; uint8_t *db = to + mdlen + 1; if (!EVP_Digest(param, 
param_len, db, NULL, md, NULL)) { return 0; } OPENSSL_memset(db + mdlen, 0, emlen - from_len - 2 * mdlen - 1); db[emlen - from_len - mdlen - 1] = 0x01; OPENSSL_memcpy(db + emlen - from_len - mdlen, from, from_len); if (!RAND_bytes(seed, mdlen)) { return 0; } uint8_t *dbmask = OPENSSL_malloc(emlen - mdlen); if (dbmask == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); return 0; } int ret = 0; if (!PKCS1_MGF1(dbmask, emlen - mdlen, seed, mdlen, mgf1md)) { goto out; } for (size_t i = 0; i < emlen - mdlen; i++) { db[i] ^= dbmask[i]; } uint8_t seedmask[EVP_MAX_MD_SIZE]; if (!PKCS1_MGF1(seedmask, mdlen, db, emlen - mdlen, mgf1md)) { goto out; } for (size_t i = 0; i < mdlen; i++) { seed[i] ^= seedmask[i]; } ret = 1; out: OPENSSL_free(dbmask); return ret; } int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len, const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md) { uint8_t *db = NULL; if (md == NULL) { md = EVP_sha1(); } if (mgf1md == NULL) { mgf1md = md; } size_t mdlen = EVP_MD_size(md); // The encoded message is one byte smaller than the modulus to ensure that it // doesn't end up greater than the modulus. Thus there's an extra "+1" here // compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. if (from_len < 1 + 2*mdlen + 1) { // 'from_len' is the length of the modulus, i.e. does not depend on the // particular ciphertext. 
goto decoding_err; } size_t dblen = from_len - mdlen - 1; db = OPENSSL_malloc(dblen); if (db == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); goto err; } const uint8_t *maskedseed = from + 1; const uint8_t *maskeddb = from + 1 + mdlen; uint8_t seed[EVP_MAX_MD_SIZE]; if (!PKCS1_MGF1(seed, mdlen, maskeddb, dblen, mgf1md)) { goto err; } for (size_t i = 0; i < mdlen; i++) { seed[i] ^= maskedseed[i]; } if (!PKCS1_MGF1(db, dblen, seed, mdlen, mgf1md)) { goto err; } for (size_t i = 0; i < dblen; i++) { db[i] ^= maskeddb[i]; } uint8_t phash[EVP_MAX_MD_SIZE]; if (!EVP_Digest(param, param_len, phash, NULL, md, NULL)) { goto err; } crypto_word_t bad = ~constant_time_is_zero_w(CRYPTO_memcmp(db, phash, mdlen)); bad |= ~constant_time_is_zero_w(from[0]); crypto_word_t looking_for_one_byte = CONSTTIME_TRUE_W; size_t one_index = 0; for (size_t i = mdlen; i < dblen; i++) { crypto_word_t equals1 = constant_time_eq_w(db[i], 1); crypto_word_t equals0 = constant_time_eq_w(db[i], 0); one_index = constant_time_select_w(looking_for_one_byte & equals1, i, one_index); looking_for_one_byte = constant_time_select_w(equals1, 0, looking_for_one_byte); bad |= looking_for_one_byte & ~equals0; } bad |= looking_for_one_byte; if (bad) { goto decoding_err; } one_index++; size_t mlen = dblen - one_index; if (max_out < mlen) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } OPENSSL_memcpy(out, db + one_index, mlen); *out_len = mlen; OPENSSL_free(db); return 1; decoding_err: // to avoid chosen ciphertext attacks, the error message should not reveal // which kind of decoding error happened OPENSSL_PUT_ERROR(RSA, RSA_R_OAEP_DECODING_ERROR); err: OPENSSL_free(db); return 0; } static const uint8_t kPSSZeroes[] = {0, 0, 0, 0, 0, 0, 0, 0}; int RSA_verify_PKCS1_PSS_mgf1(const RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, const uint8_t *EM, int sLen) { int i; int ret = 0; int maskedDBLen, MSBits, emLen; size_t hLen; const uint8_t *H; uint8_t *DB = NULL; EVP_MD_CTX 
ctx; uint8_t H_[EVP_MAX_MD_SIZE]; EVP_MD_CTX_init(&ctx); if (mgf1Hash == NULL) { mgf1Hash = Hash; } hLen = EVP_MD_size(Hash); // Negative sLen has special meanings: // -1 sLen == hLen // -2 salt length is autorecovered from signature // -N reserved if (sLen == -1) { sLen = hLen; } else if (sLen == -2) { sLen = -2; } else if (sLen < -2) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } MSBits = (BN_num_bits(rsa->n) - 1) & 0x7; emLen = RSA_size(rsa); if (EM[0] & (0xFF << MSBits)) { OPENSSL_PUT_ERROR(RSA, RSA_R_FIRST_OCTET_INVALID); goto err; } if (MSBits == 0) { EM++; emLen--; } if (emLen < (int)hLen + 2 || emLen < ((int)hLen + sLen + 2)) { // sLen can be small negative OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } if (EM[emLen - 1] != 0xbc) { OPENSSL_PUT_ERROR(RSA, RSA_R_LAST_OCTET_INVALID); goto err; } maskedDBLen = emLen - hLen - 1; H = EM + maskedDBLen; DB = OPENSSL_malloc(maskedDBLen); if (!DB) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); goto err; } if (!PKCS1_MGF1(DB, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } for (i = 0; i < maskedDBLen; i++) { DB[i] ^= EM[i]; } if (MSBits) { DB[0] &= 0xFF >> (8 - MSBits); } for (i = 0; DB[i] == 0 && i < (maskedDBLen - 1); i++) { ; } if (DB[i++] != 0x1) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_RECOVERY_FAILED); goto err; } if (sLen >= 0 && (maskedDBLen - i) != sLen) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } if (!EVP_DigestInit_ex(&ctx, Hash, NULL) || !EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) || !EVP_DigestUpdate(&ctx, mHash, hLen) || !EVP_DigestUpdate(&ctx, DB + i, maskedDBLen - i) || !EVP_DigestFinal_ex(&ctx, H_, NULL)) { goto err; } if (OPENSSL_memcmp(H_, H, hLen)) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE); ret = 0; } else { ret = 1; } err: OPENSSL_free(DB); EVP_MD_CTX_cleanup(&ctx); return ret; } int RSA_padding_add_PKCS1_PSS_mgf1(const RSA *rsa, unsigned char *EM, const unsigned char *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, int sLenRequested) { 
int ret = 0; size_t maskedDBLen, MSBits, emLen; size_t hLen; unsigned char *H, *salt = NULL, *p; if (mgf1Hash == NULL) { mgf1Hash = Hash; } hLen = EVP_MD_size(Hash); if (BN_is_zero(rsa->n)) { OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY); goto err; } MSBits = (BN_num_bits(rsa->n) - 1) & 0x7; emLen = RSA_size(rsa); if (MSBits == 0) { assert(emLen >= 1); *EM++ = 0; emLen--; } if (emLen < hLen + 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); goto err; } // Negative sLenRequested has special meanings: // -1 sLen == hLen // -2 salt length is maximized // -N reserved size_t sLen; if (sLenRequested == -1) { sLen = hLen; } else if (sLenRequested == -2) { sLen = emLen - hLen - 2; } else if (sLenRequested < 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } else { sLen = (size_t)sLenRequested; } if (emLen - hLen - 2 < sLen) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); goto err; } if (sLen > 0) { salt = OPENSSL_malloc(sLen); if (!salt) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); goto err; } if (!RAND_bytes(salt, sLen)) { goto err; } } maskedDBLen = emLen - hLen - 1; H = EM + maskedDBLen; EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); int digest_ok = EVP_DigestInit_ex(&ctx, Hash, NULL) && EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) && EVP_DigestUpdate(&ctx, mHash, hLen) && EVP_DigestUpdate(&ctx, salt, sLen) && EVP_DigestFinal_ex(&ctx, H, NULL); EVP_MD_CTX_cleanup(&ctx); if (!digest_ok) { goto err; } // Generate dbMask in place then perform XOR on it if (!PKCS1_MGF1(EM, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } p = EM; // Initial PS XORs with all zeroes which is a NOP so just update // pointer. Note from a test above this value is guaranteed to // be non-negative. 
p += emLen - sLen - hLen - 2; *p++ ^= 0x1; if (sLen > 0) { for (size_t i = 0; i < sLen; i++) { *p++ ^= salt[i]; } } if (MSBits) { EM[0] &= 0xFF >> (8 - MSBits); } // H is already in place so just set final 0xbc EM[emLen - 1] = 0xbc; ret = 1; err: OPENSSL_free(salt); return ret; }
gpl-2.0
oligau/touchwax
SDL2/src/audio/fusionsound/SDL_fsaudio.c
12
9246
/* Simple DirectMedia Layer Copyright (C) 1997-2013 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "SDL_config.h" #if SDL_AUDIO_DRIVER_FUSIONSOUND /* Allow access to a raw mixing buffer */ #ifdef HAVE_SIGNAL_H #include <signal.h> #endif #include <unistd.h> #include "SDL_timer.h" #include "SDL_audio.h" #include "../SDL_audiomem.h" #include "../SDL_audio_c.h" #include "SDL_fsaudio.h" #include <fusionsound/fusionsound_version.h> /* #define SDL_AUDIO_DRIVER_FUSIONSOUND_DYNAMIC "libfusionsound.so" */ #ifdef SDL_AUDIO_DRIVER_FUSIONSOUND_DYNAMIC #include "SDL_name.h" #include "SDL_loadso.h" #else #define SDL_NAME(X) X #endif #if (FUSIONSOUND_MAJOR_VERSION == 1) && (FUSIONSOUND_MINOR_VERSION < 1) typedef DFBResult DirectResult; #endif /* Buffers to use - more than 2 gives a lot of latency */ #define FUSION_BUFFERS (2) #ifdef SDL_AUDIO_DRIVER_FUSIONSOUND_DYNAMIC static const char *fs_library = SDL_AUDIO_DRIVER_FUSIONSOUND_DYNAMIC; static void *fs_handle = NULL; static DirectResult (*SDL_NAME(FusionSoundInit)) (int *argc, char *(*argv[])); static DirectResult (*SDL_NAME(FusionSoundCreate)) (IFusionSound ** ret_interface); #define SDL_FS_SYM(x) { #x, (void **) (char *) &SDL_NAME(x) } 
static struct { const char *name; void **func; } fs_functions[] = { /* *INDENT-OFF* */ SDL_FS_SYM(FusionSoundInit), SDL_FS_SYM(FusionSoundCreate), /* *INDENT-ON* */ }; #undef SDL_FS_SYM static void UnloadFusionSoundLibrary() { if (fs_handle != NULL) { SDL_UnloadObject(fs_handle); fs_handle = NULL; } } static int LoadFusionSoundLibrary(void) { int i, retval = -1; if (fs_handle == NULL) { fs_handle = SDL_LoadObject(fs_library); if (fs_handle != NULL) { retval = 0; for (i = 0; i < SDL_arraysize(fs_functions); ++i) { *fs_functions[i].func = SDL_LoadFunction(fs_handle, fs_functions[i].name); if (!*fs_functions[i].func) { retval = -1; UnloadFusionSoundLibrary(); break; } } } } return retval; } #else static void UnloadFusionSoundLibrary() { return; } static int LoadFusionSoundLibrary(void) { return 0; } #endif /* SDL_AUDIO_DRIVER_FUSIONSOUND_DYNAMIC */ /* This function waits until it is possible to write a full sound buffer */ static void SDL_FS_WaitDevice(_THIS) { this->hidden->stream->Wait(this->hidden->stream, this->hidden->mixsamples); } static void SDL_FS_PlayDevice(_THIS) { DirectResult ret; ret = this->hidden->stream->Write(this->hidden->stream, this->hidden->mixbuf, this->hidden->mixsamples); /* If we couldn't write, assume fatal error for now */ if (ret) { this->enabled = 0; } #ifdef DEBUG_AUDIO fprintf(stderr, "Wrote %d bytes of audio data\n", this->hidden->mixlen); #endif } static void SDL_FS_WaitDone(_THIS) { this->hidden->stream->Wait(this->hidden->stream, this->hidden->mixsamples * FUSION_BUFFERS); } static Uint8 * SDL_FS_GetDeviceBuf(_THIS) { return (this->hidden->mixbuf); } static void SDL_FS_CloseDevice(_THIS) { if (this->hidden != NULL) { SDL_FreeAudioMem(this->hidden->mixbuf); this->hidden->mixbuf = NULL; if (this->hidden->stream) { this->hidden->stream->Release(this->hidden->stream); this->hidden->stream = NULL; } if (this->hidden->fs) { this->hidden->fs->Release(this->hidden->fs); this->hidden->fs = NULL; } SDL_free(this->hidden); this->hidden = NULL; 
} } static int SDL_FS_OpenDevice(_THIS, const char *devname, int iscapture) { int bytes; SDL_AudioFormat test_format = 0, format = 0; FSSampleFormat fs_format; FSStreamDescription desc; DirectResult ret; /* Initialize all variables that we clean on shutdown */ this->hidden = (struct SDL_PrivateAudioData *) SDL_malloc((sizeof *this->hidden)); if (this->hidden == NULL) { return SDL_OutOfMemory(); } SDL_memset(this->hidden, 0, (sizeof *this->hidden)); /* Try for a closest match on audio format */ for (test_format = SDL_FirstAudioFormat(this->spec.format); !format && test_format;) { #ifdef DEBUG_AUDIO fprintf(stderr, "Trying format 0x%4.4x\n", test_format); #endif switch (test_format) { case AUDIO_U8: fs_format = FSSF_U8; bytes = 1; format = 1; break; case AUDIO_S16SYS: fs_format = FSSF_S16; bytes = 2; format = 1; break; case AUDIO_S32SYS: fs_format = FSSF_S32; bytes = 4; format = 1; break; case AUDIO_F32SYS: fs_format = FSSF_FLOAT; bytes = 4; format = 1; break; default: format = 0; break; } if (!format) { test_format = SDL_NextAudioFormat(); } } if (format == 0) { SDL_FS_CloseDevice(this); return SDL_SetError("Couldn't find any hardware audio formats"); } this->spec.format = test_format; /* Retrieve the main sound interface. */ ret = SDL_NAME(FusionSoundCreate) (&this->hidden->fs); if (ret) { SDL_FS_CloseDevice(this); return SDL_SetError("Unable to initialize FusionSound: %d", ret); } this->hidden->mixsamples = this->spec.size / bytes / this->spec.channels; /* Fill stream description. 
*/ desc.flags = FSSDF_SAMPLERATE | FSSDF_BUFFERSIZE | FSSDF_CHANNELS | FSSDF_SAMPLEFORMAT | FSSDF_PREBUFFER; desc.samplerate = this->spec.freq; desc.buffersize = this->spec.size * FUSION_BUFFERS; desc.channels = this->spec.channels; desc.prebuffer = 10; desc.sampleformat = fs_format; ret = this->hidden->fs->CreateStream(this->hidden->fs, &desc, &this->hidden->stream); if (ret) { SDL_FS_CloseDevice(this); return SDL_SetError("Unable to create FusionSoundStream: %d", ret); } /* See what we got */ desc.flags = FSSDF_SAMPLERATE | FSSDF_BUFFERSIZE | FSSDF_CHANNELS | FSSDF_SAMPLEFORMAT; ret = this->hidden->stream->GetDescription(this->hidden->stream, &desc); this->spec.freq = desc.samplerate; this->spec.size = desc.buffersize / FUSION_BUFFERS * bytes * desc.channels; this->spec.channels = desc.channels; /* Calculate the final parameters for this audio specification */ SDL_CalculateAudioSpec(&this->spec); /* Allocate mixing buffer */ this->hidden->mixlen = this->spec.size; this->hidden->mixbuf = (Uint8 *) SDL_AllocAudioMem(this->hidden->mixlen); if (this->hidden->mixbuf == NULL) { SDL_FS_CloseDevice(this); return SDL_OutOfMemory(); } SDL_memset(this->hidden->mixbuf, this->spec.silence, this->spec.size); /* We're ready to rock and roll. 
:-) */ return 0; } static void SDL_FS_Deinitialize(void) { UnloadFusionSoundLibrary(); } static int SDL_FS_Init(SDL_AudioDriverImpl * impl) { if (LoadFusionSoundLibrary() < 0) { return 0; } else { DirectResult ret; ret = SDL_NAME(FusionSoundInit) (NULL, NULL); if (ret) { UnloadFusionSoundLibrary(); SDL_SetError ("FusionSound: SDL_FS_init failed (FusionSoundInit: %d)", ret); return 0; } } /* Set the function pointers */ impl->OpenDevice = SDL_FS_OpenDevice; impl->PlayDevice = SDL_FS_PlayDevice; impl->WaitDevice = SDL_FS_WaitDevice; impl->GetDeviceBuf = SDL_FS_GetDeviceBuf; impl->CloseDevice = SDL_FS_CloseDevice; impl->WaitDone = SDL_FS_WaitDone; impl->Deinitialize = SDL_FS_Deinitialize; impl->OnlyHasDefaultOutputDevice = 1; return 1; /* this audio target is available. */ } AudioBootStrap FUSIONSOUND_bootstrap = { "fusionsound", "FusionSound", SDL_FS_Init, 0 }; #endif /* SDL_AUDIO_DRIVER_FUSIONSOUND */ /* vi: set ts=4 sw=4 expandtab: */
gpl-2.0
da4089/wireshark
epan/dissectors/asn1/pkcs12/packet-pkcs12-template.c
12
14192
/* packet-pkcs12.c * Routines for PKCS#12: Personal Information Exchange packet dissection * Graeme Lunt 2006 * * See "PKCS #12 v1.1: Personal Information Exchange Syntax": * * http://www.emc.com/emc-plus/rsa-labs/pkcs/files/h11301-wp-pkcs-12v1-1-personal-information-exchange-syntax.pdf * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "config.h" #include <epan/packet.h> #include <epan/expert.h> #include <epan/oids.h> #include <epan/asn1.h> #include <epan/prefs.h> #include "packet-ber.h" #include "packet-pkcs12.h" #include "packet-x509af.h" #include "packet-x509if.h" #include "packet-cms.h" #include <wsutil/wsgcrypt.h> #define PNAME "PKCS#12: Personal Information Exchange" #define PSNAME "PKCS12" #define PFNAME "pkcs12" #define PKCS12_PBE_ARCFOUR_SHA1_OID "1.2.840.113549.1.12.1.1" #define PKCS12_PBE_3DES_SHA1_OID "1.2.840.113549.1.12.1.3" #define PKCS12_PBE_RC2_40_SHA1_OID "1.2.840.113549.1.12.1.6" void proto_register_pkcs12(void); void proto_reg_handoff_pkcs12(void); /* Initialize the protocol and registered fields */ static int proto_pkcs12 = -1; static int hf_pkcs12_X509Certificate_PDU = -1; static int hf_pkcs12_AuthenticatedSafe_PDU = -1; /* AuthenticatedSafe */ static gint ett_decrypted_pbe = -1; static expert_field ei_pkcs12_octet_string_expected = EI_INIT; static const char *object_identifier_id = NULL; static int iteration_count = 0; static tvbuff_t *salt = NULL; static const char *password = NULL; static gboolean try_null_password = FALSE; static int dissect_AuthenticatedSafe_OCTETSTRING_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data); static int dissect_SafeContents_OCTETSTRING_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data); static int dissect_PrivateKeyInfo_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data); #include "packet-pkcs12-hf.c" /* Initialize the subtree pointers */ #include "packet-pkcs12-ett.c" static void append_oid(proto_tree *tree, const char *oid) { const char *name = NULL; name = oid_resolved_from_string(wmem_packet_scope(), oid); proto_item_append_text(tree, " (%s)", name ? 
name : oid); } #ifdef HAVE_LIBGCRYPT static int generate_key_or_iv(unsigned int id, tvbuff_t *salt_tvb, unsigned int iter, const char *pw, unsigned int req_keylen, char * keybuf) { int rc; unsigned int i, j; gcry_md_hd_t md; gcry_mpi_t num_b1 = NULL; size_t pwlen; char hash[20], buf_b[64], buf_i[128], *p; char *salt_p; int salt_size; size_t cur_keylen; size_t n; gcry_error_t err; cur_keylen = 0; salt_size = tvb_captured_length(salt_tvb); salt_p = (char *)tvb_memdup(wmem_packet_scope(), salt_tvb, 0, salt_size); if (pw == NULL) pwlen = 0; else pwlen = strlen(pw); if (pwlen > 63 / 2) { return FALSE; } /* Store salt and password in BUF_I */ p = buf_i; for (i = 0; i < 64; i++) *p++ = salt_p[i % salt_size]; if (pw) { for (i = j = 0; i < 64; i += 2) { *p++ = 0; *p++ = pw[j]; if (++j > pwlen) /* Note, that we include the trailing zero */ j = 0; } } else memset (p, 0, 64); for (;;) { err = gcry_md_open(&md, GCRY_MD_SHA1, 0); if (gcry_err_code(err)) { return FALSE; } for (i = 0; i < 64; i++) { unsigned char lid = id & 0xFF; gcry_md_write (md, &lid, 1); } gcry_md_write(md, buf_i, pw ? 128 : 64); gcry_md_final (md); memcpy (hash, gcry_md_read (md, 0), 20); gcry_md_close (md); for (i = 1; i < iter; i++) gcry_md_hash_buffer (GCRY_MD_SHA1, hash, hash, 20); for (i = 0; i < 20 && cur_keylen < req_keylen; i++) keybuf[cur_keylen++] = hash[i]; if (cur_keylen == req_keylen) { gcry_mpi_release (num_b1); return TRUE; /* ready */ } /* need more bytes. 
*/ for (i = 0; i < 64; i++) buf_b[i] = hash[i % 20]; n = 64; rc = gcry_mpi_scan (&num_b1, GCRYMPI_FMT_USG, buf_b, n, &n); if (rc != 0) { return FALSE; } gcry_mpi_add_ui (num_b1, num_b1, 1); for (i = 0; i < 128; i += 64) { gcry_mpi_t num_ij; n = 64; rc = gcry_mpi_scan (&num_ij, GCRYMPI_FMT_USG, buf_i + i, n, &n); if (rc != 0) { return FALSE; } gcry_mpi_add (num_ij, num_ij, num_b1); gcry_mpi_clear_highbit (num_ij, 64 * 8); n = 64; rc = gcry_mpi_print (GCRYMPI_FMT_USG, buf_i + i, n, &n, num_ij); if (rc != 0) { return FALSE; } gcry_mpi_release (num_ij); } } } #endif void PBE_reset_parameters(void) { iteration_count = 0; salt = NULL; } int PBE_decrypt_data(const char *object_identifier_id_param _U_, tvbuff_t *encrypted_tvb _U_, asn1_ctx_t *actx _U_, proto_item *item _U_) { #ifdef HAVE_LIBGCRYPT const char *encryption_algorithm; gcry_cipher_hd_t cipher; gcry_error_t err; int algo; int mode; int ivlen = 0; int keylen = 0; int datalen = 0; char *key = NULL; char *iv = NULL; char *clear_data = NULL; tvbuff_t *clear_tvb = NULL; const gchar *oidname; GString *name; proto_tree *tree; char byte; gboolean decrypt_ok = TRUE; if(((password == NULL) || (*password == '\0')) && (try_null_password == FALSE)) { /* we are not configured to decrypt */ return FALSE; } encryption_algorithm = x509af_get_last_algorithm_id(); /* these are the only encryption schemes we understand for now */ if(!strcmp(encryption_algorithm, PKCS12_PBE_3DES_SHA1_OID)) { ivlen = 8; keylen = 24; algo = GCRY_CIPHER_3DES; mode = GCRY_CIPHER_MODE_CBC; } else if(!strcmp(encryption_algorithm, PKCS12_PBE_ARCFOUR_SHA1_OID)) { ivlen = 0; keylen = 16; algo = GCRY_CIPHER_ARCFOUR; mode = GCRY_CIPHER_MODE_NONE; } else if(!strcmp(encryption_algorithm, PKCS12_PBE_RC2_40_SHA1_OID)) { ivlen = 8; keylen = 5; algo = GCRY_CIPHER_RFC2268_40; mode = GCRY_CIPHER_MODE_CBC; } else { /* we don't know how to decrypt this */ proto_item_append_text(item, " [Unsupported encryption algorithm]"); return FALSE; } if((iteration_count == 0) || 
(salt == NULL)) { proto_item_append_text(item, " [Insufficient parameters]"); return FALSE; } /* allocate buffers */ key = (char *)wmem_alloc(wmem_packet_scope(), keylen); if(!generate_key_or_iv(1 /*LEY */, salt, iteration_count, password, keylen, key)) return FALSE; if(ivlen) { iv = (char *)wmem_alloc(wmem_packet_scope(), ivlen); if(!generate_key_or_iv(2 /* IV */, salt, iteration_count, password, ivlen, iv)) return FALSE; } /* now try an internal function */ err = gcry_cipher_open(&cipher, algo, mode, 0); if (gcry_err_code (err)) return FALSE; err = gcry_cipher_setkey (cipher, key, keylen); if (gcry_err_code (err)) { gcry_cipher_close (cipher); return FALSE; } if(ivlen) { err = gcry_cipher_setiv (cipher, iv, ivlen); if (gcry_err_code (err)) { gcry_cipher_close (cipher); return FALSE; } } datalen = tvb_captured_length(encrypted_tvb); clear_data = (char *)g_malloc(datalen); err = gcry_cipher_decrypt (cipher, clear_data, datalen, (char *)tvb_memdup(wmem_packet_scope(), encrypted_tvb, 0, datalen), datalen); if (gcry_err_code (err)) { proto_item_append_text(item, " [Failed to decrypt with password preference]"); gcry_cipher_close (cipher); g_free(clear_data); return FALSE; } gcry_cipher_close (cipher); /* We don't know if we have successfully decrypted the data or not so we: a) check the trailing bytes b) see if we start with a sequence or a set (is this too constraining? */ /* first the trailing bytes */ byte = clear_data[datalen-1]; if(byte <= 0x08) { int i; for(i = (int)byte; i > 0 ; i--) { if(clear_data[datalen - i] != byte) { decrypt_ok = FALSE; break; } } } else { /* XXX: is this a failure? */ } /* we assume the result is ASN.1 - check it is a SET or SEQUENCE */ byte = clear_data[0]; if((byte != 0x30) && (byte != 0x31)) { /* do we need more here? OCTET STRING? 
*/ decrypt_ok = FALSE; } if(!decrypt_ok) { g_free(clear_data); proto_item_append_text(item, " [Failed to decrypt with supplied password]"); return FALSE; } proto_item_append_text(item, " [Decrypted successfully]"); tree = proto_item_add_subtree(item, ett_decrypted_pbe); /* OK - so now clear_data contains the decrypted data */ clear_tvb = tvb_new_child_real_data(encrypted_tvb,(const guint8 *)clear_data, datalen, datalen); tvb_set_free_cb(clear_tvb, g_free); name = g_string_new(""); oidname = oid_resolved_from_string(wmem_packet_scope(), object_identifier_id_param); g_string_printf(name, "Decrypted %s", oidname ? oidname : object_identifier_id_param); /* add it as a new source */ add_new_data_source(actx->pinfo, clear_tvb, name->str); g_string_free(name, TRUE); /* now try and decode it */ call_ber_oid_callback(object_identifier_id_param, clear_tvb, 0, actx->pinfo, tree, NULL); return TRUE; #else /* we cannot decrypt */ return FALSE; #endif } #include "packet-pkcs12-fn.c" static int strip_octet_string(tvbuff_t *tvb) { gint8 ber_class; gboolean pc, ind; gint32 tag; guint32 len; int offset = 0; /* PKCS#7 encodes the content as OCTET STRING, whereas CMS is just any ANY */ /* if we use CMS (rather than PKCS#7) - which we are - we need to strip the OCTET STRING tag */ /* before proceeding */ offset = get_ber_identifier(tvb, 0, &ber_class, &pc, &tag); offset = get_ber_length(tvb, offset, &len, &ind); if((ber_class == BER_CLASS_UNI) && (tag == BER_UNI_TAG_OCTETSTRING)) return offset; return 0; } static int dissect_AuthenticatedSafe_OCTETSTRING_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_) { int offset = 0; asn1_ctx_t asn1_ctx; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); if((offset = strip_octet_string(tvb)) > 0) dissect_pkcs12_AuthenticatedSafe(FALSE, tvb, offset, &asn1_ctx, tree, hf_pkcs12_AuthenticatedSafe_PDU); else proto_tree_add_expert(tree, pinfo, &ei_pkcs12_octet_string_expected, tvb, 0, 1); return tvb_captured_length(tvb); } static 
int dissect_SafeContents_OCTETSTRING_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_) { int offset = 0; asn1_ctx_t asn1_ctx; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); offset = strip_octet_string(tvb); dissect_pkcs12_SafeContents(FALSE, tvb, offset, &asn1_ctx, tree, hf_pkcs12_SafeContents_PDU); return tvb_captured_length(tvb); } static int dissect_X509Certificate_OCTETSTRING_PDU(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_) { int offset = 0; asn1_ctx_t asn1_ctx; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); if((offset = strip_octet_string(tvb)) > 0) dissect_x509af_Certificate(FALSE, tvb, offset, &asn1_ctx, tree, hf_pkcs12_X509Certificate_PDU); else proto_tree_add_expert(tree, pinfo, &ei_pkcs12_octet_string_expected, tvb, 0, 1); return tvb_captured_length(tvb); } /*--- proto_register_pkcs12 ----------------------------------------------*/ void proto_register_pkcs12(void) { /* List of fields */ static hf_register_info hf[] = { { &hf_pkcs12_X509Certificate_PDU, { "X509Certificate", "pkcs12.X509Certificate", FT_NONE, BASE_NONE, NULL, 0, "pkcs12.X509Certificate", HFILL }}, { &hf_pkcs12_AuthenticatedSafe_PDU, { "AuthenticatedSafe", "pkcs12.AuthenticatedSafe", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, #include "packet-pkcs12-hfarr.c" }; /* List of subtrees */ static gint *ett[] = { &ett_decrypted_pbe, #include "packet-pkcs12-ettarr.c" }; static ei_register_info ei[] = { { &ei_pkcs12_octet_string_expected, { "pkcs12.octet_string_expected", PI_PROTOCOL, PI_WARN, "BER Error: OCTET STRING expected", EXPFILL }}, }; module_t *pkcs12_module; expert_module_t* expert_pkcs12; /* Register protocol */ proto_pkcs12 = proto_register_protocol(PNAME, PSNAME, PFNAME); /* Register fields and subtrees */ proto_register_field_array(proto_pkcs12, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); expert_pkcs12 = expert_register_protocol(proto_pkcs12); expert_register_field_array(expert_pkcs12, ei, 
array_length(ei)); /* Register preferences */ pkcs12_module = prefs_register_protocol(proto_pkcs12, NULL); prefs_register_string_preference(pkcs12_module, "password", "Password to decrypt the file with", "The password to used to decrypt the encrypted elements within" " the PKCS#12 file", &password); prefs_register_bool_preference(pkcs12_module, "try_null_password", "Try to decrypt with a empty password", "Whether to try and decrypt the encrypted data within the" " PKCS#12 with a NULL password", &try_null_password); register_ber_syntax_dissector("PKCS#12", proto_pkcs12, dissect_PFX_PDU); register_ber_oid_syntax(".p12", NULL, "PKCS#12"); register_ber_oid_syntax(".pfx", NULL, "PKCS#12"); } /*--- proto_reg_handoff_pkcs12 -------------------------------------------*/ void proto_reg_handoff_pkcs12(void) { #include "packet-pkcs12-dis-tab.c" register_ber_oid_dissector("1.2.840.113549.1.9.22.1", dissect_X509Certificate_OCTETSTRING_PDU, proto_pkcs12, "x509Certificate"); }
gpl-2.0
touchpro/android_kernel_lge_voltuno
drivers/mfd/wcd9xxx-core.c
12
51377
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/mfd/core.h> #include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h> #include <linux/mfd/wcd9xxx/core.h> #include <linux/mfd/wcd9xxx/core-resource.h> #include <linux/mfd/wcd9xxx/pdata.h> #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/debugfs.h> #include <linux/regulator/consumer.h> #include <linux/i2c.h> #include <sound/soc.h> #define WCD9XXX_REGISTER_START_OFFSET 0x800 #define WCD9XXX_SLIM_RW_MAX_TRIES 3 #define SLIMBUS_PRESENT_TIMEOUT 100 #define MAX_WCD9XXX_DEVICE 4 #define CODEC_DT_MAX_PROP_SIZE 40 #define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d" #define WCD9XXX_I2C_TOP_SLAVE_ADDR 0x0d #define WCD9XXX_ANALOG_I2C_SLAVE_ADDR 0x77 #define WCD9XXX_DIGITAL1_I2C_SLAVE_ADDR 0x66 #define WCD9XXX_DIGITAL2_I2C_SLAVE_ADDR 0x55 #define WCD9XXX_I2C_TOP_LEVEL 0 #define WCD9XXX_I2C_ANALOG 1 #define WCD9XXX_I2C_DIGITAL_1 2 #define WCD9XXX_I2C_DIGITAL_2 3 #define ONDEMAND_REGULATOR true #define STATIC_REGULATOR (!ONDEMAND_REGULATOR) /* Number of return values needs to be checked for each * registration of Slimbus of I2C bus for each codec */ #define NUM_WCD9XXX_REG_RET 8 struct wcd9xxx_i2c { struct i2c_client *client; struct i2c_msg xfer_msg[2]; struct mutex xfer_lock; int mod_id; }; static int wcd9xxx_dt_parse_vreg_info(struct device *dev, 
struct wcd9xxx_regulator *vreg, const char *vreg_name, bool ondemand); static int wcd9xxx_dt_parse_micbias_info(struct device *dev, struct wcd9xxx_micbias_setting *micbias); static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev); struct wcd9xxx_i2c wcd9xxx_modules[MAX_WCD9XXX_DEVICE]; static int wcd9xxx_read(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *dest, bool interface_reg) { int i, ret; if (bytes <= 0) { dev_err(wcd9xxx->dev, "Invalid byte read length %d\n", bytes); return -EINVAL; } ret = wcd9xxx->read_dev(wcd9xxx, reg, bytes, dest, interface_reg); if (ret < 0) { dev_err(wcd9xxx->dev, "Codec read failed\n"); return ret; } else { for (i = 0; i < bytes; i++) dev_dbg(wcd9xxx->dev, "Read 0x%02x from 0x%x\n", ((u8 *)dest)[i], reg + i); } return 0; } static int __wcd9xxx_reg_read( struct wcd9xxx *wcd9xxx, unsigned short reg) { u8 val; int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_read(wcd9xxx, reg, 1, &val, false); mutex_unlock(&wcd9xxx->io_lock); if (ret < 0) return ret; else return val; } int wcd9xxx_reg_read( struct wcd9xxx_core_resource *core_res, unsigned short reg) { struct wcd9xxx *wcd9xxx = (struct wcd9xxx *) core_res->parent; return __wcd9xxx_reg_read(wcd9xxx, reg); } EXPORT_SYMBOL(wcd9xxx_reg_read); #ifdef CONFIG_SOUND_CONTROL_HAX_3_GPL int wcd9xxx_reg_read_safe(struct wcd9xxx *wcd9xxx, unsigned short reg) { u8 val; int ret; ret = wcd9xxx_read(wcd9xxx, reg, 1, &val, false); if (ret < 0) return ret; else return val; } EXPORT_SYMBOL_GPL(wcd9xxx_reg_read_safe); #endif static int wcd9xxx_write(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *src, bool interface_reg) { int i; if (bytes <= 0) { pr_err("%s: Error, invalid write length\n", __func__); return -EINVAL; } for (i = 0; i < bytes; i++) dev_dbg(wcd9xxx->dev, "Write %02x to 0x%x\n", ((u8 *)src)[i], reg + i); return wcd9xxx->write_dev(wcd9xxx, reg, bytes, src, interface_reg); } static int __wcd9xxx_reg_write( struct wcd9xxx *wcd9xxx, unsigned short 
reg, u8 val) { int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_write(wcd9xxx, reg, 1, &val, false); mutex_unlock(&wcd9xxx->io_lock); return ret; } int wcd9xxx_reg_write( struct wcd9xxx_core_resource *core_res, unsigned short reg, u8 val) { struct wcd9xxx *wcd9xxx = (struct wcd9xxx *) core_res->parent; return __wcd9xxx_reg_write(wcd9xxx, reg, val); } EXPORT_SYMBOL(wcd9xxx_reg_write); static u8 wcd9xxx_pgd_la; static u8 wcd9xxx_inf_la; int wcd9xxx_interface_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg) { u8 val; int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_read(wcd9xxx, reg, 1, &val, true); mutex_unlock(&wcd9xxx->io_lock); if (ret < 0) return ret; else return val; } EXPORT_SYMBOL(wcd9xxx_interface_reg_read); int wcd9xxx_interface_reg_write(struct wcd9xxx *wcd9xxx, unsigned short reg, u8 val) { int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_write(wcd9xxx, reg, 1, &val, true); mutex_unlock(&wcd9xxx->io_lock); return ret; } EXPORT_SYMBOL(wcd9xxx_interface_reg_write); static int __wcd9xxx_bulk_read( struct wcd9xxx *wcd9xxx, unsigned short reg, int count, u8 *buf) { int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_read(wcd9xxx, reg, count, buf, false); mutex_unlock(&wcd9xxx->io_lock); return ret; } int wcd9xxx_bulk_read( struct wcd9xxx_core_resource *core_res, unsigned short reg, int count, u8 *buf) { struct wcd9xxx *wcd9xxx = (struct wcd9xxx *) core_res->parent; return __wcd9xxx_bulk_read(wcd9xxx, reg, count, buf); } EXPORT_SYMBOL(wcd9xxx_bulk_read); static int __wcd9xxx_bulk_write(struct wcd9xxx *wcd9xxx, unsigned short reg, int count, u8 *buf) { int ret; mutex_lock(&wcd9xxx->io_lock); ret = wcd9xxx_write(wcd9xxx, reg, count, buf, false); mutex_unlock(&wcd9xxx->io_lock); return ret; } int wcd9xxx_bulk_write( struct wcd9xxx_core_resource *core_res, unsigned short reg, int count, u8 *buf) { struct wcd9xxx *wcd9xxx = (struct wcd9xxx *) core_res->parent; return __wcd9xxx_bulk_write(wcd9xxx, reg, count, buf); } 
EXPORT_SYMBOL(wcd9xxx_bulk_write); static int wcd9xxx_slim_read_device(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *dest, bool interface) { int ret; struct slim_ele_access msg; int slim_read_tries = WCD9XXX_SLIM_RW_MAX_TRIES; msg.start_offset = WCD9XXX_REGISTER_START_OFFSET + reg; msg.num_bytes = bytes; msg.comp = NULL; while (1) { mutex_lock(&wcd9xxx->xfer_lock); ret = slim_request_val_element(interface ? wcd9xxx->slim_slave : wcd9xxx->slim, &msg, dest, bytes); mutex_unlock(&wcd9xxx->xfer_lock); if (likely(ret == 0) || (--slim_read_tries == 0)) break; usleep_range(5000, 5000); } if (ret) pr_err("%s: Error, Codec read failed (%d)\n", __func__, ret); return ret; } /* Interface specifies whether the write is to the interface or general * registers. */ static int wcd9xxx_slim_write_device(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *src, bool interface) { int ret; struct slim_ele_access msg; int slim_write_tries = WCD9XXX_SLIM_RW_MAX_TRIES; msg.start_offset = WCD9XXX_REGISTER_START_OFFSET + reg; msg.num_bytes = bytes; msg.comp = NULL; while (1) { mutex_lock(&wcd9xxx->xfer_lock); ret = slim_change_val_element(interface ? 
wcd9xxx->slim_slave : wcd9xxx->slim, &msg, src, bytes); mutex_unlock(&wcd9xxx->xfer_lock); if (likely(ret == 0) || (--slim_write_tries == 0)) break; usleep_range(5000, 5000); } if (ret) pr_err("%s: Error, Codec write failed (%d)\n", __func__, ret); return ret; } static struct mfd_cell tabla1x_devs[] = { { .name = "tabla1x_codec", }, }; static struct mfd_cell tabla_devs[] = { { .name = "tabla_codec", }, }; static struct mfd_cell sitar_devs[] = { { .name = "sitar_codec", }, }; static struct mfd_cell taiko_devs[] = { { .name = "taiko_codec", }, }; static struct mfd_cell tapan_devs[] = { { .name = "tapan_codec", }, }; static const struct wcd9xxx_codec_type wcd9xxx_codecs[] = { { TABLA_MAJOR, cpu_to_le16(0x1), tabla1x_devs, ARRAY_SIZE(tabla1x_devs), TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03, }, { TABLA_MAJOR, cpu_to_le16(0x2), tabla_devs, ARRAY_SIZE(tabla_devs), TABLA_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x03 }, { /* Siter version 1 has same major chip id with Tabla */ TABLA_MAJOR, cpu_to_le16(0x0), sitar_devs, ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01 }, { SITAR_MAJOR, cpu_to_le16(0x1), sitar_devs, ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01 }, { SITAR_MAJOR, cpu_to_le16(0x2), sitar_devs, ARRAY_SIZE(sitar_devs), SITAR_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA, 0x01 }, { TAIKO_MAJOR, cpu_to_le16(0x0), taiko_devs, ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01 }, { TAIKO_MAJOR, cpu_to_le16(0x1), taiko_devs, ARRAY_SIZE(taiko_devs), TAIKO_NUM_IRQS, 2, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x01 }, { TAPAN_MAJOR, cpu_to_le16(0x0), tapan_devs, ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03 }, { TAPAN_MAJOR, cpu_to_le16(0x1), tapan_devs, ARRAY_SIZE(tapan_devs), TAPAN_NUM_IRQS, -1, WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TAIKO, 0x03 }, }; static void wcd9xxx_bring_up(struct wcd9xxx *wcd9xxx) { 
	/* Tail of wcd9xxx_bring_up(): codec power-on register sequence. */
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0x4);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_CDC_CTL, 0);
	/* let the part settle before re-enabling the digital core */
	usleep_range(5000, 5000);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_CDC_CTL, 3);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 3);
}

/* Power-down write sequence on the leakage-control register. */
static void wcd9xxx_bring_down(struct wcd9xxx *wcd9xxx)
{
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0x7);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0x6);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0xe);
	__wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0x8);
}

/*
 * Pulse the codec reset GPIO: request it on first bring-up, then drive it
 * low for 20 ms and high for 20 ms.  Returns 0 or the gpio_request() error.
 */
static int wcd9xxx_reset(struct wcd9xxx *wcd9xxx)
{
	int ret;

	if (wcd9xxx->reset_gpio && wcd9xxx->slim_device_bootup) {
		ret = gpio_request(wcd9xxx->reset_gpio, "CDC_RESET");
		if (ret) {
			pr_err("%s: Failed to request gpio %d\n", __func__,
				wcd9xxx->reset_gpio);
			/* forget the gpio so later paths don't touch it */
			wcd9xxx->reset_gpio = 0;
			return ret;
		}
	}
	if (wcd9xxx->reset_gpio) {
		gpio_direction_output(wcd9xxx->reset_gpio, 0);
		msleep(20);
		gpio_direction_output(wcd9xxx->reset_gpio, 1);
		msleep(20);
	}
	return 0;
}

/* Release the reset GPIO (if held) and clear the handle. */
static void wcd9xxx_free_reset(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->reset_gpio) {
		gpio_free(wcd9xxx->reset_gpio);
		wcd9xxx->reset_gpio = 0;
	}
}

/*
 * Read the chip id registers and find the best entry in wcd9xxx_codecs[].
 * On an exact major+minor match the scan stops; otherwise the highest
 * minor/version entry with the right major id wins.  *version is filled
 * from the table, or from WCD9XXX_A_CHIP_VERSION when the table says -1.
 * Returns the matched descriptor or NULL.
 */
static const struct wcd9xxx_codec_type
*wcd9xxx_check_codec_type(struct wcd9xxx *wcd9xxx, u8 *version)
{
	int i, rc;
	const struct wcd9xxx_codec_type *c, *d = NULL;

	rc = __wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_0,
				 sizeof(wcd9xxx->id_minor),
				 (u8 *)&wcd9xxx->id_minor);
	if (rc < 0)
		goto exit;

	rc = __wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_2,
				 sizeof(wcd9xxx->id_major),
				 (u8 *)&wcd9xxx->id_major);
	if (rc < 0)
		goto exit;
	dev_dbg(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
		__func__, wcd9xxx->id_major, wcd9xxx->id_minor);

	for (i = 0, c = &wcd9xxx_codecs[0]; i < ARRAY_SIZE(wcd9xxx_codecs);
	     i++, c++) {
		if (c->id_major == wcd9xxx->id_major) {
			if (c->id_minor == wcd9xxx->id_minor) {
				d = c;
				dev_dbg(wcd9xxx->dev,
					"%s: exact match %s\n", __func__,
					d->dev->name);
				break;
			} else if (!d) {
				d = c;
			} else {
				/* prefer the higher minor id / version */
				if ((d->id_minor < c->id_minor) ||
				    (d->id_minor == c->id_minor &&
				     d->version < c->version))
					d = c;
			}
			dev_dbg(wcd9xxx->dev,
				"%s: best match %s, major 0x%x, minor 0x%x\n",
				__func__, d->dev->name, d->id_major,
				d->id_minor);
		}
	}

	if (!d) {
		dev_warn(wcd9xxx->dev,
			 "%s: driver for id major 0x%x, minor 0x%x not found\n",
			 __func__, wcd9xxx->id_major, wcd9xxx->id_minor);
	} else {
		if (d->version > -1) {
			*version = d->version;
		} else {
			rc = __wcd9xxx_reg_read(wcd9xxx,
						WCD9XXX_A_CHIP_VERSION);
			if (rc < 0) {
				d = NULL;
				goto exit;
			}
			*version = (u8)rc & 0x1F;
		}
		dev_info(wcd9xxx->dev,
			 "%s: detected %s, major 0x%x, minor 0x%x, ver 0x%x\n",
			 __func__, d->dev->name, d->id_major, d->id_minor,
			 *version);
	}
exit:
	return d;
}

/* Number of 8-bit IRQ status/mask registers needed for this codec. */
static int wcd9xxx_num_irq_regs(const struct wcd9xxx *wcd9xxx)
{
	return (wcd9xxx->codec_type->num_irqs / 8) +
		((wcd9xxx->codec_type->num_irqs % 8) ? 1 : 0);
}

/*
 * Interrupt table v1, selected in wcd9xxx_device_init() for the
 * TABLA/SITAR major ids (wcd9304 and wcd9310 generation).
 */
static const struct intr_data intr_tbl_v1[] = {
	{WCD9XXX_IRQ_SLIMBUS, false},
	{WCD9XXX_IRQ_MBHC_INSERTION, true},
	{WCD9XXX_IRQ_MBHC_POTENTIAL, true},
	{WCD9XXX_IRQ_MBHC_RELEASE, true},
	{WCD9XXX_IRQ_MBHC_PRESS, true},
	{WCD9XXX_IRQ_MBHC_SHORT_TERM, true},
	{WCD9XXX_IRQ_MBHC_REMOVAL, true},
	{WCD9XXX_IRQ_BG_PRECHARGE, false},
	{WCD9XXX_IRQ_PA1_STARTUP, false},
	{WCD9XXX_IRQ_PA2_STARTUP, false},
	{WCD9XXX_IRQ_PA3_STARTUP, false},
	{WCD9XXX_IRQ_PA4_STARTUP, false},
	{WCD9XXX_IRQ_PA5_STARTUP, false},
	{WCD9XXX_IRQ_MICBIAS1_PRECHARGE, false},
	{WCD9XXX_IRQ_MICBIAS2_PRECHARGE, false},
	{WCD9XXX_IRQ_MICBIAS3_PRECHARGE, false},
	{WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, false},
	{WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, false},
	{WCD9XXX_IRQ_EAR_PA_OCPL_FAULT, false},
	{WCD9XXX_IRQ_HPH_L_PA_STARTUP, false},
	{WCD9XXX_IRQ_HPH_R_PA_STARTUP, false},
	{WCD9320_IRQ_EAR_PA_STARTUP, false},
	{WCD9XXX_IRQ_RESERVED_0, false},
	{WCD9XXX_IRQ_RESERVED_1, false},
};

/*
 * Interrupt table for v2 corresponds to newer version
 * codecs (wcd9320 and wcd9306)
 */
static const struct intr_data intr_tbl_v2[] = {
	{WCD9XXX_IRQ_SLIMBUS, false},
	{WCD9XXX_IRQ_MBHC_INSERTION, true},
	{WCD9XXX_IRQ_MBHC_POTENTIAL, true},
	{WCD9XXX_IRQ_MBHC_RELEASE, true},
	{WCD9XXX_IRQ_MBHC_PRESS, true},
	{WCD9XXX_IRQ_MBHC_SHORT_TERM, true},
	{WCD9XXX_IRQ_MBHC_REMOVAL, true},
	{WCD9320_IRQ_MBHC_JACK_SWITCH, true},
	{WCD9306_IRQ_MBHC_JACK_SWITCH, true},
	{WCD9XXX_IRQ_BG_PRECHARGE, false},
	{WCD9XXX_IRQ_PA1_STARTUP, false},
	{WCD9XXX_IRQ_PA2_STARTUP, false},
	{WCD9XXX_IRQ_PA3_STARTUP, false},
	{WCD9XXX_IRQ_PA4_STARTUP, false},
	{WCD9306_IRQ_HPH_PA_OCPR_FAULT, false},
	{WCD9XXX_IRQ_PA5_STARTUP, false},
	{WCD9XXX_IRQ_MICBIAS1_PRECHARGE, false},
	{WCD9306_IRQ_HPH_PA_OCPL_FAULT, false},
	{WCD9XXX_IRQ_MICBIAS2_PRECHARGE, false},
	{WCD9XXX_IRQ_MICBIAS3_PRECHARGE, false},
	{WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, false},
	{WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, false},
	{WCD9XXX_IRQ_EAR_PA_OCPL_FAULT, false},
	{WCD9XXX_IRQ_HPH_L_PA_STARTUP, false},
	{WCD9XXX_IRQ_HPH_R_PA_STARTUP, false},
	{WCD9320_IRQ_EAR_PA_STARTUP, false},
	{WCD9XXX_IRQ_RESERVED_0, false},
	{WCD9XXX_IRQ_RESERVED_1, false},
	{WCD9XXX_IRQ_MAD_AUDIO, false},
	{WCD9XXX_IRQ_MAD_BEACON, false},
	{WCD9XXX_IRQ_MAD_ULTRASOUND, false},
	{WCD9XXX_IRQ_SPEAKER_CLIPPING, false},
	{WCD9XXX_IRQ_VBAT_MONITOR_ATTACK, false},
	{WCD9XXX_IRQ_VBAT_MONITOR_RELEASE, false},
	{WCD9XXX_IRQ_RESERVED_2, false},
};

/*
 * Bring the codec up, identify it, wire up the core resource (irq tables,
 * register accessors) and register the MFD children.  Returns 0 or a
 * negative errno; on failure the codec is brought back down.
 */
static int wcd9xxx_device_init(struct wcd9xxx *wcd9xxx)
{
	int ret = 0;
	u8 version;
	const struct wcd9xxx_codec_type *found;
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	mutex_init(&wcd9xxx->io_lock);
	mutex_init(&wcd9xxx->xfer_lock);
	dev_set_drvdata(wcd9xxx->dev, wcd9xxx);
	wcd9xxx_bring_up(wcd9xxx);

	found = wcd9xxx_check_codec_type(wcd9xxx, &version);
	if (!found) {
		ret = -ENODEV;
		goto err;
	} else {
		wcd9xxx->codec_type = found;
		wcd9xxx->version = version;
	}

	core_res->parent = wcd9xxx;
	core_res->dev = wcd9xxx->dev;
	/* v1 interrupt layout for the Tabla/Sitar generation, v2 otherwise */
	if (wcd9xxx->codec_type->id_major == TABLA_MAJOR ||
	    wcd9xxx->codec_type->id_major == SITAR_MAJOR) {
		core_res->intr_table = intr_tbl_v1;
		core_res->intr_table_size = ARRAY_SIZE(intr_tbl_v1);
	} else {
		core_res->intr_table = intr_tbl_v2;
		core_res->intr_table_size = ARRAY_SIZE(intr_tbl_v2);
	}

	wcd9xxx_core_res_init(&wcd9xxx->core_res,
			      wcd9xxx->codec_type->num_irqs,
			      wcd9xxx_num_irq_regs(wcd9xxx),
			      wcd9xxx_reg_read, wcd9xxx_reg_write,
			      wcd9xxx_bulk_read);
	/*
	 * NOTE(review): if wcd9xxx_core_irq_init() fails, ret is still 0
	 * here, so the error path returns "success" to the caller — looks
	 * like a bug; confirm intended behavior before relying on it.
	 */
	if (wcd9xxx_core_irq_init(&wcd9xxx->core_res))
		goto err;

	ret = mfd_add_devices(wcd9xxx->dev, -1, found->dev, found->size,
			      NULL, 0);
	if (ret != 0) {
		dev_err(wcd9xxx->dev, "Failed to add children: %d\n", ret);
		goto err_irq;
	}

	ret = device_init_wakeup(wcd9xxx->dev, true);
	if (ret) {
		dev_err(wcd9xxx->dev, "Device wakeup init failed: %d\n", ret);
		goto err_irq;
	}

	return ret;
err_irq:
	wcd9xxx_irq_exit(&wcd9xxx->core_res);
err:
	wcd9xxx_bring_down(wcd9xxx);
	wcd9xxx_core_res_deinit(&wcd9xxx->core_res);
	mutex_destroy(&wcd9xxx->io_lock);
	mutex_destroy(&wcd9xxx->xfer_lock);
	return ret;
}

/* Tear down everything wcd9xxx_device_init() set up and free the device. */
static void wcd9xxx_device_exit(struct wcd9xxx *wcd9xxx)
{
	device_init_wakeup(wcd9xxx->dev, false);
	wcd9xxx_irq_exit(&wcd9xxx->core_res);
	wcd9xxx_bring_down(wcd9xxx);
	wcd9xxx_free_reset(wcd9xxx);
	wcd9xxx_core_res_deinit(&wcd9xxx->core_res);
	mutex_destroy(&wcd9xxx->io_lock);
	mutex_destroy(&wcd9xxx->xfer_lock);
	if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
		slim_remove_device(wcd9xxx->slim_slave);
	kfree(wcd9xxx);
}

#ifdef CONFIG_DEBUG_FS
/* Debugfs peek/poke state; debugCodec is set up elsewhere. */
struct wcd9xxx *debugCodec;

static struct dentry *debugfs_wcd9xxx_dent;
static struct dentry *debugfs_peek;
static struct dentry *debugfs_poke;

static unsigned char read_data;

static int codec_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/*
 * Parse up to @num_of_par space-separated integers from @buf into @param1.
 * A token whose second character is 'x'/'X' is taken as hex (assumes a
 * "0x" prefix).  Returns 0 or -EINVAL.  strict_strtoul is the legacy
 * pre-kstrtoul API.
 */
static int get_parameters(char *buf, long int *param1, int num_of_par)
{
	char *token;
	int base, cnt;

	token = strsep(&buf, " ");

	for (cnt = 0; cnt < num_of_par; cnt++) {
		if (token != NULL) {
			if ((token[1] == 'x') || (token[1] == 'X'))
				base = 16;
			else
				base = 10;

			if (strict_strtoul(token, base, &param1[cnt]) != 0)
				return -EINVAL;

			token = strsep(&buf, " ");
		} else
			return -EINVAL;
} return 0; } static ssize_t codec_debug_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char lbuf[8]; snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data); return simple_read_from_buffer(ubuf, count, ppos, lbuf, strnlen(lbuf, 7)); } static ssize_t codec_debug_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char *access_str = filp->private_data; char lbuf[32]; int rc; long int param[5]; if (cnt > sizeof(lbuf) - 1) return -EINVAL; rc = copy_from_user(lbuf, ubuf, cnt); if (rc) return -EFAULT; lbuf[cnt] = '\0'; if (!strncmp(access_str, "poke", 6)) { /* write */ rc = get_parameters(lbuf, param, 2); if ((param[0] <= 0x3FF) && (param[1] <= 0xFF) && (rc == 0)) wcd9xxx_interface_reg_write(debugCodec, param[0], param[1]); else rc = -EINVAL; } else if (!strncmp(access_str, "peek", 6)) { /* read */ rc = get_parameters(lbuf, param, 1); if ((param[0] <= 0x3FF) && (rc == 0)) read_data = wcd9xxx_interface_reg_read(debugCodec, param[0]); else rc = -EINVAL; } if (rc == 0) rc = cnt; else pr_err("%s: rc = %d\n", __func__, rc); return rc; } static const struct file_operations codec_debug_ops = { .open = codec_debug_open, .write = codec_debug_write, .read = codec_debug_read }; #endif static int wcd9xxx_init_supplies(struct wcd9xxx *wcd9xxx, struct wcd9xxx_pdata *pdata) { int ret; int i; wcd9xxx->supplies = kzalloc(sizeof(struct regulator_bulk_data) * ARRAY_SIZE(pdata->regulator), GFP_KERNEL); if (!wcd9xxx->supplies) { ret = -ENOMEM; goto err; } wcd9xxx->num_of_supplies = 0; if (ARRAY_SIZE(pdata->regulator) > WCD9XXX_MAX_REGULATOR) { pr_err("%s: Array Size out of bound\n", __func__); ret = -EINVAL; goto err; } for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) { if (pdata->regulator[i].name) { wcd9xxx->supplies[i].supply = pdata->regulator[i].name; wcd9xxx->num_of_supplies++; } } ret = regulator_bulk_get(wcd9xxx->dev, wcd9xxx->num_of_supplies, wcd9xxx->supplies); if (ret != 0) { dev_err(wcd9xxx->dev, "Failed to get supplies: err = 
%d\n", ret); goto err_supplies; } for (i = 0; i < wcd9xxx->num_of_supplies; i++) { if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <= 0) continue; ret = regulator_set_voltage(wcd9xxx->supplies[i].consumer, pdata->regulator[i].min_uV, pdata->regulator[i].max_uV); if (ret) { pr_err("%s: Setting regulator voltage failed for " "regulator %s err = %d\n", __func__, wcd9xxx->supplies[i].supply, ret); goto err_get; } ret = regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, pdata->regulator[i].optimum_uA); if (ret < 0) { pr_err("%s: Setting regulator optimum mode failed for " "regulator %s err = %d\n", __func__, wcd9xxx->supplies[i].supply, ret); goto err_get; } else { ret = 0; } } return ret; err_get: regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies); err_supplies: kfree(wcd9xxx->supplies); err: return ret; } static int wcd9xxx_enable_static_supplies(struct wcd9xxx *wcd9xxx, struct wcd9xxx_pdata *pdata) { int i; int ret = 0; for (i = 0; i < wcd9xxx->num_of_supplies; i++) { if (pdata->regulator[i].ondemand) continue; ret = regulator_enable(wcd9xxx->supplies[i].consumer); if (ret) { pr_err("%s: Failed to enable %s\n", __func__, wcd9xxx->supplies[i].supply); break; } else { pr_debug("%s: Enabled regulator %s\n", __func__, wcd9xxx->supplies[i].supply); } } while (ret && --i) if (!pdata->regulator[i].ondemand) regulator_disable(wcd9xxx->supplies[i].consumer); return ret; } static void wcd9xxx_disable_supplies(struct wcd9xxx *wcd9xxx, struct wcd9xxx_pdata *pdata) { int i; regulator_bulk_disable(wcd9xxx->num_of_supplies, wcd9xxx->supplies); for (i = 0; i < wcd9xxx->num_of_supplies; i++) { if (regulator_count_voltages(wcd9xxx->supplies[i].consumer) <= 0) continue; regulator_set_voltage(wcd9xxx->supplies[i].consumer, 0, pdata->regulator[i].max_uV); regulator_set_optimum_mode(wcd9xxx->supplies[i].consumer, 0); } regulator_bulk_free(wcd9xxx->num_of_supplies, wcd9xxx->supplies); kfree(wcd9xxx->supplies); } struct wcd9xxx_i2c 
*get_i2c_wcd9xxx_device_info(u16 reg) { u16 mask = 0x0f00; int value = 0; struct wcd9xxx_i2c *wcd9xxx = NULL; value = ((reg & mask) >> 8) & 0x000f; switch (value) { case 0: wcd9xxx = &wcd9xxx_modules[0]; break; case 1: wcd9xxx = &wcd9xxx_modules[1]; break; case 2: wcd9xxx = &wcd9xxx_modules[2]; break; case 3: wcd9xxx = &wcd9xxx_modules[3]; break; default: break; } return wcd9xxx; } int wcd9xxx_i2c_write_device(u16 reg, u8 *value, u32 bytes) { struct i2c_msg *msg; int ret = 0; u8 reg_addr = 0; u8 data[bytes + 1]; struct wcd9xxx_i2c *wcd9xxx; wcd9xxx = get_i2c_wcd9xxx_device_info(reg); if (wcd9xxx == NULL || wcd9xxx->client == NULL) { pr_err("failed to get device info\n"); return -ENODEV; } reg_addr = (u8)reg; msg = &wcd9xxx->xfer_msg[0]; msg->addr = wcd9xxx->client->addr; msg->len = bytes + 1; msg->flags = 0; data[0] = reg; data[1] = *value; msg->buf = data; ret = i2c_transfer(wcd9xxx->client->adapter, wcd9xxx->xfer_msg, 1); /* Try again if the write fails */ if (ret != 1) { ret = i2c_transfer(wcd9xxx->client->adapter, wcd9xxx->xfer_msg, 1); if (ret != 1) { pr_err("failed to write the device\n"); return ret; } } pr_debug("write sucess register = %x val = %x\n", reg, data[1]); return 0; } int wcd9xxx_i2c_read_device(unsigned short reg, int bytes, unsigned char *dest) { struct i2c_msg *msg; int ret = 0; u8 reg_addr = 0; struct wcd9xxx_i2c *wcd9xxx; u8 i = 0; wcd9xxx = get_i2c_wcd9xxx_device_info(reg); if (wcd9xxx == NULL || wcd9xxx->client == NULL) { pr_err("failed to get device info\n"); return -ENODEV; } for (i = 0; i < bytes; i++) { reg_addr = (u8)reg++; msg = &wcd9xxx->xfer_msg[0]; msg->addr = wcd9xxx->client->addr; msg->len = 1; msg->flags = 0; msg->buf = &reg_addr; msg = &wcd9xxx->xfer_msg[1]; msg->addr = wcd9xxx->client->addr; msg->len = 1; msg->flags = I2C_M_RD; msg->buf = dest++; ret = i2c_transfer(wcd9xxx->client->adapter, wcd9xxx->xfer_msg, 2); /* Try again if read fails first time */ if (ret != 2) { ret = i2c_transfer(wcd9xxx->client->adapter, 
wcd9xxx->xfer_msg, 2); if (ret != 2) { pr_err("failed to read wcd9xxx register\n"); return ret; } } } return 0; } int wcd9xxx_i2c_read(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *dest, bool interface_reg) { return wcd9xxx_i2c_read_device(reg, bytes, dest); } int wcd9xxx_i2c_write(struct wcd9xxx *wcd9xxx, unsigned short reg, int bytes, void *src, bool interface_reg) { return wcd9xxx_i2c_write_device(reg, src, bytes); } static int wcd9xxx_i2c_get_client_index(struct i2c_client *client, int *wcd9xx_index) { int ret = 0; switch (client->addr) { case WCD9XXX_I2C_TOP_SLAVE_ADDR: *wcd9xx_index = WCD9XXX_I2C_TOP_LEVEL; break; case WCD9XXX_ANALOG_I2C_SLAVE_ADDR: *wcd9xx_index = WCD9XXX_I2C_ANALOG; break; case WCD9XXX_DIGITAL1_I2C_SLAVE_ADDR: *wcd9xx_index = WCD9XXX_I2C_DIGITAL_1; break; case WCD9XXX_DIGITAL2_I2C_SLAVE_ADDR: *wcd9xx_index = WCD9XXX_I2C_DIGITAL_2; break; default: ret = -EINVAL; break; } return ret; } static int __devinit wcd9xxx_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wcd9xxx *wcd9xxx = NULL; struct wcd9xxx_pdata *pdata = NULL; int val = 0; int ret = 0; int wcd9xx_index = 0; struct device *dev; int intf_type; intf_type = wcd9xxx_get_intf_type(); pr_debug("%s: interface status %d\n", __func__, intf_type); if (intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) { dev_dbg(&client->dev, "%s:Codec is detected in slimbus mode\n", __func__); return -ENODEV; } else if (intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index); if (ret != 0) dev_err(&client->dev, "%s: I2C set codec I2C\n" "client failed\n", __func__); else { dev_err(&client->dev, "%s:probe for other slaves\n" "devices of codec I2C slave Addr = %x\n", __func__, client->addr); wcd9xxx_modules[wcd9xx_index].client = client; } return ret; } else if (intf_type == WCD9XXX_INTERFACE_TYPE_PROBING) { dev = &client->dev; if (client->dev.of_node) { dev_dbg(&client->dev, "%s:Platform data\n" "from device tree\n", 
				__func__);
			pdata = wcd9xxx_populate_dt_pdata(&client->dev);
			client->dev.platform_data = pdata;
		} else {
			dev_dbg(&client->dev, "%s:Platform data from\n"
				"board file\n", __func__);
			pdata = client->dev.platform_data;
		}
		wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL);
		if (wcd9xxx == NULL) {
			pr_err("%s: error, allocation failed\n", __func__);
			ret = -ENOMEM;
			goto fail;
		}
		/*
		 * NOTE(review): the two "goto fail" paths below run after
		 * wcd9xxx was allocated but skip kfree(wcd9xxx) — they leak;
		 * "goto err_codec" would free it.  Flagged, not changed.
		 */
		if (!pdata) {
			dev_dbg(&client->dev, "no platform data?\n");
			ret = -EINVAL;
			goto fail;
		}
		if (i2c_check_functionality(client->adapter,
					    I2C_FUNC_I2C) == 0) {
			dev_dbg(&client->dev, "can't talk I2C?\n");
			ret = -EIO;
			goto fail;
		}
		dev_set_drvdata(&client->dev, wcd9xxx);
		wcd9xxx->dev = &client->dev;
		wcd9xxx->reset_gpio = pdata->reset_gpio;
		wcd9xxx->slim_device_bootup = true;
		if (client->dev.of_node)
			wcd9xxx->mclk_rate = pdata->mclk_rate;

		ret = wcd9xxx_init_supplies(wcd9xxx, pdata);
		if (ret) {
			pr_err("%s: Fail to enable Codec supplies\n",
			       __func__);
			goto err_codec;
		}
		ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata);
		if (ret) {
			pr_err("%s: Fail to enable Codec pre-reset supplies\n",
			       __func__);
			goto err_codec;
		}
		/* brief settle time between supply enable and reset pulse */
		usleep_range(5, 5);

		ret = wcd9xxx_reset(wcd9xxx);
		if (ret) {
			pr_err("%s: Resetting Codec failed\n", __func__);
			goto err_supplies;
		}

		ret = wcd9xxx_i2c_get_client_index(client, &wcd9xx_index);
		if (ret != 0) {
			pr_err("%s:Set codec I2C client failed\n", __func__);
			goto err_supplies;
		}

		wcd9xxx_modules[wcd9xx_index].client = client;
		/* route register I/O through the I2C hooks */
		wcd9xxx->read_dev = wcd9xxx_i2c_read;
		wcd9xxx->write_dev = wcd9xxx_i2c_write;
		if (!wcd9xxx->dev->of_node)
			wcd9xxx_initialize_irq(&wcd9xxx->core_res,
					       pdata->irq, pdata->irq_base);

		ret = wcd9xxx_device_init(wcd9xxx);
		if (ret) {
			pr_err("%s: error, initializing device failed\n",
			       __func__);
			goto err_device_init;
		}

		/*
		 * Sanity-check the chip status byte.  val is an int but only
		 * one byte is read into it — assumes little-endian layout;
		 * TODO confirm.
		 */
		ret = wcd9xxx_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1,
				   &val, 0);
		if (ret < 0)
			pr_err("%s: failed to read the wcd9xxx status (%d)\n",
			       __func__, ret);
		if (val != wcd9xxx->codec_type->i2c_chip_status)
			pr_err("%s: unknown chip status 0x%x\n", __func__,
			       val);
		wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_I2C);

		return ret;
	} else
		pr_err("%s: I2C probe in wrong state\n", __func__);

err_device_init:
	wcd9xxx_free_reset(wcd9xxx);
err_supplies:
	wcd9xxx_disable_supplies(wcd9xxx, pdata);
err_codec:
	kfree(wcd9xxx);
fail:
	return ret;
}

/* I2C remove: undo supplies and device init for the probing client. */
static int __devexit wcd9xxx_i2c_remove(struct i2c_client *client)
{
	struct wcd9xxx *wcd9xxx;
	struct wcd9xxx_pdata *pdata = client->dev.platform_data;
	pr_debug("exit\n");
	wcd9xxx = dev_get_drvdata(&client->dev);
	wcd9xxx_disable_supplies(wcd9xxx, pdata);
	wcd9xxx_device_exit(wcd9xxx);
	return 0;
}

/*
 * Parse one "<name>-supply" regulator phandle plus its qcom voltage and
 * current properties from the device tree into @vreg.
 * Returns 0 on success or a negative errno.
 */
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
	struct wcd9xxx_regulator *vreg, const char *vreg_name, bool ondemand)
{
	int len, ret = 0;
	const __be32 *prop;
	char prop_name[CODEC_DT_MAX_PROP_SIZE];
	struct device_node *regnode = NULL;
	u32 prop_val;

	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "%s-supply",
		 vreg_name);
	regnode = of_parse_phandle(dev->of_node, prop_name, 0);

	if (!regnode) {
		dev_err(dev, "Looking up %s property in node %s failed",
			prop_name, dev->of_node->full_name);
		return -ENODEV;
	}
	vreg->name = vreg_name;
	vreg->ondemand = ondemand;

	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
		 "qcom,%s-voltage", vreg_name);
	prop = of_get_property(dev->of_node, prop_name, &len);

	if (!prop || (len != (2 * sizeof(__be32)))) {
		dev_err(dev, "%s %s property\n", prop ?
"invalid format" : "no", prop_name); return -EINVAL; } else { vreg->min_uV = be32_to_cpup(&prop[0]); vreg->max_uV = be32_to_cpup(&prop[1]); } snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE, "qcom,%s-current", vreg_name); ret = of_property_read_u32(dev->of_node, prop_name, &prop_val); if (ret) { dev_err(dev, "Looking up %s property in node %s failed", prop_name, dev->of_node->full_name); return -EFAULT; } vreg->optimum_uA = prop_val; dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA, ond %d\n", vreg->name, vreg->min_uV, vreg->max_uV, vreg->optimum_uA, vreg->ondemand); return 0; } static int wcd9xxx_read_of_property_u32(struct device *dev, const char *name, u32 *val) { int ret = 0; ret = of_property_read_u32(dev->of_node, name, val); if (ret) dev_err(dev, "Looking up %s property in node %s failed", name, dev->of_node->full_name); return ret; } static int wcd9xxx_dt_parse_micbias_info(struct device *dev, struct wcd9xxx_micbias_setting *micbias) { u32 prop_val; if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias-ldoh-v", &prop_val))) micbias->ldoh_v = (u8)prop_val; wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias-cfilt1-mv", &micbias->cfilt1_mv); wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias-cfilt2-mv", &micbias->cfilt2_mv); wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias-cfilt3-mv", &micbias->cfilt3_mv); /* Read micbias values for codec. Does not matter even if a few * micbias values are not defined in the Device Tree. 
Codec will * anyway not use those values */ if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias1-cfilt-sel", &prop_val))) micbias->bias1_cfilt_sel = (u8)prop_val; if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias2-cfilt-sel", &prop_val))) micbias->bias2_cfilt_sel = (u8)prop_val; if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias3-cfilt-sel", &prop_val))) micbias->bias3_cfilt_sel = (u8)prop_val; if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-micbias4-cfilt-sel", &prop_val))) micbias->bias4_cfilt_sel = (u8)prop_val; /* micbias external cap */ micbias->bias1_cap_mode = (of_property_read_bool(dev->of_node, "qcom,cdc-micbias1-ext-cap") ? MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP); micbias->bias2_cap_mode = (of_property_read_bool(dev->of_node, "qcom,cdc-micbias2-ext-cap") ? MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP); micbias->bias3_cap_mode = (of_property_read_bool(dev->of_node, "qcom,cdc-micbias3-ext-cap") ? MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP); micbias->bias4_cap_mode = (of_property_read_bool(dev->of_node, "qcom,cdc-micbias4-ext-cap") ? 
MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP); micbias->bias2_is_headset_only = of_property_read_bool(dev->of_node, "qcom,cdc-micbias2-headset-only"); dev_dbg(dev, "ldoh_v %u cfilt1_mv %u cfilt2_mv %u cfilt3_mv %u", (u32)micbias->ldoh_v, (u32)micbias->cfilt1_mv, (u32)micbias->cfilt2_mv, (u32)micbias->cfilt3_mv); dev_dbg(dev, "bias1_cfilt_sel %u bias2_cfilt_sel %u\n", (u32)micbias->bias1_cfilt_sel, (u32)micbias->bias2_cfilt_sel); dev_dbg(dev, "bias3_cfilt_sel %u bias4_cfilt_sel %u\n", (u32)micbias->bias3_cfilt_sel, (u32)micbias->bias4_cfilt_sel); dev_dbg(dev, "bias1_ext_cap %d bias2_ext_cap %d\n", micbias->bias1_cap_mode, micbias->bias2_cap_mode); dev_dbg(dev, "bias3_ext_cap %d bias4_ext_cap %d\n", micbias->bias3_cap_mode, micbias->bias4_cap_mode); dev_dbg(dev, "bias2_is_headset_only %d\n", micbias->bias2_is_headset_only); return 0; } static int wcd9xxx_dt_parse_slim_interface_dev_info(struct device *dev, struct slim_device *slim_ifd) { int ret = 0; struct property *prop; ret = of_property_read_string(dev->of_node, "qcom,cdc-slim-ifd", &slim_ifd->name); if (ret) { dev_err(dev, "Looking up %s property in node %s failed", "qcom,cdc-slim-ifd-dev", dev->of_node->full_name); return -ENODEV; } prop = of_find_property(dev->of_node, "qcom,cdc-slim-ifd-elemental-addr", NULL); if (!prop) { dev_err(dev, "Looking up %s property in node %s failed", "qcom,cdc-slim-ifd-elemental-addr", dev->of_node->full_name); return -ENODEV; } else if (prop->length != 6) { dev_err(dev, "invalid codec slim ifd addr. 
addr length = %d\n", prop->length); return -ENODEV; } memcpy(slim_ifd->e_addr, prop->value, 6); return 0; } static int wcd9xxx_process_supplies(struct device *dev, struct wcd9xxx_pdata *pdata, const char *supply_list, int supply_cnt, bool is_ondemand, int index) { int idx, ret = 0; const char *name; if (supply_cnt == 0) { dev_dbg(dev, "%s: no supplies defined for %s\n", __func__, supply_list); return 0; } for (idx = 0; idx < supply_cnt; idx++) { ret = of_property_read_string_index(dev->of_node, supply_list, idx, &name); if (ret) { dev_err(dev, "%s: of read string %s idx %d error %d\n", __func__, supply_list, idx, ret); goto err; } dev_dbg(dev, "%s: Found cdc supply %s as part of %s\n", __func__, name, supply_list); ret = wcd9xxx_dt_parse_vreg_info(dev, &pdata->regulator[index + idx], name, is_ondemand); if (ret) goto err; } return 0; err: return ret; } static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev) { struct wcd9xxx_pdata *pdata; int ret, static_cnt, ond_cnt, cp_supplies_cnt; u32 mclk_rate = 0; u32 dmic_sample_rate = 0; const char *static_prop_name = "qcom,cdc-static-supplies"; const char *ond_prop_name = "qcom,cdc-on-demand-supplies"; const char *cp_supplies_name = "qcom,cdc-cp-supplies"; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(dev, "could not allocate memory for platform data\n"); return NULL; } static_cnt = of_property_count_strings(dev->of_node, static_prop_name); if (IS_ERR_VALUE(static_cnt)) { dev_err(dev, "%s: Failed to get static supplies %d\n", __func__, static_cnt); goto err; } /* On-demand supply list is an optional property */ ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name); if (IS_ERR_VALUE(ond_cnt)) ond_cnt = 0; /* cp-supplies list is an optional property */ cp_supplies_cnt = of_property_count_strings(dev->of_node, cp_supplies_name); if (IS_ERR_VALUE(cp_supplies_cnt)) cp_supplies_cnt = 0; BUG_ON(static_cnt <= 0 || ond_cnt < 0 || cp_supplies_cnt < 0); if ((static_cnt + 
ond_cnt + cp_supplies_cnt) > ARRAY_SIZE(pdata->regulator)) { dev_err(dev, "%s: Num of supplies %u > max supported %u\n", __func__, static_cnt, ARRAY_SIZE(pdata->regulator)); goto err; } ret = wcd9xxx_process_supplies(dev, pdata, static_prop_name, static_cnt, STATIC_REGULATOR, 0); if (ret) goto err; ret = wcd9xxx_process_supplies(dev, pdata, ond_prop_name, ond_cnt, ONDEMAND_REGULATOR, static_cnt); if (ret) goto err; ret = wcd9xxx_process_supplies(dev, pdata, cp_supplies_name, cp_supplies_cnt, ONDEMAND_REGULATOR, static_cnt + ond_cnt); if (ret) goto err; ret = wcd9xxx_dt_parse_micbias_info(dev, &pdata->micbias); if (ret) goto err; pdata->reset_gpio = of_get_named_gpio(dev->of_node, "qcom,cdc-reset-gpio", 0); if (pdata->reset_gpio < 0) { dev_err(dev, "Looking up %s property in node %s failed %d\n", "qcom, cdc-reset-gpio", dev->of_node->full_name, pdata->reset_gpio); goto err; } dev_dbg(dev, "%s: reset gpio %d", __func__, pdata->reset_gpio); ret = of_property_read_u32(dev->of_node, "qcom,cdc-mclk-clk-rate", &mclk_rate); if (ret) { dev_err(dev, "Looking up %s property in\n" "node %s failed", "qcom,cdc-mclk-clk-rate", dev->of_node->full_name); devm_kfree(dev, pdata); ret = -EINVAL; goto err; } pdata->mclk_rate = mclk_rate; ret = of_property_read_u32(dev->of_node, "qcom,cdc-dmic-sample-rate", &dmic_sample_rate); if (ret) { dev_err(dev, "Looking up %s property in node %s failed", "qcom,cdc-dmic-sample-rate", dev->of_node->full_name); dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED; } if (pdata->mclk_rate == WCD9XXX_MCLK_CLK_9P6HZ) { if ((dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_3P2MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED)) { dev_err(dev, "Invalid dmic rate %d for mclk %d\n", dmic_sample_rate, pdata->mclk_rate); ret = -EINVAL; goto err; } } else if (pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) { if ((dmic_sample_rate != 
WCD9XXX_DMIC_SAMPLE_RATE_3P072MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_6P144MHZ) && (dmic_sample_rate != WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED)) { dev_err(dev, "Invalid dmic rate %d for mclk %d\n", dmic_sample_rate, pdata->mclk_rate); ret = -EINVAL; goto err; } } pdata->dmic_sample_rate = dmic_sample_rate; return pdata; err: devm_kfree(dev, pdata); return NULL; } static int wcd9xxx_slim_get_laddr(struct slim_device *sb, const u8 *e_addr, u8 e_len, u8 *laddr) { int ret; const unsigned long timeout = jiffies + msecs_to_jiffies(SLIMBUS_PRESENT_TIMEOUT); do { ret = slim_get_logical_addr(sb, e_addr, e_len, laddr); if (!ret) break; /* Give SLIMBUS time to report present and be ready. */ usleep_range(1000, 1000); pr_debug_ratelimited("%s: retyring get logical addr\n", __func__); } while time_before(jiffies, timeout); return ret; } static int wcd9xxx_slim_probe(struct slim_device *slim) { struct wcd9xxx *wcd9xxx; struct wcd9xxx_pdata *pdata; int ret = 0; int intf_type; intf_type = wcd9xxx_get_intf_type(); if (intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { dev_dbg(&slim->dev, "%s:Codec is detected in I2C mode\n", __func__); return -ENODEV; } if (slim->dev.of_node) { dev_info(&slim->dev, "Platform data from device tree\n"); pdata = wcd9xxx_populate_dt_pdata(&slim->dev); ret = wcd9xxx_dt_parse_slim_interface_dev_info(&slim->dev, &pdata->slimbus_slave_device); if (ret) { dev_err(&slim->dev, "Error, parsing slim interface\n"); devm_kfree(&slim->dev, pdata); ret = -EINVAL; goto err; } slim->dev.platform_data = pdata; } else { dev_info(&slim->dev, "Platform data from board file\n"); pdata = slim->dev.platform_data; } if (!pdata) { dev_err(&slim->dev, "Error, no platform data\n"); ret = -EINVAL; goto err; } wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL); if (wcd9xxx == NULL) { pr_err("%s: error, allocation failed\n", __func__); ret = -ENOMEM; goto err; } if (!slim->ctrl) { pr_err("Error, no SLIMBUS control 
data\n"); ret = -EINVAL; goto err_codec; } wcd9xxx->slim = slim; slim_set_clientdata(slim, wcd9xxx); wcd9xxx->reset_gpio = pdata->reset_gpio; wcd9xxx->dev = &slim->dev; wcd9xxx->mclk_rate = pdata->mclk_rate; wcd9xxx->slim_device_bootup = true; ret = wcd9xxx_init_supplies(wcd9xxx, pdata); if (ret) { pr_err("%s: Fail to init Codec supplies %d\n", __func__, ret); goto err_codec; } ret = wcd9xxx_enable_static_supplies(wcd9xxx, pdata); if (ret) { pr_err("%s: Fail to enable Codec pre-reset supplies\n", __func__); goto err_codec; } usleep_range(5, 5); ret = wcd9xxx_reset(wcd9xxx); if (ret) { pr_err("%s: Resetting Codec failed\n", __func__); goto err_supplies; } ret = wcd9xxx_slim_get_laddr(wcd9xxx->slim, wcd9xxx->slim->e_addr, ARRAY_SIZE(wcd9xxx->slim->e_addr), &wcd9xxx->slim->laddr); if (ret) { pr_err("%s: failed to get slimbus %s logical address: %d\n", __func__, wcd9xxx->slim->name, ret); goto err_reset; } wcd9xxx->read_dev = wcd9xxx_slim_read_device; wcd9xxx->write_dev = wcd9xxx_slim_write_device; wcd9xxx_pgd_la = wcd9xxx->slim->laddr; wcd9xxx->slim_slave = &pdata->slimbus_slave_device; if (!wcd9xxx->dev->of_node) wcd9xxx_initialize_irq(&wcd9xxx->core_res, pdata->irq, pdata->irq_base); ret = slim_add_device(slim->ctrl, wcd9xxx->slim_slave); if (ret) { pr_err("%s: error, adding SLIMBUS device failed\n", __func__); goto err_reset; } ret = wcd9xxx_slim_get_laddr(wcd9xxx->slim_slave, wcd9xxx->slim_slave->e_addr, ARRAY_SIZE(wcd9xxx->slim_slave->e_addr), &wcd9xxx->slim_slave->laddr); if (ret) { pr_err("%s: failed to get slimbus %s logical address: %d\n", __func__, wcd9xxx->slim->name, ret); goto err_slim_add; } wcd9xxx_inf_la = wcd9xxx->slim_slave->laddr; wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_SLIMBUS); ret = wcd9xxx_device_init(wcd9xxx); if (ret) { pr_err("%s: error, initializing device failed\n", __func__); goto err_slim_add; } #ifdef CONFIG_DEBUG_FS debugCodec = wcd9xxx; debugfs_wcd9xxx_dent = debugfs_create_dir ("wcd9310_slimbus_interface_device", 0); if 
(!IS_ERR(debugfs_wcd9xxx_dent)) { debugfs_peek = debugfs_create_file("peek", S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent, (void *) "peek", &codec_debug_ops); debugfs_poke = debugfs_create_file("poke", S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent, (void *) "poke", &codec_debug_ops); } #endif return ret; err_slim_add: slim_remove_device(wcd9xxx->slim_slave); err_reset: wcd9xxx_free_reset(wcd9xxx); err_supplies: wcd9xxx_disable_supplies(wcd9xxx, pdata); err_codec: kfree(wcd9xxx); err: return ret; } static int wcd9xxx_slim_remove(struct slim_device *pdev) { struct wcd9xxx *wcd9xxx; struct wcd9xxx_pdata *pdata = pdev->dev.platform_data; #ifdef CONFIG_DEBUG_FS debugfs_remove(debugfs_peek); debugfs_remove(debugfs_poke); debugfs_remove(debugfs_wcd9xxx_dent); #endif wcd9xxx = slim_get_devicedata(pdev); wcd9xxx_deinit_slimslave(wcd9xxx); slim_remove_device(wcd9xxx->slim_slave); wcd9xxx_disable_supplies(wcd9xxx, pdata); wcd9xxx_device_exit(wcd9xxx); return 0; } static int wcd9xxx_device_up(struct wcd9xxx *wcd9xxx) { int ret = 0; struct wcd9xxx_core_resource *wcd9xxx_res = &wcd9xxx->core_res; if (wcd9xxx->slim_device_bootup) { wcd9xxx->slim_device_bootup = false; return 0; } ret = wcd9xxx_reset(wcd9xxx); if (ret) pr_err("%s: Resetting Codec failed\n", __func__); wcd9xxx_bring_up(wcd9xxx); ret = wcd9xxx_irq_init(wcd9xxx_res); if (ret) { pr_err("%s: wcd9xx_irq_init failed : %d\n", __func__, ret); } else { if (wcd9xxx->post_reset) ret = wcd9xxx->post_reset(wcd9xxx); } return ret; } static int wcd9xxx_slim_device_up(struct slim_device *sldev) { struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev); if (!wcd9xxx) { pr_err("%s: wcd9xxx is NULL\n", __func__); return -EINVAL; } dev_dbg(wcd9xxx->dev, "%s: device up\n", __func__); return wcd9xxx_device_up(wcd9xxx); } static int wcd9xxx_slim_device_down(struct slim_device *sldev) { struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev); if (!wcd9xxx) { pr_err("%s: wcd9xxx is NULL\n", __func__); return -EINVAL; } wcd9xxx_irq_exit(&wcd9xxx->core_res); 
if (wcd9xxx->dev_down) wcd9xxx->dev_down(wcd9xxx); dev_dbg(wcd9xxx->dev, "%s: device down\n", __func__); return 0; } static int wcd9xxx_slim_resume(struct slim_device *sldev) { struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev); return wcd9xxx_core_res_resume(&wcd9xxx->core_res); } static int wcd9xxx_i2c_resume(struct i2c_client *i2cdev) { struct wcd9xxx *wcd9xxx = dev_get_drvdata(&i2cdev->dev); if (wcd9xxx) return wcd9xxx_core_res_resume(&wcd9xxx->core_res); else return 0; } static int wcd9xxx_slim_suspend(struct slim_device *sldev, pm_message_t pmesg) { struct wcd9xxx *wcd9xxx = slim_get_devicedata(sldev); return wcd9xxx_core_res_suspend(&wcd9xxx->core_res, pmesg); } static int wcd9xxx_i2c_suspend(struct i2c_client *i2cdev, pm_message_t pmesg) { struct wcd9xxx *wcd9xxx = dev_get_drvdata(&i2cdev->dev); if (wcd9xxx) return wcd9xxx_core_res_suspend(&wcd9xxx->core_res, pmesg); else return 0; } static const struct slim_device_id sitar_slimtest_id[] = { {"sitar-slim", 0}, {} }; static struct slim_driver sitar_slim_driver = { .driver = { .name = "sitar-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = sitar_slimtest_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, }; static const struct slim_device_id sitar1p1_slimtest_id[] = { {"sitar1p1-slim", 0}, {} }; static struct slim_driver sitar1p1_slim_driver = { .driver = { .name = "sitar1p1-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = sitar1p1_slimtest_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, }; static const struct slim_device_id slimtest_id[] = { {"tabla-slim", 0}, {} }; static struct slim_driver tabla_slim_driver = { .driver = { .name = "tabla-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = slimtest_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, }; static const struct slim_device_id 
slimtest2x_id[] = { {"tabla2x-slim", 0}, {} }; static struct slim_driver tabla2x_slim_driver = { .driver = { .name = "tabla2x-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = slimtest2x_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, }; static const struct slim_device_id taiko_slimtest_id[] = { {"taiko-slim-pgd", 0}, {} }; static struct slim_driver taiko_slim_driver = { .driver = { .name = "taiko-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = taiko_slimtest_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, .device_up = wcd9xxx_slim_device_up, .device_down = wcd9xxx_slim_device_down, }; static const struct slim_device_id tapan_slimtest_id[] = { {"tapan-slim-pgd", 0}, {} }; static struct slim_driver tapan_slim_driver = { .driver = { .name = "tapan-slim", .owner = THIS_MODULE, }, .probe = wcd9xxx_slim_probe, .remove = wcd9xxx_slim_remove, .id_table = tapan_slimtest_id, .resume = wcd9xxx_slim_resume, .suspend = wcd9xxx_slim_suspend, .device_up = wcd9xxx_slim_device_up, .device_down = wcd9xxx_slim_device_down, }; static struct i2c_device_id wcd9xxx_id_table[] = { {"wcd9xxx-i2c", WCD9XXX_I2C_TOP_LEVEL}, {"wcd9xxx-i2c", WCD9XXX_I2C_ANALOG}, {"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_1}, {"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_2}, {} }; static struct i2c_device_id tabla_id_table[] = { {"tabla top level", WCD9XXX_I2C_TOP_LEVEL}, {"tabla analog", WCD9XXX_I2C_ANALOG}, {"tabla digital1", WCD9XXX_I2C_DIGITAL_1}, {"tabla digital2", WCD9XXX_I2C_DIGITAL_2}, {} }; MODULE_DEVICE_TABLE(i2c, tabla_id_table); static struct i2c_driver tabla_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "tabla-i2c-core", }, .id_table = tabla_id_table, .probe = wcd9xxx_i2c_probe, .remove = __devexit_p(wcd9xxx_i2c_remove), .resume = wcd9xxx_i2c_resume, .suspend = wcd9xxx_i2c_suspend, }; static struct i2c_driver wcd9xxx_i2c_driver = { .driver = { .owner = 
THIS_MODULE, .name = "wcd9xxx-i2c-core", }, .id_table = wcd9xxx_id_table, .probe = wcd9xxx_i2c_probe, .remove = __devexit_p(wcd9xxx_i2c_remove), .resume = wcd9xxx_i2c_resume, .suspend = wcd9xxx_i2c_suspend, }; static int __init wcd9xxx_init(void) { int ret[NUM_WCD9XXX_REG_RET]; int i = 0; wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING); ret[0] = slim_driver_register(&tabla_slim_driver); if (ret[0]) pr_err("Failed to register tabla SB driver: %d\n", ret[0]); ret[1] = slim_driver_register(&tabla2x_slim_driver); if (ret[1]) pr_err("Failed to register tabla2x SB driver: %d\n", ret[1]); ret[2] = i2c_add_driver(&tabla_i2c_driver); if (ret[2]) pr_err("failed to add the tabla2x I2C driver: %d\n", ret[2]); ret[3] = slim_driver_register(&sitar_slim_driver); if (ret[3]) pr_err("Failed to register sitar SB driver: %d\n", ret[3]); ret[4] = slim_driver_register(&sitar1p1_slim_driver); if (ret[4]) pr_err("Failed to register sitar SB driver: %d\n", ret[4]); ret[5] = slim_driver_register(&taiko_slim_driver); if (ret[5]) pr_err("Failed to register taiko SB driver: %d\n", ret[5]); ret[6] = i2c_add_driver(&wcd9xxx_i2c_driver); if (ret[6]) pr_err("failed to add the wcd9xxx I2C driver: %d\n", ret[6]); ret[7] = slim_driver_register(&tapan_slim_driver); if (ret[7]) pr_err("Failed to register tapan SB driver: %d\n", ret[7]); for (i = 0; i < NUM_WCD9XXX_REG_RET; i++) { if (ret[i]) return ret[i]; } return 0; } module_init(wcd9xxx_init); static void __exit wcd9xxx_exit(void) { wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING); } module_exit(wcd9xxx_exit); MODULE_DESCRIPTION("Codec core driver"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2");
gpl-2.0
abwaheed/collectd
src/dns.c
12
10700
/** * collectd - src/dns.c * Copyright (C) 2006-2011 Florian octo Forster * Copyright (C) 2009 Mirko Buffoni * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; only version 2 of the License is applicable. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: * Florian octo Forster <octo at collectd.org> * Mirko Buffoni <briareos at eswat.org> **/ #define _DEFAULT_SOURCE #define _BSD_SOURCE #include "collectd.h" #include "common.h" #include "plugin.h" #include "configfile.h" #include "utils_dns.h" #include <pthread.h> #include <poll.h> #include <pcap.h> /* * Private data types */ struct counter_list_s { unsigned int key; unsigned int value; struct counter_list_s *next; }; typedef struct counter_list_s counter_list_t; /* * Private variables */ static const char *config_keys[] = { "Interface", "IgnoreSource", "SelectNumericQueryTypes" }; static int config_keys_num = STATIC_ARRAY_SIZE (config_keys); static int select_numeric_qtype = 1; #define PCAP_SNAPLEN 1460 static char *pcap_device = NULL; static derive_t tr_queries; static derive_t tr_responses; static counter_list_t *qtype_list; static counter_list_t *opcode_list; static counter_list_t *rcode_list; static pthread_t listen_thread; static int listen_thread_init = 0; /* The `traffic' mutex if for `tr_queries' and `tr_responses' */ static pthread_mutex_t traffic_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t qtype_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t 
opcode_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t rcode_mutex = PTHREAD_MUTEX_INITIALIZER; /* * Private functions */ static counter_list_t *counter_list_search (counter_list_t **list, unsigned int key) { counter_list_t *entry; for (entry = *list; entry != NULL; entry = entry->next) if (entry->key == key) break; return (entry); } static counter_list_t *counter_list_create (counter_list_t **list, unsigned int key, unsigned int value) { counter_list_t *entry; entry = (counter_list_t *) malloc (sizeof (counter_list_t)); if (entry == NULL) return (NULL); memset (entry, 0, sizeof (counter_list_t)); entry->key = key; entry->value = value; if (*list == NULL) { *list = entry; } else { counter_list_t *last; last = *list; while (last->next != NULL) last = last->next; last->next = entry; } return (entry); } static void counter_list_add (counter_list_t **list, unsigned int key, unsigned int increment) { counter_list_t *entry; entry = counter_list_search (list, key); if (entry != NULL) { entry->value += increment; } else { counter_list_create (list, key, increment); } } static int dns_config (const char *key, const char *value) { if (strcasecmp (key, "Interface") == 0) { if (pcap_device != NULL) free (pcap_device); if ((pcap_device = strdup (value)) == NULL) return (1); } else if (strcasecmp (key, "IgnoreSource") == 0) { if (value != NULL) ignore_list_add_name (value); } else if (strcasecmp (key, "SelectNumericQueryTypes") == 0) { if ((value != NULL) && IS_FALSE (value)) select_numeric_qtype = 0; else select_numeric_qtype = 1; } else { return (-1); } return (0); } static void dns_child_callback (const rfc1035_header_t *dns) { if (dns->qr == 0) { /* This is a query */ int skip = 0; if (!select_numeric_qtype) { const char *str = qtype_str(dns->qtype); if ((str == NULL) || (str[0] == '#')) skip = 1; } pthread_mutex_lock (&traffic_mutex); tr_queries += dns->length; pthread_mutex_unlock (&traffic_mutex); if (skip == 0) { pthread_mutex_lock (&qtype_mutex); 
counter_list_add (&qtype_list, dns->qtype, 1); pthread_mutex_unlock (&qtype_mutex); } } else { /* This is a reply */ pthread_mutex_lock (&traffic_mutex); tr_responses += dns->length; pthread_mutex_unlock (&traffic_mutex); pthread_mutex_lock (&rcode_mutex); counter_list_add (&rcode_list, dns->rcode, 1); pthread_mutex_unlock (&rcode_mutex); } /* FIXME: Are queries, replies or both interesting? */ pthread_mutex_lock (&opcode_mutex); counter_list_add (&opcode_list, dns->opcode, 1); pthread_mutex_unlock (&opcode_mutex); } static int dns_run_pcap_loop (void) { pcap_t *pcap_obj; char pcap_error[PCAP_ERRBUF_SIZE]; struct bpf_program fp; int status; /* Don't block any signals */ { sigset_t sigmask; sigemptyset (&sigmask); pthread_sigmask (SIG_SETMASK, &sigmask, NULL); } /* Passing `pcap_device == NULL' is okay and the same as passign "any" */ DEBUG ("dns plugin: Creating PCAP object.."); pcap_obj = pcap_open_live ((pcap_device != NULL) ? pcap_device : "any", PCAP_SNAPLEN, 0 /* Not promiscuous */, (int) CDTIME_T_TO_MS (plugin_get_interval () / 2), pcap_error); if (pcap_obj == NULL) { ERROR ("dns plugin: Opening interface `%s' " "failed: %s", (pcap_device != NULL) ? 
pcap_device : "any", pcap_error); return (PCAP_ERROR); } memset (&fp, 0, sizeof (fp)); status = pcap_compile (pcap_obj, &fp, "udp port 53", 1, 0); if (status < 0) { ERROR ("dns plugin: pcap_compile failed: %s", pcap_statustostr (status)); return (status); } status = pcap_setfilter (pcap_obj, &fp); if (status < 0) { ERROR ("dns plugin: pcap_setfilter failed: %s", pcap_statustostr (status)); return (status); } DEBUG ("dns plugin: PCAP object created."); dnstop_set_pcap_obj (pcap_obj); dnstop_set_callback (dns_child_callback); status = pcap_loop (pcap_obj, -1 /* loop forever */, handle_pcap /* callback */, NULL /* user data */); INFO ("dns plugin: pcap_loop exited with status %i.", status); /* We need to handle "PCAP_ERROR" specially because libpcap currently * doesn't return PCAP_ERROR_IFACE_NOT_UP for compatibility reasons. */ if (status == PCAP_ERROR) status = PCAP_ERROR_IFACE_NOT_UP; pcap_close (pcap_obj); return (status); } /* int dns_run_pcap_loop */ static int dns_sleep_one_interval (void) /* {{{ */ { cdtime_t interval; struct timespec ts = { 0, 0 }; int status = 0; interval = plugin_get_interval (); CDTIME_T_TO_TIMESPEC (interval, &ts); while (42) { struct timespec rem = { 0, 0 }; status = nanosleep (&ts, &rem); if (status == 0) break; else if ((errno == EINTR) || (errno == EAGAIN)) { ts = rem; continue; } else break; } return (status); } /* }}} int dns_sleep_one_interval */ static void *dns_child_loop (__attribute__((unused)) void *dummy) /* {{{ */ { int status; while (42) { status = dns_run_pcap_loop (); if (status != PCAP_ERROR_IFACE_NOT_UP) break; dns_sleep_one_interval (); } if (status != PCAP_ERROR_BREAK) ERROR ("dns plugin: PCAP returned error %s.", pcap_statustostr (status)); listen_thread_init = 0; return (NULL); } /* }}} void *dns_child_loop */ static int dns_init (void) { /* clean up an old thread */ int status; pthread_mutex_lock (&traffic_mutex); tr_queries = 0; tr_responses = 0; pthread_mutex_unlock (&traffic_mutex); if (listen_thread_init != 0) 
return (-1); status = plugin_thread_create (&listen_thread, NULL, dns_child_loop, (void *) 0); if (status != 0) { char errbuf[1024]; ERROR ("dns plugin: pthread_create failed: %s", sstrerror (errno, errbuf, sizeof (errbuf))); return (-1); } listen_thread_init = 1; return (0); } /* int dns_init */ static void submit_derive (const char *type, const char *type_instance, derive_t value) { value_t values[1]; value_list_t vl = VALUE_LIST_INIT; values[0].derive = value; vl.values = values; vl.values_len = 1; sstrncpy (vl.host, hostname_g, sizeof (vl.host)); sstrncpy (vl.plugin, "dns", sizeof (vl.plugin)); sstrncpy (vl.type, type, sizeof (vl.type)); sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance)); plugin_dispatch_values (&vl); } /* void submit_derive */ static void submit_octets (derive_t queries, derive_t responses) { value_t values[2]; value_list_t vl = VALUE_LIST_INIT; values[0].derive = queries; values[1].derive = responses; vl.values = values; vl.values_len = 2; sstrncpy (vl.host, hostname_g, sizeof (vl.host)); sstrncpy (vl.plugin, "dns", sizeof (vl.plugin)); sstrncpy (vl.type, "dns_octets", sizeof (vl.type)); plugin_dispatch_values (&vl); } /* void submit_octets */ static int dns_read (void) { unsigned int keys[T_MAX]; unsigned int values[T_MAX]; int len; int i; counter_list_t *ptr; pthread_mutex_lock (&traffic_mutex); values[0] = tr_queries; values[1] = tr_responses; pthread_mutex_unlock (&traffic_mutex); if ((values[0] != 0) || (values[1] != 0)) submit_octets (values[0], values[1]); pthread_mutex_lock (&qtype_mutex); for (ptr = qtype_list, len = 0; (ptr != NULL) && (len < T_MAX); ptr = ptr->next, len++) { keys[len] = ptr->key; values[len] = ptr->value; } pthread_mutex_unlock (&qtype_mutex); for (i = 0; i < len; i++) { DEBUG ("dns plugin: qtype = %u; counter = %u;", keys[i], values[i]); submit_derive ("dns_qtype", qtype_str (keys[i]), values[i]); } pthread_mutex_lock (&opcode_mutex); for (ptr = opcode_list, len = 0; (ptr != NULL) && (len < 
T_MAX); ptr = ptr->next, len++) { keys[len] = ptr->key; values[len] = ptr->value; } pthread_mutex_unlock (&opcode_mutex); for (i = 0; i < len; i++) { DEBUG ("dns plugin: opcode = %u; counter = %u;", keys[i], values[i]); submit_derive ("dns_opcode", opcode_str (keys[i]), values[i]); } pthread_mutex_lock (&rcode_mutex); for (ptr = rcode_list, len = 0; (ptr != NULL) && (len < T_MAX); ptr = ptr->next, len++) { keys[len] = ptr->key; values[len] = ptr->value; } pthread_mutex_unlock (&rcode_mutex); for (i = 0; i < len; i++) { DEBUG ("dns plugin: rcode = %u; counter = %u;", keys[i], values[i]); submit_derive ("dns_rcode", rcode_str (keys[i]), values[i]); } return (0); } /* int dns_read */ void module_register (void) { plugin_register_config ("dns", dns_config, config_keys, config_keys_num); plugin_register_init ("dns", dns_init); plugin_register_read ("dns", dns_read); } /* void module_register */
gpl-2.0
alexfeinman/nv_tegra.19.2
drivers/media/common/saa7146_core.c
12
14119
/* saa7146.o - driver for generic saa7146-based hardware Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <media/saa7146.h> LIST_HEAD(saa7146_devices); DECLARE_MUTEX(saa7146_devices_lock); static int saa7146_num = 0; unsigned int saa7146_debug = 0; module_param(saa7146_debug, int, 0644); MODULE_PARM_DESC(saa7146_debug, "debug level (default: 0)"); #if 0 static void dump_registers(struct saa7146_dev* dev) { int i = 0; INFO((" @ %li jiffies:\n",jiffies)); for(i = 0; i <= 0x148; i+=4) { printk("0x%03x: 0x%08x\n",i,saa7146_read(dev,i)); } } #endif /**************************************************************************** * gpio and debi helper functions ****************************************************************************/ void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data) { u32 value = 0; BUG_ON(port > 3); value = saa7146_read(dev, GPIO_CTRL); value &= ~(0xff << (8*port)); value |= (data << (8*port)); saa7146_write(dev, GPIO_CTRL, value); } /* This DEBI code is based on the saa7146 Stradis driver by Nathan Laredo */ int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop) { unsigned long start; /* wait for registers to be programmed */ start = jiffies; while (1) { if (saa7146_read(dev, MC2) & 2) break; if (time_after(jiffies, start + HZ/20)) { DEB_S(("timed out 
while waiting for registers getting programmed\n")); return -ETIMEDOUT; } if (nobusyloop) msleep(1); } /* wait for transfer to complete */ start = jiffies; while (1) { if (!(saa7146_read(dev, PSR) & SPCI_DEBI_S)) break; saa7146_read(dev, MC2); if (time_after(jiffies, start + HZ/4)) { DEB_S(("timed out while waiting for transfer completion\n")); return -ETIMEDOUT; } if (nobusyloop) msleep(1); } return 0; } /**************************************************************************** * general helper functions ****************************************************************************/ /* this is videobuf_vmalloc_to_sg() from video-buf.c make sure virt has been allocated with vmalloc_32(), otherwise the BUG() may be triggered on highmem machines */ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) { struct scatterlist *sglist; struct page *pg; int i; sglist = kmalloc(sizeof(struct scatterlist)*nr_pages, GFP_KERNEL); if (NULL == sglist) return NULL; memset(sglist,0,sizeof(struct scatterlist)*nr_pages); for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { pg = vmalloc_to_page(virt); if (NULL == pg) goto err; if (PageHighMem(pg)) BUG(); sglist[i].page = pg; sglist[i].length = PAGE_SIZE; } return sglist; err: kfree(sglist); return NULL; } /********************************************************************************/ /* common page table functions */ char *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt) { int pages = (length+PAGE_SIZE-1)/PAGE_SIZE; char *mem = vmalloc_32(length); int slen = 0; if (NULL == mem) { return NULL; } if (!(pt->slist = vmalloc_to_sg(mem, pages))) { vfree(mem); return NULL; } if (saa7146_pgtable_alloc(pci, pt)) { kfree(pt->slist); pt->slist = NULL; vfree(mem); return NULL; } slen = pci_map_sg(pci,pt->slist,pages,PCI_DMA_FROMDEVICE); if (0 != saa7146_pgtable_build_single(pci, pt, pt->slist, slen)) { return NULL; } return mem; } void saa7146_pgtable_free(struct pci_dev *pci, 
struct saa7146_pgtable *pt) { if (NULL == pt->cpu) return; pci_free_consistent(pci, pt->size, pt->cpu, pt->dma); pt->cpu = NULL; if (NULL != pt->slist) { kfree(pt->slist); pt->slist = NULL; } } int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt) { u32 *cpu; dma_addr_t dma_addr; cpu = pci_alloc_consistent(pci, PAGE_SIZE, &dma_addr); if (NULL == cpu) { return -ENOMEM; } pt->size = PAGE_SIZE; pt->cpu = cpu; pt->dma = dma_addr; return 0; } int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int sglen ) { u32 *ptr, fill; int nr_pages = 0; int i,p; BUG_ON(0 == sglen); BUG_ON(list->offset > PAGE_SIZE); /* if we have a user buffer, the first page may not be aligned to a page boundary. */ pt->offset = list->offset; ptr = pt->cpu; for (i = 0; i < sglen; i++, list++) { /* printk("i:%d, adr:0x%08x, len:%d, offset:%d\n", i,sg_dma_address(list), sg_dma_len(list), list->offset); */ for (p = 0; p * 4096 < list->length; p++, ptr++) { *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096); nr_pages++; } } /* safety; fill the page table up with the last valid page */ fill = *(ptr-1); for(i=nr_pages;i<1024;i++) { *ptr++ = fill; } /* ptr = pt->cpu; printk("offset: %d\n",pt->offset); for(i=0;i<5;i++) { printk("ptr1 %d: 0x%08x\n",i,ptr[i]); } */ return 0; } /********************************************************************************/ /* interrupt handler */ static irqreturn_t interrupt_hw(int irq, void *dev_id, struct pt_regs *regs) { struct saa7146_dev *dev = dev_id; u32 isr = 0; /* read out the interrupt status register */ isr = saa7146_read(dev, ISR); /* is this our interrupt? 
*/ if ( 0 == isr ) { /* nope, some other device */ return IRQ_NONE; } saa7146_write(dev, ISR, isr); if( 0 != (dev->ext)) { if( 0 != (dev->ext->irq_mask & isr )) { if( 0 != dev->ext->irq_func ) { dev->ext->irq_func(dev, &isr); } isr &= ~dev->ext->irq_mask; } } if (0 != (isr & (MASK_27))) { DEB_INT(("irq: RPS0 (0x%08x).\n",isr)); if( 0 != dev->vv_data && 0 != dev->vv_callback) { dev->vv_callback(dev,isr); } isr &= ~MASK_27; } if (0 != (isr & (MASK_28))) { if( 0 != dev->vv_data && 0 != dev->vv_callback) { dev->vv_callback(dev,isr); } isr &= ~MASK_28; } if (0 != (isr & (MASK_16|MASK_17))) { u32 status = saa7146_read(dev, I2C_STATUS); if( (0x3 == (status & 0x3)) || (0 == (status & 0x1)) ) { SAA7146_IER_DISABLE(dev, MASK_16|MASK_17); /* only wake up if we expect something */ if( 0 != dev->i2c_op ) { u32 psr = (saa7146_read(dev, PSR) >> 16) & 0x2; u32 ssr = (saa7146_read(dev, SSR) >> 17) & 0x1f; DEB_I2C(("irq: i2c, status: 0x%08x, psr:0x%02x, ssr:0x%02x).\n",status,psr,ssr)); dev->i2c_op = 0; wake_up(&dev->i2c_wq); } else { DEB_I2C(("unexpected irq: i2c, status: 0x%08x, isr %#x\n",status, isr)); } } else { DEB_I2C(("unhandled irq: i2c, status: 0x%08x, isr %#x\n",status, isr)); } isr &= ~(MASK_16|MASK_17); } if( 0 != isr ) { ERR(("warning: interrupt enabled, but not handled properly.(0x%08x)\n",isr)); ERR(("disabling interrupt source(s)!\n")); SAA7146_IER_DISABLE(dev,isr); } return IRQ_HANDLED; } /*********************************************************************************/ /* configuration-functions */ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent) { struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data; struct saa7146_extension *ext = pci_ext->ext; struct saa7146_dev *dev; int err = -ENOMEM; dev = kmalloc(sizeof(struct saa7146_dev), GFP_KERNEL); if (!dev) { ERR(("out of memory.\n")); goto out; } /* clear out mem for sure */ memset(dev, 0x0, sizeof(struct saa7146_dev)); 
DEB_EE(("pci:%p\n",pci)); err = pci_enable_device(pci); if (err < 0) { ERR(("pci_enable_device() failed.\n")); goto err_free; } /* enable bus-mastering */ pci_set_master(pci); dev->pci = pci; /* get chip-revision; this is needed to enable bug-fixes */ err = pci_read_config_dword(pci, PCI_CLASS_REVISION, &dev->revision); if (err < 0) { ERR(("pci_read_config_dword() failed.\n")); goto err_disable; } dev->revision &= 0xf; /* remap the memory from virtual to physical adress */ err = pci_request_region(pci, 0, "saa7146"); if (err < 0) goto err_disable; dev->mem = ioremap(pci_resource_start(pci, 0), pci_resource_len(pci, 0)); if (!dev->mem) { ERR(("ioremap() failed.\n")); err = -ENODEV; goto err_release; } /* we don't do a master reset here anymore, it screws up some boards that don't have an i2c-eeprom for configuration values */ /* saa7146_write(dev, MC1, MASK_31); */ /* disable all irqs */ saa7146_write(dev, IER, 0); /* shut down all dma transfers and rps tasks */ saa7146_write(dev, MC1, 0x30ff0000); /* clear out any rps-signals pending */ saa7146_write(dev, MC2, 0xf8000000); /* request an interrupt for the saa7146 */ err = request_irq(pci->irq, interrupt_hw, SA_SHIRQ | SA_INTERRUPT, dev->name, dev); if (err < 0) { ERR(("request_irq() failed.\n")); goto err_unmap; } err = -ENOMEM; /* get memory for various stuff */ dev->d_rps0.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, &dev->d_rps0.dma_handle); if (!dev->d_rps0.cpu_addr) goto err_free_irq; memset(dev->d_rps0.cpu_addr, 0x0, SAA7146_RPS_MEM); dev->d_rps1.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, &dev->d_rps1.dma_handle); if (!dev->d_rps1.cpu_addr) goto err_free_rps0; memset(dev->d_rps1.cpu_addr, 0x0, SAA7146_RPS_MEM); dev->d_i2c.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, &dev->d_i2c.dma_handle); if (!dev->d_i2c.cpu_addr) goto err_free_rps1; memset(dev->d_i2c.cpu_addr, 0x0, SAA7146_RPS_MEM); /* the rest + print status message */ /* create a nice device name */ sprintf(dev->name, "saa7146 
(%d)", saa7146_num); INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device)); dev->ext = ext; pci_set_drvdata(pci, dev); init_MUTEX(&dev->lock); spin_lock_init(&dev->int_slock); spin_lock_init(&dev->slock); init_MUTEX(&dev->i2c_lock); dev->module = THIS_MODULE; init_waitqueue_head(&dev->i2c_wq); /* set some sane pci arbitrition values */ saa7146_write(dev, PCI_BT_V1, 0x1c00101f); /* TODO: use the status code of the callback */ err = -ENODEV; if (ext->probe && ext->probe(dev)) { DEB_D(("ext->probe() failed for %p. skipping device.\n",dev)); goto err_free_i2c; } if (ext->attach(dev, pci_ext)) { DEB_D(("ext->attach() failed for %p. skipping device.\n",dev)); goto err_unprobe; } INIT_LIST_HEAD(&dev->item); list_add_tail(&dev->item,&saa7146_devices); saa7146_num++; err = 0; out: return err; err_unprobe: pci_set_drvdata(pci, NULL); err_free_i2c: pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_i2c.cpu_addr, dev->d_i2c.dma_handle); err_free_rps1: pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps1.cpu_addr, dev->d_rps1.dma_handle); err_free_rps0: pci_free_consistent(pci, SAA7146_RPS_MEM, dev->d_rps0.cpu_addr, dev->d_rps0.dma_handle); err_free_irq: free_irq(pci->irq, (void *)dev); err_unmap: iounmap(dev->mem); err_release: pci_release_region(pci, 0); err_disable: pci_disable_device(pci); err_free: kfree(dev); goto out; } static void saa7146_remove_one(struct pci_dev *pdev) { struct saa7146_dev* dev = pci_get_drvdata(pdev); struct { void *addr; dma_addr_t dma; } dev_map[] = { { dev->d_i2c.cpu_addr, dev->d_i2c.dma_handle }, { dev->d_rps1.cpu_addr, dev->d_rps1.dma_handle }, { dev->d_rps0.cpu_addr, dev->d_rps0.dma_handle }, { NULL, 0 } }, *p; DEB_EE(("dev:%p\n",dev)); dev->ext->detach(dev); /* shut down all video dma transfers */ saa7146_write(dev, MC1, 0x00ff0000); /* disable all irqs, release irq-routine */ saa7146_write(dev, IER, 0); free_irq(pdev->irq, dev); for (p = 
dev_map; p->addr; p++) pci_free_consistent(pdev, SAA7146_RPS_MEM, p->addr, p->dma); iounmap(dev->mem); pci_release_region(pdev, 0); list_del(&dev->item); pci_disable_device(pdev); kfree(dev); saa7146_num--; } /*********************************************************************************/ /* extension handling functions */ int saa7146_register_extension(struct saa7146_extension* ext) { DEB_EE(("ext:%p\n",ext)); ext->driver.name = ext->name; ext->driver.id_table = ext->pci_tbl; ext->driver.probe = saa7146_init_one; ext->driver.remove = saa7146_remove_one; printk("saa7146: register extension '%s'.\n",ext->name); return pci_module_init(&ext->driver); } int saa7146_unregister_extension(struct saa7146_extension* ext) { DEB_EE(("ext:%p\n",ext)); printk("saa7146: unregister extension '%s'.\n",ext->name); pci_unregister_driver(&ext->driver); return 0; } EXPORT_SYMBOL_GPL(saa7146_register_extension); EXPORT_SYMBOL_GPL(saa7146_unregister_extension); /* misc functions used by extension modules */ EXPORT_SYMBOL_GPL(saa7146_pgtable_alloc); EXPORT_SYMBOL_GPL(saa7146_pgtable_free); EXPORT_SYMBOL_GPL(saa7146_pgtable_build_single); EXPORT_SYMBOL_GPL(saa7146_vmalloc_build_pgtable); EXPORT_SYMBOL_GPL(saa7146_wait_for_debi_done); EXPORT_SYMBOL_GPL(saa7146_setgpio); EXPORT_SYMBOL_GPL(saa7146_i2c_transfer); EXPORT_SYMBOL_GPL(saa7146_i2c_adapter_prepare); EXPORT_SYMBOL_GPL(saa7146_debug); EXPORT_SYMBOL_GPL(saa7146_devices); EXPORT_SYMBOL_GPL(saa7146_devices_lock); MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("driver for generic saa7146-based hardware"); MODULE_LICENSE("GPL");
gpl-2.0
charles1018/The-f2fs-filesystem
kernel/fork.c
12
51940
/* * linux/kernel/fork.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * 'fork.c' contains the help-routines for the 'fork' system call * (see also entry.S and others). * Fork is rather simple, once you get the hang of it, but the memory * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' */ #include <linux/slab.h> #include <linux/init.h> #include <linux/unistd.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/completion.h> #include <linux/personality.h> #include <linux/mempolicy.h> #include <linux/sem.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/binfmts.h> #include <linux/mman.h> #include <linux/mmu_notifier.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/nsproxy.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/cgroup.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/seccomp.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/jiffies.h> #include <linux/futex.h> #include <linux/compat.h> #include <linux/kthread.h> #include <linux/task_io_accounting_ops.h> #include <linux/rcupdate.h> #include <linux/ptrace.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> #include <linux/proc_fs.h> #include <linux/profile.h> #include <linux/rmap.h> #include <linux/ksm.h> #include <linux/acct.h> #include <linux/tsacct_kern.h> #include <linux/cn_proc.h> #include <linux/freezer.h> #include <linux/delayacct.h> #include <linux/taskstats_kern.h> #include <linux/random.h> #include <linux/tty.h> #include <linux/blkdev.h> #include <linux/fs_struct.h> #include <linux/magic.h> #include <linux/perf_event.h> #include <linux/posix-timers.h> #include <linux/user-return-notifier.h> #include <linux/oom.h> #include <linux/khugepaged.h> #include <linux/signalfd.h> #include <linux/uprobes.h> #include <linux/aio.h> 
#include <linux/compiler.h> #include <linux/sysctl.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <trace/events/sched.h> #define CREATE_TRACE_POINTS #include <trace/events/task.h> /* * Minimum number of threads to boot the kernel */ #define MIN_THREADS 20 /* * Maximum number of threads */ #define MAX_THREADS FUTEX_TID_MASK /* * Protected counters by write_lock_irq(&tasklist_lock) */ unsigned long total_forks; /* Handle normal Linux uptimes. */ int nr_threads; /* The idle threads do not count.. */ int max_threads; /* tunable limit on nr_threads */ DEFINE_PER_CPU(unsigned long, process_counts) = 0; __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ #ifdef CONFIG_PROVE_RCU int lockdep_tasklist_lock_is_held(void) { return lockdep_is_held(&tasklist_lock); } EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); #endif /* #ifdef CONFIG_PROVE_RCU */ int nr_processes(void) { int cpu; int total = 0; for_each_possible_cpu(cpu) total += per_cpu(process_counts, cpu); return total; } void __weak arch_release_task_struct(struct task_struct *tsk) { } #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR static struct kmem_cache *task_struct_cachep; static inline struct task_struct *alloc_task_struct_node(int node) { return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); } static inline void free_task_struct(struct task_struct *tsk) { kmem_cache_free(task_struct_cachep, tsk); } #endif void __weak arch_release_thread_info(struct thread_info *ti) { } #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR /* * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a * kmemcache based allocator. */ # if THREAD_SIZE >= PAGE_SIZE static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER); return page ? 
page_address(page) : NULL; } static inline void free_thread_info(struct thread_info *ti) { free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); } # else static struct kmem_cache *thread_info_cache; static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); } static void free_thread_info(struct thread_info *ti) { kmem_cache_free(thread_info_cache, ti); } void thread_info_cache_init(void) { thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, THREAD_SIZE, 0, NULL); BUG_ON(thread_info_cache == NULL); } # endif #endif /* SLAB cache for signal_struct structures (tsk->signal) */ static struct kmem_cache *signal_cachep; /* SLAB cache for sighand_struct structures (tsk->sighand) */ struct kmem_cache *sighand_cachep; /* SLAB cache for files_struct structures (tsk->files) */ struct kmem_cache *files_cachep; /* SLAB cache for fs_struct structures (tsk->fs) */ struct kmem_cache *fs_cachep; /* SLAB cache for vm_area_struct structures */ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; static void account_kernel_stack(struct thread_info *ti, int account) { struct zone *zone = page_zone(virt_to_page(ti)); mod_zone_page_state(zone, NR_KERNEL_STACK, account); } void free_task(struct task_struct *tsk) { account_kernel_stack(tsk->stack, -1); arch_release_thread_info(tsk->stack); free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); put_seccomp_filter(tsk); arch_release_task_struct(tsk); free_task_struct(tsk); } EXPORT_SYMBOL(free_task); static inline void free_signal_struct(struct signal_struct *sig) { taskstats_tgid_free(sig); sched_autogroup_exit(sig); kmem_cache_free(signal_cachep, sig); } static inline void put_signal_struct(struct signal_struct *sig) { if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } void __put_task_struct(struct task_struct 
*tsk) { WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); task_numa_free(tsk); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); if (!profile_handoff_task(tsk)) free_task(tsk); } EXPORT_SYMBOL_GPL(__put_task_struct); void __init __weak arch_task_cache_init(void) { } /* * set_max_threads */ static void set_max_threads(unsigned int max_threads_suggested) { u64 threads; /* * The number of threads shall be limited such that the thread * structures may only consume a small part of the available memory. */ if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) threads = MAX_THREADS; else threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, (u64) THREAD_SIZE * 8UL); if (threads > max_threads_suggested) threads = max_threads_suggested; max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); } #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT /* Initialized by the architecture: */ int arch_task_struct_size __read_mostly; #endif void __init fork_init(void) { #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES #endif /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", arch_task_struct_size, ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); #endif /* do the arch specific task caches init */ arch_task_cache_init(); set_max_threads(MAX_THREADS); init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; } int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; return 0; } void set_task_stack_end_magic(struct task_struct *tsk) { unsigned long *stackend; stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ } static struct task_struct 
*dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; struct thread_info *ti; int node = tsk_fork_get_node(orig); int err; tsk = alloc_task_struct_node(node); if (!tsk) return NULL; ti = alloc_thread_info_node(tsk, node); if (!ti) goto free_tsk; err = arch_dup_task_struct(tsk, orig); if (err) goto free_ti; tsk->stack = ti; #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under * the sighand lock in case orig has changed between now and * then. Until then, filter must be NULL to avoid messing up * the usage counts on the error path calling free_task. */ tsk->seccomp.filter = NULL; #endif setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); clear_tsk_need_resched(tsk); set_task_stack_end_magic(tsk); #ifdef CONFIG_CC_STACKPROTECTOR tsk->stack_canary = get_random_int(); #endif /* * One for us, one for whoever does the "release_task()" (usually * parent) */ atomic_set(&tsk->usage, 2); #ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; account_kernel_stack(ti, 1); return tsk; free_ti: free_thread_info(ti); free_tsk: free_task_struct(tsk); return NULL; } #ifdef CONFIG_MMU static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge; uprobe_start_dup_mmap(); down_write(&oldmm->mmap_sem); flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* * Not linked in yet - no deadlock potential: */ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); /* No ordering required: file already has been exposed. 
*/ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); mm->total_vm = oldmm->total_vm; mm->shared_vm = oldmm->shared_vm; mm->exec_vm = oldmm->exec_vm; mm->stack_vm = oldmm->stack_vm; rb_link = &mm->mm_rb.rb_node; rb_parent = NULL; pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); if (retval) goto out; retval = khugepaged_fork(mm, oldmm); if (retval) goto out; prev = NULL; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; if (mpnt->vm_flags & VM_DONTCOPY) { vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, -vma_pages(mpnt)); continue; } charge = 0; if (mpnt->vm_flags & VM_ACCOUNT) { unsigned long len = vma_pages(mpnt); if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ goto fail_nomem; charge = len; } tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!tmp) goto fail_nomem; *tmp = *mpnt; INIT_LIST_HEAD(&tmp->anon_vma_chain); retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; tmp->vm_mm = mm; if (anon_vma_fork(tmp, mpnt)) goto fail_nomem_anon_vma_fork; tmp->vm_flags &= ~VM_LOCKED; tmp->vm_next = tmp->vm_prev = NULL; file = tmp->vm_file; if (file) { struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; get_file(file); if (tmp->vm_flags & VM_DENYWRITE) atomic_dec(&inode->i_writecount); i_mmap_lock_write(mapping); if (tmp->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); i_mmap_unlock_write(mapping); } /* * Clear hugetlb-related page reserves for children. This only * affects MAP_PRIVATE mappings. Faults generated by the child * are not guaranteed to succeed, even if read-only */ if (is_vm_hugetlb_page(tmp)) reset_vma_resv_huge_pages(tmp); /* * Link in the new vma and copy the page table entries. 
*/ *pprev = tmp; pprev = &tmp->vm_next; tmp->vm_prev = prev; prev = tmp; __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; mm->map_count++; retval = copy_page_range(mm, oldmm, mpnt); if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); if (retval) goto out; } /* a new mm has just been created */ arch_dup_mmap(oldmm, mm); retval = 0; out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); up_write(&oldmm->mmap_sem); uprobe_end_dup_mmap(); return retval; fail_nomem_anon_vma_fork: mpol_put(vma_policy(tmp)); fail_nomem_policy: kmem_cache_free(vm_area_cachep, tmp); fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); goto out; } static inline int mm_alloc_pgd(struct mm_struct *mm) { mm->pgd = pgd_alloc(mm); if (unlikely(!mm->pgd)) return -ENOMEM; return 0; } static inline void mm_free_pgd(struct mm_struct *mm) { pgd_free(mm, mm->pgd); } #else static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { down_write(&oldmm->mmap_sem); RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); up_write(&oldmm->mmap_sem); return 0; } #define mm_alloc_pgd(mm) (0) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; static int __init coredump_filter_setup(char *s) { default_dump_filter = (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & MMF_DUMP_FILTER_MASK; return 1; } __setup("coredump_filter=", coredump_filter_setup); #include <linux/init_task.h> static void mm_init_aio(struct mm_struct *mm) { #ifdef CONFIG_AIO spin_lock_init(&mm->ioctx_lock); mm->ioctx_table = NULL; #endif } static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG mm->owner = p; #endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) { 
mm->mmap = NULL; mm->mm_rb = RB_ROOT; mm->vmacache_seqnum = 0; atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); INIT_LIST_HEAD(&mm->mmlist); mm->core_state = NULL; atomic_long_set(&mm->nr_ptes, 0); mm_nr_pmds_init(mm); mm->map_count = 0; mm->locked_vm = 0; mm->pinned_vm = 0; memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); mmu_notifier_mm_init(mm); clear_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS mm->pmd_huge_pte = NULL; #endif if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = default_dump_filter; mm->def_flags = 0; } if (mm_alloc_pgd(mm)) goto fail_nopgd; if (init_new_context(p, mm)) goto fail_nocontext; return mm; fail_nocontext: mm_free_pgd(mm); fail_nopgd: free_mm(mm); return NULL; } static void check_mm(struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { long x = atomic_long_read(&mm->rss_stat.count[i]); if (unlikely(x)) printk(KERN_ALERT "BUG: Bad rss-counter state " "mm:%p idx:%d val:%ld\n", mm, i, x); } if (atomic_long_read(&mm->nr_ptes)) pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n", atomic_long_read(&mm->nr_ptes)); if (mm_nr_pmds(mm)) pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n", mm_nr_pmds(mm)); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS VM_BUG_ON_MM(mm->pmd_huge_pte, mm); #endif } /* * Allocate and initialize an mm_struct. */ struct mm_struct *mm_alloc(void) { struct mm_struct *mm; mm = allocate_mm(); if (!mm) return NULL; memset(mm, 0, sizeof(*mm)); return mm_init(mm, current); } /* * Called when the last reference to the mm * is dropped: either by a lazy thread or by * mmput. Free the page directory and the mm. 
*/ void __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); mm_free_pgd(mm); destroy_context(mm); mmu_notifier_mm_destroy(mm); check_mm(mm); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); /* * Decrement the use count and release all resources for an mm. */ void mmput(struct mm_struct *mm) { might_sleep(); if (atomic_dec_and_test(&mm->mm_users)) { uprobe_clear_state(mm); exit_aio(mm); ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); list_del(&mm->mmlist); spin_unlock(&mmlist_lock); } if (mm->binfmt) module_put(mm->binfmt->module); mmdrop(mm); } } EXPORT_SYMBOL_GPL(mmput); /** * set_mm_exe_file - change a reference to the mm's executable file * * This changes mm's executable file (shown as symlink /proc/[pid]/exe). * * Main users are mmput() and sys_execve(). Callers prevent concurrent * invocations: in mmput() nobody alive left, in execve task is single * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the * mm->exe_file, but does so without using set_mm_exe_file() in order * to do avoid the need for any locks. */ void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { struct file *old_exe_file; /* * It is safe to dereference the exe_file without RCU as * this function is only called if nobody else can access * this mm -- see comment above for justification. */ old_exe_file = rcu_dereference_raw(mm->exe_file); if (new_exe_file) get_file(new_exe_file); rcu_assign_pointer(mm->exe_file, new_exe_file); if (old_exe_file) fput(old_exe_file); } /** * get_mm_exe_file - acquire a reference to the mm's executable file * * Returns %NULL if mm has no associated executable file. * User must release file via fput(). 
*/ struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; rcu_read_lock(); exe_file = rcu_dereference(mm->exe_file); if (exe_file && !get_file_rcu(exe_file)) exe_file = NULL; rcu_read_unlock(); return exe_file; } EXPORT_SYMBOL(get_mm_exe_file); /** * get_task_mm - acquire a reference to the task's mm * * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning * this kernel workthread has transiently adopted a user mm with use_mm, * to do its AIO) is not set and if so returns a reference to it, after * bumping up the use count. User must release the mm via mmput() * after use. Typically used by /proc and ptrace. */ struct mm_struct *get_task_mm(struct task_struct *task) { struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) { if (task->flags & PF_KTHREAD) mm = NULL; else atomic_inc(&mm->mm_users); } task_unlock(task); return mm; } EXPORT_SYMBOL_GPL(get_task_mm); struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) { struct mm_struct *mm; int err; err = mutex_lock_killable(&task->signal->cred_guard_mutex); if (err) return ERR_PTR(err); mm = get_task_mm(task); if (mm && mm != current->mm && !ptrace_may_access(task, mode)) { mmput(mm); mm = ERR_PTR(-EACCES); } mutex_unlock(&task->signal->cred_guard_mutex); return mm; } static void complete_vfork_done(struct task_struct *tsk) { struct completion *vfork; task_lock(tsk); vfork = tsk->vfork_done; if (likely(vfork)) { tsk->vfork_done = NULL; complete(vfork); } task_unlock(tsk); } static int wait_for_vfork_done(struct task_struct *child, struct completion *vfork) { int killed; freezer_do_not_count(); killed = wait_for_completion_killable(vfork); freezer_count(); if (killed) { task_lock(child); child->vfork_done = NULL; task_unlock(child); } put_task_struct(child); return killed; } /* Please note the differences between mmput and mm_release. * mmput is called whenever we stop holding onto a mm_struct, * error success whatever. 
* * mm_release is called after a mm_struct has been removed * from the current process. * * This difference is important for error handling, when we * only half set up a mm_struct for a new process and need to restore * the old one. Because we mmput the new mm_struct before * restoring the old one. . . * Eric Biederman 10 January 1998 */ void mm_release(struct task_struct *tsk, struct mm_struct *mm) { /* Get rid of any futexes when releasing the mm */ #ifdef CONFIG_FUTEX if (unlikely(tsk->robust_list)) { exit_robust_list(tsk); tsk->robust_list = NULL; } #ifdef CONFIG_COMPAT if (unlikely(tsk->compat_robust_list)) { compat_exit_robust_list(tsk); tsk->compat_robust_list = NULL; } #endif if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); #endif uprobe_free_utask(tsk); /* Get rid of any cached register state */ deactivate_mm(tsk, mm); /* * If we're exiting normally, clear a user-space tid field if * requested. We leave this alone when dying by signal, to leave * the value intact in a core dump, and to save the unnecessary * trouble, say, a killed vfork parent shouldn't touch this mm. * Userland only wants this done for a sys_exit. */ if (tsk->clear_child_tid) { if (!(tsk->flags & PF_SIGNALED) && atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. */ put_user(0, tsk->clear_child_tid); sys_futex(tsk->clear_child_tid, FUTEX_WAKE, 1, NULL, NULL, 0); } tsk->clear_child_tid = NULL; } /* * All done, finally we can wake up parent and return this mm to him. * Also kthread_stop() uses this completion for synchronization. */ if (tsk->vfork_done) complete_vfork_done(tsk); } /* * Allocate a new mm structure and copy contents from the * mm structure of the passed in task structure. 
*/ static struct mm_struct *dup_mm(struct task_struct *tsk) { struct mm_struct *mm, *oldmm = current->mm; int err; mm = allocate_mm(); if (!mm) goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); if (!mm_init(mm, tsk)) goto fail_nomem; err = dup_mmap(mm, oldmm); if (err) goto free_pt; mm->hiwater_rss = get_mm_rss(mm); mm->hiwater_vm = mm->total_vm; if (mm->binfmt && !try_module_get(mm->binfmt->module)) goto free_pt; return mm; free_pt: /* don't put binfmt in mmput, we haven't got module yet */ mm->binfmt = NULL; mmput(mm); fail_nomem: return NULL; } static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) { struct mm_struct *mm, *oldmm; int retval; tsk->min_flt = tsk->maj_flt = 0; tsk->nvcsw = tsk->nivcsw = 0; #ifdef CONFIG_DETECT_HUNG_TASK tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; #endif tsk->mm = NULL; tsk->active_mm = NULL; /* * Are we cloning a kernel thread? * * We need to steal a active VM for that.. */ oldmm = current->mm; if (!oldmm) return 0; /* initialize the new vmacache entries */ vmacache_flush(tsk); if (clone_flags & CLONE_VM) { atomic_inc(&oldmm->mm_users); mm = oldmm; goto good_mm; } retval = -ENOMEM; mm = dup_mm(tsk); if (!mm) goto fail_nomem; good_mm: tsk->mm = mm; tsk->active_mm = mm; return 0; fail_nomem: return retval; } static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ spin_lock(&fs->lock); if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; } static int copy_files(unsigned long clone_flags, struct task_struct *tsk) { struct files_struct *oldf, *newf; int error = 0; /* * A background process may not have any files ... 
*/ oldf = current->files; if (!oldf) goto out; if (clone_flags & CLONE_FILES) { atomic_inc(&oldf->count); goto out; } newf = dup_fd(oldf, &error); if (!newf) goto out; tsk->files = newf; error = 0; out: return error; } static int copy_io(unsigned long clone_flags, struct task_struct *tsk) { #ifdef CONFIG_BLOCK struct io_context *ioc = current->io_context; struct io_context *new_ioc; if (!ioc) return 0; /* * Share io context with parent, if CLONE_IO is set */ if (clone_flags & CLONE_IO) { ioc_task_link(ioc); tsk->io_context = ioc; } else if (ioprio_valid(ioc->ioprio)) { new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); if (unlikely(!new_ioc)) return -ENOMEM; new_ioc->ioprio = ioc->ioprio; put_io_context(new_ioc); } #endif return 0; } static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; if (clone_flags & CLONE_SIGHAND) { atomic_inc(&current->sighand->count); return 0; } sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); rcu_assign_pointer(tsk->sighand, sig); if (!sig) return -ENOMEM; atomic_set(&sig->count, 1); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); return 0; } void __cleanup_sighand(struct sighand_struct *sighand) { if (atomic_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); /* * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it * without an RCU grace period, see __lock_task_sighand(). */ kmem_cache_free(sighand_cachep, sighand); } } /* * Initialize POSIX timer handling for a thread group. */ static void posix_cpu_timers_init_group(struct signal_struct *sig) { unsigned long cpu_limit; cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (cpu_limit != RLIM_INFINITY) { sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); sig->cputimer.running = 1; } /* The timer lists. 
*/ INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); INIT_LIST_HEAD(&sig->cpu_timers[2]); } static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; if (clone_flags & CLONE_THREAD) return 0; sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; sig->nr_threads = 1; atomic_set(&sig->live, 1); atomic_set(&sig->sigcnt, 1); /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); init_waitqueue_head(&sig->wait_chldexit); sig->curr_target = tsk; init_sigpending(&sig->shared_pending); INIT_LIST_HEAD(&sig->posix_timers); seqlock_init(&sig->stats_lock); prev_cputime_init(&sig->prev_cputime); hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); sig->real_timer.function = it_real_fn; task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); posix_cpu_timers_init_group(sig); tty_audit_fork(sig); sched_autogroup_fork(sig); sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; sig->has_child_subreaper = current->signal->has_child_subreaper || current->signal->is_child_subreaper; mutex_init(&sig->cred_guard_mutex); return 0; } static void copy_seccomp(struct task_struct *p) { #ifdef CONFIG_SECCOMP /* * Must be called with sighand->lock held, which is common to * all threads in the group. Holding cred_guard_mutex is not * needed because this new task is not yet running and cannot * be racing exec. */ assert_spin_locked(&current->sighand->siglock); /* Ref-count the new filter user, and assign it. 
*/ get_seccomp_filter(current); p->seccomp = current->seccomp; /* * Explicitly enable no_new_privs here in case it got set * between the task_struct being duplicated and holding the * sighand lock. The seccomp state and nnp must be in sync. */ if (task_no_new_privs(current)) task_set_no_new_privs(p); /* * If the parent gained a seccomp mode after copying thread * flags and between before we held the sighand lock, we have * to manually enable the seccomp thread flag here. */ if (p->seccomp.mode != SECCOMP_MODE_DISABLED) set_tsk_thread_flag(p, TIF_SECCOMP); #endif } SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; return task_pid_vnr(current); } static void rt_mutex_init_task(struct task_struct *p) { raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES p->pi_waiters = RB_ROOT; p->pi_waiters_leftmost = NULL; p->pi_blocked_on = NULL; #endif } /* * Initialize POSIX timer handling for a single task. */ static void posix_cpu_timers_init(struct task_struct *tsk) { tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[1]); INIT_LIST_HEAD(&tsk->cpu_timers[2]); } static inline void init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { task->pids[type].pid = pid; } /* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. 
*/ static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace, unsigned long tls) { int retval; struct task_struct *p; void *cgrp_ss_priv[CGROUP_CANFORK_COUNT] = {}; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid or user namespace * do not allow it to share a thread group with the forking task. 
*/ if (clone_flags & CLONE_THREAD) { if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || (task_active_pid_ns(current) != current->nsproxy->pid_ns_for_children)) return ERR_PTR(-EINVAL); } retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current); if (!p) goto fork_out; ftrace_graph_init_task(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_free; } current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. */ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqlock_init(&p->vtime_seqlock); p->vtime_snap = 0; p->vtime_snap_whence = VTIME_SLEEPING; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); #endif p->default_timer_slack_ns = current->timer_slack_ns; task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); p->start_time = ktime_get_ns(); p->real_start_time = ktime_get_boot_ns(); p->io_context = NULL; p->audit_context = NULL; if (clone_flags & CLONE_THREAD) 
threadgroup_change_begin(current); cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_threadgroup_lock; } #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif /* Perform scheduler related setup. Assign this task to a CPU. 
*/ retval = sched_fork(clone_flags, p); if (retval) goto bad_fork_cleanup_policy; retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_audit; retval = copy_files(clone_flags, p); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children); if (IS_ERR(pid)) { retval = PTR_ERR(pid); goto bad_fork_cleanup_io; } } p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_BLOCK p->plug = NULL; #endif #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. 
*/ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { p->exit_signal = -1; p->group_leader = current->group_leader; p->tgid = current->tgid; } else { if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->group_leader = p; p->tgid = p->pid; } p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->pdeath_signal = 0; INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted the the new process's css_set can be changed * between here and cgroup_post_fork() if an organisation operation is in * progress. */ retval = cgroup_can_fork(p, cgrp_ss_priv); if (retval) goto bad_fork_free_pid; /* * Make it visible to the rest of the system, but dont wake it up yet. * Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(&current->sighand->siglock); /* * Copy seccomp details explicitly here, in case they were changed * before holding sighand lock. */ copy_seccomp(p); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * it's process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). 
*/ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_cancel_cgroup; } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); init_task_pid(p, PIDTYPE_PID, pid); if (thread_group_leader(p)) { init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); init_task_pid(p, PIDTYPE_SID, task_session(current)); if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->leader_pid = pid; p->signal->tty = tty_kref_get(current->signal->tty); list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); attach_pid(p, PIDTYPE_PGID); attach_pid(p, PIDTYPE_SID); __this_cpu_inc(process_counts); } else { current->signal->nr_threads++; atomic_inc(&current->signal->live); atomic_inc(&current->signal->sigcnt); list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } attach_pid(p, PIDTYPE_PID); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p, cgrp_ss_priv); if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); uprobe_copy_process(p, clone_flags); return p; bad_fork_cancel_cgroup: cgroup_cancel_fork(p, cgrp_ss_priv); bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ 
bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: #endif if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); delayacct_tsk_free(p); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); } static inline void init_idle_pids(struct pid_link *links) { enum pid_type type; for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { INIT_HLIST_NODE(&links[type].node); /* not really needed */ links[type].pid = &init_struct_pid; } } struct task_struct *fork_idle(int cpu) { struct task_struct *task; task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0); if (!IS_ERR(task)) { init_idle_pids(task->pids); init_idle(task, cpu); } return task; } /* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long _do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr, unsigned long tls) { struct task_struct *p; int trace = 0; long nr; /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. 
*/ if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, stack_size, child_tidptr, NULL, trace, tls); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; struct pid *pid; trace_sched_process_fork(current, p); pid = get_task_pid(p, PIDTYPE_PID); nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } put_pid(pid); } else { nr = PTR_ERR(p); } return nr; } #ifndef CONFIG_HAVE_COPY_THREAD_TLS /* For compatibility with architectures that call do_fork directly rather than * using the syscall entry points below. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { return _do_fork(clone_flags, stack_start, stack_size, parent_tidptr, child_tidptr, 0); } #endif /* * Create a kernel thread. 
*/ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, (unsigned long)arg, NULL, NULL, 0); } #ifdef __ARCH_WANT_SYS_FORK SYSCALL_DEFINE0(fork) { #ifdef CONFIG_MMU return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); #else /* can not support in nommu mode */ return -EINVAL; #endif } #endif #ifdef __ARCH_WANT_SYS_VFORK SYSCALL_DEFINE0(vfork) { return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL, NULL, 0); } #endif #ifdef __ARCH_WANT_SYS_CLONE #ifdef CONFIG_CLONE_BACKWARDS SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, unsigned long, tls, int __user *, child_tidptr) #elif defined(CONFIG_CLONE_BACKWARDS2) SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #elif defined(CONFIG_CLONE_BACKWARDS3) SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, int, stack_size, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #endif { return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); } #endif #ifndef ARCH_MIN_MMSTRUCT_ALIGN #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif static void sighand_ctor(void *data) { struct sighand_struct *sighand = data; spin_lock_init(&sighand->siglock); init_waitqueue_head(&sighand->signalfd_wqh); } void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| SLAB_NOTRACK, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct 
files_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); /* * FIXME! The "sizeof(struct mm_struct)" currently includes the * whole struct cpumask for the OFFSTACK case. We could change * this to *only* allocate as much of it as required by the * maximum number of CPU's we can ever have. The cpumask_allocation * is at the end of the structure, exactly for that reason. */ mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); mmap_init(); nsproxy_cache_init(); } /* * Check constraints on flags passed to the unshare system call. */ static int check_unshare_flags(unsigned long unshare_flags) { if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| CLONE_NEWUSER|CLONE_NEWPID)) return -EINVAL; /* * Not implemented, but pretend it works if there is nothing * to unshare. Note that unsharing the address space or the * signal handlers also need to unshare the signal queues (aka * CLONE_THREAD). 
*/ if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { if (!thread_group_empty(current)) return -EINVAL; } if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { if (atomic_read(&current->sighand->count) > 1) return -EINVAL; } if (unshare_flags & CLONE_VM) { if (!current_is_single_threaded()) return -EINVAL; } return 0; } /* * Unshare the filesystem structure if it is being shared */ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) { struct fs_struct *fs = current->fs; if (!(unshare_flags & CLONE_FS) || !fs) return 0; /* don't need lock here; in the worst case we'll do useless copy */ if (fs->users == 1) return 0; *new_fsp = copy_fs_struct(fs); if (!*new_fsp) return -ENOMEM; return 0; } /* * Unshare file descriptor table if it is being shared */ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) { struct files_struct *fd = current->files; int error = 0; if ((unshare_flags & CLONE_FILES) && (fd && atomic_read(&fd->count) > 1)) { *new_fdp = dup_fd(fd, &error); if (!*new_fdp) return error; } return 0; } /* * unshare allows a process to 'unshare' part of the process * context which was originally shared using clone. copy_* * functions used by do_fork() cannot be used here directly * because they modify an inactive task_struct that is being * constructed. Here we are modifying the current, active, * task_struct. */ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) { struct fs_struct *fs, *new_fs = NULL; struct files_struct *fd, *new_fd = NULL; struct cred *new_cred = NULL; struct nsproxy *new_nsproxy = NULL; int do_sysvsem = 0; int err; /* * If unsharing a user namespace must also unshare the thread group * and unshare the filesystem root and working directories. */ if (unshare_flags & CLONE_NEWUSER) unshare_flags |= CLONE_THREAD | CLONE_FS; /* * If unsharing vm, must also unshare signal handlers. 
*/ if (unshare_flags & CLONE_VM) unshare_flags |= CLONE_SIGHAND; /* * If unsharing a signal handlers, must also unshare the signal queues. */ if (unshare_flags & CLONE_SIGHAND) unshare_flags |= CLONE_THREAD; /* * If unsharing namespace, must also unshare filesystem information. */ if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; /* * CLONE_NEWIPC must also detach from the undolist: after switching * to a new ipc namespace, the semaphore arrays from the old * namespace are unreachable. */ if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) do_sysvsem = 1; err = unshare_fs(unshare_flags, &new_fs); if (err) goto bad_unshare_out; err = unshare_fd(unshare_flags, &new_fd); if (err) goto bad_unshare_cleanup_fs; err = unshare_userns(unshare_flags, &new_cred); if (err) goto bad_unshare_cleanup_fd; err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_cred, new_fs); if (err) goto bad_unshare_cleanup_cred; if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { if (do_sysvsem) { /* * CLONE_SYSVSEM is equivalent to sys_exit(). */ exit_sem(current); } if (unshare_flags & CLONE_NEWIPC) { /* Orphan segments in old ns (see sem above). */ exit_shm(current); shm_init_task(current); } if (new_nsproxy) switch_task_namespaces(current, new_nsproxy); task_lock(current); if (new_fs) { fs = current->fs; spin_lock(&fs->lock); current->fs = new_fs; if (--fs->users) new_fs = NULL; else new_fs = fs; spin_unlock(&fs->lock); } if (new_fd) { fd = current->files; current->files = new_fd; new_fd = fd; } task_unlock(current); if (new_cred) { /* Install the new user namespace */ commit_creds(new_cred); new_cred = NULL; } } bad_unshare_cleanup_cred: if (new_cred) put_cred(new_cred); bad_unshare_cleanup_fd: if (new_fd) put_files_struct(new_fd); bad_unshare_cleanup_fs: if (new_fs) free_fs_struct(new_fs); bad_unshare_out: return err; } /* * Helper to unshare the files of the current task. 
* We don't want to expose copy_files internals to * the exec layer of the kernel. */ int unshare_files(struct files_struct **displaced) { struct task_struct *task = current; struct files_struct *copy = NULL; int error; error = unshare_fd(CLONE_FILES, &copy); if (error || !copy) { *displaced = NULL; return error; } *displaced = task->files; task_lock(task); task->files = copy; task_unlock(task); return 0; } int sysctl_max_threads(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int ret; int threads = max_threads; int min = MIN_THREADS; int max = MAX_THREADS; t = *table; t.data = &threads; t.extra1 = &min; t.extra2 = &max; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || !write) return ret; set_max_threads(threads); return 0; }
gpl-2.0
Taeung/tip
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
12
20194
/* * Copyright (c) 2017, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_verbs.h> #include <linux/mlx5/fs.h> #include "en.h" #include "ipoib.h" #define IB_DEFAULT_Q_KEY 0xb1b #define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9 static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, .ndo_get_stats64 = mlx5i_get_stats, .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, .ndo_do_ioctl = mlx5i_ioctl, }; /* IPoIB mlx5 netdev profile */ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); /* RQ size in ipoib by default is 512 */ params->log_rq_mtu_frames = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE; params->lro_en = false; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; } /* Called directly after IPoIB netdevice was created to initialize SW structs */ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); u16 max_mtu; int err; err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); if (err) return err; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); netdev->mtu = max_mtu; mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params, mlx5e_get_netdev_max_channels(netdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); mlx5e_timestamp_init(priv); /* netdev init */ netdev->hw_features |= NETIF_F_SG; netdev->hw_features |= NETIF_F_IP_CSUM; netdev->hw_features |= NETIF_F_IPV6_CSUM; netdev->hw_features |= NETIF_F_GRO; netdev->hw_features |= NETIF_F_TSO; netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; netdev->hw_features |= NETIF_F_RXHASH; netdev->netdev_ops = &mlx5i_netdev_ops; netdev->ethtool_ops = &mlx5i_ethtool_ops; return 0; } /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ void mlx5i_cleanup(struct mlx5e_priv *priv) { mlx5e_netdev_cleanup(priv->netdev, priv); } static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) { int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); struct mlx5e_sw_stats s = { 0 }; int i, j; for (i = 0; i < max_nch; i++) { struct mlx5e_channel_stats *channel_stats; struct mlx5e_rq_stats *rq_stats; channel_stats = &priv->channel_stats[i]; rq_stats = &channel_stats->rq; s.rx_packets += rq_stats->packets; s.rx_bytes += rq_stats->bytes; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; s.tx_packets += sq_stats->packets; s.tx_bytes += sq_stats->bytes; s.tx_queue_dropped += sq_stats->dropped; } } 
memcpy(&priv->stats.sw, &s, sizeof(s)); } void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5e_sw_stats *sstats = &priv->stats.sw; mlx5i_grp_sw_update_stats(priv); stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; stats->tx_bytes = sstats->tx_bytes; stats->tx_dropped = sstats->tx_queue_dropped; } int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; struct mlx5_core_qp *qp = &ipriv->qp; struct mlx5_qp_context *context; int ret; /* QP states */ context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return -ENOMEM; context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); context->pri_path.port = 1; context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index); context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); if (ret) { mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); goto err_qp_modify_to_err; } memset(context, 0, sizeof(*context)); ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); if (ret) { mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); goto err_qp_modify_to_err; } ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); if (ret) { mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); goto err_qp_modify_to_err; } kfree(context); return 0; err_qp_modify_to_err: mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp); kfree(context); return ret; } void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_qp_context context; int err; err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp); if (err) mlx5_core_err(mdev, "Failed to modify qp 2RST, err: 
%d\n", err); } #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { u32 *in = NULL; void *addr_path; int ret = 0; int inlen; void *qpc; inlen = MLX5_ST_SZ_BYTES(create_qp_in); in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, MLX5_QP_ENHANCED_ULP_STATELESS_MODE); addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); MLX5_SET(ads, addr_path, vhca_port_num, 1); MLX5_SET(ads, addr_path, grh, 1); ret = mlx5_core_create_qp(mdev, qp, in, inlen); if (ret) { mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret); goto out; } out: kvfree(in); return ret; } void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { mlx5_core_destroy_qp(mdev, qp); } static int mlx5i_init_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; int err; err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); if (err) { mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); return err; } err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); goto err_destroy_underlay_qp; } return 0; err_destroy_underlay_qp: mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); return err; } static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; mlx5e_destroy_tis(priv->mdev, priv->tisn[0]); mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); } static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) { struct ttc_params ttc_params = {}; int tt, err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); if (!priv->fs.ns) return -EINVAL; err = mlx5e_arfs_create_tables(priv); if (err) { netdev_err(priv->netdev, "Failed to create 
arfs tables, err=%d\n", err); priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } mlx5e_set_ttc_basic_params(priv, &ttc_params); mlx5e_set_inner_ttc_ft_params(&ttc_params); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); if (err) { netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } mlx5e_set_ttc_ft_params(&ttc_params); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); goto err_destroy_inner_ttc_table; } return 0; err_destroy_inner_ttc_table: mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); return err; } static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); mlx5e_arfs_destroy_tables(priv); } static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); goto err_destroy_q_counters; } err = mlx5e_create_indirect_rqt(priv); if (err) goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) goto err_destroy_indirect_rqts; err = mlx5e_create_indirect_tirs(priv, true); if (err) goto err_destroy_direct_rqts; err = mlx5e_create_direct_tirs(priv); if (err) goto err_destroy_indirect_tirs; err = mlx5i_create_flow_steering(priv); if (err) goto err_destroy_direct_tirs; return 0; err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: mlx5e_destroy_indirect_tirs(priv, true); err_destroy_direct_rqts: 
mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); return err; } static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv, true); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); } static const struct mlx5e_profile mlx5i_nic_profile = { .init = mlx5i_init, .cleanup = mlx5i_cleanup, .init_tx = mlx5i_init_tx, .cleanup_tx = mlx5i_cleanup_tx, .init_rx = mlx5i_init_rx, .cleanup_rx = mlx5i_cleanup_rx, .enable = NULL, /* mlx5i_enable */ .disable = NULL, /* mlx5i_disable */ .update_stats = NULL, /* mlx5i_update_stats */ .update_carrier = NULL, /* no HW update in IB link */ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ .max_tc = MLX5I_MAX_NUM_TC, }; /* mlx5i netdev NDos */ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); struct mlx5e_channels new_channels = {}; struct mlx5e_params *params; int err = 0; mutex_lock(&priv->state_lock); params = &priv->channels.params; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { params->sw_mtu = new_mtu; netdev->mtu = params->sw_mtu; goto out; } new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto out; netdev->mtu = new_channels.params.sw_mtu; out: mutex_unlock(&priv->state_lock); return err; } int mlx5i_dev_init(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5i_priv *ipriv = priv->ppriv; /* Set dev address using underlay QP */ dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff; dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff; 
dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff; /* Add QPN to net-device mapping to HT */ mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn); return 0; } int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mlx5e_priv *priv = mlx5i_epriv(dev); switch (cmd) { case SIOCSHWTSTAMP: return mlx5e_hwstamp_set(priv, ifr); case SIOCGHWTSTAMP: return mlx5e_hwstamp_get(priv, ifr); default: return -EOPNOTSUPP; } } void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5i_priv *ipriv = priv->ppriv; mlx5i_uninit_underlay_qp(priv); /* Delete QPN to net-device mapping from HT */ mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn); } static int mlx5i_open(struct net_device *netdev) { struct mlx5e_priv *epriv = mlx5i_epriv(netdev); struct mlx5i_priv *ipriv = epriv->ppriv; struct mlx5_core_dev *mdev = epriv->mdev; int err; mutex_lock(&epriv->state_lock); set_bit(MLX5E_STATE_OPENED, &epriv->state); err = mlx5i_init_underlay_qp(epriv); if (err) { mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err); goto err_clear_state_opened_flag; } err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn); if (err) { mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err); goto err_reset_qp; } err = mlx5e_open_channels(epriv, &epriv->channels); if (err) goto err_remove_fs_underlay_qp; mlx5e_refresh_tirs(epriv, false); mlx5e_activate_priv_channels(epriv); mutex_unlock(&epriv->state_lock); return 0; err_remove_fs_underlay_qp: mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); err_reset_qp: mlx5i_uninit_underlay_qp(epriv); err_clear_state_opened_flag: clear_bit(MLX5E_STATE_OPENED, &epriv->state); mutex_unlock(&epriv->state_lock); return err; } static int mlx5i_close(struct net_device *netdev) { struct mlx5e_priv *epriv = mlx5i_epriv(netdev); struct mlx5i_priv *ipriv = epriv->ppriv; struct mlx5_core_dev *mdev = epriv->mdev; /* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves 
close&open failed. */ mutex_lock(&epriv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &epriv->state)) goto unlock; clear_bit(MLX5E_STATE_OPENED, &epriv->state); netif_carrier_off(epriv->netdev); mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); mlx5e_deactivate_priv_channels(epriv); mlx5e_close_channels(&epriv->channels); mlx5i_uninit_underlay_qp(epriv); unlock: mutex_unlock(&epriv->state_lock); return 0; } /* IPoIB RDMA netdev callbacks */ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca, union ib_gid *gid, u16 lid, int set_qkey, u32 qkey) { struct mlx5e_priv *epriv = mlx5i_epriv(netdev); struct mlx5_core_dev *mdev = epriv->mdev; struct mlx5i_priv *ipriv = epriv->ppriv; int err; mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn); if (err) mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); if (set_qkey) { mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n", netdev->name, qkey); ipriv->qkey = qkey; } return err; } static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, union ib_gid *gid, u16 lid) { struct mlx5e_priv *epriv = mlx5i_epriv(netdev); struct mlx5_core_dev *mdev = epriv->mdev; struct mlx5i_priv *ipriv = epriv->ppriv; int err; mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); if (err) mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); return err; } static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb, struct ib_ah *address, u32 dqpn) { struct mlx5e_priv *epriv = mlx5i_epriv(dev); struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)]; struct mlx5_ib_ah *mah = to_mah(address); struct mlx5i_priv *ipriv = epriv->ppriv; return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey); } static void mlx5i_set_pkey_index(struct net_device *netdev, int id) 
{ struct mlx5i_priv *ipriv = netdev_priv(netdev); ipriv->pkey_index = (u16)id; } static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) { if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) return -EOPNOTSUPP; if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n"); return -EOPNOTSUPP; } return 0; } static void mlx5_rdma_netdev_free(struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); struct mlx5i_priv *ipriv = priv->ppriv; const struct mlx5e_profile *profile = priv->profile; mlx5e_detach_netdev(priv); profile->cleanup(priv); if (!ipriv->sub_interface) { mlx5i_pkey_qpn_ht_cleanup(netdev); mlx5e_destroy_mdev_resources(priv->mdev); } } static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev) { return mdev->mlx5e_res.pdn != 0; } static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev) { if (mlx5_is_sub_interface(mdev)) return mlx5i_pkey_get_profile(); return &mlx5i_nic_profile; } static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num, struct net_device *netdev, void *param) { struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param; const struct mlx5e_profile *prof = mlx5_get_profile(mdev); struct mlx5i_priv *ipriv; struct mlx5e_priv *epriv; struct rdma_netdev *rn; int err; ipriv = netdev_priv(netdev); epriv = mlx5i_epriv(netdev); ipriv->sub_interface = mlx5_is_sub_interface(mdev); if (!ipriv->sub_interface) { err = mlx5i_pkey_qpn_ht_init(netdev); if (err) { mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n"); return err; } /* This should only be called once per mdev */ err = mlx5e_create_mdev_resources(mdev); if (err) goto destroy_ht; } prof->init(mdev, netdev, prof, ipriv); mlx5e_attach_netdev(epriv); netif_carrier_off(netdev); /* set rdma_netdev func pointers */ rn = &ipriv->rn; rn->hca = ibdev; rn->send = mlx5i_xmit; rn->attach_mcast = mlx5i_attach_mcast; rn->detach_mcast = mlx5i_detach_mcast; rn->set_id = 
mlx5i_set_pkey_index; netdev->priv_destructor = mlx5_rdma_netdev_free; netdev->needs_free_netdev = 1; return 0; destroy_ht: mlx5i_pkey_qpn_ht_cleanup(netdev); return err; } int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params) { int nch; int rc; rc = mlx5i_check_required_hca_cap(mdev); if (rc) return rc; nch = mlx5e_get_max_num_channels(mdev); *params = (struct rdma_netdev_alloc_params){ .sizeof_priv = sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), .txqs = nch * MLX5E_MAX_NUM_TC, .rxqs = nch, .param = mdev, .initialize_rdma_netdev = mlx5_rdma_setup_rn, }; return 0; } EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
gpl-2.0
jwakely/gcc
gcc/testsuite/c-c++-common/gomp/declare-variant-5.c
12
1361
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */ /* { dg-additional-options "-mavx2" } */ typedef float __v4sf __attribute__((vector_size (16))); typedef int __v4si __attribute__((vector_size (16))); typedef float __v8sf __attribute__((vector_size (32))); typedef int __v8si __attribute__((vector_size (32))); __v4si f1 (__v4sf, __v4sf, float *); __v8si f2 (__v8sf, __v8sf, float *); __v4si f3 (__v4si, int, __v4si); #pragma omp declare variant (f1) match (construct={parallel,for,simd(simdlen(4),notinbranch,uniform(z),aligned(z:4 * sizeof (*z)))}) #pragma omp declare variant (f2) match (construct={for,simd(uniform(z),simdlen(8),notinbranch)}) int f4 (float x, float y, float *z); #pragma omp declare variant (f3) match (construct={simd(simdlen(4),inbranch,linear(y:1))}) int f5 (int x, int y); void test (int *x, float *y, float *z, float *w) { #pragma omp parallel #pragma omp for simd aligned (w:4 * sizeof (float)) for (int i = 0; i < 1024; i++) x[i] = f4 (y[i], z[i], w); #pragma omp parallel for simd aligned (w:4 * sizeof (float)) simdlen(4) for (int i = 1024; i < 2048; i++) x[i] = f4 (y[i], z[i], w); #pragma omp simd aligned (w:4 * sizeof (float)) for (int i = 2048; i < 4096; i++) x[i] = f4 (y[i], z[i], w); #pragma omp simd for (int i = 4096; i < 8192; i++) if (x[i] > 10) x[i] = f5 (x[i], i); }
gpl-2.0
Ced2911/massive-tyrion
utils/roq2/jpeg/rdbmp.c
12
15101
/* * rdbmp.c * * Copyright (C) 1994-1995, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains routines to read input images in Microsoft "BMP" * format (MS Windows 3.x, OS/2 1.x, and OS/2 2.x flavors). * Currently, only 8-bit and 24-bit images are supported, not 1-bit or * 4-bit (feeding such low-depth images into JPEG would be silly anyway). * Also, we don't support RLE-compressed files. * * These routines may need modification for non-Unix environments or * specialized applications. As they stand, they assume input from * an ordinary stdio stream. They further assume that reading begins * at the start of the file; start_input may need work if the * user interface has already read some data (e.g., to determine that * the file is indeed BMP format). * * This code contributed by James Arthur Boucher. */ #include "cdjpeg.h" /* Common decls for cjpeg/djpeg applications */ #ifdef BMP_SUPPORTED /* Macros to deal with unsigned chars as efficiently as compiler allows */ #ifdef HAVE_UNSIGNED_CHAR typedef unsigned char U_CHAR; #define UCH(x) ((int) (x)) #else /* !HAVE_UNSIGNED_CHAR */ #ifdef CHAR_IS_UNSIGNED typedef char U_CHAR; #define UCH(x) ((int) (x)) #else typedef char U_CHAR; #define UCH(x) ((int) (x) & 0xFF) #endif #endif /* HAVE_UNSIGNED_CHAR */ #define ReadOK(file,buffer,len) (JFREAD(file,buffer,len) == ((size_t) (len))) /* Private version of data source object */ typedef struct _bmp_source_struct * bmp_source_ptr; typedef struct _bmp_source_struct { struct cjpeg_source_struct pub; /* public fields */ j_compress_ptr cinfo; /* back link saves passing separate parm */ JSAMPARRAY colormap; /* BMP colormap (converted to my format) */ jvirt_sarray_ptr whole_image; /* Needed to reverse row order */ JDIMENSION source_row; /* Current source row number */ JDIMENSION row_width; /* Physical width of scanlines in file */ int bits_per_pixel; /* remembers 
8- or 24-bit format */ } bmp_source_struct; LOCAL int read_byte (bmp_source_ptr sinfo) /* Read next byte from BMP file */ { register FILE *infile = sinfo->pub.input_file; register int c; if ((c = getc(infile)) == EOF) ERREXIT(sinfo->cinfo, JERR_INPUT_EOF); return c; } LOCAL void read_colormap (bmp_source_ptr sinfo, int cmaplen, int mapentrysize) /* Read the colormap from a BMP file */ { int i; switch (mapentrysize) { case 3: /* BGR format (occurs in OS/2 files) */ for (i = 0; i < cmaplen; i++) { sinfo->colormap[2][i] = (JSAMPLE) read_byte(sinfo); sinfo->colormap[1][i] = (JSAMPLE) read_byte(sinfo); sinfo->colormap[0][i] = (JSAMPLE) read_byte(sinfo); } break; case 4: /* BGR0 format (occurs in MS Windows files) */ for (i = 0; i < cmaplen; i++) { sinfo->colormap[2][i] = (JSAMPLE) read_byte(sinfo); sinfo->colormap[1][i] = (JSAMPLE) read_byte(sinfo); sinfo->colormap[0][i] = (JSAMPLE) read_byte(sinfo); (void) read_byte(sinfo); } break; default: ERREXIT(sinfo->cinfo, JERR_BMP_BADCMAP); break; } } /* * Read one row of pixels. * The image has been read into the whole_image array, but is otherwise * unprocessed. We must read it out in top-to-bottom row order, and if * it is an 8-bit image, we must expand colormapped pixels to 24bit format. 
*/ METHODDEF JDIMENSION get_8bit_row (j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading 8-bit colormap indexes */ { bmp_source_ptr source = (bmp_source_ptr) sinfo; register JSAMPARRAY colormap = source->colormap; JSAMPARRAY image_ptr; register int t; register JSAMPROW inptr, outptr; register JDIMENSION col; /* Fetch next row from virtual array */ source->source_row--; image_ptr = (*cinfo->mem->access_virt_sarray) ((j_common_ptr) cinfo, source->whole_image, source->source_row, (JDIMENSION) 1, FALSE); /* Expand the colormap indexes to real data */ inptr = image_ptr[0]; outptr = source->pub.buffer[0]; for (col = cinfo->image_width; col > 0; col--) { t = GETJSAMPLE(*inptr++); *outptr++ = colormap[0][t]; /* can omit GETJSAMPLE() safely */ *outptr++ = colormap[1][t]; *outptr++ = colormap[2][t]; } return 1; } METHODDEF JDIMENSION get_24bit_row (j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading 24-bit pixels */ { bmp_source_ptr source = (bmp_source_ptr) sinfo; JSAMPARRAY image_ptr; register JSAMPROW inptr, outptr; register JDIMENSION col; /* Fetch next row from virtual array */ source->source_row--; image_ptr = (*cinfo->mem->access_virt_sarray) ((j_common_ptr) cinfo, source->whole_image, source->source_row, (JDIMENSION) 1, FALSE); /* Transfer data. Note source values are in BGR order * (even though Microsoft's own documents say the opposite). */ inptr = image_ptr[0]; outptr = source->pub.buffer[0]; for (col = cinfo->image_width; col > 0; col--) { outptr[2] = *inptr++; /* can omit GETJSAMPLE() safely */ outptr[1] = *inptr++; outptr[0] = *inptr++; outptr += 3; } return 1; } /* * This method loads the image into whole_image during the first call on * get_pixel_rows. The get_pixel_rows pointer is then adjusted to call * get_8bit_row or get_24bit_row on subsequent calls. 
*/ METHODDEF JDIMENSION preload_image (j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { bmp_source_ptr source = (bmp_source_ptr) sinfo; register FILE *infile = source->pub.input_file; register int c; register JSAMPROW out_ptr; JSAMPARRAY image_ptr; JDIMENSION row, col; cd_progress_ptr progress = (cd_progress_ptr) cinfo->progress; /* Read the data into a virtual array in input-file row order. */ for (row = 0; row < cinfo->image_height; row++) { if (progress != NULL) { progress->pub.pass_counter = (long) row; progress->pub.pass_limit = (long) cinfo->image_height; (*progress->pub.progress_monitor) ((j_common_ptr) cinfo); } image_ptr = (*cinfo->mem->access_virt_sarray) ((j_common_ptr) cinfo, source->whole_image, row, (JDIMENSION) 1, TRUE); out_ptr = image_ptr[0]; for (col = source->row_width; col > 0; col--) { /* inline copy of read_byte() for speed */ if ((c = getc(infile)) == EOF) ERREXIT(cinfo, JERR_INPUT_EOF); *out_ptr++ = (JSAMPLE) c; } } if (progress != NULL) progress->completed_extra_passes++; /* Set up to read from the virtual array in top-to-bottom order */ switch (source->bits_per_pixel) { case 8: source->pub.get_pixel_rows = get_8bit_row; break; case 24: source->pub.get_pixel_rows = get_24bit_row; break; default: ERREXIT(cinfo, JERR_BMP_BADDEPTH); } source->source_row = cinfo->image_height; /* And read the first row */ return (*source->pub.get_pixel_rows) (cinfo, sinfo); } /* * Read the file header; return image size and component count. 
*/ METHODDEF void start_input_bmp (j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { bmp_source_ptr source = (bmp_source_ptr) sinfo; U_CHAR bmpfileheader[14]; U_CHAR bmpinfoheader[64]; #define GET_2B(array,offset) ((unsigned int) UCH(array[offset]) + \ (((unsigned int) UCH(array[offset+1])) << 8)) #define GET_4B(array,offset) ((INT32) UCH(array[offset]) + \ (((INT32) UCH(array[offset+1])) << 8) + \ (((INT32) UCH(array[offset+2])) << 16) + \ (((INT32) UCH(array[offset+3])) << 24)) INT32 bfOffBits; INT32 headerSize; INT32 biWidth = 0; /* initialize to avoid compiler warning */ INT32 biHeight = 0; unsigned int biPlanes; INT32 biCompression; INT32 biXPelsPerMeter,biYPelsPerMeter; INT32 biClrUsed = 0; int mapentrysize = 0; /* 0 indicates no colormap */ INT32 bPad; JDIMENSION row_width; /* Read and verify the bitmap file header */ if (! ReadOK(source->pub.input_file, bmpfileheader, 14)) ERREXIT(cinfo, JERR_INPUT_EOF); if (GET_2B(bmpfileheader,0) != 0x4D42) /* 'BM' */ ERREXIT(cinfo, JERR_BMP_NOT); bfOffBits = (INT32) GET_4B(bmpfileheader,10); /* We ignore the remaining fileheader fields */ /* The infoheader might be 12 bytes (OS/2 1.x), 40 bytes (Windows), * or 64 bytes (OS/2 2.x). Check the first 4 bytes to find out which. */ if (! ReadOK(source->pub.input_file, bmpinfoheader, 4)) ERREXIT(cinfo, JERR_INPUT_EOF); headerSize = (INT32) GET_4B(bmpinfoheader,0); if (headerSize < 12 || headerSize > 64) ERREXIT(cinfo, JERR_BMP_BADHEADER); if (! 
ReadOK(source->pub.input_file, bmpinfoheader+4, headerSize-4)) ERREXIT(cinfo, JERR_INPUT_EOF); switch ((int) headerSize) { case 12: /* Decode OS/2 1.x header (Microsoft calls this a BITMAPCOREHEADER) */ biWidth = (INT32) GET_2B(bmpinfoheader,4); biHeight = (INT32) GET_2B(bmpinfoheader,6); biPlanes = GET_2B(bmpinfoheader,8); source->bits_per_pixel = (int) GET_2B(bmpinfoheader,10); switch (source->bits_per_pixel) { case 8: /* colormapped image */ mapentrysize = 3; /* OS/2 uses RGBTRIPLE colormap */ TRACEMS2(cinfo, 1, JTRC_BMP_OS2_MAPPED, (int) biWidth, (int) biHeight); break; case 24: /* RGB image */ TRACEMS2(cinfo, 1, JTRC_BMP_OS2, (int) biWidth, (int) biHeight); break; default: ERREXIT(cinfo, JERR_BMP_BADDEPTH); break; } if (biPlanes != 1) ERREXIT(cinfo, JERR_BMP_BADPLANES); break; case 40: case 64: /* Decode Windows 3.x header (Microsoft calls this a BITMAPINFOHEADER) */ /* or OS/2 2.x header, which has additional fields that we ignore */ biWidth = GET_4B(bmpinfoheader,4); biHeight = GET_4B(bmpinfoheader,8); biPlanes = GET_2B(bmpinfoheader,12); source->bits_per_pixel = (int) GET_2B(bmpinfoheader,14); biCompression = GET_4B(bmpinfoheader,16); biXPelsPerMeter = GET_4B(bmpinfoheader,24); biYPelsPerMeter = GET_4B(bmpinfoheader,28); biClrUsed = GET_4B(bmpinfoheader,32); /* biSizeImage, biClrImportant fields are ignored */ switch (source->bits_per_pixel) { case 8: /* colormapped image */ mapentrysize = 4; /* Windows uses RGBQUAD colormap */ TRACEMS2(cinfo, 1, JTRC_BMP_MAPPED, (int) biWidth, (int) biHeight); break; case 24: /* RGB image */ TRACEMS2(cinfo, 1, JTRC_BMP, (int) biWidth, (int) biHeight); break; default: ERREXIT(cinfo, JERR_BMP_BADDEPTH); break; } if (biPlanes != 1) ERREXIT(cinfo, JERR_BMP_BADPLANES); if (biCompression != 0) ERREXIT(cinfo, JERR_BMP_COMPRESSED); if (biXPelsPerMeter > 0 && biYPelsPerMeter > 0) { /* Set JFIF density parameters from the BMP data */ cinfo->X_density = (UINT16) (biXPelsPerMeter/100); /* 100 cm per meter */ cinfo->Y_density = 
(UINT16) (biYPelsPerMeter/100); cinfo->density_unit = 2; /* dots/cm */ } break; default: ERREXIT(cinfo, JERR_BMP_BADHEADER); break; } /* Compute distance to bitmap data --- will adjust for colormap below */ bPad = bfOffBits - (headerSize + 14); /* Read the colormap, if any */ if (mapentrysize > 0) { if (biClrUsed <= 0) biClrUsed = 256; /* assume it's 256 */ else if (biClrUsed > 256) ERREXIT(cinfo, JERR_BMP_BADCMAP); /* Allocate space to store the colormap */ source->colormap = (*cinfo->mem->alloc_sarray) ((j_common_ptr) cinfo, JPOOL_IMAGE, (JDIMENSION) biClrUsed, (JDIMENSION) 3); /* and read it from the file */ read_colormap(source, (int) biClrUsed, mapentrysize); /* account for size of colormap */ bPad -= biClrUsed * mapentrysize; } /* Skip any remaining pad bytes */ if (bPad < 0) /* incorrect bfOffBits value? */ ERREXIT(cinfo, JERR_BMP_BADHEADER); while (--bPad >= 0) { (void) read_byte(source); } /* Compute row width in file, including padding to 4-byte boundary */ if (source->bits_per_pixel == 24) row_width = (JDIMENSION) (biWidth * 3); else row_width = (JDIMENSION) biWidth; while ((row_width & 3) != 0) row_width++; source->row_width = row_width; /* Allocate space for inversion array, prepare for preload pass */ source->whole_image = (*cinfo->mem->request_virt_sarray) ((j_common_ptr) cinfo, JPOOL_IMAGE, FALSE, row_width, (JDIMENSION) biHeight, (JDIMENSION) 1); source->pub.get_pixel_rows = preload_image; if (cinfo->progress != NULL) { cd_progress_ptr progress = (cd_progress_ptr) cinfo->progress; progress->total_extra_passes++; /* count file input as separate pass */ } /* Allocate one-row buffer for returned data */ source->pub.buffer = (*cinfo->mem->alloc_sarray) ((j_common_ptr) cinfo, JPOOL_IMAGE, (JDIMENSION) (biWidth * 3), (JDIMENSION) 1); source->pub.buffer_height = 1; cinfo->in_color_space = JCS_RGB; cinfo->input_components = 3; cinfo->data_precision = 8; cinfo->image_width = (JDIMENSION) biWidth; cinfo->image_height = (JDIMENSION) biHeight; } /* * Finish up 
at the end of the file. */ METHODDEF void finish_input_bmp (j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { /* no work */ } /* * The module selection routine for BMP format input. */ GLOBAL cjpeg_source_ptr jinit_read_bmp (j_compress_ptr cinfo) { bmp_source_ptr source; /* Create module interface object */ source = (bmp_source_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(bmp_source_struct)); source->cinfo = cinfo; /* make back link for subroutines */ /* Fill in method ptrs, except get_pixel_rows which start_input sets */ source->pub.start_input = start_input_bmp; source->pub.finish_input = finish_input_bmp; return (cjpeg_source_ptr) source; } #endif /* BMP_SUPPORTED */
gpl-2.0
Silverblade-nz/Alpha15Copy
drivers/sensorhub/brcm/bbdpl/bbd_sio.c
268
3808
/* * Copyright 2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation (the "GPL"). * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * A copy of the GPL is available at * http://www.broadcom.com/licenses/GPLv2.php, or by writing to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. * * The BBD (Broadcom Bridge Driver) * * tabstop = 8 */ #include "bbd_internal.h" #include "transport/bbd_engine.h" #include "transport/bbd_bridge_c.h" /** * initialize sio-specific variables * * @pbbd_dev: bbd driver pointer * * @return: 0 = success, others = failure */ void bbd_sio_init_vars(struct bbd_device *dev, int* result) { struct bbd_sio_device *p = 0; if (*result) return; p = bbd_alloc(sizeof(struct bbd_sio_device)); if (!p) { *result = -ENOMEM; return; } bbd_base_init_vars(dev, &p->base, result); if (*result) { bbd_free(p); return; } dev->bbd_ptr[BBD_MINOR_SIO] = p; p->base.name = "sio"; } struct bbd_sio_device* bbd_sio_ptr(void) { if (!gpbbd_dev) return 0; return gpbbd_dev->bbd_ptr[BBD_MINOR_SIO]; } struct bbd_base* bbd_sio_ptr_base(void) { struct bbd_sio_device *p = bbd_sio_ptr(); if (!p) return 0; return &p->base; } int bbd_sio_uninit(void) { struct bbd_sio_device* p = bbd_sio_ptr(); int freed = 0; if (p) { freed = bbd_base_uninit(&p->base); bbd_free(p); gpbbd_dev->bbd_ptr[BBD_MINOR_SIO] = 0; } return freed; } /* We are passed a pointer to the bridge * if we're active, zero otherwise * Could consider a pass-thru flag * from /dev/bbd_control for testing. 
*/ void bbd_sio_install(struct BbdBridge* bbd) { FUNC(); } /** * open bbd driver */ int bbd_sio_open(struct inode *inode, struct file *filp) { dprint(KERN_INFO "BBD:%s()\n", __func__); if (!gpbbd_dev) return -EFAULT; filp->private_data = gpbbd_dev; return 0; } /** * close bbd driver */ int bbd_sio_release(struct inode *inode, struct file *filp) { if (!gpbbd_dev) return -EFAULT; return bbd_base_reinit(BBD_MINOR_SIO); } /** * read signal/data from bbd_list_head to user */ ssize_t bbd_sio_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) { return bbd_read(BBD_MINOR_SIO, buf, size, true); } /** * send data from user space to external driver */ ssize_t bbd_sio_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos) { ssize_t len = bbd_write(BBD_MINOR_SIO, buf, size, true); if (len != size) return len; // if (gpbbd_dev->sio_dummy) // assume this until the sub-driver is // // ready. { struct BbdEngine* pEng = bbd_engine(); struct BbdBridge* bbd = 0; if (pEng) { bbd = &pEng->bridge; BbdBridge_SetData(bbd, buf, size); } else len = -EINVAL; } // else // { // tickle the sub-device driver to send data? // } return len; } /** * this function is for tx sio */ unsigned int bbd_sio_poll(struct file *filp, poll_table * wait) { return bbd_poll(BBD_MINOR_SIO, filp, wait); }
gpl-2.0
pboettch/linux
net/vmw_vsock/vmci_transport_notify_qstate.c
268
11508
/* * VMware vSockets Driver * * Copyright (C) 2009-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/socket.h> #include <linux/stddef.h> #include <net/sock.h> #include "vmci_transport_notify.h" #define PKT_FIELD(vsk, field_name) \ (vmci_trans(vsk)->notify.pkt_q_state.field_name) static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk) { bool retval; u64 notify_limit; if (!PKT_FIELD(vsk, peer_waiting_write)) return false; /* When the sender blocks, we take that as a sign that the sender is * faster than the receiver. To reduce the transmit rate of the sender, * we delay the sending of the read notification by decreasing the * write_notify_window. The notification is delayed until the number of * bytes used in the queue drops below the write_notify_window. */ if (!PKT_FIELD(vsk, peer_waiting_write_detected)) { PKT_FIELD(vsk, peer_waiting_write_detected) = true; if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) { PKT_FIELD(vsk, write_notify_window) = PKT_FIELD(vsk, write_notify_min_window); } else { PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE; if (PKT_FIELD(vsk, write_notify_window) < PKT_FIELD(vsk, write_notify_min_window)) PKT_FIELD(vsk, write_notify_window) = PKT_FIELD(vsk, write_notify_min_window); } } notify_limit = vmci_trans(vsk)->consume_size - PKT_FIELD(vsk, write_notify_window); /* The notify_limit is used to delay notifications in the case where * flow control is enabled. 
Below the test is expressed in terms of * free space in the queue: if free_space > ConsumeSize - * write_notify_window then notify An alternate way of expressing this * is to rewrite the expression to use the data ready in the receive * queue: if write_notify_window > bufferReady then notify as * free_space == ConsumeSize - bufferReady. */ retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) > notify_limit; if (retval) { /* Once we notify the peer, we reset the detected flag so the * next wait will again cause a decrease in the window size. */ PKT_FIELD(vsk, peer_waiting_write_detected) = false; } return retval; } static void vmci_transport_handle_read(struct sock *sk, struct vmci_transport_packet *pkt, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src) { sk->sk_write_space(sk); } static void vmci_transport_handle_wrote(struct sock *sk, struct vmci_transport_packet *pkt, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src) { sk->sk_data_ready(sk); } static void vsock_block_update_write_window(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size) PKT_FIELD(vsk, write_notify_window) = min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE, vmci_trans(vsk)->consume_size); } static int vmci_transport_send_read_notification(struct sock *sk) { struct vsock_sock *vsk; bool sent_read; unsigned int retries; int err; vsk = vsock_sk(sk); sent_read = false; retries = 0; err = 0; if (vmci_transport_notify_waiting_write(vsk)) { /* Notify the peer that we have read, retrying the send on * failure up to our maximum value. XXX For now we just log * the failure, but later we should schedule a work item to * handle the resend until it succeeds. That would require * keeping track of work items in the vsk and cleaning them up * upon socket close. 
*/ while (!(vsk->peer_shutdown & RCV_SHUTDOWN) && !sent_read && retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) { err = vmci_transport_send_read(sk); if (err >= 0) sent_read = true; retries++; } if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read) pr_err("%p unable to send read notification to peer\n", sk); else PKT_FIELD(vsk, peer_waiting_write) = false; } return err; } static void vmci_transport_notify_pkt_socket_init(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE; PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE; PKT_FIELD(vsk, peer_waiting_write) = false; PKT_FIELD(vsk, peer_waiting_write_detected) = false; } static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk) { PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE; PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE; PKT_FIELD(vsk, peer_waiting_write) = false; PKT_FIELD(vsk, peer_waiting_write_detected) = false; } static int vmci_transport_notify_pkt_poll_in(struct sock *sk, size_t target, bool *data_ready_now) { struct vsock_sock *vsk = vsock_sk(sk); if (vsock_stream_has_data(vsk)) { *data_ready_now = true; } else { /* We can't read right now because there is nothing in the * queue. Ask for notifications when there is something to * read. */ if (sk->sk_state == SS_CONNECTED) vsock_block_update_write_window(sk); *data_ready_now = false; } return 0; } static int vmci_transport_notify_pkt_poll_out(struct sock *sk, size_t target, bool *space_avail_now) { s64 produce_q_free_space; struct vsock_sock *vsk = vsock_sk(sk); produce_q_free_space = vsock_stream_has_space(vsk); if (produce_q_free_space > 0) { *space_avail_now = true; return 0; } else if (produce_q_free_space == 0) { /* This is a connected socket but we can't currently send data. * Nothing else to do. 
*/ *space_avail_now = false; } return 0; } static int vmci_transport_notify_pkt_recv_init( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) { struct vsock_sock *vsk = vsock_sk(sk); data->consume_head = 0; data->produce_tail = 0; data->notify_on_block = false; if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) { PKT_FIELD(vsk, write_notify_min_window) = target + 1; if (PKT_FIELD(vsk, write_notify_window) < PKT_FIELD(vsk, write_notify_min_window)) { /* If the current window is smaller than the new * minimal window size, we need to reevaluate whether * we need to notify the sender. If the number of ready * bytes are smaller than the new window, we need to * send a notification to the sender before we block. */ PKT_FIELD(vsk, write_notify_window) = PKT_FIELD(vsk, write_notify_min_window); data->notify_on_block = true; } } return 0; } static int vmci_transport_notify_pkt_recv_pre_block( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) { int err = 0; vsock_block_update_write_window(sk); if (data->notify_on_block) { err = vmci_transport_send_read_notification(sk); if (err < 0) return err; data->notify_on_block = false; } return err; } static int vmci_transport_notify_pkt_recv_post_dequeue( struct sock *sk, size_t target, ssize_t copied, bool data_read, struct vmci_transport_recv_notify_data *data) { struct vsock_sock *vsk; int err; bool was_full = false; u64 free_space; vsk = vsock_sk(sk); err = 0; if (data_read) { smp_mb(); free_space = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair); was_full = free_space == copied; if (was_full) PKT_FIELD(vsk, peer_waiting_write) = true; err = vmci_transport_send_read_notification(sk); if (err < 0) return err; /* See the comment in * vmci_transport_notify_pkt_send_post_enqueue(). 
*/ sk->sk_data_ready(sk); } return err; } static int vmci_transport_notify_pkt_send_init( struct sock *sk, struct vmci_transport_send_notify_data *data) { data->consume_head = 0; data->produce_tail = 0; return 0; } static int vmci_transport_notify_pkt_send_post_enqueue( struct sock *sk, ssize_t written, struct vmci_transport_send_notify_data *data) { int err = 0; struct vsock_sock *vsk; bool sent_wrote = false; bool was_empty; int retries = 0; vsk = vsock_sk(sk); smp_mb(); was_empty = vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written; if (was_empty) { while (!(vsk->peer_shutdown & RCV_SHUTDOWN) && !sent_wrote && retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) { err = vmci_transport_send_wrote(sk); if (err >= 0) sent_wrote = true; retries++; } } if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) { pr_err("%p unable to send wrote notification to peer\n", sk); return err; } return err; } static void vmci_transport_notify_pkt_handle_pkt( struct sock *sk, struct vmci_transport_packet *pkt, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src, bool *pkt_processed) { bool processed = false; switch (pkt->type) { case VMCI_TRANSPORT_PACKET_TYPE_WROTE: vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src); processed = true; break; case VMCI_TRANSPORT_PACKET_TYPE_READ: vmci_transport_handle_read(sk, pkt, bottom_half, dst, src); processed = true; break; } if (pkt_processed) *pkt_processed = processed; } static void vmci_transport_notify_pkt_process_request(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size; if (vmci_trans(vsk)->consume_size < PKT_FIELD(vsk, write_notify_min_window)) PKT_FIELD(vsk, write_notify_min_window) = vmci_trans(vsk)->consume_size; } static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size; if 
(vmci_trans(vsk)->consume_size < PKT_FIELD(vsk, write_notify_min_window)) PKT_FIELD(vsk, write_notify_min_window) = vmci_trans(vsk)->consume_size; } static int vmci_transport_notify_pkt_recv_pre_dequeue( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) { return 0; /* NOP for QState. */ } static int vmci_transport_notify_pkt_send_pre_block( struct sock *sk, struct vmci_transport_send_notify_data *data) { return 0; /* NOP for QState. */ } static int vmci_transport_notify_pkt_send_pre_enqueue( struct sock *sk, struct vmci_transport_send_notify_data *data) { return 0; /* NOP for QState. */ } /* Socket always on control packet based operations. */ const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = { vmci_transport_notify_pkt_socket_init, vmci_transport_notify_pkt_socket_destruct, vmci_transport_notify_pkt_poll_in, vmci_transport_notify_pkt_poll_out, vmci_transport_notify_pkt_handle_pkt, vmci_transport_notify_pkt_recv_init, vmci_transport_notify_pkt_recv_pre_block, vmci_transport_notify_pkt_recv_pre_dequeue, vmci_transport_notify_pkt_recv_post_dequeue, vmci_transport_notify_pkt_send_init, vmci_transport_notify_pkt_send_pre_block, vmci_transport_notify_pkt_send_pre_enqueue, vmci_transport_notify_pkt_send_post_enqueue, vmci_transport_notify_pkt_process_request, vmci_transport_notify_pkt_process_negotiate, };
gpl-2.0
airidosas252/android_jellykernel_vee7
drivers/gpu/drm/radeon/radeon_atombios.c
524
104863
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include "atom-bits.h" /* from radeon_encoder.c */ extern uint32_t radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); extern void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps); /* from radeon_connector.c */ extern void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router); /* from radeon_legacy_encoder.c */ extern void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); /* local */ static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, u16 voltage_id, u16 *voltage); union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO info; struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2; struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, ATOM_GPIO_I2C_ASSIGMENT *gpio, u8 index) { /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ if ((rdev->family == CHIP_R420) || (rdev->family == CHIP_R423) || (rdev->family == CHIP_RV410)) { if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { gpio->ucClkMaskShift = 0x19; gpio->ucDataMaskShift = 0x18; } } /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((index == 7) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift 
= 8; gpio->ucDataEnShift = 8; gpio->ucDataY_Shift = 8; gpio->ucDataA_Shift = 8; } } /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((index == 4) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } } static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) { struct radeon_i2c_bus_rec i2c; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); i2c.en_clk_mask = (1 << gpio->ucClkEnShift); i2c.en_data_mask = (1 << gpio->ucDataEnShift); i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); i2c.y_data_mask = (1 << gpio->ucDataY_Shift); i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); i2c.a_data_mask = (1 << gpio->ucDataA_Shift); if (gpio->sucI2cId.sbfAccess.bfHW_Capable) i2c.hw_capable = true; else i2c.hw_capable = false; if (gpio->sucI2cId.ucAccess == 0xa0) i2c.mm_i2c = true; else i2c.mm_i2c = false; i2c.i2c_id = gpio->sucI2cId.ucAccess; if (i2c.mask_clk_reg) i2c.valid = true; else i2c.valid = false; return i2c; } static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, uint8_t id) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, 
size; int i, num_indices; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); if (gpio->sucI2cId.ucAccess == id) { i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); break; } } } return i2c; } void radeon_atombios_i2c_init(struct radeon_device *rdev) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, size; int i, num_indices; char stmp[32]; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } } } } static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, u8 id) { struct atom_context *ctx = rdev->mode_info.atom_context; struct radeon_gpio_rec gpio; int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); struct _ATOM_GPIO_PIN_LUT *gpio_info; ATOM_GPIO_PIN_ASSIGNMENT *pin; u16 data_offset, size; int i, num_indices; memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); gpio.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); num_indices = (size - 
sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); for (i = 0; i < num_indices; i++) { pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = pin->ucGPIO_ID; gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; gpio.mask = (1 << pin->ucGpioPinBitShift); gpio.valid = true; break; } } } return gpio; } static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev, struct radeon_gpio_rec *gpio) { struct radeon_hpd hpd; u32 reg; memset(&hpd, 0, sizeof(struct radeon_hpd)); if (ASIC_IS_DCE6(rdev)) reg = SI_DC_GPIO_HPD_A; else if (ASIC_IS_DCE4(rdev)) reg = EVERGREEN_DC_GPIO_HPD_A; else reg = AVIVO_DC_GPIO_HPD_A; hpd.gpio = *gpio; if (gpio->reg == reg) { switch(gpio->mask) { case (1 << 0): hpd.hpd = RADEON_HPD_1; break; case (1 << 8): hpd.hpd = RADEON_HPD_2; break; case (1 << 16): hpd.hpd = RADEON_HPD_3; break; case (1 << 24): hpd.hpd = RADEON_HPD_4; break; case (1 << 26): hpd.hpd = RADEON_HPD_5; break; case (1 << 28): hpd.hpd = RADEON_HPD_6; break; default: hpd.hpd = RADEON_HPD_NONE; break; } } else hpd.hpd = RADEON_HPD_NONE; return hpd; } static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t *line_mux, struct radeon_hpd *hpd) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x791e) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x826d)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* Asrock RS600 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x1849) && (dev->pdev->subsystem_device == 0x7941)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ if 
((dev->pdev->device == 0x796e) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x7302)) { if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) return false; } /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && (dev->pdev->subsystem_device == 0x2412)) { if (*connector_type == DRM_MODE_CONNECTOR_DVII) return false; } /* Falcon NW laptop lists vga ddc line for LVDS */ if ((dev->pdev->device == 0x5653) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x0291)) { if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { i2c_bus->valid = false; *line_mux = 53; } } /* HIS X1300 is DVI+VGA, not DVI+DVI */ if ((dev->pdev->device == 0x7146) && (dev->pdev->subsystem_vendor == 0x17af) && (dev->pdev->subsystem_device == 0x2058)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ if ((dev->pdev->device == 0x7142) && (dev->pdev->subsystem_vendor == 0x1458) && (dev->pdev->subsystem_device == 0x2134)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Funky macbooks */ if ((dev->pdev->device == 0x71C5) && (dev->pdev->subsystem_vendor == 0x106b) && (dev->pdev->subsystem_device == 0x0080)) { if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) return false; if (supported_device == ATOM_DEVICE_CRT2_SUPPORT) *line_mux = 0x90; } /* mac rv630, rv730, others */ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && (*connector_type == DRM_MODE_CONNECTOR_DVII)) { *connector_type = DRM_MODE_CONNECTOR_9PinDIN; *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; } /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01da)) { if (*connector_type == 
DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3600 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01e4)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3450 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x95C5) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01e2)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* some BIOSes seem to report DAC on HDMI - usually this is a board with * HDMI + VGA reporting as HDMI */ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { *connector_type = DRM_MODE_CONNECTOR_VGA; *line_mux = 0; } } /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port * on the laptop and a DVI port on the docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, * we drop the DVI port here since xrandr has no concept of * encoders and will try and drive both connectors * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. 
*/ if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { /* actually it's a DVI-D port not DVI-I */ *connector_type = DRM_MODE_CONNECTOR_DVID; return false; } } /* XFX Pine Group device rv730 reports no VGA DDC lines * even though they are wired up to record 0x93 */ if ((dev->pdev->device == 0x9498) && (dev->pdev->subsystem_vendor == 0x1682) && (dev->pdev->subsystem_device == 0x2452) && (i2c_bus->valid == false) && !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) { struct radeon_device *rdev = dev->dev_private; *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ if ((dev->pdev->device == 0x9802) && (dev->pdev->subsystem_vendor == 0x1734) && (dev->pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { *connector_type = DRM_MODE_CONNECTOR_DVII; *line_mux = 0x3103; } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } return true; } const int supported_devices_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_DVIA, DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Composite, DRM_MODE_CONNECTOR_LVDS, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_HDMIA, DRM_MODE_CONNECTOR_HDMIB, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_DisplayPort }; const uint16_t supported_devices_connector_object_id_convert[] = { CONNECTOR_OBJECT_ID_NONE, CONNECTOR_OBJECT_ID_VGA, CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */ CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */ 
CONNECTOR_OBJECT_ID_COMPOSITE, CONNECTOR_OBJECT_ID_SVIDEO, CONNECTOR_OBJECT_ID_LVDS, CONNECTOR_OBJECT_ID_9PIN_DIN, CONNECTOR_OBJECT_ID_9PIN_DIN, CONNECTOR_OBJECT_ID_DISPLAYPORT, CONNECTOR_OBJECT_ID_HDMI_TYPE_A, CONNECTOR_OBJECT_ID_HDMI_TYPE_B, CONNECTOR_OBJECT_ID_SVIDEO }; const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_Composite, DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_HDMIA, DRM_MODE_CONNECTOR_HDMIB, DRM_MODE_CONNECTOR_LVDS, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DisplayPort, DRM_MODE_CONNECTOR_eDP, DRM_MODE_CONNECTOR_Unknown }; bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); u16 size, data_offset; u8 frev, crev; ATOM_CONNECTOR_OBJECT_TABLE *con_obj; ATOM_ENCODER_OBJECT_TABLE *enc_obj; ATOM_OBJECT_TABLE *router_obj; ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; ATOM_OBJECT_HEADER *obj_header; int i, j, k, path_size, device_support; int connector_type; u16 igp_lane_info, conn_id, connector_object_id; struct radeon_i2c_bus_rec ddc_bus; struct radeon_router router; struct radeon_gpio_rec gpio; struct radeon_hpd hpd; if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) return false; if (crev < 2) return false; obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usDisplayPathTableOffset)); con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) 
(ctx->bios + data_offset + le16_to_cpu(obj_header->usConnectorObjectTableOffset)); enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usEncoderObjectTableOffset)); router_obj = (ATOM_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usRouterObjectTableOffset)); device_support = le16_to_cpu(obj_header->usDeviceSupport); path_size = 0; for (i = 0; i < path_obj->ucNumOfDispPath; i++) { uint8_t *addr = (uint8_t *) path_obj->asDispPath; ATOM_DISPLAY_OBJECT_PATH *path; addr += path_size; path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; con_obj_num = (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; con_obj_type = (le16_to_cpu(path->usConnObjectId) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* TODO CV support */ if (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_CV_SUPPORT) continue; /* IGP chips */ if ((rdev->flags & RADEON_IS_IGP) && (con_obj_id == CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { uint16_t igp_offset = 0; ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj; index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &igp_offset)) { if (crev >= 2) { igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *) (ctx->bios + igp_offset); if (igp_obj) { uint32_t slot_config, ct; if (con_obj_num == 1) slot_config = igp_obj-> ulDDISlot1Config; else slot_config = igp_obj-> ulDDISlot2Config; ct = (slot_config >> 16) & 0xff; connector_type = object_connector_convert [ct]; connector_object_id = ct; igp_lane_info = slot_config & 0xffff; } else continue; } else continue; } else { igp_lane_info = 0; connector_type = object_connector_convert[con_obj_id]; connector_object_id = con_obj_id; } } else { igp_lane_info = 0; connector_type = 
object_connector_convert[con_obj_id]; connector_object_id = con_obj_id; } if (connector_type == DRM_MODE_CONNECTOR_Unknown) continue; router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { uint8_t grph_obj_id, grph_obj_num, grph_obj_type; grph_obj_id = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; grph_obj_num = (le16_to_cpu(path->usGraphicObjIds[j]) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); ATOM_ENCODER_CAP_RECORD *cap_record; u16 caps = 0; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_ENCODER_CAP_RECORD_TYPE: cap_record =(ATOM_ENCODER_CAP_RECORD *) record; caps = le16_to_cpu(cap_record->usEncoderCap); break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } radeon_add_atom_encoder(dev, encoder_obj, le16_to_cpu (path-> usDeviceTag), caps); } } } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { for (k = 0; k < router_obj->ucNumberOfObjects; k++) { u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD 
*cd_path; ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); int enum_id; router.router_id = router_obj_id; for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; enum_id++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) break; } while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; router.i2c_info = radeon_lookup_i2c_gpio(rdev, i2c_config-> ucAccess); router.i2c_addr = i2c_record->ucI2CAddr >> 1; break; case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) record; router.ddc_valid = true; router.ddc_mux_type = ddc_path->ucMuxType; router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; break; case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) record; router.cd_valid = true; router.cd_mux_type = cd_path->ucMuxType; router.cd_mux_control_pin = cd_path->ucMuxControlPin; router.cd_mux_state = cd_path->ucMuxState[enum_id]; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } } } } } /* look up gpio for ddc, hpd */ ddc_bus.valid = false; hpd.hpd = RADEON_HPD_NONE; if ((le16_to_cpu(path->usDeviceTag) & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(con_obj->asObjects[j]. usObjectID)) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(con_obj-> asObjects[j]. 
usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_HPD_INT_RECORD *hpd_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; ddc_bus = radeon_lookup_i2c_gpio(rdev, i2c_config-> ucAccess); break; case ATOM_HPD_INT_RECORD_TYPE: hpd_record = (ATOM_HPD_INT_RECORD *) record; gpio = radeon_lookup_gpio(rdev, hpd_record->ucHPDIntGPIOID); hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); hpd.plugged_state = hpd_record->ucPlugged_PinState; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record-> ucRecordSize); } break; } } } /* needed for aux chan transactions */ ddc_bus.hpd = hpd.hpd; conn_id = le16_to_cpu(path->usConnObjectId); if (!radeon_atom_apply_quirks (dev, le16_to_cpu(path->usDeviceTag), &connector_type, &ddc_bus, &conn_id, &hpd)) continue; radeon_add_atom_connector(dev, conn_id, le16_to_cpu(path-> usDeviceTag), connector_type, &ddc_bus, igp_lane_info, connector_object_id, &hpd, &router); } } radeon_link_encoder_connector(dev); return true; } static uint16_t atombios_get_connector_object_id(struct drm_device *dev, int connector_type, uint16_t devices) { struct radeon_device *rdev = dev->dev_private; if (rdev->flags & RADEON_IS_IGP) { return supported_devices_connector_object_id_convert [connector_type]; } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) || (connector_type == DRM_MODE_CONNECTOR_DVID)) && (devices & ATOM_DEVICE_DFP2_SUPPORT)) { struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, XTMDS_Info); uint16_t size, data_offset; uint8_t frev, crev; ATOM_XTMDS_INFO *xtmds; if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { xtmds = 
(ATOM_XTMDS_INFO *)(ctx->bios + data_offset); if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { if (connector_type == DRM_MODE_CONNECTOR_DVII) return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; else return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; } else { if (connector_type == DRM_MODE_CONNECTOR_DVII) return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; else return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; } } else return supported_devices_connector_object_id_convert [connector_type]; } else { return supported_devices_connector_object_id_convert [connector_type]; } } struct bios_connector { bool valid; uint16_t line_mux; uint16_t devices; int connector_type; struct radeon_i2c_bus_rec ddc_bus; struct radeon_hpd hpd; }; bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo); uint16_t size, data_offset; uint8_t frev, crev; uint16_t device_support; uint8_t dac; union atom_supported_devices *supported_devices; int i, j, max_device; struct bios_connector *bios_connectors; size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; struct radeon_router router; router.ddc_valid = false; router.cd_valid = false; bios_connectors = kzalloc(bc_size, GFP_KERNEL); if (!bios_connectors) return false; if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { kfree(bios_connectors); return false; } supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); if (frev > 1) max_device = ATOM_MAX_SUPPORTED_DEVICE; else max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO; for (i = 0; i < max_device; i++) { ATOM_CONNECTOR_INFO_I2C ci = supported_devices->info.asConnInfo[i]; bios_connectors[i].valid = false; if (!(device_support & (1 << 
i))) { continue; } if (i == ATOM_DEVICE_CV_INDEX) { DRM_DEBUG_KMS("Skipping Component Video\n"); continue; } bios_connectors[i].connector_type = supported_devices_connector_convert[ci.sucConnectorInfo. sbfAccess. bfConnectorType]; if (bios_connectors[i].connector_type == DRM_MODE_CONNECTOR_Unknown) continue; dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC; bios_connectors[i].line_mux = ci.sucI2cId.ucAccess; /* give tv unique connector ids */ if (i == ATOM_DEVICE_TV1_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 50; } else if (i == ATOM_DEVICE_TV2_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 51; } else if (i == ATOM_DEVICE_CV_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 52; } else bios_connectors[i].ddc_bus = radeon_lookup_i2c_gpio(rdev, bios_connectors[i].line_mux); if ((crev > 1) && (frev > 1)) { u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap; switch (isb) { case 0x4: bios_connectors[i].hpd.hpd = RADEON_HPD_1; break; case 0xa: bios_connectors[i].hpd.hpd = RADEON_HPD_2; break; default: bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; break; } } else { if (i == ATOM_DEVICE_DFP1_INDEX) bios_connectors[i].hpd.hpd = RADEON_HPD_1; else if (i == ATOM_DEVICE_DFP2_INDEX) bios_connectors[i].hpd.hpd = RADEON_HPD_2; else bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; } /* Always set the connector type to VGA for CRT1/CRT2. if they are * shared with a DVI port, we'll pick up the DVI connector when we * merge the outputs. Some bioses incorrectly list VGA ports as DVI. 
*/ if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX) bios_connectors[i].connector_type = DRM_MODE_CONNECTOR_VGA; if (!radeon_atom_apply_quirks (dev, (1 << i), &bios_connectors[i].connector_type, &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux, &bios_connectors[i].hpd)) continue; bios_connectors[i].valid = true; bios_connectors[i].devices = (1 << i); if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) radeon_add_atom_encoder(dev, radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i), 0); else radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i)); } /* combine shared connectors */ for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { for (j = 0; j < max_device; j++) { if (bios_connectors[j].valid && (i != j)) { if (bios_connectors[i].line_mux == bios_connectors[j].line_mux) { /* make sure not to combine LVDS */ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) { bios_connectors[i].line_mux = 53; bios_connectors[i].ddc_bus.valid = false; continue; } if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) { bios_connectors[j].line_mux = 53; bios_connectors[j].ddc_bus.valid = false; continue; } /* combine analog and digital for DVI-I */ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) && (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) || ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) && (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) { bios_connectors[i].devices |= bios_connectors[j].devices; bios_connectors[i].connector_type = DRM_MODE_CONNECTOR_DVII; if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) bios_connectors[i].hpd = bios_connectors[j].hpd; bios_connectors[j].valid = false; } } } } } } /* add the connectors */ for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { uint16_t connector_object_id = atombios_get_connector_object_id(dev, bios_connectors[i].connector_type, bios_connectors[i].devices); 
radeon_add_atom_connector(dev, bios_connectors[i].line_mux, bios_connectors[i].devices, bios_connectors[i]. connector_type, &bios_connectors[i].ddc_bus, 0, connector_object_id, &bios_connectors[i].hpd, &router); } } radeon_link_encoder_connector(dev); kfree(bios_connectors); return true; } union firmware_info { ATOM_FIRMWARE_INFO info; ATOM_FIRMWARE_INFO_V1_2 info_12; ATOM_FIRMWARE_INFO_V1_3 info_13; ATOM_FIRMWARE_INFO_V1_4 info_14; ATOM_FIRMWARE_INFO_V2_1 info_21; ATOM_FIRMWARE_INFO_V2_2 info_22; }; bool radeon_atom_get_clock_info(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); union firmware_info *firmware_info; uint8_t frev, crev; struct radeon_pll *p1pll = &rdev->clock.p1pll; struct radeon_pll *p2pll = &rdev->clock.p2pll; struct radeon_pll *dcpll = &rdev->clock.dcpll; struct radeon_pll *spll = &rdev->clock.spll; struct radeon_pll *mpll = &rdev->clock.mpll; uint16_t data_offset; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); /* pixel clocks */ p1pll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; if (crev < 2) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else p1pll->pll_out_min = le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); if (crev >= 4) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) p1pll->lcd_pll_out_min = p1pll->pll_out_min; p1pll->lcd_pll_out_max = le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_max == 0) p1pll->lcd_pll_out_max = p1pll->pll_out_max; } else { p1pll->lcd_pll_out_min = 
p1pll->pll_out_min; p1pll->lcd_pll_out_max = p1pll->pll_out_max; } if (p1pll->pll_out_min == 0) { if (ASIC_IS_AVIVO(rdev)) p1pll->pll_out_min = 64800; else p1pll->pll_out_min = 20000; } p1pll->pll_in_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input); p1pll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input); *p2pll = *p1pll; /* system clock */ if (ASIC_IS_DCE4(rdev)) spll->reference_freq = le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); else spll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); spll->reference_div = 0; spll->pll_out_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output); spll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output); /* ??? */ if (spll->pll_out_min == 0) { if (ASIC_IS_AVIVO(rdev)) spll->pll_out_min = 64800; else spll->pll_out_min = 20000; } spll->pll_in_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input); spll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); /* memory clock */ if (ASIC_IS_DCE4(rdev)) mpll->reference_freq = le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); else mpll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); mpll->reference_div = 0; mpll->pll_out_min = le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output); mpll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output); /* ??? 
*/
		/* Some BIOSes report 0 for the minimum memory PLL output;
		 * substitute known-safe floors (values are in 10 kHz units,
		 * cf. the 54000 == "540 Mhz" default below): 648 MHz on
		 * AVIVO parts, 200 MHz otherwise.
		 */
		if (mpll->pll_out_min == 0) {
			if (ASIC_IS_AVIVO(rdev))
				mpll->pll_out_min = 64800;
			else
				mpll->pll_out_min = 20000;
		}

		mpll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		/* default engine/memory clocks come straight from the table */
		rdev->clock.default_sclk =
			le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		rdev->clock.default_mclk =
			le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		if (ASIC_IS_DCE4(rdev)) {
			rdev->clock.default_dispclk =
				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
			/* table may report 0; fall back to per-family defaults */
			if (rdev->clock.default_dispclk == 0) {
				if (ASIC_IS_DCE5(rdev))
					rdev->clock.default_dispclk = 54000; /* 540 Mhz */
				else
					rdev->clock.default_dispclk = 60000; /* 600 Mhz */
			}
			rdev->clock.dp_extclk =
				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		}
		/* the display PLL starts out as a copy of pixel PLL 1 */
		*dcpll = *p1pll;

		rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (rdev->clock.max_pixel_clock == 0)
			rdev->clock.max_pixel_clock = 40000;

		return true;
	}
	return false;
}

/* overlay of the IntegratedSystemInfo table revisions found in the BIOS */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
};

/*
 * radeon_atombios_sideport_present - check the IntegratedSystemInfo table
 * for IGP sideport memory.
 *
 * Returns true when the table reports a non-zero bootup/sideport memory
 * clock, false otherwise (missing table, unknown content revision, or
 * RS600 which has no AMD sideport).
 */
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;

	/* sideport is AMD only */
	if (rdev->family == CHIP_RS600)
		return false;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
			      data_offset);
		/* which member of the union is valid depends on crev */
		switch (crev) {
		case 1:
			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
				return true;
			break;
		case 2:
			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
				return true;
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
	}
	return false;
}

bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
				   struct radeon_encoder_int_tmds
*tmds)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
	uint16_t data_offset;
	struct _ATOM_TMDS_INFO *tmds_info;
	uint8_t frev, crev;
	uint16_t maxfreq;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		tmds_info =
			(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
						   data_offset);

		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
		/* Pack the PLL parameters of each of the four frequency
		 * ranges into one value:
		 *   charge pump  -> bits [5:0]
		 *   VCO gain     -> bits [11:6]
		 *   duty cycle   -> bits [15:12]
		 *   voltage swing-> bits [19:16]
		 */
		for (i = 0; i < 4; i++) {
			tmds->tmds_pll[i].freq =
			    le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
			tmds->tmds_pll[i].value =
			    tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].ucPLL_VCO_Gain & 0x3f) << 6;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].ucPLL_DutyCycle & 0xf) << 12;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].ucPLL_VoltageSwing & 0xf) << 16;

			DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
				      tmds->tmds_pll[i].freq,
				      tmds->tmds_pll[i].value);

			/* entry matching the table max gets an all-ones freq —
			 * presumably an "up to any frequency" sentinel for the
			 * consumer; TODO confirm against the TMDS setup code */
			if (maxfreq == tmds->tmds_pll[i].freq) {
				tmds->tmds_pll[i].freq = 0xffffffff;
				break;
			}
		}
		return true;
	}
	return false;
}

/*
 * radeon_atombios_get_ppll_ss_info - look up pixel PLL spread spectrum
 * parameters by id in the PPLL_SS_Info table.
 *
 * @ss is zeroed first and only filled in when a matching entry is found.
 * Returns true iff an entry with ucSS_Id == @id exists.
 */
bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
				      struct radeon_atom_ss *ss,
				      int id)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
	uint16_t data_offset, size;
	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
	uint8_t frev, crev;
	int i, num_indices;

	memset(ss, 0, sizeof(struct radeon_atom_ss));
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		ss_info =
			(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);

		/* entry count is derived from the table size reported by
		 * the data header */
		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);

		for (i = 0; i < num_indices; i++) {
			if (ss_info->asSS_Info[i].ucSS_Id == id) {
				ss->percentage =
				    le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
				ss->step = ss_info->asSS_Info[i].ucSS_Step;
				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
				ss->range = ss_info->asSS_Info[i].ucSS_Range;
				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
				return true;
			}
		}
	}
	return false;
}

/*
 * radeon_atombios_get_igp_ss_overrides - apply IGP-specific spread
 * spectrum overrides from the IntegratedSystemInfo v6 table.
 *
 * @ss: spread spectrum parameters to patch in place
 * @id: ASIC_INTERNAL_SS_ON_* clock identifier
 *
 * Only non-zero table values override the caller's percentage/rate;
 * zero entries leave @ss untouched. ids other than TMDS/HDMI/LVDS are
 * ignored.
 */
static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
						 struct radeon_atom_ss *ss,
						 int id)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
			(mode_info->atom_context->bios + data_offset);
		switch (id) {
		case ASIC_INTERNAL_SS_ON_TMDS:
			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_HDMI:
			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_LVDS:
			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

/* overlay of the ASIC_InternalSS_Info table revisions */
union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
				      struct radeon_atom_ss *ss,
				      int id, u32 clock)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	uint8_t frev, crev;
	int i, num_indices;

	memset(ss, 0, sizeof(struct
radeon_atom_ss)); if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { ss_info = (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); switch (frev) { case 1: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT); for (i = 0; i < num_indices; i++) { if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); return true; } } break; case 2: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); for (i = 0; i < num_indices; i++) { if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); return true; } } break; case 3: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); for (i = 0; i < num_indices; i++) { if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); if (rdev->flags & RADEON_IS_IGP) radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } } break; default: DRM_ERROR("Unsupported 
ASIC_InternalSS_Info table: %d %d\n", frev, crev); break; } } return false; } union lvds_info { struct _ATOM_LVDS_INFO info; struct _ATOM_LVDS_INFO_V12 info_12; }; struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, LVDS_Info); uint16_t data_offset, misc; union lvds_info *lvds_info; uint8_t frev, crev; struct radeon_encoder_atom_dig *lvds = NULL; int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { lvds_info = (union lvds_info *)(mode_info->atom_context->bios + data_offset); lvds = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); if (!lvds) return NULL; lvds->native_mode.clock = le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; lvds->native_mode.hdisplay = le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); lvds->native_mode.vdisplay = le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); lvds->native_mode.htotal = lvds->native_mode.hdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); lvds->native_mode.vtotal = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; misc = 
le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); lvds->lcd_ss_id = lvds_info->info.ucSS_Id; encoder->native_mode = lvds->native_mode; if (encoder_enum == 2) lvds->linkb = true; else lvds->linkb = false; /* parse the lcd record table */ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; u8 *record; if ((frev == 1) && (crev < 2)) /* absolute */ record = (u8 *)(mode_info->atom_context->bios + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); else /* relative */ record = (u8 *)(mode_info->atom_context->bios + data_offset + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: record += sizeof(ATOM_PATCH_RECORD_MODE); break; case LCD_RTS_RECORD_TYPE: record += sizeof(ATOM_LCD_RTS_RECORD); break; case LCD_CAP_RECORD_TYPE: record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); break; case LCD_FAKE_EDID_PATCH_RECORD_TYPE: fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; int edid_size = max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); edid = kmalloc(edid_size, GFP_KERNEL); if (edid) { memcpy((u8 *)edid, 
(u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = edid_size; } else kfree(edid); } } record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; lvds->native_mode.width_mm = panel_res_record->usHSize; lvds->native_mode.height_mm = panel_res_record->usVSize; record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); break; default: DRM_ERROR("Bad LCD record %d\n", *record); bad_record = true; break; } if (bad_record) break; } } } return lvds; } struct radeon_encoder_primary_dac * radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, CompassionateData); uint16_t data_offset; struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { dac_info = (struct _COMPASSIONATE_DATA *) (mode_info->atom_context->bios + data_offset); p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); if (!p_dac) return NULL; bg = dac_info->ucDAC1_BG_Adjustment; dac = dac_info->ucDAC1_DAC_Adjustment; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } return p_dac; } bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct drm_display_mode *mode) { struct radeon_mode_info *mode_info = &rdev->mode_info; ATOM_ANALOG_TV_INFO *tv_info; ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2; ATOM_DTD_FORMAT *dtd_timings; int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); u8 frev, crev; u16 data_offset, misc; if (!atom_parse_data_header(mode_info->atom_context, data_index, 
NULL, &frev, &crev, &data_offset)) return false; switch (crev) { case 1: tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); if (index >= MAX_SUPPORTED_TV_TIMING) return false; mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) + le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) + le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); mode->flags = 0; misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) mode->flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; if (index == 1) { /* PAL timings appear to have wrong values for totals */ mode->crtc_htotal -= 1; mode->crtc_vtotal -= 1; } break; case 2: tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); if (index >= MAX_SUPPORTED_TV_TIMING_V1_2) return false; dtd_timings = &tv_info_v1_2->aModeTimings[index]; mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time); mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive); 
mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset); mode->crtc_hsync_end = mode->crtc_hsync_start + le16_to_cpu(dtd_timings->usHSyncWidth); mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time); mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive); mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset); mode->crtc_vsync_end = mode->crtc_vsync_start + le16_to_cpu(dtd_timings->usVSyncWidth); mode->flags = 0; misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) mode->flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10; break; } return true; } enum radeon_tv_std radeon_atombios_get_tv_info(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); uint16_t data_offset; uint8_t frev, crev; struct _ATOM_ANALOG_TV_INFO *tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { tv_info = (struct _ATOM_ANALOG_TV_INFO *) (mode_info->atom_context->bios + data_offset); switch (tv_info->ucTV_BootUpDefaultStandard) { case ATOM_TV_NTSC: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS("Default TV standard: NTSC\n"); break; case ATOM_TV_NTSCJ: tv_std = TV_STD_NTSC_J; DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); break; case ATOM_TV_PAL: tv_std = TV_STD_PAL; DRM_DEBUG_KMS("Default TV standard: PAL\n"); break; case ATOM_TV_PALM: tv_std = TV_STD_PAL_M; DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); break; case ATOM_TV_PALN: tv_std = 
TV_STD_PAL_N; DRM_DEBUG_KMS("Default TV standard: PAL-N\n"); break; case ATOM_TV_PALCN: tv_std = TV_STD_PAL_CN; DRM_DEBUG_KMS("Default TV standard: PAL-CN\n"); break; case ATOM_TV_PAL60: tv_std = TV_STD_PAL_60; DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); break; case ATOM_TV_SECAM: tv_std = TV_STD_SECAM; DRM_DEBUG_KMS("Default TV standard: SECAM\n"); break; default: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n"); break; } } return tv_std; } struct radeon_encoder_tv_dac * radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, CompassionateData); uint16_t data_offset; struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; struct radeon_encoder_tv_dac *tv_dac = NULL; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { dac_info = (struct _COMPASSIONATE_DATA *) (mode_info->atom_context->bios + data_offset); tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); if (!tv_dac) return NULL; bg = dac_info->ucDAC2_CRT2_BG_Adjustment; dac = dac_info->ucDAC2_CRT2_DAC_Adjustment; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); bg = dac_info->ucDAC2_PAL_BG_Adjustment; dac = dac_info->ucDAC2_PAL_DAC_Adjustment; tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20); bg = dac_info->ucDAC2_NTSC_BG_Adjustment; dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); tv_dac->tv_std = radeon_atombios_get_tv_info(rdev); } return tv_dac; } static const char *thermal_controller_names[] = { "NONE", "lm63", "adm1032", "adm1030", "max6649", "lm64", "f75375", "asc7xxx", }; static const char *pp_lib_thermal_controller_names[] = { "NONE", "lm63", "adm1032", "adm1030", "max6649", "lm64", "f75375", "RV6xx", "RV770", "adt7473", "NONE", "External GPIO", 
"Evergreen", "emc2103", "Sumo", "Northern Islands", "Southern Islands", "lm96163", };

/* Overlay unions for the different revisions of the ATOM PowerPlay tables. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-ASIC-family layouts of a pplib clock-info entry. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};

/* v1 vs v2 pplib power-state entry layouts. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

/*
 * Translate the misc/misc2 flag words of a v1-v3 PowerPlay entry into the
 * driver's power_state type/flags for entry @state_index, and remember the
 * BIOS default state when flagged.  Later checks deliberately override
 * earlier ones ("order matters").
 */
static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
						 int state_index,
						 u32 misc, u32 misc2)
{
	rdev->pm.power_state[state_index].misc = misc;
	rdev->pm.power_state[state_index].misc2 = misc2;
	/* order matters! */
	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_POWERSAVE;
	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BATTERY;
	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BATTERY;
	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BALANCED;
	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_PERFORMANCE;
		rdev->pm.power_state[state_index].flags &=
			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
	}
	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BALANCED;
	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = state_index;
		rdev->pm.power_state[state_index].default_clock_mode =
			&rdev->pm.power_state[state_index].clock_info[0];
	} else if (state_index == 0) {
		/* lowest state: usable even with no display attached */
		rdev->pm.power_state[state_index].clock_info[0].flags |=
			RADEON_PM_MODE_NO_DISPLAY;
	}
}

/*
 * Parse revision 1-3 PowerPlay tables into rdev->pm.power_state.
 * Also registers an external thermal-controller I2C device if the table
 * describes one.  Returns the number of valid power states parsed
 * (0 on missing table or allocation failure).
 */
static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	u32 misc, misc2 = 0;
	int num_modes = 0, i;
	int state_index = 0;
	struct radeon_i2c_bus_rec i2c_bus;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* add the i2c bus for thermal/fan chip */
	if (power_info->info.ucOverdriveThermalController > 0) {
		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
			 power_info->info.ucOverdriveControllerAddress >> 1);
		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
		if (rdev->pm.i2c_bus) {
			struct i2c_board_info info = { };
			const char *name = thermal_controller_names[power_info->info.ucOverdriveThermalController];
			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
			strlcpy(info.type, name, sizeof(info.type));
			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
		}
	}
	num_modes = power_info->info.ucNumOfPowerModeEntries;
	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	/* last mode is usually default, array is low to high */
	for (i = 0; i < num_modes; i++) {
		/* one clock mode per state in v1-v3 tables */
		rdev->pm.power_state[state_index].clock_info =
			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
		if (!rdev->pm.power_state[state_index].clock_info)
			return state_index;
		rdev->pm.power_state[state_index].num_clock_modes = 1;
		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
		switch (frev) {
		case 1:
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
			}
			rdev->pm.power_state[state_index].flags =
				RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
			state_index++;
			break;
		case 2:
			/* v2 carries 32-bit clocks and a second misc word */
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
			}
			rdev->pm.power_state[state_index].flags =
				RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
			state_index++;
			break;
		case 3:
			/* v3 adds optional dynamic VDDCI info */
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
						true;
					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
				}
			}
			rdev->pm.power_state[state_index].flags =
				RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc,
misc2); state_index++; break; } } /* last mode is usually default */ if (rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[state_index - 1].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index - 1; rdev->pm.power_state[state_index - 1].default_clock_mode = &rdev->pm.power_state[state_index - 1].clock_info[0]; rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; rdev->pm.power_state[state_index].misc = 0; rdev->pm.power_state[state_index].misc2 = 0; } return state_index; } static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, ATOM_PPLIB_THERMALCONTROLLER *controller) { struct radeon_i2c_bus_rec i2c_bus; /* add the i2c bus for thermal/fan chip */ if (controller->ucType > 0) { if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
			   (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
			   (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
			/* combined internal/external setups are not probed here */
			DRM_INFO("Special thermal controller config\n");
		} else {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
			if (rdev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
			}
		}
	}
}

/*
 * Read the boot-up VDDC/VDDCI voltages from the FirmwareInfo table.
 * Outputs are zero when the table is missing or too old to carry them
 * (VDDCI requires frev 2, crev >= 2).
 */
static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
						 u16 *vddc, u16 *vddci)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	u8 frev, crev;
	u16 data_offset;
	union firmware_info *firmware_info;

	*vddc = 0;
	*vddci = 0;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
		if ((frev == 2) && (crev >= 2))
			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
	}
}

/*
 * Fill in the non-clock fields (type, flags, pcie lanes) of power state
 * @state_index from a pplib non-clock-info entry; @mode_index is the number
 * of clock modes already parsed for that state.  The BOOT-classified state
 * becomes the default and has its clocks patched from firmware defaults
 * (except on DCE5 discrete parts, where the posted clocks are kept).
 */
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
						       int state_index, int mode_index,
						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
{
	int j;
	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
	u16 vddc, vddci;

	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);

	rdev->pm.power_state[state_index].misc = misc;
	rdev->pm.power_state[state_index].misc2 = misc2;
	rdev->pm.power_state[state_index].pcie_lanes =
		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BATTERY;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BALANCED;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_PERFORMANCE;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			rdev->pm.power_state[state_index].type =
				POWER_STATE_TYPE_PERFORMANCE;
		break;
	}
	rdev->pm.power_state[state_index].flags = 0;
	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		rdev->pm.power_state[state_index].flags |=
			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = state_index;
		rdev->pm.power_state[state_index].default_clock_mode =
			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
		if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
			/* NI chips post without MC ucode, so default clocks are strobe mode only */
			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
			rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
		} else {
			/* patch the table values with the default sclk/mclk from firmware info */
			for (j = 0; j < mode_index; j++) {
				rdev->pm.power_state[state_index].clock_info[j].mclk =
					rdev->clock.default_mclk;
				rdev->pm.power_state[state_index].clock_info[j].sclk =
					rdev->clock.default_sclk;
				if (vddc)
					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
						vddc;
			}
		}
	}
}

/*
 * Decode one pplib clock-info entry (layout depends on ASIC family) into
 * rdev->pm.power_state[state_index].clock_info[mode_index].  Virtual
 * voltage ids are resolved via radeon_atom_get_max_vddc().
 * Returns false when the entry's clocks are zero (invalid mode).
 */
static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
						   int state_index, int mode_index,
						   union pplib_clock_info *clock_info)
{
	u32 sclk, mclk;
	u16 vddc;

	if (rdev->flags & RADEON_IS_IGP) {
		/* IGPs carry only an engine clock (system memory) */
		if (rdev->family >= CHIP_PALM) {
			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		} else {
			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		}
	} else if (ASIC_IS_DCE6(rdev)) {
		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
		sclk |= clock_info->si.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
		mclk |= clock_info->si.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->si.usVDDC);
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
			le16_to_cpu(clock_info->si.usVDDCI);
	} else if (ASIC_IS_DCE4(rdev)) {
		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->evergreen.usVDDC);
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
			le16_to_cpu(clock_info->evergreen.usVDDCI);
	} else {
		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
		sclk |= clock_info->r600.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->r600.usVDDC);
	}

	/* patch up vddc if necessary */
	switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
	case ATOM_VIRTUAL_VOLTAGE_ID0:
	case ATOM_VIRTUAL_VOLTAGE_ID1:
	case ATOM_VIRTUAL_VOLTAGE_ID2:
	case ATOM_VIRTUAL_VOLTAGE_ID3:
		if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
					     rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
					     &vddc) == 0)
			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
		break;
	default:
		break;
	}

	if (rdev->flags & RADEON_IS_IGP) {
		/* skip invalid modes */
		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
			return false;
	} else {
		/* skip invalid modes */
		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
			return false;
	}
	return true;
}

/*
 * Parse revision 4/5 pplib PowerPlay tables into rdev->pm.power_state.
 * Returns the number of valid power states parsed.
 */
static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state
		*power_state;
	int i, j;
	int state_index = 0, mode_index = 0;
	union pplib_clock_info *clock_info;
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
				       power_info->pplib.ucNumStates, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	/* first mode is usually default, followed by low to high */
	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
		mode_index = 0;
		power_state = (union pplib_power_state *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
			 i * power_info->pplib.ucStateEntrySize);
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
			 (power_state->v1.ucNonClockStateIndex *
			  power_info->pplib.ucNonClockSize));
		/* NOTE(review): clock_info is allocated at index i but consumed
		 * below via state_index; the two diverge once a state is
		 * skipped (mode_index == 0) -- verify against later kernels. */
		rdev->pm.power_state[i].clock_info =
			kzalloc(sizeof(struct radeon_pm_clock_info) *
				((power_info->pplib.ucStateEntrySize - 1) ?
				 (power_info->pplib.ucStateEntrySize - 1) : 1),
				GFP_KERNEL);
		if (!rdev->pm.power_state[i].clock_info)
			return state_index;
		if (power_info->pplib.ucStateEntrySize - 1) {
			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
				clock_info = (union pplib_clock_info *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
					 (power_state->v1.ucClockStateIndices[j] *
					  power_info->pplib.ucClockInfoSize));
				valid = radeon_atombios_parse_pplib_clock_info(rdev,
									       state_index, mode_index,
									       clock_info);
				if (valid)
					mode_index++;
			}
		} else {
			/* no per-state clock entries: fall back to firmware defaults */
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				rdev->clock.default_mclk;
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				rdev->clock.default_sclk;
			mode_index++;
		}
		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
		if (mode_index) {
			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
								   non_clock_info);
			state_index++;
		}
	}
	/* if multiple clock modes, mark the lowest as no display */
	for (i = 0; i < state_index; i++) {
		if (rdev->pm.power_state[i].num_clock_modes > 1)
			rdev->pm.power_state[i].clock_info[0].flags |=
				RADEON_PM_MODE_NO_DISPLAY;
	}
	/* first mode is usually default */
	if (rdev->pm.default_power_state_index == -1) {
		rdev->pm.power_state[0].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = 0;
		rdev->pm.power_state[0].default_clock_mode =
			&rdev->pm.power_state[0].clock_info[0];
	}
	return state_index;
}

/*
 * Parse revision 6 pplib PowerPlay tables (separate StateArray /
 * ClockInfoArray / NonClockInfoArray layout) into rdev->pm.power_state.
 * Returns the number of valid power states parsed.
 */
static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, non_clock_array_index, clock_array_index;
	int state_index = 0, mode_index = 0;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
				       state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		mode_index = 0;
		power_state = (union pplib_power_state *)&state_array->states[i];
		/* XXX this might be an inagua bug... */
		non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		rdev->pm.power_state[i].clock_info =
			kzalloc(sizeof(struct radeon_pm_clock_info) *
				(power_state->v2.ucNumDPMLevels ?
				 power_state->v2.ucNumDPMLevels : 1),
				GFP_KERNEL);
		if (!rdev->pm.power_state[i].clock_info)
			return state_index;
		if (power_state->v2.ucNumDPMLevels) {
			for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
				clock_array_index = power_state->v2.clockInfoIndex[j];
				/* XXX this might be an inagua bug... */
				if (clock_array_index >= clock_info_array->ucNumEntries)
					continue;
				clock_info = (union pplib_clock_info *)
					&clock_info_array->clockInfo[clock_array_index *
								     clock_info_array->ucEntrySize];
				valid = radeon_atombios_parse_pplib_clock_info(rdev,
									       state_index, mode_index,
									       clock_info);
				if (valid)
					mode_index++;
			}
		} else {
			/* no DPM levels: fall back to firmware default clocks */
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				rdev->clock.default_mclk;
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				rdev->clock.default_sclk;
			mode_index++;
		}
		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
		if (mode_index) {
			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
								   non_clock_info);
			state_index++;
		}
	}
	/* if multiple clock modes, mark the lowest as no display */
	for (i = 0; i < state_index; i++) {
		if (rdev->pm.power_state[i].num_clock_modes > 1)
			rdev->pm.power_state[i].clock_info[0].flags |=
				RADEON_PM_MODE_NO_DISPLAY;
	}
	/* first mode is usually default */
	if (rdev->pm.default_power_state_index == -1) {
		rdev->pm.power_state[0].type = POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = 0;
		rdev->pm.power_state[0].default_clock_mode =
			&rdev->pm.power_state[0].clock_info[0];
	}
	return state_index;
}

/*
 * Top-level PowerPlay parser: dispatch on table format revision, or build a
 * single default state from rdev->clock when no table is present.  Also
 * initializes the current power state / clock mode bookkeeping.
 */
void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int state_index = 0;

	rdev->pm.default_power_state_index = -1;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		switch (frev) {
		case 1:
		case 2:
		case 3:
			state_index = radeon_atombios_parse_power_table_1_3(rdev);
			break;
		case 4:
		case 5:
			state_index = radeon_atombios_parse_power_table_4_5(rdev);
			break;
		case 6:
			state_index = radeon_atombios_parse_power_table_6(rdev);
			break;
		default:
			break;
		}
	} else {
		/* no PowerPlay table: synthesize one default state */
		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
		if (rdev->pm.power_state) {
			rdev->pm.power_state[0].clock_info
= kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
			if (rdev->pm.power_state[0].clock_info) {
				/* add the default mode */
				rdev->pm.power_state[state_index].type =
					POWER_STATE_TYPE_DEFAULT;
				rdev->pm.power_state[state_index].num_clock_modes = 1;
				rdev->pm.power_state[state_index].clock_info[0].mclk =
					rdev->clock.default_mclk;
				rdev->pm.power_state[state_index].clock_info[0].sclk =
					rdev->clock.default_sclk;
				rdev->pm.power_state[state_index].default_clock_mode =
					&rdev->pm.power_state[state_index].clock_info[0];
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_NONE;
				rdev->pm.power_state[state_index].pcie_lanes = 16;
				rdev->pm.default_power_state_index = state_index;
				rdev->pm.power_state[state_index].flags = 0;
				state_index++;
			}
		}
	}

	rdev->pm.num_power_states = state_index;

	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	if (rdev->pm.default_power_state_index >= 0)
		rdev->pm.current_vddc =
			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	else
		rdev->pm.current_vddc = 0;
}

/* Enable/disable dynamic clock gating via the DynamicClockGating cmd table. */
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
	DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);

	args.ucEnable = enable;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Query the current engine clock through the GetEngineClock command table. */
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
{
	GET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnEngineClock);
}

/* Query the current memory clock through the GetMemoryClock command table. */
uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
{
	GET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnMemoryClock);
}

/* Program the engine clock through the SetEngineClock command table. */
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
				  uint32_t eng_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);

	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Program the memory clock; skipped entirely on IGP parts. */
void radeon_atom_set_memory_clock(struct radeon_device *rdev,
				  uint32_t mem_clock)
{
	SET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);

	if (rdev->flags & RADEON_IS_IGP)
		return;

	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Overlay union for the different SetVoltage parameter revisions. */
union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};

/*
 * Program a voltage through the SetVoltage command table, picking the
 * argument layout that matches the table's crev.
 */
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev, volt_index = voltage_level;

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	/* 0xff01 is a flag rather than an actual voltage */
	if (voltage_level == 0xff01)
		return;

	switch (crev) {
	case 1:
		args.v1.ucVoltageType = voltage_type;
		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
		args.v1.ucVoltageIndex = volt_index;
		break;
	case 2:
		args.v2.ucVoltageType = voltage_type;
		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * Resolve a (possibly virtual) voltage id to a real voltage level via the
 * SetVoltage table's query modes.  Returns 0 on success with *voltage set,
 * -EINVAL when the table is missing or too old (crev 1) to support queries.
 */
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
				    u16 voltage_id, u16 *voltage)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 1:
		return -EINVAL;
	case 2:
		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
		args.v2.ucVoltageMode = 0;
		args.v2.usVoltageLevel = 0;

		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);

		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

/*
 * Initialize the BIOS scratch registers: let the BIOS keep backlight
 * control, but take mode switching away from it.
 */
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	uint32_t bios_2_scratch, bios_6_scratch;

	if (rdev->family >= CHIP_R600) {
		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	} else {
		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
	}

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	if (rdev->family >= CHIP_R600) {
		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	} else {
		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
	}
}

/* Snapshot all BIOS scratch registers into rdev->bios_scratch. */
void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
{
	uint32_t scratch_reg;
	int i;

	if (rdev->family >= CHIP_R600)
		scratch_reg = R600_BIOS_0_SCRATCH;
	else
		scratch_reg = RADEON_BIOS_0_SCRATCH;

	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
		rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
}

void
radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
{
	/* Write the previously saved scratch values back to the hardware. */
	uint32_t scratch_reg;
	int i;

	if (rdev->family >= CHIP_R600)
		scratch_reg = R600_BIOS_0_SCRATCH;
	else
		scratch_reg = RADEON_BIOS_0_SCRATCH;

	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
		WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
}

/*
 * Toggle the critical-state / accelerated-mode bits in scratch reg 6 so the
 * BIOS knows whether the driver currently owns the outputs.
 */
void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t bios_6_scratch;

	if (rdev->family >= CHIP_R600)
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	else
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	if (rdev->family >= CHIP_R600)
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	else
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
}

/* at some point we may want to break this out into individual functions */
/*
 * Reflect a connect/disconnect event for every device type shared by this
 * encoder/connector pair into BIOS scratch registers 0 (detect), 3 (active),
 * and 6 (acc request).
 */
void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
				       struct drm_encoder *encoder,
				       bool connected)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;

	if (rdev->family >= CHIP_R600) {
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	} else {
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
	}

	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("TV1 connected\n");
			bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
		} else {
			DRM_DEBUG_KMS("TV1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_TV1_MASK;
			bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CV connected\n");
			bios_3_scratch |= ATOM_S3_CV_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
		} else {
			DRM_DEBUG_KMS("CV disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CV_MASK;
			bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("LCD1 connected\n");
			bios_0_scratch |= ATOM_S0_LCD1;
			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
		} else {
			DRM_DEBUG_KMS("LCD1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_LCD1;
			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CRT1 connected\n");
			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
		} else {
			DRM_DEBUG_KMS("CRT1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CRT2 connected\n");
			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
		} else {
			DRM_DEBUG_KMS("CRT2 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP1 connected\n");
			bios_0_scratch |= ATOM_S0_DFP1;
			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
		} else {
			DRM_DEBUG_KMS("DFP1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP1;
			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP2 connected\n");
			bios_0_scratch |= ATOM_S0_DFP2;
			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
		} else {
			DRM_DEBUG_KMS("DFP2 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP2;
			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP3 connected\n");
			bios_0_scratch |= ATOM_S0_DFP3;
			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
		} else {
			DRM_DEBUG_KMS("DFP3 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP3;
			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP4 connected\n");
			bios_0_scratch |= ATOM_S0_DFP4;
			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
		} else {
			DRM_DEBUG_KMS("DFP4 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP4;
			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP5 connected\n");
			bios_0_scratch |= ATOM_S0_DFP5;
			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
		} else {
			DRM_DEBUG_KMS("DFP5 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP5;
			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP6 connected\n");
			bios_0_scratch |= ATOM_S0_DFP6;
			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
		} else {
			DRM_DEBUG_KMS("DFP6 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP6;
			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
		}
	}

	if (rdev->family >= CHIP_R600) {
		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	} else {
		WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
	}
}

/*
 * Record in scratch reg 3 which CRTC each of this encoder's device types is
 * driving (no-op on DCE4+, which does not use the scratch-reg protocol).
 */
void
radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_3_scratch;

	if (ASIC_IS_DCE4(rdev))
		return;

	if (rdev->family >= CHIP_R600)
		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
	else
		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);

	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 18);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 24);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 16);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 20);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
		bios_3_scratch
&= ~ATOM_S3_LCD1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 17); } if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 19); } if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE; bios_3_scratch |= (crtc << 23); } if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE; bios_3_scratch |= (crtc << 25); } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch); else WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch); } void radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_2_scratch; if (ASIC_IS_DCE4(rdev)) return; if (rdev->family >= CHIP_R600) bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); else bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE; else bios_2_scratch |= ATOM_S2_CV_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE; else bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE; } if 
(radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE; } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); else WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); }
gpl-2.0
zanezam/boeffla-kernel-samsung-n51x0
arch/arm/mach-s3c64xx/cpu.c
524
3896
/* linux/arch/arm/plat-s3c64xx/cpu.c
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX CPU Support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <mach/map.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <plat/regs-serial.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>

#include <mach/s3c6400.h>
#include <mach/s3c6410.h>

/* table of supported CPUs */

static const char name_s3c6400[] = "S3C6400";
static const char name_s3c6410[] = "S3C6410";

/* CPU id table matched against the chip's id register by s3c_init_cpu();
 * the matching entry supplies the per-SoC setup callbacks. */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= S3C6400_CPU_ID,
		.idmask		= S3C64XX_CPU_MASK,
		.map_io		= s3c6400_map_io,
		.init_clocks	= s3c6400_init_clocks,
		.init_uarts	= s3c6400_init_uarts,
		.init		= s3c6400_init,
		.name		= name_s3c6400,
	}, {
		.idcode		= S3C6410_CPU_ID,
		.idmask		= S3C64XX_CPU_MASK,
		.map_io		= s3c6410_map_io,
		.init_clocks	= s3c6410_init_clocks,
		.init_uarts	= s3c6410_init_uarts,
		.init		= s3c6410_init,
		.name		= name_s3c6410,
	},
};

/* minimal IO mapping */

/* see notes on uart map in arch/arm/mach-s3c6400/include/mach/debug-macro.S */
#define UART_OFFS (S3C_PA_UART & 0xfffff)

/* static mappings needed before the machine-specific iotable is available:
 * system controller, SROM, UART, VICs, timers, GPIO, modem block, watchdog
 * and the USB high-speed PHY. */
static struct map_desc s3c_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(S3C64XX_PA_SYSCON),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_MEM,
		.pfn		= __phys_to_pfn(S3C64XX_PA_SROM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)(S3C_VA_UART + UART_OFFS),
		.pfn		= __phys_to_pfn(S3C_PA_UART),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)VA_VIC0,
		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC0),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)VA_VIC1,
		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC1),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(S3C_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C64XX_VA_GPIO,
		.pfn		= __phys_to_pfn(S3C64XX_PA_GPIO),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C64XX_VA_MODEM,
		.pfn		= __phys_to_pfn(S3C64XX_PA_MODEM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(S3C64XX_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
		.length		= SZ_1K,
		.type		= MT_DEVICE,
	},
};

/* sysdev class shared by all S3C64XX core devices; drivers hook suspend/
 * resume handlers onto it. */
struct sysdev_class s3c64xx_sysclass = {
	.name	= "s3c64xx-core",
};

static struct sys_device s3c64xx_sysdev = {
	.cls	= &s3c64xx_sysclass,
};

/* uart registration process */

/* Register the common S3C64XX UART platform devices from the board's
 * per-port configuration in @cfg (@no entries). */
void __init s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
}

/* read cpu identification code */

/* Set up the minimal static IO mappings (plus the machine's own @mach_desc
 * table of @size entries), then read the CPU id and dispatch the matching
 * cpu_ids[] entry's setup hooks. */
void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
{
	/* initialise the io descriptors we need for initialisation */
	iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
	iotable_init(mach_desc, size);

	/* detect cpu id */
	s3c64xx_init_cpu();

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

/* Register the core sysdev class/device early so that other initcalls can
 * attach to it. */
static __init int s3c64xx_sysdev_init(void)
{
	sysdev_class_register(&s3c64xx_sysclass);
	return sysdev_register(&s3c64xx_sysdev);
}

core_initcall(s3c64xx_sysdev_init);
gpl-2.0
Arc-Team/android_kernel_htc_holiday
net/netlabel/netlabel_domainhash.c
524
23195
/*
 * NetLabel Domain Hash Table
 *
 * This file manages the domain hash table that NetLabel uses to determine
 * which network labeling protocol to use for a given domain.  The NetLabel
 * system manages static and dynamic label mappings for network protocols such
 * as CIPSO and RIPSO.
 *
 * Author: Paul Moore <paul.moore@hp.com>
 *
 */

/*
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
 *
 * This program is free software;  you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY;  without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program;  if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <asm/bug.h>

#include "netlabel_mgmt.h"
#include "netlabel_addrlist.h"
#include "netlabel_domainhash.h"
#include "netlabel_user.h"

struct netlbl_domhsh_tbl {
	struct list_head *tbl;	/* array of bucket heads, size entries */
	u32 size;		/* number of buckets, always a power of two */
};

/* Domain hash table */
/* updates should be so rare that having one spinlock for the entire hash table
 * should be okay */
static DEFINE_SPINLOCK(netlbl_domhsh_lock);
#define netlbl_domhsh_rcu_deref(p) \
	rcu_dereference_check(p, rcu_read_lock_held() || \
				 lockdep_is_held(&netlbl_domhsh_lock))
static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
/* the "default" mapping, used when no domain-specific entry matches */
static struct netlbl_dom_map *netlbl_domhsh_def = NULL;

/*
 * Domain Hash Table Helper Functions
 */

/**
 * netlbl_domhsh_free_entry - Frees a domain hash table entry
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to a hash table entry can be released
 * safely.
 *
 */
static void netlbl_domhsh_free_entry(struct rcu_head *entry)
{
	struct netlbl_dom_map *ptr;
	struct netlbl_af4list *iter4;
	struct netlbl_af4list *tmp4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct netlbl_af6list *iter6;
	struct netlbl_af6list *tmp6;
#endif /* IPv6 */

	ptr = container_of(entry, struct netlbl_dom_map, rcu);
	if (ptr->type == NETLBL_NLTYPE_ADDRSELECT) {
		/* address-selector entries own per-address sub-entries which
		 * must be freed as well */
		netlbl_af4list_foreach_safe(iter4, tmp4,
					    &ptr->type_def.addrsel->list4) {
			netlbl_af4list_remove_entry(iter4);
			kfree(netlbl_domhsh_addr4_entry(iter4));
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		netlbl_af6list_foreach_safe(iter6, tmp6,
					    &ptr->type_def.addrsel->list6) {
			netlbl_af6list_remove_entry(iter6);
			kfree(netlbl_domhsh_addr6_entry(iter6));
		}
#endif /* IPv6 */
	}
	kfree(ptr->domain);
	kfree(ptr);
}

/**
 * netlbl_domhsh_hash - Hashing function for the domain hash table
 * @key: the domain name to hash
 *
 * Description:
 * This is the hashing function for the domain hash table, it returns the
 * correct bucket number for the domain.  The caller is responsible for
 * ensuring that the hash table is protected with either a RCU read lock or the
 * hash table lock.
 *
 */
static u32 netlbl_domhsh_hash(const char *key)
{
	u32 iter;
	u32 val;
	u32 len;

	/* This is taken (with slight modification) from
	 * security/selinux/ss/symtab.c:symhash() */

	for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
		val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
	/* table size is a power of two, so masking selects the bucket */
	return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1);
}

/**
 * netlbl_domhsh_search - Search for a domain entry
 * @domain: the domain
 *
 * Description:
 * Searches the domain hash table and returns a pointer to the hash table
 * entry if found, otherwise NULL is returned.  The caller is responsible for
 * ensuring that the hash table is protected with either a RCU read lock or the
 * hash table lock.
 *
 */
static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
{
	u32 bkt;
	struct list_head *bkt_list;
	struct netlbl_dom_map *iter;

	if (domain != NULL) {
		bkt = netlbl_domhsh_hash(domain);
		bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
		list_for_each_entry_rcu(iter, bkt_list, list)
			if (iter->valid && strcmp(iter->domain, domain) == 0)
				return iter;
	}

	return NULL;
}

/**
 * netlbl_domhsh_search_def - Search for a domain entry
 * @domain: the domain
 *
 * Description:
 * Searches the domain hash table and returns a pointer to the hash table
 * entry if an exact match is found, if an exact match is not present in the
 * hash table then the default entry is returned if valid otherwise NULL is
 * returned.  The caller is responsible for ensuring that the hash table is
 * protected with either a RCU read lock or the hash table lock.
 *
 */
static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
{
	struct netlbl_dom_map *entry;

	entry = netlbl_domhsh_search(domain);
	if (entry == NULL) {
		entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def);
		if (entry != NULL && !entry->valid)
			entry = NULL;
	}

	return entry;
}

/**
 * netlbl_domhsh_audit_add - Generate an audit entry for an add event
 * @entry: the entry being added
 * @addr4: the IPv4 address information
 * @addr6: the IPv6 address information
 * @result: the result code
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Generate an audit record for adding a new NetLabel/LSM mapping entry with
 * the given information.  Caller is responsible for holding the necessary
 * locks.
 *
 */
static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
				    struct netlbl_af4list *addr4,
				    struct netlbl_af6list *addr6,
				    int result,
				    struct netlbl_audit *audit_info)
{
	struct audit_buffer *audit_buf;
	struct cipso_v4_doi *cipsov4 = NULL;
	u32 type;

	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
	if (audit_buf != NULL) {
		audit_log_format(audit_buf, " nlbl_domain=%s",
				 entry->domain ? entry->domain : "(default)");
		/* the protocol type comes from the address-specific map if an
		 * address was given, otherwise from the entry itself */
		if (addr4 != NULL) {
			struct netlbl_domaddr4_map *map4;
			map4 = netlbl_domhsh_addr4_entry(addr4);
			type = map4->type;
			cipsov4 = map4->type_def.cipsov4;
			netlbl_af4list_audit_addr(audit_buf, 0, NULL,
						  addr4->addr, addr4->mask);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		} else if (addr6 != NULL) {
			struct netlbl_domaddr6_map *map6;
			map6 = netlbl_domhsh_addr6_entry(addr6);
			type = map6->type;
			netlbl_af6list_audit_addr(audit_buf, 0, NULL,
						  &addr6->addr, &addr6->mask);
#endif /* IPv6 */
		} else {
			type = entry->type;
			cipsov4 = entry->type_def.cipsov4;
		}
		switch (type) {
		case NETLBL_NLTYPE_UNLABELED:
			audit_log_format(audit_buf, " nlbl_protocol=unlbl");
			break;
		case NETLBL_NLTYPE_CIPSOV4:
			BUG_ON(cipsov4 == NULL);
			audit_log_format(audit_buf,
					 " nlbl_protocol=cipsov4 cipso_doi=%u",
					 cipsov4->doi);
			break;
		}
		audit_log_format(audit_buf, " res=%u", result == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}
}

/**
 * netlbl_domhsh_validate - Validate a new domain mapping entry
 * @entry: the entry to validate
 *
 * This function validates the new domain mapping entry to ensure that it is
 * a valid entry.  Returns zero on success, negative values on failure.
 *
 */
static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
{
	struct netlbl_af4list *iter4;
	struct netlbl_domaddr4_map *map4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct netlbl_af6list *iter6;
	struct netlbl_domaddr6_map *map6;
#endif /* IPv6 */

	if (entry == NULL)
		return -EINVAL;

	switch (entry->type) {
	case NETLBL_NLTYPE_UNLABELED:
		/* unlabeled entries must not carry protocol specific data */
		if (entry->type_def.cipsov4 != NULL ||
		    entry->type_def.addrsel != NULL)
			return -EINVAL;
		break;
	case NETLBL_NLTYPE_CIPSOV4:
		if (entry->type_def.cipsov4 == NULL)
			return -EINVAL;
		break;
	case NETLBL_NLTYPE_ADDRSELECT:
		netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
			map4 = netlbl_domhsh_addr4_entry(iter4);
			switch (map4->type) {
			case NETLBL_NLTYPE_UNLABELED:
				if (map4->type_def.cipsov4 != NULL)
					return -EINVAL;
				break;
			case NETLBL_NLTYPE_CIPSOV4:
				if (map4->type_def.cipsov4 == NULL)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
			map6 = netlbl_domhsh_addr6_entry(iter6);
			switch (map6->type) {
			case NETLBL_NLTYPE_UNLABELED:
				break;
			default:
				/* only unlabeled is supported for IPv6 */
				return -EINVAL;
			}
		}
#endif /* IPv6 */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Domain Hash Table Functions
 */

/**
 * netlbl_domhsh_init - Init for the domain hash
 * @size: the number of bits to use for the hash buckets
 *
 * Description:
 * Initializes the domain hash table, should be called only by
 * netlbl_user_init() during initialization.  Returns zero on success, non-zero
 * values on error.
 *
 */
int __init netlbl_domhsh_init(u32 size)
{
	u32 iter;
	struct netlbl_domhsh_tbl *hsh_tbl;

	if (size == 0)
		return -EINVAL;

	hsh_tbl = kmalloc(sizeof(*hsh_tbl), GFP_KERNEL);
	if (hsh_tbl == NULL)
		return -ENOMEM;
	/* @size is a bit count, so the bucket count is 2^size */
	hsh_tbl->size = 1 << size;
	hsh_tbl->tbl = kcalloc(hsh_tbl->size,
			       sizeof(struct list_head),
			       GFP_KERNEL);
	if (hsh_tbl->tbl == NULL) {
		kfree(hsh_tbl);
		return -ENOMEM;
	}
	for (iter = 0; iter < hsh_tbl->size; iter++)
		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);

	spin_lock(&netlbl_domhsh_lock);
	rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
	spin_unlock(&netlbl_domhsh_lock);

	return 0;
}

/**
 * netlbl_domhsh_add - Adds a entry to the domain hash table
 * @entry: the entry to add
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Adds a new entry to the domain hash table and handles any updates to the
 * lower level protocol handler (i.e. CIPSO).  Returns zero on success,
 * negative on failure.
 *
 */
int netlbl_domhsh_add(struct netlbl_dom_map *entry,
		      struct netlbl_audit *audit_info)
{
	int ret_val = 0;
	struct netlbl_dom_map *entry_old;
	struct netlbl_af4list *iter4;
	struct netlbl_af4list *tmp4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct netlbl_af6list *iter6;
	struct netlbl_af6list *tmp6;
#endif /* IPv6 */

	ret_val = netlbl_domhsh_validate(entry);
	if (ret_val != 0)
		return ret_val;

	/* XXX - we can remove this RCU read lock as the spinlock protects the
	 *       entire function, but before we do we need to fixup the
	 *       netlbl_af[4,6]list RCU functions to do "the right thing" with
	 *       respect to rcu_dereference() when only a spinlock is held. */
	rcu_read_lock();
	spin_lock(&netlbl_domhsh_lock);
	if (entry->domain != NULL)
		entry_old = netlbl_domhsh_search(entry->domain);
	else
		entry_old = netlbl_domhsh_search_def(entry->domain);
	if (entry_old == NULL) {
		/* no existing mapping: insert @entry as a new domain entry or
		 * as the default entry when no domain was given */
		entry->valid = 1;

		if (entry->domain != NULL) {
			u32 bkt = netlbl_domhsh_hash(entry->domain);
			list_add_tail_rcu(&entry->list,
				    &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
		} else {
			INIT_LIST_HEAD(&entry->list);
			rcu_assign_pointer(netlbl_domhsh_def, entry);
		}

		if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
			netlbl_af4list_foreach_rcu(iter4,
					       &entry->type_def.addrsel->list4)
				netlbl_domhsh_audit_add(entry, iter4, NULL,
							ret_val, audit_info);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			netlbl_af6list_foreach_rcu(iter6,
					       &entry->type_def.addrsel->list6)
				netlbl_domhsh_audit_add(entry, NULL, iter6,
							ret_val, audit_info);
#endif /* IPv6 */
		} else
			netlbl_domhsh_audit_add(entry, NULL, NULL,
						ret_val, audit_info);
	} else if (entry_old->type == NETLBL_NLTYPE_ADDRSELECT &&
		   entry->type == NETLBL_NLTYPE_ADDRSELECT) {
		/* both old and new are address selectors: merge the new
		 * selectors into the existing entry */
		struct list_head *old_list4;
		struct list_head *old_list6;

		old_list4 = &entry_old->type_def.addrsel->list4;
		old_list6 = &entry_old->type_def.addrsel->list6;

		/* we only allow the addition of address selectors if all of
		 * the selectors do not exist in the existing domain map */
		netlbl_af4list_foreach_rcu(iter4,
					   &entry->type_def.addrsel->list4)
			if (netlbl_af4list_search_exact(iter4->addr,
							iter4->mask,
							old_list4)) {
				ret_val = -EEXIST;
				goto add_return;
			}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		netlbl_af6list_foreach_rcu(iter6,
					   &entry->type_def.addrsel->list6)
			if (netlbl_af6list_search_exact(&iter6->addr,
							&iter6->mask,
							old_list6)) {
				ret_val = -EEXIST;
				goto add_return;
			}
#endif /* IPv6 */

		netlbl_af4list_foreach_safe(iter4, tmp4,
					    &entry->type_def.addrsel->list4) {
			netlbl_af4list_remove_entry(iter4);
			iter4->valid = 1;
			ret_val = netlbl_af4list_add(iter4, old_list4);
			netlbl_domhsh_audit_add(entry_old, iter4, NULL,
						ret_val, audit_info);
			if (ret_val != 0)
				goto add_return;
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		netlbl_af6list_foreach_safe(iter6, tmp6,
					    &entry->type_def.addrsel->list6) {
			netlbl_af6list_remove_entry(iter6);
			iter6->valid = 1;
			ret_val = netlbl_af6list_add(iter6, old_list6);
			netlbl_domhsh_audit_add(entry_old, NULL, iter6,
						ret_val, audit_info);
			if (ret_val != 0)
				goto add_return;
		}
#endif /* IPv6 */
	} else
		ret_val = -EINVAL;

add_return:
	spin_unlock(&netlbl_domhsh_lock);
	rcu_read_unlock();
	return ret_val;
}

/**
 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table
 * @entry: the entry to add
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Adds a new default entry to the domain hash table and handles any updates
 * to the lower level protocol handler (i.e. CIPSO).  Returns zero on success,
 * negative on failure.
 *
 */
int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
			      struct netlbl_audit *audit_info)
{
	return netlbl_domhsh_add(entry, audit_info);
}

/**
 * netlbl_domhsh_remove_entry - Removes a given entry from the domain table
 * @entry: the entry to remove
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes an entry from the domain hash table and handles any updates to the
 * lower level protocol handler (i.e. CIPSO).  Caller is responsible for
 * ensuring that the RCU read lock is held.  Returns zero on success, negative
 * on failure.
 *
 */
int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
			       struct netlbl_audit *audit_info)
{
	int ret_val = 0;
	struct audit_buffer *audit_buf;

	if (entry == NULL)
		return -ENOENT;

	spin_lock(&netlbl_domhsh_lock);
	if (entry->valid) {
		entry->valid = 0;
		if (entry != rcu_dereference(netlbl_domhsh_def))
			list_del_rcu(&entry->list);
		else
			rcu_assign_pointer(netlbl_domhsh_def, NULL);
	} else
		ret_val = -ENOENT;
	spin_unlock(&netlbl_domhsh_lock);

	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
	if (audit_buf != NULL) {
		audit_log_format(audit_buf,
				 " nlbl_domain=%s res=%u",
				 entry->domain ? entry->domain : "(default)",
				 ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	if (ret_val == 0) {
		struct netlbl_af4list *iter4;
		struct netlbl_domaddr4_map *map4;

		/* release the CIPSO DOI definitions referenced by the entry
		 * before scheduling the entry itself for RCU-deferred free */
		switch (entry->type) {
		case NETLBL_NLTYPE_ADDRSELECT:
			netlbl_af4list_foreach_rcu(iter4,
					     &entry->type_def.addrsel->list4) {
				map4 = netlbl_domhsh_addr4_entry(iter4);
				cipso_v4_doi_putdef(map4->type_def.cipsov4);
			}
			/* no need to check the IPv6 list since we currently
			 * support only unlabeled protocols for IPv6 */
			break;
		case NETLBL_NLTYPE_CIPSOV4:
			cipso_v4_doi_putdef(entry->type_def.cipsov4);
			break;
		}
		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
	}

	return ret_val;
}

/**
 * netlbl_domhsh_remove_af4 - Removes an address selector entry
 * @domain: the domain
 * @addr: IPv4 address
 * @mask: IPv4 address mask
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes an individual address selector from a domain mapping and potentially
 * the entire mapping if it is empty.  Returns zero on success, negative values
 * on failure.
 *
 */
int netlbl_domhsh_remove_af4(const char *domain,
			     const struct in_addr *addr,
			     const struct in_addr *mask,
			     struct netlbl_audit *audit_info)
{
	struct netlbl_dom_map *entry_map;
	struct netlbl_af4list *entry_addr;
	struct netlbl_af4list *iter4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct netlbl_af6list *iter6;
#endif /* IPv6 */
	struct netlbl_domaddr4_map *entry;

	rcu_read_lock();

	if (domain)
		entry_map = netlbl_domhsh_search(domain);
	else
		entry_map = netlbl_domhsh_search_def(domain);
	if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
		goto remove_af4_failure;

	spin_lock(&netlbl_domhsh_lock);
	entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
					   &entry_map->type_def.addrsel->list4);
	spin_unlock(&netlbl_domhsh_lock);

	if (entry_addr == NULL)
		goto remove_af4_failure;
	/* if any selector remains (IPv4 or IPv6) keep the domain mapping */
	netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
		goto remove_af4_single_addr;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
		goto remove_af4_single_addr;
#endif /* IPv6 */
	/* the domain mapping is empty so remove it from the mapping table */
	netlbl_domhsh_remove_entry(entry_map, audit_info);

remove_af4_single_addr:
	rcu_read_unlock();
	/* yick, we can't use call_rcu here because we don't have a rcu head
	 * pointer but hopefully this should be a rare case so the pause
	 * shouldn't be a problem */
	synchronize_rcu();
	entry = netlbl_domhsh_addr4_entry(entry_addr);
	cipso_v4_doi_putdef(entry->type_def.cipsov4);
	kfree(entry);
	return 0;

remove_af4_failure:
	rcu_read_unlock();
	return -ENOENT;
}

/**
 * netlbl_domhsh_remove - Removes an entry from the domain hash table
 * @domain: the domain to remove
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes an entry from the domain hash table and handles any updates to the
 * lower level protocol handler (i.e. CIPSO).  Returns zero on success,
 * negative on failure.
 *
 */
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
{
	int ret_val;
	struct netlbl_dom_map *entry;

	rcu_read_lock();
	if (domain)
		entry = netlbl_domhsh_search(domain);
	else
		entry = netlbl_domhsh_search_def(domain);
	ret_val = netlbl_domhsh_remove_entry(entry, audit_info);
	rcu_read_unlock();

	return ret_val;
}

/**
 * netlbl_domhsh_remove_default - Removes the default entry from the table
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes/resets the default entry for the domain hash table and handles any
 * updates to the lower level protocol handler (i.e. CIPSO).  Returns zero on
 * success, non-zero on failure.
 *
 */
int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)
{
	return netlbl_domhsh_remove(NULL, audit_info);
}

/**
 * netlbl_domhsh_getentry - Get an entry from the domain hash table
 * @domain: the domain name to search for
 *
 * Description:
 * Look through the domain hash table searching for an entry to match @domain,
 * return a pointer to a copy of the entry or NULL.  The caller is responsible
 * for ensuring that rcu_read_[un]lock() is called.
 *
 */
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
{
	return netlbl_domhsh_search_def(domain);
}

/**
 * netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table
 * @domain: the domain name to search for
 * @addr: the IP address to search for
 *
 * Description:
 * Look through the domain hash table searching for an entry to match @domain
 * and @addr, return a pointer to a copy of the entry or NULL.  The caller is
 * responsible for ensuring that rcu_read_[un]lock() is called.
 *
 */
struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
						       __be32 addr)
{
	struct netlbl_dom_map *dom_iter;
	struct netlbl_af4list *addr_iter;

	dom_iter = netlbl_domhsh_search_def(domain);
	if (dom_iter == NULL)
		return NULL;
	if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
		return NULL;

	addr_iter = netlbl_af4list_search(addr,
					  &dom_iter->type_def.addrsel->list4);
	if (addr_iter == NULL)
		return NULL;

	return netlbl_domhsh_addr4_entry(addr_iter);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/**
 * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
 * @domain: the domain name to search for
 * @addr: the IP address to search for
 *
 * Description:
 * Look through the domain hash table searching for an entry to match @domain
 * and @addr, return a pointer to a copy of the entry or NULL.  The caller is
 * responsible for ensuring that rcu_read_[un]lock() is called.
 *
 */
struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
						  const struct in6_addr *addr)
{
	struct netlbl_dom_map *dom_iter;
	struct netlbl_af6list *addr_iter;

	dom_iter = netlbl_domhsh_search_def(domain);
	if (dom_iter == NULL)
		return NULL;
	if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
		return NULL;

	addr_iter = netlbl_af6list_search(addr,
					  &dom_iter->type_def.addrsel->list6);
	if (addr_iter == NULL)
		return NULL;

	return netlbl_domhsh_addr6_entry(addr_iter);
}
#endif /* IPv6 */

/**
 * netlbl_domhsh_walk - Iterate through the domain mapping hash table
 * @skip_bkt: the number of buckets to skip at the start
 * @skip_chain: the number of entries to skip in the first iterated bucket
 * @callback: callback for each entry
 * @cb_arg: argument for the callback function
 *
 * Description:
 * Iterate over the domain mapping hash table, skipping the first @skip_bkt
 * buckets and @skip_chain entries.  For each entry in the table call
 * @callback, if @callback returns a negative value stop 'walking' through the
 * table and return.  Updates the values in @skip_bkt and @skip_chain on
 * return.  Returns zero on success, negative values on failure.
 *
 */
int netlbl_domhsh_walk(u32 *skip_bkt,
		       u32 *skip_chain,
		       int (*callback) (struct netlbl_dom_map *entry, void *arg),
		       void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 iter_bkt;
	struct list_head *iter_list;
	struct netlbl_dom_map *iter_entry;
	u32 chain_cnt = 0;

	rcu_read_lock();
	for (iter_bkt = *skip_bkt;
	     iter_bkt < rcu_dereference(netlbl_domhsh)->size;
	     iter_bkt++, chain_cnt = 0) {
		iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt];
		list_for_each_entry_rcu(iter_entry, iter_list, list)
			if (iter_entry->valid) {
				if (chain_cnt++ < *skip_chain)
					continue;
				ret_val = callback(iter_entry, cb_arg);
				if (ret_val < 0) {
					/* back up so the failing entry is
					 * revisited on the next walk */
					chain_cnt--;
					goto walk_return;
				}
			}
	}

walk_return:
	rcu_read_unlock();
	*skip_bkt = iter_bkt;
	*skip_chain = chain_cnt;
	return ret_val;
}
gpl-2.0
cnxsoft/telechips-linux
fs/cifs/dir.c
780
20277
/* * fs/cifs/dir.c * * vfs operations that deal with dentries * * Copyright (C) International Business Machines Corp., 2002,2009 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/file.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" static void renew_parental_timestamps(struct dentry *direntry) { /* BB check if there is a way to get the kernel to do this or if we really need this */ do { direntry->d_time = jiffies; direntry = direntry->d_parent; } while (!IS_ROOT(direntry)); } /* Note: caller must free return buffer */ char * build_path_from_dentry(struct dentry *direntry) { struct dentry *temp; int namelen; int dfsplen; char *full_path; char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); unsigned seq; if (direntry == NULL) return NULL; /* not much we can do if dentry is freed and we need to reopen the file after it was closed implicitly when the server crashed */ dirsep = CIFS_DIR_SEP(cifs_sb); if (tcon->Flags & SMB_SHARE_IS_IN_DFS) dfsplen = strnlen(tcon->treeName, 
MAX_TREE_SIZE + 1); else dfsplen = 0; cifs_bp_rename_retry: namelen = dfsplen; seq = read_seqbegin(&rename_lock); rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { namelen += (1 + temp->d_name.len); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); rcu_read_unlock(); return NULL; } } rcu_read_unlock(); full_path = kmalloc(namelen+1, GFP_KERNEL); if (full_path == NULL) return full_path; full_path[namelen] = 0; /* trailing null */ rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { spin_lock(&temp->d_lock); namelen -= 1 + temp->d_name.len; if (namelen < 0) { spin_unlock(&temp->d_lock); break; } else { full_path[namelen] = dirsep; strncpy(full_path + namelen + 1, temp->d_name.name, temp->d_name.len); cFYI(0, "name: %s", full_path + namelen); } spin_unlock(&temp->d_lock); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); rcu_read_unlock(); kfree(full_path); return NULL; } } rcu_read_unlock(); if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { cFYI(1, "did not end path lookup where expected. 
namelen=%d " "dfsplen=%d", namelen, dfsplen); /* presumably this is only possible if racing with a rename of one of the parent directories (we can not lock the dentries above us to prevent this, but retrying should be harmless) */ kfree(full_path); goto cifs_bp_rename_retry; } /* DIR_SEP already set for byte 0 / vs \ but not for subsequent slashes in prepath which currently must be entered the right way - not sure if there is an alternative since the '\' is a valid posix character so we can not switch those safely to '/' if any are found in the middle of the prepath */ /* BB test paths to Windows with '/' in the midst of prepath */ if (dfsplen) { strncpy(full_path, tcon->treeName, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < dfsplen; i++) { if (full_path[i] == '\\') full_path[i] = '/'; } } } return full_path; } /* Inode operations in similar order to how they appear in Linux file fs.h */ int cifs_create(struct inode *inode, struct dentry *direntry, int mode, struct nameidata *nd) { int rc = -ENOENT; int xid; int create_options = CREATE_NOT_DIR; __u32 oplock = 0; int oflags; /* * BB below access is probably too much for mknod to request * but we have to do query and setpathinfo so requesting * less could fail (unless we want to request getatr and setatr * permissions (only). At least for POSIX we do not have to * request so much. 
*/ int desiredAccess = GENERIC_READ | GENERIC_WRITE; __u16 fileHandle; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; char *full_path = NULL; FILE_ALL_INFO *buf = NULL; struct inode *newinode = NULL; int disposition = FILE_OVERWRITE_IF; xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return PTR_ERR(tlink); } tcon = tlink_tcon(tlink); if (oplockEnabled) oplock = REQ_OPLOCK; if (nd && (nd->flags & LOOKUP_OPEN)) oflags = nd->intent.open.file->f_flags; else oflags = O_RDONLY | O_CREAT; full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto cifs_create_out; } if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); /* EIO could indicate that (posix open) operation is not supported, despite what server claimed in capability negotiation. EREMOTE indicates DFS junction, which is not handled in posix open */ if (rc == 0) { if (newinode == NULL) /* query inode info */ goto cifs_create_get_file_info; else /* success, no need to query */ goto cifs_create_set_dentry; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP) && (rc != -EINVAL)) goto cifs_create_out; /* else fallthrough to retry, using older open call, this is case where server does not support this SMB level, and falsely claims capability (also get here for DFS case which should be rare for path not covered on files) */ } if (nd && (nd->flags & LOOKUP_OPEN)) { /* if the file is going to stay open, then we need to set the desired access properly */ desiredAccess = 0; if (OPEN_FMODE(oflags) & FMODE_READ) desiredAccess |= GENERIC_READ; /* is this too little? 
*/ if (OPEN_FMODE(oflags) & FMODE_WRITE) desiredAccess |= GENERIC_WRITE; if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) disposition = FILE_CREATE; else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) disposition = FILE_OVERWRITE_IF; else if ((oflags & O_CREAT) == O_CREAT) disposition = FILE_OPEN_IF; else cFYI(1, "Create flag not set in create function"); } /* BB add processing to set equivalent of mode - e.g. via CreateX with ACLs */ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; goto cifs_create_out; } /* * if we're not using unix extensions, see if we need to set * ATTR_READONLY on the create call */ if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; if (tcon->ses->capabilities & CAP_NT_SMBS) rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); else rc = -EIO; /* no NT SMB support fall into legacy open below */ if (rc == -EIO) { /* old server, retry the open legacy style */ rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } if (rc) { cFYI(1, "cifs_create returned 0x%x", rc); goto cifs_create_out; } /* If Open reported that we actually created a file then we now have to set the mode if possible */ if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = (__u64) inode->i_gid; else args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle, current->tgid); } 
else { /* BB implement mode setting via Windows security descriptors e.g. */ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/ /* Could set r/o dos attribute if mode & 0222 == 0 */ } cifs_create_get_file_info: /* server might mask mode so we have to query for it */ if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, &fileHandle); if (newinode) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) newinode->i_mode = mode; if ((oplock & CIFS_CREATE_ACTION) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) newinode->i_gid = inode->i_gid; else newinode->i_gid = current_fsgid(); } } } cifs_create_set_dentry: if (rc == 0) d_instantiate(direntry, newinode); else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); if (newinode && nd && (nd->flags & LOOKUP_OPEN)) { struct cifsFileInfo *pfile_info; struct file *filp; filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, tcon, fileHandle); goto cifs_create_out; } pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (pfile_info == NULL) { fput(filp); CIFSSMBClose(xid, tcon, fileHandle); rc = -ENOMEM; } } else { CIFSSMBClose(xid, tcon, fileHandle); } cifs_create_out: kfree(buf); kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return rc; } int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, dev_t device_number) { int rc = -EPERM; int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; struct cifs_io_parms io_parms; char *full_path = NULL; struct inode *newinode = NULL; int oplock = 0; u16 fileHandle; FILE_ALL_INFO *buf = NULL; unsigned int bytes_written; struct win_dev *pdev; if (!old_valid_dev(device_number)) return -EINVAL; cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if 
(IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = GetXid(); full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto mknod_out; } if (pTcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode & ~current_umask(), .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = device_number, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); if (rc == 0) d_instantiate(direntry, newinode); goto mknod_out; } if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) goto mknod_out; cFYI(1, "sfu compat create special file"); buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { kfree(full_path); rc = -ENOMEM; FreeXid(xid); return rc; } /* FIXME: would WRITE_OWNER | WRITE_DAC be better? 
*/ rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE, GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) goto mknod_out; /* BB Do not bother to decode buf since no local inode yet to put * timestamps in, but we can reuse it safely */ pdev = (struct win_dev *)buf; io_parms.netfid = fileHandle; io_parms.pid = current->tgid; io_parms.tcon = pTcon; io_parms.offset = 0; io_parms.length = sizeof(struct win_dev); if (S_ISCHR(mode)) { memcpy(pdev->type, "IntxCHR", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev, NULL, 0); } else if (S_ISBLK(mode)) { memcpy(pdev->type, "IntxBLK", 8); pdev->major = cpu_to_le64(MAJOR(device_number)); pdev->minor = cpu_to_le64(MINOR(device_number)); rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev, NULL, 0); } /* else if (S_ISFIFO) */ CIFSSMBClose(xid, pTcon, fileHandle); d_drop(direntry); /* FIXME: add code here to set EAs */ mknod_out: kfree(full_path); kfree(buf); FreeXid(xid); cifs_put_tlink(tlink); return rc; } struct dentry * cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, struct nameidata *nd) { int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ __u32 oplock = 0; __u16 fileHandle = 0; bool posix_open = false; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; struct cifsFileInfo *cfile; struct inode *newInode = NULL; char *full_path = NULL; struct file *filp; xid = GetXid(); cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p", parent_dir_inode, direntry->d_name.name, direntry); /* check whether path exists */ cifs_sb = CIFS_SB(parent_dir_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { FreeXid(xid); return (struct dentry *)tlink; } pTcon = tlink_tcon(tlink); /* * Don't allow the separator 
character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { int i; for (i = 0; i < direntry->d_name.len; i++) if (direntry->d_name.name[i] == '\\') { cFYI(1, "Invalid file name"); rc = -EINVAL; goto lookup_out; } } /* * O_EXCL: optimize away the lookup, but don't hash the dentry. Let * the VFS handle the create. */ if (nd && (nd->flags & LOOKUP_EXCL)) { d_instantiate(direntry, NULL); rc = 0; goto lookup_out; } /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) in which we already have the sb rename sem */ full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; goto lookup_out; } if (direntry->d_inode != NULL) { cFYI(1, "non-NULL inode in lookup"); } else { cFYI(1, "NULL inode in lookup"); } cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode); /* Posix open is only called (at lookup time) for file create now. * For opens (rather than creates), because we do not know if it * is a file or directory yet, and current Samba no longer allows * us to do posix open on dirs, we could end up wasting an open call * on what turns out to be a dir. For file opens, we wait to call posix * open till cifs_open. It could be added here (lookup) in the future * but the performance tradeoff of the extra network request when EISDIR * or EACCES is returned would have to be weighed against the 50% * reduction in network traffic in the other paths. 
*/ if (pTcon->unix_ext) { if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && (nd->intent.open.file->f_flags & O_CREAT)) { rc = cifs_posix_open(full_path, &newInode, parent_dir_inode->i_sb, nd->intent.open.create_mode, nd->intent.open.file->f_flags, &oplock, &fileHandle, xid); /* * The check below works around a bug in POSIX * open in samba versions 3.3.1 and earlier where * open could incorrectly fail with invalid parameter. * If either that or op not supported returned, follow * the normal lookup. */ if ((rc == 0) || (rc == -ENOENT)) posix_open = true; else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP)) pTcon->broken_posix_open = true; } if (!posix_open) rc = cifs_get_inode_info_unix(&newInode, full_path, parent_dir_inode->i_sb, xid); } else rc = cifs_get_inode_info(&newInode, full_path, NULL, parent_dir_inode->i_sb, xid, NULL); if ((rc == 0) && (newInode != NULL)) { d_add(direntry, newInode); if (posix_open) { filp = lookup_instantiate_filp(nd, direntry, generic_file_open); if (IS_ERR(filp)) { rc = PTR_ERR(filp); CIFSSMBClose(xid, pTcon, fileHandle); goto lookup_out; } cfile = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); if (cfile == NULL) { fput(filp); CIFSSMBClose(xid, pTcon, fileHandle); rc = -ENOMEM; goto lookup_out; } } /* since paths are not looked up by component - the parent directories are presumed to be good here */ renew_parental_timestamps(direntry); } else if (rc == -ENOENT) { rc = 0; direntry->d_time = jiffies; d_add(direntry, NULL); /* if it was once a directory (but how can we tell?) 
we could do shrink_dcache_parent(direntry); */ } else if (rc != -EACCES) { cERROR(1, "Unexpected lookup error %d", rc); /* We special case check for Access Denied - since that is a common return code */ } lookup_out: kfree(full_path); cifs_put_tlink(tlink); FreeXid(xid); return ERR_PTR(rc); } static int cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) { if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; if (direntry->d_inode) { if (cifs_revalidate_dentry(direntry)) return 0; else return 1; } /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!nd) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; } if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) return 0; return 1; } /* static int cifs_d_delete(struct dentry *direntry) { int rc = 0; cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name); return rc; } */ const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_automount = cifs_dfs_d_automount, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *q) { struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls; unsigned long hash; int i; hash = init_name_hash(); for (i = 0; i < q->len; i++) hash = partial_name_hash(nls_tolower(codepage, q->name[i]), hash); q->hash = end_name_hash(hash); return 0; } static int cifs_ci_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls; if ((name->len == 
len) && (nls_strnicmp(codepage, name->name, str, len) == 0)) return 0; return 1; } const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, .d_automount = cifs_dfs_d_automount, };
gpl-2.0
actionmask/TWM-T1-Kernel
fs/fifo.c
780
3280
/* * linux/fs/fifo.c * * written by Paul H. Hargrove * * Fixes: * 10-06-1999, AV: fixed OOM handling in fifo_open(), moved * initialization there, switched to external * allocation of pipe_inode_info. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/pipe_fs_i.h> static void wait_for_partner(struct inode* inode, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(inode->i_pipe); if (signal_pending(current)) break; } } static void wake_up_partner(struct inode* inode) { wake_up_interruptible(&inode->i_pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; int ret; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; if (!pipe) { ret = -ENOMEM; pipe = alloc_pipe_info(inode); if (!pipe) goto err_nocleanup; inode->i_pipe = pipe; } filp->f_version = 0; /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ filp->f_op = &read_pipefifo_fops; pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(inode); if (!pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress POLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { wait_for_partner(inode, &pipe->w_counter); if(signal_pending(current)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. 
*/ ret = -ENXIO; if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; filp->f_op = &write_pipefifo_fops; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(inode); if (!pipe->readers) { wait_for_partner(inode, &pipe->r_counter); if (signal_pending(current)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. * This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ filp->f_op = &rdwr_pipefifo_fops; pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(inode); break; default: ret = -EINVAL; goto err; } /* Ok! */ mutex_unlock(&inode->i_mutex); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: if (!pipe->readers && !pipe->writers) free_pipe_info(inode); err_nocleanup: mutex_unlock(&inode->i_mutex); return ret; } /* * Dummy default file-operations: the only thing this does * is contain the open that then fills in the correct operations * depending on the access mode of the file... */ const struct file_operations def_fifo_fops = { .open = fifo_open, /* will set read_ or write_pipefifo_fops */ };
gpl-2.0
KOala888/GB_kernel
linux_kernel_galaxyplayer-master/arch/arm/mach-omap2/gpmc-onenand.c
1036
9348
/* * linux/arch/arm/mach-omap2/gpmc-onenand.c * * Copyright (C) 2006 - 2009 Nokia Corporation * Contacts: Juha Yrjola * Tony Lindgren * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mtd/onenand_regs.h> #include <linux/io.h> #include <asm/mach/flash.h> #include <plat/onenand.h> #include <plat/board.h> #include <plat/gpmc.h> static struct omap_onenand_platform_data *gpmc_onenand_data; static struct platform_device gpmc_onenand_device = { .name = "omap2-onenand", .id = -1, }; static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) { struct gpmc_timings t; u32 reg; int err; const int t_cer = 15; const int t_avdp = 12; const int t_aavdh = 7; const int t_ce = 76; const int t_aa = 76; const int t_oe = 20; const int t_cez = 20; /* max of t_cez, t_oez */ const int t_ds = 30; const int t_wpl = 40; const int t_wph = 30; /* Ensure sync read and sync write are disabled */ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); memset(&t, 0, sizeof(t)); t.sync_clk = 0; t.cs_on = 0; t.adv_on = 0; /* Read */ t.adv_rd_off = gpmc_round_ns_to_ticks(max_t(int, t_avdp, t_cer)); t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(t_aavdh); t.access = t.adv_on + gpmc_round_ns_to_ticks(t_aa); t.access = max_t(int, t.access, t.cs_on + gpmc_round_ns_to_ticks(t_ce)); t.access = max_t(int, t.access, t.oe_on + gpmc_round_ns_to_ticks(t_oe)); t.oe_off = t.access + gpmc_round_ns_to_ticks(1); t.cs_rd_off = t.oe_off; t.rd_cycle = t.cs_rd_off + gpmc_round_ns_to_ticks(t_cez); /* Write */ t.adv_wr_off = t.adv_rd_off; t.we_on = t.oe_on; if (cpu_is_omap34xx()) { t.wr_data_mux_bus = t.we_on; t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds); } t.we_off = t.we_on 
+ gpmc_round_ns_to_ticks(t_wpl); t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph); t.wr_cycle = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez); /* Configure GPMC for asynchronous read */ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, GPMC_CONFIG1_DEVICESIZE_16 | GPMC_CONFIG1_MUXADDDATA); err = gpmc_cs_set_timings(cs, &t); if (err) return err; /* Ensure sync read and sync write are disabled */ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); return 0; } static void set_onenand_cfg(void __iomem *onenand_base, int latency, int sync_read, int sync_write, int hf) { u32 reg; reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9)); reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) | ONENAND_SYS_CFG1_BL_16; if (sync_read) reg |= ONENAND_SYS_CFG1_SYNC_READ; else reg &= ~ONENAND_SYS_CFG1_SYNC_READ; if (sync_write) reg |= ONENAND_SYS_CFG1_SYNC_WRITE; else reg &= ~ONENAND_SYS_CFG1_SYNC_WRITE; if (hf) reg |= ONENAND_SYS_CFG1_HF; else reg &= ~ONENAND_SYS_CFG1_HF; writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); } static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, void __iomem *onenand_base, int freq) { struct gpmc_timings t; const int t_cer = 15; const int t_avdp = 12; const int t_cez = 20; /* max of t_cez, t_oez */ const int t_ds = 30; const int t_wpl = 40; const int t_wph = 30; int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo; int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency; int first_time = 0, hf = 0, sync_read = 0, sync_write = 0; int err, ticks_cez; int cs = cfg->cs; u32 reg; if (cfg->flags & ONENAND_SYNC_READ) { sync_read = 1; } else if (cfg->flags & ONENAND_SYNC_READWRITE) { sync_read = 1; sync_write = 1; } else return omap2_onenand_set_async_mode(cs, onenand_base); if (!freq) { /* Very first call freq is not known */ err = 
omap2_onenand_set_async_mode(cs, onenand_base); if (err) return err; reg = readw(onenand_base + ONENAND_REG_VERSION_ID); switch ((reg >> 4) & 0xf) { case 0: freq = 40; break; case 1: freq = 54; break; case 2: freq = 66; break; case 3: freq = 83; break; case 4: freq = 104; break; default: freq = 54; break; } first_time = 1; } switch (freq) { case 83: min_gpmc_clk_period = 12; /* 83 MHz */ t_ces = 5; t_avds = 4; t_avdh = 2; t_ach = 6; t_aavdh = 6; t_rdyo = 9; break; case 66: min_gpmc_clk_period = 15; /* 66 MHz */ t_ces = 6; t_avds = 5; t_avdh = 2; t_ach = 6; t_aavdh = 6; t_rdyo = 11; break; default: min_gpmc_clk_period = 18; /* 54 MHz */ t_ces = 7; t_avds = 7; t_avdh = 7; t_ach = 9; t_aavdh = 7; t_rdyo = 15; sync_write = 0; break; } tick_ns = gpmc_ticks_to_ns(1); div = gpmc_cs_calc_divider(cs, min_gpmc_clk_period); gpmc_clk_ns = gpmc_ticks_to_ns(div); if (gpmc_clk_ns < 15) /* >66Mhz */ hf = 1; if (hf) latency = 6; else if (gpmc_clk_ns >= 25) /* 40 MHz*/ latency = 3; else latency = 4; if (first_time) set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf); if (div == 1) { reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2); reg |= (1 << 7); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg); reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3); reg |= (1 << 7); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg); reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4); reg |= (1 << 7); reg |= (1 << 23); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg); } else { reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2); reg &= ~(1 << 7); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg); reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3); reg &= ~(1 << 7); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg); reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4); reg &= ~(1 << 7); reg &= ~(1 << 23); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg); } /* Set synchronous read timings */ memset(&t, 0, sizeof(t)); t.sync_clk = min_gpmc_clk_period; t.cs_on = 0; t.adv_on = 0; fclk_offset_ns = gpmc_round_ns_to_ticks(max_t(int, t_ces, t_avds)); fclk_offset = 
gpmc_ns_to_ticks(fclk_offset_ns); t.page_burst_access = gpmc_clk_ns; /* Read */ t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh)); t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach)); t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div); t.oe_off = t.access + gpmc_round_ns_to_ticks(1); t.cs_rd_off = t.oe_off; ticks_cez = ((gpmc_ns_to_ticks(t_cez) + div - 1) / div) * div; t.rd_cycle = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div + ticks_cez); /* Write */ if (sync_write) { t.adv_wr_off = t.adv_rd_off; t.we_on = 0; t.we_off = t.cs_rd_off; t.cs_wr_off = t.cs_rd_off; t.wr_cycle = t.rd_cycle; if (cpu_is_omap34xx()) { t.wr_data_mux_bus = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(min_gpmc_clk_period + t_rdyo)); t.wr_access = t.access; } } else { t.adv_wr_off = gpmc_round_ns_to_ticks(max_t(int, t_avdp, t_cer)); t.we_on = t.adv_wr_off + gpmc_round_ns_to_ticks(t_aavdh); t.we_off = t.we_on + gpmc_round_ns_to_ticks(t_wpl); t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph); t.wr_cycle = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez); if (cpu_is_omap34xx()) { t.wr_data_mux_bus = t.we_on; t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds); } } /* Configure GPMC for synchronous read */ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, GPMC_CONFIG1_WRAPBURST_SUPP | GPMC_CONFIG1_READMULTIPLE_SUPP | (sync_read ? GPMC_CONFIG1_READTYPE_SYNC : 0) | (sync_write ? GPMC_CONFIG1_WRITEMULTIPLE_SUPP : 0) | (sync_write ? GPMC_CONFIG1_WRITETYPE_SYNC : 0) | GPMC_CONFIG1_CLKACTIVATIONTIME(fclk_offset) | GPMC_CONFIG1_PAGE_LEN(2) | (cpu_is_omap34xx() ? 
0 : (GPMC_CONFIG1_WAIT_READ_MON | GPMC_CONFIG1_WAIT_PIN_SEL(0))) | GPMC_CONFIG1_DEVICESIZE_16 | GPMC_CONFIG1_DEVICETYPE_NOR | GPMC_CONFIG1_MUXADDDATA); err = gpmc_cs_set_timings(cs, &t); if (err) return err; set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf); return 0; } static int gpmc_onenand_setup(void __iomem *onenand_base, int freq) { struct device *dev = &gpmc_onenand_device.dev; /* Set sync timings in GPMC */ if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base, freq) < 0) { dev_err(dev, "Unable to set synchronous mode\n"); return -EINVAL; } return 0; } void __init gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) { gpmc_onenand_data = _onenand_data; gpmc_onenand_data->onenand_setup = gpmc_onenand_setup; gpmc_onenand_device.dev.platform_data = gpmc_onenand_data; if (cpu_is_omap24xx() && (gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) { printk(KERN_ERR "Onenand using only SYNC_READ on 24xx\n"); gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE; gpmc_onenand_data->flags |= ONENAND_SYNC_READ; } if (platform_device_register(&gpmc_onenand_device) < 0) { printk(KERN_ERR "Unable to register OneNAND device\n"); return; } }
gpl-2.0
HTC-MSM8916/android_kernel_htc_msm8916
fs/ceph/locks.c
2060
8254
#include <linux/ceph/ceph_debug.h> #include <linux/file.h> #include <linux/namei.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/pagelist.h> /** * Implement fcntl and flock locking functions. */ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, int cmd, u8 wait, struct file_lock *fl) { struct inode *inode = file_inode(file); struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_mds_request *req; int err; u64 length = 0; req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); /* mds requires start and length rather than start and end */ if (LLONG_MAX == fl->fl_end) length = 0; else length = fl->fl_end - fl->fl_start + 1; dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " "length: %llu, wait: %d, type: %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type); req->r_args.filelock_change.rule = lock_type; req->r_args.filelock_change.type = cmd; req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); /* This should be adjusted, but I'm not sure if namespaces actually get id numbers*/ req->r_args.filelock_change.pid_namespace = cpu_to_le64((u64)(unsigned long)fl->fl_nspid); req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); req->r_args.filelock_change.length = cpu_to_le64(length); req->r_args.filelock_change.wait = wait; err = ceph_mdsc_do_request(mdsc, inode, req); if ( operation == CEPH_MDS_OP_GETFILELOCK){ fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid); if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) fl->fl_type = F_RDLCK; else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type) fl->fl_type = F_WRLCK; else fl->fl_type = F_UNLCK; fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + 
le64_to_cpu(req->r_reply_info.filelock_reply->length); if (length >= 1) fl->fl_end = length -1; else fl->fl_end = 0; } ceph_mdsc_put_request(req); dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type, err); return err; } /** * Attempt to set an fcntl lock. * For now, this just goes away to the server. Later it may be more awesome. */ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 0; u16 op = CEPH_MDS_OP_SETFILELOCK; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_lock, fl_pid:%d", fl->fl_pid); /* set wait bit as appropriate, then make command as Ceph expects it*/ if (F_SETLKW == cmd) wait = 1; if (F_GETLK == cmd) op = CEPH_MDS_OP_GETFILELOCK; if (F_RDLCK == fl->fl_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->fl_type) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); if (!err) { if ( op != CEPH_MDS_OP_GETFILELOCK ){ dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { /* undo! This should only happen if * the kernel detects local * deadlock. 
*/ ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on posix_lock_file, undid lock", err); } } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; } int ceph_flock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 1; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_flock, fl_pid:%d", fl->fl_pid); /* set wait bit, then clear it out of cmd*/ if (cmd & LOCK_NB) wait = 0; cmd = cmd & (LOCK_SH | LOCK_EX | LOCK_UN); /* set command sequence that Ceph wants to see: shared lock, exclusive lock, or unlock */ if (LOCK_SH == cmd) lock_cmd = CEPH_LOCK_SHARED; else if (LOCK_EX == cmd) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, lock_cmd, wait, fl); if (!err) { err = flock_lock_file_wait(file, fl); if (err) { ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on flock_lock_file_wait, undid lock", err); } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; } /** * Must be called with BKL already held. Fills in the passed * counter variables, so you can prepare pagelist metadata before calling * ceph_encode_locks. */ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) { struct file_lock *lock; *fcntl_count = 0; *flock_count = 0; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) ++(*fcntl_count); else if (lock->fl_flags & FL_FLOCK) ++(*flock_count); } dout("counted %d flock locks and %d fcntl locks", *flock_count, *fcntl_count); } /** * Encode the flock and fcntl locks for the given inode into the ceph_filelock * array. Must be called with lock_flocks() already held. 
* If we encounter more of a specific lock type than expected, return -ENOSPC. */ int ceph_encode_locks_to_buffer(struct inode *inode, struct ceph_filelock *flocks, int num_fcntl_locks, int num_flock_locks) { struct file_lock *lock; int err = 0; int seen_fcntl = 0; int seen_flock = 0; int l = 0; dout("encoding %d flock and %d fcntl locks", num_flock_locks, num_fcntl_locks); for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; goto fail; } err = lock_to_ceph_filelock(lock, &flocks[l]); if (err) goto fail; ++l; } } for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_FLOCK) { ++seen_flock; if (seen_flock > num_flock_locks) { err = -ENOSPC; goto fail; } err = lock_to_ceph_filelock(lock, &flocks[l]); if (err) goto fail; ++l; } } fail: return err; } /** * Copy the encoded flock and fcntl locks into the pagelist. * Format is: #fcntl locks, sequential fcntl locks, #flock locks, * sequential flock locks. * Returns zero on success. 
*/ int ceph_locks_to_pagelist(struct ceph_filelock *flocks, struct ceph_pagelist *pagelist, int num_fcntl_locks, int num_flock_locks) { int err = 0; __le32 nlocks; nlocks = cpu_to_le32(num_fcntl_locks); err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); if (err) goto out_fail; err = ceph_pagelist_append(pagelist, flocks, num_fcntl_locks * sizeof(*flocks)); if (err) goto out_fail; nlocks = cpu_to_le32(num_flock_locks); err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); if (err) goto out_fail; err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks], num_flock_locks * sizeof(*flocks)); out_fail: return err; } /* * Given a pointer to a lock, convert it to a ceph filelock */ int lock_to_ceph_filelock(struct file_lock *lock, struct ceph_filelock *cephlock) { int err = 0; cephlock->start = cpu_to_le64(lock->fl_start); cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); cephlock->client = cpu_to_le64(0); cephlock->pid = cpu_to_le64(lock->fl_pid); cephlock->pid_namespace = cpu_to_le64((u64)(unsigned long)lock->fl_nspid); switch (lock->fl_type) { case F_RDLCK: cephlock->type = CEPH_LOCK_SHARED; break; case F_WRLCK: cephlock->type = CEPH_LOCK_EXCL; break; case F_UNLCK: cephlock->type = CEPH_LOCK_UNLOCK; break; default: dout("Have unknown lock type %d", lock->fl_type); err = -EINVAL; } return err; }
gpl-2.0
LiquidSmooth-Devices/android_kernel_samsung_hlte
drivers/video/fbcmap.c
2316
8956
/* * linux/drivers/video/fbcmap.c -- Colormap handling for frame buffer devices * * Created 15 Jun 1997 by Geert Uytterhoeven * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/string.h> #include <linux/module.h> #include <linux/fb.h> #include <linux/slab.h> #include <linux/uaccess.h> static u16 red2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 green2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 blue2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 red4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 green4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 blue4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 red8[] __read_mostly = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa }; static u16 green8[] __read_mostly = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa }; static u16 blue8[] __read_mostly = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa }; static u16 red16[] __read_mostly = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa, 0x5555, 0x5555, 0x5555, 0x5555, 0xffff, 0xffff, 0xffff, 0xffff }; static u16 green16[] __read_mostly = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa, 0x5555, 0x5555, 0xffff, 0xffff, 0x5555, 0x5555, 0xffff, 0xffff }; static u16 blue16[] __read_mostly = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff }; static const struct fb_cmap default_2_colors = { .len=2, .red=red2, .green=green2, .blue=blue2 }; static const struct fb_cmap default_8_colors = { .len=8, .red=red8, .green=green8, .blue=blue8 }; static const struct fb_cmap default_4_colors = { .len=4, .red=red4, .green=green4, .blue=blue4 }; static const struct 
fb_cmap default_16_colors = { .len=16, .red=red16, .green=green16, .blue=blue16 }; /** * fb_alloc_cmap - allocate a colormap * @cmap: frame buffer colormap structure * @len: length of @cmap * @transp: boolean, 1 if there is transparency, 0 otherwise * @flags: flags for kmalloc memory allocation * * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. * * Returns negative errno on error, or zero on success. * */ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) { int size = len * sizeof(u16); int ret = -ENOMEM; if (cmap->len != len) { fb_dealloc_cmap(cmap); if (!len) return 0; cmap->red = kmalloc(size, flags); if (!cmap->red) goto fail; cmap->green = kmalloc(size, flags); if (!cmap->green) goto fail; cmap->blue = kmalloc(size, flags); if (!cmap->blue) goto fail; if (transp) { cmap->transp = kmalloc(size, flags); if (!cmap->transp) goto fail; } else { cmap->transp = NULL; } } cmap->start = 0; cmap->len = len; ret = fb_copy_cmap(fb_default_cmap(len), cmap); if (ret) goto fail; return 0; fail: fb_dealloc_cmap(cmap); return ret; } int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) { return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); } /** * fb_dealloc_cmap - deallocate a colormap * @cmap: frame buffer colormap structure * * Deallocates a colormap that was previously allocated with * fb_alloc_cmap(). * */ void fb_dealloc_cmap(struct fb_cmap *cmap) { kfree(cmap->red); kfree(cmap->green); kfree(cmap->blue); kfree(cmap->transp); cmap->red = cmap->green = cmap->blue = cmap->transp = NULL; cmap->len = 0; } /** * fb_copy_cmap - copy a colormap * @from: frame buffer colormap structure * @to: frame buffer colormap structure * * Copy contents of colormap from @from to @to. 
*/ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) { int tooff = 0, fromoff = 0; int size; if (!to || !from) return -EINVAL; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; size = to->len - tooff; if (size > (int) (from->len - fromoff)) size = from->len - fromoff; if (size <= 0) return -EINVAL; size *= sizeof(u16); if (from->red && to->red) memcpy(to->red+tooff, from->red+fromoff, size); if (from->green && to->green) memcpy(to->green+tooff, from->green+fromoff, size); if (from->blue && to->blue) memcpy(to->blue+tooff, from->blue+fromoff, size); if (from->transp && to->transp) memcpy(to->transp+tooff, from->transp+fromoff, size); return 0; } int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) { int tooff = 0, fromoff = 0; int size; if (!to || !from) return -EINVAL; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; if ((to->len <= tooff) || (from->len <= fromoff)) return -EINVAL; size = to->len - tooff; if (size > (int) (from->len - fromoff)) size = from->len - fromoff; size *= sizeof(u16); if (from->red && to->red) if (copy_to_user(to->red+tooff, from->red+fromoff, size)) return -EFAULT; if (from->green && to->green) if (copy_to_user(to->green+tooff, from->green+fromoff, size)) return -EFAULT; if (from->blue && to->blue) if (copy_to_user(to->blue+tooff, from->blue+fromoff, size)) return -EFAULT; if (from->transp && to->transp) if (copy_to_user(to->transp+tooff, from->transp+fromoff, size)) return -EFAULT; return 0; } /** * fb_set_cmap - set the colormap * @cmap: frame buffer colormap structure * @info: frame buffer info structure * * Sets the colormap @cmap for a screen of device @info. * * Returns negative errno on error, or zero on success. 
* */ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info) { int i, start, rc = 0; u16 *red, *green, *blue, *transp; u_int hred, hgreen, hblue, htransp = 0xffff; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; if (start < 0 || (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap)) return -EINVAL; if (info->fbops->fb_setcmap) { rc = info->fbops->fb_setcmap(cmap, info); } else { for (i = 0; i < cmap->len; i++) { hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info)) break; } } if (rc == 0) fb_copy_cmap(cmap, &info->cmap); return rc; } int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) { int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; if (size < 0 || size < cmap->len) return -E2BIG; memset(&umap, 0, sizeof(struct fb_cmap)); rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, GFP_KERNEL); if (rc) return rc; if (copy_from_user(umap.red, cmap->red, size) || copy_from_user(umap.green, cmap->green, size) || copy_from_user(umap.blue, cmap->blue, size) || (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) { rc = -EFAULT; goto out; } umap.start = cmap->start; if (!lock_fb_info(info)) { rc = -ENODEV; goto out; } if (cmap->start < 0 || (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap)) { rc = -EINVAL; goto out1; } rc = fb_set_cmap(&umap, info); out1: unlock_fb_info(info); out: fb_dealloc_cmap(&umap); return rc; } /** * fb_default_cmap - get default colormap * @len: size of palette for a depth * * Gets the default colormap for a specific screen depth. @len * is the size of the palette for a particular screen depth. * * Returns pointer to a frame buffer colormap structure. 
* */ const struct fb_cmap *fb_default_cmap(int len) { if (len <= 2) return &default_2_colors; if (len <= 4) return &default_4_colors; if (len <= 8) return &default_8_colors; return &default_16_colors; } /** * fb_invert_cmaps - invert all defaults colormaps * * Invert all default colormaps. * */ void fb_invert_cmaps(void) { u_int i; for (i = 0; i < ARRAY_SIZE(red2); i++) { red2[i] = ~red2[i]; green2[i] = ~green2[i]; blue2[i] = ~blue2[i]; } for (i = 0; i < ARRAY_SIZE(red4); i++) { red4[i] = ~red4[i]; green4[i] = ~green4[i]; blue4[i] = ~blue4[i]; } for (i = 0; i < ARRAY_SIZE(red8); i++) { red8[i] = ~red8[i]; green8[i] = ~green8[i]; blue8[i] = ~blue8[i]; } for (i = 0; i < ARRAY_SIZE(red16); i++) { red16[i] = ~red16[i]; green16[i] = ~green16[i]; blue16[i] = ~blue16[i]; } } /* * Visible symbols for modules */ EXPORT_SYMBOL(fb_alloc_cmap); EXPORT_SYMBOL(fb_dealloc_cmap); EXPORT_SYMBOL(fb_copy_cmap); EXPORT_SYMBOL(fb_set_cmap); EXPORT_SYMBOL(fb_default_cmap); EXPORT_SYMBOL(fb_invert_cmaps);
gpl-2.0
crimsonthunder/Samsung_mm
drivers/power/da9030_battery.c
3084
16076
/* * Battery charger driver for Dialog Semiconductor DA9030 * * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/da903x.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #define DA9030_FAULT_LOG 0x0a #define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) #define DA9030_FAULT_LOG_VBAT_OVER (1 << 4) #define DA9030_CHARGE_CONTROL 0x28 #define DA9030_CHRG_CHARGER_ENABLE (1 << 7) #define DA9030_ADC_MAN_CONTROL 0x30 #define DA9030_ADC_TBATREF_ENABLE (1 << 5) #define DA9030_ADC_LDO_INT_ENABLE (1 << 4) #define DA9030_ADC_AUTO_CONTROL 0x31 #define DA9030_ADC_TBAT_ENABLE (1 << 5) #define DA9030_ADC_VBAT_IN_TXON (1 << 4) #define DA9030_ADC_VCH_ENABLE (1 << 3) #define DA9030_ADC_ICH_ENABLE (1 << 2) #define DA9030_ADC_VBAT_ENABLE (1 << 1) #define DA9030_ADC_AUTO_SLEEP_ENABLE (1 << 0) #define DA9030_VBATMON 0x32 #define DA9030_VBATMONTXON 0x33 #define DA9030_TBATHIGHP 0x34 #define DA9030_TBATHIGHN 0x35 #define DA9030_TBATLOW 0x36 #define DA9030_VBAT_RES 0x41 #define DA9030_VBATMIN_RES 0x42 #define DA9030_VBATMINTXON_RES 0x43 #define DA9030_ICHMAX_RES 0x44 #define DA9030_ICHMIN_RES 0x45 #define DA9030_ICHAVERAGE_RES 0x46 #define DA9030_VCHMAX_RES 0x47 #define DA9030_VCHMIN_RES 0x48 #define DA9030_TBAT_RES 0x49 struct da9030_adc_res { uint8_t vbat_res; uint8_t vbatmin_res; uint8_t vbatmintxon; uint8_t ichmax_res; uint8_t ichmin_res; uint8_t ichaverage_res; uint8_t vchmax_res; uint8_t vchmin_res; uint8_t tbat_res; uint8_t adc_in4_res; uint8_t adc_in5_res; }; struct 
da9030_battery_thresholds { int tbat_low; int tbat_high; int tbat_restart; int vbat_low; int vbat_crit; int vbat_charge_start; int vbat_charge_stop; int vbat_charge_restart; int vcharge_min; int vcharge_max; }; struct da9030_charger { struct power_supply psy; struct device *master; struct da9030_adc_res adc; struct delayed_work work; unsigned int interval; struct power_supply_info *battery_info; struct da9030_battery_thresholds thresholds; unsigned int charge_milliamp; unsigned int charge_millivolt; /* charger status */ bool chdet; uint8_t fault; int mA; int mV; bool is_on; struct notifier_block nb; /* platform callbacks for battery low and critical events */ void (*battery_low)(void); void (*battery_critical)(void); struct dentry *debug_file; }; static inline int da9030_reg_to_mV(int reg) { return ((reg * 2650) >> 8) + 2650; } static inline int da9030_millivolt_to_reg(int mV) { return ((mV - 2650) << 8) / 2650; } static inline int da9030_reg_to_mA(int reg) { return ((reg * 24000) >> 8) / 15; } #ifdef CONFIG_DEBUG_FS static int bat_debug_show(struct seq_file *s, void *data) { struct da9030_charger *charger = s->private; seq_printf(s, "charger is %s\n", charger->is_on ? 
"on" : "off"); if (charger->chdet) { seq_printf(s, "iset = %dmA, vset = %dmV\n", charger->mA, charger->mV); } seq_printf(s, "vbat_res = %d (%dmV)\n", charger->adc.vbat_res, da9030_reg_to_mV(charger->adc.vbat_res)); seq_printf(s, "vbatmin_res = %d (%dmV)\n", charger->adc.vbatmin_res, da9030_reg_to_mV(charger->adc.vbatmin_res)); seq_printf(s, "vbatmintxon = %d (%dmV)\n", charger->adc.vbatmintxon, da9030_reg_to_mV(charger->adc.vbatmintxon)); seq_printf(s, "ichmax_res = %d (%dmA)\n", charger->adc.ichmax_res, da9030_reg_to_mV(charger->adc.ichmax_res)); seq_printf(s, "ichmin_res = %d (%dmA)\n", charger->adc.ichmin_res, da9030_reg_to_mA(charger->adc.ichmin_res)); seq_printf(s, "ichaverage_res = %d (%dmA)\n", charger->adc.ichaverage_res, da9030_reg_to_mA(charger->adc.ichaverage_res)); seq_printf(s, "vchmax_res = %d (%dmV)\n", charger->adc.vchmax_res, da9030_reg_to_mA(charger->adc.vchmax_res)); seq_printf(s, "vchmin_res = %d (%dmV)\n", charger->adc.vchmin_res, da9030_reg_to_mV(charger->adc.vchmin_res)); return 0; } static int debug_open(struct inode *inode, struct file *file) { return single_open(file, bat_debug_show, inode->i_private); } static const struct file_operations bat_debug_fops = { .open = debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { charger->debug_file = debugfs_create_file("charger", 0666, NULL, charger, &bat_debug_fops); return charger->debug_file; } static void da9030_bat_remove_debugfs(struct da9030_charger *charger) { debugfs_remove(charger->debug_file); } #else static inline struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { return NULL; } static inline void da9030_bat_remove_debugfs(struct da9030_charger *charger) { } #endif static inline void da9030_read_adc(struct da9030_charger *charger, struct da9030_adc_res *adc) { da903x_reads(charger->master, DA9030_VBAT_RES, sizeof(*adc), (uint8_t *)adc); } static void 
da9030_charger_update_state(struct da9030_charger *charger) { uint8_t val; da903x_read(charger->master, DA9030_CHARGE_CONTROL, &val); charger->is_on = (val & DA9030_CHRG_CHARGER_ENABLE) ? 1 : 0; charger->mA = ((val >> 3) & 0xf) * 100; charger->mV = (val & 0x7) * 50 + 4000; da9030_read_adc(charger, &charger->adc); da903x_read(charger->master, DA9030_FAULT_LOG, &charger->fault); charger->chdet = da903x_query_status(charger->master, DA9030_STATUS_CHDET); } static void da9030_set_charge(struct da9030_charger *charger, int on) { uint8_t val; if (on) { val = DA9030_CHRG_CHARGER_ENABLE; val |= (charger->charge_milliamp / 100) << 3; val |= (charger->charge_millivolt - 4000) / 50; charger->is_on = 1; } else { val = 0; charger->is_on = 0; } da903x_write(charger->master, DA9030_CHARGE_CONTROL, val); power_supply_changed(&charger->psy); } static void da9030_charger_check_state(struct da9030_charger *charger) { da9030_charger_update_state(charger); /* we wake or boot with external power on */ if (!charger->is_on) { if ((charger->chdet) && (charger->adc.vbat_res < charger->thresholds.vbat_charge_start)) { da9030_set_charge(charger, 1); } } else { /* Charger has been pulled out */ if (!charger->chdet) { da9030_set_charge(charger, 0); return; } if (charger->adc.vbat_res >= charger->thresholds.vbat_charge_stop) { da9030_set_charge(charger, 0); da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_charge_restart); } else if (charger->adc.vbat_res > charger->thresholds.vbat_low) { /* we are charging and passed LOW_THRESH, so upate DA9030 VBAT threshold */ da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_low); } if (charger->adc.vchmax_res > charger->thresholds.vcharge_max || charger->adc.vchmin_res < charger->thresholds.vcharge_min || /* Tempreture readings are negative */ charger->adc.tbat_res < charger->thresholds.tbat_high || charger->adc.tbat_res > charger->thresholds.tbat_low) { /* disable charger */ da9030_set_charge(charger, 0); } } } 
static void da9030_charging_monitor(struct work_struct *work) { struct da9030_charger *charger; charger = container_of(work, struct da9030_charger, work.work); da9030_charger_check_state(charger); /* reschedule for the next time */ schedule_delayed_work(&charger->work, charger->interval); } static enum power_supply_property da9030_battery_props[] = { POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, }; static void da9030_battery_check_status(struct da9030_charger *charger, union power_supply_propval *val) { if (charger->chdet) { if (charger->is_on) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; } else { val->intval = POWER_SUPPLY_STATUS_DISCHARGING; } } static void da9030_battery_check_health(struct da9030_charger *charger, union power_supply_propval *val) { if (charger->fault & DA9030_FAULT_LOG_OVER_TEMP) val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; else if (charger->fault & DA9030_FAULT_LOG_VBAT_OVER) val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; else val->intval = POWER_SUPPLY_HEALTH_GOOD; } static int da9030_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct da9030_charger *charger; charger = container_of(psy, struct da9030_charger, psy); switch (psp) { case POWER_SUPPLY_PROP_STATUS: da9030_battery_check_status(charger, val); break; case POWER_SUPPLY_PROP_HEALTH: da9030_battery_check_health(charger, val); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = charger->battery_info->technology; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = charger->battery_info->voltage_max_design; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = charger->battery_info->voltage_min_design; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: 
val->intval = da9030_reg_to_mV(charger->adc.vbat_res) * 1000; break; case POWER_SUPPLY_PROP_CURRENT_AVG: val->intval = da9030_reg_to_mA(charger->adc.ichaverage_res) * 1000; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = charger->battery_info->name; break; default: break; } return 0; } static void da9030_battery_vbat_event(struct da9030_charger *charger) { da9030_read_adc(charger, &charger->adc); if (charger->is_on) return; if (charger->adc.vbat_res < charger->thresholds.vbat_low) { /* set VBAT threshold for critical */ da903x_write(charger->master, DA9030_VBATMON, charger->thresholds.vbat_crit); if (charger->battery_low) charger->battery_low(); } else if (charger->adc.vbat_res < charger->thresholds.vbat_crit) { /* notify the system of battery critical */ if (charger->battery_critical) charger->battery_critical(); } } static int da9030_battery_event(struct notifier_block *nb, unsigned long event, void *data) { struct da9030_charger *charger = container_of(nb, struct da9030_charger, nb); switch (event) { case DA9030_EVENT_CHDET: cancel_delayed_work_sync(&charger->work); schedule_work(&charger->work.work); break; case DA9030_EVENT_VBATMON: da9030_battery_vbat_event(charger); break; case DA9030_EVENT_CHIOVER: case DA9030_EVENT_TBAT: da9030_set_charge(charger, 0); break; } return 0; } static void da9030_battery_convert_thresholds(struct da9030_charger *charger, struct da9030_battery_info *pdata) { charger->thresholds.tbat_low = pdata->tbat_low; charger->thresholds.tbat_high = pdata->tbat_high; charger->thresholds.tbat_restart = pdata->tbat_restart; charger->thresholds.vbat_low = da9030_millivolt_to_reg(pdata->vbat_low); charger->thresholds.vbat_crit = da9030_millivolt_to_reg(pdata->vbat_crit); charger->thresholds.vbat_charge_start = da9030_millivolt_to_reg(pdata->vbat_charge_start); charger->thresholds.vbat_charge_stop = da9030_millivolt_to_reg(pdata->vbat_charge_stop); charger->thresholds.vbat_charge_restart = 
da9030_millivolt_to_reg(pdata->vbat_charge_restart); charger->thresholds.vcharge_min = da9030_millivolt_to_reg(pdata->vcharge_min); charger->thresholds.vcharge_max = da9030_millivolt_to_reg(pdata->vcharge_max); } static void da9030_battery_setup_psy(struct da9030_charger *charger) { struct power_supply *psy = &charger->psy; struct power_supply_info *info = charger->battery_info; psy->name = info->name; psy->use_for_apm = info->use_for_apm; psy->type = POWER_SUPPLY_TYPE_BATTERY; psy->get_property = da9030_battery_get_property; psy->properties = da9030_battery_props; psy->num_properties = ARRAY_SIZE(da9030_battery_props); }; static int da9030_battery_charger_init(struct da9030_charger *charger) { char v[5]; int ret; v[0] = v[1] = charger->thresholds.vbat_low; v[2] = charger->thresholds.tbat_high; v[3] = charger->thresholds.tbat_restart; v[4] = charger->thresholds.tbat_low; ret = da903x_writes(charger->master, DA9030_VBATMON, 5, v); if (ret) return ret; /* * Enable reference voltage supply for ADC from the LDO_INTERNAL * regulator. Must be set before ADC measurements can be made. 
*/ ret = da903x_write(charger->master, DA9030_ADC_MAN_CONTROL, DA9030_ADC_LDO_INT_ENABLE | DA9030_ADC_TBATREF_ENABLE); if (ret) return ret; /* enable auto ADC measuremnts */ return da903x_write(charger->master, DA9030_ADC_AUTO_CONTROL, DA9030_ADC_TBAT_ENABLE | DA9030_ADC_VBAT_IN_TXON | DA9030_ADC_VCH_ENABLE | DA9030_ADC_ICH_ENABLE | DA9030_ADC_VBAT_ENABLE | DA9030_ADC_AUTO_SLEEP_ENABLE); } static int da9030_battery_probe(struct platform_device *pdev) { struct da9030_charger *charger; struct da9030_battery_info *pdata = pdev->dev.platform_data; int ret; if (pdata == NULL) return -EINVAL; if (pdata->charge_milliamp >= 1500 || pdata->charge_millivolt < 4000 || pdata->charge_millivolt > 4350) return -EINVAL; charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL); if (charger == NULL) return -ENOMEM; charger->master = pdev->dev.parent; /* 10 seconds between monitor runs unless platform defines other interval */ charger->interval = msecs_to_jiffies( (pdata->batmon_interval ? : 10) * 1000); charger->charge_milliamp = pdata->charge_milliamp; charger->charge_millivolt = pdata->charge_millivolt; charger->battery_info = pdata->battery_info; charger->battery_low = pdata->battery_low; charger->battery_critical = pdata->battery_critical; da9030_battery_convert_thresholds(charger, pdata); ret = da9030_battery_charger_init(charger); if (ret) goto err_charger_init; INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor); schedule_delayed_work(&charger->work, charger->interval); charger->nb.notifier_call = da9030_battery_event; ret = da903x_register_notifier(charger->master, &charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); if (ret) goto err_notifier; da9030_battery_setup_psy(charger); ret = power_supply_register(&pdev->dev, &charger->psy); if (ret) goto err_ps_register; charger->debug_file = da9030_bat_create_debugfs(charger); platform_set_drvdata(pdev, charger); return 0; err_ps_register: 
da903x_unregister_notifier(charger->master, &charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); err_notifier: cancel_delayed_work(&charger->work); err_charger_init: return ret; } static int da9030_battery_remove(struct platform_device *dev) { struct da9030_charger *charger = platform_get_drvdata(dev); da9030_bat_remove_debugfs(charger); da903x_unregister_notifier(charger->master, &charger->nb, DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); cancel_delayed_work_sync(&charger->work); da9030_set_charge(charger, 0); power_supply_unregister(&charger->psy); return 0; } static struct platform_driver da903x_battery_driver = { .driver = { .name = "da903x-battery", .owner = THIS_MODULE, }, .probe = da9030_battery_probe, .remove = da9030_battery_remove, }; module_platform_driver(da903x_battery_driver); MODULE_DESCRIPTION("DA9030 battery charger driver"); MODULE_AUTHOR("Mike Rapoport, CompuLab"); MODULE_LICENSE("GPL");
gpl-2.0
tiny4579/tinykernel-flo
fs/qnx6/inode.c
4620
18141
/* * QNX6 file system, Linux implementation. * * Version : 1.0.0 * * History : * * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release. * 16-02-2012 pagemap extension by Al Viro * */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/statfs.h> #include <linux/parser.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/crc32.h> #include <linux/mpage.h> #include "qnx6.h" static const struct super_operations qnx6_sops; static void qnx6_put_super(struct super_block *sb); static struct inode *qnx6_alloc_inode(struct super_block *sb); static void qnx6_destroy_inode(struct inode *inode); static int qnx6_remount(struct super_block *sb, int *flags, char *data); static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf); static int qnx6_show_options(struct seq_file *seq, struct dentry *root); static const struct super_operations qnx6_sops = { .alloc_inode = qnx6_alloc_inode, .destroy_inode = qnx6_destroy_inode, .put_super = qnx6_put_super, .statfs = qnx6_statfs, .remount_fs = qnx6_remount, .show_options = qnx6_show_options, }; static int qnx6_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct qnx6_sb_info *sbi = QNX6_SB(sb); if (sbi->s_mount_opt & QNX6_MOUNT_MMI_FS) seq_puts(seq, ",mmi_fs"); return 0; } static int qnx6_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_RDONLY; return 0; } static unsigned qnx6_get_devblock(struct super_block *sb, __fs32 block) { struct qnx6_sb_info *sbi = QNX6_SB(sb); return fs32_to_cpu(sbi, block) + sbi->s_blks_off; } static unsigned qnx6_block_map(struct inode *inode, unsigned iblock); static int qnx6_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { unsigned phys; QNX6DEBUG((KERN_INFO "qnx6: qnx6_get_block inode=[%ld] iblock=[%ld]\n", 
inode->i_ino, (unsigned long)iblock)); phys = qnx6_block_map(inode, iblock); if (phys) { /* logical block is before EOF */ map_bh(bh, inode->i_sb, phys); } return 0; } static int qnx6_check_blockptr(__fs32 ptr) { if (ptr == ~(__fs32)0) { printk(KERN_ERR "qnx6: hit unused blockpointer.\n"); return 0; } return 1; } static int qnx6_readpage(struct file *file, struct page *page) { return mpage_readpage(page, qnx6_get_block); } static int qnx6_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); } /* * returns the block number for the no-th element in the tree * inodebits requred as there are multiple inodes in one inode block */ static unsigned qnx6_block_map(struct inode *inode, unsigned no) { struct super_block *s = inode->i_sb; struct qnx6_sb_info *sbi = QNX6_SB(s); struct qnx6_inode_info *ei = QNX6_I(inode); unsigned block = 0; struct buffer_head *bh; __fs32 ptr; int levelptr; int ptrbits = sbi->s_ptrbits; int bitdelta; u32 mask = (1 << ptrbits) - 1; int depth = ei->di_filelevels; int i; bitdelta = ptrbits * depth; levelptr = no >> bitdelta; if (levelptr > QNX6_NO_DIRECT_POINTERS - 1) { printk(KERN_ERR "qnx6:Requested file block number (%u) too big.", no); return 0; } block = qnx6_get_devblock(s, ei->di_block_ptr[levelptr]); for (i = 0; i < depth; i++) { bh = sb_bread(s, block); if (!bh) { printk(KERN_ERR "qnx6:Error reading block (%u)\n", block); return 0; } bitdelta -= ptrbits; levelptr = (no >> bitdelta) & mask; ptr = ((__fs32 *)bh->b_data)[levelptr]; if (!qnx6_check_blockptr(ptr)) return 0; block = qnx6_get_devblock(s, ptr); brelse(bh); } return block; } static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct qnx6_sb_info *sbi = QNX6_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = fs32_to_cpu(sbi, 
sbi->sb->sb_num_blocks); buf->f_bfree = fs32_to_cpu(sbi, sbi->sb->sb_free_blocks); buf->f_files = fs32_to_cpu(sbi, sbi->sb->sb_num_inodes); buf->f_ffree = fs32_to_cpu(sbi, sbi->sb->sb_free_inodes); buf->f_bavail = buf->f_bfree; buf->f_namelen = QNX6_LONG_NAME_MAX; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } /* * Check the root directory of the filesystem to make sure * it really _is_ a qnx6 filesystem, and to check the size * of the directory entry. */ static const char *qnx6_checkroot(struct super_block *s) { static char match_root[2][3] = {".\0\0", "..\0"}; int i, error = 0; struct qnx6_dir_entry *dir_entry; struct inode *root = s->s_root->d_inode; struct address_space *mapping = root->i_mapping; struct page *page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return "error reading root directory"; kmap(page); dir_entry = page_address(page); for (i = 0; i < 2; i++) { /* maximum 3 bytes - due to match_root limitation */ if (strncmp(dir_entry[i].de_fname, match_root[i], 3)) error = 1; } qnx6_put_page(page); if (error) return "error reading root directory."; return NULL; } #ifdef CONFIG_QNX6FS_DEBUG void qnx6_superblock_debug(struct qnx6_super_block *sb, struct super_block *s) { struct qnx6_sb_info *sbi = QNX6_SB(s); QNX6DEBUG((KERN_INFO "magic: %08x\n", fs32_to_cpu(sbi, sb->sb_magic))); QNX6DEBUG((KERN_INFO "checksum: %08x\n", fs32_to_cpu(sbi, sb->sb_checksum))); QNX6DEBUG((KERN_INFO "serial: %llx\n", fs64_to_cpu(sbi, sb->sb_serial))); QNX6DEBUG((KERN_INFO "flags: %08x\n", fs32_to_cpu(sbi, sb->sb_flags))); QNX6DEBUG((KERN_INFO "blocksize: %08x\n", fs32_to_cpu(sbi, sb->sb_blocksize))); QNX6DEBUG((KERN_INFO "num_inodes: %08x\n", fs32_to_cpu(sbi, sb->sb_num_inodes))); QNX6DEBUG((KERN_INFO "free_inodes: %08x\n", fs32_to_cpu(sbi, sb->sb_free_inodes))); QNX6DEBUG((KERN_INFO "num_blocks: %08x\n", fs32_to_cpu(sbi, sb->sb_num_blocks))); QNX6DEBUG((KERN_INFO "free_blocks: %08x\n", fs32_to_cpu(sbi, sb->sb_free_blocks))); 
QNX6DEBUG((KERN_INFO "inode_levels: %02x\n", sb->Inode.levels)); } #endif enum { Opt_mmifs, Opt_err }; static const match_table_t tokens = { {Opt_mmifs, "mmi_fs"}, {Opt_err, NULL} }; static int qnx6_parse_options(char *options, struct super_block *sb) { char *p; struct qnx6_sb_info *sbi = QNX6_SB(sb); substring_t args[MAX_OPT_ARGS]; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_mmifs: set_opt(sbi->s_mount_opt, MMI_FS); break; default: return 0; } } return 1; } static struct buffer_head *qnx6_check_first_superblock(struct super_block *s, int offset, int silent) { struct qnx6_sb_info *sbi = QNX6_SB(s); struct buffer_head *bh; struct qnx6_super_block *sb; /* Check the superblock signatures start with the first superblock */ bh = sb_bread(s, offset); if (!bh) { printk(KERN_ERR "qnx6: unable to read the first superblock\n"); return NULL; } sb = (struct qnx6_super_block *)bh->b_data; if (fs32_to_cpu(sbi, sb->sb_magic) != QNX6_SUPER_MAGIC) { sbi->s_bytesex = BYTESEX_BE; if (fs32_to_cpu(sbi, sb->sb_magic) == QNX6_SUPER_MAGIC) { /* we got a big endian fs */ QNX6DEBUG((KERN_INFO "qnx6: fs got different" " endianess.\n")); return bh; } else sbi->s_bytesex = BYTESEX_LE; if (!silent) { if (offset == 0) { printk(KERN_ERR "qnx6: wrong signature (magic)" " in superblock #1.\n"); } else { printk(KERN_INFO "qnx6: wrong signature (magic)" " at position (0x%lx) - will try" " alternative position (0x0000).\n", offset * s->s_blocksize); } } brelse(bh); return NULL; } return bh; } static struct inode *qnx6_private_inode(struct super_block *s, struct qnx6_root_node *p); static int qnx6_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh1 = NULL, *bh2 = NULL; struct qnx6_super_block *sb1 = NULL, *sb2 = NULL; struct qnx6_sb_info *sbi; struct inode *root; const char *errmsg; struct qnx6_sb_info *qs; int ret = -EINVAL; u64 offset; int 
bootblock_offset = QNX6_BOOTBLOCK_SIZE; qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL); if (!qs) return -ENOMEM; s->s_fs_info = qs; /* Superblock always is 512 Byte long */ if (!sb_set_blocksize(s, QNX6_SUPERBLOCK_SIZE)) { printk(KERN_ERR "qnx6: unable to set blocksize\n"); goto outnobh; } /* parse the mount-options */ if (!qnx6_parse_options((char *) data, s)) { printk(KERN_ERR "qnx6: invalid mount options.\n"); goto outnobh; } if (test_opt(s, MMI_FS)) { sb1 = qnx6_mmi_fill_super(s, silent); if (sb1) goto mmi_success; else goto outnobh; } sbi = QNX6_SB(s); sbi->s_bytesex = BYTESEX_LE; /* Check the superblock signatures start with the first superblock */ bh1 = qnx6_check_first_superblock(s, bootblock_offset / QNX6_SUPERBLOCK_SIZE, silent); if (!bh1) { /* try again without bootblock offset */ bh1 = qnx6_check_first_superblock(s, 0, silent); if (!bh1) { printk(KERN_ERR "qnx6: unable to read the first superblock\n"); goto outnobh; } /* seems that no bootblock at partition start */ bootblock_offset = 0; } sb1 = (struct qnx6_super_block *)bh1->b_data; #ifdef CONFIG_QNX6FS_DEBUG qnx6_superblock_debug(sb1, s); #endif /* checksum check - start at byte 8 and end at byte 512 */ if (fs32_to_cpu(sbi, sb1->sb_checksum) != crc32_be(0, (char *)(bh1->b_data + 8), 504)) { printk(KERN_ERR "qnx6: superblock #1 checksum error\n"); goto out; } /* set new blocksize */ if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) { printk(KERN_ERR "qnx6: unable to set blocksize\n"); goto out; } /* blocksize invalidates bh - pull it back in */ brelse(bh1); bh1 = sb_bread(s, bootblock_offset >> s->s_blocksize_bits); if (!bh1) goto outnobh; sb1 = (struct qnx6_super_block *)bh1->b_data; /* calculate second superblock blocknumber */ offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) + (bootblock_offset >> s->s_blocksize_bits) + (QNX6_SUPERBLOCK_AREA >> s->s_blocksize_bits); /* set bootblock offset */ sbi->s_blks_off = (bootblock_offset >> s->s_blocksize_bits) + (QNX6_SUPERBLOCK_AREA >> 
s->s_blocksize_bits); /* next the second superblock */ bh2 = sb_bread(s, offset); if (!bh2) { printk(KERN_ERR "qnx6: unable to read the second superblock\n"); goto out; } sb2 = (struct qnx6_super_block *)bh2->b_data; if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) { if (!silent) printk(KERN_ERR "qnx6: wrong signature (magic)" " in superblock #2.\n"); goto out; } /* checksum check - start at byte 8 and end at byte 512 */ if (fs32_to_cpu(sbi, sb2->sb_checksum) != crc32_be(0, (char *)(bh2->b_data + 8), 504)) { printk(KERN_ERR "qnx6: superblock #2 checksum error\n"); goto out; } if (fs64_to_cpu(sbi, sb1->sb_serial) >= fs64_to_cpu(sbi, sb2->sb_serial)) { /* superblock #1 active */ sbi->sb_buf = bh1; sbi->sb = (struct qnx6_super_block *)bh1->b_data; brelse(bh2); printk(KERN_INFO "qnx6: superblock #1 active\n"); } else { /* superblock #2 active */ sbi->sb_buf = bh2; sbi->sb = (struct qnx6_super_block *)bh2->b_data; brelse(bh1); printk(KERN_INFO "qnx6: superblock #2 active\n"); } mmi_success: /* sanity check - limit maximum indirect pointer levels */ if (sb1->Inode.levels > QNX6_PTR_MAX_LEVELS) { printk(KERN_ERR "qnx6: too many inode levels (max %i, sb %i)\n", QNX6_PTR_MAX_LEVELS, sb1->Inode.levels); goto out; } if (sb1->Longfile.levels > QNX6_PTR_MAX_LEVELS) { printk(KERN_ERR "qnx6: too many longfilename levels" " (max %i, sb %i)\n", QNX6_PTR_MAX_LEVELS, sb1->Longfile.levels); goto out; } s->s_op = &qnx6_sops; s->s_magic = QNX6_SUPER_MAGIC; s->s_flags |= MS_RDONLY; /* Yup, read-only yet */ /* ease the later tree level calculations */ sbi = QNX6_SB(s); sbi->s_ptrbits = ilog2(s->s_blocksize / 4); sbi->inodes = qnx6_private_inode(s, &sb1->Inode); if (!sbi->inodes) goto out; sbi->longfile = qnx6_private_inode(s, &sb1->Longfile); if (!sbi->longfile) goto out1; /* prefetch root inode */ root = qnx6_iget(s, QNX6_ROOT_INO); if (IS_ERR(root)) { printk(KERN_ERR "qnx6: get inode failed\n"); ret = PTR_ERR(root); goto out2; } ret = -ENOMEM; s->s_root = d_make_root(root); if 
(!s->s_root) goto out2; ret = -EINVAL; errmsg = qnx6_checkroot(s); if (errmsg != NULL) { if (!silent) printk(KERN_ERR "qnx6: %s\n", errmsg); goto out3; } return 0; out3: dput(s->s_root); s->s_root = NULL; out2: iput(sbi->longfile); out1: iput(sbi->inodes); out: if (bh1) brelse(bh1); if (bh2) brelse(bh2); outnobh: kfree(qs); s->s_fs_info = NULL; return ret; } static void qnx6_put_super(struct super_block *sb) { struct qnx6_sb_info *qs = QNX6_SB(sb); brelse(qs->sb_buf); iput(qs->longfile); iput(qs->inodes); kfree(qs); sb->s_fs_info = NULL; return; } static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, qnx6_get_block); } static const struct address_space_operations qnx6_aops = { .readpage = qnx6_readpage, .readpages = qnx6_readpages, .bmap = qnx6_bmap }; static struct inode *qnx6_private_inode(struct super_block *s, struct qnx6_root_node *p) { struct inode *inode = new_inode(s); if (inode) { struct qnx6_inode_info *ei = QNX6_I(inode); struct qnx6_sb_info *sbi = QNX6_SB(s); inode->i_size = fs64_to_cpu(sbi, p->size); memcpy(ei->di_block_ptr, p->ptr, sizeof(p->ptr)); ei->di_filelevels = p->levels; inode->i_mode = S_IFREG | S_IRUSR; /* probably wrong */ inode->i_mapping->a_ops = &qnx6_aops; } return inode; } struct inode *qnx6_iget(struct super_block *sb, unsigned ino) { struct qnx6_sb_info *sbi = QNX6_SB(sb); struct qnx6_inode_entry *raw_inode; struct inode *inode; struct qnx6_inode_info *ei; struct address_space *mapping; struct page *page; u32 n, offs; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = QNX6_I(inode); inode->i_mode = 0; if (ino == 0) { printk(KERN_ERR "qnx6: bad inode number on dev %s: %u is " "out of range\n", sb->s_id, ino); iget_failed(inode); return ERR_PTR(-EIO); } n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); mapping = sbi->inodes->i_mapping; 
page = read_mapping_page(mapping, n, NULL); if (IS_ERR(page)) { printk(KERN_ERR "qnx6: major problem: unable to read inode from " "dev %s\n", sb->s_id); iget_failed(inode); return ERR_CAST(page); } kmap(page); raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs; inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode); inode->i_uid = (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid); inode->i_gid = (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid); inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size); inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime); inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_ctime); inode->i_ctime.tv_nsec = 0; /* calc blocks based on 512 byte blocksize */ inode->i_blocks = (inode->i_size + 511) >> 9; memcpy(&ei->di_block_ptr, &raw_inode->di_block_ptr, sizeof(raw_inode->di_block_ptr)); ei->di_filelevels = raw_inode->di_filelevels; if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; inode->i_mapping->a_ops = &qnx6_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &qnx6_dir_inode_operations; inode->i_fop = &qnx6_dir_operations; inode->i_mapping->a_ops = &qnx6_aops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode->i_mapping->a_ops = &qnx6_aops; } else init_special_inode(inode, inode->i_mode, 0); qnx6_put_page(page); unlock_new_inode(inode); return inode; } static struct kmem_cache *qnx6_inode_cachep; static struct inode *qnx6_alloc_inode(struct super_block *sb) { struct qnx6_inode_info *ei; ei = kmem_cache_alloc(qnx6_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void qnx6_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(qnx6_inode_cachep, QNX6_I(inode)); } static void qnx6_destroy_inode(struct inode *inode) { 
call_rcu(&inode->i_rcu, qnx6_i_callback); } static void init_once(void *foo) { struct qnx6_inode_info *ei = (struct qnx6_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { qnx6_inode_cachep = kmem_cache_create("qnx6_inode_cache", sizeof(struct qnx6_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (!qnx6_inode_cachep) return -ENOMEM; return 0; } static void destroy_inodecache(void) { kmem_cache_destroy(qnx6_inode_cachep); } static struct dentry *qnx6_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, qnx6_fill_super); } static struct file_system_type qnx6_fs_type = { .owner = THIS_MODULE, .name = "qnx6", .mount = qnx6_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_qnx6_fs(void) { int err; err = init_inodecache(); if (err) return err; err = register_filesystem(&qnx6_fs_type); if (err) { destroy_inodecache(); return err; } printk(KERN_INFO "QNX6 filesystem 1.0.0 registered.\n"); return 0; } static void __exit exit_qnx6_fs(void) { unregister_filesystem(&qnx6_fs_type); destroy_inodecache(); } module_init(init_qnx6_fs) module_exit(exit_qnx6_fs) MODULE_LICENSE("GPL");
gpl-2.0
Samsung-BCM/android_kernel_samsung_bcm
arch/um/os-Linux/time.c
4876
4180
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stddef.h> #include <errno.h> #include <signal.h> #include <time.h> #include <sys/time.h> #include "kern_util.h" #include "os.h" #include "internal.h" int set_interval(void) { int usec = UM_USEC_PER_SEC / UM_HZ; struct itimerval interval = ((struct itimerval) { { 0, usec }, { 0, usec } }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } int timer_one_shot(int ticks) { unsigned long usec = ticks * UM_USEC_PER_SEC / UM_HZ; unsigned long sec = usec / UM_USEC_PER_SEC; struct itimerval interval; usec %= UM_USEC_PER_SEC; interval = ((struct itimerval) { { 0, 0 }, { sec, usec } }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } /** * timeval_to_ns - Convert timeval to nanoseconds * @ts: pointer to the timeval variable to be converted * * Returns the scalar nanosecond representation of the timeval * parameter. * * Ripped from linux/time.h because it's a kernel header, and thus * unusable from here. 
*/ static inline long long timeval_to_ns(const struct timeval *tv) { return ((long long) tv->tv_sec * UM_NSEC_PER_SEC) + tv->tv_usec * UM_NSEC_PER_USEC; } long long disable_timer(void) { struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } }); long long remain, max = UM_NSEC_PER_SEC / UM_HZ; if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0) printk(UM_KERN_ERR "disable_timer - setitimer failed, " "errno = %d\n", errno); remain = timeval_to_ns(&time.it_value); if (remain > max) remain = max; return remain; } long long os_nsecs(void) { struct timeval tv; gettimeofday(&tv, NULL); return timeval_to_ns(&tv); } #ifdef UML_CONFIG_NO_HZ static int after_sleep_interval(struct timespec *ts) { return 0; } static void deliver_alarm(void) { alarm_handler(SIGVTALRM, NULL); } static unsigned long long sleep_time(unsigned long long nsecs) { return nsecs; } #else unsigned long long last_tick; unsigned long long skew; static void deliver_alarm(void) { unsigned long long this_tick = os_nsecs(); int one_tick = UM_NSEC_PER_SEC / UM_HZ; /* Protection against the host's time going backwards */ if ((last_tick != 0) && (this_tick < last_tick)) this_tick = last_tick; if (last_tick == 0) last_tick = this_tick - one_tick; skew += this_tick - last_tick; while (skew >= one_tick) { alarm_handler(SIGVTALRM, NULL); skew -= one_tick; } last_tick = this_tick; } static unsigned long long sleep_time(unsigned long long nsecs) { return nsecs > skew ? nsecs - skew : 0; } static inline long long timespec_to_us(const struct timespec *ts) { return ((long long) ts->tv_sec * UM_USEC_PER_SEC) + ts->tv_nsec / UM_NSEC_PER_USEC; } static int after_sleep_interval(struct timespec *ts) { int usec = UM_USEC_PER_SEC / UM_HZ; long long start_usecs = timespec_to_us(ts); struct timeval tv; struct itimerval interval; /* * It seems that rounding can increase the value returned from * setitimer to larger than the one passed in. Over time, * this will cause the remaining time to be greater than the * tick interval. 
If this happens, then just reduce the first * tick to the interval value. */ if (start_usecs > usec) start_usecs = usec; start_usecs -= skew / UM_NSEC_PER_USEC; if (start_usecs < 0) start_usecs = 0; tv = ((struct timeval) { .tv_sec = start_usecs / UM_USEC_PER_SEC, .tv_usec = start_usecs % UM_USEC_PER_SEC }); interval = ((struct itimerval) { { 0, usec }, tv }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } #endif void idle_sleep(unsigned long long nsecs) { struct timespec ts; /* * nsecs can come in as zero, in which case, this starts a * busy loop. To prevent this, reset nsecs to the tick * interval if it is zero. */ if (nsecs == 0) nsecs = UM_NSEC_PER_SEC / UM_HZ; nsecs = sleep_time(nsecs); ts = ((struct timespec) { .tv_sec = nsecs / UM_NSEC_PER_SEC, .tv_nsec = nsecs % UM_NSEC_PER_SEC }); if (nanosleep(&ts, &ts) == 0) deliver_alarm(); after_sleep_interval(&ts); }
gpl-2.0
Pillar1989/linux-a80-3.4
drivers/clocksource/cs5535-clockevt.c
4876
6009
/* * Clock event driver for the CS5535/CS5536 * * Copyright (C) 2006, Advanced Micro Devices, Inc. * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. */ #include <linux/kernel.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cs5535.h> #include <linux/clockchips.h> #define DRV_NAME "cs5535-clockevt" static int timer_irq; module_param_named(irq, timer_irq, int, 0644); MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); /* * We are using the 32.768kHz input clock - it's the only one that has the * ranges we find desirable. The following table lists the suitable * divisors and the associated Hz, minimum interval and the maximum interval: * * Divisor Hz Min Delta (s) Max Delta (s) * 1 32768 .00048828125 2.000 * 2 16384 .0009765625 4.000 * 4 8192 .001953125 8.000 * 8 4096 .00390625 16.000 * 16 2048 .0078125 32.000 * 32 1024 .015625 64.000 * 64 512 .03125 128.000 * 128 256 .0625 256.000 * 256 128 .125 512.000 */ static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; static struct cs5535_mfgpt_timer *cs5535_event_clock; /* Selected from the table above */ #define MFGPT_DIVISOR 16 #define MFGPT_SCALE 4 /* divisor = 2^(scale) */ #define MFGPT_HZ (32768 / MFGPT_DIVISOR) #define MFGPT_PERIODIC (MFGPT_HZ / HZ) /* * The MFPGT timers on the CS5536 provide us with suitable timers to use * as clock event sources - not as good as a HPET or APIC, but certainly * better than the PIT. This isn't a general purpose MFGPT driver, but * a simplified one designed specifically to act as a clock event source. * For full details about the MFGPT, please consult the CS5536 data sheet. 
*/ static void disable_timer(struct cs5535_mfgpt_timer *timer) { /* avoid races by clearing CMP1 and CMP2 unconditionally */ cs5535_mfgpt_write(timer, MFGPT_REG_SETUP, (uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2); } static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta) { cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta); cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0); cs5535_mfgpt_write(timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); } static void mfgpt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { disable_timer(cs5535_event_clock); if (mode == CLOCK_EVT_MODE_PERIODIC) start_timer(cs5535_event_clock, MFGPT_PERIODIC); cs5535_tick_mode = mode; } static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) { start_timer(cs5535_event_clock, delta); return 0; } static struct clock_event_device cs5535_clockevent = { .name = DRV_NAME, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = mfgpt_set_mode, .set_next_event = mfgpt_next_event, .rating = 250, .shift = 32 }; static irqreturn_t mfgpt_tick(int irq, void *dev_id) { uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP); /* See if the interrupt was for us */ if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) return IRQ_NONE; /* Turn off the clock (and clear the event) */ disable_timer(cs5535_event_clock); if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) return IRQ_HANDLED; /* Clear the counter */ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0); /* Restart the clock in periodic mode */ if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC) cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); cs5535_clockevent.event_handler(&cs5535_clockevent); return IRQ_HANDLED; } static struct irqaction mfgptirq = { .handler = mfgpt_tick, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, .name = DRV_NAME, 
}; static int __init cs5535_mfgpt_init(void) { struct cs5535_mfgpt_timer *timer; int ret; uint16_t val; timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); if (!timer) { printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); return -ENODEV; } cs5535_event_clock = timer; /* Set up the IRQ on the MFGPT side */ if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) { printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n", timer_irq); goto err_timer; } /* And register it with the kernel */ ret = setup_irq(timer_irq, &mfgptirq); if (ret) { printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n"); goto err_irq; } /* Set the clock scale and enable the event mode for CMP2 */ val = MFGPT_SCALE | (3 << 8); cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val); /* Set up the clock event */ cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, cs5535_clockevent.shift); cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &cs5535_clockevent); cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, &cs5535_clockevent); printk(KERN_INFO DRV_NAME ": Registering MFGPT timer as a clock event, using IRQ %d\n", timer_irq); clockevents_register_device(&cs5535_clockevent); return 0; err_irq: cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq); err_timer: cs5535_mfgpt_free_timer(cs5535_event_clock); printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n"); return -EIO; } module_init(cs5535_mfgpt_init); MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver"); MODULE_LICENSE("GPL");
gpl-2.0
LiquidSmooth-Devices/Deathly_Kernel_D2
drivers/isdn/hardware/eicon/s_4bri.c
9740
15335
/* * Copyright (c) Eicon Networks, 2002. * This source file is supplied for the use with Eicon Networks range of DIVA Server Adapters. * Eicon File Revision : 2.1 * This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "platform.h" #include "di_defs.h" #include "pc.h" #include "pr_pc.h" #include "di.h" #include "mi_pc.h" #include "pc_maint.h" #include "divasync.h" #include "pc_init.h" #include "io.h" #include "helpers.h" #include "dsrv4bri.h" #include "dsp_defs.h" #include "sdp_hdr.h" /*****************************************************************************/ #define MAX_XLOG_SIZE (64 * 1024) /* -------------------------------------------------------------------------- Recovery XLOG from QBRI Card -------------------------------------------------------------------------- */ static void qBri_cpu_trapped(PISDN_ADAPTER IoAdapter) { byte __iomem *base; word *Xlog; dword regs[4], TrapID, offset, size; Xdesc xlogDesc; int factor = (IoAdapter->tasks == 1) ? 
1 : 2; /* * check for trapped MIPS 46xx CPU, dump exception frame */ base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter); offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor); TrapID = READ_DWORD(&base[0x80]); if ((TrapID == 0x99999999) || (TrapID == 0x99999901)) { dump_trap_frame(IoAdapter, &base[0x90]); IoAdapter->trapped = 1; } regs[0] = READ_DWORD((base + offset) + 0x70); regs[1] = READ_DWORD((base + offset) + 0x74); regs[2] = READ_DWORD((base + offset) + 0x78); regs[3] = READ_DWORD((base + offset) + 0x7c); regs[0] &= IoAdapter->MemorySize - 1; if ((regs[0] >= offset) && (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1)) { if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) { DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base); return; } size = offset + (IoAdapter->MemorySize >> factor) - regs[0]; if (size > MAX_XLOG_SIZE) size = MAX_XLOG_SIZE; memcpy_fromio(Xlog, &base[regs[0]], size); xlogDesc.buf = Xlog; xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]); xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]); dump_xlog_buffer(IoAdapter, &xlogDesc); diva_os_free(0, Xlog); IoAdapter->trapped = 2; } DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base); } /* -------------------------------------------------------------------------- Reset QBRI Hardware -------------------------------------------------------------------------- */ static void reset_qBri_hardware(PISDN_ADAPTER IoAdapter) { word volatile __iomem *qBriReset; byte volatile __iomem *qBriCntrl; byte volatile __iomem *p; qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter); WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM); diva_os_wait(1); DIVA_OS_MEM_DETACH_PROM(IoAdapter, 
qBriReset); qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)]; WRITE_DWORD(p, 0); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl); DBG_TRC(("resetted board @ reset addr 0x%08lx", qBriReset)) DBG_TRC(("resetted board @ cntrl addr 0x%08lx", p)) } /* -------------------------------------------------------------------------- Start Card CPU -------------------------------------------------------------------------- */ void start_qBri_hardware(PISDN_ADAPTER IoAdapter) { byte volatile __iomem *qBriReset; byte volatile __iomem *p; p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)]; WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK); diva_os_wait(2); WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK); diva_os_wait(10); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); DBG_TRC(("started processor @ addr 0x%08lx", qBriReset)) } /* -------------------------------------------------------------------------- Stop Card CPU -------------------------------------------------------------------------- */ static void stop_qBri_hardware(PISDN_ADAPTER IoAdapter) { byte volatile __iomem *p; dword volatile __iomem *qBriReset; dword volatile __iomem *qBriIrq; dword volatile __iomem *qBriIsacDspReset; int rev2 = DIVA_4BRI_REVISION(IoAdapter); int reset_offset = rev2 ? (MQ2_BREG_RISC) : (MQ_BREG_RISC); int irq_offset = rev2 ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST); int hw_offset = rev2 ? 
/*
 * NOTE(review): this chunk begins mid-function — the lines below are the
 * tail of a processor-stop routine whose header is above this view
 * (presumably stop_qBri_hardware(); the DBG trace says "stopped processor"
 * — TODO confirm against the full file). The dangling "(MQ2_...) : (MQ_...)"
 * is the second half of a ternary assignment started on an earlier line.
 */
	(MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET);
	/* Only the first controller of a multi-controller card touches HW */
	if (IoAdapter->ControllerNumber > 0)
		return;
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriReset = (dword volatile __iomem *)&p[reset_offset];
	qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset];
	/*
	 * clear interrupt line (reset Local Interrupt Test Register)
	 */
	WRITE_DWORD(qBriReset, 0);
	WRITE_DWORD(qBriIsacDspReset, 0);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)&p[irq_offset];
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset))
}

/* --------------------------------------------------------------------------
   FPGA download
   -------------------------------------------------------------------------- */
#define FPGA_NAME_OFFSET 0x10

/*
 * Load an FPGA bitstream file and validate its header.
 *
 * Loads 'FileName' via xdiLoadFile(), scans for the 0xFF marker that
 * precedes the data header, extracts the embedded file/type/date/time
 * id strings, and checks that the file is large enough to hold the bit
 * count announced in the header.
 *
 * On success returns the loaded file image (caller must release it with
 * xdiFreeFile()) and stores the data start offset in *code and the file
 * size in *Length. Returns NULL on any validation failure (the file is
 * freed internally in that case).
 *
 * Side effect: for all card types except CARDTYPE_DIVASRV_B_2F_PCI, a
 * bitstream dated 2001 or later sets PCINIT_FPGA_PLX_ACCESS_SUPPORTED
 * in IoAdapter->fpga_features.
 */
static byte *qBri_check_FPGAsrc(PISDN_ADAPTER IoAdapter, char *FileName,
				dword *Length, dword *code)
{
	byte *File;
	char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime;
	dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i;
	if (!(File = (byte *)xdiLoadFile(FileName, Length, 0))) {
		return (NULL);
	}
	/*
	 * scan file until FF and put id string into buffer
	 */
	for (i = 0; File[i] != 0xff;) {
		if (++i >= *Length) {
			DBG_FTL(("FPGA download: start of data header not found"))
			xdiFreeFile(File);
			return (NULL);
		}
	}
	*code = i++;
	if ((File[i] & 0xF0) != 0x20) {
		DBG_FTL(("FPGA download: data header corrupted"))
		xdiFreeFile(File);
		return (NULL);
	}
	/* Each id string is length-prefixed; 0 length falls back to a default */
	fpgaFlen = (dword)File[FPGA_NAME_OFFSET - 1];
	if (fpgaFlen == 0)
		fpgaFlen = 12;
	fpgaFile = (char *)&File[FPGA_NAME_OFFSET];
	fpgaTlen = (dword)fpgaFile[fpgaFlen + 2];
	if (fpgaTlen == 0)
		fpgaTlen = 10;
	fpgaType = (char *)&fpgaFile[fpgaFlen + 3];
	fpgaDlen = (dword) fpgaType[fpgaTlen + 2];
	if (fpgaDlen == 0)
		fpgaDlen = 11;
	fpgaDate = (char *)&fpgaType[fpgaTlen + 3];
	fpgaTime = (char *)&fpgaDate[fpgaDlen + 3];
	/* 24-bit big-endian-ish bit count packed into 3.5 bytes of header */
	cnt = (dword)(((File[i] & 0x0F) << 20) + (File[i + 1] << 12)
		      + (File[i + 2] << 4) + (File[i + 3] >> 4));
	if ((dword)(i + (cnt / 8)) > *Length) {
		/*
		 * NOTE(review): 'code' is a dword pointer here, so
		 * "code + ((cnt + 7) / 8)" is pointer arithmetic printed
		 * via %ld — almost certainly "*code + ..." was intended.
		 * Affects the debug message only, not control flow.
		 */
		DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)",
			 FileName, *Length, code + ((cnt + 7) / 8)))
		xdiFreeFile(File);
		return (NULL);
	}
	/* Scan the date string for the first number >= 2000 (the year) */
	i = 0;
	do {
		while ((fpgaDate[i] != '\0')
		       && ((fpgaDate[i] < '0') || (fpgaDate[i] > '9'))) {
			i++;
		}
		year = 0;
		while ((fpgaDate[i] >= '0') && (fpgaDate[i] <= '9'))
			year = year * 10 + (fpgaDate[i++] - '0');
	} while ((year < 2000) && (fpgaDate[i] != '\0'));
	switch (IoAdapter->cardType) {
	case CARDTYPE_DIVASRV_B_2F_PCI:
		break;
	default:
		if (year >= 2001) {
			IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED;
		}
	}
	DBG_LOG(("FPGA[%s] file %s (%s %s) len %d",
		 fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt))
	return (File);
}

/******************************************************************************/
/* Bit masks for the FPGA programming register (bit-banged serial load) */
#define FPGA_PROG 0x0001	/* PROG enable low */
#define FPGA_BUSY 0x0002	/* BUSY high, DONE low */
#define FPGA_CS 0x000C		/* Enable I/O pins */
#define FPGA_CCLK 0x0100
#define FPGA_DOUT 0x0400
#define FPGA_DIN FPGA_DOUT	/* bidirectional I/O */

/*
 * Download the card-type-specific FPGA bitstream into the adapter.
 *
 * Selects the bitstream file from IoAdapter->cardType, validates it via
 * qBri_check_FPGAsrc(), pulses PROG to clear the FPGA, then bit-bangs the
 * image MSB-first through the PROM register, toggling CCLK per bit.
 * Returns 1 on success, 0 on any failure. The PROM mapping is always
 * detached before returning.
 */
int qBri_FPGA_download(PISDN_ADAPTER IoAdapter)
{
	int bit;
	byte *File;
	dword code, FileLength;
	word volatile __iomem *addr =
		(word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
	word val, baseval = FPGA_CS | FPGA_PROG;
	if (DIVA_4BRI_REVISION(IoAdapter)) {
		char *name;
		switch (IoAdapter->cardType) {
		case CARDTYPE_DIVASRV_B_2F_PCI:
			name = "dsbri2f.bit";
			break;
		case CARDTYPE_DIVASRV_B_2M_V2_PCI:
		case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
			name = "dsbri2m.bit";
			break;
		default:
			name = "ds4bri2.bit";
		}
		File = qBri_check_FPGAsrc(IoAdapter, name, &FileLength, &code);
	} else {
		File = qBri_check_FPGAsrc(IoAdapter, "ds4bri.bit",
					  &FileLength, &code);
	}
	if (!File) {
		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
		return (0);
	}
	/*
	 * prepare download, pulse PROGRAM pin down.
	 */
	WRITE_WORD(addr, baseval & ~FPGA_PROG);	/* PROGRAM low pulse */
	WRITE_WORD(addr, baseval);		/* release */
	diva_os_wait(50);	/* wait until FPGA finished internal memory clear */
	/*
	 * check done pin, must be low
	 */
	if (READ_WORD(addr) & FPGA_BUSY) {
		DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing"))
		xdiFreeFile(File);
		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
		return (0);
	}
	/*
	 * put data onto the FPGA
	 */
	while (code < FileLength) {
		/* shift so the MSB of the byte lands on the FPGA_DOUT bit */
		val = ((word)File[code++]) << 3;
		for (bit = 8; bit-- > 0; val <<= 1) /* put byte onto FPGA */
		{
			baseval &= ~FPGA_DOUT;		/* clr data bit */
			baseval |= (val & FPGA_DOUT);	/* copy data bit */
			WRITE_WORD(addr, baseval);
			/*
			 * NOTE(review): CCLK-high is written twice —
			 * presumably a deliberate hold-time delay, not a
			 * typo. Do not "deduplicate" without HW docs.
			 */
			WRITE_WORD(addr, baseval | FPGA_CCLK);	/* set CCLK hi */
			WRITE_WORD(addr, baseval | FPGA_CCLK);	/* set CCLK hi */
			WRITE_WORD(addr, baseval);		/* set CCLK lo */
		}
	}
	xdiFreeFile(File);
	diva_os_wait(100);
	val = READ_WORD(addr);
	DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
	/* After a good load BUSY reads back high (DONE released) */
	if (!(val & FPGA_BUSY)) {
		DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val))
		return (0);
	}
	return (1);
}

/* No firmware load step needed at this stage; installed as ->load hook */
static int load_qBri_hardware(PISDN_ADAPTER IoAdapter)
{
	return (0);
}

/* --------------------------------------------------------------------------
   Card ISR
   -------------------------------------------------------------------------- */
/*
 * Top-level interrupt handler for the (multi-controller) card.
 *
 * Checks the PLX9054 INTCSR to see whether this card raised the IRQ,
 * acknowledges it by resetting the Local Interrupt Test Register, then
 * polls every adapter instance on the card and schedules its soft ISR
 * if its own test-interrupt hook reports pending work.
 * Returns 1 if the interrupt was ours (serviced), 0 otherwise.
 */
static int qBri_ISR(struct _ISDN_ADAPTER *IoAdapter)
{
	dword volatile __iomem *qBriIrq;
	PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList;
	word i;
	int serviced = 0;
	byte __iomem *p;
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	if (!(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80)) {
		/* interrupt not from this card */
		DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
		return (0);
	}
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
	/*
	 * clear interrupt line (reset Local Interrupt Test Register)
	 */
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ?
		(MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	/*
	 * NOTE(review): the loop intentionally reuses 'IoAdapter' to walk
	 * the per-controller adapters; the original argument is no longer
	 * needed once QuadroList/tasks have been read.
	 */
	for (i = 0; i < IoAdapter->tasks; ++i) {
		IoAdapter = QuadroList->QuadroAdapter[i];
		if (IoAdapter && IoAdapter->Initialized
		    && IoAdapter->tst_irq(&IoAdapter->a)) {
			IoAdapter->IrqCount++;
			serviced = 1;
			diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
		}
	}
	return (serviced);
}

/* --------------------------------------------------------------------------
   Does disable the interrupt on the card
   -------------------------------------------------------------------------- */
/*
 * Mask PCI interrupts on the PLX bridge and clear any latched request.
 * Only controller 0 owns the shared interrupt hardware; other controller
 * numbers return immediately.
 */
static void disable_qBri_interrupt(PISDN_ADAPTER IoAdapter)
{
	dword volatile __iomem *qBriIrq;
	byte __iomem *p;
	if (IoAdapter->ControllerNumber > 0)
		return;
	/*
	 * clear interrupt line (reset Local Interrupt Test Register)
	 */
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ?
		(MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
}

/* --------------------------------------------------------------------------
   Install Adapter Entry Points
   -------------------------------------------------------------------------- */
/* Wire up the shared-memory accessors and adapter callbacks common to
 * all qBri variants. */
static void set_common_qBri_functions(PISDN_ADAPTER IoAdapter)
{
	ADAPTER *a;
	a = &IoAdapter->a;
	a->ram_in = mem_in;
	a->ram_inw = mem_inw;
	a->ram_in_buffer = mem_in_buffer;
	a->ram_look_ahead = mem_look_ahead;
	a->ram_out = mem_out;
	a->ram_outw = mem_outw;
	a->ram_out_buffer = mem_out_buffer;
	a->ram_inc = mem_inc;
	IoAdapter->out = pr_out;
	IoAdapter->dpc = pr_dpc;
	IoAdapter->tst_irq = scom_test_int;
	IoAdapter->clr_irq = scom_clear_int;
	IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS;
	IoAdapter->load = load_qBri_hardware;
	IoAdapter->disIrq = disable_qBri_interrupt;
	IoAdapter->rstFnc = reset_qBri_hardware;
	IoAdapter->stop = stop_qBri_hardware;
	IoAdapter->trapFnc = qBri_cpu_trapped;
	IoAdapter->diva_isr_handler = qBri_ISR;
	/* back-pointer so the generic ADAPTER can reach its IoAdapter */
	IoAdapter->a.io = (void *)IoAdapter;
}

/* Entry-point setup for first-generation qBri cards */
static void set_qBri_functions(PISDN_ADAPTER IoAdapter)
{
	if (!IoAdapter->tasks) {
		IoAdapter->tasks = MQ_INSTANCE_COUNT;
	}
	IoAdapter->MemorySize = MQ_MEMORY_SIZE;
	set_common_qBri_functions(IoAdapter);
	diva_os_set_qBri_functions(IoAdapter);
}

/* Entry-point setup for second-generation (qBri2 / BRI2) cards */
static void set_qBri2_functions(PISDN_ADAPTER IoAdapter)
{
	if (!IoAdapter->tasks) {
		IoAdapter->tasks = MQ_INSTANCE_COUNT;
	}
	IoAdapter->MemorySize = (IoAdapter->tasks == 1) ?
		BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE;
	set_common_qBri_functions(IoAdapter);
	diva_os_set_qBri2_functions(IoAdapter);
}

/******************************************************************************/
/* Install entry points on all four controllers of a qBri card */
void prepare_qBri_functions(PISDN_ADAPTER IoAdapter)
{
	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
}

/* Install entry points on a qBri2 card (1 task = BRI2, else 4 controllers) */
void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter)
{
	if (!IoAdapter->tasks) {
		IoAdapter->tasks = MQ_INSTANCE_COUNT;
	}
	set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
	if (IoAdapter->tasks > 1) {
		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
	}
}

/* -------------------------------------------------------------------------- */
gpl-2.0
iwinoto/v4l-media_build
media/drivers/isdn/hardware/eicon/dadapter.c
9740
14576
/*
 * Copyright (c) Eicon Networks, 2002.
 *
 * This source file is supplied for the use with Eicon Networks range of
 * DIVA Server Adapters.
 * Eicon File Revision : 2.1
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "platform.h"
#include "pc.h"
#include "debuglib.h"
#include "di_defs.h"
#include "divasync.h"
#include "dadapter.h"
/* --------------------------------------------------------------------------
   Adapter array change notification framework
   -------------------------------------------------------------------------- */
typedef struct _didd_adapter_change_notification {
	didd_adapter_change_callback_t callback;	/* client callback */
	void IDI_CALL_ENTITY_T *context;		/* opaque client data */
} didd_adapter_change_notification_t, \
	* IDI_CALL_ENTITY_T pdidd_adapter_change_notification_t;
#define DIVA_DIDD_MAX_NOTIFICATIONS 256
static didd_adapter_change_notification_t \
	NotificationTable[DIVA_DIDD_MAX_NOTIFICATIONS];
/* --------------------------------------------------------------------------
   Array to hold adapter information
   -------------------------------------------------------------------------- */
static DESCRIPTOR HandleTable[NEW_MAX_DESCRIPTORS];
static dword Adapters = 0; /* Number of adapters */
/* --------------------------------------------------------------------------
   Shadow IDI_DIMAINT and 'shadow' debug stuff
   -------------------------------------------------------------------------- */
/* Default dprintf sink used while no DIMAINT debug driver is registered. */
static void no_printf(unsigned char *format, ...)
{
#ifdef EBUG
	va_list ap;
	va_start(ap, format);
	debug((format, ap));
	va_end(ap);
#endif
}
/* -------------------------------------------------------------------------
   Portable debug Library
   ------------------------------------------------------------------------- */
#include "debuglib.c"
static DESCRIPTOR MAdapter = {IDI_DIMAINT, /* Adapter Type */
			      0x00,	   /* Channels */
			      0x0000,	   /* Features */
			      (IDI_CALL)no_printf};
/* --------------------------------------------------------------------------
   DAdapter. Only IDI clients with buffer, that is huge enough to
   get all descriptors will receive information about DAdapter
   { byte type, byte channels, word features, IDI_CALL request }
   -------------------------------------------------------------------------- */
static void IDI_CALL_LINK_T diva_dadapter_request(ENTITY IDI_CALL_ENTITY_T *);
static DESCRIPTOR DAdapter = {IDI_DADAPTER, /* Adapter Type */
			      0x00,	    /* Channels */
			      0x0000,	    /* Features */
			      diva_dadapter_request };
/* --------------------------------------------------------------------------
   LOCALS
   -------------------------------------------------------------------------- */
static dword diva_register_adapter_callback(\
	didd_adapter_change_callback_t callback,
	void IDI_CALL_ENTITY_T *context);
static void diva_remove_adapter_callback(dword handle);
static void diva_notify_adapter_change(DESCRIPTOR *d, int removal);
static diva_os_spin_lock_t didd_spin; /* protects HandleTable/NotificationTable */
/* --------------------------------------------------------------------------
   Should be called as first step, after driver init
   -------------------------------------------------------------------------- */
void diva_didd_load_time_init(void) {
	memset(&HandleTable[0], 0x00, sizeof(HandleTable));
	memset(&NotificationTable[0], 0x00, sizeof(NotificationTable));
	diva_os_initialize_spin_lock(&didd_spin, "didd");
}
/* --------------------------------------------------------------------------
   Should be called as last step, if driver does unload
   -------------------------------------------------------------------------- */
void diva_didd_load_time_finit(void) {
	diva_os_destroy_spin_lock(&didd_spin, "didd");
}
/* --------------------------------------------------------------------------
   Called in order to register new adapter in adapter array
   return adapter handle (> 0) on success
   return -1 adapter array overflow

   A descriptor of type IDI_DIMAINT is special-cased: it installs (or,
   with a NULL request, removes) the global 'dprintf' debug hook instead
   of occupying a HandleTable slot, and always reports handle
   NEW_MAX_DESCRIPTORS.
   -------------------------------------------------------------------------- */
static int diva_didd_add_descriptor(DESCRIPTOR *d) {
	diva_os_spin_lock_magic_t irql;
	int i;
	if (d->type == IDI_DIMAINT) {
		if (d->request) {
			MAdapter.request = d->request;
			dprintf = (DIVA_DI_PRINTF)d->request;
			diva_notify_adapter_change(&MAdapter, 0); /* Inserted */
			DBG_TRC(("DIMAINT registered, dprintf=%08x", d->request))
		} else {
			DBG_TRC(("DIMAINT removed"))
			diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
			MAdapter.request = (IDI_CALL)no_printf;
			dprintf = no_printf;
		}
		return (NEW_MAX_DESCRIPTORS);
	}
	/* Take the lock per slot so the notify callback runs unlocked */
	for (i = 0; i < NEW_MAX_DESCRIPTORS; i++) {
		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_add");
		if (HandleTable[i].type == 0) {
			memcpy(&HandleTable[i], d, sizeof(*d));
			Adapters++;
			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
			diva_notify_adapter_change(d, 0); /* we have new adapter */
			DBG_TRC(("Add adapter[%d], request=%08x", (i + 1), d->request))
			return (i + 1);
		}
		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
	}
	DBG_ERR(("Can't add adapter, out of resources"))
	return (-1);
}
/* --------------------------------------------------------------------------
   Called in order to remove one registered adapter from array
   return adapter handle (> 0) on success
   return 0 on success

   Matching is by 'request' entry point; removing the DIMAINT shadow
   restores the no_printf stub.
   -------------------------------------------------------------------------- */
static int diva_didd_remove_descriptor(IDI_CALL request) {
	diva_os_spin_lock_magic_t irql;
	int i;
	if (request == MAdapter.request) {
		DBG_TRC(("DIMAINT removed"))
		dprintf = no_printf;
		diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
		MAdapter.request = (IDI_CALL)no_printf;
		return (0);
	}
	for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) {
		if (HandleTable[i].request == request) {
			/* notify while the entry is still valid, then clear */
			diva_notify_adapter_change(&HandleTable[i], 1); /* About to remove */
			diva_os_enter_spin_lock(&didd_spin, &irql, "didd_rm");
			memset(&HandleTable[i], 0x00, sizeof(HandleTable[0]));
			Adapters--;
			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_rm");
			DBG_TRC(("Remove adapter[%d], request=%08x", (i + 1), request))
			return (0);
		}
	}
	DBG_ERR(("Invalid request=%08x, can't remove adapter", request))
	return (-1);
}
/* --------------------------------------------------------------------------
   Read adapter array
   return 1 if not enough space to save all available adapters

   Copies all registered descriptors into 'buffer' (length in bytes),
   then appends the MAdapter (debug) and DAdapter descriptors if space
   remains. Unused tail entries stay zeroed from the initial memset.
   -------------------------------------------------------------------------- */
static int diva_didd_read_adapter_array(DESCRIPTOR *buffer, int length) {
	diva_os_spin_lock_magic_t irql;
	int src, dst;
	memset(buffer, 0x00, length);
	length /= sizeof(DESCRIPTOR); /* from bytes to descriptor slots */
	DBG_TRC(("DIDD_Read, space = %d, Adapters = %d", length, Adapters + 2))
	diva_os_enter_spin_lock(&didd_spin, &irql, "didd_read");
	for (src = 0, dst = 0;
	     (Adapters && (src < NEW_MAX_DESCRIPTORS) && (dst < length));
	     src++) {
		if (HandleTable[src].type) {
			memcpy(&buffer[dst], &HandleTable[src], sizeof(DESCRIPTOR));
			dst++;
		}
	}
	diva_os_leave_spin_lock(&didd_spin, &irql, "didd_read");
	if (dst < length) {
		memcpy(&buffer[dst], &MAdapter, sizeof(DESCRIPTOR));
		dst++;
	} else {
		DBG_ERR(("Can't write DIMAINT. Array too small"))
	}
	if (dst < length) {
		memcpy(&buffer[dst], &DAdapter, sizeof(DESCRIPTOR));
		dst++;
	} else {
		DBG_ERR(("Can't write DADAPTER. Array too small"))
	}
	DBG_TRC(("Read %d adapters", dst))
	return (dst == length);
}
/* --------------------------------------------------------------------------
   DAdapter request function.
   This function does process only synchronous requests, and is used
   for reception/registration of new interfaces

   The sync request code arrives in e->Rc (e->Req must be zero); on
   completion e->Rc is set to 0xff for success or OUT_OF_RESOURCES.
   -------------------------------------------------------------------------- */
static void IDI_CALL_LINK_T diva_dadapter_request( \
	ENTITY IDI_CALL_ENTITY_T *e) {
	IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
	if (e->Req) { /* We do not process it, also return error */
		e->Rc = OUT_OF_RESOURCES;
		DBG_ERR(("Can't process async request, Req=%02x", e->Req))
		return;
	}
	/* So, we process sync request */
	switch (e->Rc) {
	case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: {
		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
		pinfo->handle = diva_register_adapter_callback( \
			(didd_adapter_change_callback_t)pinfo->callback,
			(void IDI_CALL_ENTITY_T *)pinfo->context);
		e->Rc = 0xff;
	} break;
	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY: {
		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
		diva_remove_adapter_callback(pinfo->handle);
		e->Rc = 0xff;
	} break;
	case IDI_SYNC_REQ_DIDD_ADD_ADAPTER: {
		diva_didd_add_adapter_t *pinfo = &syncReq->didd_add_adapter.info;
		if (diva_didd_add_descriptor((DESCRIPTOR *)pinfo->descriptor) < 0) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER: {
		diva_didd_remove_adapter_t *pinfo = &syncReq->didd_remove_adapter.info;
		if (diva_didd_remove_descriptor((IDI_CALL)pinfo->p_request) < 0) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	case IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY: {
		diva_didd_read_adapter_array_t *pinfo =\
			&syncReq->didd_read_adapter_array.info;
		if (diva_didd_read_adapter_array((DESCRIPTOR *)pinfo->buffer,
						 (int)pinfo->length)) {
			e->Rc = OUT_OF_RESOURCES;
		} else {
			e->Rc = 0xff;
		}
	} break;
	default:
		DBG_ERR(("Can't process sync request, Req=%02x", e->Rc))
		e->Rc = OUT_OF_RESOURCES;
	}
}
/* --------------------------------------------------------------------------
   IDI client does register his notification function
   Returns a 1-based handle into NotificationTable, or 0 on overflow.
   -------------------------------------------------------------------------- */
static dword diva_register_adapter_callback( \
	didd_adapter_change_callback_t callback,
	void IDI_CALL_ENTITY_T *context) {
	diva_os_spin_lock_magic_t irql;
	dword i;
	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_add");
		if (!NotificationTable[i].callback) {
			NotificationTable[i].callback = callback;
			NotificationTable[i].context = context;
			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
			DBG_TRC(("Register adapter notification[%d]=%08x", i + 1, callback))
			return (i + 1);
		}
		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
	}
	DBG_ERR(("Can't register adapter notification, overflow"))
	return (0);
}
/* --------------------------------------------------------------------------
   IDI client does remove his notification function
   'handle' is the 1-based value returned by the register call.
   -------------------------------------------------------------------------- */
static void diva_remove_adapter_callback(dword handle) {
	diva_os_spin_lock_magic_t irql;
	if (handle && ((--handle) < DIVA_DIDD_MAX_NOTIFICATIONS)) {
		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
		NotificationTable[handle].callback = NULL;
		NotificationTable[handle].context = NULL;
		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
		DBG_TRC(("Remove adapter notification[%d]", (int)(handle + 1)))
		return;
	}
	DBG_ERR(("Can't remove adapter notification, handle=%d", handle))
}
/* --------------------------------------------------------------------------
   Notify all client about adapter array change
   Does suppose following behavior in the client side:
   Step 1: Register Notification
   Step 2: Read Adapter Array

   Each table entry is copied out under the lock and the callback is
   invoked unlocked, so callbacks may safely re-enter DIDD.
   -------------------------------------------------------------------------- */
static void diva_notify_adapter_change(DESCRIPTOR *d, int removal) {
	int i, do_notify;
	didd_adapter_change_notification_t nfy;
	diva_os_spin_lock_magic_t irql;
	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
		do_notify = 0;
		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy");
		if (NotificationTable[i].callback) {
			memcpy(&nfy, &NotificationTable[i], sizeof(nfy));
			do_notify = 1;
		}
		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy");
		if (do_notify) {
			(*(nfy.callback))(nfy.context, d, removal);
		}
	}
}
/* --------------------------------------------------------------------------
   For all systems, that are linked by Kernel Mode Linker this is ONLY one
   function that should be exported by this device driver

   IDI clients should look for IDI_DADAPTER, and use request function of
   this adapter (sync request) in order to receive appropriate services:
   - add new adapter
   - remove existing adapter
   - add adapter array notification
   - remove adapter array notification
   (read adapter is redundant in this case)

   INPUT:
     buffer - pointer to buffer that will receive adapter array
     length - length (in bytes) of space in buffer
   OUTPUT:
     Adapter array will be written to memory described by 'buffer'
     If the last adapter seen in the returned adapter array is IDI_DADAPTER
     or if last adapter in array does have type '0', then it was enough
     space in buffer to accommodate all available adapter descriptors

   *NOTE 1 (debug interface):
   The IDI adapter of type 'IDI_DIMAINT' does register as 'request' famous
   'dprintf' function (of type DI_PRINTF, please look include/debuglib.c
   and include/debuglib.h) for details. So dprintf is not exported from
   module debug module directly, instead of this IDI_DIMAINT is registered.

   Module load order will receive in this case:
   1. DIDD (this file)
   2. DIMAINT does load and register 'IDI_DIMAINT', at this step DIDD
      should be able to get 'dprintf', save it, and register with DIDD
      by means of 'dprintf' function.
   3. any other driver is loaded and is able to access adapter array and
      debug interface

   This approach does allow to load/unload debug interface on demand,
   and save memory, if it is necessary.
   -------------------------------------------------------------------------- */
void IDI_CALL_LINK_T DIVA_DIDD_Read(void IDI_CALL_ENTITY_T *buffer,
				    int length) {
	diva_didd_read_adapter_array(buffer, length);
}
gpl-2.0
garwynn/L900_NE2_Kernel
net/irda/ircomm/ircomm_lmp.c
10508
9978
/*********************************************************************
 *
 * Filename:      ircomm_lmp.c
 * Version:       1.0
 * Description:   Interface between IrCOMM and IrLMP
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Sun Jun  6 20:48:27 1999
 * Modified at:   Sun Dec 12 13:44:17 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Sources:       Previous IrLPT work by Thomas Davis
 *
 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 ********************************************************************/

#include <linux/init.h>
#include <linux/gfp.h>

#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
#include <net/irda/irda_device.h>	/* struct irda_skb_cb */
#include <net/irda/ircomm_event.h>
#include <net/irda/ircomm_lmp.h>

/*
 * Function ircomm_lmp_connect_request (self, userdata)
 *
 *    Forward an IrCOMM connect request to IrLMP. 'userdata' (optional
 *    connect payload) gets an extra reference because irlmp consumes one.
 */
static int ircomm_lmp_connect_request(struct ircomm_cb *self,
				      struct sk_buff *userdata,
				      struct ircomm_info *info)
{
	int ret = 0;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	/* Don't forget to refcount it - should be NULL anyway */
	if(userdata)
		skb_get(userdata);

	ret = irlmp_connect_request(self->lsap, info->dlsap_sel,
				    info->saddr, info->daddr, NULL, userdata);
	return ret;
}

/*
 * Function ircomm_lmp_connect_response (self, skb)
 *
 *    Accept an incoming connection. If the caller supplied no userdata,
 *    a fresh skb with room for the LMP/LAP headers is allocated.
 */
static int ircomm_lmp_connect_response(struct ircomm_cb *self,
				       struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX and LAP header */
		skb_reserve(tx_skb, LMP_MAX_HEADER);
	} else {
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER,
			    return -1;);

		/* Don't forget to refcount it - should be NULL anyway */
		skb_get(userdata);
		tx_skb = userdata;
	}

	return irlmp_connect_response(self->lsap, tx_skb);
}

/*
 * Disconnect the LSAP, optionally sending 'userdata' with the request.
 * Mirrors connect_response: allocates an empty skb when none is given,
 * otherwise takes a reference since irlmp consumes one.
 */
static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
					 struct sk_buff *userdata,
					 struct ircomm_info *info)
{
	struct sk_buff *tx_skb;
	int ret;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	if (!userdata) {
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX and LAP header */
		skb_reserve(tx_skb, LMP_MAX_HEADER);
		userdata = tx_skb;
	} else {
		/* Don't forget to refcount it - should be NULL anyway */
		skb_get(userdata);
	}

	ret = irlmp_disconnect_request(self->lsap, userdata);

	return ret;
}

/*
 * Function ircomm_lmp_flow_control (skb)
 *
 *    This function is called when a data frame we have sent to IrLAP has
 *    been deallocated. We do this to make sure we don't flood IrLAP with
 *    frames, since we are not using the IrTTP flow control mechanism
 *
 *    Runs as the skb destructor, so the ircomm_cb must be looked up by
 *    line number rather than trusted as a live pointer.
 */
static void ircomm_lmp_flow_control(struct sk_buff *skb)
{
	struct irda_skb_cb *cb;
	struct ircomm_cb *self;
	int line;

	IRDA_ASSERT(skb != NULL, return;);

	cb = (struct irda_skb_cb *) skb->cb;

	IRDA_DEBUG(2, "%s()\n", __func__ );

	line = cb->line;

	self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
	if (!self) {
		IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ );
		return;
	}

	/* NOTE(review): redundant — 'self' was already NULL-checked above */
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);

	self->pkt_count--;

	/* Low-water mark: restart the TTY once fewer than 2 frames queued */
	if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) {
		IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ );
		self->flow_status = FLOW_START;
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);
	}
}

/*
 * Function ircomm_lmp_data_request (self, userdata)
 *
 *    Send data frame to peer device
 *
 *    Installs ircomm_lmp_flow_control as the skb destructor to count
 *    in-flight frames, and throttles the TTY above 7 queued frames.
 */
static int ircomm_lmp_data_request(struct ircomm_cb *self,
				   struct sk_buff *skb,
				   int not_used)
{
	struct irda_skb_cb *cb;
	int ret;

	IRDA_ASSERT(skb != NULL, return -1;);

	cb = (struct irda_skb_cb *) skb->cb;

	cb->line = self->line;

	IRDA_DEBUG(4, "%s(), sending frame\n", __func__ );

	/* Don't forget to refcount it - see ircomm_tty_do_softint() */
	skb_get(skb);

	skb_orphan(skb);
	skb->destructor = ircomm_lmp_flow_control;

	/* High-water mark: throttle the TTY past 7 outstanding frames */
	if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
		IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ );
		self->flow_status = FLOW_STOP;
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
	}
	ret = irlmp_data_request(self->lsap, skb);
	if (ret) {
		IRDA_ERROR("%s(), failed\n", __func__);
		/* irlmp_data_request already free the packet */
	}

	return ret;
}

/*
 * Function ircomm_lmp_data_indication (instance, sap, skb)
 *
 *    Incoming data which we must deliver to the state machine, to check
 *    we are still connected.
 */
static int ircomm_lmp_data_indication(void *instance, void *sap,
				      struct sk_buff *skb)
{
	struct ircomm_cb *self = (struct ircomm_cb *) instance;

	IRDA_DEBUG(4, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	ircomm_do_event(self, IRCOMM_LMP_DATA_INDICATION, skb, NULL);

	/* Drop reference count - see ircomm_tty_data_indication(). */
	dev_kfree_skb(skb);

	return 0;
}

/*
 * Function ircomm_lmp_connect_confirm (instance, sap, qos, max_sdu_size,
 *                                      max_header_size, skb)
 *
 *    Connection has been confirmed by peer device
 */
static void ircomm_lmp_connect_confirm(void *instance, void *sap,
				       struct qos_info *qos,
				       __u32 max_seg_size,
				       __u8 max_header_size,
				       struct sk_buff *skb)
{
	struct ircomm_cb *self = (struct ircomm_cb *) instance;
	struct ircomm_info info;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);
	IRDA_ASSERT(qos != NULL, return;);

	info.max_data_size = max_seg_size;
	info.max_header_size = max_header_size;
	info.qos = qos;

	ircomm_do_event(self, IRCOMM_LMP_CONNECT_CONFIRM, skb, &info);

	/* Drop reference count - see ircomm_tty_connect_confirm(). */
	dev_kfree_skb(skb);
}

/*
 * Function ircomm_lmp_connect_indication (instance, sap, qos, max_sdu_size,
 *                                         max_header_size, skb)
 *
 *    Peer device wants to make a connection with us
 */
static void ircomm_lmp_connect_indication(void *instance, void *sap,
					  struct qos_info *qos,
					  __u32 max_seg_size,
					  __u8 max_header_size,
					  struct sk_buff *skb)
{
	struct ircomm_cb *self = (struct ircomm_cb *)instance;
	struct ircomm_info info;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);
	IRDA_ASSERT(qos != NULL, return;);

	info.max_data_size = max_seg_size;
	info.max_header_size = max_header_size;
	info.qos = qos;

	ircomm_do_event(self, IRCOMM_LMP_CONNECT_INDICATION, skb, &info);

	/* Drop reference count - see ircomm_tty_connect_indication(). */
	dev_kfree_skb(skb);
}

/*
 * Function ircomm_lmp_disconnect_indication (instance, sap, reason, skb)
 *
 *    Peer device has closed the connection, or the link went down for some
 *    other reason
 */
static void ircomm_lmp_disconnect_indication(void *instance, void *sap,
					     LM_REASON reason,
					     struct sk_buff *skb)
{
	struct ircomm_cb *self = (struct ircomm_cb *) instance;
	struct ircomm_info info;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);

	info.reason = reason;

	ircomm_do_event(self, IRCOMM_LMP_DISCONNECT_INDICATION, skb, &info);

	/* Drop reference count - see ircomm_tty_disconnect_indication(). */
	/* skb may be NULL on link failure, unlike the other indications */
	if(skb)
		dev_kfree_skb(skb);
}

/*
 * Function ircomm_open_lsap (self)
 *
 *    Open LSAP. This function will only be used when using "raw" services
 *
 *    Registers the LMP callbacks above and fills self->issue with the
 *    LMP flavors of the IrCOMM operation table. Returns 0 on success,
 *    -1 if the LSAP could not be allocated.
 */
int ircomm_open_lsap(struct ircomm_cb *self)
{
	notify_t notify;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	/* Register callbacks */
	irda_notify_init(&notify);
	notify.data_indication       = ircomm_lmp_data_indication;
	notify.connect_confirm       = ircomm_lmp_connect_confirm;
	notify.connect_indication    = ircomm_lmp_connect_indication;
	notify.disconnect_indication = ircomm_lmp_disconnect_indication;
	notify.instance = self;
	strlcpy(notify.name, "IrCOMM", sizeof(notify.name));

	self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0);
	if (!self->lsap) {
		IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ );
		return -1;
	}
	self->slsap_sel = self->lsap->slsap_sel;

	/*
	 *  Initialize the call-table for issuing commands
	 */
	self->issue.data_request       = ircomm_lmp_data_request;
	self->issue.connect_request    = ircomm_lmp_connect_request;
	self->issue.connect_response   = ircomm_lmp_connect_response;
	self->issue.disconnect_request = ircomm_lmp_disconnect_request;

	return 0;
}
gpl-2.0
MoKee/android_kernel_htc_msm7x30
fs/direct-io.c
13
42899
/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <asm/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  it
 * is determined on a per-invocation basis.   When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to bio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change, Used only once */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submition function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	/*
	 * Page fetching state. These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int rw;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	int should_dirty;		/* should we mark read pages dirty? */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * Block until inode->i_dio_count drops to zero.  Uses the shared
 * __I_DIO_WAKEUP bit waitqueue; the loop re-checks after every wakeup
 * because the waitqueue is shared between inodes (hashed).
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
*/ void inode_dio_wait(struct inode *inode) { if (atomic_read(&inode->i_dio_count)) __inode_dio_wait(inode); } EXPORT_SYMBOL_GPL(inode_dio_wait); /* * inode_dio_done - signal finish of a direct I/O requests * @inode: inode the direct I/O happens on * * This is called once we've finished processing a direct I/O request, * and is used to wake up callers waiting for direct I/O to be quiesced. */ void inode_dio_done(struct inode *inode) { if (atomic_dec_and_test(&inode->i_dio_count)) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } EXPORT_SYMBOL_GPL(inode_dio_done); /* * How many pages are in the queue? */ static inline unsigned dio_pages_present(struct dio_submit *sdio) { return sdio->tail - sdio->head; } /* * Go grab and pin some userspace pages. Typically we'll get 64 at a time. */ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) { int ret; int nr_pages; nr_pages = min(sdio->total_pages - sdio->curr_page, DIO_PAGES); ret = get_user_pages_fast( sdio->curr_user_address, /* Where from? */ nr_pages, /* How many pages? */ dio->rw == READ, /* Write to memory? */ &dio->pages[0]); /* Put results here */ if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { struct page *page = ZERO_PAGE(0); /* * A memory fault, but the filesystem has some outstanding * mapped blocks. We need to use those blocks up to avoid * leaking stale data in the file. */ if (dio->page_errors == 0) dio->page_errors = ret; page_cache_get(page); dio->pages[0] = page; sdio->head = 0; sdio->tail = 1; ret = 0; goto out; } if (ret >= 0) { sdio->curr_user_address += ret * PAGE_SIZE; sdio->curr_page += ret; sdio->head = 0; sdio->tail = ret; ret = 0; } out: return ret; } /* * Get another userspace page. Returns an ERR_PTR on error. Pages are * buffered inside the dio so that we can call get_user_pages() against a * decent number of pages, less frequently. To provide nicer use of the * L1 cache. 
*/ static inline struct page *dio_get_page(struct dio *dio, struct dio_submit *sdio) { if (dio_pages_present(sdio) == 0) { int ret; ret = dio_refill_pages(dio, sdio); if (ret) return ERR_PTR(ret); BUG_ON(dio_pages_present(sdio) == 0); } return dio->pages[sdio->head++]; } /** * dio_complete() - called when all DIO BIO I/O has been completed * @offset: the byte offset in the file of the completed operation * * This releases locks as dictated by the locking type, lets interested parties * know that a DIO operation has completed, and calculates the resulting return * code for the operation. * * It lets the filesystem know if it registered an interest earlier via * get_block. Pass the private field of the map buffer_head so that * filesystems can use it to hold additional state between get_block calls and * dio_complete. */ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async) { ssize_t transferred = 0; /* * AIO submission can race with bio completion to get here while * expecting to have the last io completed by bio completion. * In that case -EIOCBQUEUED is in fact not an error we want * to preserve through this call. */ if (ret == -EIOCBQUEUED) ret = 0; if (dio->result) { transferred = dio->result; /* Check for short read case */ if ((dio->rw == READ) && ((offset + transferred) > dio->i_size)) transferred = dio->i_size - offset; } if (ret == 0) ret = dio->page_errors; if (ret == 0) ret = dio->io_error; if (ret == 0) ret = transferred; if (dio->end_io && dio->result) { dio->end_io(dio->iocb, offset, transferred, dio->private, ret, is_async); } else { if (is_async) aio_complete(dio->iocb, ret, 0); inode_dio_done(dio->inode); } return ret; } static int dio_bio_complete(struct dio *dio, struct bio *bio); /* * Asynchronous IO callback. 
*/ static void dio_bio_end_aio(struct bio *bio, int error) { struct dio *dio = bio->bi_private; unsigned long remaining; unsigned long flags; /* cleanup the bio */ dio_bio_complete(dio, bio); spin_lock_irqsave(&dio->bio_lock, flags); remaining = --dio->refcount; if (remaining == 1 && dio->waiter) wake_up_process(dio->waiter); spin_unlock_irqrestore(&dio->bio_lock, flags); if (remaining == 0) { dio_complete(dio, dio->iocb->ki_pos, 0, true); kmem_cache_free(dio_cache, dio); } } /* * The BIO completion handler simply queues the BIO up for the process-context * handler. * * During I/O bi_private points at the dio. After I/O, bi_private is used to * implement a singly-linked list of completed BIOs, at dio->bio_list. */ static void dio_bio_end_io(struct bio *bio, int error) { struct dio *dio = bio->bi_private; unsigned long flags; spin_lock_irqsave(&dio->bio_lock, flags); bio->bi_private = dio->bio_list; dio->bio_list = bio; if (--dio->refcount == 1 && dio->waiter) wake_up_process(dio->waiter); spin_unlock_irqrestore(&dio->bio_lock, flags); } /** * dio_end_io - handle the end io action for the given bio * @bio: The direct io bio thats being completed * @error: Error if there was one * * This is meant to be called by any filesystem that uses their own dio_submit_t * so that the DIO specific endio actions are dealt with after the filesystem * has done it's completion work. */ void dio_end_io(struct bio *bio, int error) { struct dio *dio = bio->bi_private; if (dio->is_async) dio_bio_end_aio(bio, error); else dio_bio_end_io(bio, error); } EXPORT_SYMBOL_GPL(dio_end_io); static inline void dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, struct block_device *bdev, sector_t first_sector, int nr_vecs) { struct bio *bio; /* * bio_alloc() is guaranteed to return a bio when called with * __GFP_WAIT and we request a valid number of vectors. 
*/ bio = bio_alloc(GFP_KERNEL, nr_vecs); bio->bi_bdev = bdev; bio->bi_sector = first_sector; if (dio->is_async) bio->bi_end_io = dio_bio_end_aio; else bio->bi_end_io = dio_bio_end_io; sdio->bio = bio; sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; } /* * In the AIO read case we speculatively dirty the pages before starting IO. * During IO completion, any of these pages which happen to have been written * back will be redirtied by bio_check_pages_dirty(). * * bios hold a dio reference between submit_bio and ->end_io. */ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) { struct bio *bio = sdio->bio; unsigned long flags; bio->bi_private = dio; spin_lock_irqsave(&dio->bio_lock, flags); dio->refcount++; spin_unlock_irqrestore(&dio->bio_lock, flags); if (dio->is_async && dio->rw == READ && dio->should_dirty) bio_set_pages_dirty(bio); if (sdio->submit_io) sdio->submit_io(dio->rw, bio, dio->inode, sdio->logical_offset_in_bio); else submit_bio(dio->rw, bio); sdio->bio = NULL; sdio->boundary = 0; sdio->logical_offset_in_bio = 0; } /* * Release any resources in case of a failure */ static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) { while (dio_pages_present(sdio)) page_cache_release(dio_get_page(dio, sdio)); } /* * Wait for the next BIO to complete. Remove it and return it. NULL is * returned once all BIOs have been completed. This must only be called once * all bios have been issued so that dio->refcount can only decrease. This * requires that that the caller hold a reference on the dio. */ static struct bio *dio_await_one(struct dio *dio) { unsigned long flags; struct bio *bio = NULL; spin_lock_irqsave(&dio->bio_lock, flags); /* * Wait as long as the list is empty and there are bios in flight. bio * completion drops the count, maybe adds to the list, and wakes while * holding the bio_lock so we don't need set_current_state()'s barrier * and can call it after testing our condition. 
*/ while (dio->refcount > 1 && dio->bio_list == NULL) { __set_current_state(TASK_UNINTERRUPTIBLE); dio->waiter = current; spin_unlock_irqrestore(&dio->bio_lock, flags); io_schedule(); /* wake up sets us TASK_RUNNING */ spin_lock_irqsave(&dio->bio_lock, flags); dio->waiter = NULL; } if (dio->bio_list) { bio = dio->bio_list; dio->bio_list = bio->bi_private; } spin_unlock_irqrestore(&dio->bio_lock, flags); return bio; } /* * Process one completed BIO. No locks are held. */ static int dio_bio_complete(struct dio *dio, struct bio *bio) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec; int page_no; if (!uptodate) dio->io_error = -EIO; if (dio->is_async && dio->rw == READ && dio->should_dirty) { bio_check_pages_dirty(bio); /* transfers ownership */ } else { for (page_no = 0; page_no < bio->bi_vcnt; page_no++) { struct page *page = bvec[page_no].bv_page; if (dio->rw == READ && !PageCompound(page) && dio->should_dirty) set_page_dirty_lock(page); page_cache_release(page); } bio_put(bio); } return uptodate ? 0 : -EIO; } /* * Wait on and process all in-flight BIOs. This must only be called once * all bios have been issued so that the refcount can only decrease. * This just waits for all bios to make it through dio_bio_complete. IO * errors are propagated through dio->io_error and should be propagated via * dio_complete(). */ static void dio_await_completion(struct dio *dio) { struct bio *bio; do { bio = dio_await_one(dio); if (bio) dio_bio_complete(dio, bio); } while (bio); } /* * A really large O_DIRECT read or write can generate a lot of BIOs. So * to keep the memory consumption sane we periodically reap any completed BIOs * during the BIO generation phase. * * This also helps to limit the peak amount of pinned userspace memory. 
*/ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) { int ret = 0; if (sdio->reap_counter++ >= 64) { while (dio->bio_list) { unsigned long flags; struct bio *bio; int ret2; spin_lock_irqsave(&dio->bio_lock, flags); bio = dio->bio_list; dio->bio_list = bio->bi_private; spin_unlock_irqrestore(&dio->bio_lock, flags); ret2 = dio_bio_complete(dio, bio); if (ret == 0) ret = ret2; } sdio->reap_counter = 0; } return ret; } /* * Call into the fs to map some more disk blocks. We record the current number * of available blocks at sdio->blocks_available. These are in units of the * fs blocksize, (1 << inode->i_blkbits). * * The fs is allowed to map lots of blocks at once. If it wants to do that, * it uses the passed inode-relative block number as the file offset, as usual. * * get_block() is passed the number of i_blkbits-sized blocks which direct_io * has remaining to do. The fs should not map more than this number of blocks. * * If the fs has mapped a lot of blocks, it should populate bh->b_size to * indicate how much contiguous disk space has been made available at * bh->b_blocknr. * * If *any* of the mapped blocks are new, then the fs must set buffer_new(). * This isn't very efficient... * * In the case of filesystem holes: the fs may return an arbitrarily-large * hole by returning an appropriate value in b_size and by clearing * buffer_mapped(). However the direct-io code will only process holes one * block at a time - it will repeatedly call get_block() as it walks the hole. 
*/ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, struct buffer_head *map_bh) { int ret; sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ unsigned long fs_count; /* Number of filesystem-sized blocks */ int create; /* * If there was a memory error and we've overwritten all the * mapped blocks then we can now return that memory error */ ret = dio->page_errors; if (ret == 0) { BUG_ON(sdio->block_in_file >= sdio->final_block_in_request); fs_startblk = sdio->block_in_file >> sdio->blkfactor; fs_endblk = (sdio->final_block_in_request - 1) >> sdio->blkfactor; fs_count = fs_endblk - fs_startblk + 1; map_bh->b_state = 0; map_bh->b_size = fs_count << dio->inode->i_blkbits; /* * For writes inside i_size on a DIO_SKIP_HOLES filesystem we * forbid block creations: only overwrites are permitted. * We will return early to the caller once we see an * unmapped buffer head returned, and the caller will fall * back to buffered I/O. * * Otherwise the decision is left to the get_blocks method, * which may decide to handle it or also return an unmapped * buffer head. */ create = dio->rw & WRITE; if (dio->flags & DIO_SKIP_HOLES) { if (sdio->block_in_file < (i_size_read(dio->inode) >> sdio->blkbits)) create = 0; } ret = (*sdio->get_block)(dio->inode, fs_startblk, map_bh, create); /* Store for completion */ dio->private = map_bh->b_private; } return ret; } /* * There is no bio. Make one now. 
*/ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, sector_t start_sector, struct buffer_head *map_bh) { sector_t sector; int ret, nr_pages; ret = dio_bio_reap(dio, sdio); if (ret) goto out; sector = start_sector << (sdio->blkbits - 9); nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev)); nr_pages = min(nr_pages, BIO_MAX_PAGES); BUG_ON(nr_pages <= 0); dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); sdio->boundary = 0; out: return ret; } /* * Attempt to put the current chunk of 'cur_page' into the current BIO. If * that was successful then update final_block_in_bio and take a ref against * the just-added page. * * Return zero on success. Non-zero means the caller needs to start a new BIO. */ static inline int dio_bio_add_page(struct dio_submit *sdio) { int ret; ret = bio_add_page(sdio->bio, sdio->cur_page, sdio->cur_page_len, sdio->cur_page_offset); if (ret == sdio->cur_page_len) { /* * Decrement count only, if we are done with this page */ if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) sdio->pages_in_io--; page_cache_get(sdio->cur_page); sdio->final_block_in_bio = sdio->cur_page_block + (sdio->cur_page_len >> sdio->blkbits); ret = 0; } else { ret = 1; } return ret; } /* * Put cur_page under IO. The section of cur_page which is described by * cur_page_offset,cur_page_len is put into a BIO. The section of cur_page * starts on-disk at cur_page_block. * * We take a ref against the page here (on behalf of its presence in the bio). * * The caller of this function is responsible for removing cur_page from the * dio, and for dropping the refcount which came from that presence. */ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, struct buffer_head *map_bh) { int ret = 0; if (sdio->bio) { loff_t cur_offset = sdio->cur_page_fs_offset; loff_t bio_next_offset = sdio->logical_offset_in_bio + sdio->bio->bi_size; /* * See whether this new request is contiguous with the old. 
* * Btrfs cannot handle having logically non-contiguous requests * submitted. For example if you have * * Logical: [0-4095][HOLE][8192-12287] * Physical: [0-4095] [4096-8191] * * We cannot submit those pages together as one BIO. So if our * current logical offset in the file does not equal what would * be the next logical offset in the bio, submit the bio we * have. */ if (sdio->final_block_in_bio != sdio->cur_page_block || cur_offset != bio_next_offset) dio_bio_submit(dio, sdio); /* * Submit now if the underlying fs is about to perform a * metadata read */ else if (sdio->boundary) dio_bio_submit(dio, sdio); } if (sdio->bio == NULL) { ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); if (ret) goto out; } if (dio_bio_add_page(sdio) != 0) { dio_bio_submit(dio, sdio); ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); if (ret == 0) { ret = dio_bio_add_page(sdio); BUG_ON(ret != 0); } } out: return ret; } /* * An autonomous function to put a chunk of a page under deferred IO. * * The caller doesn't actually know (or care) whether this piece of page is in * a BIO, or is under IO or whatever. We just take care of all possible * situations here. The separation between the logic of do_direct_IO() and * that of submit_page_section() is important for clarity. Please don't break. * * The chunk of page starts on-disk at blocknr. * * We perform deferred IO, by recording the last-submitted page inside our * private part of the dio structure. If possible, we just expand the IO * across that page here. * * If that doesn't work out then we put the old page into the bio and add this * page to the dio instead. */ static inline int submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, unsigned offset, unsigned len, sector_t blocknr, struct buffer_head *map_bh) { int ret = 0; if (dio->rw & WRITE) { /* * Read accounting is performed in submit_bio() */ task_io_account_write(len); } /* * Can we just grow the current page's presence in the dio? 
*/ if (sdio->cur_page == page && sdio->cur_page_offset + sdio->cur_page_len == offset && sdio->cur_page_block + (sdio->cur_page_len >> sdio->blkbits) == blocknr) { sdio->cur_page_len += len; /* * If sdio->boundary then we want to schedule the IO now to * avoid metadata seeks. */ if (sdio->boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); page_cache_release(sdio->cur_page); sdio->cur_page = NULL; } goto out; } /* * If there's a deferred page already there then send it. */ if (sdio->cur_page) { ret = dio_send_cur_page(dio, sdio, map_bh); page_cache_release(sdio->cur_page); sdio->cur_page = NULL; if (ret) goto out; } page_cache_get(page); /* It is in dio */ sdio->cur_page = page; sdio->cur_page_offset = offset; sdio->cur_page_len = len; sdio->cur_page_block = blocknr; sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; out: return ret; } /* * Clean any dirty buffers in the blockdev mapping which alias newly-created * file blocks. Only called for S_ISREG files - blockdevs do not set * buffer_new */ static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh) { unsigned i; unsigned nblocks; nblocks = map_bh->b_size >> dio->inode->i_blkbits; for (i = 0; i < nblocks; i++) { unmap_underlying_metadata(map_bh->b_bdev, map_bh->b_blocknr + i); } } /* * If we are not writing the entire block and get_block() allocated * the block for us, we need to fill-in the unused portion of the * block with zeros. This happens only if user-buffer, fileoffset or * io length is not filesystem block-size multiple. * * `end' is zero if we're doing the start of the IO, 1 at the end of the * IO. 
*/ static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, int end, struct buffer_head *map_bh) { unsigned dio_blocks_per_fs_block; unsigned this_chunk_blocks; /* In dio_blocks */ unsigned this_chunk_bytes; struct page *page; sdio->start_zero_done = 1; if (!sdio->blkfactor || !buffer_new(map_bh)) return; dio_blocks_per_fs_block = 1 << sdio->blkfactor; this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1); if (!this_chunk_blocks) return; /* * We need to zero out part of an fs block. It is either at the * beginning or the end of the fs block. */ if (end) this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks; this_chunk_bytes = this_chunk_blocks << sdio->blkbits; page = ZERO_PAGE(0); if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, sdio->next_block_for_io, map_bh)) return; sdio->next_block_for_io += this_chunk_blocks; } /* * Walk the user pages, and the file, mapping blocks to disk and generating * a sequence of (page,offset,len,block) mappings. These mappings are injected * into submit_page_section(), which takes care of the next stage of submission * * Direct IO against a blockdev is different from a file. Because we can * happily perform page-sized but 512-byte aligned IOs. It is important that * blockdev IO be able to have fine alignment and large sizes. * * So what we do is to permit the ->get_block function to populate bh.b_size * with the size of IO which is permitted at this offset and this i_blkbits. * * For best results, the blockdev should be set up with 512-byte i_blkbits and * it should set b_size to PAGE_SIZE or more inside get_block(). This gives * fine alignment but still allows this function to work in PAGE_SIZE units. 
*/ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, struct buffer_head *map_bh) { const unsigned blkbits = sdio->blkbits; const unsigned blocks_per_page = PAGE_SIZE >> blkbits; struct page *page; unsigned block_in_page; int ret = 0; /* The I/O can start at any block offset within the first page */ block_in_page = sdio->first_block_in_page; while (sdio->block_in_file < sdio->final_block_in_request) { page = dio_get_page(dio, sdio); if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } while (block_in_page < blocks_per_page) { unsigned offset_in_page = block_in_page << blkbits; unsigned this_chunk_bytes; /* # of bytes mapped */ unsigned this_chunk_blocks; /* # of blocks */ unsigned u; if (sdio->blocks_available == 0) { /* * Need to go and map some more disk */ unsigned long blkmask; unsigned long dio_remainder; ret = get_more_blocks(dio, sdio, map_bh); if (ret) { page_cache_release(page); goto out; } if (!buffer_mapped(map_bh)) goto do_holes; sdio->blocks_available = map_bh->b_size >> sdio->blkbits; sdio->next_block_for_io = map_bh->b_blocknr << sdio->blkfactor; if (buffer_new(map_bh)) clean_blockdev_aliases(dio, map_bh); if (!sdio->blkfactor) goto do_holes; blkmask = (1 << sdio->blkfactor) - 1; dio_remainder = (sdio->block_in_file & blkmask); /* * If we are at the start of IO and that IO * starts partway into a fs-block, * dio_remainder will be non-zero. If the IO * is a read then we can simply advance the IO * cursor to the first block which is to be * read. 
But if the IO is a write and the * block was newly allocated we cannot do that; * the start of the fs block must be zeroed out * on-disk */ if (!buffer_new(map_bh)) sdio->next_block_for_io += dio_remainder; sdio->blocks_available -= dio_remainder; } do_holes: /* Handle holes */ if (!buffer_mapped(map_bh)) { loff_t i_size_aligned; /* AKPM: eargh, -ENOTBLK is a hack */ if (dio->rw & WRITE) { page_cache_release(page); return -ENOTBLK; } /* * Be sure to account for a partial block as the * last block in the file */ i_size_aligned = ALIGN(i_size_read(dio->inode), 1 << blkbits); if (sdio->block_in_file >= i_size_aligned >> blkbits) { /* We hit eof */ page_cache_release(page); goto out; } zero_user(page, block_in_page << blkbits, 1 << blkbits); sdio->block_in_file++; block_in_page++; goto next_block; } /* * If we're performing IO which has an alignment which * is finer than the underlying fs, go check to see if * we must zero out the start of this block. */ if (unlikely(sdio->blkfactor && !sdio->start_zero_done)) dio_zero_block(dio, sdio, 0, map_bh); /* * Work out, in this_chunk_blocks, how much disk we * can add to this page */ this_chunk_blocks = sdio->blocks_available; u = (PAGE_SIZE - offset_in_page) >> blkbits; if (this_chunk_blocks > u) this_chunk_blocks = u; u = sdio->final_block_in_request - sdio->block_in_file; if (this_chunk_blocks > u) this_chunk_blocks = u; this_chunk_bytes = this_chunk_blocks << blkbits; BUG_ON(this_chunk_bytes == 0); sdio->boundary = buffer_boundary(map_bh); ret = submit_page_section(dio, sdio, page, offset_in_page, this_chunk_bytes, sdio->next_block_for_io, map_bh); if (ret) { page_cache_release(page); goto out; } sdio->next_block_for_io += this_chunk_blocks; sdio->block_in_file += this_chunk_blocks; block_in_page += this_chunk_blocks; sdio->blocks_available -= this_chunk_blocks; next_block: BUG_ON(sdio->block_in_file > sdio->final_block_in_request); if (sdio->block_in_file == sdio->final_block_in_request) break; } /* Drop the ref which was 
taken in get_user_pages() */ page_cache_release(page); block_in_page = 0; } out: return ret; } static inline int drop_refcount(struct dio *dio) { int ret2; unsigned long flags; /* * Sync will always be dropping the final ref and completing the * operation. AIO can if it was a broken operation described above or * in fact if all the bios race to complete before we get here. In * that case dio_complete() translates the EIOCBQUEUED into the proper * return code that the caller will hand to aio_complete(). * * This is managed by the bio_lock instead of being an atomic_t so that * completion paths can drop their ref and use the remaining count to * decide to wake the submission path atomically. */ spin_lock_irqsave(&dio->bio_lock, flags); ret2 = --dio->refcount; spin_unlock_irqrestore(&dio->bio_lock, flags); return ret2; } /* * Returns true if the given offset is aligned to either the IO size * specified by the given blkbits or by the logical block size of the * given block device. * * If the given offset isn't aligned to the blkbits arguments as this is * called then blkbits is set to the block size of the specified block * device. The call can then return either true or false. * * This bizarre calling convention matches the code paths that * duplicated the functionality that this helper was built from. We * reproduce the behaviour to avoid introducing subtle bugs. */ static int dio_aligned(unsigned long offset, unsigned *blkbits, struct block_device *bdev) { unsigned mask = (1 << *blkbits) - 1; /* * Avoid references to bdev if not absolutely needed to give * the early prefetch in the caller enough time. 
*/ if (offset & mask) { if (bdev) *blkbits = blksize_bits(bdev_logical_block_size(bdev)); mask = (1 << *blkbits) - 1; return !(offset & mask); } return 1; } static struct dio *dio_alloc_init(int flags, int rw, struct kiocb *iocb, struct inode *inode, dio_iodone_t end_io, loff_t end) { struct dio *dio; dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); if (!dio) return NULL; /* * Believe it or not, zeroing out the page array caused a .5% * performance regression in a database benchmark. So, we take * care to only zero out what's needed. */ memset(dio, 0, offsetof(struct dio, pages)); dio->flags = flags; /* * For file extending writes updating i_size before data * writeouts complete can expose uninitialized blocks. So * even for AIO, we need to wait for i/o to complete before * returning in this case. */ dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) && (end > i_size_read(inode))); dio->inode = inode; dio->rw = rw; dio->end_io = end_io; dio->iocb = iocb; dio->i_size = i_size_read(inode); spin_lock_init(&dio->bio_lock); dio->refcount = 1; return dio; } static void sdio_init(struct dio_submit *sdio, struct inode *inode, loff_t offset, unsigned blkbits, get_block_t get_block, dio_submit_t *submit_io) { sdio->blkbits = blkbits; sdio->blkfactor = inode->i_blkbits - blkbits; sdio->block_in_file = offset >> blkbits; sdio->get_block = get_block; sdio->submit_io = submit_io; sdio->final_block_in_bio = -1; sdio->next_block_for_io = -1; /* * In case of non-aligned buffers, we may need 2 more * pages since we need to zero out first and last block. 
*/ if (unlikely(sdio->blkfactor)) sdio->pages_in_io = 2; } static int dio_lock_and_flush(struct dio *dio, loff_t offset, loff_t end) { struct inode *inode = dio->inode; int ret; if (dio->flags & DIO_LOCKING) { /* watch out for a 0 len io from a tricksy fs */ if (dio->rw == READ && end > offset) { /* will be released by do_blockdev_direct_IO */ mutex_lock(&inode->i_mutex); ret = filemap_write_and_wait_range(inode->i_mapping, offset, end - 1); if (ret) { mutex_unlock(&inode->i_mutex); return ret; } } } /* * Will be decremented at I/O completion time. */ atomic_inc(&inode->i_dio_count); return 0; } static ssize_t dio_post_submission(int rw, loff_t offset, struct dio *dio, struct dio_submit *sdio, struct buffer_head *map_bh, ssize_t ret) { if (ret == -ENOTBLK) { /* * The remaining part of the request will be * be handled by buffered I/O when we return */ ret = 0; } /* * There may be some unwritten disk at the end of a part-written * fs-block-sized block. Go zero that now. */ dio_zero_block(dio, sdio, 1, map_bh); if (sdio->cur_page) { ssize_t ret2; ret2 = dio_send_cur_page(dio, sdio, map_bh); if (ret == 0) ret = ret2; page_cache_release(sdio->cur_page); sdio->cur_page = NULL; } if (sdio->bio) dio_bio_submit(dio, sdio); /* * It is possible that, we return short IO due to end of file. * In that case, we need to release all the pages we got hold on. */ dio_cleanup(dio, sdio); /* * All block lookups have been performed. For READ requests * we can let i_mutex go now that its achieved its purpose * of protecting us from looking up uninitialized blocks. */ if (rw == READ && (dio->flags & DIO_LOCKING)) mutex_unlock(&dio->inode->i_mutex); /* * The only time we want to leave bios in flight is when a successful * partial aio read or full aio write have been setup. In that case * bio completion will call aio_complete. The only time it's safe to * call aio_complete is when we return -EIOCBQUEUED, so we key on that. * This had *better* be the only place that raises -EIOCBQUEUED. 
*/ BUG_ON(ret == -EIOCBQUEUED); if (dio->is_async && ret == 0 && dio->result && ((rw & READ) || (dio->result == sdio->size))) ret = -EIOCBQUEUED; if (ret != -EIOCBQUEUED) dio_await_completion(dio); if (drop_refcount(dio) == 0) { ret = dio_complete(dio, offset, ret, false); kmem_cache_free(dio_cache, dio); } else BUG_ON(ret != -EIOCBQUEUED); return ret; } /* * This is a library function for use by filesystem drivers. * * The locking rules are governed by the flags parameter: * - if the flags value contains DIO_LOCKING we use a fancy locking * scheme for dumb filesystems. * For writes this function is called under i_mutex and returns with * i_mutex held, for reads, i_mutex is not held on entry, but it is * taken and dropped again before returning. * - if the flags value does NOT contain DIO_LOCKING we don't use any * internal locking but rather rely on the filesystem to synchronize * direct I/O reads/writes versus each other and truncate. * * To help with locking against truncate we incremented the i_dio_count * counter before starting direct I/O, and decrement it once we are done. * Truncate can wait for it to reach zero to provide exclusion. It is * expected that filesystem provide exclusion between new direct I/O * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, * but other filesystems need to take care of this on their own. * * NOTE: if you pass "sdio" to anything by pointer make sure that function * is always inlined. Otherwise gcc is unable to split the structure into * individual fields and will generate much worse code. This is important * for the whole file. 
*/
/*
 * iovec-based direct-IO engine.  Converts writes to WRITE_ODIRECT,
 * validates the alignment of the file offset and of every iovec segment
 * (address and length) against the device/block size, allocates the dio
 * state, takes locks/flushes pagecache, then drives do_direct_IO() once
 * per segment while accounting bytes transferred into dio->result.
 */
static inline ssize_t
do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, int flags)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	unsigned long user_addr;
	size_t bytes;
	struct buffer_head map_bh = { 0, };

	if (rw & WRITE)
		rw = WRITE_ODIRECT;

	if (!dio_aligned(offset, &blkbits, bdev))
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if (!dio_aligned(addr|size, &blkbits, bdev))
			goto out;
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (rw == READ && end == offset)
		return 0;

	dio = dio_alloc_init(flags, rw, iocb, inode, end_io, end);
	retval = -ENOMEM;
	if (!dio)
		goto out;

	retval = dio_lock_and_flush(dio, offset, end);
	if (retval) {
		kmem_cache_free(dio_cache, dio);
		goto out;
	}

	sdio_init(&sdio, inode, offset, blkbits, get_block, submit_io);

	/* Upper bound on the number of user pages this request spans. */
	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.pages_in_io +=
			((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
				PAGE_SIZE - user_addr / PAGE_SIZE);
	}
	/*
	 * Set only on this user-memory path, not in the bvec variant below —
	 * presumably so read completions dirty the user pages; confirm.
	 */
	dio->should_dirty = 1;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		sdio.final_block_in_request = sdio.block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		sdio.head = 0;
		sdio.tail = 0;
		sdio.curr_page = 0;

		sdio.total_pages = 0;
		/* A misaligned head consumes one extra (partial) page. */
		if (user_addr & (PAGE_SIZE-1)) {
			sdio.total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio.curr_user_address = user_addr;

		retval = do_direct_IO(dio, &sdio, &map_bh);

		/* Bytes actually consumed from this segment. */
		dio->result += iov[seg].iov_len -
			((sdio.final_block_in_request -
			  sdio.block_in_file) << blkbits);

		if (retval) {
			dio_cleanup(dio, &sdio);
			break;
		}
	} /* end iovec loop */

	retval = dio_post_submission(rw, offset, dio, &sdio, &map_bh, retval);
out:
	return retval;
}

ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, int flags)
{
	/*
	 * The block device state is needed in the end to finally
	 * submit everything.  Since it's likely to be cache cold
	 * prefetch it here as first thing to hide some of the
	 * latency.
	 *
	 * Attempt to prefetch the pieces we likely need later.
	 */
	prefetch(&bdev->bd_disk->part_tbl);
	prefetch(bdev->bd_queue);
	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

	return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
				     nr_segs, get_block, end_io,
				     submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);

/*
 * bio_vec-based variant: the caller hands us already-resident pages, so
 * each bio_vec is fed to do_direct_IO() as a single pre-pinned page
 * (page_cache_get + dio->pages[0], head=0/tail=1/total_pages=1).
 *
 * NOTE(review): this appears to assume every bv_len fits in one page —
 * confirm against the callers.  Also unlike the iovec path above, this
 * path neither sets dio->should_dirty nor early-returns on a
 * zero-length READ; confirm both asymmetries are intended.
 */
ssize_t
__blockdev_direct_IO_bvec(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, struct bio_vec *bvec, loff_t offset,
	unsigned long bvec_len, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, int flags)
{
	unsigned blkbits = inode->i_blkbits;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	unsigned long i;
	struct buffer_head map_bh = { 0, };

	if (rw & WRITE)
		rw = WRITE_ODIRECT;

	if (!dio_aligned(offset, &blkbits, bdev))
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (i = 0; i < bvec_len; i++) {
		end += bvec[i].bv_len;
		if (!dio_aligned(bvec[i].bv_len | bvec[i].bv_offset,
				 &blkbits, bdev))
			goto out;
	}

	dio = dio_alloc_init(flags, rw, iocb, inode, end_io, end);
	retval = -ENOMEM;
	if (!dio)
		goto out;

	retval = dio_lock_and_flush(dio, offset, end);
	if (retval) {
		kmem_cache_free(dio_cache, dio);
		goto out;
	}

	sdio_init(&sdio, inode, offset, blkbits, get_block, submit_io);

	sdio.pages_in_io = bvec_len;

	for (i = 0; i < bvec_len; i++) {
		sdio.size += bvec[i].bv_len;

		/* Index into the first page of the first block */
		sdio.first_block_in_page = bvec[i].bv_offset >> blkbits;
		sdio.final_block_in_request = sdio.block_in_file +
						(bvec[i].bv_len >> blkbits);
		/* Page fetching state */
		sdio.curr_page = 0;
		/* Pin and hand the page straight to the pipeline. */
		page_cache_get(bvec[i].bv_page);
		dio->pages[0] = bvec[i].bv_page;
		sdio.head = 0;
		sdio.tail = 1;

		sdio.total_pages = 1;
		sdio.curr_user_address = 0;

		retval = do_direct_IO(dio, &sdio, &map_bh);

		/* Bytes actually consumed from this bio_vec. */
		dio->result += bvec[i].bv_len -
			((sdio.final_block_in_request -
			  sdio.block_in_file) << blkbits);

		if (retval) {
			dio_cleanup(dio, &sdio);
			break;
		}
	}

	retval = dio_post_submission(rw, offset, dio, &sdio, &map_bh, retval);
out:
	return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO_bvec);

/* Create the slab cache backing struct dio allocations. */
static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)
gpl-2.0
dveeden/mysql-server
mysys/my_write.c
13
4029
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysys_priv.h" #include "my_sys.h" #include "mysys_err.h" #include <errno.h> #include "my_thread_local.h" /** Write a chunk of bytes to a file if (MyFlags & (MY_NABP | MY_FNABP)) @returns 0 if Count == 0 On succes, 0 On failure, (size_t)-1 == MY_FILE_ERROR otherwise @returns 0 if Count == 0 On success, the number of bytes written. On partial success (if less than Count bytes could be written), the actual number of bytes written. 
On failure, (size_t)-1 == MY_FILE_ERROR
*/

size_t my_write(File Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
{
  size_t writtenbytes;
  size_t sum_written= 0;
  uint errors= 0;
  const size_t initial_count= Count;

  DBUG_ENTER("my_write");
  DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %d",
                   Filedes, Buffer, (ulong) Count, MyFlags));

  /* The behavior of write(fd, buf, 0) is not portable */
  if (unlikely(!Count))
    DBUG_RETURN(0);

  DBUG_EXECUTE_IF ("simulate_no_free_space_error",
                   { DBUG_SET("+d,simulate_file_write_error");});
  /*
   * Retry loop: keeps writing until all of Count is out, or until an
   * error that is neither EINTR, a retryable partial write, nor a
   * waitable ENOSPC/EDQUOT condition ends it.
   */
  for (;;)
  {
    errno= 0;
#ifdef _WIN32
    writtenbytes= my_win_write(Filedes, Buffer, Count);
#else
    writtenbytes= write(Filedes, Buffer, Count);
#endif
    DBUG_EXECUTE_IF("simulate_file_write_error",
                    { errno= ENOSPC; writtenbytes= (size_t) -1; });
    if (writtenbytes == Count)
    {
      sum_written+= writtenbytes;
      break;
    }
    /* Partial write: advance the buffer past what already went out. */
    if (writtenbytes != (size_t) -1)
    {                                           /* Safeguard */
      sum_written+= writtenbytes;
      Buffer+= writtenbytes;
      Count-= writtenbytes;
    }
    set_my_errno(errno);
    DBUG_PRINT("error",("Write only %ld bytes, error: %d",
                        (long) writtenbytes, my_errno()));
    if (is_killed_hook(NULL))
      MyFlags&= ~ MY_WAIT_IF_FULL;              /* End if aborted by user */

    /* Disk full / quota exceeded: optionally block until space frees up. */
    if ((my_errno() == ENOSPC || my_errno() == EDQUOT) &&
        (MyFlags & MY_WAIT_IF_FULL))
    {
      wait_for_free_space(my_filename(Filedes), errors);
      errors++;
      DBUG_EXECUTE_IF("simulate_no_free_space_error",
                      { DBUG_SET("-d,simulate_file_write_error");});
      continue;
    }

    if (writtenbytes != 0 && writtenbytes != (size_t) -1)
      continue;                                 /* Retry if something written */
    else if (my_errno() == EINTR)
    {
      DBUG_PRINT("debug", ("my_write() was interrupted and returned %ld",
                           (long) writtenbytes));
      continue;                                 /* Interrupted, retry */
    }
    else if (writtenbytes == 0 && !errors++)    /* Retry once */
    {
      /* We may come here if the file quota is exceeded */
      continue;
    }
    break;
  }
  /* MY_NABP/MY_FNABP callers want 0 on full success, not a byte count. */
  if (MyFlags & (MY_NABP | MY_FNABP))
  {
    if (sum_written == initial_count)
      DBUG_RETURN(0);           /* Want only errors, not bytes written */
    if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
    {
      char errbuf[MYSYS_STRERROR_SIZE];
      my_error(EE_WRITE, MYF(0), my_filename(Filedes),
               my_errno(), my_strerror(errbuf, sizeof(errbuf), my_errno()));
    }
    DBUG_RETURN(MY_FILE_ERROR);
  }

  if (sum_written == 0)
    DBUG_RETURN(MY_FILE_ERROR);

  /* Otherwise report the (possibly partial) number of bytes written. */
  DBUG_RETURN(sum_written);
} /* my_write */
gpl-2.0
jarodwilson/linux-muck
drivers/clk/clk-max77686.c
13
8309
/* * clk-max77686.c - Clock driver for Maxim 77686/MAX77802 * * Copyright (C) 2012 Samsung Electornics * Jonghwa Lee <jonghwa3.lee@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/max77620.h> #include <linux/mfd/max77686.h> #include <linux/mfd/max77686-private.h> #include <linux/clk-provider.h> #include <linux/mutex.h> #include <linux/clkdev.h> #include <linux/of.h> #include <linux/regmap.h> #include <dt-bindings/clock/maxim,max77686.h> #include <dt-bindings/clock/maxim,max77802.h> #include <dt-bindings/clock/maxim,max77620.h> #define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3 enum max77686_chip_name { CHIP_MAX77686, CHIP_MAX77802, CHIP_MAX77620, }; struct max77686_hw_clk_info { const char *name; u32 clk_reg; u32 clk_enable_mask; u32 flags; }; struct max77686_clk_init_data { struct regmap *regmap; struct clk_hw hw; struct clk_init_data clk_idata; const struct max77686_hw_clk_info *clk_info; }; struct max77686_clk_driver_data { enum max77686_chip_name chip; struct max77686_clk_init_data *max_clk_data; size_t num_clks; }; static const struct max77686_hw_clk_info max77686_hw_clks_info[MAX77686_CLKS_NUM] = { [MAX77686_CLK_AP] = { .name = "32khz_ap", .clk_reg = 
MAX77686_REG_32KHZ, .clk_enable_mask = BIT(MAX77686_CLK_AP), }, [MAX77686_CLK_CP] = { .name = "32khz_cp", .clk_reg = MAX77686_REG_32KHZ, .clk_enable_mask = BIT(MAX77686_CLK_CP), }, [MAX77686_CLK_PMIC] = { .name = "32khz_pmic", .clk_reg = MAX77686_REG_32KHZ, .clk_enable_mask = BIT(MAX77686_CLK_PMIC), }, }; static const struct max77686_hw_clk_info max77802_hw_clks_info[MAX77802_CLKS_NUM] = { [MAX77802_CLK_32K_AP] = { .name = "32khz_ap", .clk_reg = MAX77802_REG_32KHZ, .clk_enable_mask = BIT(MAX77802_CLK_32K_AP), }, [MAX77802_CLK_32K_CP] = { .name = "32khz_cp", .clk_reg = MAX77802_REG_32KHZ, .clk_enable_mask = BIT(MAX77802_CLK_32K_CP), }, }; static const struct max77686_hw_clk_info max77620_hw_clks_info[MAX77620_CLKS_NUM] = { [MAX77620_CLK_32K_OUT0] = { .name = "32khz_out0", .clk_reg = MAX77620_REG_CNFG1_32K, .clk_enable_mask = MAX77620_CNFG1_32K_OUT0_EN, }, }; static struct max77686_clk_init_data *to_max77686_clk_init_data( struct clk_hw *hw) { return container_of(hw, struct max77686_clk_init_data, hw); } static int max77686_clk_prepare(struct clk_hw *hw) { struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw); return regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg, max77686->clk_info->clk_enable_mask, max77686->clk_info->clk_enable_mask); } static void max77686_clk_unprepare(struct clk_hw *hw) { struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw); regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg, max77686->clk_info->clk_enable_mask, ~max77686->clk_info->clk_enable_mask); } static int max77686_clk_is_prepared(struct clk_hw *hw) { struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw); int ret; u32 val; ret = regmap_read(max77686->regmap, max77686->clk_info->clk_reg, &val); if (ret < 0) return -EINVAL; return val & max77686->clk_info->clk_enable_mask; } static unsigned long max77686_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { return 32768; } static struct clk_ops 
max77686_clk_ops = { .prepare = max77686_clk_prepare, .unprepare = max77686_clk_unprepare, .is_prepared = max77686_clk_is_prepared, .recalc_rate = max77686_recalc_rate, }; static struct clk_hw * of_clk_max77686_get(struct of_phandle_args *clkspec, void *data) { struct max77686_clk_driver_data *drv_data = data; unsigned int idx = clkspec->args[0]; if (idx >= drv_data->num_clks) { pr_err("%s: invalid index %u\n", __func__, idx); return ERR_PTR(-EINVAL); } return &drv_data->max_clk_data[idx].hw; } static int max77686_clk_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; const struct platform_device_id *id = platform_get_device_id(pdev); struct max77686_clk_driver_data *drv_data; const struct max77686_hw_clk_info *hw_clks; struct regmap *regmap; int i, ret, num_clks; drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; regmap = dev_get_regmap(parent, NULL); if (!regmap) { dev_err(dev, "Failed to get rtc regmap\n"); return -ENODEV; } drv_data->chip = id->driver_data; switch (drv_data->chip) { case CHIP_MAX77686: num_clks = MAX77686_CLKS_NUM; hw_clks = max77686_hw_clks_info; break; case CHIP_MAX77802: num_clks = MAX77802_CLKS_NUM; hw_clks = max77802_hw_clks_info; break; case CHIP_MAX77620: num_clks = MAX77620_CLKS_NUM; hw_clks = max77620_hw_clks_info; break; default: dev_err(dev, "Unknown Chip ID\n"); return -EINVAL; } drv_data->max_clk_data = devm_kcalloc(dev, num_clks, sizeof(*drv_data->max_clk_data), GFP_KERNEL); if (!drv_data->max_clk_data) return -ENOMEM; for (i = 0; i < num_clks; i++) { struct max77686_clk_init_data *max_clk_data; const char *clk_name; max_clk_data = &drv_data->max_clk_data[i]; max_clk_data->regmap = regmap; max_clk_data->clk_info = &hw_clks[i]; max_clk_data->clk_idata.flags = hw_clks[i].flags; max_clk_data->clk_idata.ops = &max77686_clk_ops; if (parent->of_node && !of_property_read_string_index(parent->of_node, "clock-output-names", i, &clk_name)) 
max_clk_data->clk_idata.name = clk_name; else max_clk_data->clk_idata.name = hw_clks[i].name; max_clk_data->hw.init = &max_clk_data->clk_idata; ret = devm_clk_hw_register(dev, &max_clk_data->hw); if (ret) { dev_err(dev, "Failed to clock register: %d\n", ret); return ret; } ret = clk_hw_register_clkdev(&max_clk_data->hw, max_clk_data->clk_idata.name, NULL); if (ret < 0) { dev_err(dev, "Failed to clkdev register: %d\n", ret); return ret; } } if (parent->of_node) { ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get, drv_data); if (ret < 0) { dev_err(dev, "Failed to register OF clock provider: %d\n", ret); return ret; } } /* MAX77802: Enable low-jitter mode on the 32khz clocks. */ if (drv_data->chip == CHIP_MAX77802) { ret = regmap_update_bits(regmap, MAX77802_REG_32KHZ, 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT, 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); if (ret < 0) { dev_err(dev, "Failed to config low-jitter: %d\n", ret); goto remove_of_clk_provider; } } return 0; remove_of_clk_provider: if (parent->of_node) of_clk_del_provider(parent->of_node); return ret; } static int max77686_clk_remove(struct platform_device *pdev) { struct device *parent = pdev->dev.parent; if (parent->of_node) of_clk_del_provider(parent->of_node); return 0; } static const struct platform_device_id max77686_clk_id[] = { { "max77686-clk", .driver_data = CHIP_MAX77686, }, { "max77802-clk", .driver_data = CHIP_MAX77802, }, { "max77620-clock", .driver_data = CHIP_MAX77620, }, {}, }; MODULE_DEVICE_TABLE(platform, max77686_clk_id); static struct platform_driver max77686_clk_driver = { .driver = { .name = "max77686-clk", }, .probe = max77686_clk_probe, .remove = max77686_clk_remove, .id_table = max77686_clk_id, }; module_platform_driver(max77686_clk_driver); MODULE_DESCRIPTION("MAXIM 77686 Clock Driver"); MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
xiaogaogao/linuxFromDigilent
drivers/i2c/algos/xilinx_iic/i2c-algo-xilinx.c
13
17995
/* * i2c-algo-xilinx.c * * Xilinx IIC Adapter component to interface IIC component to Linux * * Author: MontaVista Software, Inc. * source@mvista.com * * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms * of the GNU General Public License version 2. This program is licensed * "as is" without any warranty of any kind, whether express or implied. */ /* * I2C drivers are split into two pieces: the adapter and the algorithm. * The adapter is responsible for actually manipulating the hardware and * the algorithm is the layer above that that handles the higher level * tasks such as transmitting or receiving a buffer. The best example * (in my opinion) of this is the bit banging algorithm has a number of * different adapters that can plug in under it to actually wiggle the * SDA and SCL. * * The interesting part is that the drivers Xilinx provides with their * IP are also split into two pieces where one part is the OS * independent code and the other part is the OS dependent code. All of * the other sources in this directory are the OS independent files as * provided by Xilinx with no changes made to them. * * As it turns out, this maps quite well into the I2C driver philosophy. * This file is the I2C algorithm that communicates with the Xilinx OS * independent function that will serve as our I2C adapter. The * unfortunate part is that the term "adapter" is overloaded in our * context. Xilinx refers to the OS dependent part of a driver as an * adapter. So from an I2C driver perspective, this file is not an * adapter; that role is filled by the Xilinx OS independent files. * From a Xilinx perspective, this file is an adapter; it adapts their * OS independent code to Linux. * * Another thing to consider is that the Xilinx OS dependent code knows * nothing about Linux I2C adapters, so even though this file is billed * as the I2C algorithm, it takes care of the i2c_adapter structure. 
* * Fortunately, naming conventions will give you a clue as to what comes * from where. Functions beginning with XIic_ are provided by the * Xilinx OS independent files. Functions beginning with i2c_ are * provided by the I2C Linux core. All functions in this file that are * called by Linux have names that begin with xiic_. The functions in * this file that have Handler in their name are registered as callbacks * with the underlying Xilinx OS independent layer. Any other functions * are static helper functions. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/xilinx_devices.h> #include <linux/semaphore.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/irq.h> #include "xbasic_types.h" #include "xiic.h" #include "xiic_i.h" #include <linux/platform_device.h> #include <linux/of_platform.h> #include <linux/of_i2c.h> #include <linux/of_address.h> MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); MODULE_DESCRIPTION("Xilinx IIC driver"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(scan, "Scan for active chips on the bus"); static int scan = 0; /* have a look at what's hanging 'round */ /* SAATODO: actually use these? */ #define XIIC_TIMEOUT 100 #define XIIC_RETRY 3 #define XILINX_IIC "xilinx_iic" /* Our private per device data. */ struct xiic_data { struct i2c_adapter adap; /* The Linux I2C core data */ int index; /* index taken from platform_device */ struct completion complete; /* for waiting for interrupts */ u32 base; /* base memory address */ unsigned int irq; /* device IRQ number */ volatile u32 transmit_intr_flag; /* semaphore across task and interrupt - ECM */ volatile u32 receive_intr_flag; /* semaphore across task and interrupt - ECM */ volatile u32 status_intr_flag; /* semaphore across task and interrupt - ECM */ /* * The underlying OS independent code needs space as well. 
A * pointer to the following XIic structure will be passed to * any XIic_ function that requires it. However, we treat the * data as an opaque object in this file (meaning that we never * reference any of the fields inside of the structure). */ XIic Iic; /* * The following bit fields are used to keep track of what * all has been done to initialize the xiic_dev to make * error handling out of probe() easier. */ unsigned int reqirq:1; /* Has request_irq() been called? */ unsigned int remapped:1; /* Has ioremap() been called? */ unsigned int started:1; /* Has XIic_Start() been called? */ unsigned int added:1; /* Has i2c_add_adapter() been called? */ }; /******************************************************************************* * This configuration stuff should become unnecessary after EDK version 8.x is * released. ******************************************************************************/ static DEFINE_SEMAPHORE(cfg_sem); static int xiic_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct xiic_data *dev = (struct xiic_data *) i2c_adap; struct i2c_msg *pmsg; u32 options; int i, retries; u32 Status; u32 writeop; for (i = 0; i < num; i++) { pmsg = &msgs[i]; if (!pmsg->len) /* If length is zero */ continue; /* on to the next request. */ /* * This code checks up to 16 times for the * bus busy condition. */ retries = 4; while((XIic_IsIicBusy(&dev->Iic) == TRUE) && (retries-- != 0)) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/250); } /* If bus is still busy, bail */ if (XIic_IsIicBusy(&dev->Iic) == TRUE) { printk(KERN_WARNING "%s #%d: Could not talk to device 0x%2x (%d), bus always busy, trying to reset\n", dev->adap.name, dev->index, pmsg->addr, dev->status_intr_flag); /* Try stopping, reseting and starting device to clear condition */ if (XIic_Stop(&dev->Iic) != XST_SUCCESS) { /* The bus was in use.. */ printk(KERN_WARNING "%s #%d: Could not stop device. 
Restart from higher layer.\n", dev->adap.name, dev->index); return -ENXIO; } else { XIic_Reset(&dev->Iic); if (XIic_Start(&dev->Iic) != XST_SUCCESS) { printk(KERN_ERR "%s #%d: Could not start device.\n", dev->adap.name, dev->index); return -ENODEV; } return -ENXIO; } } options = 0; if (pmsg->flags & I2C_M_TEN) options |= XII_SEND_10_BIT_OPTION; XIic_SetOptions(&dev->Iic, options); if (XIic_SetAddress(&dev->Iic, XII_ADDR_TO_SEND_TYPE, pmsg->addr) != XST_SUCCESS) { printk(KERN_WARNING "%s #%d: Could not set address to 0x%2x.\n", dev->adap.name, dev->index, pmsg->addr); return -EIO; } dev->transmit_intr_flag = 0xFFFFFFFF; dev->receive_intr_flag = 0xFFFFFFFF; dev->status_intr_flag = 0xFFFFFFFF; /* set the writeop flag to 0 so the adapter does not wait * at bottom of loop */ writeop = 0; dev->Iic.Stats.TxErrors = 0; if (pmsg->flags & I2C_M_RD) { Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len); } else { Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len); } if (Status != XST_SUCCESS) { printk(KERN_WARNING "%s #%d: Unexpected error %d.\n", dev->adap.name, dev->index, (int)Status); return -EIO; } /* * Wait till the data is transmitted or received. If there is an error * retry for 10 times. 
*/ retries = 10; if(pmsg->flags & I2C_M_RD) { while((((volatile int)(dev->receive_intr_flag)) != 0) && (retries != 0)) { if ( dev->Iic.Stats.TxErrors != 0) { udelay(25); Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len); dev->Iic.Stats.TxErrors = 0; retries--; } /* the udelay was not working for Microblaze and this seems like a better solution */ schedule_timeout_interruptible(1); } } else { while((((volatile int)(dev->transmit_intr_flag)) != 0) && (retries != 0)) { if ( dev->Iic.Stats.TxErrors != 0) { udelay(25); Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len); dev->Iic.Stats.TxErrors = 0; retries--; } /* the udelay was not working for Microblaze and this seems like a better solution */ schedule_timeout_interruptible(1); } } if(retries == 0) { printk("Unable to talk to Device\n"); printk("Wrong Slave address or Slave device Busy\n"); } } return num; } static u32 xiic_bit_func(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; } static struct i2c_algorithm xiic_algo = { .master_xfer = xiic_xfer, /* master_xfer */ .smbus_xfer = NULL, /* smbus_xfer */ .functionality = xiic_bit_func, /* functionality */ }; /* * This routine is registered with the OS as the function to call when * the IIC interrupts. It in turn, calls the Xilinx OS independent * interrupt function. The Xilinx OS independent interrupt function * will in turn call any callbacks that we have registered for various * conditions. 
*/ static irqreturn_t xiic_interrupt(int irq, void *dev_id) { struct xiic_data *dev = dev_id; XIic_InterruptHandler(&dev->Iic); return IRQ_HANDLED; } static void RecvHandler(void *CallbackRef, int ByteCount) { struct xiic_data *dev = (struct xiic_data *)CallbackRef; if (ByteCount == 0) { (dev->receive_intr_flag) = XST_SUCCESS; complete(&dev->complete); } } static void SendHandler(void *CallbackRef, int ByteCount) { struct xiic_data *dev = (struct xiic_data *)CallbackRef; if (ByteCount == 0) { (dev->transmit_intr_flag) = XST_SUCCESS; complete(&dev->complete); } } static void StatusHandler(void *CallbackRef, int Status) { struct xiic_data *dev = (struct xiic_data *)CallbackRef; (dev->status_intr_flag) = Status; complete(&dev->complete); } static char *xilinx_iic_do_scan(struct xiic_data *dev) { int i; char *page = kmalloc(PAGE_SIZE, GFP_KERNEL); char *cptr = page; u8 data; u32 status; for (i = 0x08; i < 0x78 && cptr; i++) { snprintf(cptr, PAGE_SIZE - (cptr - page), "%02X: ", i); cptr += strlen(cptr); init_completion(&dev->complete); if (XIic_SetAddress(&dev->Iic, XII_ADDR_TO_SEND_TYPE, i) != XST_SUCCESS) { snprintf(cptr, PAGE_SIZE - (cptr - page), "can't set address\n"); cptr += strlen(cptr); continue; } dev->receive_intr_flag = ~0; status = XIic_MasterRecv(&dev->Iic, &data, sizeof(data)); if (status != XST_SUCCESS) { snprintf(cptr, PAGE_SIZE - (cptr - page), "unexpected error\n"); cptr += strlen(cptr); continue; } wait_for_completion(&dev->complete); snprintf(cptr, PAGE_SIZE - (cptr - page), dev->receive_intr_flag == XST_SUCCESS ? 
"OK\n" : "not respoding\n"); cptr += strlen(cptr); } return page; } static ssize_t scan_show(struct device *d, struct device_attribute *attr, char *text) { int len = 0; char *scan_text = xilinx_iic_do_scan(dev_get_drvdata(d)); if (scan_text) { len = strlen(scan_text); memcpy(text, scan_text, len); kfree(scan_text); } return len; } static DEVICE_ATTR(scan, S_IRUGO, scan_show, NULL); static int __devexit xilinx_iic_remove(struct device *device) { struct xiic_data *dev; dev = dev_get_drvdata(device); /* * If we've told the core I2C code about this dev, tell * the core I2C code to forget the dev. */ if (dev->added) { /* * If an error is returned, there's not a whole lot we can * do. An error has already been printed out so we'll * just keep trundling along. */ (void)i2c_del_adapter(&dev->adap); } /* Tell the Xilinx code to take this IIC interface down. */ if (dev->started) { while (XIic_Stop(&dev->Iic) != XST_SUCCESS) { /* The bus was busy. Retry. */ printk(KERN_WARNING "%s #%d: Could not stop device. Will retry.\n", dev->adap.name, dev->index); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ / 2); } } /* * Now that the Xilinx code isn't using the IRQ or registers, * unmap the registers and free the IRQ. */ if (dev->remapped) { iounmap((void *)dev->Iic.BaseAddress); } if (dev->reqirq) { disable_irq(dev->irq); free_irq(dev->irq, dev); } device_remove_file(device, &dev_attr_scan); kfree(dev); return 0; } /** Shared device initialization code */ static int __devinit xilinx_iic_setup( struct device *device, struct device_node *node, struct resource *r_mem, struct resource *r_irq, u32 ten_bit_addr, u32 gpo_width) { XIic_Config xiic_cfg; struct xiic_data *dev; char *scan_results; int error; /* Allocate the dev and zero it out. 
*/ dev = kmalloc(sizeof(struct xiic_data), GFP_KERNEL); if (!dev) { dev_err(device, "Cannot allocate struct xiic_data\n"); error = -ENOMEM; goto out2; } memset(dev, 0, sizeof(struct xiic_data)); dev_set_drvdata(device, dev); dev->irq = r_irq->start; /* initialize fields to satisfy i2c */ dev->index = 0; init_completion(&dev->complete); memset(&xiic_cfg, 0, sizeof(XIic_Config)); xiic_cfg.DeviceId = 0; /* Change the addresses to be virtual; save the old ones to restore. */ dev->base = r_mem->start; xiic_cfg.BaseAddress = (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1); dev->remapped = 1; down(&cfg_sem); xiic_cfg.Has10BitAddr = (int)ten_bit_addr; xiic_cfg.GpOutWidth = (u8)gpo_width; /* Tell the Xilinx code to bring this IIC interface up. */ if (XIic_CfgInitialize(&dev->Iic, &xiic_cfg, xiic_cfg.BaseAddress) != XST_SUCCESS) { up(&cfg_sem); dev_err(device, "could not initialize device.\n"); error = -ENODEV; goto out; } up(&cfg_sem); XIic_SetRecvHandler(&dev->Iic, (void *)dev, RecvHandler); XIic_SetSendHandler(&dev->Iic, (void *)dev, SendHandler); XIic_SetStatusHandler(&dev->Iic, (void *)dev, StatusHandler); /* Grab the IRQ */ error = request_irq(dev->irq, xiic_interrupt, 0, dev->adap.name, dev); if (error) { dev_err(device, "could not allocate interrupt %d.\n", dev->irq); goto out; } dev->reqirq = 1; if (XIic_Start(&dev->Iic) != XST_SUCCESS) { dev_err(device, "could not start device\n"); error = -ENODEV; goto out; } dev->started = 1; /* Now tell the core I2C code about our new device. 
*/ strcpy(dev->adap.name, "xilinx-iic"); dev->adap.dev.of_node = node; dev->adap.algo = &xiic_algo; dev->adap.algo_data = NULL; dev->adap.timeout = XIIC_TIMEOUT; dev->adap.retries = XIIC_RETRY; error = i2c_add_adapter(&dev->adap); if (error) { dev_err(device, "could not add i2c adapter\n"); goto out; } dev->added = 1; printk("%s #%d at 0x%08X mapped to 0x%08X, irq=%d\n", dev->adap.name, dev->index, dev->base, (unsigned int)dev->Iic.BaseAddress, dev->irq); if (scan) { scan_results = xilinx_iic_do_scan(dev); if (scan_results) { printk(scan_results); kfree(scan_results); } } of_i2c_register_devices(&dev->adap); error = device_create_file(device, &dev_attr_scan); out: if (error) xilinx_iic_remove(device); out2: return error; } /* Match table for of_platform binding */ static struct of_device_id __devinitdata xilinx_iic_of_match[] = { { .compatible = "xlnx,xps-iic-2.00.a", }, {}, }; MODULE_DEVICE_TABLE(of, xilinx_iic_of_match); static u32 get_u32(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); if(p) { return __be32_to_cpup(p); } else { dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to 0.\n", s); return 0; } } static int __devinit xilinx_iic_of_probe(struct platform_device *ofdev, const struct of_device_id *match) { u32 ten_bit_addr, gpo_width; struct resource r_irq_struct; struct resource r_mem_struct; struct resource *r_irq = &r_irq_struct; /* Interrupt resources */ struct resource *r_mem = &r_mem_struct; /* IO mem resources */ int rc = 0; printk(KERN_INFO "Device Tree Probing \'%s\'\n", ofdev->dev.of_node->name); /* Get iospace for the device */ rc = of_address_to_resource(ofdev->dev.of_node, 0, r_mem); if(rc) { dev_warn(&ofdev->dev, "invalid address\n"); return rc; } /* Get IRQ for the device */ rc = of_irq_to_resource(ofdev->dev.of_node, 0, r_irq); if(!rc) { dev_warn(&ofdev->dev, "no IRQ found.\n"); return rc; } ten_bit_addr = get_u32(ofdev, "xlnx,ten-bit-adr"); gpo_width = get_u32(ofdev, 
"xlnx,gpo-width"); return xilinx_iic_setup(&ofdev->dev, ofdev->dev.of_node, r_mem, r_irq, ten_bit_addr, gpo_width); } static int __devexit xilinx_iic_of_remove(struct platform_device *ofdev) { return xilinx_iic_remove(&ofdev->dev); } static struct platform_driver xilinx_iic_of_driver = { .driver = { .name = "iic", .owner = THIS_MODULE, .of_match_table = xilinx_iic_of_match, }, .probe = xilinx_iic_of_probe, .remove = __devexit_p(xilinx_iic_of_remove), }; /* Registration helpers to keep the number of #ifdefs to a minimum */ static inline int __init xilinx_iic_of_register(void) { return platform_driver_register(&xilinx_iic_of_driver); } static inline void __exit xilinx_iic_of_unregister(void) { platform_driver_unregister(&xilinx_iic_of_driver); } static int __init xiic_init(void) { int ret; ret = xilinx_iic_of_register(); if (ret) printk(KERN_ERR "registering iic driver failed: err=%i", ret); return ret; } static void __exit xiic_cleanup(void) { xilinx_iic_of_unregister(); } module_init(xiic_init); module_exit(xiic_cleanup);
gpl-2.0
EmmanuelU/wild_kernel_samsung_msm8660
arch/arm/kernel/topology.c
13
11144
/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>	/* for copy_from_user */
#endif

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heteregenous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balance. A per cpu structure
 * is preferred because each CPU updates its own cpu_power field during the
 * load balance except for idle cores. One idle core is selected to run the
 * rebalance_domains for all idle cores and the cpu_power can be updated
 * during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

/*
 * Scheduler hook: report this cpu's current relative capacity.
 * Simply reads back the per-cpu value set via set_power_scale().
 */
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

/* Store a new relative capacity for @cpu (consumed by the hook above). */
static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}

/*
 * cpu topology management
 */

/* Mask selecting the implementer/part fields of the main CPU ID register. */
#define ARM_FAMILY_MASK 0xFF0FFFF0

/* MPIDR bits: "new multiprocessor format" marker and the MT (SMT) flag. */
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)

/*
 * These masks reflect the current use of the affinity levels.
 * The affinity level can be up to 16 bits according to ARM ARM
 */
#define MPIDR_LEVEL0_MASK 0x3
#define MPIDR_LEVEL0_SHIFT 0
#define MPIDR_LEVEL1_MASK 0xF
#define MPIDR_LEVEL1_SHIFT 8
#define MPIDR_LEVEL2_MASK 0xFF
#define MPIDR_LEVEL2_SHIFT 16

/*
 * CPU topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];

/*
 * cpu power scale management
 * a per cpu data structure should be better because each cpu is mainly
 * using its own cpu_power even it's not always true because of
 * nohz_idle_balance
 */

/*
 * cpu topology mask update management
 *
 * Previous values of the sched_mc/sched_smt power-savings knobs, used by
 * need_topology_update() to detect a policy change between rebuilds.
 */
static unsigned int prev_sched_mc_power_savings = 0;
static unsigned int prev_sched_smt_power_savings = 0;

/* Chain notified after a topology rebuild (see arch_update_cpu_topology). */
ATOMIC_NOTIFIER_HEAD(topology_update_notifier_list);

/*
 * Update the cpu power of the scheduler
 */

/* Register @nb on the topology-update notifier chain. */
int topology_register_notifier(struct notifier_block *nb)
{

	return atomic_notifier_chain_register(
		&topology_update_notifier_list, nb);
}

/* Remove @nb from the topology-update notifier chain. */
int topology_unregister_notifier(struct notifier_block *nb)
{

	return atomic_notifier_chain_unregister(
		&topology_update_notifier_list, nb);
}

/*
 * sched_domain flag configuration
 */
/* TODO add a config flag for this function */
int arch_sd_sibling_asym_packing(void)
{
	/* Ask for asymmetric packing only when a power-savings mode is on. */
	if (sched_smt_power_savings || sched_mc_power_savings)
		return SD_ASYM_PACKING;
	return 0;
}

/*
 * default topology function
 */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

/*
 * clear cpu topology masks
 *
 * Empties the core/thread sibling masks of every possible cpu so they can
 * be rebuilt from scratch by update_cpu_topology_mask().
 */
static void clear_cpu_topology_mask(void)
{
	unsigned int cpuid;
	for_each_possible_cpu(cpuid) {
		struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]);
		cpumask_clear(&cpuid_topo->core_sibling);
		cpumask_clear(&cpuid_topo->thread_sibling);
	}
	smp_wmb();	/* publish the cleared masks before they are rebuilt */
}

/*
 * default_cpu_topology_mask set the core and thread mask as described in the
 * ARM ARM
 *
 * Links @cpuid with every cpu sharing its socket_id (core siblings) and,
 * within those, every cpu sharing its core_id (thread siblings). Masks are
 * updated symmetrically on both cpus.
 */
static void default_cpu_topology_mask(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
			if (cpu != cpuid)
				cpumask_set_cpu(cpu,
					&cpuid_topo->core_sibling);

			if (cpuid_topo->core_id == cpu_topo->core_id) {
				cpumask_set_cpu(cpuid,
					&cpu_topo->thread_sibling);
				if (cpu != cpuid)
					cpumask_set_cpu(cpu,
						&cpuid_topo->thread_sibling);
			}
		}
	}
	smp_wmb();
}

/* Apply the default (ARM ARM) sibling masks to every possible cpu. */
static void normal_cpu_topology_mask(void)
{
	unsigned int cpuid;

	for_each_possible_cpu(cpuid) {
		default_cpu_topology_mask(cpuid);
	}
	smp_wmb();
}

/*
 * For Cortex-A9 MPcore, we emulate a multi-package topology in power mode.
 * The goal is to gathers tasks on 1 virtual package
 *
 * Cpus with the same socket_id AND the same parity of the cpu number
 * (cpuid & 0x1) are grouped together, splitting the real package into two
 * virtual ones so the scheduler can gather load onto one of them.
 */
static void power_cpu_topology_mask_CA9(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];

		if ((cpuid_topo->socket_id == cpu_topo->socket_id)
		&& ((cpuid & 0x1) == (cpu & 0x1))) {
			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
			if (cpu != cpuid)
				cpumask_set_cpu(cpu,
					&cpuid_topo->core_sibling);

			if (cpuid_topo->core_id == cpu_topo->core_id) {
				cpumask_set_cpu(cpuid,
					&cpu_topo->thread_sibling);
				if (cpu != cpuid)
					cpumask_set_cpu(cpu,
						&cpuid_topo->thread_sibling);
			}
		}
	}
	smp_wmb();
}

/*
 * Return non-zero when either power-savings knob changed since the last
 * call; also records the current values for the next comparison.
 */
static int need_topology_update(void)
{
	int update;

	update = ((prev_sched_mc_power_savings ^ sched_mc_power_savings)
	|| (prev_sched_smt_power_savings ^ sched_smt_power_savings));

	prev_sched_mc_power_savings = sched_mc_power_savings;
	prev_sched_smt_power_savings = sched_smt_power_savings;

	return update;
}

/* read_cpuid_id() & ARM_FAMILY_MASK value identifying a Cortex-A9. */
#define ARM_CORTEX_A9_FAMILY 0x410FC090

/* update_cpu_topology_policy select a cpu topology policy according to the
 * available cores.
 * TODO: The current version assumes that all cores are exactly the same which
 * might not be true. We need to update it to take into account various
 * configuration among which system with different kind of core.
 */
static int update_cpu_topology_mask(void)
{
	unsigned long cpuid;

	/* No power-savings mode: use the plain ARM ARM topology. */
	if (sched_mc_power_savings == POWERSAVINGS_BALANCE_NONE) {
		normal_cpu_topology_mask();
		return 0;
	}

	for_each_possible_cpu(cpuid) {
		struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]);

		/* Pick the per-family power topology, if one exists. */
		switch (cpuid_topo->id) {
		case ARM_CORTEX_A9_FAMILY:
			power_cpu_topology_mask_CA9(cpuid);
			break;
		default:
			default_cpu_topology_mask(cpuid);
			break;
		}
	}

	return 0;
}

/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 *
 * Decodes this cpu's MPIDR into thread/core/socket ids and fills in the
 * default sibling masks for it.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system
		 * multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
				& MPIDR_LEVEL2_MASK;
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
		}

		cpuid_topo->id = read_cpuid_id() & ARM_FAMILY_MASK;
	} else {
		/*
		 * This is an uniprocessor system
		 * we are in multiprocessor format but uniprocessor system
		 * or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	/*
	 * The core and thread sibling masks can also be updated during the
	 * call of arch_update_cpu_topology
	 */
	default_cpu_topology_mask(cpuid);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}

/*
 * arch_update_cpu_topology is called by the scheduler before building
 * a new sched_domain hierarchy.
 *
 * Returns 1 (and notifies listeners) only when the power-savings policy
 * changed and the masks were rebuilt; 0 means nothing to do.
 */
int arch_update_cpu_topology(void)
{
	if (!need_topology_update())
		return 0;

	/* clear core threads mask */
	clear_cpu_topology_mask();

	/* set topology mask */
	update_cpu_topology_mask();

	/* notify the topology update */
	atomic_notifier_call_chain(&topology_update_notifier_list,
		TOPOLOGY_POSTCHANGE, (void *)sched_mc_power_savings);

	return 1;
}

/*
 * init_cpu_topology is called at boot when only one cpu is running
 * which prevent simultaneous write access to cpu_topology array
 *
 * Resets every entry to "unknown" (-1 ids, empty masks) and gives each
 * cpu the default power scale.
 */
void init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->id = -1;
		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();
}

/*
 * debugfs interface for scaling cpu power
 */

#ifdef CONFIG_DEBUG_FS
static struct dentry *topo_debugfs_root;

/*
 * Write handler for a per-cpu "cpu_power" debugfs file: parses a decimal
 * value from userspace and stores it into the unsigned int pointed to by
 * the inode's i_private (the per-cpu cpu_scale slot).
 */
static ssize_t dbg_write(struct file *file, const char __user *buf,
						size_t size, loff_t *off)
{
	unsigned int *value = file->f_dentry->d_inode->i_private;
	char cdata[128];
	unsigned long tmp;

	if (size < (sizeof(cdata)-1)) {
		if (copy_from_user(cdata, buf, size))
			return -EFAULT;

		cdata[size] = 0;	/* NUL-terminate before parsing */
		if (!strict_strtoul(cdata, 10, &tmp)) {
			*value = tmp;
		}
		/* NOTE(review): a non-numeric write still returns size. */
		return size;
	}
	return -EINVAL;
}

/* Read handler: formats the current value as "%u\n" for userspace. */
static ssize_t dbg_read(struct file *file, char __user *buf,
						size_t size, loff_t *off)
{
	unsigned int *value = file->f_dentry->d_inode->i_private;
	char cdata[128];
	unsigned int len;

	len = sprintf(cdata, "%u\n", *value);
	return simple_read_from_buffer(buf, size, off, cdata, len);
}

static const struct file_operations debugfs_fops = {
	.read = dbg_read,
	.write = dbg_write,
};

/*
 * Create "cpuN/cpu_power" under @parent, exposing this cpu's cpu_scale.
 * Returns the cpuN directory dentry, or NULL on failure (partial state
 * is removed).
 */
static struct dentry *topo_debugfs_register(unsigned int cpu,
						struct dentry *parent)
{
	struct dentry *cpu_d, *d;
	char cpu_name[16];

	sprintf(cpu_name, "cpu%u", cpu);

	cpu_d = debugfs_create_dir(cpu_name, parent);
	if (!cpu_d)
		return NULL;

	d = debugfs_create_file("cpu_power", S_IRUGO | S_IWUGO,
				cpu_d, &per_cpu(cpu_scale, cpu),
				&debugfs_fops);
	if (!d)
		goto err_out;

	return cpu_d;

err_out:
	debugfs_remove_recursive(cpu_d);
	return NULL;
}

/*
 * Create the "cpu_topo" debugfs tree with one entry per possible cpu.
 * On any failure the whole tree is removed and -ENOMEM returned.
 */
static int __init topo_debugfs_init(void)
{
	struct dentry *d;
	unsigned int cpu;

	d = debugfs_create_dir("cpu_topo", NULL);
	if (!d)
		return -ENOMEM;
	topo_debugfs_root = d;

	for_each_possible_cpu(cpu) {
		d = topo_debugfs_register(cpu, topo_debugfs_root);
		if (d == NULL)
			goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(topo_debugfs_root);
	return -ENOMEM;
}
late_initcall(topo_debugfs_init);

#endif
gpl-2.0
cole945/qemu
hw/ide/ahci.c
13
42792
/* * QEMU AHCI Emulation * * Copyright (c) 2010 qiaochong@loongson.cn * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> * Copyright (c) 2010 Alexander Graf <agraf@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #include <hw/hw.h> #include <hw/pci/msi.h> #include <hw/i386/pc.h> #include <hw/pci/pci.h> #include <hw/sysbus.h> #include "monitor/monitor.h" #include "sysemu/block-backend.h" #include "sysemu/dma.h" #include "internal.h" #include <hw/ide/pci.h> #include <hw/ide/ahci.h> /* #define DEBUG_AHCI */ #ifdef DEBUG_AHCI #define DPRINTF(port, fmt, ...) \ do { fprintf(stderr, "ahci: %s: [%d] ", __FUNCTION__, port); \ fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) #else #define DPRINTF(port, fmt, ...) 
do {} while(0) #endif static void check_cmd(AHCIState *s, int port); static int handle_cmd(AHCIState *s,int port,int slot); static void ahci_reset_port(AHCIState *s, int port); static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis); static void ahci_init_d2h(AHCIDevice *ad); static uint32_t ahci_port_read(AHCIState *s, int port, int offset) { uint32_t val; AHCIPortRegs *pr; pr = &s->dev[port].port_regs; switch (offset) { case PORT_LST_ADDR: val = pr->lst_addr; break; case PORT_LST_ADDR_HI: val = pr->lst_addr_hi; break; case PORT_FIS_ADDR: val = pr->fis_addr; break; case PORT_FIS_ADDR_HI: val = pr->fis_addr_hi; break; case PORT_IRQ_STAT: val = pr->irq_stat; break; case PORT_IRQ_MASK: val = pr->irq_mask; break; case PORT_CMD: val = pr->cmd; break; case PORT_TFDATA: val = pr->tfdata; break; case PORT_SIG: val = pr->sig; break; case PORT_SCR_STAT: if (s->dev[port].port.ifs[0].blk) { val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; } else { val = SATA_SCR_SSTATUS_DET_NODEV; } break; case PORT_SCR_CTL: val = pr->scr_ctl; break; case PORT_SCR_ERR: val = pr->scr_err; break; case PORT_SCR_ACT: pr->scr_act &= ~s->dev[port].finished; s->dev[port].finished = 0; val = pr->scr_act; break; case PORT_CMD_ISSUE: val = pr->cmd_issue; break; case PORT_RESERVED: default: val = 0; } DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); return val; } static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev) { AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); PCIDevice *pci_dev = (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); DPRINTF(0, "raise irq\n"); if (pci_dev && msi_enabled(pci_dev)) { msi_notify(pci_dev, 0); } else { qemu_irq_raise(s->irq); } } static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev) { AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); PCIDevice *pci_dev = (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); DPRINTF(0, "lower irq\n"); if (!pci_dev || 
!msi_enabled(pci_dev)) { qemu_irq_lower(s->irq); } } static void ahci_check_irq(AHCIState *s) { int i; DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus); s->control_regs.irqstatus = 0; for (i = 0; i < s->ports; i++) { AHCIPortRegs *pr = &s->dev[i].port_regs; if (pr->irq_stat & pr->irq_mask) { s->control_regs.irqstatus |= (1 << i); } } if (s->control_regs.irqstatus && (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { ahci_irq_raise(s, NULL); } else { ahci_irq_lower(s, NULL); } } static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d, int irq_type) { DPRINTF(d->port_no, "trigger irq %#x -> %x\n", irq_type, d->port_regs.irq_mask & irq_type); d->port_regs.irq_stat |= irq_type; ahci_check_irq(s); } static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, uint32_t wanted) { hwaddr len = wanted; if (*ptr) { dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); } *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); if (len < wanted) { dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); *ptr = NULL; } } static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val) { AHCIPortRegs *pr = &s->dev[port].port_regs; DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); switch (offset) { case PORT_LST_ADDR: pr->lst_addr = val; map_page(s->as, &s->dev[port].lst, ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); s->dev[port].cur_cmd = NULL; break; case PORT_LST_ADDR_HI: pr->lst_addr_hi = val; map_page(s->as, &s->dev[port].lst, ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); s->dev[port].cur_cmd = NULL; break; case PORT_FIS_ADDR: pr->fis_addr = val; map_page(s->as, &s->dev[port].res_fis, ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); break; case PORT_FIS_ADDR_HI: pr->fis_addr_hi = val; map_page(s->as, &s->dev[port].res_fis, ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); break; case PORT_IRQ_STAT: pr->irq_stat &= ~val; ahci_check_irq(s); break; case PORT_IRQ_MASK: pr->irq_mask = val 
& 0xfdc000ff; ahci_check_irq(s); break; case PORT_CMD: pr->cmd = val & ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON); if (pr->cmd & PORT_CMD_START) { pr->cmd |= PORT_CMD_LIST_ON; } if (pr->cmd & PORT_CMD_FIS_RX) { pr->cmd |= PORT_CMD_FIS_ON; } /* XXX usually the FIS would be pending on the bus here and issuing deferred until the OS enables FIS receival. Instead, we only submit it once - which works in most cases, but is a hack. */ if ((pr->cmd & PORT_CMD_FIS_ON) && !s->dev[port].init_d2h_sent) { ahci_init_d2h(&s->dev[port]); s->dev[port].init_d2h_sent = true; } check_cmd(s, port); break; case PORT_TFDATA: /* Read Only. */ break; case PORT_SIG: /* Read Only */ break; case PORT_SCR_STAT: /* Read Only */ break; case PORT_SCR_CTL: if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) && ((val & AHCI_SCR_SCTL_DET) == 0)) { ahci_reset_port(s, port); } pr->scr_ctl = val; break; case PORT_SCR_ERR: pr->scr_err &= ~val; break; case PORT_SCR_ACT: /* RW1 */ pr->scr_act |= val; break; case PORT_CMD_ISSUE: pr->cmd_issue |= val; check_cmd(s, port); break; default: break; } } static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size) { AHCIState *s = opaque; uint32_t val = 0; if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { switch (addr) { case HOST_CAP: val = s->control_regs.cap; break; case HOST_CTL: val = s->control_regs.ghc; break; case HOST_IRQ_STAT: val = s->control_regs.irqstatus; break; case HOST_PORTS_IMPL: val = s->control_regs.impl; break; case HOST_VERSION: val = s->control_regs.version; break; } DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val); } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && (addr < (AHCI_PORT_REGS_START_ADDR + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, addr & AHCI_PORT_ADDR_OFFSET_MASK); } return val; } static void ahci_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { AHCIState *s = opaque; /* Only aligned reads are allowed on AHCI */ if (addr & 3) 
{ fprintf(stderr, "ahci: Mis-aligned write to addr 0x" TARGET_FMT_plx "\n", addr); return; } if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val); switch (addr) { case HOST_CAP: /* R/WO, RO */ /* FIXME handle R/WO */ break; case HOST_CTL: /* R/W */ if (val & HOST_CTL_RESET) { DPRINTF(-1, "HBA Reset\n"); ahci_reset(s); } else { s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN; ahci_check_irq(s); } break; case HOST_IRQ_STAT: /* R/WC, RO */ s->control_regs.irqstatus &= ~val; ahci_check_irq(s); break; case HOST_PORTS_IMPL: /* R/WO, RO */ /* FIXME handle R/WO */ break; case HOST_VERSION: /* RO */ /* FIXME report write? */ break; default: DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr); } } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && (addr < (AHCI_PORT_REGS_START_ADDR + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, addr & AHCI_PORT_ADDR_OFFSET_MASK, val); } } static const MemoryRegionOps ahci_mem_ops = { .read = ahci_mem_read, .write = ahci_mem_write, .endianness = DEVICE_LITTLE_ENDIAN, }; static uint64_t ahci_idp_read(void *opaque, hwaddr addr, unsigned size) { AHCIState *s = opaque; if (addr == s->idp_offset) { /* index register */ return s->idp_index; } else if (addr == s->idp_offset + 4) { /* data register - do memory read at location selected by index */ return ahci_mem_read(opaque, s->idp_index, size); } else { return 0; } } static void ahci_idp_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { AHCIState *s = opaque; if (addr == s->idp_offset) { /* index register - mask off reserved bits */ s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3); } else if (addr == s->idp_offset + 4) { /* data register - do memory write at location selected by index */ ahci_mem_write(opaque, s->idp_index, val, size); } } static const MemoryRegionOps ahci_idp_ops = { .read = ahci_idp_read, .write = 
ahci_idp_write, .endianness = DEVICE_LITTLE_ENDIAN, }; static void ahci_reg_init(AHCIState *s) { int i; s->control_regs.cap = (s->ports - 1) | (AHCI_NUM_COMMAND_SLOTS << 8) | (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) | HOST_CAP_NCQ | HOST_CAP_AHCI; s->control_regs.impl = (1 << s->ports) - 1; s->control_regs.version = AHCI_VERSION_1_0; for (i = 0; i < s->ports; i++) { s->dev[i].port_state = STATE_RUN; } } static void check_cmd(AHCIState *s, int port) { AHCIPortRegs *pr = &s->dev[port].port_regs; int slot; if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { if ((pr->cmd_issue & (1U << slot)) && !handle_cmd(s, port, slot)) { pr->cmd_issue &= ~(1U << slot); } } } } static void ahci_check_cmd_bh(void *opaque) { AHCIDevice *ad = opaque; qemu_bh_delete(ad->check_bh); ad->check_bh = NULL; if ((ad->busy_slot != -1) && !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) { /* no longer busy */ ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot); ad->busy_slot = -1; } check_cmd(ad->hba, ad->port_no); } static void ahci_init_d2h(AHCIDevice *ad) { uint8_t init_fis[20]; IDEState *ide_state = &ad->port.ifs[0]; memset(init_fis, 0, sizeof(init_fis)); init_fis[4] = 1; init_fis[12] = 1; if (ide_state->drive_kind == IDE_CD) { init_fis[5] = ide_state->lcyl; init_fis[6] = ide_state->hcyl; } ahci_write_fis_d2h(ad, init_fis); } static void ahci_reset_port(AHCIState *s, int port) { AHCIDevice *d = &s->dev[port]; AHCIPortRegs *pr = &d->port_regs; IDEState *ide_state = &d->port.ifs[0]; int i; DPRINTF(port, "reset port\n"); ide_bus_reset(&d->port); ide_state->ncq_queues = AHCI_MAX_CMDS; pr->scr_stat = 0; pr->scr_err = 0; pr->scr_act = 0; pr->tfdata = 0x7F; pr->sig = 0xFFFFFFFF; d->busy_slot = -1; d->init_d2h_sent = false; ide_state = &s->dev[port].port.ifs[0]; if (!ide_state->blk) { return; } /* reset ncq queue */ for (i = 0; i < AHCI_MAX_CMDS; i++) { NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; if (!ncq_tfs->used) { continue; 
} if (ncq_tfs->aiocb) { blk_aio_cancel(ncq_tfs->aiocb); ncq_tfs->aiocb = NULL; } /* Maybe we just finished the request thanks to blk_aio_cancel() */ if (!ncq_tfs->used) { continue; } qemu_sglist_destroy(&ncq_tfs->sglist); ncq_tfs->used = 0; } s->dev[port].port_state = STATE_RUN; if (!ide_state->blk) { pr->sig = 0; ide_state->status = SEEK_STAT | WRERR_STAT; } else if (ide_state->drive_kind == IDE_CD) { pr->sig = SATA_SIGNATURE_CDROM; ide_state->lcyl = 0x14; ide_state->hcyl = 0xeb; DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl); ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; } else { pr->sig = SATA_SIGNATURE_DISK; ide_state->status = SEEK_STAT | WRERR_STAT; } ide_state->error = 1; ahci_init_d2h(d); } static void debug_print_fis(uint8_t *fis, int cmd_len) { #ifdef DEBUG_AHCI int i; fprintf(stderr, "fis:"); for (i = 0; i < cmd_len; i++) { if ((i & 0xf) == 0) { fprintf(stderr, "\n%02x:",i); } fprintf(stderr, "%02x ",fis[i]); } fprintf(stderr, "\n"); #endif } static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished) { AHCIDevice *ad = &s->dev[port]; AHCIPortRegs *pr = &ad->port_regs; IDEState *ide_state; uint8_t *sdb_fis; if (!s->dev[port].res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { return; } sdb_fis = &ad->res_fis[RES_FIS_SDBFIS]; ide_state = &ad->port.ifs[0]; /* clear memory */ *(uint32_t*)sdb_fis = 0; /* write values */ sdb_fis[0] = ide_state->error; sdb_fis[2] = ide_state->status & 0x77; s->dev[port].finished |= finished; *(uint32_t*)(sdb_fis + 4) = cpu_to_le32(ad->finished); /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */ pr->tfdata = (ad->port.ifs[0].error << 8) | (ad->port.ifs[0].status & 0x77) | (pr->tfdata & 0x88); ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS); } static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len) { AHCIPortRegs *pr = &ad->port_regs; uint8_t *pio_fis, *cmd_fis; uint64_t tbl_addr; dma_addr_t cmd_len = 0x80; if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { return; } /* map cmd_fis */ tbl_addr = 
le64_to_cpu(ad->cur_cmd->tbl_addr); cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len, DMA_DIRECTION_TO_DEVICE); if (cmd_fis == NULL) { DPRINTF(ad->port_no, "dma_memory_map failed in ahci_write_fis_pio"); ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR); return; } if (cmd_len != 0x80) { DPRINTF(ad->port_no, "dma_memory_map mapped too few bytes in ahci_write_fis_pio"); dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE, cmd_len); ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR); return; } pio_fis = &ad->res_fis[RES_FIS_PSFIS]; pio_fis[0] = 0x5f; pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); pio_fis[2] = ad->port.ifs[0].status; pio_fis[3] = ad->port.ifs[0].error; pio_fis[4] = cmd_fis[4]; pio_fis[5] = cmd_fis[5]; pio_fis[6] = cmd_fis[6]; pio_fis[7] = cmd_fis[7]; pio_fis[8] = cmd_fis[8]; pio_fis[9] = cmd_fis[9]; pio_fis[10] = cmd_fis[10]; pio_fis[11] = cmd_fis[11]; pio_fis[12] = cmd_fis[12]; pio_fis[13] = cmd_fis[13]; pio_fis[14] = 0; pio_fis[15] = ad->port.ifs[0].status; pio_fis[16] = len & 255; pio_fis[17] = len >> 8; pio_fis[18] = 0; pio_fis[19] = 0; /* Update shadow registers: */ pr->tfdata = (ad->port.ifs[0].error << 8) | ad->port.ifs[0].status; if (pio_fis[2] & ERR_STAT) { ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); } ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS); dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE, cmd_len); } static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis) { AHCIPortRegs *pr = &ad->port_regs; uint8_t *d2h_fis; int i; dma_addr_t cmd_len = 0x80; int cmd_mapped = 0; if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { return; } if (!cmd_fis) { /* map cmd_fis */ uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr); cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len, DMA_DIRECTION_TO_DEVICE); cmd_mapped = 1; } d2h_fis = &ad->res_fis[RES_FIS_RFIS]; d2h_fis[0] = 0x34; d2h_fis[1] = (ad->hba->control_regs.irqstatus ? 
(1 << 6) : 0); d2h_fis[2] = ad->port.ifs[0].status; d2h_fis[3] = ad->port.ifs[0].error; d2h_fis[4] = cmd_fis[4]; d2h_fis[5] = cmd_fis[5]; d2h_fis[6] = cmd_fis[6]; d2h_fis[7] = cmd_fis[7]; d2h_fis[8] = cmd_fis[8]; d2h_fis[9] = cmd_fis[9]; d2h_fis[10] = cmd_fis[10]; d2h_fis[11] = cmd_fis[11]; d2h_fis[12] = cmd_fis[12]; d2h_fis[13] = cmd_fis[13]; for (i = 14; i < 20; i++) { d2h_fis[i] = 0; } /* Update shadow registers: */ pr->tfdata = (ad->port.ifs[0].error << 8) | ad->port.ifs[0].status; if (d2h_fis[2] & ERR_STAT) { ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); } ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS); if (cmd_mapped) { dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE, cmd_len); } } static int prdt_tbl_entry_size(const AHCI_SG *tbl) { return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1; } static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, int offset) { AHCICmdHdr *cmd = ad->cur_cmd; uint32_t opts = le32_to_cpu(cmd->opts); uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80; int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN; dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); dma_addr_t real_prdt_len = prdt_len; uint8_t *prdt; int i; int r = 0; int sum = 0; int off_idx = -1; int off_pos = -1; int tbl_entry_size; IDEBus *bus = &ad->port; BusState *qbus = BUS(bus); if (!sglist_alloc_hint) { DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts); return -1; } /* map PRDT */ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, DMA_DIRECTION_TO_DEVICE))){ DPRINTF(ad->port_no, "map failed\n"); return -1; } if (prdt_len < real_prdt_len) { DPRINTF(ad->port_no, "mapped less than expected\n"); r = -1; goto out; } /* Get entries in the PRDT, init a qemu sglist accordingly */ if (sglist_alloc_hint > 0) { AHCI_SG *tbl = (AHCI_SG *)prdt; sum = 0; for (i = 0; i < sglist_alloc_hint; i++) { /* flags_size is zero-based */ tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); if (offset <= 
(sum + tbl_entry_size)) { off_idx = i; off_pos = offset - sum; break; } sum += tbl_entry_size; } if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { DPRINTF(ad->port_no, "%s: Incorrect offset! " "off_idx: %d, off_pos: %d\n", __func__, off_idx, off_pos); r = -1; goto out; } qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx), ad->hba->as); qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr + off_pos), prdt_tbl_entry_size(&tbl[off_idx]) - off_pos); for (i = off_idx + 1; i < sglist_alloc_hint; i++) { /* flags_size is zero-based */ qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), prdt_tbl_entry_size(&tbl[i])); } } out: dma_memory_unmap(ad->hba->as, prdt, prdt_len, DMA_DIRECTION_TO_DEVICE, prdt_len); return r; } static void ncq_cb(void *opaque, int ret) { NCQTransferState *ncq_tfs = (NCQTransferState *)opaque; IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; if (ret == -ECANCELED) { return; } /* Clear bit for this tag in SActive */ ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag); if (ret < 0) { /* error */ ide_state->error = ABRT_ERR; ide_state->status = READY_STAT | ERR_STAT; ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag); } else { ide_state->status = READY_STAT | SEEK_STAT; } ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no, (1 << ncq_tfs->tag)); DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n", ncq_tfs->tag); block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk), &ncq_tfs->acct); qemu_sglist_destroy(&ncq_tfs->sglist); ncq_tfs->used = 0; } static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, int slot) { NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; uint8_t tag = ncq_fis->tag >> 3; NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[tag]; if (ncq_tfs->used) { /* error - already in use */ fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag); return; } ncq_tfs->used = 1; ncq_tfs->drive = &s->dev[port]; ncq_tfs->slot = slot; ncq_tfs->lba = 
((uint64_t)ncq_fis->lba5 << 40) | ((uint64_t)ncq_fis->lba4 << 32) | ((uint64_t)ncq_fis->lba3 << 24) | ((uint64_t)ncq_fis->lba2 << 16) | ((uint64_t)ncq_fis->lba1 << 8) | (uint64_t)ncq_fis->lba0; /* Note: We calculate the sector count, but don't currently rely on it. * The total size of the DMA buffer tells us the transfer size instead. */ ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) | ncq_fis->sector_count_low; DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", " "drive max %"PRId64"\n", ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2, s->dev[port].port.ifs[0].nb_sectors - 1); ahci_populate_sglist(&s->dev[port], &ncq_tfs->sglist, 0); ncq_tfs->tag = tag; switch(ncq_fis->command) { case READ_FPDMA_QUEUED: DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", " "tag %d\n", ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, "tag %d aio read %"PRId64"\n", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_READ); ncq_tfs->aiocb = dma_blk_read(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; case WRITE_FPDMA_QUEUED: DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n", ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, "tag %d aio write %"PRId64"\n", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_WRITE); ncq_tfs->aiocb = dma_blk_write(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; default: DPRINTF(port, "error: tried to process non-NCQ command as NCQ\n"); qemu_sglist_destroy(&ncq_tfs->sglist); break; } } static int handle_cmd(AHCIState *s, int port, int slot) { IDEState *ide_state; uint32_t opts; uint64_t tbl_addr; AHCICmdHdr *cmd; uint8_t *cmd_fis; dma_addr_t cmd_len; if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* Engine currently busy, 
try again later */ DPRINTF(port, "engine busy\n"); return -1; } cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot]; if (!s->dev[port].lst) { DPRINTF(port, "error: lst not given but cmd handled"); return -1; } /* remember current slot handle for later */ s->dev[port].cur_cmd = cmd; opts = le32_to_cpu(cmd->opts); tbl_addr = le64_to_cpu(cmd->tbl_addr); cmd_len = 0x80; cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, DMA_DIRECTION_FROM_DEVICE); if (!cmd_fis) { DPRINTF(port, "error: guest passed us an invalid cmd fis\n"); return -1; } /* The device we are working for */ ide_state = &s->dev[port].port.ifs[0]; if (!ide_state->blk) { DPRINTF(port, "error: guest accessed unused port"); goto out; } debug_print_fis(cmd_fis, 0x90); //debug_print_fis(cmd_fis, (opts & AHCI_CMD_HDR_CMD_FIS_LEN) * 4); switch (cmd_fis[0]) { case SATA_FIS_TYPE_REGISTER_H2D: break; default: DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x " "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1], cmd_fis[2]); goto out; break; } switch (cmd_fis[1]) { case SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER: break; case 0: break; default: DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x " "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1], cmd_fis[2]); goto out; break; } switch (s->dev[port].port_state) { case STATE_RUN: if (cmd_fis[15] & ATA_SRST) { s->dev[port].port_state = STATE_RESET; } break; case STATE_RESET: if (!(cmd_fis[15] & ATA_SRST)) { ahci_reset_port(s, port); } break; } if (cmd_fis[1] == SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER) { /* Check for NCQ command */ if ((cmd_fis[2] == READ_FPDMA_QUEUED) || (cmd_fis[2] == WRITE_FPDMA_QUEUED)) { process_ncq_command(s, port, cmd_fis, slot); goto out; } /* Decompose the FIS */ ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); ide_state->feature = cmd_fis[3]; if (!ide_state->nsector) { ide_state->nsector = 256; } if (ide_state->drive_kind != IDE_CD) { /* * We set the sector depending on the sector defined in the FIS. 
* Unfortunately, the spec isn't exactly obvious on this one. * * Apparently LBA48 commands set fis bytes 10,9,8,6,5,4 to the * 48 bit sector number. ATA_CMD_READ_DMA_EXT is an example for * such a command. * * Non-LBA48 commands however use 7[lower 4 bits],6,5,4 to define a * 28-bit sector number. ATA_CMD_READ_DMA is an example for such * a command. * * Since the spec doesn't explicitly state what each field should * do, I simply assume non-used fields as reserved and OR everything * together, independent of the command. */ ide_set_sector(ide_state, ((uint64_t)cmd_fis[10] << 40) | ((uint64_t)cmd_fis[9] << 32) /* This is used for LBA48 commands */ | ((uint64_t)cmd_fis[8] << 24) /* This is used for non-LBA48 commands */ | ((uint64_t)(cmd_fis[7] & 0xf) << 24) | ((uint64_t)cmd_fis[6] << 16) | ((uint64_t)cmd_fis[5] << 8) | cmd_fis[4]); } /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command * table to ide_state->io_buffer */ if (opts & AHCI_CMD_ATAPI) { memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); ide_state->lcyl = 0x14; ide_state->hcyl = 0xeb; debug_print_fis(ide_state->io_buffer, 0x10); ide_state->feature = IDE_FEATURE_DMA; s->dev[port].done_atapi_packet = false; /* XXX send PIO setup FIS */ } ide_state->error = 0; /* Reset transferred byte counter */ cmd->status = 0; /* We're ready to process the command in FIS byte 2. 
*/ ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); } out: dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, cmd_len); if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* async command, complete later */ s->dev[port].busy_slot = slot; return -1; } /* done handling the command */ return 0; } /* DMA dev <-> ram */ static void ahci_start_transfer(IDEDMA *dma) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); IDEState *s = &ad->port.ifs[0]; uint32_t size = (uint32_t)(s->data_end - s->data_ptr); /* write == ram -> device */ uint32_t opts = le32_to_cpu(ad->cur_cmd->opts); int is_write = opts & AHCI_CMD_WRITE; int is_atapi = opts & AHCI_CMD_ATAPI; int has_sglist = 0; if (is_atapi && !ad->done_atapi_packet) { /* already prepopulated iobuffer */ ad->done_atapi_packet = true; goto out; } if (!ahci_populate_sglist(ad, &s->sg, 0)) { has_sglist = 1; } DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n", is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata", has_sglist ? 
"" : "o"); if (has_sglist && size) { if (is_write) { dma_buf_write(s->data_ptr, size, &s->sg); } else { dma_buf_read(s->data_ptr, size, &s->sg); } } /* update number of transferred bytes */ ad->cur_cmd->status = cpu_to_le32(le32_to_cpu(ad->cur_cmd->status) + size); out: /* declare that we processed everything */ s->data_ptr = s->data_end; if (has_sglist) { qemu_sglist_destroy(&s->sg); } s->end_transfer_func(s); if (!(s->status & DRQ_STAT)) { /* done with PIO send/receive */ ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status)); } } static void ahci_start_dma(IDEDMA *dma, IDEState *s, BlockCompletionFunc *dma_cb) { #ifdef DEBUG_AHCI AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); #endif DPRINTF(ad->port_no, "\n"); s->io_buffer_offset = 0; dma_cb(s, 0); } static int ahci_dma_prepare_buf(IDEDMA *dma, int is_write) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); IDEState *s = &ad->port.ifs[0]; ahci_populate_sglist(ad, &s->sg, 0); s->io_buffer_size = s->sg.size; DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size); return s->io_buffer_size != 0; } static int ahci_dma_rw_buf(IDEDMA *dma, int is_write) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); IDEState *s = &ad->port.ifs[0]; uint8_t *p = s->io_buffer + s->io_buffer_index; int l = s->io_buffer_size - s->io_buffer_index; if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) { return 0; } if (is_write) { dma_buf_read(p, l, &s->sg); } else { dma_buf_write(p, l, &s->sg); } /* free sglist that was created in ahci_populate_sglist() */ qemu_sglist_destroy(&s->sg); /* update number of transferred bytes */ ad->cur_cmd->status = cpu_to_le32(le32_to_cpu(ad->cur_cmd->status) + l); s->io_buffer_index += l; s->io_buffer_offset += l; DPRINTF(ad->port_no, "len=%#x\n", l); return 1; } static int ahci_dma_set_unit(IDEDMA *dma, int unit) { /* only a single unit per link */ return 0; } static void ahci_cmd_done(IDEDMA *dma) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); DPRINTF(ad->port_no, "cmd done\n"); /* 
update d2h status */ ahci_write_fis_d2h(ad, NULL); if (!ad->check_bh) { /* maybe we still have something to process, check later */ ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); qemu_bh_schedule(ad->check_bh); } } static void ahci_irq_set(void *opaque, int n, int level) { } static void ahci_dma_restart_cb(void *opaque, int running, RunState state) { } static const IDEDMAOps ahci_dma_ops = { .start_dma = ahci_start_dma, .start_transfer = ahci_start_transfer, .prepare_buf = ahci_dma_prepare_buf, .rw_buf = ahci_dma_rw_buf, .set_unit = ahci_dma_set_unit, .cmd_done = ahci_cmd_done, .restart_cb = ahci_dma_restart_cb, }; void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) { qemu_irq *irqs; int i; s->as = as; s->ports = ports; s->dev = g_new0(AHCIDevice, ports); ahci_reg_init(s); /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s, "ahci", AHCI_MEM_BAR_SIZE); memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s, "ahci-idp", 32); irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports); for (i = 0; i < s->ports; i++) { AHCIDevice *ad = &s->dev[i]; ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1); ide_init2(&ad->port, irqs[i]); ad->hba = s; ad->port_no = i; ad->port.dma = &ad->dma; ad->port.dma->ops = &ahci_dma_ops; } } void ahci_uninit(AHCIState *s) { g_free(s->dev); } void ahci_reset(AHCIState *s) { AHCIPortRegs *pr; int i; s->control_regs.irqstatus = 0; /* AHCI Enable (AE) * The implementation of this bit is dependent upon the value of the * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be * read-only and shall have a reset value of '1'. * * We set HOST_CAP_AHCI so we must enable AHCI at reset. 
*/ s->control_regs.ghc = HOST_CTL_AHCI_EN; for (i = 0; i < s->ports; i++) { pr = &s->dev[i].port_regs; pr->irq_stat = 0; pr->irq_mask = 0; pr->scr_ctl = 0; pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON; ahci_reset_port(s, i); } } static const VMStateDescription vmstate_ahci_device = { .name = "ahci port", .version_id = 1, .fields = (VMStateField[]) { VMSTATE_IDE_BUS(port, AHCIDevice), VMSTATE_UINT32(port_state, AHCIDevice), VMSTATE_UINT32(finished, AHCIDevice), VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice), VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice), VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice), VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice), VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice), VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice), VMSTATE_UINT32(port_regs.cmd, AHCIDevice), VMSTATE_UINT32(port_regs.tfdata, AHCIDevice), VMSTATE_UINT32(port_regs.sig, AHCIDevice), VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice), VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice), VMSTATE_UINT32(port_regs.scr_err, AHCIDevice), VMSTATE_UINT32(port_regs.scr_act, AHCIDevice), VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice), VMSTATE_BOOL(done_atapi_packet, AHCIDevice), VMSTATE_INT32(busy_slot, AHCIDevice), VMSTATE_BOOL(init_d2h_sent, AHCIDevice), VMSTATE_END_OF_LIST() }, }; static int ahci_state_post_load(void *opaque, int version_id) { int i; struct AHCIDevice *ad; AHCIState *s = opaque; for (i = 0; i < s->ports; i++) { ad = &s->dev[i]; AHCIPortRegs *pr = &ad->port_regs; map_page(s->as, &ad->lst, ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); map_page(s->as, &ad->res_fis, ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); /* * All pending i/o should be flushed out on a migrate. However, * we might not have cleared the busy_slot since this is done * in a bh. Also, issue i/o against any slots that are pending. 
*/ if ((ad->busy_slot != -1) && !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) { pr->cmd_issue &= ~(1 << ad->busy_slot); ad->busy_slot = -1; } check_cmd(s, i); } return 0; } const VMStateDescription vmstate_ahci = { .name = "ahci", .version_id = 1, .post_load = ahci_state_post_load, .fields = (VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports, vmstate_ahci_device, AHCIDevice), VMSTATE_UINT32(control_regs.cap, AHCIState), VMSTATE_UINT32(control_regs.ghc, AHCIState), VMSTATE_UINT32(control_regs.irqstatus, AHCIState), VMSTATE_UINT32(control_regs.impl, AHCIState), VMSTATE_UINT32(control_regs.version, AHCIState), VMSTATE_UINT32(idp_index, AHCIState), VMSTATE_INT32_EQUAL(ports, AHCIState), VMSTATE_END_OF_LIST() }, }; #define TYPE_SYSBUS_AHCI "sysbus-ahci" #define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI) typedef struct SysbusAHCIState { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ AHCIState ahci; uint32_t num_ports; } SysbusAHCIState; static const VMStateDescription vmstate_sysbus_ahci = { .name = "sysbus-ahci", .unmigratable = 1, /* Still buggy under I/O load */ .fields = (VMStateField[]) { VMSTATE_AHCI(ahci, SysbusAHCIState), VMSTATE_END_OF_LIST() }, }; static void sysbus_ahci_reset(DeviceState *dev) { SysbusAHCIState *s = SYSBUS_AHCI(dev); ahci_reset(&s->ahci); } static void sysbus_ahci_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); SysbusAHCIState *s = SYSBUS_AHCI(dev); ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports); sysbus_init_mmio(sbd, &s->ahci.mem); sysbus_init_irq(sbd, &s->ahci.irq); } static Property sysbus_ahci_properties[] = { DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1), DEFINE_PROP_END_OF_LIST(), }; static void sysbus_ahci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sysbus_ahci_realize; dc->vmsd = &vmstate_sysbus_ahci; dc->props = sysbus_ahci_properties; dc->reset = 
sysbus_ahci_reset; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo sysbus_ahci_info = { .name = TYPE_SYSBUS_AHCI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(SysbusAHCIState), .class_init = sysbus_ahci_class_init, }; static void sysbus_ahci_register_types(void) { type_register_static(&sysbus_ahci_info); } type_init(sysbus_ahci_register_types) void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) { AHCIPCIState *d = ICH_AHCI(dev); AHCIState *ahci = &d->ahci; int i; for (i = 0; i < ahci->ports; i++) { if (hd[i] == NULL) { continue; } ide_create_drive(&ahci->dev[i].port, 0, hd[i]); } }
gpl-2.0
devmapal/linux
arch/x86/kvm/svm.c
13
141075
/* * Kernel-based Virtual Machine driver for Linux * * AMD SVM support * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #define pr_fmt(fmt) "SVM: " fmt #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include "pmu.h" #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/trace_events.h> #include <linux/slab.h> #include <linux/amd-iommu.h> #include <linux/hashtable.h> #include <asm/apic.h> #include <asm/perf_event.h> #include <asm/tlbflush.h> #include <asm/desc.h> #include <asm/debugreg.h> #include <asm/kvm_para.h> #include <asm/irq_remapping.h> #include <asm/virtext.h> #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id svm_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_SVM), {} }; MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 #define SVM_FEATURE_NPT (1 << 0) #define SVM_FEATURE_LBRV (1 << 1) #define SVM_FEATURE_SVML (1 << 2) #define SVM_FEATURE_NRIP (1 << 3) #define SVM_FEATURE_TSC_RATE (1 << 4) #define SVM_FEATURE_VMCB_CLEAN (1 << 5) #define SVM_FEATURE_FLUSH_ASID (1 << 6) #define SVM_FEATURE_DECODE_ASSIST (1 << 7) #define SVM_FEATURE_PAUSE_FILTER (1 << 10) #define SVM_AVIC_DOORBELL 0xc001011b #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */ #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */ #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) #define 
TSC_RATIO_RSVD 0xffffff0000000000ULL #define TSC_RATIO_MIN 0x0000000000000001ULL #define TSC_RATIO_MAX 0x000000ffffffffffULL #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF) /* * 0xff is broadcast, so the max index allowed for physical APIC ID * table is 0xfe. APIC IDs above 0xff are reserved. */ #define AVIC_MAX_PHYSICAL_ID_COUNT 255 #define AVIC_UNACCEL_ACCESS_WRITE_MASK 1 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0 #define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF /* AVIC GATAG is encoded using VM and VCPU IDs */ #define AVIC_VCPU_ID_BITS 8 #define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1) #define AVIC_VM_ID_BITS 24 #define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS) #define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1) #define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \ (y & AVIC_VCPU_ID_MASK)) #define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK) #define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK) static bool erratum_383_found __read_mostly; static const u32 host_save_user_msrs[] = { #ifdef CONFIG_X86_64 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, MSR_FS_BASE, #endif MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_TSC_AUX, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) struct kvm_vcpu; struct nested_state { struct vmcb *hsave; u64 hsave_msr; u64 vm_cr_msr; u64 vmcb; /* These are the merged vectors */ u32 *msrpm; /* gpa pointers to the real vectors */ u64 vmcb_msrpm; u64 vmcb_iopm; /* A VMEXIT is required but not yet emulated */ bool exit_required; /* cache for intercepts of the guest */ u32 intercept_cr; u32 intercept_dr; u32 intercept_exceptions; u64 intercept; /* Nested Paging related state */ u64 nested_cr3; }; #define MSRPM_OFFSETS 16 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; /* * Set osvw_len to higher value when updated Revision Guides * are published and we know what the new status bits are */ static uint64_t osvw_len = 4, 
osvw_status; struct vcpu_svm { struct kvm_vcpu vcpu; struct vmcb *vmcb; unsigned long vmcb_pa; struct svm_cpu_data *svm_data; uint64_t asid_generation; uint64_t sysenter_esp; uint64_t sysenter_eip; uint64_t tsc_aux; u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; struct { u16 fs; u16 gs; u16 ldt; u64 gs_base; } host; u32 *msrpm; ulong nmi_iret_rip; struct nested_state nested; bool nmi_singlestep; unsigned int3_injected; unsigned long int3_rip; u32 apf_reason; /* cached guest cpuid flags for faster access */ bool nrips_enabled : 1; u32 ldr_reg; struct page *avic_backing_page; u64 *avic_physical_id_cache; bool avic_is_running; /* * Per-vcpu list of struct amd_svm_iommu_ir: * This is used mainly to store interrupt remapping information used * when update the vcpu affinity. This avoids the need to scan for * IRTE and try to match ga_tag in the IOMMU driver. */ struct list_head ir_list; spinlock_t ir_list_lock; }; /* * This is a wrapper of struct amd_iommu_ir_data. */ struct amd_svm_iommu_ir { struct list_head node; /* Used by SVM for per-vcpu ir_list */ void *data; /* Storing pointer to struct amd_ir_data */ }; #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF) #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL) #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12) #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62) #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63) static DEFINE_PER_CPU(u64, current_tsc_ratio); #define TSC_RATIO_DEFAULT 0x0100000000ULL #define MSR_INVALID 0xffffffffU static const struct svm_direct_access_msrs { u32 index; /* Index of the MSR */ bool always; /* True if intercept is always on */ } direct_access_msrs[] = { { .index = MSR_STAR, .always = true }, { .index = MSR_IA32_SYSENTER_CS, .always = true }, #ifdef CONFIG_X86_64 { .index = MSR_GS_BASE, .always = true }, { .index = MSR_FS_BASE, .always = true }, { .index = MSR_KERNEL_GS_BASE, 
.always = true }, { .index = MSR_LSTAR, .always = true }, { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, { .index = MSR_IA32_LASTINTTOIP, .always = false }, { .index = MSR_INVALID, .always = false }, }; /* enable NPT for AMD64 and X86 with PAE */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static bool npt_enabled = true; #else static bool npt_enabled; #endif /* allow nested paging (virtualized MMU) for all guests */ static int npt = true; module_param(npt, int, S_IRUGO); /* allow nested virtualization in KVM/SVM */ static int nested = true; module_param(nested, int, S_IRUGO); /* enable / disable AVIC */ static int avic; #ifdef CONFIG_X86_LOCAL_APIC module_param(avic, int, S_IRUGO); #endif /* AVIC VM ID bit masks and lock */ static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR); static DEFINE_SPINLOCK(avic_vm_id_lock); static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); static void svm_flush_tlb(struct kvm_vcpu *vcpu); static void svm_complete_interrupts(struct vcpu_svm *svm); static int nested_svm_exit_handled(struct vcpu_svm *svm); static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); enum { VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, pause filter count */ VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */ VMCB_ASID, /* ASID */ VMCB_INTR, /* int_ctl, int_vector */ VMCB_NPT, /* npt_en, nCR3, gPAT */ VMCB_CR, /* CR0, CR3, CR4, EFER */ VMCB_DR, /* DR6, DR7 */ VMCB_DT, /* GDT, IDT */ VMCB_SEG, /* CS, DS, SS, ES, CPL */ VMCB_CR2, /* CR2 only */ VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */ VMCB_AVIC, /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE, * AVIC 
PHYSICAL_TABLE pointer, * AVIC LOGICAL_TABLE pointer */ VMCB_DIRTY_MAX, }; /* TPR and CR2 are always written before VMRUN */ #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2)) #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL static inline void mark_all_dirty(struct vmcb *vmcb) { vmcb->control.clean = 0; } static inline void mark_all_clean(struct vmcb *vmcb) { vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1) & ~VMCB_ALWAYS_DIRTY_MASK; } static inline void mark_dirty(struct vmcb *vmcb, int bit) { vmcb->control.clean &= ~(1 << bit); } static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_svm, vcpu); } static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data) { svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK; mark_dirty(svm->vmcb, VMCB_AVIC); } static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u64 *entry = svm->avic_physical_id_cache; if (!entry) return false; return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK); } static void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h; struct nested_state *g; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); if (!is_guest_mode(&svm->vcpu)) return; c = &svm->vmcb->control; h = &svm->nested.hsave->control; g = &svm->nested; c->intercept_cr = h->intercept_cr | g->intercept_cr; c->intercept_dr = h->intercept_dr | g->intercept_dr; c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; c->intercept = h->intercept | g->intercept; } static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) { if (is_guest_mode(&svm->vcpu)) return svm->nested.hsave; else return svm->vmcb; } static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_cr |= (1U << bit); recalc_intercepts(svm); } static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb 
*vmcb = get_host_vmcb(svm); vmcb->control.intercept_cr &= ~(1U << bit); recalc_intercepts(svm); } static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); return vmcb->control.intercept_cr & (1U << bit); } static inline void set_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ) | (1 << INTERCEPT_DR1_READ) | (1 << INTERCEPT_DR2_READ) | (1 << INTERCEPT_DR3_READ) | (1 << INTERCEPT_DR4_READ) | (1 << INTERCEPT_DR5_READ) | (1 << INTERCEPT_DR6_READ) | (1 << INTERCEPT_DR7_READ) | (1 << INTERCEPT_DR0_WRITE) | (1 << INTERCEPT_DR1_WRITE) | (1 << INTERCEPT_DR2_WRITE) | (1 << INTERCEPT_DR3_WRITE) | (1 << INTERCEPT_DR4_WRITE) | (1 << INTERCEPT_DR5_WRITE) | (1 << INTERCEPT_DR6_WRITE) | (1 << INTERCEPT_DR7_WRITE); recalc_intercepts(svm); } static inline void clr_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_dr = 0; recalc_intercepts(svm); } static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_exceptions |= (1U << bit); recalc_intercepts(svm); } static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_exceptions &= ~(1U << bit); recalc_intercepts(svm); } static inline void set_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept |= (1ULL << bit); recalc_intercepts(svm); } static inline void clr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept &= ~(1ULL << bit); recalc_intercepts(svm); } static inline void enable_gif(struct vcpu_svm *svm) { svm->vcpu.arch.hflags |= HF_GIF_MASK; } static inline void disable_gif(struct vcpu_svm *svm) { svm->vcpu.arch.hflags &= ~HF_GIF_MASK; } static inline bool gif_set(struct vcpu_svm 
*svm) { return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); } static unsigned long iopm_base; struct kvm_ldttss_desc { u16 limit0; u16 base0; unsigned base1:8, type:5, dpl:2, p:1; unsigned limit1:4, zero0:3, g:1, base2:8; u32 base3; u32 zero1; } __attribute__((packed)); struct svm_cpu_data { int cpu; u64 asid_generation; u32 max_asid; u32 next_asid; struct kvm_ldttss_desc *tss_desc; struct page *save_area; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); struct svm_init_data { int cpu; int r; }; static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) #define MSRS_RANGE_SIZE 2048 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) static u32 svm_msrpm_offset(u32 msr) { u32 offset; int i; for (i = 0; i < NUM_MSR_MAPS; i++) { if (msr < msrpm_ranges[i] || msr >= msrpm_ranges[i] + MSRS_IN_RANGE) continue; offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */ offset += (i * MSRS_RANGE_SIZE); /* add range offset */ /* Now we have the u8 offset - but need the u32 offset */ return offset / 4; } /* MSR not in any range */ return MSR_INVALID; } #define MAX_INST_SIZE 15 static inline void clgi(void) { asm volatile (__ex(SVM_CLGI)); } static inline void stgi(void) { asm volatile (__ex(SVM_STGI)); } static inline void invlpga(unsigned long addr, u32 asid) { asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid)); } static int get_npt_level(void) { #ifdef CONFIG_X86_64 return PT64_ROOT_LEVEL; #else return PT32E_ROOT_LEVEL; #endif } static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) { vcpu->arch.efer = efer; if (!npt_enabled && !(efer & EFER_LMA)) efer &= ~EFER_LME; to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); } static int is_external_interrupt(u32 info) { info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); } static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u32 ret = 
0; if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { struct vcpu_svm *svm = to_svm(vcpu); if (mask == 0) svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; else svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } if (!svm->next_rip) { if (emulate_instruction(vcpu, EMULTYPE_SKIP) != EMULATE_DONE) printk(KERN_DEBUG "%s: NOP\n", __func__); return; } if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n", __func__, kvm_rip_read(vcpu), svm->next_rip); kvm_rip_write(vcpu, svm->next_rip); svm_set_interrupt_shadow(vcpu, 0); } static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) { struct vcpu_svm *svm = to_svm(vcpu); /* * If we are within a nested VM we'd better #VMEXIT and let the guest * handle the exception */ if (!reinject && nested_svm_check_exception(svm, nr, has_error_code, error_code)) return; if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); /* * For guest debugging where we have to reinject #BP if some * INT3 is guest-owned: * Emulate nRIP by moving RIP forward. Will fail if injection * raises a fault that is not intercepted. Still better than * failing in all cases. */ skip_emulated_instruction(&svm->vcpu); rip = kvm_rip_read(&svm->vcpu); svm->int3_rip = rip + svm->vmcb->save.cs.base; svm->int3_injected = rip - old_rip; } svm->vmcb->control.event_inj = nr | SVM_EVTINJ_VALID | (has_error_code ? 
SVM_EVTINJ_VALID_ERR : 0) | SVM_EVTINJ_TYPE_EXEPT; svm->vmcb->control.event_inj_err = error_code; } static void svm_init_erratum_383(void) { u32 low, high; int err; u64 val; if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) return; /* Use _safe variants to not break nested virtualization */ val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err); if (err) return; val |= (1ULL << 47); low = lower_32_bits(val); high = upper_32_bits(val); native_write_msr_safe(MSR_AMD64_DC_CFG, low, high); erratum_383_found = true; } static void svm_init_osvw(struct kvm_vcpu *vcpu) { /* * Guests should see errata 400 and 415 as fixed (assuming that * HLT and IO instructions are intercepted). */ vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; vcpu->arch.osvw.status = osvw_status & ~(6ULL); /* * By increasing VCPU's osvw.length to 3 we are telling the guest that * all osvw.status bits inside that length, including bit 0 (which is * reserved for erratum 298), are valid. However, if host processor's * osvw_len is 0 then osvw_status[0] carries no information. We need to * be conservative here and therefore we tell the guest that erratum 298 * is present (because we really don't know). 
*/ if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) vcpu->arch.osvw.status |= 1; } static int has_svm(void) { const char *msg; if (!cpu_has_svm(&msg)) { printk(KERN_INFO "has_svm: %s\n", msg); return 0; } return 1; } static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); cpu_svm_disable(); amd_pmu_disable_virt(); } static int svm_hardware_enable(void) { struct svm_cpu_data *sd; uint64_t efer; struct desc_struct *gdt; int me = raw_smp_processor_id(); rdmsrl(MSR_EFER, efer); if (efer & EFER_SVME) return -EBUSY; if (!has_svm()) { pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me); return -EINVAL; } sd = per_cpu(svm_data, me); if (!sd) { pr_err("%s: svm_data is NULL on %d\n", __func__, me); return -EINVAL; } sd->asid_generation = 1; sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; sd->next_asid = sd->max_asid + 1; gdt = get_current_gdt_rw(); sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); wrmsrl(MSR_EFER, efer | EFER_SVME); wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); } /* * Get OSVW bits. * * Note that it is possible to have a system with mixed processor * revisions and therefore different OSVW bits. If bits are not the same * on different processors then choose the worst case (i.e. if erratum * is present on one processor and not on another then assume that the * erratum is present everywhere). 
*/ if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { uint64_t len, status = 0; int err; len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); if (!err) status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &err); if (err) osvw_status = osvw_len = 0; else { if (len < osvw_len) osvw_len = len; osvw_status |= status; osvw_status &= (1ULL << osvw_len) - 1; } } else osvw_status = osvw_len = 0; svm_init_erratum_383(); amd_pmu_enable_virt(); return 0; } static void svm_cpu_uninit(int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id()); if (!sd) return; per_cpu(svm_data, raw_smp_processor_id()) = NULL; __free_page(sd->save_area); kfree(sd); } static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; sd->save_area = alloc_page(GFP_KERNEL); r = -ENOMEM; if (!sd->save_area) goto err_1; per_cpu(svm_data, cpu) = sd; return 0; err_1: kfree(sd); return r; } static bool valid_msr_intercept(u32 index) { int i; for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) if (direct_access_msrs[i].index == index) return true; return false; } static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { u8 bit_read, bit_write; unsigned long tmp; u32 offset; /* * If this warning triggers extend the direct_access_msrs list at the * beginning of the file */ WARN_ON(!valid_msr_intercept(msr)); offset = svm_msrpm_offset(msr); bit_read = 2 * (msr & 0x0f); bit_write = 2 * (msr & 0x0f) + 1; tmp = msrpm[offset]; BUG_ON(offset == MSR_INVALID); read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp); write ? 
clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp); msrpm[offset] = tmp; } static void svm_vcpu_init_msrpm(u32 *msrpm) { int i; memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { if (!direct_access_msrs[i].always) continue; set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1); } } static void add_msr_offset(u32 offset) { int i; for (i = 0; i < MSRPM_OFFSETS; ++i) { /* Offset already in list? */ if (msrpm_offsets[i] == offset) return; /* Slot used by another offset? */ if (msrpm_offsets[i] != MSR_INVALID) continue; /* Add offset to list */ msrpm_offsets[i] = offset; return; } /* * If this BUG triggers the msrpm_offsets table has an overflow. Just * increase MSRPM_OFFSETS in this case. */ BUG(); } static void init_msrpm_offsets(void) { int i; memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { u32 offset; offset = svm_msrpm_offset(direct_access_msrs[i].index); BUG_ON(offset == MSR_INVALID); add_msr_offset(offset); } } static void svm_enable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 1; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1); } static void svm_disable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 0; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } /* Note: * This hash table is used to map VM_ID to a struct kvm_arch, * when handling AMD IOMMU GALOG notification to schedule in * a particular vCPU. 
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	/* The GA tag encodes both the VM id and the target vCPU id. */
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	/* Look up the VM by id under the hash lock, then resolve the vCPU. */
	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Unknown VM/vCPU: nothing to wake; report success to the IOMMU. */
	if (!vcpu)
		return 0;

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu->mode == OUTSIDE_GUEST_MODE)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

/*
 * One-time module init: allocate the I/O permission bitmap and per-cpu
 * state, and decide which optional features (NX/FFXSR EFER bits, TSC
 * scaling, nested SVM, NPT, AVIC) are usable on this host.
 */
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	/* All-ones == intercept every I/O port by default. */
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		/*
		 * NOTE(review): per-cpu data set up by earlier iterations is
		 * not freed on this error path — looks like a leak; confirm
		 * whether a caller performs the uninit on failure.
		 */
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	/* AVIC needs NPT, the CPU feature bit, and a local APIC to be useful. */
	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

/* Module teardown: release per-cpu state and the I/O permission bitmap. */
static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

/* Initialize a VMCB segment cache entry as a flat 16-bit data segment. */
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

/* Initialize a VMCB system-segment entry (LDT/TSS) of the given @type. */
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib =
SVM_SELECTOR_P_MASK | type; seg->limit = 0xffff; seg->base = 0; } static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; if (is_guest_mode(vcpu)) { g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; } else trace_kvm_write_tsc_offset(vcpu->vcpu_id, svm->vmcb->control.tsc_offset, offset); svm->vmcb->control.tsc_offset = offset + g_tsc_offset; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } static void avic_init_vmcb(struct vcpu_svm *svm) { struct vmcb *vmcb = svm->vmcb; struct kvm_arch *vm_data = &svm->vcpu.kvm->arch; phys_addr_t bpa = page_to_phys(svm->avic_backing_page); phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page); phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page); vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK; vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK; vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT; vmcb->control.int_ctl |= AVIC_ENABLE_MASK; svm->vcpu.arch.apicv_active = true; } static void init_vmcb(struct vcpu_svm *svm) { struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; svm->vcpu.arch.hflags = 0; set_cr_intercept(svm, INTERCEPT_CR0_READ); set_cr_intercept(svm, INTERCEPT_CR3_READ); set_cr_intercept(svm, INTERCEPT_CR4_READ); set_cr_intercept(svm, INTERCEPT_CR0_WRITE); set_cr_intercept(svm, INTERCEPT_CR3_WRITE); set_cr_intercept(svm, INTERCEPT_CR4_WRITE); if (!kvm_vcpu_apicv_active(&svm->vcpu)) set_cr_intercept(svm, INTERCEPT_CR8_WRITE); set_dr_intercepts(svm); set_exception_intercept(svm, PF_VECTOR); set_exception_intercept(svm, UD_VECTOR); set_exception_intercept(svm, MC_VECTOR); set_exception_intercept(svm, AC_VECTOR); set_exception_intercept(svm, DB_VECTOR); set_intercept(svm, INTERCEPT_INTR); set_intercept(svm, INTERCEPT_NMI); 
set_intercept(svm, INTERCEPT_SMI); set_intercept(svm, INTERCEPT_SELECTIVE_CR0); set_intercept(svm, INTERCEPT_RDPMC); set_intercept(svm, INTERCEPT_CPUID); set_intercept(svm, INTERCEPT_INVD); set_intercept(svm, INTERCEPT_HLT); set_intercept(svm, INTERCEPT_INVLPG); set_intercept(svm, INTERCEPT_INVLPGA); set_intercept(svm, INTERCEPT_IOIO_PROT); set_intercept(svm, INTERCEPT_MSR_PROT); set_intercept(svm, INTERCEPT_TASK_SWITCH); set_intercept(svm, INTERCEPT_SHUTDOWN); set_intercept(svm, INTERCEPT_VMRUN); set_intercept(svm, INTERCEPT_VMMCALL); set_intercept(svm, INTERCEPT_VMLOAD); set_intercept(svm, INTERCEPT_VMSAVE); set_intercept(svm, INTERCEPT_STGI); set_intercept(svm, INTERCEPT_CLGI); set_intercept(svm, INTERCEPT_SKINIT); set_intercept(svm, INTERCEPT_WBINVD); set_intercept(svm, INTERCEPT_XSETBV); if (!kvm_mwait_in_guest()) { set_intercept(svm, INTERCEPT_MONITOR); set_intercept(svm, INTERCEPT_MWAIT); } control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __pa(svm->msrpm); control->int_ctl = V_INTR_MASKING_MASK; init_seg(&save->es); init_seg(&save->ss); init_seg(&save->ds); init_seg(&save->fs); init_seg(&save->gs); save->cs.selector = 0xf000; save->cs.base = 0xffff0000; /* Executable/Readable Code Segment */ save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; save->cs.limit = 0xffff; save->gdtr.limit = 0xffff; save->idtr.limit = 0xffff; init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; kvm_set_rflags(&svm->vcpu, 2); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; /* * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. * It also updates the guest-visible cr0 value. */ svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); kvm_mmu_reset_context(&svm->vcpu); save->cr4 = X86_CR4_PAE; /* rdx = ?? 
*/ if (npt_enabled) { /* Setup VMCB for Nested Paging */ control->nested_ctl = 1; clr_intercept(svm, INTERCEPT_INVLPG); clr_exception_intercept(svm, PF_VECTOR); clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; save->cr3 = 0; save->cr4 = 0; } svm->asid_generation = 0; svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { control->pause_filter_count = 3000; set_intercept(svm, INTERCEPT_PAUSE); } if (avic) avic_init_vmcb(svm); mark_all_dirty(svm->vmcb); enable_gif(svm); } static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, unsigned int index) { u64 *avic_physical_id_table; struct kvm_arch *vm_data = &vcpu->kvm->arch; if (index >= AVIC_MAX_PHYSICAL_ID_COUNT) return NULL; avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page); return &avic_physical_id_table[index]; } /** * Note: * AVIC hardware walks the nested page table to check permissions, * but does not use the SPA address specified in the leaf page * table entry since it uses address in the AVIC_BACKING_PAGE pointer * field of the VMCB. Therefore, we set up the * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here. 
*/ static int avic_init_access_page(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; int ret; if (kvm->arch.apic_access_page_done) return 0; ret = x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (ret) return ret; kvm->arch.apic_access_page_done = true; return 0; } static int avic_init_backing_page(struct kvm_vcpu *vcpu) { int ret; u64 *entry, new_entry; int id = vcpu->vcpu_id; struct vcpu_svm *svm = to_svm(vcpu); ret = avic_init_access_page(vcpu); if (ret) return ret; if (id >= AVIC_MAX_PHYSICAL_ID_COUNT) return -EINVAL; if (!svm->vcpu.arch.apic->regs) return -EINVAL; svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); /* Setting AVIC backing page address in the phy APIC ID table */ entry = avic_get_physical_id_entry(vcpu, id); if (!entry) return -EINVAL; new_entry = READ_ONCE(*entry); new_entry = (page_to_phys(svm->avic_backing_page) & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | AVIC_PHYSICAL_ID_ENTRY_VALID_MASK; WRITE_ONCE(*entry, new_entry); svm->avic_physical_id_cache = entry; return 0; } static inline int avic_get_next_vm_id(void) { int id; spin_lock(&avic_vm_id_lock); /* AVIC VM ID is one-based. 
 */
	id = find_next_zero_bit(avic_vm_id_bitmap, AVIC_VM_ID_NR, 1);
	if (id <= AVIC_VM_ID_MASK)
		__set_bit(id, avic_vm_id_bitmap);
	else
		id = -EAGAIN;
	spin_unlock(&avic_vm_id_lock);

	return id;
}

/* Release an AVIC VM id previously handed out by avic_get_next_vm_id(). */
static inline int avic_free_vm_id(int id)
{
	/* Ids are one-based; 0 and out-of-range values are rejected. */
	if (id <= 0 || id > AVIC_VM_ID_MASK)
		return -EINVAL;

	spin_lock(&avic_vm_id_lock);
	__clear_bit(id, avic_vm_id_bitmap);
	spin_unlock(&avic_vm_id_lock);
	return 0;
}

/*
 * Tear down per-VM AVIC state: free the VM id, both APIC id tables, and
 * unlink the VM from the GA-log lookup hash. Also used as the error path
 * of avic_vm_init().
 */
static void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_arch *vm_data = &kvm->arch;

	if (!avic)
		return;

	avic_free_vm_id(vm_data->avic_vm_id);

	if (vm_data->avic_logical_id_table_page)
		__free_page(vm_data->avic_logical_id_table_page);
	if (vm_data->avic_physical_id_table_page)
		__free_page(vm_data->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&vm_data->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

/*
 * Set up per-VM AVIC state: allocate a VM id plus the physical and logical
 * APIC id tables (one zeroed page each), then publish the VM in the GA-log
 * lookup hash. Returns 0 if AVIC is disabled or on success.
 */
static int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int vm_id, err = -ENOMEM;
	struct kvm_arch *vm_data = &kvm->arch;
	struct page *p_page;
	struct page *l_page;

	if (!avic)
		return 0;

	vm_id = avic_get_next_vm_id();
	if (vm_id < 0)
		return vm_id;
	vm_data->avic_vm_id = (u32)vm_id;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL);
	if (!p_page)
		goto free_avic;

	vm_data->avic_physical_id_table_page = p_page;
	clear_page(page_address(p_page));

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL);
	if (!l_page)
		goto free_avic;

	vm_data->avic_logical_id_table_page = l_page;
	clear_page(page_address(l_page));

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}

/*
 * Propagate this vCPU's new physical-CPU binding to every IOMMU
 * interrupt-remapping entry that targets it (@r = is-running state).
 */
static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if
(!kvm_arch_has_assigned_device(vcpu->kvm)) return 0; /* * Here, we go through the per-vcpu ir_list to update all existing * interrupt remapping table entry targeting this vcpu. */ spin_lock_irqsave(&svm->ir_list_lock, flags); if (list_empty(&svm->ir_list)) goto out; list_for_each_entry(ir, &svm->ir_list, node) { ret = amd_iommu_update_ga(cpu, r, ir->data); if (ret) break; } out: spin_unlock_irqrestore(&svm->ir_list_lock, flags); return ret; } static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { u64 entry; /* ID = 0xff (broadcast), ID > 0xff (reserved) */ int h_physical_id = kvm_cpu_get_apicid(cpu); struct vcpu_svm *svm = to_svm(vcpu); if (!kvm_vcpu_apicv_active(vcpu)) return; if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT)) return; entry = READ_ONCE(*(svm->avic_physical_id_cache)); WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK); entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK; entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK); entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; if (svm->avic_is_running) entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; WRITE_ONCE(*(svm->avic_physical_id_cache), entry); avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, svm->avic_is_running); } static void avic_vcpu_put(struct kvm_vcpu *vcpu) { u64 entry; struct vcpu_svm *svm = to_svm(vcpu); if (!kvm_vcpu_apicv_active(vcpu)) return; entry = READ_ONCE(*(svm->avic_physical_id_cache)); if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) avic_update_iommu_vcpu_affinity(vcpu, -1, 0); entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; WRITE_ONCE(*(svm->avic_physical_id_cache), entry); } /** * This function is called during VCPU halt/unhalt. 
*/ static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) { struct vcpu_svm *svm = to_svm(vcpu); svm->avic_is_running = is_run; if (is_run) avic_vcpu_load(vcpu, vcpu->cpu); else avic_vcpu_put(vcpu); } static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_svm *svm = to_svm(vcpu); u32 dummy; u32 eax = 1; if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; } init_vmcb(svm); kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); kvm_register_write(vcpu, VCPU_REGS_RDX, eax); if (kvm_vcpu_apicv_active(vcpu) && !init_event) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); } static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; struct page *hsave_page; struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!svm) { err = -ENOMEM; goto out; } err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; err = -ENOMEM; page = alloc_page(GFP_KERNEL); if (!page) goto uninit; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) goto free_page3; if (avic) { err = avic_init_backing_page(&svm->vcpu); if (err) goto free_page4; INIT_LIST_HEAD(&svm->ir_list); spin_lock_init(&svm->ir_list_lock); } /* We initialize this flag to true to make sure that the is_running * bit would be set the first time the vcpu is loaded. 
*/ svm->avic_is_running = true; svm->nested.hsave = page_address(hsave_page); svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); svm->nested.msrpm = page_address(nested_msrpm_pages); svm_vcpu_init_msrpm(svm->nested.msrpm); svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); svm_init_osvw(&svm->vcpu); return &svm->vcpu; free_page4: __free_page(hsave_page); free_page3: __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); free_page2: __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); free_page1: __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); } static void svm_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; if (unlikely(cpu != vcpu->cpu)) { svm->asid_generation = 0; mark_all_dirty(svm->vmcb); } #ifdef CONFIG_X86_64 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); #endif savesegment(fs, svm->host.fs); savesegment(gs, svm->host.gs); svm->host.ldt = kvm_read_ldt(); for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { __this_cpu_write(current_tsc_ratio, tsc_ratio); wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); } } /* This assumes that the kernel never uses MSR_TSC_AUX */ if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); avic_vcpu_load(vcpu, cpu); } static void 
svm_vcpu_put(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; avic_vcpu_put(vcpu); ++vcpu->stat.host_state_reload; kvm_load_ldt(svm->host.ldt); #ifdef CONFIG_X86_64 loadsegment(fs, svm->host.fs); wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase); load_gs_index(svm->host.gs); #else #ifdef CONFIG_X86_32_LAZY_GS loadsegment(gs, svm->host.gs); #endif #endif for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } static void svm_vcpu_blocking(struct kvm_vcpu *vcpu) { avic_set_running(vcpu, false); } static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu) { avic_set_running(vcpu, true); } static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) { return to_svm(vcpu)->vmcb->save.rflags; } static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { /* * Any change of EFLAGS.VM is accompanied by a reload of SS * (caused by either a task switch or an inter-privilege IRET), * so we do not need to update the CPL here. 
*/ to_svm(vcpu)->vmcb->save.rflags = rflags; } static u32 svm_get_pkru(struct kvm_vcpu *vcpu) { return 0; } static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { switch (reg) { case VCPU_EXREG_PDPTR: BUG_ON(!npt_enabled); load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); break; default: BUG(); } } static void svm_set_vintr(struct vcpu_svm *svm) { set_intercept(svm, INTERCEPT_VINTR); } static void svm_clear_vintr(struct vcpu_svm *svm) { clr_intercept(svm, INTERCEPT_VINTR); } static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; switch (seg) { case VCPU_SREG_CS: return &save->cs; case VCPU_SREG_DS: return &save->ds; case VCPU_SREG_ES: return &save->es; case VCPU_SREG_FS: return &save->fs; case VCPU_SREG_GS: return &save->gs; case VCPU_SREG_SS: return &save->ss; case VCPU_SREG_TR: return &save->tr; case VCPU_SREG_LDTR: return &save->ldtr; } BUG(); return NULL; } static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct vmcb_seg *s = svm_seg(vcpu, seg); return s->base; } static void svm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vmcb_seg *s = svm_seg(vcpu, seg); var->base = s->base; var->limit = s->limit; var->selector = s->selector; var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; /* * AMD CPUs circa 2014 track the G bit for all segments except CS. * However, the SVM spec states that the G bit is not observed by the * CPU, and some VMware virtual CPUs drop the G bit for all segments. * So let's synthesize a legal G bit for all segments, this helps * running KVM nested. 
It also helps cross-vendor migration, because * Intel's vmentry has a check on the 'G' bit. */ var->g = s->limit > 0xfffff; /* * AMD's VMCB does not have an explicit unusable field, so emulate it * for cross vendor migration purposes by "not present" */ var->unusable = !var->present; switch (seg) { case VCPU_SREG_TR: /* * Work around a bug where the busy flag in the tr selector * isn't exposed */ var->type |= 0x2; break; case VCPU_SREG_DS: case VCPU_SREG_ES: case VCPU_SREG_FS: case VCPU_SREG_GS: /* * The accessed bit must always be set in the segment * descriptor cache, although it can be cleared in the * descriptor, the cached bit always remains at 1. Since * Intel has a check on this, set it here to support * cross-vendor migration. */ if (!var->unusable) var->type |= 0x1; break; case VCPU_SREG_SS: /* * On AMD CPUs sometimes the DB bit in the segment * descriptor is left as 1, although the whole segment has * been made unusable. Clear it here to pass an Intel VMX * entry check when cross vendor migrating. 
*/ if (var->unusable) var->db = 0; /* This is symmetric with svm_set_segment() */ var->dpl = to_svm(vcpu)->vmcb->save.cpl; break; } } static int svm_get_cpl(struct kvm_vcpu *vcpu) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; return save->cpl; } static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); dt->size = svm->vmcb->save.idtr.limit; dt->address = svm->vmcb->save.idtr.base; } static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.idtr.limit = dt->size; svm->vmcb->save.idtr.base = dt->address ; mark_dirty(svm->vmcb, VMCB_DT); } static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); dt->size = svm->vmcb->save.gdtr.limit; dt->address = svm->vmcb->save.gdtr.base; } static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.gdtr.limit = dt->size; svm->vmcb->save.gdtr.base = dt->address ; mark_dirty(svm->vmcb, VMCB_DT); } static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { } static void svm_decache_cr3(struct kvm_vcpu *vcpu) { } static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } static void update_cr0_intercept(struct vcpu_svm *svm) { ulong gcr0 = svm->vcpu.arch.cr0; u64 *hcr0 = &svm->vmcb->save.cr0; *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) | (gcr0 & SVM_CR0_SELECTIVE_MASK); mark_dirty(svm->vmcb, VMCB_CR); if (gcr0 == *hcr0) { clr_cr_intercept(svm, INTERCEPT_CR0_READ); clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); } else { set_cr_intercept(svm, INTERCEPT_CR0_READ); set_cr_intercept(svm, INTERCEPT_CR0_WRITE); } } static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { vcpu->arch.efer |= EFER_LMA; svm->vmcb->save.efer |= EFER_LMA | EFER_LME; } if 
(is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { vcpu->arch.efer &= ~EFER_LMA; svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); } } #endif vcpu->arch.cr0 = cr0; if (!npt_enabled) cr0 |= X86_CR0_PG | X86_CR0_WP; /* * re-enable caching here because the QEMU bios * does not do it - this results in some delay at * reboot */ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); } static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; if (cr4 & X86_CR4_VMXE) return 1; if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) svm_flush_tlb(vcpu); vcpu->arch.cr4 = cr4; if (!npt_enabled) cr4 |= X86_CR4_PAE; cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); return 0; } static void svm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_seg *s = svm_seg(vcpu, seg); s->base = var->base; s->limit = var->limit; s->selector = var->selector; s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; /* * This is always accurate, except if SYSRET returned to a segment * with SS.DPL != 3. Intel does not have this quirk, and always * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it * would entail passing the CPL to userspace and back. 
*/ if (seg == VCPU_SREG_SS) /* This is symmetric with svm_get_segment() */ svm->vmcb->save.cpl = (var->dpl & 3); mark_dirty(svm->vmcb, VMCB_SEG); } static void update_bp_intercept(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); clr_exception_intercept(svm, BP_VECTOR); if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) set_exception_intercept(svm, BP_VECTOR); } else vcpu->guest_debug = 0; } static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) { if (sd->next_asid > sd->max_asid) { ++sd->asid_generation; sd->next_asid = 1; svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; } svm->asid_generation = sd->asid_generation; svm->vmcb->control.asid = sd->next_asid++; mark_dirty(svm->vmcb, VMCB_ASID); } static u64 svm_get_dr6(struct kvm_vcpu *vcpu) { return to_svm(vcpu)->vmcb->save.dr6; } static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.dr6 = value; mark_dirty(svm->vmcb, VMCB_DR); } static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); vcpu->arch.dr6 = svm_get_dr6(vcpu); vcpu->arch.dr7 = svm->vmcb->save.dr7; vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; set_dr_intercepts(svm); } static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.dr7 = value; mark_dirty(svm->vmcb, VMCB_DR); } static int pf_interception(struct vcpu_svm *svm) { u64 fault_address = svm->vmcb->control.exit_info_2; u64 error_code; int r = 1; switch (svm->apf_reason) { default: error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); r = 
	    kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			       svm->vmcb->control.insn_bytes,
			       svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

/*
 * Intercepted #DB: reinject into the guest unless userspace is debugging
 * (or we are NMI single-stepping), in which case exit to the debugger.
 */
static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		/* Drop TF/RF only if userspace isn't single-stepping itself. */
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

/* Intercepted #BP: always hand control to the userspace debugger. */
static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

/* Intercepted #UD: try to emulate; on failure reflect #UD to the guest. */
static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

/* Intercepted #AC: reflect it (error code 0) straight into the guest. */
static int ac_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
	return 1;
}

/*
 * Check whether an intercepted machine check matches the AMD erratum 383
 * signature (MC0 status 0xb600000000010015, bit 62 ignored); if so, clear
 * the MCE state by hand so the host MCE handler doesn't see it.
 */
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS
registers */ for (i = 0; i < 6; ++i) native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); if (!err) { u32 low, high; value &= ~(1ULL << 2); low = lower_32_bits(value); high = upper_32_bits(value); native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); } /* Flush tlb to evict multi-match entries */ __flush_tlb_all(); return true; } static void svm_handle_mce(struct vcpu_svm *svm) { if (is_erratum_383()) { /* * Erratum 383 triggered. Guest state is corrupt so kill the * guest. */ pr_err("KVM: Guest triggered AMD Erratum 383\n"); kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); return; } /* * On an #MC intercept the MCE handler is not called automatically in * the host. So do it by hand here. */ asm volatile ( "int $0x12\n"); /* not sure if we ever come back to this point */ return; } static int mc_interception(struct vcpu_svm *svm) { return 1; } static int shutdown_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; /* * VMCB is undefined after a SHUTDOWN intercept * so reinitialize it. */ clear_page(svm->vmcb); init_vmcb(svm); kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int io_interception(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ int size, in, string; unsigned port; ++svm->vcpu.stat.io_exits; string = (io_info & SVM_IOIO_STR_MASK) != 0; in = (io_info & SVM_IOIO_TYPE_MASK) != 0; if (string) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; svm->next_rip = svm->vmcb->control.exit_info_2; skip_emulated_instruction(&svm->vcpu); return in ? 
kvm_fast_pio_in(vcpu, size, port) : kvm_fast_pio_out(vcpu, size, port); } static int nmi_interception(struct vcpu_svm *svm) { return 1; } static int intr_interception(struct vcpu_svm *svm) { ++svm->vcpu.stat.irq_exits; return 1; } static int nop_on_interception(struct vcpu_svm *svm) { return 1; } static int halt_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; return kvm_emulate_halt(&svm->vcpu); } static int vmmcall_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; return kvm_emulate_hypercall(&svm->vcpu); } static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); return svm->nested.nested_cr3; } static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr3 = svm->nested.nested_cr3; u64 pdpte; int ret; ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, offset_in_page(cr3) + index * 8, 8); if (ret) return 0; return pdpte; } static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; mark_dirty(svm->vmcb, VMCB_NPT); svm_flush_tlb(vcpu); } static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { /* * TODO: track the cause of the nested page fault, and * correctly fill in the high bits of exit_info_1. */ svm->vmcb->control.exit_code = SVM_EXIT_NPF; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = (1ULL << 32); svm->vmcb->control.exit_info_2 = fault->address; } svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; svm->vmcb->control.exit_info_1 |= fault->error_code; /* * The present bit is always zero for page structure faults on real * hardware. 
*/ if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) svm->vmcb->control.exit_info_1 &= ~1; nested_svm_vmexit(svm); } static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); kvm_init_shadow_mmu(vcpu); vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; vcpu->arch.mmu.shadow_root_level = get_npt_level(); reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static int nested_svm_check_permissions(struct vcpu_svm *svm) { if (!(svm->vcpu.arch.efer & EFER_SVME) || !is_paging(&svm->vcpu)) { kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } if (svm->vmcb->save.cpl) { kvm_inject_gp(&svm->vcpu, 0); return 1; } return 0; } static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code) { int vmexit; if (!is_guest_mode(&svm->vcpu)) return 0; svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = error_code; svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; vmexit = nested_svm_intercept(svm); if (vmexit == NESTED_EXIT_DONE) svm->nested.exit_required = true; return vmexit; } /* This function returns true if it is save to enable the irq window */ static inline bool nested_svm_intr(struct vcpu_svm *svm) { if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) return true; if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) return false; /* * if vmexit was already requested (by intercepted exception * for instance) do not overwrite it with "external interrupt" * vmexit. 
*/ if (svm->nested.exit_required) return false; svm->vmcb->control.exit_code = SVM_EXIT_INTR; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0; if (svm->nested.intercept & 1ULL) { /* * The #vmexit can't be emulated here directly because this * code path runs with irqs and preemption disabled. A * #vmexit emulation might sleep. Only signal request for * the #vmexit here. */ svm->nested.exit_required = true; trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); return false; } return true; } /* This function returns true if it is save to enable the nmi window */ static inline bool nested_svm_nmi(struct vcpu_svm *svm) { if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) return true; svm->vmcb->control.exit_code = SVM_EXIT_NMI; svm->nested.exit_required = true; return false; } static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) { struct page *page; might_sleep(); page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT); if (is_error_page(page)) goto error; *_page = page; return kmap(page); error: kvm_inject_gp(&svm->vcpu, 0); return NULL; } static void nested_svm_unmap(struct page *page) { kunmap(page); kvm_release_page_dirty(page); } static int nested_svm_intercept_ioio(struct vcpu_svm *svm) { unsigned port, size, iopm_len; u16 val, mask; u8 start_bit; u64 gpa; if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) return NESTED_EXIT_HOST; port = svm->vmcb->control.exit_info_1 >> 16; size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; gpa = svm->nested.vmcb_iopm + (port / 8); start_bit = port % 8; iopm_len = (start_bit + size > 8) ? 2 : 1; mask = (0xf >> (4 - size)) << start_bit; val = 0; if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) return NESTED_EXIT_DONE; return (val & mask) ? 
NESTED_EXIT_DONE : NESTED_EXIT_HOST; } static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) { u32 offset, msr, value; int write, mask; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return NESTED_EXIT_HOST; msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; offset = svm_msrpm_offset(msr); write = svm->vmcb->control.exit_info_1 & 1; mask = 1 << ((2 * (msr & 0xf)) + write); if (offset == MSR_INVALID) return NESTED_EXIT_DONE; /* Offset is in 32 bit units but need in 8 bit units */ offset *= 4; if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) return NESTED_EXIT_DONE; return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; } static int nested_svm_exit_special(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; switch (exit_code) { case SVM_EXIT_INTR: case SVM_EXIT_NMI: case SVM_EXIT_EXCP_BASE + MC_VECTOR: return NESTED_EXIT_HOST; case SVM_EXIT_NPF: /* For now we are always handling NPFs when using them */ if (npt_enabled) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + PF_VECTOR: /* When we're shadowing, trap PFs, but not async PF */ if (!npt_enabled && svm->apf_reason == 0) return NESTED_EXIT_HOST; break; default: break; } return NESTED_EXIT_CONTINUE; } /* * If this function returns true, this #vmexit was already handled */ static int nested_svm_intercept(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; int vmexit = NESTED_EXIT_HOST; switch (exit_code) { case SVM_EXIT_MSR: vmexit = nested_svm_exit_handled_msr(svm); break; case SVM_EXIT_IOIO: vmexit = nested_svm_intercept_ioio(svm); break; case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: { u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0); if (svm->nested.intercept_cr & bit) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: { u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0); if (svm->nested.intercept_dr & bit) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_EXCP_BASE ... 
SVM_EXIT_EXCP_BASE + 0x1f: { u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); if (svm->nested.intercept_exceptions & excp_bits) vmexit = NESTED_EXIT_DONE; /* async page fault always cause vmexit */ else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) && svm->apf_reason != 0) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_ERR: { vmexit = NESTED_EXIT_DONE; break; } default: { u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); if (svm->nested.intercept & exit_bits) vmexit = NESTED_EXIT_DONE; } } return vmexit; } static int nested_svm_exit_handled(struct vcpu_svm *svm) { int vmexit; vmexit = nested_svm_intercept(svm); if (vmexit == NESTED_EXIT_DONE) nested_svm_vmexit(svm); return vmexit; } static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb) { struct vmcb_control_area *dst = &dst_vmcb->control; struct vmcb_control_area *from = &from_vmcb->control; dst->intercept_cr = from->intercept_cr; dst->intercept_dr = from->intercept_dr; dst->intercept_exceptions = from->intercept_exceptions; dst->intercept = from->intercept; dst->iopm_base_pa = from->iopm_base_pa; dst->msrpm_base_pa = from->msrpm_base_pa; dst->tsc_offset = from->tsc_offset; dst->asid = from->asid; dst->tlb_ctl = from->tlb_ctl; dst->int_ctl = from->int_ctl; dst->int_vector = from->int_vector; dst->int_state = from->int_state; dst->exit_code = from->exit_code; dst->exit_code_hi = from->exit_code_hi; dst->exit_info_1 = from->exit_info_1; dst->exit_info_2 = from->exit_info_2; dst->exit_int_info = from->exit_int_info; dst->exit_int_info_err = from->exit_int_info_err; dst->nested_ctl = from->nested_ctl; dst->event_inj = from->event_inj; dst->event_inj_err = from->event_inj_err; dst->nested_cr3 = from->nested_cr3; dst->lbr_ctl = from->lbr_ctl; } static int nested_svm_vmexit(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; struct page *page; trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, 
vmcb->control.exit_info_1, vmcb->control.exit_info_2, vmcb->control.exit_int_info, vmcb->control.exit_int_info_err, KVM_ISA_SVM); nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); if (!nested_vmcb) return 1; /* Exit Guest-Mode */ leave_guest_mode(&svm->vcpu); svm->nested.vmcb = 0; /* Give the current vmcb to the guest */ disable_gif(svm); nested_vmcb->save.es = vmcb->save.es; nested_vmcb->save.cs = vmcb->save.cs; nested_vmcb->save.ss = vmcb->save.ss; nested_vmcb->save.ds = vmcb->save.ds; nested_vmcb->save.gdtr = vmcb->save.gdtr; nested_vmcb->save.idtr = vmcb->save.idtr; nested_vmcb->save.efer = svm->vcpu.arch.efer; nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); nested_vmcb->save.cr2 = vmcb->save.cr2; nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); nested_vmcb->save.rip = vmcb->save.rip; nested_vmcb->save.rsp = vmcb->save.rsp; nested_vmcb->save.rax = vmcb->save.rax; nested_vmcb->save.dr7 = vmcb->save.dr7; nested_vmcb->save.dr6 = vmcb->save.dr6; nested_vmcb->save.cpl = vmcb->save.cpl; nested_vmcb->control.int_ctl = vmcb->control.int_ctl; nested_vmcb->control.int_vector = vmcb->control.int_vector; nested_vmcb->control.int_state = vmcb->control.int_state; nested_vmcb->control.exit_code = vmcb->control.exit_code; nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi; nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1; nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info; nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; if (svm->nrips_enabled) nested_vmcb->control.next_rip = vmcb->control.next_rip; /* * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have * to make sure that we do not lose injected events. So check event_inj * here and copy it to exit_int_info if it is valid. 
* Exit_int_info and event_inj can't be both valid because the case * below only happens on a VMRUN instruction intercept which has * no valid exit_int_info set. */ if (vmcb->control.event_inj & SVM_EVTINJ_VALID) { struct vmcb_control_area *nc = &nested_vmcb->control; nc->exit_int_info = vmcb->control.event_inj; nc->exit_int_info_err = vmcb->control.event_inj_err; } nested_vmcb->control.tlb_ctl = 0; nested_vmcb->control.event_inj = 0; nested_vmcb->control.event_inj_err = 0; /* We always set V_INTR_MASKING and remember the old value in hflags */ if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; /* Restore the original control entries */ copy_vmcb_control_area(vmcb, hsave); kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); svm->nested.nested_cr3 = 0; /* Restore selected save entries */ svm->vmcb->save.es = hsave->save.es; svm->vmcb->save.cs = hsave->save.cs; svm->vmcb->save.ss = hsave->save.ss; svm->vmcb->save.ds = hsave->save.ds; svm->vmcb->save.gdtr = hsave->save.gdtr; svm->vmcb->save.idtr = hsave->save.idtr; kvm_set_rflags(&svm->vcpu, hsave->save.rflags); svm_set_efer(&svm->vcpu, hsave->save.efer); svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); svm_set_cr4(&svm->vcpu, hsave->save.cr4); if (npt_enabled) { svm->vmcb->save.cr3 = hsave->save.cr3; svm->vcpu.arch.cr3 = hsave->save.cr3; } else { (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); } kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); svm->vmcb->save.dr7 = 0; svm->vmcb->save.cpl = 0; svm->vmcb->control.exit_int_info = 0; mark_all_dirty(svm->vmcb); nested_svm_unmap(page); nested_svm_uninit_mmu_context(&svm->vcpu); kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_load(&svm->vcpu); return 0; } static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { /* * This function merges the msr permission 
bitmaps of kvm and the * nested vmcb. It is optimized in that it only merges the parts where * the kvm msr permission bitmap may contain zero bits */ int i; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return true; for (i = 0; i < MSRPM_OFFSETS; i++) { u32 value, p; u64 offset; if (msrpm_offsets[i] == 0xffffffff) break; p = msrpm_offsets[i]; offset = svm->nested.vmcb_msrpm + (p * 4); if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) return false; svm->nested.msrpm[p] = svm->msrpm[p] | value; } svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); return true; } static bool nested_vmcb_checks(struct vmcb *vmcb) { if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0) return false; if (vmcb->control.asid == 0) return false; if (vmcb->control.nested_ctl && !npt_enabled) return false; return true; } static bool nested_svm_vmrun(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; struct page *page; u64 vmcb_gpa; vmcb_gpa = svm->vmcb->save.rax; nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return false; if (!nested_vmcb_checks(nested_vmcb)) { nested_vmcb->control.exit_code = SVM_EXIT_ERR; nested_vmcb->control.exit_code_hi = 0; nested_vmcb->control.exit_info_1 = 0; nested_vmcb->control.exit_info_2 = 0; nested_svm_unmap(page); return false; } trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, nested_vmcb->save.rip, nested_vmcb->control.int_ctl, nested_vmcb->control.event_inj, nested_vmcb->control.nested_ctl); trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff, nested_vmcb->control.intercept_cr >> 16, nested_vmcb->control.intercept_exceptions, nested_vmcb->control.intercept); /* Clear internal status */ kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); /* * Save the old vmcb, so we don't need to pick what we save, but can * restore everything when a VMEXIT occurs */ hsave->save.es = 
vmcb->save.es; hsave->save.cs = vmcb->save.cs; hsave->save.ss = vmcb->save.ss; hsave->save.ds = vmcb->save.ds; hsave->save.gdtr = vmcb->save.gdtr; hsave->save.idtr = vmcb->save.idtr; hsave->save.efer = svm->vcpu.arch.efer; hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); hsave->save.cr4 = svm->vcpu.arch.cr4; hsave->save.rflags = kvm_get_rflags(&svm->vcpu); hsave->save.rip = kvm_rip_read(&svm->vcpu); hsave->save.rsp = vmcb->save.rsp; hsave->save.rax = vmcb->save.rax; if (npt_enabled) hsave->save.cr3 = vmcb->save.cr3; else hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); copy_vmcb_control_area(hsave, vmcb); if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) svm->vcpu.arch.hflags |= HF_HIF_MASK; else svm->vcpu.arch.hflags &= ~HF_HIF_MASK; if (nested_vmcb->control.nested_ctl) { kvm_mmu_unload(&svm->vcpu); svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; nested_svm_init_mmu_context(&svm->vcpu); } /* Load the nested guest state */ svm->vmcb->save.es = nested_vmcb->save.es; svm->vmcb->save.cs = nested_vmcb->save.cs; svm->vmcb->save.ss = nested_vmcb->save.ss; svm->vmcb->save.ds = nested_vmcb->save.ds; svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; svm->vmcb->save.idtr = nested_vmcb->save.idtr; kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); if (npt_enabled) { svm->vmcb->save.cr3 = nested_vmcb->save.cr3; svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; } else (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); /* Guest paging mode is active - reset mmu */ kvm_mmu_reset_context(&svm->vcpu); svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); /* In case we don't even reach vcpu_run, the fields are not updated */ 
svm->vmcb->save.rax = nested_vmcb->save.rax; svm->vmcb->save.rsp = nested_vmcb->save.rsp; svm->vmcb->save.rip = nested_vmcb->save.rip; svm->vmcb->save.dr7 = nested_vmcb->save.dr7; svm->vmcb->save.dr6 = nested_vmcb->save.dr6; svm->vmcb->save.cpl = nested_vmcb->save.cpl; svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; /* cache intercepts */ svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; svm->nested.intercept = nested_vmcb->control.intercept; svm_flush_tlb(&svm->vcpu); svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; else svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { /* We only want the cr8 intercept bits of the guest */ clr_cr_intercept(svm, INTERCEPT_CR8_READ); clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); } /* We don't want to see VMMCALLs from a nested guest */ clr_intercept(svm, INTERCEPT_VMMCALL); svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm->vmcb->control.int_state = nested_vmcb->control.int_state; svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; nested_svm_unmap(page); /* Enter Guest-Mode */ enter_guest_mode(&svm->vcpu); /* * Merge guest and host intercepts - must be called with vcpu in * guest-mode to take affect here */ recalc_intercepts(svm); svm->nested.vmcb = vmcb_gpa; enable_gif(svm); mark_all_dirty(svm->vmcb); return true; } static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) { 
to_vmcb->save.fs = from_vmcb->save.fs; to_vmcb->save.gs = from_vmcb->save.gs; to_vmcb->save.tr = from_vmcb->save.tr; to_vmcb->save.ldtr = from_vmcb->save.ldtr; to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base; to_vmcb->save.star = from_vmcb->save.star; to_vmcb->save.lstar = from_vmcb->save.lstar; to_vmcb->save.cstar = from_vmcb->save.cstar; to_vmcb->save.sfmask = from_vmcb->save.sfmask; to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; } static int vmload_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct page *page; if (nested_svm_check_permissions(svm)) return 1; nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); nested_svm_vmloadsave(nested_vmcb, svm->vmcb); nested_svm_unmap(page); return 1; } static int vmsave_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct page *page; if (nested_svm_check_permissions(svm)) return 1; nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); nested_svm_vmloadsave(svm->vmcb, nested_vmcb); nested_svm_unmap(page); return 1; } static int vmrun_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; /* Save rip after vmrun instruction */ kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); if (!nested_svm_vmrun(svm)) return 1; if (!nested_svm_vmrun_msrpm(svm)) goto failed; return 1; failed: svm->vmcb->control.exit_code = SVM_EXIT_ERR; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0; nested_svm_vmexit(svm); return 1; } static int stgi_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; 
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); enable_gif(svm); return 1; } static int clgi_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); disable_gif(svm); /* After a CLGI no interrupts should come */ if (!kvm_vcpu_apicv_active(&svm->vcpu)) { svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; mark_dirty(svm->vmcb, VMCB_INTR); } return 1; } static int invlpga_interception(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); return 1; } static int skinit_interception(struct vcpu_svm *svm) { trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } static int wbinvd_interception(struct vcpu_svm *svm) { return kvm_emulate_wbinvd(&svm->vcpu); } static int xsetbv_interception(struct vcpu_svm *svm) { u64 new_bv = kvm_read_edx_eax(&svm->vcpu); u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); } return 1; } static int task_switch_interception(struct vcpu_svm *svm) { u16 tss_selector; int reason; int int_type = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; uint32_t type = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; uint32_t idt_v = svm->vmcb->control.exit_int_info & 
SVM_EXITINTINFO_VALID; bool has_error_code = false; u32 error_code = 0; tss_selector = (u16)svm->vmcb->control.exit_info_1; if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) reason = TASK_SWITCH_IRET; else if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) reason = TASK_SWITCH_JMP; else if (idt_v) reason = TASK_SWITCH_GATE; else reason = TASK_SWITCH_CALL; if (reason == TASK_SWITCH_GATE) { switch (type) { case SVM_EXITINTINFO_TYPE_NMI: svm->vcpu.arch.nmi_injected = false; break; case SVM_EXITINTINFO_TYPE_EXEPT: if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { has_error_code = true; error_code = (u32)svm->vmcb->control.exit_info_2; } kvm_clear_exception_queue(&svm->vcpu); break; case SVM_EXITINTINFO_TYPE_INTR: kvm_clear_interrupt_queue(&svm->vcpu); break; default: break; } } if (reason != TASK_SWITCH_GATE || int_type == SVM_EXITINTINFO_TYPE_SOFT || (int_type == SVM_EXITINTINFO_TYPE_EXEPT && (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) skip_emulated_instruction(&svm->vcpu); if (int_type != SVM_EXITINTINFO_TYPE_SOFT) int_vec = -1; if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, has_error_code, error_code) == EMULATE_FAIL) { svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; svm->vcpu.run->internal.ndata = 0; return 0; } return 1; } static int cpuid_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; return kvm_emulate_cpuid(&svm->vcpu); } static int iret_interception(struct vcpu_svm *svm) { ++svm->vcpu.stat.nmi_window_exits; clr_intercept(svm, INTERCEPT_IRET); svm->vcpu.arch.hflags |= HF_IRET_MASK; svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); return 1; } static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); skip_emulated_instruction(&svm->vcpu); return 1; } static int emulate_on_interception(struct vcpu_svm *svm) { return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; } static int rdpmc_interception(struct vcpu_svm *svm) { int err; if (!static_cpu_has(X86_FEATURE_NRIPS)) return emulate_on_interception(svm); err = kvm_rdpmc(&svm->vcpu); return kvm_complete_insn_gp(&svm->vcpu, err); } static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val) { unsigned long cr0 = svm->vcpu.arch.cr0; bool ret = false; u64 intercept; intercept = svm->nested.intercept; if (!is_guest_mode(&svm->vcpu) || (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))) return false; cr0 &= ~SVM_CR0_SELECTIVE_MASK; val &= ~SVM_CR0_SELECTIVE_MASK; if (cr0 ^ val) { svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); } return ret; } #define CR_VALID (1ULL << 63) static int cr_interception(struct vcpu_svm *svm) { int reg, cr; unsigned long val; int err; if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_on_interception(svm); if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) return emulate_on_interception(svm); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; else cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; err = 0; if (cr >= 16) { /* mov to cr */ cr -= 16; val = kvm_register_read(&svm->vcpu, reg); switch (cr) { case 0: if (!check_selective_cr0_intercepted(svm, val)) err = kvm_set_cr0(&svm->vcpu, val); else return 1; break; case 3: err = kvm_set_cr3(&svm->vcpu, val); break; case 4: err = kvm_set_cr4(&svm->vcpu, val); break; case 8: err = kvm_set_cr8(&svm->vcpu, val); break; default: WARN(1, "unhandled write to CR%d", cr); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } } else { /* mov from cr */ 
switch (cr) { case 0: val = kvm_read_cr0(&svm->vcpu); break; case 2: val = svm->vcpu.arch.cr2; break; case 3: val = kvm_read_cr3(&svm->vcpu); break; case 4: val = kvm_read_cr4(&svm->vcpu); break; case 8: val = kvm_get_cr8(&svm->vcpu); break; default: WARN(1, "unhandled read from CR%d", cr); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } kvm_register_write(&svm->vcpu, reg, val); } return kvm_complete_insn_gp(&svm->vcpu, err); } static int dr_interception(struct vcpu_svm *svm) { int reg, dr; unsigned long val; if (svm->vcpu.guest_debug == 0) { /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. */ clr_dr_intercepts(svm); svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_on_interception(svm); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; if (dr >= 16) { /* mov to DRn */ if (!kvm_require_dr(&svm->vcpu, dr - 16)) return 1; val = kvm_register_read(&svm->vcpu, reg); kvm_set_dr(&svm->vcpu, dr - 16, val); } else { if (!kvm_require_dr(&svm->vcpu, dr)) return 1; kvm_get_dr(&svm->vcpu, dr, &val); kvm_register_write(&svm->vcpu, reg, val); } skip_emulated_instruction(&svm->vcpu); return 1; } static int cr8_write_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; int r; u8 cr8_prev = kvm_get_cr8(&svm->vcpu); /* instruction emulation calls kvm_set_cr8() */ r = cr_interception(svm); if (lapic_in_kernel(&svm->vcpu)) return r; if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) return r; kvm_run->exit_reason = KVM_EXIT_SET_TPR; return 0; } static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); switch (msr_info->index) { case MSR_IA32_TSC: { msr_info->data = svm->vmcb->control.tsc_offset + kvm_scale_tsc(vcpu, rdtsc()); break; } case 
MSR_STAR: msr_info->data = svm->vmcb->save.star; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: msr_info->data = svm->vmcb->save.lstar; break; case MSR_CSTAR: msr_info->data = svm->vmcb->save.cstar; break; case MSR_KERNEL_GS_BASE: msr_info->data = svm->vmcb->save.kernel_gs_base; break; case MSR_SYSCALL_MASK: msr_info->data = svm->vmcb->save.sfmask; break; #endif case MSR_IA32_SYSENTER_CS: msr_info->data = svm->vmcb->save.sysenter_cs; break; case MSR_IA32_SYSENTER_EIP: msr_info->data = svm->sysenter_eip; break; case MSR_IA32_SYSENTER_ESP: msr_info->data = svm->sysenter_esp; break; case MSR_TSC_AUX: if (!boot_cpu_has(X86_FEATURE_RDTSCP)) return 1; msr_info->data = svm->tsc_aux; break; /* * Nobody will change the following 5 values in the VMCB so we can * safely return them on rdmsr. They will always be 0 until LBRV is * implemented. */ case MSR_IA32_DEBUGCTLMSR: msr_info->data = svm->vmcb->save.dbgctl; break; case MSR_IA32_LASTBRANCHFROMIP: msr_info->data = svm->vmcb->save.br_from; break; case MSR_IA32_LASTBRANCHTOIP: msr_info->data = svm->vmcb->save.br_to; break; case MSR_IA32_LASTINTFROMIP: msr_info->data = svm->vmcb->save.last_excp_from; break; case MSR_IA32_LASTINTTOIP: msr_info->data = svm->vmcb->save.last_excp_to; break; case MSR_VM_HSAVE_PA: msr_info->data = svm->nested.hsave_msr; break; case MSR_VM_CR: msr_info->data = svm->nested.vm_cr_msr; break; case MSR_IA32_UCODE_REV: msr_info->data = 0x01000065; break; case MSR_F15H_IC_CFG: { int family, model; family = guest_cpuid_family(vcpu); model = guest_cpuid_model(vcpu); if (family < 0 || model < 0) return kvm_get_msr_common(vcpu, msr_info); msr_info->data = 0; if (family == 0x15 && (model >= 0x2 && model < 0x20)) msr_info->data = 0x1E; } break; default: return kvm_get_msr_common(vcpu, msr_info); } return 0; } static int rdmsr_interception(struct vcpu_svm *svm) { u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); struct msr_data msr_info; msr_info.index = ecx; msr_info.host_initiated = false; if 
(svm_get_msr(&svm->vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(&svm->vcpu, 0); } else { trace_kvm_msr_read(ecx, msr_info.data); kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, msr_info.data & 0xffffffff); kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, msr_info.data >> 32); svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; skip_emulated_instruction(&svm->vcpu); } return 1; } static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) { struct vcpu_svm *svm = to_svm(vcpu); int svm_dis, chg_mask; if (data & ~SVM_VM_CR_VALID_MASK) return 1; chg_mask = SVM_VM_CR_VALID_MASK; if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); svm->nested.vm_cr_msr &= ~chg_mask; svm->nested.vm_cr_msr |= (data & chg_mask); svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; /* check for svm_disable while efer.svme is set */ if (svm_dis && (vcpu->arch.efer & EFER_SVME)) return 1; return 0; } static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; case MSR_STAR: svm->vmcb->save.star = data; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: svm->vmcb->save.lstar = data; break; case MSR_CSTAR: svm->vmcb->save.cstar = data; break; case MSR_KERNEL_GS_BASE: svm->vmcb->save.kernel_gs_base = data; break; case MSR_SYSCALL_MASK: svm->vmcb->save.sfmask = data; break; #endif case MSR_IA32_SYSENTER_CS: svm->vmcb->save.sysenter_cs = data; break; case MSR_IA32_SYSENTER_EIP: svm->sysenter_eip = data; svm->vmcb->save.sysenter_eip = data; break; case MSR_IA32_SYSENTER_ESP: svm->sysenter_esp = data; svm->vmcb->save.sysenter_esp = data; break; case MSR_TSC_AUX: if (!boot_cpu_has(X86_FEATURE_RDTSCP)) return 1; /* * This is rare, so we update the MSR here instead of using * direct_access_msrs. Doing that would require a rdmsr in * svm_vcpu_put. 
		 */
		svm->tsc_aux = data;
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		/* Bit 0 toggles last-branch-record virtualization. */
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	case MSR_IA32_APICBASE:
		if (kvm_vcpu_apicv_active(vcpu))
			avic_update_vapic_bar(to_svm(vcpu), data);
		/* Follow through */
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

/*
 * Handle the WRMSR intercept: assemble EDX:EAX into a 64-bit value and
 * forward it to kvm_set_msr(); injects #GP on failure.  Always returns 1.
 */
static int wrmsr_interception(struct vcpu_svm *svm)
{
	struct msr_data msr;
	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
	u64 data = kvm_read_edx_eax(&svm->vcpu);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;

	/* Advance past the intercepted instruction (+2 bytes). */
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (kvm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

/* Dispatch an MSR intercept: exit_info_1 is non-zero for WRMSR. */
static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

/*
 * The guest opened an interrupt window: drop the virtual-interrupt
 * request and ask the common code to (re)inject pending events.
 */
static int interrupt_window_interception(struct vcpu_svm *svm)
{
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	return 1;
}

/* PAUSE intercept: treat the vcpu as spin-waiting and try to yield. */
static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

/* Skip the intercepted instruction with no other side effects. */
static int nop_interception(struct vcpu_svm *svm)
{
	skip_emulated_instruction(&(svm->vcpu));
	return 1;
}

/* MONITOR is not virtualized; warn once and treat it as a NOP. */
static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

/* MWAIT is not virtualized; warn once and treat it as a NOP. */
static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}

/* Failure reasons reported in exit_info_2 for an AVIC incomplete-IPI exit. */
enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

/*
 * Handle an AVIC "incomplete IPI" #VMEXIT: finish in software whatever
 * part of the IPI delivery the hardware could not complete on its own.
 */
static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
{
	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
	u32 icrl = svm->vmcb->control.exit_info_1;
	u32 id = svm->vmcb->control.exit_info_2 >> 32;
	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
	struct kvm_lapic *apic = svm->vcpu.arch.apic;

	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/*
		 * AVIC hardware handles the generation of
		 * IPIs when the specified Message Type is Fixed
		 * (also known as fixed delivery mode) and
		 * the Trigger Mode is edge-triggered. The hardware
		 * also supports self and broadcast delivery modes
		 * specified via the Destination Shorthand(DSH)
		 * field of the ICRL. Logical and physical APIC ID
		 * formats are supported. All other IPI types cause
		 * a #VMEXIT, which needs to emulated.
		 */
		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
		int i;
		struct kvm_vcpu *vcpu;
		struct kvm *kvm = svm->vcpu.kvm;
		struct kvm_lapic *apic = svm->vcpu.arch.apic;

		/*
		 * At this point, we expect that the AVIC HW has already
		 * set the appropriate IRR bits on the valid target
		 * vcpus. So, we just need to kick the appropriate vcpu.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			bool m = kvm_apic_match_dest(vcpu, apic,
						     icrl & KVM_APIC_SHORT_MASK,
						     GET_APIC_DEST_FIELD(icrh),
						     icrl & KVM_APIC_DEST_MASK);

			/* Wake only vcpus not currently running under AVIC. */
			if (m && !avic_vcpu_is_running(vcpu))
				kvm_vcpu_wake_up(vcpu);
		}
		break;
	}
	case AVIC_IPI_FAILURE_INVALID_TARGET:
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
		break;
	default:
		pr_err("Unknown IPI interception\n");
	}

	return 1;
}

/*
 * Look up the AVIC logical-ID table slot for @ldr.  Returns NULL when
 * the logical ID does not map to exactly one valid slot (dlid == 0,
 * a flat-mode bit beyond slot 7, or an out-of-range cluster/apic pair).
 */
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
	struct kvm_arch *vm_data = &vcpu->kvm->arch;
	int index;
	u32 *logical_apic_id_table;
	int dlid = GET_APIC_LOGICAL_ID(ldr);

	if (!dlid)
		return NULL;

	if (flat) { /* flat */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else { /* cluster */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if ((apic < 0) || (apic > 7) ||
		    (cluster >= 0xf))
			return NULL;
		index = (cluster << 2) + apic;
	}

	logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);

	return &logical_apic_id_table[index];
}

/*
 * Program (or clear, when !@valid) the AVIC logical-ID table entry for
 * @ldr so it refers to guest physical APIC ID @g_physical_id.
 * Returns -EINVAL when @ldr maps to no usable slot.
 */
static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
			  bool valid)
{
	bool flat;
	u32 *entry, new_entry;

	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
	if (!entry)
		return -EINVAL;

	new_entry = READ_ONCE(*entry);
	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
	if (valid)
		new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	else
		new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);

	return 0;
}

/*
 * React to a guest write of APIC_LDR: install the new logical-ID
 * mapping, tearing down the previously cached one if the update fails.
 */
static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
	int ret;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);

	if (!ldr)
		return 1;

	ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
	if (ret && svm->ldr_reg) {
		avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
		svm->ldr_reg = 0;
	} else {
		svm->ldr_reg = ldr;
	}

	return ret;
}

/*
 * React to a guest write of APIC_ID: relocate this vcpu's physical-ID
 * table entry to the slot matching the new APIC ID.  Returns non-zero
 * when either the old or the new slot cannot be resolved.
 */
static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
{
	u64 *old, *new;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
	u32 id = (apic_id_reg >> 24) & 0xff;

	if (vcpu->vcpu_id == id)
		return 0;

	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
	new = avic_get_physical_id_entry(vcpu, id);
	if (!new || !old)
		return 1;

	/* We need to move physical_id_entry to new offset */
	*new = *old;
	*old = 0ULL;
	to_svm(vcpu)->avic_physical_id_cache = new;

	/*
	 * Also update the guest physical APIC ID in the logical
	 * APIC ID table entry if already setup the LDR.
	 */
	if (svm->ldr_reg)
		avic_handle_ldr_update(vcpu);

	return 0;
}

/* React to a guest write of APIC_DFR (flat vs. cluster addressing). */
static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_arch *vm_data = &vcpu->kvm->arch;
	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
	u32 mod = (dfr >> 28) & 0xf;

	/*
	 * We assume that all local APICs are using the same type.
	 * If this changes, we need to flush the AVIC logical
	 * APID id table.
	 */
	if (vm_data->ldr_mode == mod)
		return 0;

	clear_page(page_address(vm_data->avic_logical_id_table_page));
	vm_data->ldr_mode = mod;

	if (svm->ldr_reg)
		avic_handle_ldr_update(vcpu);

	return 0;
}

/*
 * Handle an AVIC unaccelerated-access *trap* on an APIC register write:
 * update AVIC bookkeeping for ID/LDR/DFR changes, then replay the
 * (already hardware-latched) value into the emulated local APIC.
 * Returns 0 when AVIC bookkeeping failed, 1 on success.
 */
static int avic_unaccel_trap_write(struct vcpu_svm *svm)
{
	struct kvm_lapic *apic = svm->vcpu.arch.apic;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

	switch (offset) {
	case APIC_ID:
		if (avic_handle_apic_id_update(&svm->vcpu))
			return 0;
		break;
	case APIC_LDR:
		if (avic_handle_ldr_update(&svm->vcpu))
			return 0;
		break;
	case APIC_DFR:
		avic_handle_dfr_update(&svm->vcpu);
		break;
	default:
		break;
	}

	/* Replay the register value into the software APIC model. */
	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));

	return 1;
}

/*
 * Is @offset one of the APIC registers that AVIC reports as a trap
 * (write already performed by hardware) rather than a fault (access
 * must be fully emulated)?
 */
static bool is_avic_unaccelerated_access_trap(u32 offset)
{
	bool ret = false;

	switch (offset) {
	case APIC_ID:
	case APIC_EOI:
	case APIC_RRR:
	case APIC_LDR:
	case APIC_DFR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_ICR:
	case APIC_LVTT:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_TMICT:
	case APIC_TDCR:
		ret = true;
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Top-level handler for AVIC unaccelerated APIC accesses: traps are
 * replayed via avic_unaccel_trap_write(), faults go through the full
 * instruction emulator.
 */
static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
{
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
	u32 vector = svm->vmcb->control.exit_info_2 &
		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
					    trap, write, vector);

	if (trap) {
		/* Handling Trap */
		WARN_ONCE(!write, "svm: Handling trap read.\n");
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* Handling Fault */
		ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
	}

	return ret;
}

/* Dispatch table mapping SVM exit codes to their intercept handlers. */
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= wbinvd_interception,
	[SVM_EXIT_MONITOR]			= monitor_interception,
	[SVM_EXIT_MWAIT]			= mwait_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_NPF]				= pf_interception,
	[SVM_EXIT_RSM]				= emulate_on_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
};

/*
 * Dump the whole VMCB (control area and state save area) to the kernel
 * log.  Called when a VMRUN fails with SVM_EXIT_ERR (see handle_exit).
 */
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:", save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:", save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:", save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:", save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:", save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:", save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:", save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:", save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:", save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:", save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	/* NOTE(review): this format string was split across a chunk
	 * boundary; rejoined with a single space — confirm against the
	 * original file's spacing. */
	pr_err("cpl: %d efer: %016llx\n", save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

/* Report the raw exit_info fields (used by tracing/introspection). */
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

/*
 * Main #VMEXIT dispatcher: sync guest CR0/CR3 state, give a nested
 * hypervisor first claim on the exit, then route the exit code through
 * svm_exit_handlers[].  Returns 0 to drop to userspace, 1 to resume.
 */
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

	vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return svm_exit_handlers[exit_code](svm);
}

/* Re-mark the host TSS descriptor as available and reload TR. */
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

/* Assign a fresh ASID if this vcpu's generation is stale on this cpu. */
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

/* Queue an NMI for injection and intercept IRET to track its delivery. */
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

/* Request a virtual interrupt via the VMCB V_IRQ mechanism. */
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	/* The following fields
	are ignored when AVIC is enabled */
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

/* Inject the pending external interrupt directly via event injection. */
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

/* True when an L1 hypervisor virtualizes the TPR for this (L2) vcpu. */
static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
}

/*
 * Intercept CR8 writes only while an interrupt with priority above the
 * current TPR is pending; otherwise let the guest write CR8 freely.
 */
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_nested_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

/* No-op on SVM: x2APIC virtualization mode needs no extra setup here. */
static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	return;
}

/* APICv on SVM is AVIC; report whether it was enabled at module load. */
static bool svm_get_enable_apicv(void)
{
	return avic;
}

/* No-op: AVIC maintains IRR in hardware. */
static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
}

/* No-op: AVIC maintains ISR in hardware. */
static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
}

/* Note: Currently only used by Hyper-V.
 */
static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!avic)
		return;

	/* Deactivate AVIC for this vcpu. */
	vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
	mark_dirty(vmcb, VMCB_INTR);
}

/* No-op on SVM: AVIC has no EOI-exit bitmap to program. */
static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	return;
}

/*
 * Post an interrupt to a vcpu under AVIC: set the IRR bit, then either
 * ring the AVIC doorbell (target currently running) or wake the vcpu.
 */
static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
	kvm_lapic_set_irr(vec, vcpu->arch.apic);
	/* Order the IRR update before the running-state check below. */
	smp_mb__after_atomic();

	if (avic_vcpu_is_running(vcpu))
		wrmsrl(SVM_AVIC_DOORBELL,
		       kvm_cpu_get_apicid(vcpu->cpu));
	else
		kvm_vcpu_wake_up(vcpu);
}

/* Remove the IRTE bookkeeping entry matching @pi from this vcpu's list. */
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	unsigned long flags;
	struct amd_svm_iommu_ir *cur;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_for_each_entry(cur, &svm->ir_list, node) {
		if (cur->data != pi->ir_data)
			continue;
		list_del(&cur->node);
		kfree(cur);
		break;
	}
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

/*
 * Record posted-interrupt remapping data on this vcpu's ir_list so the
 * IRTE can be updated when the vcpu is rescheduled.  Moves the entry
 * from a previous owner vcpu when the irte is being re-targeted.
 */
static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;

	/**
	 * In some cases, the existing irte is updaed and re-set,
	 * so we need to check here if it's already been * added
	 * to the ir_list.
	 */
	if (pi->ir_data && (pi->prev_ga_tag != 0)) {
		struct kvm *kvm = svm->vcpu.kvm;
		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		struct vcpu_svm *prev_svm;

		if (!prev_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		prev_svm = to_svm(prev_vcpu);
		svm_ir_list_del(prev_svm, pi);
	}

	/**
	 * Allocating new amd_iommu_pi_data, which will get
	 * add to the per-vcpu ir_list.
	 */
	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
	if (!ir) {
		ret = -ENOMEM;
		goto out;
	}
	ir->data = pi->ir_data;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_add(&ir->node, &svm->ir_list);
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
	return ret;
}

/**
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU.
 So, we still use legacy interrupt
 * remapping for these kind of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with single CPU as the destination, e.g. user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	/* Only single-vcpu destinations can use posted interrupts. */
	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
		pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
			 __func__, irq.vector);
		return -1;
	}

	pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
		 irq.vector);
	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
	vcpu_info->vector = irq.vector;

	return 0;
}

/*
 * svm_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	int idx, ret = -EINVAL;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP))
		return 0;

	pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
		 __func__, host_irq, guest_irq, set);

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	WARN_ON(guest_irq >= irq_rt->nr_rt_entries);

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		struct vcpu_data vcpu_info;
		struct vcpu_svm *svm = NULL;

		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;

		/**
		 * Here, we setup with legacy mode in the following cases:
		 * 1. When cannot target interrupt to a specific vcpu.
		 * 2. Unsetting posted interrupt.
		 * 3. APIC virtialization is disabled for the vcpu.
		 */
		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
		    kvm_vcpu_apicv_active(&svm->vcpu)) {
			struct amd_iommu_pi_data pi;

			/* Try to enable guest_mode in IRTE */
			pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
			pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
						     svm->vcpu.vcpu_id);
			pi.is_guest_mode = true;
			pi.vcpu_data = &vcpu_info;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/**
			 * Here, we successfully setting up vcpu affinity in
			 * IOMMU guest mode. Now, we need to store the posted
			 * interrupt information in a per-vcpu ir_list so that
			 * we can reference to them directly when we update vcpu
			 * scheduling information in IOMMU irte.
			 */
			if (!ret && pi.is_guest_mode)
				svm_ir_list_add(svm, &pi);
		} else {
			/* Use legacy mode in IRTE */
			struct amd_iommu_pi_data pi;

			/**
			 * Here, pi is used to:
			 * - Tell IOMMU to use legacy mode for this interrupt.
			 * - Retrieve ga_tag of prior interrupt remapping data.
			 */
			pi.is_guest_mode = false;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/**
			 * Check if the posted interrupt was previously
			 * setup with the guest_mode by checking if the ga_tag
			 * was cached. If so, we need to clean up the per-vcpu
			 * ir_list.
			 */
			if (!ret && pi.prev_ga_tag) {
				int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
				struct kvm_vcpu *vcpu;

				vcpu = kvm_get_vcpu_by_id(kvm, id);
				if (vcpu)
					svm_ir_list_del(to_svm(vcpu), &pi);
			}
		}

		if (!ret && svm) {
			trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
						 host_irq, e->gsi,
						 vcpu_info.vector,
						 vcpu_info.pi_desc_addr, set);
		}

		if (ret < 0) {
			pr_err("%s: failed to update PI IRTE\n", __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

/*
 * Can an NMI be injected right now?  Requires no interrupt shadow, no
 * NMI already in flight, GIF set, and the nested hypervisor's consent.
 */
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

/* Is NMI delivery currently blocked (an NMI handler is in progress)? */
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

/* Set/clear NMI blocking, tracking IRET so unblocking can be observed. */
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

/*
 * Can an external interrupt be injected right now?  Requires GIF set,
 * no interrupt shadow, guest IF=1 and, when running nested, that L1
 * does not virtualize interrupts itself.
 */
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

/* Request a #VMEXIT as soon as the guest can accept an interrupt. */
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (kvm_vcpu_apicv_active(vcpu))
		return;

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

/*
 * Arrange to notice when NMI delivery becomes possible again, single-
 * stepping over whatever is currently blocking injection if needed.
 */
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from been injected. Single step over possible
	 * problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}

/* No TSS relocation is needed on SVM; kept for the kvm_x86_ops slot. */
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

/* Flush this vcpu's guest TLB: by ASID if supported, else new ASID. */
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

/* Nothing to do on SVM before switching to the guest. */
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

/* Propagate the V_TPR field back into the emulated APIC's CR8. */
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_nested_virtualize_tpr(vcpu))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

/* Propagate the emulated APIC's CR8 into the VMCB V_TPR field. */
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (svm_nested_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

/*
 * After a #VMEXIT, re-queue any event whose delivery was interrupted
 * (recorded in exit_int_info) so it is retried on the next entry, and
 * update NMI-blocking state based on observed IRET progress.
 */
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

/*
 * Convert an event that was queued for injection but never delivered
 * back into "interrupted delivery" state, then complete it as usual.
 */
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

/*
 * The guest-entry fast path: load guest RAX/RSP/RIP into the VMCB,
 * execute VMLOAD/VMRUN/VMSAVE, then recover host state and harvest
 * exit information.  Runs with interrupts enabled but GIF clear.
 */
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
*/ if (unlikely(svm->nested.exit_required)) return; pre_svm_run(svm); sync_lapic_to_cr8(vcpu); svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); local_irq_enable(); asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t" "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t" "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t" "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t" "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%[svm]), %%r8 \n\t" "mov %c[r9](%[svm]), %%r9 \n\t" "mov %c[r10](%[svm]), %%r10 \n\t" "mov %c[r11](%[svm]), %%r11 \n\t" "mov %c[r12](%[svm]), %%r12 \n\t" "mov %c[r13](%[svm]), %%r13 \n\t" "mov %c[r14](%[svm]), %%r14 \n\t" "mov %c[r15](%[svm]), %%r15 \n\t" #endif /* Enter guest mode */ "push %%" _ASM_AX " \n\t" "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t" __ex(SVM_VMLOAD) "\n\t" __ex(SVM_VMRUN) "\n\t" __ex(SVM_VMSAVE) "\n\t" "pop %%" _ASM_AX " \n\t" /* Save guest registers, load host registers */ "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t" "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t" "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t" "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t" "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t" "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%[svm]) \n\t" "mov %%r9, %c[r9](%[svm]) \n\t" "mov %%r10, %c[r10](%[svm]) \n\t" "mov %%r11, %c[r11](%[svm]) \n\t" "mov %%r12, %c[r12](%[svm]) \n\t" "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" #endif "pop %%" _ASM_BP : : [svm]"a"(svm), [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_svm, 
vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
#ifdef CONFIG_X86_64
		, "rbx", "rcx", "rdx", "rsi", "rdi"
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
		, "ebx", "ecx", "edx", "esi", "edi"
#endif
		);

	/* Restore host segment state that VMRUN/VMSAVE does not cover. */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	reload_tss(vcpu);

	local_irq_disable();

	/* Pull the guest's RAX/RSP/RIP/CR2 back out of the VMCB. */
	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_handle_nmi(&svm->vcpu);

	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_handle_nmi(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	/* With NPT the guest may have changed its PDPTRs; force a reload. */
	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

/* Install @root as the guest page-table root (shadow paging path). */
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	svm_flush_tlb(vcpu);
}

/* Install @root as the nested (NPT) page-table root. */
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);

	svm_flush_tlb(vcpu);
}

/* Returns 1 when the BIOS disabled SVM via the SVM_DISABLE bit in VM_CR. */
static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

/* Write the 3-byte VMMCALL opcode into the guest's hypercall page. */
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

/* SVM has no per-CPU compatibility constraints beyond has_svm(). */
static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static bool svm_has_high_real_mode_segbase(void)
{
	return true;
}

/* SVM does not encode memory types into NPT entries. */
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

/* Refresh per-vcpu CPUID-derived state after the CPUID tables change. */
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_cpuid_entry2 *entry;

	/* Update nrips enabled cache */
	svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);

	/* AVIC does not support x2APIC mode: hide it from the guest. */
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (entry)
		entry->ecx &= ~bit(X86_FEATURE_X2APIC);
}

/* Adjust the CPUID leaves KVM advertises for SVM-specific features. */
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x1:
		if (avic)
			entry->ecx &= ~bit(X86_FEATURE_X2APIC);
		break;
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* Per default do not support
any additional features */

		/* Support next_rip if host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

/* Largest page size usable for guest mappings: 1GB pages (PDPE level). */
static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return boot_cpu_has(X86_FEATURE_RDTSCP);
}

static bool svm_invpcid_supported(void)
{
	return false;
}

static bool svm_mpx_supported(void)
{
	return false;
}

static bool svm_xsaves_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

/*
 * Map the emulator's generic x86_intercept codes onto SVM exit codes and
 * the instruction stage at which SVM takes the intercept.
 */
#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		=
POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

/*
 * Called by the emulator while running L2: decide whether an emulated
 * instruction would have been intercepted by L1, and if so synthesize
 * the corresponding nested vmexit.
 */
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	/* Refine the generic exit code with instruction-specific detail. */
	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
icpt_info.exit_code += info->modrm_reg; if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || info->intercept == x86_intercept_clts) break; intercept = svm->nested.intercept; if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))) break; cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; if (info->intercept == x86_intercept_lmsw) { cr0 &= 0xfUL; val &= 0xfUL; /* lmsw can't clear PE - catch this here */ if (cr0 & X86_CR0_PE) val |= X86_CR0_PE; } if (cr0 ^ val) icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; break; } case SVM_EXIT_READ_DR0: case SVM_EXIT_WRITE_DR0: icpt_info.exit_code += info->modrm_reg; break; case SVM_EXIT_MSR: if (info->intercept == x86_intercept_wrmsr) vmcb->control.exit_info_1 = 1; else vmcb->control.exit_info_1 = 0; break; case SVM_EXIT_PAUSE: /* * We get this for NOP only, but pause * is rep not, check this here */ if (info->rep_prefix != REPE_PREFIX) goto out; case SVM_EXIT_IOIO: { u64 exit_info; u32 bytes; if (info->intercept == x86_intercept_in || info->intercept == x86_intercept_ins) { exit_info = ((info->src_val & 0xffff) << 16) | SVM_IOIO_TYPE_MASK; bytes = info->dst_bytes; } else { exit_info = (info->dst_val & 0xffff) << 16; bytes = info->src_bytes; } if (info->intercept == x86_intercept_outs || info->intercept == x86_intercept_ins) exit_info |= SVM_IOIO_STR_MASK; if (info->rep_prefix) exit_info |= SVM_IOIO_REP_MASK; bytes = min(bytes, 4u); exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); vmcb->control.exit_info_1 = exit_info; vmcb->control.exit_info_2 = info->next_rip; break; } default: break; } /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ if (static_cpu_has(X86_FEATURE_NRIPS)) vmcb->control.next_rip = info->next_rip; vmcb->control.exit_code = icpt_info.exit_code; vmexit = nested_svm_exit_handled(svm); ret = (vmexit == NESTED_EXIT_DONE) ? 
X86EMUL_INTERCEPTED : X86EMUL_CONTINUE;

out:
	return ret;
}

/* Briefly open an interrupt window so a pending timer tick can fire. */
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
	local_irq_enable();
	/*
	 * We must have an instruction with interrupts enabled, so
	 * the timer interrupt isn't delayed by the interrupt shadow.
	 */
	asm("nop");
	local_irq_disable();
}

/* No SVM-specific work on scheduler-in. */
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

/* Re-sync AVIC bookkeeping after APIC state is restored from userspace. */
static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
{
	if (avic_handle_apic_id_update(vcpu) != 0)
		return;
	if (avic_handle_dfr_update(vcpu) != 0)
		return;
	avic_handle_ldr_update(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

/* Dispatch table wiring the SVM backend into generic KVM x86 code. */
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_init = avic_vm_init,
	.vm_destroy = avic_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_bp_intercept = update_bp_intercept,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt =
svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.get_pkru = svm_get_pkru,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.get_enable_apicv = svm_get_enable_apicv,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,
	.mpx_supported = svm_mpx_supported,
	.xsaves_supported = svm_xsaves_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.write_tsc_offset = svm_write_tsc_offset,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,
};

/* Register the SVM backend with the generic KVM core. */
static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)
gpl-2.0
starlocke/pidgin-2.10.9-custom
libpurple/protocols/msn/group.c
13
2083
/**
 * @file group.c Group functions
 *
 * purple
 *
 * Purple is the legal property of its developers, whose names are too numerous
 * to list here. Please refer to the COPYRIGHT file distributed with this
 * source distribution.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111-1301  USA
 */
#include "msn.h"
#include "group.h"

/*
 * Allocate a new MsnGroup with the given id and name (both copied) and
 * register it with @userlist.  Returns NULL if id or name is NULL.
 * Ownership of the group stays with the userlist.
 */
MsnGroup *
msn_group_new(MsnUserList *userlist, const char *id, const char *name)
{
	MsnGroup *group;

	g_return_val_if_fail(id   != NULL, NULL);
	g_return_val_if_fail(name != NULL, NULL);

	group = g_new0(MsnGroup, 1);

	/*
	 * Fully initialize the group before publishing it to the userlist
	 * (the original code added it first, letting list consumers observe
	 * a group with NULL id/name).
	 */
	group->id   = g_strdup(id);
	group->name = g_strdup(name);

	msn_userlist_add_group(userlist, group);

	return group;
}

/* Free a group and the strings it owns.  Does NOT unlink it from a userlist. */
void
msn_group_destroy(MsnGroup *group)
{
	g_return_if_fail(group != NULL);

	g_free(group->id);
	g_free(group->name);
	g_free(group);
}

/* Replace the group's id with a copy of @id. */
void
msn_group_set_id(MsnGroup *group, const char *id)
{
	g_return_if_fail(group != NULL);
	g_return_if_fail(id    != NULL);

	g_free(group->id);
	group->id = g_strdup(id);
}

/* Replace the group's name with a copy of @name. */
void
msn_group_set_name(MsnGroup *group, const char *name)
{
	g_return_if_fail(group != NULL);
	g_return_if_fail(name  != NULL);

	g_free(group->name);
	group->name = g_strdup(name);
}

/* Returns the group's id (owned by the group; do not free). */
char *
msn_group_get_id(const MsnGroup *group)
{
	g_return_val_if_fail(group != NULL, NULL);

	return group->id;
}

/* Returns the group's name (owned by the group; do not free). */
const char *
msn_group_get_name(const MsnGroup *group)
{
	g_return_val_if_fail(group != NULL, NULL);

	return group->name;
}
gpl-2.0
pietrushnic/rpi-dt-linux
drivers/infiniband/hw/mlx4/mad.c
269
61453
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

/* Encoding of tunnel QP info into 64-bit work-request ids. */
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

 /* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

/* Receive buffer layout for MADs arriving with a GRH. */
struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

/* MAD as forwarded through the para-virtual tunnel QP to a slave. */
struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

/* Generate a random node GUID carrying the OpenIB OUI in the top bytes. */
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

/* Allocate a demux transaction id; the 0xff top byte marks it as ours. */
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int
mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
	     int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
	     void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		/* Extended input: describe the incoming completion to FW. */
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

/* Cache a fresh address handle for the subnet manager of @port_num. */
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			/* Diff the new P_Key block against the cached copy. */
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/*if master, notify relevant slaves*/
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}

/*
 * Forward a P_Key change event to every active slave whose virtual P_Key
 * table maps one of the changed physical indices.
 */
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			/* One event per slave is enough. */
			if (have_event)
				break;
		}
	}
}

/* Substitute our node description into SM NodeDesc query responses. */
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

/* Re-send a trap MAD up to the subnet manager via the cached SM AH. */
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

/* Route an incoming SA MAD to the matching SA-class demux handler. */
static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
							     struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

/* Find the slave whose cached GUID matches @guid; -1 if none. */
int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

/*
 * Translate @pkey to the physical P_Key index visible to @slave, preferring
 * a full-membership match over a partial one.
 */
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}

/*
 * Tunnel a MAD received on the wire down to @slave's para-virtual proxy QP.
 */
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	/* Post the tunnel MAD to the slave's proxy QP. */
	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

/*
 * Decide which slave (VF) an incoming MAD belongs to and forward it through
 * the tunnel QP; the master handles MADs addressed to itself.
 */
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			struct ib_wc *wc, struct ib_grh *grh,
			struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		/* RoCE path: only CM MADs with a GRH can be demuxed. */
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to
slave %d via tunnel qp (%d)\n", slave, err); return 0; } /* Initially assume that this mad is for us */ slave = mlx4_master_func_num(dev->dev); /* See if the slave id is encoded in a response mad */ if (mad->mad_hdr.method & 0x80) { slave_id = (u8 *) &mad->mad_hdr.tid; slave = *slave_id; if (slave != 255) /*255 indicates the dom0*/ *slave_id = 0; /* remap tid */ } /* If a grh is present, we demux according to it */ if (wc->wc_flags & IB_WC_GRH) { slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id); if (slave < 0) { mlx4_ib_warn(ibdev, "failed matching grh\n"); return -ENOENT; } } /* Class-specific handling */ switch (mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_LID_ROUTED: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: /* 255 indicates the dom0 */ if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) { if (!mlx4_vf_smi_enabled(dev->dev, slave, port)) return -EPERM; /* for a VF. drop unsolicited MADs */ if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) { mlx4_ib_warn(ibdev, "demux QP0. 
rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n", slave, mad->mad_hdr.mgmt_class, mad->mad_hdr.method); return -EINVAL; } } break; case IB_MGMT_CLASS_SUBN_ADM: if (mlx4_ib_demux_sa_handler(ibdev, port, slave, (struct ib_sa_mad *) mad)) return 0; break; case IB_MGMT_CLASS_CM: if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad)) return 0; break; case IB_MGMT_CLASS_DEVICE_MGMT: if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP) return 0; break; default: /* Drop unsupported classes for slaves in tunnel mode */ if (slave != mlx4_master_func_num(dev->dev)) { pr_debug("dropping unsupported ingress mad from class:%d " "for slave:%d\n", mad->mad_hdr.mgmt_class, slave); return 0; } } /*make sure that no slave==255 was not handled yet.*/ if (slave >= dev->dev->caps.sqp_demux) { mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", slave, dev->dev->caps.sqp_demux); return -ENOENT; } err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); if (err) pr_debug("failed sending to slave %d via tunnel qp (%d)\n", slave, err); return 0; } static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { u16 slid, prev_lid = 0; int err; struct ib_port_attr pattr; if (in_wc && in_wc->qp->qp_num) { pr_debug("received MAD: slid:%d sqpn:%d " "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n", in_wc->slid, in_wc->src_qp, in_wc->dlid_path_bits, in_wc->qp->qp_num, in_wc->wc_flags, in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method, be16_to_cpu(in_mad->mad_hdr.attr_id)); if (in_wc->wc_flags & IB_WC_GRH) { pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n", be64_to_cpu(in_grh->sgid.global.subnet_prefix), be64_to_cpu(in_grh->sgid.global.interface_id)); pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n", be64_to_cpu(in_grh->dgid.global.subnet_prefix), be64_to_cpu(in_grh->dgid.global.interface_id)); } } slid = in_wc ? 
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { forward_trap(to_mdev(ibdev), port_num, in_mad); return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) return IB_MAD_RESULT_SUCCESS; /* * Don't process SMInfo queries -- the SMA can't handle them. */ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) return IB_MAD_RESULT_SUCCESS; } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) return IB_MAD_RESULT_SUCCESS; } else return IB_MAD_RESULT_SUCCESS; /* remember the previous LID before a PortInfo SET so smp_snoop can detect a LID change */ if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && !ib_query_port(ibdev, port_num, &pattr)) prev_lid = pattr.lid; /* hand the MAD to firmware; flags mirror the IB_MAD_IGNORE_* request bits */ err = mlx4_MAD_IFC(to_mdev(ibdev), (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) | (mad_flags & IB_MAD_IGNORE_BKEY ? 
MLX4_MAD_IFC_IGNORE_BKEY : 0) | MLX4_MAD_IFC_NET_VIEW, port_num, in_wc, in_grh, in_mad, out_mad); if (err) return IB_MAD_RESULT_FAILURE; if (!out_mad->mad_hdr.status) { if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)) smp_snoop(ibdev, port_num, in_mad, prev_lid); /* slaves get node desc from FW */ if (!mlx4_is_slave(to_mdev(ibdev)->dev)) node_desc_override(ibdev, out_mad); } /* set return bit in status of directed route responses */ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) /* no response for trap repress */ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } /* Copy raw mlx4 HW counters into PMA port-counters layout; the PMA data counters are in units of 32-bit dwords, hence the >>2 on byte counts. */ static void edit_counter(struct mlx4_counter *cnt, struct ib_pma_portcounters *pma_cnt) { pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); } /* Handle a MAD on a RoCE (Ethernet) port: only PERF_MGMT is supported; counters are fetched from firmware via QUERY_IF_STAT and rewritten into the PMA response. */ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_ib_dev *dev = to_mdev(ibdev); int err; u32 inmod = dev->counters[port_num - 1] & 0xffff; u8 mode; if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) return -EINVAL; mailbox = mlx4_alloc_cmd_mailbox(dev->dev); if (IS_ERR(mailbox)) return IB_MAD_RESULT_FAILURE; err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0, MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_WRAPPED); if (err) err = IB_MAD_RESULT_FAILURE; else { memset(out_mad->data, 0, sizeof out_mad->data); mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode; switch (mode & 0xf) { case 0: edit_counter(mailbox->buf, (void 
*)(out_mad->data + 40)); err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; break; default: err = IB_MAD_RESULT_FAILURE; } } mlx4_free_cmd_mailbox(dev->dev, mailbox); return err; } /* Entry point for MAD processing: dispatch to the IB or RoCE handler based on the port's link layer. */ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { switch (rdma_port_get_link_layer(ibdev, port_num)) { case IB_LINK_LAYER_INFINIBAND: return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in_mad, out_mad); case IB_LINK_LAYER_ETHERNET: return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in_mad, out_mad); default: return -EINVAL; } } /* MAD agent send-completion callback: free the AH stashed in context[0] (if any) and release the send buffer. */ static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0]) ib_destroy_ah(mad_send_wc->send_buf->context[0]); ib_free_send_mad(mad_send_wc->send_buf); } /* Register SMI (q==0) and GSI (q==1) MAD agents for every IB link-layer port; Ethernet ports get NULL agents. Unwinds all registrations on failure. */ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) { struct ib_mad_agent *agent; int p, q; int ret; enum rdma_link_layer ll; for (p = 0; p < dev->num_ports; ++p) { ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1); for (q = 0; q <= 1; ++q) { if (ll == IB_LINK_LAYER_INFINIBAND) { agent = ib_register_mad_agent(&dev->ib_dev, p + 1, q ? 
IB_QPT_GSI : IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } dev->send_agent[p][q] = agent; } else dev->send_agent[p][q] = NULL; } } return 0; err: for (p = 0; p < dev->num_ports; ++p) for (q = 0; q <= 1; ++q) if (dev->send_agent[p][q]) ib_unregister_mad_agent(dev->send_agent[p][q]); return ret; } /* Unregister all MAD agents and destroy any cached SM address handles. */ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) { struct ib_mad_agent *agent; int p, q; for (p = 0; p < dev->num_ports; ++p) { for (q = 0; q <= 1; ++q) { agent = dev->send_agent[p][q]; if (agent) { dev->send_agent[p][q] = NULL; ib_unregister_mad_agent(agent); } } if (dev->sm_ah[p]) ib_destroy_ah(dev->sm_ah[p]); } } static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num) { mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE); if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, MLX4_EQ_PORT_INFO_LID_CHANGE_MASK); } static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num) { /* re-configure the alias-guid and mcg's */ if (mlx4_is_master(dev->dev)) { mlx4_ib_invalidate_all_guid_record(dev, port_num); if (!dev->sriov.is_going_down) { mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0); mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num, MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK); } } mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER); } static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, struct mlx4_eqe *eqe) { __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe), GET_MASK_FROM_EQE(eqe)); } /* On the mfunc master, re-read changed GUID table blocks from firmware and propagate them to the cache and to the affected slaves. change_bitmap selects which 8-entry sub-blocks of the 4 blocks actually changed. */ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num, u32 guid_tbl_blk_num, u32 change_bitmap) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; u16 i; if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev)) return; in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) { mlx4_ib_warn(&dev->ib_dev, 
"failed to allocate memory for guid info mads\n"); goto out; } guid_tbl_blk_num *= 4; for (i = 0; i < 4; i++) { if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff))) continue; memset(in_mad, 0, sizeof *in_mad); memset(out_mad, 0, sizeof *out_mad); in_mad->base_version = 1; in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; in_mad->class_version = 1; in_mad->method = IB_MGMT_METHOD_GET; in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i); if (mlx4_MAD_IFC(dev, MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW, port_num, NULL, NULL, in_mad, out_mad)) { mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n"); goto out; } mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i, port_num, (u8 *)(&((struct ib_smp *)out_mad)->data)); mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i, port_num, (u8 *)(&((struct ib_smp *)out_mad)->data)); } out: kfree(in_mad); kfree(out_mad); return; } /* Deferred (workqueue) handler for a PORT_MGMT_CHANGE EQE: dispatch the matching ib_events and, on the master, fan the change out to slaves. Frees the event work item when done. */ void handle_port_mgmt_change_event(struct work_struct *work) { struct ib_event_work *ew = container_of(work, struct ib_event_work, work); struct mlx4_ib_dev *dev = ew->ib_dev; struct mlx4_eqe *eqe = &(ew->ib_eqe); u8 port = eqe->event.port_mgmt_change.port; u32 changed_attr; u32 tbl_block; u32 change_bitmap; switch (eqe->subtype) { case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); /* Update the SM ah - This should be done before handling the other changed attributes so that MADs can be sent to the SM */ if (changed_attr & MSTR_SM_CHANGE_MASK) { u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; update_sm_ah(dev, port, lid, sl); } /* Check if it is a lid change event */ if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK) handle_lid_change_event(dev, port); /* Generate GUID changed event */ if (changed_attr & 
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify all slaves*/ if (mlx4_is_master(dev->dev)) mlx4_gen_slaves_port_mgt_ev(dev->dev, port, MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK); } if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK) handle_client_rereg_event(dev, port); break; case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE: mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) propagate_pkey_ev(dev, port, eqe); break; case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: /* paravirtualized master's guid is guid 0 -- does not change */ if (!mlx4_is_master(dev->dev)) mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify relevant slaves*/ else if (!dev->sriov.is_going_down) { tbl_block = GET_BLK_PTR_FROM_EQE(eqe); change_bitmap = GET_MASK_FROM_EQE(eqe); handle_slaves_guid_change(dev, port, tbl_block, change_bitmap); } break; default: pr_warn("Unsupported subtype 0x%x for " "Port Management Change event\n", eqe->subtype); } kfree(ew); } /* Build and dispatch a port-scoped ib_event to consumers of this device. */ void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, enum ib_event_type type) { struct ib_event event; event.device = &dev->ib_dev; event.element.port_num = port_num; event.event = type; ib_dispatch_event(&event); } /* Tunnel CQ completion callback: queue the context's work item unless the device is being torn down or the context is no longer active. */ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) { unsigned long flags; struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); spin_lock_irqsave(&dev->sriov.going_down_lock, flags); if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) queue_work(ctx->wq, &ctx->work); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } /* (Re)post one receive buffer on a para-virt QP; the wr_id encodes the ring index, the RECV flag and the proxy QP type. */ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, struct mlx4_ib_demux_pv_qp *tun_qp, int index) { struct ib_sge sg_list; struct ib_recv_wr recv_wr, *bad_recv_wr; int size; size = (tun_qp->qp->qp_type == IB_QPT_UD) ? 
sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf); sg_list.addr = tun_qp->ring[index].map; sg_list.length = size; sg_list.lkey = ctx->mr->lkey; recv_wr.next = NULL; recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt); ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, size, DMA_FROM_DEVICE); return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr); } /* Route an SA MAD tunnelled up from a slave to the proper SA sub-handler (currently only multicast member records). */ static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port, int slave, struct ib_sa_mad *sa_mad) { int ret = 0; /* dispatch to different sa handlers */ switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) { case IB_SA_ATTR_MC_MEMBER_REC: ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad); break; default: break; } return ret; } /* True when qpn is one of the two proxy QP0 numbers reserved for this slave. */ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) { int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave; return (qpn >= proxy_start && qpn <= proxy_start + 1); } /* Send a MAD originating from a slave out on the real wire through the master's special QP (QP0/QP1), translating the slave's pkey index to the physical one and building a fresh AH from the supplied attributes. */ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr, u8 *s_mac, struct ib_mad *mad) { struct ib_sge list; struct ib_send_wr wr, *bad_wr; struct mlx4_ib_demux_pv_ctx *sqp_ctx; struct mlx4_ib_demux_pv_qp *sqp; struct mlx4_mad_snd_buf *sqp_mad; struct ib_ah *ah; struct ib_qp *send_qp = NULL; unsigned wire_tx_ix = 0; int ret = 0; u16 wire_pkey_ix; int src_qpnum; u8 sgid_index; sqp_ctx = dev->sriov.sqps[port-1]; /* check if proxy qp created */ if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE) return -EAGAIN; if (dest_qpt == IB_QPT_SMI) { src_qpnum = 0; sqp = &sqp_ctx->qp[0]; wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0]; } else { src_qpnum = 1; sqp = &sqp_ctx->qp[1]; wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index]; } send_qp = sqp->qp; /* create ah */ sgid_index = attr->grh.sgid_index; attr->grh.sgid_index = 0; ah = 
ib_create_ah(sqp_ctx->pd, attr); if (IS_ERR(ah)) return -ENOMEM; attr->grh.sgid_index = sgid_index; to_mah(ah)->av.ib.gid_index = sgid_index; /* get rid of force-loopback bit */ to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF); spin_lock(&sqp->tx_lock); if (sqp->tx_ix_head - sqp->tx_ix_tail >= (MLX4_NUM_TUNNEL_BUFS - 1)) ret = -EAGAIN; else wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); spin_unlock(&sqp->tx_lock); if (ret) goto out; sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); if (sqp->tx_ring[wire_tx_ix].ah) ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah); sqp->tx_ring[wire_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, sizeof (struct mlx4_mad_snd_buf), DMA_TO_DEVICE); memcpy(&sqp_mad->payload, mad, sizeof *mad); ib_dma_sync_single_for_device(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, sizeof (struct mlx4_mad_snd_buf), DMA_TO_DEVICE); list.addr = sqp->tx_ring[wire_tx_ix].buf.map; list.length = sizeof (struct mlx4_mad_snd_buf); list.lkey = sqp_ctx->mr->lkey; wr.wr.ud.ah = ah; wr.wr.ud.port_num = port; wr.wr.ud.pkey_index = wire_pkey_ix; wr.wr.ud.remote_qkey = qkey; wr.wr.ud.remote_qpn = remote_qpn; wr.next = NULL; wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); wr.sg_list = &list; wr.num_sge = 1; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; if (s_mac) memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6); ret = ib_post_send(send_qp, &wr, &bad_wr); out: if (ret) ib_destroy_ah(ah); return ret; } /* On IB the slave's base GID index is the slave number itself; on RoCE it must be looked up in the core. */ static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port) { if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) return slave; return mlx4_get_base_gid_ix(dev->dev, slave, port); } /* Translate a slave-relative sgid_index in ah_attr into the real index in the physical GID table. */ static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port, struct ib_ah_attr *ah_attr) { if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) ah_attr->grh.sgid_index = slave; else 
ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port); } /* Multiplex a MAD that a slave pushed into its tunnel QP: validate that the sending proxy QP really belongs to that slave, stamp the slave id into the TID, apply class-specific handling, then send it out on the wire on the slave's behalf. */ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) { struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1); struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr; struct mlx4_ib_ah ah; struct ib_ah_attr ah_attr; u8 *slave_id; int slave; int port; /* Get slave that sent this packet */ if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX || (wc->src_qp & 0x1) != ctx->port - 1 || wc->src_qp & 0x4) { mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); return; } slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8; if (slave != ctx->slave) { mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " "belongs to another slave\n", wc->src_qp); return; } /* Map transaction ID */ ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, sizeof (struct mlx4_tunnel_mad), DMA_FROM_DEVICE); switch (tunnel->mad.mad_hdr.method) { case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_GET: case IB_MGMT_METHOD_REPORT: case IB_SA_METHOD_GET_TABLE: case IB_SA_METHOD_DELETE: case IB_SA_METHOD_GET_MULTI: case IB_SA_METHOD_GET_TRACE_TBL: slave_id = (u8 *) &tunnel->mad.mad_hdr.tid; if (*slave_id) { mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " "class:%d slave:%d\n", *slave_id, tunnel->mad.mad_hdr.mgmt_class, slave); return; } else *slave_id = slave; default: /* nothing */; } /* Class-specific handling */ switch (tunnel->mad.mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_LID_ROUTED: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: if (slave != mlx4_master_func_num(dev->dev) && !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) return; break; case IB_MGMT_CLASS_SUBN_ADM: if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, (struct ib_sa_mad 
*) &tunnel->mad)) return; break; case IB_MGMT_CLASS_CM: if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, (struct ib_mad *) &tunnel->mad)) return; break; case IB_MGMT_CLASS_DEVICE_MGMT: if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET && tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET) return; break; default: /* Drop unsupported classes for slaves in tunnel mode */ if (slave != mlx4_master_func_num(dev->dev)) { mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave); return; } } /* We are using standard ib_core services to send the mad, so generate a * standard address handle by decoding the tunnelled mlx4_ah fields */ memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av)); ah.ibah.device = ctx->ib_dev; mlx4_ib_query_ah(&ah.ibah, &ah_attr); if (ah_attr.ah_flags & IB_AH_GRH) fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num); if (port < 0) return; ah_attr.port_num = port; memcpy(ah_attr.dmac, tunnel->hdr.mac, 6); ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan); /* if slave have default vlan use it */ mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, &ah_attr.vlan_id, &ah_attr.sl); mlx4_ib_send_to_wire(dev, slave, ctx->port, is_proxy_qp0(dev, wc->src_qp, slave) ? 
IB_QPT_SMI : IB_QPT_GSI, be16_to_cpu(tunnel->hdr.pkey_index), be32_to_cpu(tunnel->hdr.remote_qpn), be32_to_cpu(tunnel->hdr.qkey), &ah_attr, wc->smac, &tunnel->mad); } /* Allocate and DMA-map the RX ring and TX ring buffers for one para-virt QP (tunnel or special); unwinds all mappings/allocations on failure. */ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int is_tun) { int i; struct mlx4_ib_demux_pv_qp *tun_qp; int rx_buf_size, tx_buf_size; if (qp_type > IB_QPT_GSI) return -EINVAL; tun_qp = &ctx->qp[qp_type]; tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS, GFP_KERNEL); if (!tun_qp->ring) return -ENOMEM; tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS, sizeof (struct mlx4_ib_tun_tx_buf), GFP_KERNEL); if (!tun_qp->tx_ring) { kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM; } if (is_tun) { rx_buf_size = sizeof (struct mlx4_tunnel_mad); tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad); } else { rx_buf_size = sizeof (struct mlx4_mad_rcv_buf); tx_buf_size = sizeof (struct mlx4_mad_snd_buf); } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL); if (!tun_qp->ring[i].addr) goto err; tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, tun_qp->ring[i].addr, rx_buf_size, DMA_FROM_DEVICE); } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { tun_qp->tx_ring[i].buf.addr = kmalloc(tx_buf_size, GFP_KERNEL); if (!tun_qp->tx_ring[i].buf.addr) goto tx_err; tun_qp->tx_ring[i].buf.map = ib_dma_map_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.addr, tx_buf_size, DMA_TO_DEVICE); tun_qp->tx_ring[i].ah = NULL; } spin_lock_init(&tun_qp->tx_lock); tun_qp->tx_ix_head = 0; tun_qp->tx_ix_tail = 0; tun_qp->proxy_qpt = qp_type; return 0; tx_err: while (i > 0) { --i; ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); } kfree(tun_qp->tx_ring); tun_qp->tx_ring = NULL; i = MLX4_NUM_TUNNEL_BUFS; err: while (i > 0) { --i; ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } 
kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM; } /* Unmap and free the RX/TX rings allocated by mlx4_ib_alloc_pv_bufs(), destroying any AH still attached to a TX entry. */ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int is_tun) { int i; struct mlx4_ib_demux_pv_qp *tun_qp; int rx_buf_size, tx_buf_size; if (qp_type > IB_QPT_GSI) return; tun_qp = &ctx->qp[qp_type]; if (is_tun) { rx_buf_size = sizeof (struct mlx4_tunnel_mad); tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad); } else { rx_buf_size = sizeof (struct mlx4_mad_rcv_buf); tx_buf_size = sizeof (struct mlx4_mad_snd_buf); } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); if (tun_qp->tx_ring[i].ah) ib_destroy_ah(tun_qp->tx_ring[i].ah); } kfree(tun_qp->tx_ring); kfree(tun_qp->ring); } /* Workqueue body for tunnel QP completions: multiplex received MADs to the wire and recycle their buffers; on sends, release the AH and advance the TX tail under the tx_lock. */ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work) { struct mlx4_ib_demux_pv_ctx *ctx; struct mlx4_ib_demux_pv_qp *tun_qp; struct ib_wc wc; int ret; ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_RECV: mlx4_ib_multiplex_mad(ctx, &wc); ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)); if (ret) pr_err("Failed reposting tunnel " "buf:%lld\n", wc.wr_id); break; case IB_WC_SEND: pr_debug("received tunnel send completion:" "wrid=0x%llx, status=0x%x\n", wc.wr_id, wc.status); ib_destroy_ah(tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&tun_qp->tx_lock); tun_qp->tx_ix_tail++; spin_unlock(&tun_qp->tx_lock); break; default: break; } } else { pr_debug("mlx4_ib: completion error in 
tunnel: %d." " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { ib_destroy_ah(tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&tun_qp->tx_lock); tun_qp->tx_ix_tail++; spin_unlock(&tun_qp->tx_lock); } } } } static void pv_qp_event_handler(struct ib_event *event, void *qp_context) { struct mlx4_ib_demux_pv_ctx *sqp = qp_context; /* It's worse than that! He's dead, Jim! */ pr_err("Fatal error (%d) on a MAD QP on port %d\n", event->event, sqp->port); } /* Create one para-virt QP (tunnel UD QP or proxy SMI/GSI QP), drive it through INIT->RTR->RTS, and pre-post its full receive ring. */ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, enum ib_qp_type qp_type, int create_tun) { int i, ret; struct mlx4_ib_demux_pv_qp *tun_qp; struct mlx4_ib_qp_tunnel_init_attr qp_init_attr; struct ib_qp_attr attr; int qp_attr_mask_INIT; if (qp_type > IB_QPT_GSI) return -EINVAL; tun_qp = &ctx->qp[qp_type]; memset(&qp_init_attr, 0, sizeof qp_init_attr); qp_init_attr.init_attr.send_cq = ctx->cq; qp_init_attr.init_attr.recv_cq = ctx->cq; qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS; qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS; qp_init_attr.init_attr.cap.max_send_sge = 1; qp_init_attr.init_attr.cap.max_recv_sge = 1; if (create_tun) { qp_init_attr.init_attr.qp_type = IB_QPT_UD; qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP; qp_init_attr.port = ctx->port; qp_init_attr.slave = ctx->slave; qp_init_attr.proxy_qp_type = qp_type; qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT; } else { qp_init_attr.init_attr.qp_type = qp_type; qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP; qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY; } qp_init_attr.init_attr.port_num = ctx->port; qp_init_attr.init_attr.qp_context = ctx; qp_init_attr.init_attr.event_handler = pv_qp_event_handler; tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); if 
(IS_ERR(tun_qp->qp)) { ret = PTR_ERR(tun_qp->qp); tun_qp->qp = NULL; pr_err("Couldn't create %s QP (%d)\n", create_tun ? "tunnel" : "special", ret); return ret; } /* take the QP through the INIT -> RTR -> RTS state transitions */ memset(&attr, 0, sizeof attr); attr.qp_state = IB_QPS_INIT; ret = 0; if (create_tun) ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, ctx->port, IB_DEFAULT_PKEY_FULL, &attr.pkey_index); if (ret || !create_tun) attr.pkey_index = to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; attr.qkey = IB_QP1_QKEY; attr.port_num = ctx->port; ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); if (ret) { pr_err("Couldn't change %s qp state to INIT (%d)\n", create_tun ? "tunnel" : "special", ret); goto err_qp; } attr.qp_state = IB_QPS_RTR; ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); if (ret) { pr_err("Couldn't change %s qp state to RTR (%d)\n", create_tun ? "tunnel" : "special", ret); goto err_qp; } attr.qp_state = IB_QPS_RTS; attr.sq_psn = 0; ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { pr_err("Couldn't change %s qp state to RTS (%d)\n", create_tun ? 
"tunnel" : "special", ret); goto err_qp; } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); if (ret) { pr_err(" mlx4_ib_post_pv_buf error" " (err = %d, i = %d)\n", ret, i); goto err_qp; } } return 0; err_qp: ib_destroy_qp(tun_qp->qp); tun_qp->qp = NULL; return ret; } /* * IB MAD completion callback for real SQPs */ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) { struct mlx4_ib_demux_pv_ctx *ctx; struct mlx4_ib_demux_pv_qp *sqp; struct ib_wc wc; struct ib_grh *grh; struct ib_mad *mad; ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&sqp->tx_lock); sqp->tx_ix_tail++; spin_unlock(&sqp->tx_lock); break; case IB_WC_RECV: mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *) (sqp->ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload); grh = &(((struct mlx4_mad_rcv_buf *) (sqp->ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh); mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1))) pr_err("Failed reposting SQP " "buf:%lld\n", wc.wr_id); break; default: BUG_ON(1); break; } } else { pr_debug("mlx4_ib: completion error in tunnel: %d." 
" status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; spin_lock(&sqp->tx_lock); sqp->tx_ix_tail++; spin_unlock(&sqp->tx_lock); } } } } /* Allocate a zeroed para-virt context for (slave, port); the QPs and CQ are created later by create_pv_resources(). */ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port, struct mlx4_ib_demux_pv_ctx **ret_ctx) { struct mlx4_ib_demux_pv_ctx *ctx; *ret_ctx = NULL; ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); if (!ctx) { pr_err("failed allocating pv resource context " "for port %d, slave %d\n", port, slave); return -ENOMEM; } ctx->ib_dev = &dev->ib_dev; ctx->port = port; ctx->slave = slave; *ret_ctx = ctx; return 0; } /* Free the tunnel context slot for (slave, port), if allocated. */ static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port) { if (dev->sriov.demux[port - 1].tun[slave]) { kfree(dev->sriov.demux[port - 1].tun[slave]); dev->sriov.demux[port - 1].tun[slave] = NULL; } } /* Bring a para-virt context to ACTIVE: allocate buffers, CQ, PD, MR and the SMI/GSI QPs, arm the CQ, and register the completion worker. Unwinds everything in reverse order on failure. */ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) { int ret, cq_size; if (ctx->state != DEMUX_PV_STATE_DOWN) return -EEXIST; ctx->state = DEMUX_PV_STATE_STARTING; /* have QP0 only if link layer is IB */ if (rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND) ctx->has_smi = 1; if (ctx->has_smi) { ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); if (ret) { pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret); goto err_out; } } ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); if (ret) { pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret); goto err_out_qp0; } cq_size = 2 * MLX4_NUM_TUNNEL_BUFS; if (ctx->has_smi) cq_size *= 2; ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, NULL, ctx, cq_size, 0); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); pr_err("Couldn't create tunnel CQ (%d)\n", ret); goto err_buf; } ctx->pd = ib_alloc_pd(ctx->ib_dev); if (IS_ERR(ctx->pd)) { ret = 
PTR_ERR(ctx->pd); pr_err("Couldn't create tunnel PD (%d)\n", ret); goto err_cq; } ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(ctx->mr)) { ret = PTR_ERR(ctx->mr); pr_err("Couldn't get tunnel DMA MR (%d)\n", ret); goto err_pd; } if (ctx->has_smi) { ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); if (ret) { pr_err("Couldn't create %s QP0 (%d)\n", create_tun ? "tunnel for" : "", ret); goto err_mr; } } ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); if (ret) { pr_err("Couldn't create %s QP1 (%d)\n", create_tun ? "tunnel for" : "", ret); goto err_qp0; } if (create_tun) INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); else INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); if (ret) { pr_err("Couldn't arm tunnel cq (%d)\n", ret); goto err_wq; } ctx->state = DEMUX_PV_STATE_ACTIVE; return 0; err_wq: ctx->wq = NULL; ib_destroy_qp(ctx->qp[1].qp); ctx->qp[1].qp = NULL; err_qp0: if (ctx->has_smi) ib_destroy_qp(ctx->qp[0].qp); ctx->qp[0].qp = NULL; err_mr: ib_dereg_mr(ctx->mr); ctx->mr = NULL; err_pd: ib_dealloc_pd(ctx->pd); ctx->pd = NULL; err_cq: ib_destroy_cq(ctx->cq); ctx->cq = NULL; err_buf: mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); err_out_qp0: if (ctx->has_smi) mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); err_out: ctx->state = DEMUX_PV_STATE_DOWN; return ret; } /* Tear down a para-virt context created by create_pv_resources(), optionally flushing its workqueue first; resources are released in reverse creation order. */ static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, struct mlx4_ib_demux_pv_ctx *ctx, int flush) { if (!ctx) return; if (ctx->state > DEMUX_PV_STATE_DOWN) { ctx->state = DEMUX_PV_STATE_DOWNING; if (flush) flush_workqueue(ctx->wq); if (ctx->has_smi) { ib_destroy_qp(ctx->qp[0].qp); ctx->qp[0].qp = NULL; mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); } ib_destroy_qp(ctx->qp[1].qp); ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); ib_dereg_mr(ctx->mr); ctx->mr = NULL; ib_dealloc_pd(ctx->pd); ctx->pd = NULL; ib_destroy_cq(ctx->cq); 
ctx->cq = NULL; ctx->state = DEMUX_PV_STATE_DOWN; } } /* Create (do_init) or destroy a slave's tunnel QP resources on a port; for the master function the real special-QP resources are handled too. */ static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave, int port, int do_init) { int ret = 0; if (!do_init) { clean_vf_mcast(&dev->sriov.demux[port - 1], slave); /* for master, destroy real sqp resources */ if (slave == mlx4_master_func_num(dev->dev)) destroy_pv_resources(dev, slave, port, dev->sriov.sqps[port - 1], 1); /* destroy the tunnel qp resources */ destroy_pv_resources(dev, slave, port, dev->sriov.demux[port - 1].tun[slave], 1); return 0; } /* create the tunnel qp resources */ ret = create_pv_resources(&dev->ib_dev, slave, port, 1, dev->sriov.demux[port - 1].tun[slave]); /* for master, create the real sqp resources */ if (!ret && slave == mlx4_master_func_num(dev->dev)) ret = create_pv_resources(&dev->ib_dev, slave, port, 0, dev->sriov.sqps[port - 1]); return ret; } /* Workqueue wrapper around mlx4_ib_tunnels_update(); frees the work item. */ void mlx4_ib_tunnels_update_work(struct work_struct *work) { struct mlx4_ib_demux_work *dmxw; dmxw = container_of(work, struct mlx4_ib_demux_work, work); mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port, dmxw->do_init); kfree(dmxw); return; } /* Set up the per-port demux context: per-slave tunnel context slots, the multicast para-virt state, and the tunnelling and up/down workqueues. */ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) { char name[12]; int ret = 0; int i; ctx->tun = kcalloc(dev->dev->caps.sqp_demux, sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL); if (!ctx->tun) return -ENOMEM; ctx->dev = dev; ctx->port = port; ctx->ib_dev = &dev->ib_dev; for (i = 0; i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); i++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev->dev, i); if (!test_bit(port - 1, actv_ports.ports)) continue; ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); if (ret) { ret = -ENOMEM; goto err_mcg; } } ret = mlx4_ib_mcg_port_init(ctx); if (ret) { pr_err("Failed initializing mcg para-virt (%d)\n", ret); goto err_mcg; } snprintf(name, sizeof name, "mlx4_ibt%d", port); ctx->wq = create_singlethread_workqueue(name); if (!ctx->wq) { pr_err("Failed to 
create tunnelling WQ for port %d\n", port); ret = -ENOMEM; goto err_wq; } snprintf(name, sizeof name, "mlx4_ibud%d", port); ctx->ud_wq = create_singlethread_workqueue(name); if (!ctx->ud_wq) { pr_err("Failed to create up/down WQ for port %d\n", port); ret = -ENOMEM; goto err_udwq; } return 0; err_udwq: destroy_workqueue(ctx->wq); ctx->wq = NULL; err_wq: mlx4_ib_mcg_port_cleanup(ctx, 1); err_mcg: for (i = 0; i < dev->dev->caps.sqp_demux; i++) free_pv_object(dev, i, port); kfree(ctx->tun); ctx->tun = NULL; return ret; } static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx) { if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) { sqp_ctx->state = DEMUX_PV_STATE_DOWNING; flush_workqueue(sqp_ctx->wq); if (sqp_ctx->has_smi) { ib_destroy_qp(sqp_ctx->qp[0].qp); sqp_ctx->qp[0].qp = NULL; mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0); } ib_destroy_qp(sqp_ctx->qp[1].qp); sqp_ctx->qp[1].qp = NULL; mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0); ib_dereg_mr(sqp_ctx->mr); sqp_ctx->mr = NULL; ib_dealloc_pd(sqp_ctx->pd); sqp_ctx->pd = NULL; ib_destroy_cq(sqp_ctx->cq); sqp_ctx->cq = NULL; sqp_ctx->state = DEMUX_PV_STATE_DOWN; } } static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) { int i; if (ctx) { struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); mlx4_ib_mcg_port_cleanup(ctx, 1); for (i = 0; i < dev->dev->caps.sqp_demux; i++) { if (!ctx->tun[i]) continue; if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; } flush_workqueue(ctx->wq); for (i = 0; i < dev->dev->caps.sqp_demux; i++) { destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); free_pv_object(dev, i, ctx->port); } kfree(ctx->tun); destroy_workqueue(ctx->ud_wq); destroy_workqueue(ctx->wq); } } static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init) { int i; if (!mlx4_is_master(dev->dev)) return; /* initialize or tear down tunnel QPs for the master */ for (i = 0; i < dev->dev->caps.num_ports; i++) mlx4_ib_tunnels_update(dev, 
mlx4_master_func_num(dev->dev), i + 1, do_init); return; } int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) { int i = 0; int err; if (!mlx4_is_mfunc(dev->dev)) return 0; dev->sriov.is_going_down = 0; spin_lock_init(&dev->sriov.going_down_lock); mlx4_ib_cm_paravirt_init(dev); mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n"); if (mlx4_is_slave(dev->dev)) { mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n"); return 0; } for (i = 0; i < dev->dev->caps.sqp_demux; i++) { if (i == mlx4_master_func_num(dev->dev)) mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid); else mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid()); } err = mlx4_ib_init_alias_guid_service(dev); if (err) { mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n"); goto paravirt_err; } err = mlx4_ib_device_register_sysfs(dev); if (err) { mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n"); goto sysfs_err; } mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n", dev->dev->caps.sqp_demux); for (i = 0; i < dev->num_ports; i++) { union ib_gid gid; err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1); if (err) goto demux_err; dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, &dev->sriov.sqps[i]); if (err) goto demux_err; err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); if (err) goto free_pv; } mlx4_ib_master_tunnels(dev, 1); return 0; free_pv: free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); demux_err: while (--i >= 0) { free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); } mlx4_ib_device_unregister_sysfs(dev); sysfs_err: mlx4_ib_destroy_alias_guid_service(dev); paravirt_err: mlx4_ib_cm_paravirt_clean(dev, -1); return err; } void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev) { int i; unsigned long flags; if (!mlx4_is_mfunc(dev->dev)) return; 
spin_lock_irqsave(&dev->sriov.going_down_lock, flags); dev->sriov.is_going_down = 1; spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); if (mlx4_is_master(dev->dev)) { for (i = 0; i < dev->num_ports; i++) { flush_workqueue(dev->sriov.demux[i].ud_wq); mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); kfree(dev->sriov.sqps[i]); dev->sriov.sqps[i] = NULL; mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); } mlx4_ib_cm_paravirt_clean(dev, -1); mlx4_ib_destroy_alias_guid_service(dev); mlx4_ib_device_unregister_sysfs(dev); } }
gpl-2.0
akuster/linux-yocto-3.14
drivers/video/backlight/backlight.c
269
14360
/*
 * Backlight Lowlevel Control Abstraction
 *
 * Copyright (C) 2003,2004 Hewlett-Packard Company
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/backlight.h>
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/slab.h>

#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif

/* global registry of backlight devices, guarded by its mutex */
static struct list_head backlight_dev_list;
static struct mutex backlight_dev_list_mutex;

/* sysfs names for enum backlight_type, indexed by type value */
static const char *const backlight_types[] = {
    [BACKLIGHT_RAW] = "raw",
    [BACKLIGHT_PLATFORM] = "platform",
    [BACKLIGHT_FIRMWARE] = "firmware",
};

#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
               defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
/* This callback gets called when something important happens inside a
 * framebuffer driver. We're looking if that important event is blanking,
 * and if it is, we're switching backlight power as well ...
 */
static int fb_notifier_callback(struct notifier_block *self,
                unsigned long event, void *data)
{
    struct backlight_device *bd;
    struct fb_event *evdata = data;

    /* If we aren't interested in this event, skip it immediately ... */
    if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK)
        return 0;

    bd = container_of(self, struct backlight_device, fb_notif);
    mutex_lock(&bd->ops_lock);
    if (bd->ops)
        /* let the driver veto via check_fb() if this fb isn't ours */
        if (!bd->ops->check_fb ||
            bd->ops->check_fb(bd, evdata->info)) {
            bd->props.fb_blank = *(int *)evdata->data;
            if (bd->props.fb_blank == FB_BLANK_UNBLANK)
                bd->props.state &= ~BL_CORE_FBBLANK;
            else
                bd->props.state |= BL_CORE_FBBLANK;
            backlight_update_status(bd);
        }
    mutex_unlock(&bd->ops_lock);
    return 0;
}

/* Hook this backlight device into the framebuffer blank notifier chain. */
static int backlight_register_fb(struct backlight_device *bd)
{
    memset(&bd->fb_notif, 0, sizeof(bd->fb_notif));
    bd->fb_notif.notifier_call = fb_notifier_callback;

    return fb_register_client(&bd->fb_notif);
}

static void backlight_unregister_fb(struct backlight_device *bd)
{
    fb_unregister_client(&bd->fb_notif);
}
#else
/* Without CONFIG_FB these are no-ops. */
static inline int backlight_register_fb(struct backlight_device *bd)
{
    return 0;
}

static inline void backlight_unregister_fb(struct backlight_device *bd)
{
}
#endif /* CONFIG_FB */

/*
 * Emit a KOBJ_CHANGE uevent (with a SOURCE= hint for userspace) and poke
 * sysfs pollers of "actual_brightness".
 */
static void backlight_generate_event(struct backlight_device *bd,
                     enum backlight_update_reason reason)
{
    char *envp[2];

    switch (reason) {
    case BACKLIGHT_UPDATE_SYSFS:
        envp[0] = "SOURCE=sysfs";
        break;
    case BACKLIGHT_UPDATE_HOTKEY:
        envp[0] = "SOURCE=hotkey";
        break;
    default:
        envp[0] = "SOURCE=unknown";
        break;
    }
    envp[1] = NULL;
    kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp);
    sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
}

/* sysfs "bl_power": show the cached power state */
static ssize_t bl_power_show(struct device *dev, struct device_attribute *attr,
        char *buf)
{
    struct backlight_device *bd = to_backlight_device(dev);

    return sprintf(buf, "%d\n", bd->props.power);
}

/* sysfs "bl_power": set power state; -ENXIO if the driver is gone */
static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
        const char *buf, size_t count)
{
    int rc;
    struct backlight_device *bd = to_backlight_device(dev);
    unsigned long power;

    rc = kstrtoul(buf, 0, &power);
    if (rc)
        return rc;

    rc = -ENXIO;
    mutex_lock(&bd->ops_lock);
    if (bd->ops) {
        pr_debug("set power to %lu\n", power);
        if (bd->props.power != power) {
            bd->props.power = power;
            backlight_update_status(bd);
        }
        rc = count;
    }
    mutex_unlock(&bd->ops_lock);

    return rc;
}
static DEVICE_ATTR_RW(bl_power);

/* sysfs "brightness": show the requested (cached) brightness */
static ssize_t brightness_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct backlight_device *bd = to_backlight_device(dev);

    return sprintf(buf, "%d\n", bd->props.brightness);
}

/*
 * sysfs "brightness": set brightness; rejects values above
 * max_brightness with -EINVAL, -ENXIO if the driver is gone.  Always
 * generates an update event, even on failure.
 */
static ssize_t brightness_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
    int rc;
    struct backlight_device *bd = to_backlight_device(dev);
    unsigned long brightness;

    rc = kstrtoul(buf, 0, &brightness);
    if (rc)
        return rc;

    rc = -ENXIO;

    mutex_lock(&bd->ops_lock);
    if (bd->ops) {
        if (brightness > bd->props.max_brightness)
            rc = -EINVAL;
        else {
            pr_debug("set brightness to %lu\n", brightness);
            bd->props.brightness = brightness;
            backlight_update_status(bd);
            rc = count;
        }
    }
    mutex_unlock(&bd->ops_lock);

    backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);

    return rc;
}
static DEVICE_ATTR_RW(brightness);

/* sysfs "type": raw / platform / firmware */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
        char *buf)
{
    struct backlight_device *bd = to_backlight_device(dev);

    return sprintf(buf, "%s\n", backlight_types[bd->props.type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t max_brightness_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct backlight_device *bd = to_backlight_device(dev);

    return sprintf(buf, "%d\n", bd->props.max_brightness);
}
static DEVICE_ATTR_RO(max_brightness);

/* sysfs "actual_brightness": query the hardware via get_brightness() */
static ssize_t actual_brightness_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int rc = -ENXIO;
    struct backlight_device *bd = to_backlight_device(dev);

    mutex_lock(&bd->ops_lock);
    if (bd->ops && bd->ops->get_brightness)
        rc = sprintf(buf, "%d\n", bd->ops->get_brightness(bd));
    mutex_unlock(&bd->ops_lock);

    return rc;
}
static DEVICE_ATTR_RO(actual_brightness);

static struct class *backlight_class;

#ifdef CONFIG_PM_SLEEP
/* System suspend: blank devices that opted in via BL_CORE_SUSPENDRESUME. */
static int backlight_suspend(struct device *dev)
{
    struct backlight_device *bd = to_backlight_device(dev);

    mutex_lock(&bd->ops_lock);
    if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
        bd->props.state |= BL_CORE_SUSPENDED;
        backlight_update_status(bd);
    }
    mutex_unlock(&bd->ops_lock);

    return 0;
}

/* System resume: clear the SUSPENDED state and restore the backlight. */
static int backlight_resume(struct device *dev)
{
    struct backlight_device *bd = to_backlight_device(dev);

    mutex_lock(&bd->ops_lock);
    if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
        bd->props.state &= ~BL_CORE_SUSPENDED;
        backlight_update_status(bd);
    }
    mutex_unlock(&bd->ops_lock);

    return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(backlight_class_dev_pm_ops, backlight_suspend,
             backlight_resume);

/* device release: frees the backlight_device allocated at registration */
static void bl_device_release(struct device *dev)
{
    struct backlight_device *bd = to_backlight_device(dev);
    kfree(bd);
}

static struct attribute *bl_device_attrs[] = {
    &dev_attr_bl_power.attr,
    &dev_attr_brightness.attr,
    &dev_attr_actual_brightness.attr,
    &dev_attr_max_brightness.attr,
    &dev_attr_type.attr,
    NULL,
};
ATTRIBUTE_GROUPS(bl_device);

/**
 * backlight_force_update - tell the backlight subsystem that hardware state
 *   has changed
 * @bd: the backlight device to update
 * @reason: what triggered the update (sysfs, hotkey, ...)
 *
 * Updates the internal state of the backlight in response to a hardware event,
 * and generate a uevent to notify userspace
 */
void backlight_force_update(struct backlight_device *bd,
                enum backlight_update_reason reason)
{
    mutex_lock(&bd->ops_lock);
    if (bd->ops && bd->ops->get_brightness)
        bd->props.brightness = bd->ops->get_brightness(bd);
    mutex_unlock(&bd->ops_lock);
    backlight_generate_event(bd, reason);
}
EXPORT_SYMBOL(backlight_force_update);

/**
 * backlight_device_register - create and register a new object of
 *   backlight_device class.
 * @name: the name of the new object(must be the same as the name of the
 *   respective framebuffer device).
 * @parent: a pointer to the parent device
 * @devdata: an optional pointer to be stored for private driver use. The
 *   methods may retrieve it by using bl_get_data(bd).
 * @ops: the backlight operations structure.
 * @props: initial backlight properties (copied; may be NULL)
 *
 * Creates and registers new backlight device. Returns either an
 * ERR_PTR() or a pointer to the newly allocated device.
 */
struct backlight_device *backlight_device_register(const char *name,
    struct device *parent, void *devdata, const struct backlight_ops *ops,
    const struct backlight_properties *props)
{
    struct backlight_device *new_bd;
    int rc;

    pr_debug("backlight_device_register: name=%s\n", name);

    new_bd = kzalloc(sizeof(struct backlight_device), GFP_KERNEL);
    if (!new_bd)
        return ERR_PTR(-ENOMEM);

    mutex_init(&new_bd->update_lock);
    mutex_init(&new_bd->ops_lock);

    new_bd->dev.class = backlight_class;
    new_bd->dev.parent = parent;
    new_bd->dev.release = bl_device_release;
    dev_set_name(&new_bd->dev, "%s", name);
    dev_set_drvdata(&new_bd->dev, devdata);

    /* Set default properties */
    if (props) {
        memcpy(&new_bd->props, props,
               sizeof(struct backlight_properties));
        if (props->type <= 0 || props->type >= BACKLIGHT_TYPE_MAX) {
            WARN(1, "%s: invalid backlight type", name);
            new_bd->props.type = BACKLIGHT_RAW;
        }
    } else {
        new_bd->props.type = BACKLIGHT_RAW;
    }

    rc = device_register(&new_bd->dev);
    if (rc) {
        /* device_register failed before release() could own new_bd */
        kfree(new_bd);
        return ERR_PTR(rc);
    }

    rc = backlight_register_fb(new_bd);
    if (rc) {
        device_unregister(&new_bd->dev);
        return ERR_PTR(rc);
    }

    new_bd->ops = ops;

#ifdef CONFIG_PMAC_BACKLIGHT
    mutex_lock(&pmac_backlight_mutex);
    if (!pmac_backlight)
        pmac_backlight = new_bd;
    mutex_unlock(&pmac_backlight_mutex);
#endif

    mutex_lock(&backlight_dev_list_mutex);
    list_add(&new_bd->entry, &backlight_dev_list);
    mutex_unlock(&backlight_dev_list_mutex);

    return new_bd;
}
EXPORT_SYMBOL(backlight_device_register);

/*
 * backlight_device_registered - check whether any backlight of @type is
 * currently registered.  Takes the registry mutex; safe against
 * concurrent register/unregister.
 */
bool backlight_device_registered(enum backlight_type type)
{
    bool found = false;
    struct backlight_device *bd;

    mutex_lock(&backlight_dev_list_mutex);
    list_for_each_entry(bd, &backlight_dev_list, entry) {
        if (bd->props.type == type) {
            found = true;
            break;
        }
    }
    mutex_unlock(&backlight_dev_list_mutex);

    return found;
}
EXPORT_SYMBOL(backlight_device_registered);

/**
 * backlight_device_unregister - unregisters a backlight device object.
 * @bd: the backlight device object to be unregistered and freed.
 *
 * Unregisters a previously registered via backlight_device_register object.
 */
void backlight_device_unregister(struct backlight_device *bd)
{
    if (!bd)
        return;

    mutex_lock(&backlight_dev_list_mutex);
    list_del(&bd->entry);
    mutex_unlock(&backlight_dev_list_mutex);

#ifdef CONFIG_PMAC_BACKLIGHT
    mutex_lock(&pmac_backlight_mutex);
    if (pmac_backlight == bd)
        pmac_backlight = NULL;
    mutex_unlock(&pmac_backlight_mutex);
#endif

    /* clear ops under the lock so in-flight sysfs/fb callbacks see NULL */
    mutex_lock(&bd->ops_lock);
    bd->ops = NULL;
    mutex_unlock(&bd->ops_lock);

    backlight_unregister_fb(bd);
    device_unregister(&bd->dev);
}
EXPORT_SYMBOL(backlight_device_unregister);

/* devres destructor for devm_backlight_device_register() */
static void devm_backlight_device_release(struct device *dev, void *res)
{
    struct backlight_device *backlight = *(struct backlight_device **)res;

    backlight_device_unregister(backlight);
}

/* devres matcher: does this resource wrap the given backlight device? */
static int devm_backlight_device_match(struct device *dev, void *res,
                    void *data)
{
    struct backlight_device **r = res;

    return *r == data;
}

/**
 * devm_backlight_device_register - resource managed backlight_device_register()
 * @dev: the device to register
 * @name: the name of the device
 * @parent: a pointer to the parent device
 * @devdata: an optional pointer to be stored for private driver use
 * @ops: the backlight operations structure
 * @props: the backlight properties
 *
 * @return a struct backlight on success, or an ERR_PTR on error
 *
 * Managed backlight_device_register(). The backlight_device returned
 * from this function are automatically freed on driver detach.
 * See backlight_device_register() for more information.
 */
struct backlight_device *devm_backlight_device_register(struct device *dev,
    const char *name, struct device *parent, void *devdata,
    const struct backlight_ops *ops,
    const struct backlight_properties *props)
{
    struct backlight_device **ptr, *backlight;

    ptr = devres_alloc(devm_backlight_device_release, sizeof(*ptr),
               GFP_KERNEL);
    if (!ptr)
        return ERR_PTR(-ENOMEM);

    backlight = backlight_device_register(name, parent, devdata, ops,
                        props);
    if (!IS_ERR(backlight)) {
        *ptr = backlight;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return backlight;
}
EXPORT_SYMBOL(devm_backlight_device_register);

/**
 * devm_backlight_device_unregister - resource managed backlight_device_unregister()
 * @dev: the device to unregister
 * @bd: the backlight device to unregister
 *
 * Deallocated a backlight allocated with devm_backlight_device_register().
 * Normally this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_backlight_device_unregister(struct device *dev,
                struct backlight_device *bd)
{
    int rc;

    rc = devres_release(dev, devm_backlight_device_release,
                devm_backlight_device_match, bd);
    WARN_ON(rc);
}
EXPORT_SYMBOL(devm_backlight_device_unregister);

#ifdef CONFIG_OF
/* match a class device whose parent was probed from the given DT node */
static int of_parent_match(struct device *dev, const void *data)
{
    return dev->parent && dev->parent->of_node == data;
}

/**
 * of_find_backlight_by_node() - find backlight device by device-tree node
 * @node: device-tree node of the backlight device
 *
 * Returns a pointer to the backlight device corresponding to the given DT
 * node or NULL if no such backlight device exists or if the device hasn't
 * been probed yet.
 *
 * This function obtains a reference on the backlight device and it is the
 * caller's responsibility to drop the reference by calling put_device() on
 * the backlight device's .dev field.
 */
struct backlight_device *of_find_backlight_by_node(struct device_node *node)
{
    struct device *dev;

    dev = class_find_device(backlight_class, NULL, node, of_parent_match);

    return dev ? to_backlight_device(dev) : NULL;
}
EXPORT_SYMBOL(of_find_backlight_by_node);
#endif

static void __exit backlight_class_exit(void)
{
    class_destroy(backlight_class);
}

/* Create the "backlight" class and install its sysfs groups and PM ops. */
static int __init backlight_class_init(void)
{
    backlight_class = class_create(THIS_MODULE, "backlight");
    if (IS_ERR(backlight_class)) {
        pr_warn("Unable to create backlight class; errno = %ld\n",
            PTR_ERR(backlight_class));
        return PTR_ERR(backlight_class);
    }

    backlight_class->dev_groups = bl_device_groups;
    backlight_class->pm = &backlight_class_dev_pm_ops;
    INIT_LIST_HEAD(&backlight_dev_list);
    mutex_init(&backlight_dev_list_mutex);
    return 0;
}

/*
 * if this is compiled into the kernel, we need to ensure that the
 * class is registered before users of the class try to register lcd's
 */
postcore_initcall(backlight_class_init);
module_exit(backlight_class_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamey Hicks <jamey.hicks@hp.com>, Andrew Zabolotny <zap@homelink.ru>");
MODULE_DESCRIPTION("Backlight Lowlevel Control Abstraction");
gpl-2.0